text_prompt: string, lengths 100 to 17.7k
code_prompt: string, lengths 7 to 9.86k
<SYSTEM_TASK:> Load a DataFrame of data from a Bank of Canada site. <END_TASK> <USER_TASK:> Description: def load_frame(url, skiprows): """ Load a DataFrame of data from a Bank of Canada site. """
return pd.read_csv( url, skiprows=skiprows, skipinitialspace=True, na_values=["Bank holiday", "Not available"], parse_dates=["Date"], index_col="Date", ).dropna(how='all') \ .tz_localize('UTC') \ .rename(columns=COLUMN_NAMES)
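The same pd.read_csv options can be exercised on an in-memory CSV. A minimal sketch (the column name and values below are hypothetical stand-ins, not the real Bank of Canada download) showing how the holiday placeholders become NaN, all-NaN rows are dropped, and the index is localized to UTC:

```python
import io
import pandas as pd

# Hypothetical CSV shaped like the download: a Date column plus one rate
# column, with "Bank holiday" marking days with no observation.
csv_text = """Date,rate
2021-01-01,Bank holiday
2021-01-04,0.15
2021-01-05,0.16
"""

frame = pd.read_csv(
    io.StringIO(csv_text),
    skipinitialspace=True,
    na_values=["Bank holiday", "Not available"],
    parse_dates=["Date"],
    index_col="Date",
).dropna(how='all').tz_localize('UTC')

print(frame)  # 2021-01-01 is dropped because its only value parsed as NaN
```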
<SYSTEM_TASK:> There are a couple quirks in the data provided by Bank of Canada. <END_TASK> <USER_TASK:> Description: def check_known_inconsistencies(bill_data, bond_data): """ There are a couple quirks in the data provided by Bank of Canada. Check that no new quirks have been introduced in the latest download. """
inconsistent_dates = bill_data.index.sym_diff(bond_data.index) known_inconsistencies = [ # bill_data has an entry for 2010-02-15, which bond_data doesn't. # bond_data has an entry for 2006-09-04, which bill_data doesn't. # Both of these dates are bank holidays (Flag Day and Labor Day, # respectively). pd.Timestamp('2006-09-04', tz='UTC'), pd.Timestamp('2010-02-15', tz='UTC'), # 2013-07-25 comes back as "Not available" from the bills endpoint. # This date doesn't seem to be a bank holiday, but the previous # calendar implementation dropped this entry, so we drop it as well. # If someone cares deeply about the integrity of the Canadian trading # calendar, they may want to consider forward-filling here rather than # dropping the row. pd.Timestamp('2013-07-25', tz='UTC'), ] unexpected_inconsistences = inconsistent_dates.drop(known_inconsistencies) if len(unexpected_inconsistences): in_bills = bill_data.index.difference(bond_data.index).difference( known_inconsistencies ) in_bonds = bond_data.index.difference(bill_data.index).difference( known_inconsistencies ) raise ValueError( "Inconsistent dates for Canadian treasury bills vs bonds. \n" "Dates with bills but not bonds: {in_bills}.\n" "Dates with bonds but not bills: {in_bonds}.".format( in_bills=in_bills, in_bonds=in_bonds, ) )
<SYSTEM_TASK:> The earliest date for which we can load data from this module. <END_TASK> <USER_TASK:> Description: def earliest_possible_date(): """ The earliest date for which we can load data from this module. """
today = pd.Timestamp('now', tz='UTC').normalize() # Bank of Canada only has the last 10 years of data at any given time. return today.replace(year=today.year - 10)
<SYSTEM_TASK:> Checks whether the fill price is worse than the order's limit price. <END_TASK> <USER_TASK:> Description: def fill_price_worse_than_limit_price(fill_price, order): """ Checks whether the fill price is worse than the order's limit price. Parameters ---------- fill_price: float The price to check. order: zipline.finance.order.Order The order whose limit price to check. Returns ------- bool: Whether the fill price is above the limit price (for a buy) or below the limit price (for a sell). """
if order.limit: # this is tricky! if an order with a limit price has reached # the limit price, we will try to fill the order. do not fill # these shares if the impacted price is worse than the limit # price. return early to avoid creating the transaction. # buy order is worse if the impacted price is greater than # the limit price. sell order is worse if the impacted price # is less than the limit price if (order.direction > 0 and fill_price > order.limit) or \ (order.direction < 0 and fill_price < order.limit): return True return False
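A quick worked check of that rule, using a hypothetical minimal stand-in for the order object (the real zipline Order carries more state; only `limit` and `direction` matter here):

```python
from collections import namedtuple

# Hypothetical minimal order: direction > 0 means buy, < 0 means sell.
FakeOrder = namedtuple('FakeOrder', ['limit', 'direction'])

def fill_price_worse_than_limit_price(fill_price, order):
    if order.limit:
        if (order.direction > 0 and fill_price > order.limit) or \
                (order.direction < 0 and fill_price < order.limit):
            return True
    return False

buy = FakeOrder(limit=10.0, direction=1)
sell = FakeOrder(limit=10.0, direction=-1)

print(fill_price_worse_than_limit_price(10.05, buy))   # True: paying above the buy limit
print(fill_price_worse_than_limit_price(9.95, buy))    # False: filled below the limit
print(fill_price_worse_than_limit_price(9.95, sell))   # True: selling below the sell limit
```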
<SYSTEM_TASK:> Internal utility method to return the trailing mean volume over the <END_TASK> <USER_TASK:> Description: def _get_window_data(self, data, asset, window_length): """ Internal utility method to return the trailing mean volume over the past 'window_length' days, and volatility of close prices for a specific asset. Parameters ---------- data : The BarData from which to fetch the daily windows. asset : The Asset whose data we are fetching. window_length : Number of days of history used to calculate the mean volume and close price volatility. Returns ------- (mean volume, volatility) """
try: values = self._window_data_cache.get(asset, data.current_session) except KeyError: try: # Add a day because we want 'window_length' complete days, # excluding the current day. volume_history = data.history( asset, 'volume', window_length + 1, '1d', ) close_history = data.history( asset, 'close', window_length + 1, '1d', ) except HistoryWindowStartsBeforeData: # If there is not enough data to do a full history call, return # values as if there was no data. return 0, np.NaN # Exclude the first value of the percent change array because it is # always just NaN. close_volatility = close_history[:-1].pct_change()[1:].std( skipna=False, ) values = { 'volume': volume_history[:-1].mean(), 'close': close_volatility * SQRT_252, } self._window_data_cache.set(asset, values, data.current_session) return values['volume'], values['close']
<SYSTEM_TASK:> Check that value is a valid categorical missing_value. <END_TASK> <USER_TASK:> Description: def _assert_valid_categorical_missing_value(value): """ Check that value is a valid categorical missing_value. Raises a TypeError if the value cannot be used as the missing_value for a categorical_dtype Term. """
label_types = LabelArray.SUPPORTED_SCALAR_TYPES if not isinstance(value, label_types): raise TypeError( "Categorical terms must have missing values of type " "{types}.".format( types=' or '.join([t.__name__ for t in label_types]), ) )
<SYSTEM_TASK:> Return the identity of the Term that would be constructed from the <END_TASK> <USER_TASK:> Description: def _static_identity(cls, domain, dtype, missing_value, window_safe, ndim, params): """ Return the identity of the Term that would be constructed from the given arguments. Identities that compare equal will cause us to return a cached instance rather than constructing a new one. We do this primarily because it makes dependency resolution easier. This is a classmethod so that it can be called from Term.__new__ to determine whether to produce a new instance. """
return (cls, domain, dtype, missing_value, window_safe, ndim, params)
<SYSTEM_TASK:> The number of extra rows needed for each of our inputs to compute this <END_TASK> <USER_TASK:> Description: def dependencies(self): """ The number of extra rows needed for each of our inputs to compute this term. """
extra_input_rows = max(0, self.window_length - 1) out = {} for term in self.inputs: out[term] = extra_input_rows out[self.mask] = 0 return out
<SYSTEM_TASK:> Called with a column of the result of a pipeline. This needs to put <END_TASK> <USER_TASK:> Description: def to_workspace_value(self, result, assets): """ Called with a column of the result of a pipeline. This needs to put the data into a format that can be used in a workspace to continue doing computations. Parameters ---------- result : pd.Series A multiindexed series with (dates, assets) whose values are the results of running this pipeline term over the dates. assets : pd.Index All of the assets being requested. This allows us to correctly shape the workspace value. Returns ------- workspace_value : array-like An array like value that the engine can consume. """
return result.unstack().fillna(self.missing_value).reindex( columns=assets, fill_value=self.missing_value, ).values
<SYSTEM_TASK:> Register the number of shares we held at this dividend's ex date so <END_TASK> <USER_TASK:> Description: def earn_stock_dividend(self, stock_dividend): """ Register the number of shares we held at this dividend's ex date so that we can pay out the correct amount on the dividend's pay date. """
return { 'payment_asset': stock_dividend.payment_asset, 'share_count': np.floor( self.amount * float(stock_dividend.ratio) ) }
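To make the share-count arithmetic concrete, with hypothetical numbers (not from the source): holding 100 shares through the ex date of a 0.5-ratio stock dividend earns floor(100 * 0.5) shares of the payment asset.

```python
import numpy as np

shares_held = 100            # hypothetical position size at the ex date
stock_dividend_ratio = 0.5   # hypothetical new shares paid per share held

print(np.floor(shares_held * float(stock_dividend_ratio)))  # 50.0 shares of the payment asset
```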
<SYSTEM_TASK:> Update the position by the split ratio, and return the resulting <END_TASK> <USER_TASK:> Description: def handle_split(self, asset, ratio): """ Update the position by the split ratio, and return the resulting fractional share that will be converted into cash. Returns the unused cash. """
if self.asset != asset: raise Exception("updating split with the wrong asset!") # adjust the # of shares by the ratio # (if we had 100 shares, and the ratio is 3, # we now have 33 shares) # (old_share_count / ratio = new_share_count) # (old_price * ratio = new_price) # e.g., 33.333 raw_share_count = self.amount / float(ratio) # e.g., 33 full_share_count = np.floor(raw_share_count) # e.g., 0.333 fractional_share_count = raw_share_count - full_share_count # adjust the cost basis to the nearest cent, e.g., 60.0 new_cost_basis = round(self.cost_basis * ratio, 2) self.cost_basis = new_cost_basis self.amount = full_share_count return_cash = round(float(fractional_share_count * new_cost_basis), 2) log.info("after split: " + str(self)) log.info("returning cash: " + str(return_cash)) # return the leftover cash, which will be converted into cash # (rounded to the nearest cent) return return_cash
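The split arithmetic can be isolated in a standalone sketch (hypothetical numbers; the real method also mutates position state and logs):

```python
import numpy as np

def split_position(amount, cost_basis, ratio):
    """Return (new_amount, new_cost_basis, leftover_cash) after a split."""
    raw_share_count = amount / float(ratio)                       # e.g. 100 / 3 = 33.333...
    full_share_count = np.floor(raw_share_count)                  # e.g. 33
    fractional_share_count = raw_share_count - full_share_count   # e.g. 0.333...
    new_cost_basis = round(cost_basis * ratio, 2)                 # price scales up by the ratio
    leftover_cash = round(float(fractional_share_count * new_cost_basis), 2)
    return full_share_count, new_cost_basis, leftover_cash

print(split_position(amount=100, cost_basis=20.0, ratio=3))
# (33.0, 60.0, 20.0): the 0.333... fractional share times the 60.00 new cost
# basis is returned as roughly 20.00 in cash.
```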
<SYSTEM_TASK:> Used to mark a function as deprecated. <END_TASK> <USER_TASK:> Description: def deprecated(msg=None, stacklevel=2): """ Used to mark a function as deprecated. Parameters ---------- msg : str The message to display in the deprecation warning. stacklevel : int How far up the stack the warning needs to go, before showing the relevant calling lines. Examples -------- @deprecated(msg='function_a is deprecated! Use function_b instead.') def function_a(*args, **kwargs): """
def deprecated_dec(fn): @wraps(fn) def wrapper(*args, **kwargs): warnings.warn( msg or "Function %s is deprecated." % fn.__name__, category=DeprecationWarning, stacklevel=stacklevel ) return fn(*args, **kwargs) return wrapper return deprecated_dec
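A small usage sketch of the decorator defined above, restated here so it runs standalone (the decorated function name is just for the demo):

```python
import warnings
from functools import wraps

def deprecated(msg=None, stacklevel=2):
    def deprecated_dec(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(
                msg or "Function %s is deprecated." % fn.__name__,
                category=DeprecationWarning,
                stacklevel=stacklevel,
            )
            return fn(*args, **kwargs)
        return wrapper
    return deprecated_dec

@deprecated(msg='old_sum is deprecated! Use sum() instead.')
def old_sum(xs):
    return sum(xs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(old_sum([1, 2, 3]))            # 6 -- the wrapped function still works
    print(caught[0].category.__name__)   # DeprecationWarning
    print(caught[0].message)             # old_sum is deprecated! Use sum() instead.
```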
<SYSTEM_TASK:> Get the Float64Multiply objects to pass to an AdjustedArrayWindow. <END_TASK> <USER_TASK:> Description: def _get_adjustments_in_range(self, asset, dts, field): """ Get the Float64Multiply objects to pass to an AdjustedArrayWindow. For the use of AdjustedArrayWindow in the loader, which looks back from current simulation time back to a window of data the dictionary is structured with: - the key into the dictionary for adjustments is the location of the day from which the window is being viewed. - the start of all multiply objects is always 0 (in each window all adjustments are overlapping) - the end of the multiply object is the location before the calendar location of the adjustment action, making all days before the event adjusted. Parameters ---------- asset : Asset The assets for which to get adjustments. dts : iterable of datetime64-like The dts for which adjustment data is needed. field : str OHLCV field for which to get the adjustments. Returns ------- out : dict[loc -> Float64Multiply] The adjustments as a dict of loc -> Float64Multiply """
sid = int(asset) start = normalize_date(dts[0]) end = normalize_date(dts[-1]) adjs = {} if field != 'volume': mergers = self._adjustments_reader.get_adjustments_for_sid( 'mergers', sid) for m in mergers: dt = m[0] if start < dt <= end: end_loc = dts.searchsorted(dt) adj_loc = end_loc mult = Float64Multiply(0, end_loc - 1, 0, 0, m[1]) try: adjs[adj_loc].append(mult) except KeyError: adjs[adj_loc] = [mult] divs = self._adjustments_reader.get_adjustments_for_sid( 'dividends', sid) for d in divs: dt = d[0] if start < dt <= end: end_loc = dts.searchsorted(dt) adj_loc = end_loc mult = Float64Multiply(0, end_loc - 1, 0, 0, d[1]) try: adjs[adj_loc].append(mult) except KeyError: adjs[adj_loc] = [mult] splits = self._adjustments_reader.get_adjustments_for_sid( 'splits', sid) for s in splits: dt = s[0] if start < dt <= end: if field == 'volume': ratio = 1.0 / s[1] else: ratio = s[1] end_loc = dts.searchsorted(dt) adj_loc = end_loc mult = Float64Multiply(0, end_loc - 1, 0, 0, ratio) try: adjs[adj_loc].append(mult) except KeyError: adjs[adj_loc] = [mult] return adjs
<SYSTEM_TASK:> A window of pricing data with adjustments applied assuming that the <END_TASK> <USER_TASK:> Description: def history(self, assets, dts, field, is_perspective_after): """ A window of pricing data with adjustments applied assuming that the end of the window is the day before the current simulation time. Parameters ---------- assets : iterable of Assets The assets in the window. dts : iterable of datetime64-like The datetimes for which to fetch data. Makes an assumption that all dts are present and contiguous, in the calendar. field : str The OHLCV field for which to retrieve data. is_perspective_after : bool True, if the window is being viewed immediately after the last dt in the sliding window. False, if the window is viewed on the last dt. This flag is used for handling the case where the last dt in the requested window immediately precedes a corporate action, e.g.: - is_perspective_after is True When the viewpoint is after the last dt in the window, as when a daily history window is accessed from a simulation that uses a minute data frequency, the history call to this loader will not include the current simulation dt. At that point in time, the raw data for the last day in the window will require adjustment, so the most recent adjustment with respect to the simulation time is applied to the last dt in the requested window. An example equity which has a 0.5 split ratio dated for 05-27, with the dts for a history call of 5 bars with a '1d' frequency at 05-27 9:31. Simulation frequency is 'minute'. (In this case this function is called with 4 daily dts, and the calling function is responsible for stitching back on the 'current' dt) | | | | | last dt | <-- viewer is here | | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 | | raw | 10.10 | 10.20 | 10.30 | 10.40 | | | adj | 5.05 | 5.10 | 5.15 | 5.25 | | The adjustment is applied to the last dt, 05-26, and all previous dts. - is_perspective_after is False, daily When the viewpoint is the same point in time as the last dt in the window, as when a daily history window is accessed from a simulation that uses a daily data frequency, the history call will include the current dt. At that point in time, the raw data for the last day in the window will be post-adjustment, so no adjustment is applied to the last dt. An example equity which has a 0.5 split ratio dated for 05-27, with the dts for a history call of 5 bars with a '1d' frequency at 05-27 0:00. Simulation frequency is 'daily'. | | | | | | <-- viewer is here | | | | | | | last dt | | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 | | raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 | | adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 | Adjustments are applied 05-23 through 05-26 but not to the last dt, 05-27 Returns ------- out : np.ndarray with shape(len(days between start, end), len(assets)) """
block = self._ensure_sliding_windows(assets, dts, field, is_perspective_after) end_ix = self._calendar.searchsorted(dts[-1]) return concatenate( [window.get(end_ix) for window in block], axis=1, )
<SYSTEM_TASK:> Attempt to find a unique asset whose symbol is the given string. <END_TASK> <USER_TASK:> Description: def _lookup_unconflicted_symbol(self, symbol): """ Attempt to find a unique asset whose symbol is the given string. If multiple assets have held the given symbol, return a 0. If no asset has held the given symbol, return a NaN. """
try: uppered = symbol.upper() except AttributeError: # The mapping fails because symbol was a non-string return numpy.nan try: return self.finder.lookup_symbol( uppered, as_of_date=None, country_code=self.country_code, ) except MultipleSymbolsFound: # Fill conflicted entries with zeros to mark that they need to be # resolved by date. return 0 except SymbolNotFound: # Fill not found entries with nans. return numpy.nan
<SYSTEM_TASK:> Clear out any assets that have expired before starting a new sim day. <END_TASK> <USER_TASK:> Description: def _cleanup_expired_assets(self, dt, position_assets): """ Clear out any assets that have expired before starting a new sim day. Performs two functions: 1. Finds all assets for which we have open orders and clears any orders whose assets are on or after their auto_close_date. 2. Finds all assets for which we have positions and generates close_position events for any assets that have reached their auto_close_date. """
algo = self.algo def past_auto_close_date(asset): acd = asset.auto_close_date return acd is not None and acd <= dt # Remove positions in any sids that have reached their auto_close date. assets_to_clear = \ [asset for asset in position_assets if past_auto_close_date(asset)] metrics_tracker = algo.metrics_tracker data_portal = self.data_portal for asset in assets_to_clear: metrics_tracker.process_close_position(asset, dt, data_portal) # Remove open orders for any sids that have reached their auto close # date. These orders get processed immediately because otherwise they # would not be processed until the first bar of the next day. blotter = algo.blotter assets_to_cancel = [ asset for asset in blotter.open_orders if past_auto_close_date(asset) ] for asset in assets_to_cancel: blotter.cancel_all_orders_for_asset(asset) # Make a copy here so that we are not modifying the list that is being # iterated over. for order in copy(blotter.new_orders): if order.status == ORDER_STATUS.CANCELLED: metrics_tracker.process_order(order) blotter.new_orders.remove(order)
<SYSTEM_TASK:> Load collection of Adjustment objects from underlying adjustments db. <END_TASK> <USER_TASK:> Description: def load_adjustments(self, dates, assets, should_include_splits, should_include_mergers, should_include_dividends, adjustment_type): """ Load collection of Adjustment objects from underlying adjustments db. Parameters ---------- dates : pd.DatetimeIndex Dates for which adjustments are needed. assets : pd.Int64Index Assets for which adjustments are needed. should_include_splits : bool Whether split adjustments should be included. should_include_mergers : bool Whether merger adjustments should be included. should_include_dividends : bool Whether dividend adjustments should be included. adjustment_type : str Whether price adjustments, volume adjustments, or both, should be included in the output. Returns ------- adjustments : dict[str -> dict[int -> Adjustment]] A dictionary containing price and/or volume adjustment mappings from index to adjustment objects to apply at that index. """
return load_adjustments_from_sqlite( self.conn, dates, assets, should_include_splits, should_include_mergers, should_include_dividends, adjustment_type, )
<SYSTEM_TASK:> Returns the set of known tables in the adjustments file in DataFrame <END_TASK> <USER_TASK:> Description: def unpack_db_to_component_dfs(self, convert_dates=False): """Returns the set of known tables in the adjustments file in DataFrame form. Parameters ---------- convert_dates : bool, optional By default, dates are returned in seconds since EPOCH. If convert_dates is True, all ints in date columns will be converted to datetimes. Returns ------- dfs : dict{str->DataFrame} Dictionary which maps table name to the corresponding DataFrame version of the table, where all date columns have been coerced back from int to datetime. """
return { t_name: self.get_df_from_table(t_name, convert_dates) for t_name in self._datetime_int_cols }
<SYSTEM_TASK:> Get dtypes to use when unpacking sqlite tables as dataframes. <END_TASK> <USER_TASK:> Description: def _df_dtypes(self, table_name, convert_dates): """Get dtypes to use when unpacking sqlite tables as dataframes. """
out = self._raw_table_dtypes[table_name] if convert_dates: out = out.copy() for date_column in self._datetime_int_cols[table_name]: out[date_column] = datetime64ns_dtype return out
<SYSTEM_TASK:> Calculate the ratios to apply to equities when looking back at pricing <END_TASK> <USER_TASK:> Description: def calc_dividend_ratios(self, dividends): """ Calculate the ratios to apply to equities when looking back at pricing history so that the price is smoothed over the ex_date, when the market adjusts to the change in equity value due to upcoming dividend. Returns ------- DataFrame A frame in the same format as splits and mergers, with keys - sid, the id of the equity - effective_date, the date in seconds on which to apply the ratio. - ratio, the ratio to apply to backwards looking pricing data. """
if dividends is None or dividends.empty: return pd.DataFrame(np.array( [], dtype=[ ('sid', uint64_dtype), ('effective_date', uint32_dtype), ('ratio', float64_dtype), ], )) pricing_reader = self._equity_daily_bar_reader input_sids = dividends.sid.values unique_sids, sids_ix = np.unique(input_sids, return_inverse=True) dates = pricing_reader.sessions.values close, = pricing_reader.load_raw_arrays( ['close'], pd.Timestamp(dates[0], tz='UTC'), pd.Timestamp(dates[-1], tz='UTC'), unique_sids, ) date_ix = np.searchsorted(dates, dividends.ex_date.values) mask = date_ix > 0 date_ix = date_ix[mask] sids_ix = sids_ix[mask] input_dates = dividends.ex_date.values[mask] # subtract one day to get the close on the day prior to the merger previous_close = close[date_ix - 1, sids_ix] input_sids = input_sids[mask] amount = dividends.amount.values[mask] ratio = 1.0 - amount / previous_close non_nan_ratio_mask = ~np.isnan(ratio) for ix in np.flatnonzero(~non_nan_ratio_mask): log.warn( "Couldn't compute ratio for dividend" " sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}", sid=input_sids[ix], ex_date=pd.Timestamp(input_dates[ix]), amount=amount[ix], ) positive_ratio_mask = ratio > 0 for ix in np.flatnonzero(~positive_ratio_mask & non_nan_ratio_mask): log.warn( "Dividend ratio <= 0 for dividend" " sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}", sid=input_sids[ix], ex_date=pd.Timestamp(input_dates[ix]), amount=amount[ix], ) valid_ratio_mask = non_nan_ratio_mask & positive_ratio_mask return pd.DataFrame({ 'sid': input_sids[valid_ratio_mask], 'effective_date': input_dates[valid_ratio_mask], 'ratio': ratio[valid_ratio_mask], })
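The core of the computation is the ratio formula from the docstring, ratio = 1 - amount / close_on_day_before_ex_date. A tiny numeric sketch with hypothetical prices and dividend amounts:

```python
import numpy as np

previous_close = np.array([50.0, 20.0, 8.0])   # close on the day before each ex_date
amount = np.array([1.00, 0.25, 10.0])          # cash dividend per share

ratio = 1.0 - amount / previous_close
print(ratio)  # ratios: 0.98, 0.9875, -0.25

# Ratios that are NaN or <= 0 (like the -0.25 above, where the dividend
# exceeds the prior close) are logged and dropped by the writer.
valid = ~np.isnan(ratio) & (ratio > 0)
print(ratio[valid])  # 0.98, 0.9875
```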
<SYSTEM_TASK:> Write both dividend payouts and the derived price adjustment ratios. <END_TASK> <USER_TASK:> Description: def write_dividend_data(self, dividends, stock_dividends=None): """ Write both dividend payouts and the derived price adjustment ratios. """
# First write the dividend payouts. self._write_dividends(dividends) self._write_stock_dividends(stock_dividends) # Second from the dividend payouts, calculate ratios. dividend_ratios = self.calc_dividend_ratios(dividends) self.write_frame('dividends', dividend_ratios)
<SYSTEM_TASK:> Writes data to a SQLite file to be read by SQLiteAdjustmentReader. <END_TASK> <USER_TASK:> Description: def write(self, splits=None, mergers=None, dividends=None, stock_dividends=None): """ Writes data to a SQLite file to be read by SQLiteAdjustmentReader. Parameters ---------- splits : pandas.DataFrame, optional Dataframe containing split data. The format of this dataframe is: effective_date : int The date, represented as seconds since Unix epoch, on which the adjustment should be applied. ratio : float A value to apply to all data earlier than the effective date. For open, high, low, and close those values are multiplied by the ratio. Volume is divided by this value. sid : int The asset id associated with this adjustment. mergers : pandas.DataFrame, optional DataFrame containing merger data. The format of this dataframe is: effective_date : int The date, represented as seconds since Unix epoch, on which the adjustment should be applied. ratio : float A value to apply to all data earlier than the effective date. For open, high, low, and close those values are multiplied by the ratio. Volume is unaffected. sid : int The asset id associated with this adjustment. dividends : pandas.DataFrame, optional DataFrame containing dividend data. The format of the dataframe is: sid : int The asset id associated with this adjustment. ex_date : datetime64 The date on which an equity must be held to be eligible to receive payment. declared_date : datetime64 The date on which the dividend is announced to the public. pay_date : datetime64 The date on which the dividend is distributed. record_date : datetime64 The date on which the stock ownership is checked to determine distribution of dividends. amount : float The cash amount paid for each share. Dividend ratios are calculated as: ``1.0 - (dividend_value / "close on day prior to ex_date")`` stock_dividends : pandas.DataFrame, optional DataFrame containing stock dividend data. The format of the dataframe is: sid : int The asset id associated with this adjustment. ex_date : datetime64 The date on which an equity must be held to be eligible to receive payment. declared_date : datetime64 The date on which the dividend is announced to the public. pay_date : datetime64 The date on which the dividend is distributed. record_date : datetime64 The date on which the stock ownership is checked to determine distribution of dividends. payment_sid : int The asset id of the shares that should be paid instead of cash. ratio : float The ratio of currently held shares in the held sid that should be paid with new shares of the payment_sid. See Also -------- zipline.data.adjustments.SQLiteAdjustmentReader """
self.write_frame('splits', splits) self.write_frame('mergers', mergers) self.write_dividend_data(dividends, stock_dividends) # Use IF NOT EXISTS here to allow multiple writes if desired. self.conn.execute( "CREATE INDEX IF NOT EXISTS splits_sids " "ON splits(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS splits_effective_date " "ON splits(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS mergers_sids " "ON mergers(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS mergers_effective_date " "ON mergers(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_sid " "ON dividends(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_effective_date " "ON dividends(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividend_payouts_sid " "ON dividend_payouts(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date " "ON dividend_payouts(ex_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid " "ON stock_dividend_payouts(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date " "ON stock_dividend_payouts(ex_date)" )
<SYSTEM_TASK:> Override this method with a function that writes a value into `out`. <END_TASK> <USER_TASK:> Description: def compute(self, today, assets, out, *arrays): """ Override this method with a function that writes a value into `out`. """
raise NotImplementedError( "{name} must define a compute method".format( name=type(self).__name__ ) )
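For context, this is roughly what overriding `compute` looks like in a `CustomFactor` subclass. The sketch below assumes zipline's `CustomFactor` and `USEquityPricing` imports and is an illustration, not code from the source; the base-class `NotImplementedError` above is what you hit if a subclass forgets to define `compute`.

```python
import numpy as np
from zipline.pipeline import CustomFactor
from zipline.pipeline.data import USEquityPricing

class TenDayMeanClose(CustomFactor):
    # One trailing window of close prices per asset is passed to compute().
    inputs = [USEquityPricing.close]
    window_length = 10

    def compute(self, today, assets, out, closes):
        # `closes` has shape (window_length, n_assets); write one value
        # per asset into the preallocated `out` array.
        out[:] = np.nanmean(closes, axis=0)
```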
<SYSTEM_TASK:> Call the user's `compute` function on each window with a pre-built <END_TASK> <USER_TASK:> Description: def _compute(self, windows, dates, assets, mask): """ Call the user's `compute` function on each window with a pre-built output array. """
format_inputs = self._format_inputs compute = self.compute params = self.params ndim = self.ndim shape = (len(mask), 1) if ndim == 1 else mask.shape out = self._allocate_output(windows, shape) with self.ctx: for idx, date in enumerate(dates): # Never apply a mask to 1D outputs. out_mask = array([True]) if ndim == 1 else mask[idx] # Mask our inputs as usual. inputs_mask = mask[idx] masked_assets = assets[inputs_mask] out_row = out[idx][out_mask] inputs = format_inputs(windows, inputs_mask) compute(date, masked_assets, out_row, *inputs, **params) out[idx][out_mask] = out_row return out
<SYSTEM_TASK:> Ensure that min_extra_rows pushes us back to a computation date. <END_TASK> <USER_TASK:> Description: def compute_extra_rows(self, all_dates, start_date, end_date, min_extra_rows): """ Ensure that min_extra_rows pushes us back to a computation date. Parameters ---------- all_dates : pd.DatetimeIndex The trading sessions against which ``self`` will be computed. start_date : pd.Timestamp The first date for which final output is requested. end_date : pd.Timestamp The last date for which final output is requested. min_extra_rows : int The minimum number of extra rows required of ``self``, as determined by other terms that depend on ``self``. Returns ------- extra_rows : int The number of extra rows to compute. This will be the minimum number of rows required to make our computed start_date fall on a recomputation date. """
try: current_start_pos = all_dates.get_loc(start_date) - min_extra_rows if current_start_pos < 0: raise NoFurtherDataError.from_lookback_window( initial_message="Insufficient data to compute Pipeline:", first_date=all_dates[0], lookback_start=start_date, lookback_length=min_extra_rows, ) except KeyError: before, after = nearest_unequal_elements(all_dates, start_date) raise ValueError( "Pipeline start_date {start_date} is not in calendar.\n" "Latest date before start_date is {before}.\n" "Earliest date after start_date is {after}.".format( start_date=start_date, before=before, after=after, ) ) # Our possible target dates are all the dates on or before the current # starting position. # TODO: Consider bounding this below by self.window_length candidates = all_dates[:current_start_pos + 1] # Choose the latest date in the candidates that is the start of a new # period at our frequency. choices = select_sampling_indices(candidates, self._frequency) # If we have choices, the last choice is the first date if the # period containing current_start_date. Choose it. new_start_date = candidates[choices[-1]] # Add the difference between the new and old start dates to get the # number of rows for the new start_date. new_start_pos = all_dates.get_loc(new_start_date) assert new_start_pos <= current_start_pos, \ "Computed negative extra rows!" return min_extra_rows + (current_start_pos - new_start_pos)
<SYSTEM_TASK:> Compute by delegating to self._wrapped_term._compute on sample dates. <END_TASK> <USER_TASK:> Description: def _compute(self, inputs, dates, assets, mask): """ Compute by delegating to self._wrapped_term._compute on sample dates. On non-sample dates, forward-fill from previously-computed samples. """
to_sample = dates[select_sampling_indices(dates, self._frequency)] assert to_sample[0] == dates[0], \ "Misaligned sampling dates in %s." % type(self).__name__ real_compute = self._wrapped_term._compute # Inputs will contain different kinds of values depending on whether or # not we're a windowed computation. # If we're windowed, then `inputs` is a list of iterators of ndarrays. # If we're not windowed, then `inputs` is just a list of ndarrays. # There are two things we care about doing with the input: # 1. Preparing an input to be passed to our wrapped term. # 2. Skipping an input if we're going to use an already-computed row. # We perform these actions differently based on the expected kind of # input, and we encapsulate these actions with closures so that we # don't clutter the code below with lots of branching. if self.windowed: # If we're windowed, inputs are stateful AdjustedArrays. We don't # need to do any preparation before forwarding to real_compute, but # we need to call `next` on them if we want to skip an iteration. def prepare_inputs(): return inputs def skip_this_input(): for w in inputs: next(w) else: # If we're not windowed, inputs are just ndarrays. We need to # slice out a single row when forwarding to real_compute, but we # don't need to do anything to skip an input. def prepare_inputs(): # i is the loop iteration variable below. return [a[[i]] for a in inputs] def skip_this_input(): pass results = [] samples = iter(to_sample) next_sample = next(samples) for i, compute_date in enumerate(dates): if next_sample == compute_date: results.append( real_compute( prepare_inputs(), dates[i:i + 1], assets, mask[i:i + 1], ) ) try: next_sample = next(samples) except StopIteration: # No more samples to take. Set next_sample to Nat, which # compares False with any other datetime. next_sample = pd_NaT else: skip_this_input() # Copy results from previous sample period. results.append(results[-1]) # We should have exhausted our sample dates. try: next_sample = next(samples) except StopIteration: pass else: raise AssertionError("Unconsumed sample date: %s" % next_sample) # Concatenate stored results. return vstack(results)
<SYSTEM_TASK:> Decorator that applies pre-processors to the arguments of a function before <END_TASK> <USER_TASK:> Description: def preprocess(*_unused, **processors): """ Decorator that applies pre-processors to the arguments of a function before calling the function. Parameters ---------- **processors : dict Map from argument name -> processor function. A processor function takes three arguments: (func, argname, argvalue). `func` is the the function for which we're processing args. `argname` is the name of the argument we're processing. `argvalue` is the value of the argument we're processing. Examples -------- >>> def _ensure_tuple(func, argname, arg): ... if isinstance(arg, tuple): ... return argvalue ... try: ... return tuple(arg) ... except TypeError: ... raise TypeError( ... "%s() expected argument '%s' to" ... " be iterable, but got %s instead." % ( ... func.__name__, argname, arg, ... ) ... ) ... >>> @preprocess(arg=_ensure_tuple) ... def foo(arg): ... return arg ... >>> foo([1, 2, 3]) (1, 2, 3) >>> foo("a") ('a',) >>> foo(2) Traceback (most recent call last): ... TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead. """
if _unused: raise TypeError("preprocess() doesn't accept positional arguments") def _decorator(f): args, varargs, varkw, defaults = argspec = getargspec(f) if defaults is None: defaults = () no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults)) args_defaults = list(zip(args, no_defaults + defaults)) if varargs: args_defaults.append((varargs, NO_DEFAULT)) if varkw: args_defaults.append((varkw, NO_DEFAULT)) argset = set(args) | {varargs, varkw} - {None} # Arguments can be declared as tuples in Python 2. if not all(isinstance(arg, str) for arg in args): raise TypeError( "Can't validate functions using tuple unpacking: %s" % (argspec,) ) # Ensure that all processors map to valid names. bad_names = viewkeys(processors) - argset if bad_names: raise TypeError( "Got processors for unknown arguments: %s." % bad_names ) return _build_preprocessed_function( f, processors, args_defaults, varargs, varkw, ) return _decorator
<SYSTEM_TASK:> Wrap a function in a processor that calls `f` on the argument before <END_TASK> <USER_TASK:> Description: def call(f): """ Wrap a function in a processor that calls `f` on the argument before passing it along. Useful for creating simple arguments to the `@preprocess` decorator. Parameters ---------- f : function Function accepting a single argument and returning a replacement. Examples -------- >>> @preprocess(x=call(lambda x: x + 1)) ... def foo(x): ... return x ... >>> foo(1) 2 """
@wraps(f) def processor(func, argname, arg): return f(arg) return processor
<SYSTEM_TASK:> Build a preprocessed function with the same signature as `func`. <END_TASK> <USER_TASK:> Description: def _build_preprocessed_function(func, processors, args_defaults, varargs, varkw): """ Build a preprocessed function with the same signature as `func`. Uses `exec` internally to build a function that actually has the same signature as `func. """
format_kwargs = {'func_name': func.__name__} def mangle(name): return 'a' + uuid4().hex + name format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__) def make_processor_assignment(arg, processor_name): template = "{arg} = {processor}({func}, '{arg}', {arg})" return template.format( arg=arg, processor=processor_name, func=mangled_funcname, ) exec_globals = {mangled_funcname: func, 'wraps': wraps} defaults_seen = 0 default_name_template = 'a' + uuid4().hex + '_%d' signature = [] call_args = [] assignments = [] star_map = { varargs: '*', varkw: '**', } def name_as_arg(arg): return star_map.get(arg, '') + arg for arg, default in args_defaults: if default is NO_DEFAULT: signature.append(name_as_arg(arg)) else: default_name = default_name_template % defaults_seen exec_globals[default_name] = default signature.append('='.join([name_as_arg(arg), default_name])) defaults_seen += 1 if arg in processors: procname = mangle('_processor_' + arg) exec_globals[procname] = processors[arg] assignments.append(make_processor_assignment(arg, procname)) call_args.append(name_as_arg(arg)) exec_str = dedent( """\ @wraps({wrapped_funcname}) def {func_name}({signature}): {assignments} return {wrapped_funcname}({call_args}) """ ).format( func_name=func.__name__, signature=', '.join(signature), assignments='\n '.join(assignments), wrapped_funcname=mangled_funcname, call_args=', '.join(call_args), ) compiled = compile( exec_str, func.__code__.co_filename, mode='exec', ) exec_locals = {} exec_(compiled, exec_globals, exec_locals) new_func = exec_locals[func.__name__] code = new_func.__code__ args = { attr: getattr(code, attr) for attr in dir(code) if attr.startswith('co_') } # Copy the firstlineno out of the underlying function so that exceptions # get raised with the correct traceback. # This also makes dynamic source inspection (like IPython `??` operator) # work as intended. try: # Try to get the pycode object from the underlying function. original_code = func.__code__ except AttributeError: try: # The underlying callable was not a function, try to grab the # `__func__.__code__` which exists on method objects. original_code = func.__func__.__code__ except AttributeError: # The underlying callable does not have a `__code__`. There is # nothing for us to correct. return new_func args['co_firstlineno'] = original_code.co_firstlineno new_func.__code__ = CodeType(*map(getitem(args), _code_argorder)) return new_func
<SYSTEM_TASK:> Get a Series of benchmark returns from IEX associated with `symbol`. <END_TASK> <USER_TASK:> Description: def get_benchmark_returns(symbol): """ Get a Series of benchmark returns from IEX associated with `symbol`. Default is `SPY`. Parameters ---------- symbol : str Benchmark symbol for which we're getting the returns. The data is provided by IEX (https://iextrading.com/), and we can get up to 5 years worth of data. """
r = requests.get( 'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(symbol) ) data = r.json() df = pd.DataFrame(data) df.index = pd.DatetimeIndex(df['date']) df = df['close'] return df.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]
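The interesting part is the tail transformation: sort by date, localize to UTC, and convert closes to daily returns while dropping the leading NaN. A self-contained sketch with made-up closes:

```python
import pandas as pd

# Made-up closes, deliberately out of order to show why sort_index matters.
closes = pd.Series(
    [101.0, 100.0, 99.99],
    index=pd.DatetimeIndex(['2018-01-03', '2018-01-02', '2018-01-04']),
)

returns = closes.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]
print(returns)
# Daily returns: roughly +1% on 2018-01-03 and -1% on 2018-01-04; the
# first row (2018-01-02) is dropped because its pct_change is NaN.
```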
<SYSTEM_TASK:> Surround `content` with the first and last characters of `delimiters`. <END_TASK> <USER_TASK:> Description: def delimit(delimiters, content): """ Surround `content` with the first and last characters of `delimiters`. >>> delimit('[]', "foo") # doctest: +SKIP '[foo]' >>> delimit('""', "foo") # doctest: +SKIP '"foo"' """
if len(delimiters) != 2: raise ValueError( "`delimiters` must be of length 2. Got %r" % delimiters ) return ''.join([delimiters[0], content, delimiters[1]])
<SYSTEM_TASK:> Draw `g` as a graph to `out`, in format `format`. <END_TASK> <USER_TASK:> Description: def _render(g, out, format_, include_asset_exists=False): """ Draw `g` as a graph to `out`, in format `format`. Parameters ---------- g : zipline.pipeline.graph.TermGraph Graph to render. out : file-like object format_ : str {'png', 'svg'} Output format. include_asset_exists : bool Whether to filter out `AssetExists()` nodes. """
graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'} cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'} in_nodes = g.loadable_terms out_nodes = list(g.outputs.values()) f = BytesIO() with graph(f, "G", **graph_attrs): # Write outputs cluster. with cluster(f, 'Output', labelloc='b', **cluster_attrs): for term in filter_nodes(include_asset_exists, out_nodes): add_term_node(f, term) # Write inputs cluster. with cluster(f, 'Input', **cluster_attrs): for term in filter_nodes(include_asset_exists, in_nodes): add_term_node(f, term) # Write intermediate results. for term in filter_nodes(include_asset_exists, topological_sort(g.graph)): if term in in_nodes or term in out_nodes: continue add_term_node(f, term) # Write edges for source, dest in g.graph.edges(): if source is AssetExists() and not include_asset_exists: continue add_edge(f, id(source), id(dest)) cmd = ['dot', '-T', format_] try: proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) except OSError as e: if e.errno == errno.ENOENT: raise RuntimeError( "Couldn't find `dot` graph layout program. " "Make sure Graphviz is installed and `dot` is on your path." ) else: raise f.seek(0) proc_stdout, proc_stderr = proc.communicate(f.read()) if proc_stderr: raise RuntimeError( "Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8') ) out.write(proc_stdout)
<SYSTEM_TASK:> Display a TermGraph interactively from within IPython. <END_TASK> <USER_TASK:> Description: def display_graph(g, format='svg', include_asset_exists=False): """ Display a TermGraph interactively from within IPython. """
try: import IPython.display as display except ImportError: raise NoIPython("IPython is not installed. Can't display graph.") if format == 'svg': display_cls = display.SVG elif format in ("jpeg", "png"): display_cls = partial(display.Image, format=format, embed=True) out = BytesIO() _render(g, out, format, include_asset_exists=include_asset_exists) return display_cls(data=out.getvalue())
<SYSTEM_TASK:> Format key, value pairs from attrs into graphviz attrs format <END_TASK> <USER_TASK:> Description: def format_attrs(attrs): """ Format key, value pairs from attrs into graphviz attrs format Examples -------- >>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP '[key1=value1, key2=value2]' """
if not attrs: return '' entries = ['='.join((key, value)) for key, value in iteritems(attrs)] return '[' + ', '.join(entries) + ']'
<SYSTEM_TASK:> Apply a function but emulate the API of an asynchronous call. <END_TASK> <USER_TASK:> Description: def apply_async(f, args=(), kwargs=None, callback=None): """Apply a function but emulate the API of an asynchronous call. Parameters ---------- f : callable The function to call. args : tuple, optional The positional arguments. kwargs : dict, optional The keyword arguments. Returns ------- future : ApplyAsyncResult The result of calling the function boxed in a future-like api. Notes ----- This calls the function eagerly but wraps it so that ``SequentialPool`` can be used where a :class:`multiprocessing.Pool` or :class:`gevent.pool.Pool` would be used. """
try: value = (identity if callback is None else callback)( f(*args, **kwargs or {}), ) successful = True except Exception as e: value = e successful = False return ApplyAsyncResult(value, successful)
<SYSTEM_TASK:> Optionally show a progress bar for the given iterator. <END_TASK> <USER_TASK:> Description: def maybe_show_progress(it, show_progress, **kwargs): """Optionally show a progress bar for the given iterator. Parameters ---------- it : iterable The underlying iterator. show_progress : bool Should progress be shown. **kwargs Forwarded to the click progress bar. Returns ------- itercontext : context manager A context manager whose enter is the actual iterator to use. Examples -------- .. code-block:: python with maybe_show_progress([1, 2, 3], True) as ns: for n in ns: ... """
if show_progress: return click.progressbar(it, **kwargs) # context manager that just returns `it` when we enter it return CallbackManager(lambda it=it: it)
<SYSTEM_TASK:> Mark that an option should only be exposed in IPython. <END_TASK> <USER_TASK:> Description: def ipython_only(option): """Mark that an option should only be exposed in IPython. Parameters ---------- option : decorator A click.option decorator. Returns ------- ipython_only_dec : decorator A decorator that correctly applies the argument even when not using IPython mode. """
if __IPYTHON__: return option argname = extract_option_object(option).name def d(f): @wraps(f) def _(*args, **kwargs): kwargs[argname] = None return f(*args, **kwargs) return _ return d
<SYSTEM_TASK:> Ingest the data for the given bundle. <END_TASK> <USER_TASK:> Description: def ingest(bundle, assets_version, show_progress): """Ingest the data for the given bundle. """
bundles_module.ingest( bundle, os.environ, pd.Timestamp.utcnow(), assets_version, show_progress, )
<SYSTEM_TASK:> Clean up data downloaded with the ingest command. <END_TASK> <USER_TASK:> Description: def clean(bundle, before, after, keep_last): """Clean up data downloaded with the ingest command. """
bundles_module.clean( bundle, before, after, keep_last, )
<SYSTEM_TASK:> List all of the available data bundles. <END_TASK> <USER_TASK:> Description: def bundles(): """List all of the available data bundles. """
for bundle in sorted(bundles_module.bundles.keys()): if bundle.startswith('.'): # hide the test data continue try: ingestions = list( map(text_type, bundles_module.ingestions_for_bundle(bundle)) ) except OSError as e: if e.errno != errno.ENOENT: raise ingestions = [] # If we got no ingestions, either because the directory didn't exist or # because there were no entries, print a single message indicating that # no ingestions have yet been made. for timestamp in ingestions or ["<no ingestions>"]: click.echo("%s %s" % (bundle, timestamp))
<SYSTEM_TASK:> Factory function for making binary operator methods on a Filter subclass. <END_TASK> <USER_TASK:> Description: def binary_operator(op): """ Factory function for making binary operator methods on a Filter subclass. Returns a function "binary_operator" suitable for implementing functions like __and__ or __or__. """
# When combining a Filter with a NumericalExpression, we use this # attrgetter instance to defer to the commuted interpretation of the # NumericalExpression operator. commuted_method_getter = attrgetter(method_name_for_op(op, commute=True)) def binary_operator(self, other): if isinstance(self, NumericalExpression): self_expr, other_expr, new_inputs = self.build_binary_op( op, other, ) return NumExprFilter.create( "({left}) {op} ({right})".format( left=self_expr, op=op, right=other_expr, ), new_inputs, ) elif isinstance(other, NumericalExpression): # NumericalExpression overrides numerical ops to correctly handle # merging of inputs. Look up and call the appropriate # right-binding operator with ourself as the input. return commuted_method_getter(other)(self) elif isinstance(other, Term): if other.dtype != bool_dtype: raise BadBinaryOperator(op, self, other) if self is other: return NumExprFilter.create( "x_0 {op} x_0".format(op=op), (self,), ) return NumExprFilter.create( "x_0 {op} x_1".format(op=op), (self, other), ) elif isinstance(other, int): # Note that this is true for bool as well return NumExprFilter.create( "x_0 {op} {constant}".format(op=op, constant=int(other)), binds=(self,), ) raise BadBinaryOperator(op, self, other) binary_operator.__doc__ = "Binary Operator: '%s'" % op return binary_operator
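Stripped of the zipline-specific branches, the pattern is an operator factory: a function that builds expression strings and is attached to a class as dunder methods. A simplified, self-contained analogy (toy classes, not zipline's actual types):

```python
class Expr(object):
    """Toy stand-in for a term carrying a numexpr-style expression string."""
    def __init__(self, expr):
        self.expr = expr

    def __repr__(self):
        return self.expr

def binary_operator(op):
    def method(self, other):
        # Parenthesize both sides so precedence survives string composition.
        return Expr("({left}) {op} ({right})".format(
            left=self.expr, op=op, right=other.expr,
        ))
    method.__doc__ = "Binary Operator: '%s'" % op
    return method

Expr.__and__ = binary_operator('&')
Expr.__or__ = binary_operator('|')

print(Expr("x_0 > 0") & Expr("x_1 < 5"))   # (x_0 > 0) & (x_1 < 5)
print(Expr("x_0") | Expr("x_1"))           # (x_0) | (x_1)
```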
<SYSTEM_TASK:> Factory function for making unary operator methods for Filters. <END_TASK> <USER_TASK:> Description: def unary_operator(op): """ Factory function for making unary operator methods for Filters. """
valid_ops = {'~'} if op not in valid_ops: raise ValueError("Invalid unary operator %s." % op) def unary_operator(self): # This can't be hoisted up a scope because the types returned by # unary_op_return_type aren't defined when the top-level function is # invoked. if isinstance(self, NumericalExpression): return NumExprFilter.create( "{op}({expr})".format(op=op, expr=self._expr), self.inputs, ) else: return NumExprFilter.create("{op}x_0".format(op=op), (self,)) unary_operator.__doc__ = "Unary Operator: '%s'" % op return unary_operator
<SYSTEM_TASK:> Helper for creating new NumExprFactors. <END_TASK> <USER_TASK:> Description: def create(cls, expr, binds): """ Helper for creating new NumExprFactors. This is just a wrapper around NumericalExpression.__new__ that always forwards `bool` as the dtype, since Filters can only be of boolean dtype. """
return cls(expr=expr, binds=binds, dtype=bool_dtype)
<SYSTEM_TASK:> Compute our result with numexpr, then re-apply `mask`. <END_TASK> <USER_TASK:> Description: def _compute(self, arrays, dates, assets, mask): """ Compute our result with numexpr, then re-apply `mask`. """
return super(NumExprFilter, self)._compute( arrays, dates, assets, mask, ) & mask
<SYSTEM_TASK:> Ensure that our percentile bounds are well-formed. <END_TASK> <USER_TASK:> Description: def _validate(self): """ Ensure that our percentile bounds are well-formed. """
if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0: raise BadPercentileBounds( min_percentile=self._min_percentile, max_percentile=self._max_percentile, upper_bound=100.0 ) return super(PercentileFilter, self)._validate()
<SYSTEM_TASK:> For each row in the input, compute a mask of all values falling between <END_TASK> <USER_TASK:> Description: def _compute(self, arrays, dates, assets, mask): """ For each row in the input, compute a mask of all values falling between the given percentiles. """
# TODO: Review whether there's a better way of handling small numbers # of columns. data = arrays[0].copy().astype(float64) data[~mask] = nan # FIXME: np.nanpercentile **should** support computing multiple bounds # at once, but there's a bug in the logic for multiple bounds in numpy # 1.9.2. It will be fixed in 1.10. # c.f. https://github.com/numpy/numpy/pull/5981 lower_bounds = nanpercentile( data, self._min_percentile, axis=1, keepdims=True, ) upper_bounds = nanpercentile( data, self._max_percentile, axis=1, keepdims=True, ) return (lower_bounds <= data) & (data <= upper_bounds)
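The row-wise masking logic can be reproduced with plain numpy (hypothetical data; in the real filter, `mask` additionally marks which (date, asset) cells are valid before the percentiles are taken):

```python
import numpy as np

data = np.array([
    [1.0, 2.0, 3.0, 4.0, np.nan],    # one row per date, one column per asset
    [10.0, 20.0, 30.0, 40.0, 50.0],
])

min_pct, max_pct = 25.0, 75.0
lower = np.nanpercentile(data, min_pct, axis=1, keepdims=True)
upper = np.nanpercentile(data, max_pct, axis=1, keepdims=True)

result = (lower <= data) & (data <= upper)
print(result)
# Each row keeps only the assets whose value fell between the 25th and
# 75th percentile of that row; NaNs always compare False and are excluded.
```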
<SYSTEM_TASK:> Parse a treasury CSV column into a more human-readable format. <END_TASK> <USER_TASK:> Description: def parse_treasury_csv_column(column): """ Parse a treasury CSV column into a more human-readable format. Columns start with 'RIFLGFC', followed by Y or M (year or month), followed by a two-digit number signifying number of years/months, followed by _N.B. We only care about the middle two entries, which we turn into a string like 3month or 30year. """
column_re = re.compile( r"^(?P<prefix>RIFLGFC)" "(?P<unit>[YM])" "(?P<periods>[0-9]{2})" "(?P<suffix>_N.B)$" ) match = column_re.match(column) if match is None: raise ValueError("Couldn't parse CSV column %r." % column) unit, periods = get_unit_and_periods(match.groupdict()) # Roundtrip through int to coerce '06' into '6'. return str(int(periods)) + ('year' if unit == 'Y' else 'month')
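A quick demonstration of the column-name parsing with the same regex (the example codes below simply follow the documented RIFLGFC scheme):

```python
import re

column_re = re.compile(
    r"^(?P<prefix>RIFLGFC)"
    "(?P<unit>[YM])"
    "(?P<periods>[0-9]{2})"
    "(?P<suffix>_N.B)$"
)

for column in ['RIFLGFCM03_N.B', 'RIFLGFCY02_N.B', 'RIFLGFCY30_N.B']:
    match = column_re.match(column)
    unit, periods = match.group('unit'), match.group('periods')
    # Round-trip through int to turn '03' into '3'.
    print(column, '->', str(int(periods)) + ('year' if unit == 'Y' else 'month'))

# RIFLGFCM03_N.B -> 3month
# RIFLGFCY02_N.B -> 2year
# RIFLGFCY30_N.B -> 30year
```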
<SYSTEM_TASK:> Download daily 10 year treasury rates from the Federal Reserve and <END_TASK> <USER_TASK:> Description: def get_daily_10yr_treasury_data(): """Download daily 10 year treasury rates from the Federal Reserve and return a pandas.Series."""
url = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \ "&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \ "&filetype=csv&label=include&layout=seriescolumn" return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'], parse_dates=True, converters={1: dataconverter}, squeeze=True)
<SYSTEM_TASK:> Format subdir path to limit the number directories in any given <END_TASK> <USER_TASK:> Description: def _sid_subdir_path(sid): """ Format subdir path to limit the number directories in any given subdirectory to 100. The number in each directory is designed to support at least 100000 equities. Parameters ---------- sid : int Asset identifier. Returns ------- out : string A path for the bcolz rootdir, including subdirectory prefixes based on the padded string representation of the given sid. e.g. 1 is formatted as 00/00/000001.bcolz """
padded_sid = format(sid, '06') return os.path.join( # subdir 1 00/XX padded_sid[0:2], # subdir 2 XX/00 padded_sid[2:4], "{0}.bcolz".format(str(padded_sid)) )
<SYSTEM_TASK:> Adapt OHLCV columns into uint32 columns. <END_TASK> <USER_TASK:> Description: def convert_cols(cols, scale_factor, sid, invalid_data_behavior): """Adapt OHLCV columns into uint32 columns. Parameters ---------- cols : dict A dict mapping each column name (open, high, low, close, volume) to a float column to convert to uint32. scale_factor : int Factor to use to scale float values before converting to uint32. sid : int Sid of the relevant asset, for logging. invalid_data_behavior : str Specifies behavior when data cannot be converted to uint32. If 'raise', raises an exception. If 'warn', logs a warning and filters out incompatible values. If 'ignore', silently filters out incompatible values. """
scaled_opens = (np.nan_to_num(cols['open']) * scale_factor).round() scaled_highs = (np.nan_to_num(cols['high']) * scale_factor).round() scaled_lows = (np.nan_to_num(cols['low']) * scale_factor).round() scaled_closes = (np.nan_to_num(cols['close']) * scale_factor).round() exclude_mask = np.zeros_like(scaled_opens, dtype=bool) for col_name, scaled_col in [ ('open', scaled_opens), ('high', scaled_highs), ('low', scaled_lows), ('close', scaled_closes), ]: max_val = scaled_col.max() try: check_uint32_safe(max_val, col_name) except ValueError: if invalid_data_behavior == 'raise': raise if invalid_data_behavior == 'warn': logger.warn( 'Values for sid={}, col={} contain some too large for ' 'uint32 (max={}), filtering them out', sid, col_name, max_val, ) # We want to exclude all rows that have an unsafe value in # this column. exclude_mask |= (scaled_col >= np.iinfo(np.uint32).max) # Convert all cols to uint32. opens = scaled_opens.astype(np.uint32) highs = scaled_highs.astype(np.uint32) lows = scaled_lows.astype(np.uint32) closes = scaled_closes.astype(np.uint32) volumes = cols['volume'].astype(np.uint32) # Exclude rows with unsafe values by setting to zero. opens[exclude_mask] = 0 highs[exclude_mask] = 0 lows[exclude_mask] = 0 closes[exclude_mask] = 0 volumes[exclude_mask] = 0 return opens, highs, lows, closes, volumes
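A compact sketch of the float-to-uint32 scaling step in isolation (made-up prices; the source's `check_uint32_safe` helper is replaced here by an inline comparison):

```python
import numpy as np

scale_factor = 1000          # e.g. keep three decimal places of price precision
closes = np.array([12.3456, np.nan, 7.5])

scaled = (np.nan_to_num(closes) * scale_factor).round()
print(scaled)                          # 12346.0, 0.0, 7500.0

# Values that would overflow uint32 get masked out and zeroed after the cast.
unsafe = scaled >= np.iinfo(np.uint32).max
as_uint32 = scaled.astype(np.uint32)
as_uint32[unsafe] = 0
print(as_uint32)                       # 12346, 0, 7500
```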
<SYSTEM_TASK:> Write the metadata to a JSON file in the rootdir. <END_TASK> <USER_TASK:> Description: def write(self, rootdir): """ Write the metadata to a JSON file in the rootdir. Values contained in the metadata are: version : int The value of FORMAT_VERSION of this class. ohlc_ratio : int The default ratio by which to multiply the pricing data to convert the floats from floats to an integer to fit within the np.uint32. If ohlc_ratios_per_sid is None or does not contain a mapping for a given sid, this ratio is used. ohlc_ratios_per_sid : dict A dict mapping each sid in the output to the factor by which the pricing data is multiplied so that the float data can be stored as an integer. minutes_per_day : int The number of minutes per each period. calendar_name : str The name of the TradingCalendar on which the minute bars are based. start_session : datetime 'YYYY-MM-DD' formatted representation of the first trading session in the data set. end_session : datetime 'YYYY-MM-DD' formatted representation of the last trading session in the data set. Deprecated, but included for backwards compatibility: first_trading_day : string 'YYYY-MM-DD' formatted representation of the first trading day available in the dataset. market_opens : list List of int64 values representing UTC market opens as minutes since epoch. market_closes : list List of int64 values representing UTC market closes as minutes since epoch. """
calendar = self.calendar slicer = calendar.schedule.index.slice_indexer( self.start_session, self.end_session, ) schedule = calendar.schedule[slicer] market_opens = schedule.market_open market_closes = schedule.market_close metadata = { 'version': self.version, 'ohlc_ratio': self.default_ohlc_ratio, 'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid, 'minutes_per_day': self.minutes_per_day, 'calendar_name': self.calendar.name, 'start_session': str(self.start_session.date()), 'end_session': str(self.end_session.date()), # Write these values for backwards compatibility 'first_trading_day': str(self.start_session.date()), 'market_opens': ( market_opens.values.astype('datetime64[m]'). astype(np.int64).tolist()), 'market_closes': ( market_closes.values.astype('datetime64[m]'). astype(np.int64).tolist()), } with open(self.metadata_path(rootdir), 'w+') as fp: json.dump(metadata, fp)
<SYSTEM_TASK:> Open an existing ``rootdir`` for writing. <END_TASK> <USER_TASK:> Description: def open(cls, rootdir, end_session=None): """ Open an existing ``rootdir`` for writing. Parameters ---------- end_session : Timestamp (optional) When appending, the intended new ``end_session``. """
metadata = BcolzMinuteBarMetadata.read(rootdir) return BcolzMinuteBarWriter( rootdir, metadata.calendar, metadata.start_session, end_session if end_session is not None else metadata.end_session, metadata.minutes_per_day, metadata.default_ohlc_ratio, metadata.ohlc_ratios_per_sid, write_metadata=end_session is not None )
<SYSTEM_TASK:> Create empty ctable for given path. <END_TASK> <USER_TASK:> Description: def _init_ctable(self, path): """ Create empty ctable for given path. Parameters ---------- path : string The path to rootdir of the new ctable. """
# Only create the containing subdir on creation. # This is not to be confused with the `.bcolz` directory, but is the # directory up one level from the `.bcolz` directories. sid_containing_dirname = os.path.dirname(path) if not os.path.exists(sid_containing_dirname): # Other sids may have already created the containing directory. os.makedirs(sid_containing_dirname) initial_array = np.empty(0, np.uint32) table = ctable( rootdir=path, columns=[ initial_array, initial_array, initial_array, initial_array, initial_array, ], names=[ 'open', 'high', 'low', 'close', 'volume' ], expectedlen=self._expectedlen, mode='w', ) table.flush() return table
<SYSTEM_TASK:> Ensure that a ctable exists for ``sid``, then return it. <END_TASK> <USER_TASK:> Description: def _ensure_ctable(self, sid): """Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid) if not os.path.exists(sidpath): return self._init_ctable(sidpath) return bcolz.ctable(rootdir=sidpath, mode='a')
<SYSTEM_TASK:> Fill sid container with empty data through the specified date. <END_TASK> <USER_TASK:> Description: def pad(self, sid, date): """ Fill sid container with empty data through the specified date. If the last recorded trade is not at the close, then that day will be padded with zeros until its close. Any day after that (up to and including the specified date) will be padded with `minutes_per_day` worth of zeros. Parameters ---------- sid : int The asset identifier for the data being written. date : datetime-like The date used to calculate how many slots to pad. The padding is done through the date, i.e. after the padding is done the `last_date_in_output_for_sid` will be equal to `date`. """
table = self._ensure_ctable(sid)

last_date = self.last_date_in_output_for_sid(sid)

tds = self._session_labels

if date <= last_date or date < tds[0]:
    # No need to pad.
    return

if last_date is pd.NaT:
    # If there is no data, determine how many days to add so that
    # desired days are written to the correct slots.
    days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
    days_to_zerofill = tds[tds.slice_indexer(
        start=last_date + tds.freq,
        end=date)]

self._zerofill(table, len(days_to_zerofill))

new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
    new_last_date, date)
<SYSTEM_TASK:> Write all the supplied kwargs as attributes of the sid's file. <END_TASK> <USER_TASK:> Description: def set_sid_attrs(self, sid, **kwargs): """Write all the supplied kwargs as attributes of the sid's file. """
table = self._ensure_ctable(sid) for k, v in kwargs.items(): table.attrs[k] = v
<SYSTEM_TASK:> Write a stream of minute data. <END_TASK> <USER_TASK:> Description: def write(self, data, show_progress=False, invalid_data_behavior='warn'): """Write a stream of minute data. Parameters ---------- data : iterable[(int, pd.DataFrame)] The data to write. Each element should be a tuple of sid, data where data has the following format: columns : ('open', 'high', 'low', 'close', 'volume') open : float64 high : float64 low : float64 close : float64 volume : float64|int64 index : DatetimeIndex of market minutes. A given sid may appear more than once in ``data``; however, the dates must be strictly increasing. show_progress : bool, optional Whether or not to show a progress bar while writing. """
ctx = maybe_show_progress( data, show_progress=show_progress, item_show_func=lambda e: e if e is None else str(e[0]), label="Merging minute equity files:", ) write_sid = self.write_sid with ctx as it: for e in it: write_sid(*e, invalid_data_behavior=invalid_data_behavior)
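A sketch of the (sid, frame) pairs that ``write`` consumes. The sid, minutes, and prices below are invented, and ``writer`` is assumed to be an already-configured BcolzMinuteBarWriter (for example, one returned by ``open`` above).

import pandas as pd

# Two invented market minutes for sid 1; in practice these come from the
# writer's trading calendar.
minutes = pd.DatetimeIndex(['2016-01-05 14:31', '2016-01-05 14:32'], tz='UTC')
frame = pd.DataFrame(
    {
        'open': [10.0, 10.1],
        'high': [10.2, 10.3],
        'low': [9.9, 10.0],
        'close': [10.1, 10.2],
        'volume': [1000, 1500],
    },
    index=minutes,
)

# `writer` is assumed to exist; a sid may appear multiple times as long as
# its dates are strictly increasing.
writer.write([(1, frame)], show_progress=True)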
<SYSTEM_TASK:> Return the number of data points up to and including the <END_TASK> <USER_TASK:> Description: def data_len_for_day(self, day): """ Return the number of data points up to and including the provided day. """
day_ix = self._session_labels.get_loc(day) # Add one to the 0-indexed day_ix to get the number of days. num_days = day_ix + 1 return num_days * self._minutes_per_day
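The slot arithmetic above is easy to verify in isolation. The 390-minute session length and the session labels below are assumptions for this sketch.

import pandas as pd

minutes_per_day = 390  # assumed regular session length
session_labels = pd.DatetimeIndex(
    ['2016-01-04', '2016-01-05', '2016-01-06'], tz='UTC'
)

def data_len_for_day(day):
    # Position of the day, converted from a 0-indexed location to a day count.
    num_days = session_labels.get_loc(day) + 1
    return num_days * minutes_per_day

# The second session ends after 2 * 390 = 780 minute slots.
assert data_len_for_day(pd.Timestamp('2016-01-05', tz='UTC')) == 780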
<SYSTEM_TASK:> Calculate the minutes which should be excluded when a window <END_TASK> <USER_TASK:> Description: def _minutes_to_exclude(self): """ Calculate the minutes which should be excluded when a window occurs on days which had an early close, i.e. days where the close based on the regular period of minutes per day and the market close do not match. Returns ------- List of (market_open, market_close) Timestamp pairs, one per early-close session, identifying the minute ranges to exclude. """
market_opens = self._market_opens.values.astype('datetime64[m]') market_closes = self._market_closes.values.astype('datetime64[m]') minutes_per_day = (market_closes - market_opens).astype(np.int64) early_indices = np.where( minutes_per_day != self._minutes_per_day - 1)[0] early_opens = self._market_opens[early_indices] early_closes = self._market_closes[early_indices] minutes = [(market_open, early_close) for market_open, early_close in zip(early_opens, early_closes)] return minutes
<SYSTEM_TASK:> Retrieve the pricing info for the given sid, dt, and field. <END_TASK> <USER_TASK:> Description: def get_value(self, sid, dt, field): """ Retrieve the pricing info for the given sid, dt, and field. Parameters ---------- sid : int Asset identifier. dt : datetime-like The datetime at which the trade occurred. field : string The type of pricing data to retrieve. ('open', 'high', 'low', 'close', 'volume') Returns ------- out : float|int The market data for the given sid, dt, and field coordinates. For OHLC: Returns a float if a trade occurred at the given dt. If no trade occurred, a np.nan is returned. For volume: Returns the integer value of the volume. (A volume of 0 signifies no trades for the given dt.) """
if self._last_get_value_dt_value == dt.value: minute_pos = self._last_get_value_dt_position else: try: minute_pos = self._find_position_of_minute(dt) except ValueError: raise NoDataOnDate() self._last_get_value_dt_value = dt.value self._last_get_value_dt_position = minute_pos try: value = self._open_minute_file(field, sid)[minute_pos] except IndexError: value = 0 if value == 0: if field == 'volume': return 0 else: return np.nan if field != 'volume': value *= self._ohlc_ratio_inverse_for_sid(sid) return value
<SYSTEM_TASK:> Internal method that returns the position of the given minute in the <END_TASK> <USER_TASK:> Description: def _find_position_of_minute(self, minute_dt): """ Internal method that returns the position of the given minute in the list of every trading minute since market open of the first trading day. Adjusts non market minutes to the last close. ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if 2002-01-02 is the first trading day of the dataset. Parameters ---------- minute_dt: pd.Timestamp The minute whose position should be calculated. Returns ------- int: The position of the given minute in the list of all trading minutes since market open on the first trading day. """
return find_position_of_minute( self._market_open_values, self._market_close_values, minute_dt.value / NANOS_IN_MINUTE, self._minutes_per_day, False, )
<SYSTEM_TASK:> Write the frames to the target HDF5 file, using the format used by <END_TASK> <USER_TASK:> Description: def write(self, frames): """ Write the frames to the target HDF5 file, using the format used by ``pd.Panel.to_hdf`` Parameters ---------- frames : iter[(int, DataFrame)] or dict[int -> DataFrame] An iterable or other mapping of sid to the corresponding OHLCV pricing data. """
with HDFStore(self._path, 'w', complevel=self._complevel, complib=self._complib) \ as store: panel = pd.Panel.from_dict(dict(frames)) panel.to_hdf(store, 'updates') with tables.open_file(self._path, mode='r+') as h5file: h5file.set_node_attr('/', 'version', 0)
<SYSTEM_TASK:> Construct an index array that, when applied to an array of values, produces <END_TASK> <USER_TASK:> Description: def next_event_indexer(all_dates, data_query_cutoff, all_sids, event_dates, event_timestamps, event_sids): """ Construct an index array that, when applied to an array of values, produces a 2D array containing the values associated with the next event for each sid at each moment in time. Locations where no next event was known will be filled with -1. Parameters ---------- all_dates : ndarray[datetime64[ns], ndim=1] Row labels for the target output. data_query_cutoff : pd.DatetimeIndex The boundaries for the given trading sessions in ``all_dates``. all_sids : ndarray[int, ndim=1] Column labels for the target output. event_dates : ndarray[datetime64[ns], ndim=1] Dates on which each input event occurred/will occur. ``event_dates`` must be in sorted order, and may not contain any NaT values. event_timestamps : ndarray[datetime64[ns], ndim=1] Dates on which we learned about each input event. event_sids : ndarray[int, ndim=1] Sids associated with each input event. Returns ------- indexer : ndarray[int, ndim=2] An array of shape (len(all_dates), len(all_sids)) of indices into ``event_{dates,timestamps,sids}``. """
validate_event_metadata(event_dates, event_timestamps, event_sids) out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64) sid_ixs = all_sids.searchsorted(event_sids) # side='right' here ensures that we include the event date itself # if it's in all_dates. dt_ixs = all_dates.searchsorted(event_dates, side='right') ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right') # Walk backward through the events, writing the index of the event into # slots ranging from the event's timestamp to its asof. This depends for # correctness on the fact that event_dates is sorted in ascending order, # because we need to overwrite later events with earlier ones if their # eligible windows overlap. for i in range(len(event_sids) - 1, -1, -1): start_ix = ts_ixs[i] end_ix = dt_ixs[i] out[start_ix:end_ix, sid_ixs[i]] = i return out
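A self-contained, simplified walk-through of the indexer mechanics for a single sid, treating the session labels and the data query cutoffs as the same index for brevity; the dates are invented.

import numpy as np
import pandas as pd

# One sid, two invented events.
sessions = pd.DatetimeIndex(
    ['2016-01-04', '2016-01-05', '2016-01-06', '2016-01-07'], tz='UTC'
)
event_dates = np.array(['2016-01-05', '2016-01-07'], dtype='datetime64[ns]')
event_timestamps = np.array(['2016-01-04', '2016-01-05'],
                            dtype='datetime64[ns]')

out = np.full(len(sessions), -1, dtype=np.int64)
ts_ixs = sessions.values.searchsorted(event_timestamps, side='right')
dt_ixs = sessions.values.searchsorted(event_dates, side='right')

# Walk backwards so that earlier events overwrite later ones where their
# eligible windows overlap, exactly as in the loop above.
for i in range(len(event_dates) - 1, -1, -1):
    out[ts_ixs[i]:dt_ixs[i]] = i

# Before anything is known the slot stays -1; on the 5th the next event is
# event 0; on the 6th and 7th it is event 1.
assert out.tolist() == [-1, 0, 1, 1]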
<SYSTEM_TASK:> Construct an index array that, when applied to an array of values, produces <END_TASK> <USER_TASK:> Description: def previous_event_indexer(data_query_cutoff_times, all_sids, event_dates, event_timestamps, event_sids): """ Construct an index array that, when applied to an array of values, produces a 2D array containing the values associated with the previous event for each sid at each moment in time. Locations where no previous event was known will be filled with -1. Parameters ---------- data_query_cutoff_times : pd.DatetimeIndex The boundaries for the given trading sessions; these serve as the row labels for the target output. all_sids : ndarray[int, ndim=1] Column labels for the target output. event_dates : ndarray[datetime64[ns], ndim=1] Dates on which each input event occurred/will occur. ``event_dates`` must be in sorted order, and may not contain any NaT values. event_timestamps : ndarray[datetime64[ns], ndim=1] Dates on which we learned about each input event. event_sids : ndarray[int, ndim=1] Sids associated with each input event. Returns ------- indexer : ndarray[int, ndim=2] An array of shape (len(data_query_cutoff_times), len(all_sids)) of indices into ``event_{dates,timestamps,sids}``. """
validate_event_metadata(event_dates, event_timestamps, event_sids) out = np.full( (len(data_query_cutoff_times), len(all_sids)), -1, dtype=np.int64, ) eff_dts = np.maximum(event_dates, event_timestamps) sid_ixs = all_sids.searchsorted(event_sids) dt_ixs = data_query_cutoff_times.searchsorted(eff_dts, side='right') # Walk backwards through the events, writing the index of the event into # slots ranging from max(event_date, event_timestamp) to the start of the # previously-written event. This depends for correctness on the fact that # event_dates is sorted in ascending order, because we need to have written # later events so we know where to stop forward-filling earlier events. last_written = {} for i in range(len(event_dates) - 1, -1, -1): sid_ix = sid_ixs[i] dt_ix = dt_ixs[i] out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i last_written[sid_ix] = dt_ix return out
<SYSTEM_TASK:> Determine the last piece of information known on each date in the date <END_TASK> <USER_TASK:> Description: def last_in_date_group(df, data_query_cutoff_times, assets, reindex=True, have_sids=True, extra_groupers=None): """ Determine the last piece of information known on each date in the date index for each group. Input df MUST be sorted such that the correct last item is chosen from each group. Parameters ---------- df : pd.DataFrame The DataFrame containing the data to be grouped. Must be sorted so that the correct last item is chosen from each group. data_query_cutoff_times : pd.DatetimeIndex The dates to use for grouping and reindexing. assets : pd.Int64Index The assets that should be included in the column multiindex. reindex : bool Whether or not the DataFrame should be reindexed against the date index. This will add back any dates to the index that were grouped away. have_sids : bool Whether or not the DataFrame has sids. If it does, they will be used in the groupby. extra_groupers : list of str Any extra field names that should be included in the groupby. Returns ------- last_in_group : pd.DataFrame A DataFrame with dates as the index and fields used in the groupby as levels of a multiindex of columns. """
idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted( df[TS_FIELD_NAME].values, )]] if have_sids: idx += [SID_FIELD_NAME] if extra_groupers is None: extra_groupers = [] idx += extra_groupers last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby( idx, sort=False, ).last() # For the number of things that we're grouping by (except TS), unstack # the df. Done this way because of an unresolved pandas bug whereby # passing a list of levels with mixed dtypes to unstack causes the # resulting DataFrame to have all object-type columns. for _ in range(len(idx) - 1): last_in_group = last_in_group.unstack(-1) if reindex: if have_sids: cols = last_in_group.columns last_in_group = last_in_group.reindex( index=data_query_cutoff_times, columns=pd.MultiIndex.from_product( tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,), names=cols.names, ), ) else: last_in_group = last_in_group.reindex(data_query_cutoff_times) return last_in_group
<SYSTEM_TASK:> Forward fill values in a DataFrame with special logic to handle cases <END_TASK> <USER_TASK:> Description: def ffill_across_cols(df, columns, name_map): """ Forward fill values in a DataFrame with special logic to handle cases that pd.DataFrame.ffill cannot and cast columns to appropriate types. Parameters ---------- df : pd.DataFrame The DataFrame to do forward-filling on. columns : list of BoundColumn The BoundColumns that correspond to columns in the DataFrame to which special filling and/or casting logic should be applied. name_map: map of string -> string Mapping from the name of each BoundColumn to the associated column name in `df`. """
df.ffill(inplace=True) # Fill in missing values specified by each column. This is made # significantly more complex by the fact that we need to work around # two pandas issues: # 1) When we have sids, if there are no records for a given sid for any # dates, pandas will generate a column full of NaNs for that sid. # This means that some of the columns in `dense_output` are now # float instead of the intended dtype, so we have to coerce back to # our expected type and convert NaNs into the desired missing value. # 2) DataFrame.ffill assumes that receiving None as a fill-value means # that no value was passed. Consequently, there's no way to tell # pandas to replace NaNs in an object column with None using fillna, # so we have to roll our own instead using df.where. for column in columns: column_name = name_map[column.name] # Special logic for strings since `fillna` doesn't work if the # missing value is `None`. if column.dtype == categorical_dtype: df[column_name] = df[ column.name ].where(pd.notnull(df[column_name]), column.missing_value) else: # We need to execute `fillna` before `astype` in case the # column contains NaNs and needs to be cast to bool or int. # This is so that the NaNs are replaced first, since pandas # can't convert NaNs for those types. df[column_name] = df[ column_name ].fillna(column.missing_value).astype(column.dtype)
<SYSTEM_TASK:> Shift dates of a pipeline query back by `shift` days. <END_TASK> <USER_TASK:> Description: def shift_dates(dates, start_date, end_date, shift): """ Shift dates of a pipeline query back by `shift` days. load_adjusted_array is called with dates on which the user's algo will be shown data, which means we need to return the data that would be known at the start of each date. This is often labeled with a previous date in the underlying data (e.g. at the start of today, we have the data as of yesterday). In this case, we can shift the query dates back to query the appropriate values. Parameters ---------- dates : DatetimeIndex All known dates. start_date : pd.Timestamp Start date of the pipeline query. end_date : pd.Timestamp End date of the pipeline query. shift : int The number of days to shift back the query dates. """
try: start = dates.get_loc(start_date) except KeyError: if start_date < dates[0]: raise NoFurtherDataError( msg=( "Pipeline Query requested data starting on {query_start}, " "but first known date is {calendar_start}" ).format( query_start=str(start_date), calendar_start=str(dates[0]), ) ) else: raise ValueError("Query start %s not in calendar" % start_date) # Make sure that shifting doesn't push us out of the calendar. if start < shift: raise NoFurtherDataError( msg=( "Pipeline Query requested data from {shift}" " days before {query_start}, but first known date is only " "{start} days earlier." ).format(shift=shift, query_start=start_date, start=start), ) try: end = dates.get_loc(end_date) except KeyError: if end_date > dates[-1]: raise NoFurtherDataError( msg=( "Pipeline Query requesting data up to {query_end}, " "but last known date is {calendar_end}" ).format( query_end=end_date, calendar_end=dates[-1], ) ) else: raise ValueError("Query end %s not in calendar" % end_date) return dates[start - shift], dates[end - shift]
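A toy illustration of the shift, using a made-up five-day calendar in place of a real trading calendar:

import pandas as pd

dates = pd.DatetimeIndex(['2016-01-04', '2016-01-05', '2016-01-06',
                          '2016-01-07', '2016-01-08'], tz='UTC')

# Asking for data shown on the 6th through the 8th, shifted back one day,
# queries the underlying source for the 5th through the 7th.
start = dates.get_loc(pd.Timestamp('2016-01-06', tz='UTC'))
end = dates.get_loc(pd.Timestamp('2016-01-08', tz='UTC'))
shift = 1
assert (dates[start - shift], dates[end - shift]) == (
    pd.Timestamp('2016-01-05', tz='UTC'),
    pd.Timestamp('2016-01-07', tz='UTC'),
)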
<SYSTEM_TASK:> Template ``formatters`` into ``docstring``. <END_TASK> <USER_TASK:> Description: def format_docstring(owner_name, docstring, formatters): """ Template ``formatters`` into ``docstring``. Parameters ---------- owner_name : str The name of the function or class whose docstring is being templated. Only used for error messages. docstring : str The docstring to template. formatters : dict[str -> str] Parameters for a str.format() call on ``docstring``. Multi-line values in ``formatters`` will have leading whitespace padded to match the leading whitespace of the substitution string. """
# Build a dict of parameters to a vanilla format() call by searching for
# each entry in **formatters and applying any leading whitespace to each
# line in the desired substitution.
format_params = {}
for target, doc_for_target in iteritems(formatters):
    # Search for '{name}', with optional leading whitespace.
    regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE)
    matches = regex.findall(docstring)
    if not matches:
        raise ValueError(
            "Couldn't find template for parameter {!r} in docstring "
            "for {}."
            "\nParameter name must be alone on a line surrounded by "
            "braces.".format(target, owner_name),
        )
    elif len(matches) > 1:
        raise ValueError(
            "Found multiple templates for parameter {!r} "
            "in docstring for {}."
            "\nParameter should only appear once.".format(
                target, owner_name
            )
        )

    (leading_whitespace, _) = matches[0]
    format_params[target] = pad_lines_after_first(
        leading_whitespace,
        doc_for_target,
    )

return docstring.format(**format_params)
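The heart of the helper above is the MULTILINE regex that requires each placeholder to sit alone on its own line and captures its indentation so the substituted text can be padded to match. A self-contained sketch of just that step, with an invented docstring:

import re

docstring = "Frobnicate.\n\n    {extra_parameters}\n"
target = 'extra_parameters'

# The captured group is the leading whitespace that will be re-applied to
# every line of the substituted value after the first.
regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE)
leading_whitespace, _ = regex.findall(docstring)[0]
assert leading_whitespace == '    '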
<SYSTEM_TASK:> Add a column. <END_TASK> <USER_TASK:> Description: def add(self, term, name, overwrite=False): """ Add a column. The results of computing `term` will show up as a column in the DataFrame produced by running this pipeline. Parameters ---------- term : zipline.pipeline.Term A Filter, Factor, or Classifier to add to the pipeline. name : str Name of the column to add. overwrite : bool Whether to overwrite the existing entry if we already have a column named `name`. """
self.validate_column(name, term) columns = self.columns if name in columns: if overwrite: self.remove(name) else: raise KeyError("Column '{}' already exists.".format(name)) if not isinstance(term, ComputableTerm): raise TypeError( "{term} is not a valid pipeline column. Did you mean to " "append '.latest'?".format(term=term) ) self._columns[name] = term
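A hedged usage sketch for ``add``, assuming a standard zipline installation; the factor, window lengths, and column name are arbitrary choices for illustration.

from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage

pipe = Pipeline()
sma = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
pipe.add(sma, 'sma_10')

# Re-adding under the same name requires overwrite=True, otherwise a KeyError
# is raised as in the method above.
pipe.add(
    SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=20),
    'sma_10',
    overwrite=True,
)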
<SYSTEM_TASK:> Set a screen on this Pipeline. <END_TASK> <USER_TASK:> Description: def set_screen(self, screen, overwrite=False): """ Set a screen on this Pipeline. Parameters ---------- screen : zipline.pipeline.Filter The filter to apply as a screen. overwrite : bool Whether to overwrite any existing screen. If overwrite is False and self.screen is not None, we raise an error. """
if self._screen is not None and not overwrite: raise ValueError( "set_screen() called with overwrite=False and screen already " "set.\n" "If you want to apply multiple filters as a screen use " "set_screen(filter1 & filter2 & ...).\n" "If you want to replace the previous screen with a new one, " "use set_screen(new_filter, overwrite=True)." ) self._screen = screen
<SYSTEM_TASK:> Compile into an ExecutionPlan. <END_TASK> <USER_TASK:> Description: def to_execution_plan(self, domain, default_screen, start_date, end_date): """ Compile into an ExecutionPlan. Parameters ---------- domain : zipline.pipeline.domain.Domain Domain on which the pipeline will be executed. default_screen : zipline.pipeline.term.Term Term to use as a screen if self.screen is None. start_date : pd.Timestamp The first date of requested output. end_date : pd.Timestamp The last date of requested output. Returns ------- graph : zipline.pipeline.graph.ExecutionPlan Graph encoding term dependencies, including metadata about extra row requirements. """
if self._domain is not GENERIC and self._domain is not domain: raise AssertionError( "Attempted to compile Pipeline with domain {} to execution " "plan with different domain {}.".format(self._domain, domain) ) return ExecutionPlan( domain=domain, terms=self._prepare_graph_terms(default_screen), start_date=start_date, end_date=end_date, )
<SYSTEM_TASK:> Helper for to_graph and to_execution_plan. <END_TASK> <USER_TASK:> Description: def _prepare_graph_terms(self, default_screen): """Helper for to_graph and to_execution_plan."""
columns = self.columns.copy() screen = self.screen if screen is None: screen = default_screen columns[SCREEN_NAME] = screen return columns
<SYSTEM_TASK:> Render this Pipeline as a DAG. <END_TASK> <USER_TASK:> Description: def show_graph(self, format='svg'): """ Render this Pipeline as a DAG. Parameters ---------- format : {'svg', 'png', 'jpeg'} Image format to render with. Default is 'svg'. """
g = self.to_simple_graph(AssetExists()) if format == 'svg': return g.svg elif format == 'png': return g.png elif format == 'jpeg': return g.jpeg else: # We should never get here because of the expect_element decorator # above. raise AssertionError("Unknown graph format %r." % format)
<SYSTEM_TASK:> A list of terms that are outputs of this pipeline. <END_TASK> <USER_TASK:> Description: def _output_terms(self): """ A list of terms that are outputs of this pipeline. Includes all terms registered as data outputs of the pipeline, plus the screen, if present. """
terms = list(six.itervalues(self._columns)) screen = self.screen if screen is not None: terms.append(screen) return terms
<SYSTEM_TASK:> Get the domain for this pipeline. <END_TASK> <USER_TASK:> Description: def domain(self, default): """ Get the domain for this pipeline. - If an explicit domain was provided at construction time, use it. - Otherwise, infer a domain from the registered columns. - If no domain can be inferred, return ``default``. Parameters ---------- default : zipline.pipeline.Domain Domain to use if no domain can be inferred from this pipeline by itself. Returns ------- domain : zipline.pipeline.Domain The domain for the pipeline. Raises ------ AmbiguousDomain ValueError If the terms in ``self`` conflict with self._domain. """
# Always compute our inferred domain to ensure that it's compatible # with our explicit domain. inferred = infer_domain(self._output_terms) if inferred is GENERIC and self._domain is GENERIC: # Both generic. Fall back to default. return default elif inferred is GENERIC and self._domain is not GENERIC: # Use the non-generic domain. return self._domain elif inferred is not GENERIC and self._domain is GENERIC: # Use the non-generic domain. return inferred else: # Both non-generic. They have to match. if inferred is not self._domain: raise ValueError( "Conflicting domains in Pipeline. Inferred {}, but {} was " "passed at construction.".format(inferred, self._domain) ) return inferred
<SYSTEM_TASK:> Create a tuple containing all elements of tup, plus elem. <END_TASK> <USER_TASK:> Description: def _ensure_element(tup, elem): """ Create a tuple containing all elements of tup, plus elem. Returns the new tuple and the index of elem in the new tuple. """
try: return tup, tup.index(elem) except ValueError: return tuple(chain(tup, (elem,))), len(tup)
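Two quick checks of the helper above, assuming ``_ensure_element`` is in scope:

# If the element is already present, the tuple comes back unchanged along
# with its position; otherwise the element is appended at the end.
assert _ensure_element(('a', 'b'), 'b') == (('a', 'b'), 1)
assert _ensure_element(('a', 'b'), 'c') == (('a', 'b', 'c'), 2)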
<SYSTEM_TASK:> Compute our stored expression string with numexpr. <END_TASK> <USER_TASK:> Description: def _compute(self, arrays, dates, assets, mask): """ Compute our stored expression string with numexpr. """
out = full(mask.shape, self.missing_value, dtype=self.dtype) # This writes directly into our output buffer. numexpr.evaluate( self._expr, local_dict={ "x_%d" % idx: array for idx, array in enumerate(arrays) }, global_dict={'inf': inf}, out=out, ) return out
<SYSTEM_TASK:> Return self._expr with all variables rebound to the indices implied by <END_TASK> <USER_TASK:> Description: def _rebind_variables(self, new_inputs): """ Return self._expr with all variables rebound to the indices implied by new_inputs. """
expr = self._expr # If we have 11+ variables, some of our variable names may be # substrings of other variable names. For example, we might have x_1, # x_10, and x_100. By enumerating in reverse order, we ensure that # every variable name which is a substring of another variable name is # processed after the variable of which it is a substring. This # guarantees that the substitution of any given variable index only # ever affects exactly its own index. For example, if we have variables # with indices going up to 100, we will process all of the x_1xx names # before x_1x, which will be before x_1, so the substitution of x_1 # will not affect x_1x, which will not affect x_1xx. for idx, input_ in reversed(list(enumerate(self.inputs))): old_varname = "x_%d" % idx # Temporarily rebind to x_temp_N so that we don't overwrite the # same value multiple times. temp_new_varname = "x_temp_%d" % new_inputs.index(input_) expr = expr.replace(old_varname, temp_new_varname) # Clear out the temp variables now that we've finished iteration. return expr.replace("_temp_", "_")
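A standalone illustration (with an invented expression) of why the reverse-order walk and the temporary ``x_temp_N`` names matter: naive replacement lets ``x_1`` collide with the prefix of ``x_10``.

expr = "x_1 + x_10"

# Suppose rebinding should swap the two inputs: old x_1 -> new x_10,
# old x_10 -> new x_1. Naive in-order replacement collides on the shared
# prefix and ends up undoing the intended swap.
naive = expr.replace("x_1", "x_10").replace("x_10", "x_1")
assert naive == "x_1 + x_10"   # wrong: the swap was lost

# Reverse order plus temporary names, as in the method above.
safe = expr
for old, new in [("x_10", "x_temp_1"), ("x_1", "x_temp_10")]:
    safe = safe.replace(old, new)
assert safe.replace("_temp_", "_") == "x_10 + x_1"   # correct swap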
<SYSTEM_TASK:> Merge the inputs of two NumericalExpressions into a single input tuple, <END_TASK> <USER_TASK:> Description: def _merge_expressions(self, other): """ Merge the inputs of two NumericalExpressions into a single input tuple, rewriting their respective string expressions to make input names resolve correctly. Returns a tuple of (new_self_expr, new_other_expr, new_inputs) """
new_inputs = tuple(set(self.inputs).union(other.inputs)) new_self_expr = self._rebind_variables(new_inputs) new_other_expr = other._rebind_variables(new_inputs) return new_self_expr, new_other_expr, new_inputs
<SYSTEM_TASK:> Compute new expression strings and a new inputs tuple for combining <END_TASK> <USER_TASK:> Description: def build_binary_op(self, op, other): """ Compute new expression strings and a new inputs tuple for combining self and other with a binary operator. """
if isinstance(other, NumericalExpression): self_expr, other_expr, new_inputs = self._merge_expressions(other) elif isinstance(other, Term): self_expr = self._expr new_inputs, other_idx = _ensure_element(self.inputs, other) other_expr = "x_%d" % other_idx elif isinstance(other, Number): self_expr = self._expr other_expr = str(other) new_inputs = self.inputs else: raise BadBinaryOperator(op, other) return self_expr, other_expr, new_inputs
<SYSTEM_TASK:> Short repr to use when rendering Pipeline graphs. <END_TASK> <USER_TASK:> Description: def graph_repr(self): """Short repr to use when rendering Pipeline graphs."""
# Replace any floating point numbers in the expression # with their scientific notation final = re.sub(r"[-+]?\d*\.\d+", lambda x: format(float(x.group(0)), '.2E'), self._expr) # Graphviz interprets `\l` as "divide label into lines, left-justified" return "Expression:\\l {}\\l".format( final, )
<SYSTEM_TASK:> Get the last modified time of path as a Timestamp. <END_TASK> <USER_TASK:> Description: def last_modified_time(path): """ Get the last modified time of path as a Timestamp. """
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
<SYSTEM_TASK:> Get the root directory for all zipline-managed files. <END_TASK> <USER_TASK:> Description: def zipline_root(environ=None): """ Get the root directory for all zipline-managed files. For testing purposes, this accepts a dictionary to interpret as the os environment. Parameters ---------- environ : dict, optional A dict to interpret as the os environment. Returns ------- root : string Path to the zipline root dir. """
if environ is None: environ = os.environ root = environ.get('ZIPLINE_ROOT', None) if root is None: root = expanduser('~/.zipline') return root
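Two quick checks of the environment override, assuming ``zipline_root`` above is in scope:

# Passing a fake environ dict, as the docstring suggests for testing.
assert zipline_root({'ZIPLINE_ROOT': '/data/zipline'}) == '/data/zipline'

# With no override, the default is ~/.zipline expanded for the current user.
print(zipline_root({}))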
<SYSTEM_TASK:> Build a dict of Adjustment objects in the format expected by <END_TASK> <USER_TASK:> Description: def format_adjustments(self, dates, assets): """ Build a dict of Adjustment objects in the format expected by AdjustedArray. Returns a dict of the form: { # Integer index into `dates` for the date on which we should # apply the list of adjustments. 1 : [ Float64Multiply(first_row=2, last_row=4, col=3, value=0.5), Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0), ... ], ... } """
make_adjustment = partial(make_adjustment_from_labels, dates, assets)

min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
    return {}

# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
    min_date,
    max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)

# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)

adjustments_to_use = self.adjustments.loc[
    dates_filter & sids_filter
].set_index('apply_date')

# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the adjustments being sorted by apply_date.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
    # This expansion depends on the ordering of the DataFrame columns,
    # defined above.
    apply_date, sid, value, kind, start_date, end_date = row
    if apply_date != previous_apply_date:
        # Get the next apply date if no exact match.
        row_loc = dates.get_loc(apply_date, method='bfill')
        current_date_adjustments = out[row_loc] = []
        previous_apply_date = apply_date

    # Look up the appropriate Adjustment constructor based on the value
    # of `kind`.
    current_date_adjustments.append(
        make_adjustment(start_date, end_date, sid, kind, value)
    )
return out
<SYSTEM_TASK:> Load data from our stored baseline. <END_TASK> <USER_TASK:> Description: def load_adjusted_array(self, domain, columns, dates, sids, mask): """ Load data from our stored baseline. """
if len(columns) != 1: raise ValueError( "Can't load multiple columns with DataFrameLoader" ) column = columns[0] self._validate_input_column(column) date_indexer = self.dates.get_indexer(dates) assets_indexer = self.assets.get_indexer(sids) # Boolean arrays with True on matched entries good_dates = (date_indexer != -1) good_assets = (assets_indexer != -1) data = self.baseline[ix_(date_indexer, assets_indexer)] mask = (good_assets & as_column(good_dates)) & mask # Mask out requested columns/rows that didn't match. data[~mask] = column.missing_value return { column: AdjustedArray( # Pull out requested columns/rows from our baseline data. data=data, adjustments=self.format_adjustments(dates, sids), missing_value=column.missing_value, ), }
<SYSTEM_TASK:> Make sure a passed column is our column. <END_TASK> <USER_TASK:> Description: def _validate_input_column(self, column): """Make sure a passed column is our column. """
if column != self.column and column.unspecialize() != self.column: raise ValueError("Can't load unknown column %s" % column)
<SYSTEM_TASK:> To resolve the symbol in the LEVERAGED_ETF list, <END_TASK> <USER_TASK:> Description: def load_from_directory(list_name): """ To resolve the symbol in the LEVERAGED_ETF list, the date on which the symbol was in effect is needed. Furthermore, to maintain a point in time record of our own maintenance of the restricted list, we need a knowledge date. Thus, restricted lists are dictionaries of datetime->symbol lists. new symbols should be entered as a new knowledge date entry. This method assumes a directory structure of: SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt The return value is a dictionary with: knowledge_date -> lookup_date -> {add: [symbol list], 'delete': [symbol list]} """
data = {} dir_path = os.path.join(SECURITY_LISTS_DIR, list_name) for kd_name in listdir(dir_path): kd = datetime.strptime(kd_name, DATE_FORMAT).replace( tzinfo=pytz.utc) data[kd] = {} kd_path = os.path.join(dir_path, kd_name) for ld_name in listdir(kd_path): ld = datetime.strptime(ld_name, DATE_FORMAT).replace( tzinfo=pytz.utc) data[kd][ld] = {} ld_path = os.path.join(kd_path, ld_name) for fname in listdir(ld_path): fpath = os.path.join(ld_path, fname) with open(fpath) as f: symbols = f.read().splitlines() data[kd][ld][fname] = symbols return data
<SYSTEM_TASK:> Weak least-recently-used cache decorator. <END_TASK> <USER_TASK:> Description: def weak_lru_cache(maxsize=100): """Weak least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. Arguments to the cached function must be hashable. Any that are weak- referenceable will be stored by weak reference. Once any of the args have been garbage collected, the entry will be removed from the cache. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """
class desc(lazyval): def __get__(self, instance, owner): if instance is None: return self try: return self._cache[instance] except KeyError: inst = ref(instance) @_weak_lru_cache(maxsize) @wraps(self._get) def wrapper(*args, **kwargs): return self._get(inst(), *args, **kwargs) self._cache[instance] = wrapper return wrapper @_weak_lru_cache(maxsize) def __call__(self, *args, **kwargs): return self._get(*args, **kwargs) return desc
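A rough usage sketch only: it assumes the returned descriptor is applied to instance methods in the way the ``lazyval`` base class suggests. The class, method, and data below are invented and none of this is taken from zipline's own usage.

class PriceHistory(object):
    def __init__(self, prices):
        self.prices = prices

    @weak_lru_cache(maxsize=32)
    def trailing_mean(self, window):
        # Recomputed only on cache misses; per the docstring above, cache
        # entries disappear once the instance is garbage collected because
        # it is held by weak reference.
        return sum(self.prices[-window:]) / float(window)

history = PriceHistory([10.0, 10.5, 11.0, 10.8])
history.trailing_mean(2)   # computed
history.trailing_mean(2)   # served from the per-instance cache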
<SYSTEM_TASK:> Bind a `Column` object to its name. <END_TASK> <USER_TASK:> Description: def bind(self, name): """ Bind a `Column` object to its name. """
return _BoundColumnDescr( dtype=self.dtype, missing_value=self.missing_value, name=name, doc=self.doc, metadata=self.metadata, )
<SYSTEM_TASK:> Specialize ``self`` to a concrete domain. <END_TASK> <USER_TASK:> Description: def specialize(self, domain): """Specialize ``self`` to a concrete domain. """
if domain == self.domain: return self return type(self)( dtype=self.dtype, missing_value=self.missing_value, dataset=self._dataset.specialize(domain), name=self._name, doc=self.__doc__, metadata=self._metadata, )
<SYSTEM_TASK:> Look up a column by name. <END_TASK> <USER_TASK:> Description: def get_column(cls, name): """Look up a column by name. Parameters ---------- name : str Name of the column to look up. Returns ------- column : zipline.pipeline.data.BoundColumn Column with the given name. Raises ------ AttributeError If no column with the given name exists. """
clsdict = vars(cls) try: maybe_column = clsdict[name] if not isinstance(maybe_column, _BoundColumnDescr): raise KeyError(name) except KeyError: raise AttributeError( "{dset} has no column {colname!r}:\n\n" "Possible choices are:\n" "{choices}".format( dset=cls.qualname, colname=name, choices=bulleted_list( sorted(cls._column_names), max_count=10, ), ) ) # Resolve column descriptor into a BoundColumn. return maybe_column.__get__(None, cls)
<SYSTEM_TASK:> Construct a new dataset given the coordinates. <END_TASK> <USER_TASK:> Description: def _make_dataset(cls, coords): """Construct a new dataset given the coordinates. """
class Slice(cls._SliceType): extra_coords = coords Slice.__name__ = '%s.slice(%s)' % ( cls.__name__, ', '.join('%s=%r' % item for item in coords.items()), ) return Slice
<SYSTEM_TASK:> Take a slice of a DataSetFamily to produce a dataset <END_TASK> <USER_TASK:> Description: def slice(cls, *args, **kwargs): """Take a slice of a DataSetFamily to produce a dataset indexed by asset and date. Parameters ---------- *args **kwargs The coordinates to fix along each extra dimension. Returns ------- dataset : DataSet A regular pipeline dataset indexed by asset and date. Notes ----- The extra dimensions coords used to produce the result are available under the ``extra_coords`` attribute. """
coords, hash_key = cls._canonical_key(args, kwargs) try: return cls._slice_cache[hash_key] except KeyError: pass Slice = cls._make_dataset(coords) cls._slice_cache[hash_key] = Slice return Slice
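A hypothetical family and slice call; the class, dimension, and column names are invented, and the imports assume the public names match the zipline version this code is drawn from.

from zipline.pipeline.data import Column, DataSetFamily
from zipline.utils.numpy_utils import float64_dtype

class EstimatesFamily(DataSetFamily):
    # One extra dimension beyond (asset, date); values are the allowed coords.
    extra_dims = [
        ('fiscal_quarter', {1, 2, 3, 4}),
    ]
    revenue = Column(float64_dtype)

# Fixing the extra dimension yields an ordinary DataSet usable in a Pipeline.
QuarterOneEstimates = EstimatesFamily.slice(fiscal_quarter=1)
print(QuarterOneEstimates.extra_coords)  # mapping of the fixed coordinates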
<SYSTEM_TASK:> Load by delegating to sub-loaders. <END_TASK> <USER_TASK:> Description: def load_adjusted_array(self, domain, columns, dates, sids, mask): """ Load by delegating to sub-loaders. """
out = {} for col in columns: try: loader = self._loaders.get(col) if loader is None: loader = self._loaders[col.unspecialize()] except KeyError: raise ValueError("Couldn't find loader for %s" % col) out.update( loader.load_adjusted_array(domain, [col], dates, sids, mask) ) return out
<SYSTEM_TASK:> Return uniformly-distributed floats between -0.0 and 100.0. <END_TASK> <USER_TASK:> Description: def _float_values(self, shape): """ Return uniformly-distributed floats between -0.0 and 100.0. """
return self.state.uniform(low=0.0, high=100.0, size=shape)
<SYSTEM_TASK:> Return uniformly-distributed integers between 0 and 100. <END_TASK> <USER_TASK:> Description: def _int_values(self, shape): """ Return uniformly-distributed integers between 0 and 100. """
return (self.state.randint(low=0, high=100, size=shape) .astype('int64'))
<SYSTEM_TASK:> Compute rowwise array quantiles on an input. <END_TASK> <USER_TASK:> Description: def quantiles(data, nbins_or_partition_bounds): """ Compute rowwise array quantiles on an input. """
return apply_along_axis( qcut, 1, data, q=nbins_or_partition_bounds, labels=False, )
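A small check of the row-wise binning, calling pandas' qcut directly on invented data:

import numpy as np
import pandas as pd

data = np.array([[1.0, 2.0, 3.0, 4.0],
                 [10.0, 20.0, 30.0, 40.0]])

# Equivalent to the helper above with nbins_or_partition_bounds=2: each row
# is split into two equal-sized buckets, labeled 0 and 1.
labels = np.apply_along_axis(pd.qcut, 1, data, q=2, labels=False)
assert labels.tolist() == [[0, 0, 1, 1], [0, 0, 1, 1]]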
<SYSTEM_TASK:> Handles the close of the given minute in minute emission. <END_TASK> <USER_TASK:> Description: def handle_minute_close(self, dt, data_portal): """ Handles the close of the given minute in minute emission. Parameters ---------- dt : Timestamp The minute that is ending Returns ------- A minute perf packet. """
self.sync_last_sale_prices(dt, data_portal) packet = { 'period_start': self._first_session, 'period_end': self._last_session, 'capital_base': self._capital_base, 'minute_perf': { 'period_open': self._market_open, 'period_close': dt, }, 'cumulative_perf': { 'period_open': self._first_session, 'period_close': self._last_session, }, 'progress': self._progress(self), 'cumulative_risk_metrics': {}, } ledger = self._ledger ledger.end_of_bar(self._session_count) self.end_of_bar( packet, ledger, dt, self._session_count, data_portal, ) return packet
<SYSTEM_TASK:> Handles the start of each session. <END_TASK> <USER_TASK:> Description: def handle_market_open(self, session_label, data_portal): """Handles the start of each session. Parameters ---------- session_label : Timestamp The label of the session that is about to begin. data_portal : DataPortal The current data portal. """
ledger = self._ledger ledger.start_of_session(session_label) adjustment_reader = data_portal.adjustment_reader if adjustment_reader is not None: # this is None when running with a dataframe source ledger.process_dividends( session_label, self._asset_finder, adjustment_reader, ) self._current_session = session_label cal = self._trading_calendar self._market_open, self._market_close = self._execution_open_and_close( cal, session_label, ) self.start_of_session(ledger, session_label, data_portal)
<SYSTEM_TASK:> Handles the close of the given day. <END_TASK> <USER_TASK:> Description: def handle_market_close(self, dt, data_portal): """Handles the close of the given day. Parameters ---------- dt : Timestamp The most recently completed simulation datetime. data_portal : DataPortal The current data portal. Returns ------- A daily perf packet. """
completed_session = self._current_session if self.emission_rate == 'daily': # this method is called for both minutely and daily emissions, but # this chunk of code here only applies for daily emissions. (since # it's done every minute, elsewhere, for minutely emission). self.sync_last_sale_prices(dt, data_portal) session_ix = self._session_count # increment the day counter before we move markers forward. self._session_count += 1 packet = { 'period_start': self._first_session, 'period_end': self._last_session, 'capital_base': self._capital_base, 'daily_perf': { 'period_open': self._market_open, 'period_close': dt, }, 'cumulative_perf': { 'period_open': self._first_session, 'period_close': self._last_session, }, 'progress': self._progress(self), 'cumulative_risk_metrics': {}, } ledger = self._ledger ledger.end_of_session(session_ix) self.end_of_session( packet, ledger, completed_session, session_ix, data_portal, ) return packet