<SYSTEM_TASK:> Extra sources always have a sid column. <END_TASK> <USER_TASK:> Description: def handle_extra_source(self, source_df, sim_params): """ Extra sources always have a sid column. We expand the given data (by forward filling) to the full range of the simulation dates, so that lookup is fast during simulation. """
if source_df is None:
    return

# Normalize all the dates in the df
source_df.index = source_df.index.normalize()

# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
    sim_params.start_session,
    sim_params.end_session
)

# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
    group_dict[group_name] = grouped_by_sid.get_group(group_name)

# This will be the dataframe which we query to get fetcher assets at
# any given time. Gets overwritten every time there's a new fetcher
# call.
extra_source_df = pd.DataFrame()

for identifier, df in iteritems(group_dict):
    # Since we know this df only contains a single sid, we can safely
    # de-dupe by the index (dt). If minute granularity, will take the
    # last data point on any given day.
    df = df.groupby(level=0).last()

    # Reindex the dataframe based on the backtest start/end date.
    # This makes reads easier during the backtest.
    df = self._reindex_extra_source(df, source_date_index)

    for col_name in df.columns.difference(['sid']):
        if col_name not in self._augmented_sources_map:
            self._augmented_sources_map[col_name] = {}

        self._augmented_sources_map[col_name][identifier] = df

    # Append to extra_source_df the reindexed dataframe for the single
    # sid.
    extra_source_df = extra_source_df.append(df)

self._extra_source_df = extra_source_df
<SYSTEM_TASK:> Given an asset and dt, returns the last traded dt from the viewpoint <END_TASK> <USER_TASK:> Description: def get_last_traded_dt(self, asset, dt, data_frequency): """ Given an asset and dt, returns the last traded dt from the viewpoint of the given dt. If there is a trade on the dt, the answer is the dt provided. """
return self._get_pricing_reader(data_frequency).get_last_traded_dt( asset, dt)
<SYSTEM_TASK:> Returns a list of adjustments between the dt and perspective_dt for the <END_TASK> <USER_TASK:> Description: def get_adjustments(self, assets, field, dt, perspective_dt): """ Returns a list of adjustments between the dt and perspective_dt for the given field and list of assets Parameters ---------- assets : list of type Asset, or Asset The asset, or assets whose adjustments are desired. field : {'open', 'high', 'low', 'close', 'volume', \ 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. perspective_dt : pd.Timestamp The timestamp from which the data is being viewed back from. Returns ------- adjustments : list[Adjustment] The adjustments to that field. """
if isinstance(assets, Asset):
    assets = [assets]

adjustment_ratios_per_asset = []

def split_adj_factor(x):
    return x if field != 'volume' else 1.0 / x

for asset in assets:
    adjustments_for_asset = []
    split_adjustments = self._get_adjustment_list(
        asset, self._splits_dict, "SPLITS"
    )
    for adj_dt, adj in split_adjustments:
        if dt < adj_dt <= perspective_dt:
            adjustments_for_asset.append(split_adj_factor(adj))
        elif adj_dt > perspective_dt:
            break

    if field != 'volume':
        merger_adjustments = self._get_adjustment_list(
            asset, self._mergers_dict, "MERGERS"
        )
        for adj_dt, adj in merger_adjustments:
            if dt < adj_dt <= perspective_dt:
                adjustments_for_asset.append(adj)
            elif adj_dt > perspective_dt:
                break

        dividend_adjustments = self._get_adjustment_list(
            asset, self._dividends_dict, "DIVIDENDS",
        )
        for adj_dt, adj in dividend_adjustments:
            if dt < adj_dt <= perspective_dt:
                adjustments_for_asset.append(adj)
            elif adj_dt > perspective_dt:
                break

    ratio = reduce(mul, adjustments_for_asset, 1.0)
    adjustment_ratios_per_asset.append(ratio)

return adjustment_ratios_per_asset
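For illustration only, a minimal standalone sketch of how the per-asset ratio above is composed; the adjustment factors are invented, whereas zipline derives them from its adjustments database.

    from functools import reduce
    from operator import mul

    # Hypothetical factors observed between dt and perspective_dt: a 2-for-1
    # split (price ratio 0.5) followed by a cash-dividend adjustment of 0.98.
    price_adjustments = [0.5, 0.98]

    # Same composition rule as above: multiply the factors, starting from 1.0
    # so that "no adjustments" yields a ratio of 1.0.
    price_ratio = reduce(mul, price_adjustments, 1.0)   # 0.49

    # For 'volume', each split factor is inverted (see split_adj_factor), so
    # share counts scale the opposite way to prices.
    volume_ratio = reduce(mul, [1.0 / 0.5], 1.0)        # 2.0
    print(price_ratio, volume_ratio)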
<SYSTEM_TASK:> Returns a scalar value representing the value <END_TASK> <USER_TASK:> Description: def get_adjusted_value(self, asset, field, dt, perspective_dt, data_frequency, spot_value=None): """ Returns a scalar value representing the value of the desired asset's field at the given dt with adjustments applied. Parameters ---------- asset : Asset The asset whose data is desired. field : {'open', 'high', 'low', 'close', 'volume', \ 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. perspective_dt : pd.Timestamp The timestamp from which the data is being viewed back from. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- value : float, int, or pd.Timestamp The value of the given ``field`` for ``asset`` at ``dt`` with any adjustments known by ``perspective_dt`` applied. The return type is based on the ``field`` requested. If the field is one of 'open', 'high', 'low', 'close', or 'price', the value will be a float. If the ``field`` is 'volume' the value will be a int. If the ``field`` is 'last_traded' the value will be a Timestamp. """
if spot_value is None:
    # if this is a fetcher field, we want to use perspective_dt (not dt)
    # because we want the new value as of midnight (fetcher only works
    # on a daily basis, all timestamps are on midnight)
    if self._is_extra_source(asset, field,
                             self._augmented_sources_map):
        spot_value = self.get_spot_value(asset, field, perspective_dt,
                                         data_frequency)
    else:
        spot_value = self.get_spot_value(asset, field, dt,
                                         data_frequency)

if isinstance(asset, Equity):
    ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
    spot_value *= ratio

return spot_value
<SYSTEM_TASK:> Internal method that returns a dataframe containing history bars <END_TASK> <USER_TASK:> Description: def _get_history_daily_window(self, assets, end_dt, bar_count, field_to_use, data_frequency): """ Internal method that returns a dataframe containing history bars of daily frequency for the given sids. """
session = self.trading_calendar.minute_to_session_label(end_dt)
days_for_window = self._get_days_for_window(session, bar_count)

if len(assets) == 0:
    return pd.DataFrame(None,
                        index=days_for_window,
                        columns=None)

data = self._get_history_daily_window_data(
    assets, days_for_window, end_dt, field_to_use, data_frequency
)
return pd.DataFrame(
    data,
    index=days_for_window,
    columns=assets
)
<SYSTEM_TASK:> Internal method that returns a dataframe containing history bars <END_TASK> <USER_TASK:> Description: def _get_history_minute_window(self, assets, end_dt, bar_count, field_to_use): """ Internal method that returns a dataframe containing history bars of minute frequency for the given sids. """
# get all the minutes for this window
try:
    minutes_for_window = self.trading_calendar.minutes_window(
        end_dt, -bar_count
    )
except KeyError:
    self._handle_minute_history_out_of_bounds(bar_count)

if minutes_for_window[0] < self._first_trading_minute:
    self._handle_minute_history_out_of_bounds(bar_count)

asset_minute_data = self._get_minute_window_data(
    assets,
    field_to_use,
    minutes_for_window,
)

return pd.DataFrame(
    asset_minute_data,
    index=minutes_for_window,
    columns=assets
)
<SYSTEM_TASK:> Public API method that returns a dataframe containing the requested <END_TASK> <USER_TASK:> Description: def get_history_window(self, assets, end_dt, bar_count, frequency, field, data_frequency, ffill=True): """ Public API method that returns a dataframe containing the requested history window. Data is fully adjusted. Parameters ---------- assets : list of zipline.data.Asset objects The assets whose data is desired. end_dt: pd.Timestamp The end of the requested window. bar_count: int The number of bars desired. frequency: string "1d" or "1m" field: string The desired field of the asset. data_frequency: string The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars. ffill: boolean Forward-fill missing values. Only has effect if field is 'price'. Returns ------- A dataframe containing the requested data. """
if field not in OHLCVP_FIELDS and field != 'sid':
    raise ValueError("Invalid field: {0}".format(field))

if bar_count < 1:
    raise ValueError(
        "bar_count must be >= 1, but got {}".format(bar_count)
    )

if frequency == "1d":
    if field == "price":
        df = self._get_history_daily_window(assets, end_dt, bar_count,
                                            "close", data_frequency)
    else:
        df = self._get_history_daily_window(assets, end_dt, bar_count,
                                            field, data_frequency)
elif frequency == "1m":
    if field == "price":
        df = self._get_history_minute_window(assets, end_dt, bar_count,
                                             "close")
    else:
        df = self._get_history_minute_window(assets, end_dt, bar_count,
                                             field)
else:
    raise ValueError("Invalid frequency: {0}".format(frequency))

# forward-fill price
if field == "price":
    if frequency == "1m":
        ffill_data_frequency = 'minute'
    elif frequency == "1d":
        ffill_data_frequency = 'daily'
    else:
        raise Exception(
            "Only 1d and 1m are supported for forward-filling.")

    assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0]

    history_start, history_end = df.index[[0, -1]]
    if ffill_data_frequency == 'daily' and data_frequency == 'minute':
        # When we're looking for a daily value, but we haven't seen any
        # volume in today's minute bars yet, we need to use the
        # previous day's ffilled daily price. Using today's daily price
        # could yield a value from later today.
        history_start -= self.trading_calendar.day

    initial_values = []
    for asset in df.columns[assets_with_leading_nan]:
        last_traded = self.get_last_traded_dt(
            asset,
            history_start,
            ffill_data_frequency,
        )
        if isnull(last_traded):
            initial_values.append(nan)
        else:
            initial_values.append(
                self.get_adjusted_value(
                    asset,
                    field,
                    dt=last_traded,
                    perspective_dt=history_end,
                    data_frequency=ffill_data_frequency,
                )
            )

    # Set leading values for assets that were missing data, then ffill.
    df.ix[0, assets_with_leading_nan] = np.array(
        initial_values,
        dtype=np.float64
    )
    df.fillna(method='ffill', inplace=True)

    # forward-filling will incorrectly produce values after the end of
    # an asset's lifetime, so write NaNs back over the asset's
    # end_date.
    normed_index = df.index.normalize()
    for asset in df.columns:
        if history_end >= asset.end_date:
            # if the window extends past the asset's end date, set
            # all post-end-date values to NaN in that asset's series
            df.loc[normed_index > asset.end_date, asset] = nan

return df
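A usage sketch, not taken from the source: assuming `data_portal` is an already-constructed DataPortal and `aapl` is an Equity known to its asset finder (both hypothetical names), a daily price history request looks like this.

    import pandas as pd

    # `data_portal` and `aapl` are hypothetical; substitute real objects.
    prices = data_portal.get_history_window(
        assets=[aapl],
        end_dt=pd.Timestamp('2016-01-06', tz='UTC'),
        bar_count=5,
        frequency='1d',
        field='price',            # 'price' enables forward-filling
        data_frequency='daily',
    )
    # `prices` is a DataFrame indexed by session, with one column per asset.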
<SYSTEM_TASK:> Internal method that gets a window of adjusted minute data for an asset <END_TASK> <USER_TASK:> Description: def _get_minute_window_data(self, assets, field, minutes_for_window): """ Internal method that gets a window of adjusted minute data for an asset and specified date range. Used to support the history API method for minute bars. Missing bars are filled with NaN. Parameters ---------- assets : iterable[Asset] The assets whose data is desired. field: string The specific field to return. "open", "high", "close_price", etc. minutes_for_window: pd.DateTimeIndex The list of minutes representing the desired window. Each minute is a pd.Timestamp. Returns ------- A numpy array with requested values. """
return self._minute_history_loader.history(assets, minutes_for_window, field, False)
<SYSTEM_TASK:> Internal method that gets a window of adjusted daily data for a sid <END_TASK> <USER_TASK:> Description: def _get_daily_window_data(self, assets, field, days_in_window, extra_slot=True): """ Internal method that gets a window of adjusted daily data for a sid and specified date range. Used to support the history API method for daily bars. Parameters ---------- assets : iterable[Asset] The assets whose data is desired. field: string The specific field to return. "open", "high", "close_price", etc. days_in_window: pd.DatetimeIndex The sessions representing the desired window of data. extra_slot: boolean Whether to allocate an extra slot in the returned numpy array. This extra slot will hold the data for the last partial day. It's much better to create it here than to create a copy of the array later just to add a slot. Returns ------- A numpy array with requested values. Any missing slots filled with nan. """
bar_count = len(days_in_window)

# create an np.array of size bar_count
dtype = float64 if field != 'sid' else int64
if extra_slot:
    return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
else:
    return_array = np.zeros((bar_count, len(assets)), dtype=dtype)

if field != "volume":
    # volumes default to 0, so we don't need to put NaNs in the array
    return_array[:] = np.NAN

if bar_count != 0:
    data = self._history_loader.history(assets,
                                        days_in_window,
                                        field,
                                        extra_slot)
    if extra_slot:
        return_array[:len(return_array) - 1, :] = data
    else:
        return_array[:len(data)] = data

return return_array
<SYSTEM_TASK:> Internal method that returns a list of adjustments for the given sid. <END_TASK> <USER_TASK:> Description: def _get_adjustment_list(self, asset, adjustments_dict, table_name): """ Internal method that returns a list of adjustments for the given sid. Parameters ---------- asset : Asset The asset for which to return adjustments. adjustments_dict: dict A dictionary of sid -> list that is used as a cache. table_name: string The table that contains this data in the adjustments db. Returns ------- adjustments: list A list of [multiplier, pd.Timestamp], earliest first """
if self._adjustment_reader is None:
    return []

sid = int(asset)

try:
    adjustments = adjustments_dict[sid]
except KeyError:
    adjustments = adjustments_dict[sid] = self._adjustment_reader.\
        get_adjustments_for_sid(table_name, sid)

return adjustments
<SYSTEM_TASK:> Returns any splits for the given sids and the given dt. <END_TASK> <USER_TASK:> Description: def get_splits(self, assets, dt): """ Returns any splits for the given sids and the given dt. Parameters ---------- assets : container Assets for which we want splits. dt : pd.Timestamp The date for which we are checking for splits. Note: this is expected to be midnight UTC. Returns ------- splits : list[(asset, float)] List of splits, where each split is a (asset, ratio) tuple. """
if self._adjustment_reader is None or not assets:
    return []

# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)

splits = self._adjustment_reader.conn.execute(
    "SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
    (seconds,)).fetchall()

splits = [split for split in splits if split[0] in assets]
splits = [(self.asset_finder.retrieve_asset(split[0]), split[1])
          for split in splits]

return splits
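A self-contained sketch of the same lookup pattern against an in-memory SQLite table; the schema below only mimics the adjustments db's SPLITS table and is an assumption made for illustration.

    import sqlite3
    import pandas as pd

    conn = sqlite3.connect(':memory:')
    conn.execute(
        "CREATE TABLE SPLITS (sid INTEGER, ratio REAL, effective_date INTEGER)"
    )

    dt = pd.Timestamp('2016-01-06', tz='UTC')
    seconds = int(dt.value / 1e9)      # same epoch-seconds conversion as above
    conn.execute("INSERT INTO SPLITS VALUES (?, ?, ?)", (24, 0.5, seconds))

    rows = conn.execute(
        "SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
        (seconds,),
    ).fetchall()
    print(rows)                        # [(24, 0.5)]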
<SYSTEM_TASK:> Returns all the stock dividends for a specific sid that occur <END_TASK> <USER_TASK:> Description: def get_stock_dividends(self, sid, trading_days): """ Returns all the stock dividends for a specific sid that occur in the given trading range. Parameters ---------- sid: int The asset whose stock dividends should be returned. trading_days: pd.DatetimeIndex The trading range. Returns ------- list: A list of objects with all relevant attributes populated. All timestamp fields are converted to pd.Timestamps. """
if self._adjustment_reader is None:
    return []

if len(trading_days) == 0:
    return []

start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9

dividends = self._adjustment_reader.conn.execute(
    "SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
    "ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
    fetchall()

dividend_info = []
for dividend_tuple in dividends:
    dividend_info.append({
        "declared_date": dividend_tuple[1],
        "ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
        "pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
        "payment_sid": dividend_tuple[4],
        "ratio": dividend_tuple[5],
        "record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
        "sid": dividend_tuple[7],
    })

return dividend_info
<SYSTEM_TASK:> Returns a list of assets for the current date, as defined by the <END_TASK> <USER_TASK:> Description: def get_fetcher_assets(self, dt): """ Returns a list of assets for the current date, as defined by the fetcher data. Returns ------- list: a list of Asset objects. """
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
    return []

day = normalize_date(dt)

if day in self._extra_source_df.index:
    assets = self._extra_source_df.loc[day]['sid']
else:
    return []

if isinstance(assets, pd.Series):
    return [x for x in assets if isinstance(x, Asset)]
else:
    return [assets] if isinstance(assets, Asset) else []
<SYSTEM_TASK:> Retrieves the future chain for the contract at the given `dt` according <END_TASK> <USER_TASK:> Description: def get_current_future_chain(self, continuous_future, dt): """ Retrieves the future chain for the contract at the given `dt` according to the `continuous_future` specification. Returns ------- future_chain : list[Future] A list of active futures, where the first index is the current contract specified by the continuous future definition, the second is the next upcoming contract and so on. """
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
    continuous_future.root_symbol, session,
    continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
    continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain)
<SYSTEM_TASK:> Make a value with the specified numpy dtype. <END_TASK> <USER_TASK:> Description: def coerce_to_dtype(dtype, value): """ Make a value with the specified numpy dtype. Only datetime64[ns] and datetime64[D] are supported for datetime dtypes. """
name = dtype.name
if name.startswith('datetime64'):
    if name == 'datetime64[D]':
        return make_datetime64D(value)
    elif name == 'datetime64[ns]':
        return make_datetime64ns(value)
    else:
        raise TypeError(
            "Don't know how to coerce values of dtype %s" % dtype
        )
return dtype.type(value)
<SYSTEM_TASK:> Restride `array` to repeat `count` times along the first axis. <END_TASK> <USER_TASK:> Description: def repeat_first_axis(array, count): """ Restride `array` to repeat `count` times along the first axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape (count,) + array.shape, composed of `array` repeated `count` times along the first axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_first_axis(a, 2) array([[0, 1, 2], [0, 1, 2]]) >>> repeat_first_axis(a, 4) array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_last_axis """
return as_strided(array, (count,) + array.shape, (0,) + array.strides)
<SYSTEM_TASK:> Restride `array` to repeat `count` times along the last axis. <END_TASK> <USER_TASK:> Description: def repeat_last_axis(array, count): """ Restride `array` to repeat `count` times along the last axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape array.shape + (count,) composed of `array` repeated `count` times along the last axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_last_axis(a, 2) array([[0, 0], [1, 1], [2, 2]]) >>> repeat_last_axis(a, 4) array([[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_first_axis """
return as_strided(array, array.shape + (count,), array.strides + (0,))
<SYSTEM_TASK:> Check if a value is np.NaT. <END_TASK> <USER_TASK:> Description: def isnat(obj): """ Check if a value is np.NaT. """
if obj.dtype.kind not in ('m', 'M'):
    raise ValueError("%s is not a numpy datetime or timedelta" % obj)
return obj.view(int64_dtype) == iNaT
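A standalone illustration of the same check using only numpy: NaT is stored as the minimum int64 value, which is what the view-and-compare trick above relies on.

    import numpy as np

    iNaT = np.iinfo(np.int64).min     # sentinel value used for NaT

    dates = np.array(['2014-01-01', 'NaT', '2014-01-03'], dtype='datetime64[ns]')
    mask = dates.view(np.int64) == iNaT
    print(mask)                       # [False  True False]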
<SYSTEM_TASK:> Simple wrapper around numpy.busday_count that returns `float` arrays rather than int <END_TASK> <USER_TASK:> Description: def busday_count_mask_NaT(begindates, enddates, out=None): """ Simple wrapper around numpy.busday_count that returns `float` arrays rather than int arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`. Doesn't support custom weekdays or calendars, but probably should in the future. See Also -------- np.busday_count """
if out is None:
    out = empty(broadcast(begindates, enddates).shape, dtype=float)

beginmask = isnat(begindates)
endmask = isnat(enddates)

out = busday_count(
    # Temporarily fill in non-NaT values.
    where(beginmask, _notNaT, begindates),
    where(endmask, _notNaT, enddates),
    out=out,
)

# Fill in entries where either comparison was NaT with nan in the output.
out[beginmask | endmask] = nan
return out
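For illustration, the same mask-and-restore idea written against plain numpy (np.isnat needs NumPy >= 1.13; the placeholder date is arbitrary):

    import numpy as np

    begin = np.array(['2014-01-06', 'NaT'], dtype='datetime64[D]')
    end = np.array(['2014-01-10', '2014-01-10'], dtype='datetime64[D]')

    nat_mask = np.isnat(begin) | np.isnat(end)
    placeholder = np.datetime64('2014-01-01')   # any valid date works

    counts = np.busday_count(
        np.where(np.isnat(begin), placeholder, begin),
        np.where(np.isnat(end), placeholder, end),
    ).astype(float)
    counts[nat_mask] = np.nan
    print(counts)                               # [ 4. nan]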
<SYSTEM_TASK:> Compute indices of values in ``a`` that differ from the previous value. <END_TASK> <USER_TASK:> Description: def changed_locations(a, include_first): """ Compute indices of values in ``a`` that differ from the previous value. Parameters ---------- a : np.ndarray The array on which to compute indices of change. include_first : bool Whether or not to consider the first index of the array as "changed". Example ------- >>> import numpy as np >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False) array([2, 4]) >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True) array([0, 2, 4]) """
if a.ndim > 1:
    raise ValueError("indices_of_changed_values only supports 1D arrays.")
indices = flatnonzero(diff(a)) + 1

if not include_first:
    return indices

return hstack([[0], indices])
<SYSTEM_TASK:> Compute the start and end dates to run a pipeline for. <END_TASK> <USER_TASK:> Description: def compute_date_range_chunks(sessions, start_date, end_date, chunksize): """Compute the start and end dates to run a pipeline for. Parameters ---------- sessions : DatetimeIndex The available dates. start_date : pd.Timestamp The first date in the pipeline. end_date : pd.Timestamp The last date in the pipeline. chunksize : int or None The size of the chunks to run. Setting this to None returns one chunk. Returns ------- ranges : iterable[(np.datetime64, np.datetime64)] A sequence of start and end dates to run the pipeline for. """
if start_date not in sessions:
    raise KeyError("Start date %s is not found in calendar." %
                   (start_date.strftime("%Y-%m-%d"),))
if end_date not in sessions:
    raise KeyError("End date %s is not found in calendar." %
                   (end_date.strftime("%Y-%m-%d"),))
if end_date < start_date:
    raise ValueError("End date %s cannot precede start date %s." %
                     (end_date.strftime("%Y-%m-%d"),
                      start_date.strftime("%Y-%m-%d")))

if chunksize is None:
    return [(start_date, end_date)]

start_ix, end_ix = sessions.slice_locs(start_date, end_date)
return (
    (r[0], r[-1]) for r in partition_all(
        chunksize, sessions[start_ix:end_ix]
    )
)
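An illustrative, self-contained run of the chunking logic, using a plain business-day index as a stand-in for a trading calendar's sessions (requires toolz, which provides partition_all):

    import pandas as pd
    from toolz import partition_all

    sessions = pd.date_range('2016-01-04', '2016-01-29', freq='B')
    chunksize = 5

    start_ix, end_ix = sessions.slice_locs(sessions[0], sessions[-1])
    ranges = [
        (r[0], r[-1])
        for r in partition_all(chunksize, sessions[start_ix:end_ix])
    ]
    for start, end in ranges:
        print(start.date(), '->', end.date())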
<SYSTEM_TASK:> Compute a pipeline. <END_TASK> <USER_TASK:> Description: def run_pipeline(self, pipeline, start_date, end_date): """ Compute a pipeline. Parameters ---------- pipeline : zipline.pipeline.Pipeline The pipeline to run. start_date : pd.Timestamp Start date of the computed matrix. end_date : pd.Timestamp End date of the computed matrix. Returns ------- result : pd.DataFrame A frame of computed results. The ``result`` columns correspond to the entries of `pipeline.columns`, which should be a dictionary mapping strings to instances of :class:`zipline.pipeline.term.Term`. For each date between ``start_date`` and ``end_date``, ``result`` will contain a row for each asset that passed `pipeline.screen`. A screen of ``None`` indicates that a row should be returned for each asset that existed each day. See Also -------- :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline` :meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline` """
# See notes at the top of this module for a description of the
# algorithm implemented here.
if end_date < start_date:
    raise ValueError(
        "start_date must be before or equal to end_date \n"
        "start_date=%s, end_date=%s" % (start_date, end_date)
    )

domain = self.resolve_domain(pipeline)

graph = pipeline.to_execution_plan(
    domain, self._root_mask_term, start_date, end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(
    domain, start_date, end_date, extra_rows,
)
dates, assets, root_mask_values = explode(root_mask)

initial_workspace = self._populate_initial_workspace(
    {
        self._root_mask_term: root_mask_values,
        self._root_mask_dates_term: as_column(dates.values)
    },
    self._root_mask_term,
    graph,
    dates,
    assets,
)

results = self.compute_chunk(graph, dates, assets, initial_workspace)

return self._to_narrow(
    graph.outputs,
    results,
    results.pop(graph.screen_name),
    dates[extra_rows:],
    assets,
)
<SYSTEM_TASK:> Resolve a concrete domain for ``pipeline``. <END_TASK> <USER_TASK:> Description: def resolve_domain(self, pipeline): """Resolve a concrete domain for ``pipeline``. """
domain = pipeline.domain(default=self._default_domain)
if domain is GENERIC:
    raise ValueError(
        "Unable to determine domain for Pipeline.\n"
        "Pass domain=<desired domain> to your Pipeline to set a "
        "domain."
    )
return domain
<SYSTEM_TASK:> Decorator for API methods that should only be called after <END_TASK> <USER_TASK:> Description: def require_initialized(exception): """ Decorator for API methods that should only be called after TradingAlgorithm.initialize. `exception` will be raised if the method is called before initialize has completed. Examples -------- @require_initialized(SomeException("Don't do that!")) def method(self): # Do stuff that should only be allowed after initialize. """
def decorator(method):
    @wraps(method)
    def wrapped_method(self, *args, **kwargs):
        if not self.initialized:
            raise exception
        return method(self, *args, **kwargs)
    return wrapped_method
return decorator
<SYSTEM_TASK:> Decorator for API methods that cannot be called from within <END_TASK> <USER_TASK:> Description: def disallowed_in_before_trading_start(exception): """ Decorator for API methods that cannot be called from within TradingAlgorithm.before_trading_start. `exception` will be raised if the method is called inside `before_trading_start`. Examples -------- @disallowed_in_before_trading_start(SomeException("Don't do that!")) def method(self): # Do stuff that is not allowed inside before_trading_start. """
def decorator(method):
    @wraps(method)
    def wrapped_method(self, *args, **kwargs):
        if self._in_before_trading_start:
            raise exception
        return method(self, *args, **kwargs)
    return wrapped_method
return decorator
<SYSTEM_TASK:> Simple implementation of grouped row-wise function application. <END_TASK> <USER_TASK:> Description: def naive_grouped_rowwise_apply(data, group_labels, func, func_args=(), out=None): """ Simple implementation of grouped row-wise function application. Parameters ---------- data : ndarray[ndim=2] Input array over which to apply a grouped function. group_labels : ndarray[ndim=2, dtype=int64] Labels to use to bucket inputs from array. Should be the same shape as array. func : function[ndarray[ndim=1]] -> function[ndarray[ndim=1]] Function to apply to pieces of each row in array. func_args : tuple Additional positional arguments to provide to each row in array. out : ndarray, optional Array into which to write output. If not supplied, a new array of the same shape as ``data`` is allocated and returned. Examples -------- >>> data = np.array([[1., 2., 3.], ... [2., 3., 4.], ... [5., 6., 7.]]) >>> labels = np.array([[0, 0, 1], ... [0, 1, 0], ... [1, 0, 2]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min()) array([[ 0., 1., 0.], [ 0., 0., 2.], [ 0., 0., 0.]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum()) array([[ 0.33333333, 0.66666667, 1. ], [ 0.33333333, 1. , 0.66666667], [ 1. , 1. , 1. ]]) """
if out is None:
    out = np.empty_like(data)

for (row, label_row, out_row) in zip(data, group_labels, out):
    for label in np.unique(label_row):
        locs = (label_row == label)
        out_row[locs] = func(row[locs], *func_args)
return out
<SYSTEM_TASK:> Create a DataFrame representing lifetimes of assets that are constantly <END_TASK> <USER_TASK:> Description: def make_rotating_equity_info(num_assets, first_start, frequency, periods_between_starts, asset_lifetime, exchange='TEST'): """ Create a DataFrame representing lifetimes of assets that are constantly rotating in and out of existence. Parameters ---------- num_assets : int How many assets to create. first_start : pd.Timestamp The start date for the first asset. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret next two arguments. periods_between_starts : int Create a new asset every `frequency` * `periods_between_starts`. asset_lifetime : int Each asset exists for `frequency` * `asset_lifetime` days. exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """
return pd.DataFrame(
    {
        'symbol': [chr(ord('A') + i) for i in range(num_assets)],
        # Start a new asset every `periods_between_starts` days.
        'start_date': pd.date_range(
            first_start,
            freq=(periods_between_starts * frequency),
            periods=num_assets,
        ),
        # Each asset lasts for `asset_lifetime` days.
        'end_date': pd.date_range(
            first_start + (asset_lifetime * frequency),
            freq=(periods_between_starts * frequency),
            periods=num_assets,
        ),
        'exchange': exchange,
    },
    index=range(num_assets),
)
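An illustrative call, assuming the function above is importable; the business-day offset is an arbitrary choice for `frequency` (zipline's trading_day offset works the same way):

    import pandas as pd
    from pandas.tseries.offsets import BDay

    info = make_rotating_equity_info(
        num_assets=3,
        first_start=pd.Timestamp('2015-01-05'),
        frequency=BDay(),            # assumed offset for the example
        periods_between_starts=2,
        asset_lifetime=5,
    )
    # A, B and C each start two business days after the previous asset and
    # live for five business days.
    print(info[['symbol', 'start_date', 'end_date']])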
<SYSTEM_TASK:> Create a DataFrame representing assets that exist for the full duration <END_TASK> <USER_TASK:> Description: def make_simple_equity_info(sids, start_date, end_date, symbols=None, names=None, exchange='TEST'): """ Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`. Parameters ---------- sids : array-like of int start_date : pd.Timestamp, optional end_date : pd.Timestamp, optional symbols : list, optional Symbols to use for the assets. If not provided, symbols are generated from the sequence 'A', 'B', ... names : list, optional Names to use for the assets. If not provided, names are generated by adding " INC." to each of the symbols (which might also be auto-generated). exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """
num_assets = len(sids)

if symbols is None:
    symbols = list(ascii_uppercase[:num_assets])
else:
    symbols = list(symbols)

if names is None:
    names = [str(s) + " INC." for s in symbols]

return pd.DataFrame(
    {
        'symbol': symbols,
        'start_date': pd.to_datetime([start_date] * num_assets),
        'end_date': pd.to_datetime([end_date] * num_assets),
        'asset_name': list(names),
        'exchange': exchange,
    },
    index=sids,
    columns=(
        'start_date',
        'end_date',
        'symbol',
        'exchange',
        'asset_name',
    ),
)
<SYSTEM_TASK:> Create a DataFrame representing assets that exist for the full duration <END_TASK> <USER_TASK:> Description: def make_simple_multi_country_equity_info(countries_to_sids, countries_to_exchanges, start_date, end_date): """Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`, from multiple countries. """
sids = []
symbols = []
exchanges = []

for country, country_sids in countries_to_sids.items():
    exchange = countries_to_exchanges[country]
    for i, sid in enumerate(country_sids):
        sids.append(sid)
        symbols.append('-'.join([country, str(i)]))
        exchanges.append(exchange)

return pd.DataFrame(
    {
        'symbol': symbols,
        'start_date': start_date,
        'end_date': end_date,
        'asset_name': symbols,
        'exchange': exchanges,
    },
    index=sids,
    columns=(
        'start_date',
        'end_date',
        'symbol',
        'exchange',
        'asset_name',
    ),
)
<SYSTEM_TASK:> Create a DataFrame representing assets that all begin at the same start <END_TASK> <USER_TASK:> Description: def make_jagged_equity_info(num_assets, start_date, first_end, frequency, periods_between_ends, auto_close_delta): """ Create a DataFrame representing assets that all begin at the same start date, but have cascading end dates. Parameters ---------- num_assets : int How many assets to create. start_date : pd.Timestamp The start date for all the assets. first_end : pd.Timestamp The date at which the first equity will end. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret the next argument. periods_between_ends : int Starting after the first end date, end each asset every `frequency` * `periods_between_ends`. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """
frame = pd.DataFrame(
    {
        'symbol': [chr(ord('A') + i) for i in range(num_assets)],
        'start_date': start_date,
        'end_date': pd.date_range(
            first_end,
            freq=(periods_between_ends * frequency),
            periods=num_assets,
        ),
        'exchange': 'TEST',
    },
    index=range(num_assets),
)

# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
    frame['auto_close_date'] = frame['end_date'] + auto_close_delta

return frame
<SYSTEM_TASK:> Create a DataFrame representing futures for `root_symbols` during `year`. <END_TASK> <USER_TASK:> Description: def make_future_info(first_sid, root_symbols, years, notice_date_func, expiration_date_func, start_date_func, month_codes=None, multiplier=500): """ Create a DataFrame representing futures for `root_symbols` during `year`. Generates a contract per triple of (symbol, year, month) supplied to `root_symbols`, `years`, and `month_codes`. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. notice_date_func : (Timestamp) -> Timestamp Function to generate notice dates from first of the month associated with asset month code. Return NaT to simulate futures with no notice date. expiration_date_func : (Timestamp) -> Timestamp Function to generate expiration dates from first of the month associated with asset month code. start_date_func : (Timestamp) -> Timestamp, optional Function to generate start dates from first of the month associated with each asset month code. Defaults to a start_date one year prior to the month_code date. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Returns ------- futures_info : pd.DataFrame DataFrame of futures data suitable for passing to an AssetDBWriter. """
if month_codes is None:
    month_codes = CMES_CODE_TO_MONTH

year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]

# Pairs of string/date like ('K06', 2006-05-01)
contract_suffix_to_beginning_of_month = tuple(
    (month_code + year_str[-2:], year + MonthBegin(month_num))
    for ((year, year_str), (month_code, month_num))
    in product(
        zip(years, year_strs),
        iteritems(month_codes),
    )
)

contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
    contracts.append({
        'sid': sid,
        'root_symbol': root_sym,
        'symbol': root_sym + suffix,
        'start_date': start_date_func(month_begin),
        'notice_date': notice_date_func(month_begin),
        'expiration_date': expiration_date_func(month_begin),
        'multiplier': multiplier,
        'exchange': "TEST",
    })
return pd.DataFrame.from_records(contracts, index='sid')
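A usage sketch, assuming the function above is importable from its module; the month codes and the offsets inside the date functions are invented purely for illustration:

    import pandas as pd
    from pandas.tseries.offsets import MonthBegin

    futures = make_future_info(
        first_sid=1000,
        root_symbols=['CL'],
        years=[2016, 2017],
        notice_date_func=lambda month_begin: month_begin - pd.Timedelta(days=10),
        expiration_date_func=lambda month_begin: month_begin - pd.Timedelta(days=5),
        start_date_func=lambda month_begin: month_begin - MonthBegin(12),
        month_codes={'F': 1, 'G': 2},   # restrict to two month codes for brevity
    )
    print(futures[['root_symbol', 'symbol', 'start_date', 'expiration_date']])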
<SYSTEM_TASK:> Construct a Filter matching values starting with ``prefix``. <END_TASK> <USER_TASK:> Description: def startswith(self, prefix): """ Construct a Filter matching values starting with ``prefix``. Parameters ---------- prefix : str String prefix against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string starting with ``prefix``. """
return ArrayPredicate( term=self, op=LabelArray.startswith, opargs=(prefix,), )
<SYSTEM_TASK:> Construct a Filter matching values ending with ``suffix``. <END_TASK> <USER_TASK:> Description: def endswith(self, suffix): """ Construct a Filter matching values ending with ``suffix``. Parameters ---------- suffix : str String suffix against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string ending with ``suffix``. """
return ArrayPredicate( term=self, op=LabelArray.endswith, opargs=(suffix,), )
<SYSTEM_TASK:> Construct a Filter matching values containing ``substring``. <END_TASK> <USER_TASK:> Description: def has_substring(self, substring): """ Construct a Filter matching values containing ``substring``. Parameters ---------- substring : str Sub-string against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string containing ``substring``. """
return ArrayPredicate( term=self, op=LabelArray.has_substring, opargs=(substring,), )
<SYSTEM_TASK:> Construct a Filter that checks regex matches against ``pattern``. <END_TASK> <USER_TASK:> Description: def matches(self, pattern): """ Construct a Filter that checks regex matches against ``pattern``. Parameters ---------- pattern : str Regex pattern against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string matched by ``pattern``. See Also -------- :mod:`Python Regular Expressions <re>` """
return ArrayPredicate( term=self, op=LabelArray.matches, opargs=(pattern,), )
<SYSTEM_TASK:> Construct a Filter indicating whether values are in ``choices``. <END_TASK> <USER_TASK:> Description: def element_of(self, choices): """ Construct a Filter indicating whether values are in ``choices``. Parameters ---------- choices : iterable[str or int] An iterable of choices. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces an entry in ``choices``. """
try:
    choices = frozenset(choices)
except Exception as e:
    raise TypeError(
        "Expected `choices` to be an iterable of hashable values,"
        " but got {} instead.\n"
        "This caused the following error: {!r}.".format(choices, e)
    )

if self.missing_value in choices:
    raise ValueError(
        "Found self.missing_value ({mv!r}) in choices supplied to"
        " {typename}.{meth_name}().\n"
        "Missing values have NaN semantics, so the"
        " requested comparison would always produce False.\n"
        "Use the isnull() method to check for missing values.\n"
        "Received choices were {choices}.".format(
            mv=self.missing_value,
            typename=(type(self).__name__),
            choices=sorted(choices),
            meth_name=self.element_of.__name__,
        )
    )

def only_contains(type_, values):
    return all(isinstance(v, type_) for v in values)

if self.dtype == int64_dtype:
    if only_contains(int, choices):
        return ArrayPredicate(
            term=self,
            op=vectorized_is_element,
            opargs=(choices,),
        )
    else:
        raise TypeError(
            "Found non-int in choices for {typename}.element_of.\n"
            "Supplied choices were {choices}.".format(
                typename=type(self).__name__,
                choices=choices,
            )
        )
elif self.dtype == categorical_dtype:
    if only_contains((bytes, unicode), choices):
        return ArrayPredicate(
            term=self,
            op=LabelArray.element_of,
            opargs=(choices,),
        )
    else:
        raise TypeError(
            "Found non-string in choices for {typename}.element_of.\n"
            "Supplied choices were {choices}.".format(
                typename=type(self).__name__,
                choices=choices,
            )
        )
assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype
<SYSTEM_TASK:> Called with the result of a pipeline. This needs to return an object <END_TASK> <USER_TASK:> Description: def to_workspace_value(self, result, assets): """ Called with the result of a pipeline. This needs to return an object which can be put into the workspace to continue doing computations. This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`. """
if self.dtype == int64_dtype:
    return super(Classifier, self).to_workspace_value(result, assets)

assert isinstance(result.values, pd.Categorical), (
    'Expected a Categorical, got %r.' % type(result.values)
)
with_missing = pd.Series(
    data=pd.Categorical(
        result.values,
        result.values.categories.union([self.missing_value]),
    ),
    index=result.index,
)
return LabelArray(
    super(Classifier, self).to_workspace_value(
        with_missing,
        assets,
    ),
    self.missing_value,
)
<SYSTEM_TASK:> Convert an array produced by this classifier into an array of integer <END_TASK> <USER_TASK:> Description: def _to_integral(self, output_array): """ Convert an array produced by this classifier into an array of integer labels and a missing value label. """
if self.dtype == int64_dtype:
    group_labels = output_array
    null_label = self.missing_value
elif self.dtype == categorical_dtype:
    # Coerce LabelArray into an isomorphic array of ints. This is
    # necessary because np.where doesn't know about LabelArrays or the
    # void dtype.
    group_labels = output_array.as_int_array()
    null_label = output_array.missing_value_code
else:
    raise AssertionError(
        "Unexpected Classifier dtype: %s." % self.dtype
    )
return group_labels, null_label
<SYSTEM_TASK:> Override the default array allocation to produce a LabelArray when we <END_TASK> <USER_TASK:> Description: def _allocate_output(self, windows, shape): """ Override the default array allocation to produce a LabelArray when we have a string-like dtype. """
if self.dtype == int64_dtype:
    return super(CustomClassifier, self)._allocate_output(
        windows,
        shape,
    )

# This is a little bit of a hack. We might not know what the
# categories for a LabelArray are until it's actually been loaded, so
# we need to look at the underlying data.
return windows[0].data.empty_like(shape)
<SYSTEM_TASK:> Check that all axes of a pandas object are unique. <END_TASK> <USER_TASK:> Description: def verify_indices_all_unique(obj): """ Check that all axes of a pandas object are unique. Parameters ---------- obj : pd.Series / pd.DataFrame / pd.Panel The object to validate. Returns ------- obj : pd.Series / pd.DataFrame / pd.Panel The validated object, unchanged. Raises ------ ValueError If any axis has duplicate entries. """
axis_names = [
    ('index',),                             # Series
    ('index', 'columns'),                   # DataFrame
    ('items', 'major_axis', 'minor_axis'),  # Panel
][obj.ndim - 1]  # ndim = 1 should go to entry 0.

for axis_name, index in zip(axis_names, obj.axes):
    if index.is_unique:
        continue

    raise ValueError(
        "Duplicate entries in {type}.{axis}: {dupes}.".format(
            type=type(obj).__name__,
            axis=axis_name,
            dupes=sorted(index[index.duplicated()]),
        )
    )
return obj
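A quick illustration of both outcomes, assuming the helper above is importable:

    import pandas as pd

    ok = pd.Series([1, 2], index=['a', 'b'])
    verify_indices_all_unique(ok)        # returns the Series unchanged

    dupes = pd.Series([1, 2], index=['a', 'a'])
    try:
        verify_indices_all_unique(dupes)
    except ValueError as e:
        print(e)                         # Duplicate entries in Series.index: ['a'].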
<SYSTEM_TASK:> Modify a preprocessor to explicitly allow `None`. <END_TASK> <USER_TASK:> Description: def optionally(preprocessor): """Modify a preprocessor to explicitly allow `None`. Parameters ---------- preprocessor : callable[callable, str, any -> any] A preprocessor to delegate to when `arg is not None`. Returns ------- optional_preprocessor : callable[callable, str, any -> any] A preprocessor that delegates to `preprocessor` when `arg is not None`. Examples -------- >>> def preprocessor(func, argname, arg): ... if not isinstance(arg, int): ... raise TypeError('arg must be int') ... return arg ... >>> @preprocess(a=optionally(preprocessor)) ... def f(a): ... return a ... >>> f(1) # call with int 1 >>> f('a') # call with not int Traceback (most recent call last): ... TypeError: arg must be int >>> f(None) is None # call with explicit None True """
@wraps(preprocessor)
def wrapper(func, argname, arg):
    return arg if arg is None else preprocessor(func, argname, arg)

return wrapper
<SYSTEM_TASK:> Argument preprocessor that converts the input into a numpy dtype. <END_TASK> <USER_TASK:> Description: def ensure_dtype(func, argname, arg): """ Argument preprocessor that converts the input into a numpy dtype. Examples -------- >>> import numpy as np >>> from zipline.utils.preprocess import preprocess >>> @preprocess(dtype=ensure_dtype) ... def foo(dtype): ... return dtype ... >>> foo(float) dtype('float64') """
try:
    return dtype(arg)
except TypeError:
    raise TypeError(
        "{func}() couldn't convert argument "
        "{argname}={arg!r} to a numpy dtype.".format(
            func=_qualified_name(func),
            argname=argname,
            arg=arg,
        ),
    )
<SYSTEM_TASK:> Argument preprocessor that converts the input into a tzinfo object. <END_TASK> <USER_TASK:> Description: def ensure_timezone(func, argname, arg): """Argument preprocessor that converts the input into a tzinfo object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(tz=ensure_timezone) ... def foo(tz): ... return tz >>> foo('utc') <UTC> """
if isinstance(arg, tzinfo):
    return arg
if isinstance(arg, string_types):
    return timezone(arg)

raise TypeError(
    "{func}() couldn't convert argument "
    "{argname}={arg!r} to a timezone.".format(
        func=_qualified_name(func),
        argname=argname,
        arg=arg,
    ),
)
<SYSTEM_TASK:> Argument preprocessor that converts the input into a pandas Timestamp <END_TASK> <USER_TASK:> Description: def ensure_timestamp(func, argname, arg): """Argument preprocessor that converts the input into a pandas Timestamp object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(ts=ensure_timestamp) ... def foo(ts): ... return ts >>> foo('2014-01-01') Timestamp('2014-01-01 00:00:00') """
try:
    return pd.Timestamp(arg)
except ValueError as e:
    raise TypeError(
        "{func}() couldn't convert argument "
        "{argname}={arg!r} to a pandas Timestamp.\n"
        "Original error was: {t}: {e}".format(
            func=_qualified_name(func),
            argname=argname,
            arg=arg,
            t=_qualified_name(type(e)),
            e=e,
        ),
    )
<SYSTEM_TASK:> Preprocessing decorator that verifies inputs have expected numpy dtypes. <END_TASK> <USER_TASK:> Description: def expect_dtypes(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs have expected numpy dtypes. Examples -------- >>> from numpy import dtype, arange, int8, float64 >>> @expect_dtypes(x=dtype(int8)) ... def foo(x, y): ... return x, y ... >>> foo(arange(3, dtype=int8), 'foo') (array([0, 1, 2], dtype=int8), 'foo') >>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a value with dtype 'int8' for argument 'x', but got 'float64' instead. """
for name, type_ in iteritems(named):
    if not isinstance(type_, (dtype, tuple)):
        raise TypeError(
            "expect_dtypes() expected a numpy dtype or tuple of dtypes"
            " for argument {name!r}, but got {dtype} instead.".format(
                name=name, dtype=type_,
            )
        )

if isinstance(__funcname, str):
    def get_funcname(_):
        return __funcname
else:
    get_funcname = __funcname

@preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_dtype(dtypes):
    """
    Factory for dtype-checking functions that work with the @preprocess
    decorator.
    """
    def error_message(func, argname, value):
        # If the bad value has a dtype, but it's wrong, show the dtype
        # name. Otherwise just show the value.
        try:
            value_to_show = value.dtype.name
        except AttributeError:
            value_to_show = value
        return (
            "{funcname}() expected a value with dtype {dtype_str} "
            "for argument {argname!r}, but got {value!r} instead."
        ).format(
            funcname=get_funcname(func),
            dtype_str=' or '.join(repr(d.name) for d in dtypes),
            argname=argname,
            value=value_to_show,
        )

    def _actual_preprocessor(func, argname, argvalue):
        if getattr(argvalue, 'dtype', object()) not in dtypes:
            raise TypeError(error_message(func, argname, argvalue))
        return argvalue

    return _actual_preprocessor

return preprocess(**valmap(_expect_dtype, named))
<SYSTEM_TASK:> Preprocessing decorator that verifies inputs have expected dtype kinds. <END_TASK> <USER_TASK:> Description: def expect_kinds(**named): """ Preprocessing decorator that verifies inputs have expected dtype kinds. Examples -------- >>> from numpy import int64, int32, float32 >>> @expect_kinds(x='i') ... def foo(x): ... return x ... >>> foo(int64(2)) 2 >>> foo(int32(2)) 2 >>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x', but got 'f' instead. """
for name, kind in iteritems(named):
    if not isinstance(kind, (str, tuple)):
        raise TypeError(
            "expect_kinds() expected a string or tuple of strings"
            " for argument {name!r}, but got {kind} instead.".format(
                name=name, kind=kind,
            )
        )

@preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_kind(kinds):
    """
    Factory for kind-checking functions that work with the @preprocess
    decorator.
    """
    def error_message(func, argname, value):
        # If the bad value has a dtype, but it's wrong, show the dtype
        # kind. Otherwise just show the value.
        try:
            value_to_show = value.dtype.kind
        except AttributeError:
            value_to_show = value
        return (
            "{funcname}() expected a numpy object of kind {kinds} "
            "for argument {argname!r}, but got {value!r} instead."
        ).format(
            funcname=_qualified_name(func),
            kinds=' or '.join(map(repr, kinds)),
            argname=argname,
            value=value_to_show,
        )

    def _actual_preprocessor(func, argname, argvalue):
        if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds:
            raise TypeError(error_message(func, argname, argvalue))
        return argvalue

    return _actual_preprocessor

return preprocess(**valmap(_expect_kind, named))
<SYSTEM_TASK:> Preprocessing decorator that verifies inputs have expected types. <END_TASK> <USER_TASK:> Description: def expect_types(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs have expected types. Examples -------- >>> @expect_types(x=int, y=str) ... def foo(x, y): ... return x, y ... >>> foo(2, '3') (2, '3') >>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a value of type int for argument 'x', but got float instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. """
for name, type_ in iteritems(named):
    if not isinstance(type_, (type, tuple)):
        raise TypeError(
            "expect_types() expected a type or tuple of types for "
            "argument '{name}', but got {type_} instead.".format(
                name=name, type_=type_,
            )
        )

def _expect_type(type_):
    # Slightly different messages for type and tuple of types.
    _template = (
        "%(funcname)s() expected a value of type {type_or_types} "
        "for argument '%(argname)s', but got %(actual)s instead."
    )
    if isinstance(type_, tuple):
        template = _template.format(
            type_or_types=' or '.join(map(_qualified_name, type_))
        )
    else:
        template = _template.format(type_or_types=_qualified_name(type_))

    return make_check(
        exc_type=TypeError,
        template=template,
        pred=lambda v: not isinstance(v, type_),
        actual=compose(_qualified_name, type),
        funcname=__funcname,
    )

return preprocess(**valmap(_expect_type, named))
<SYSTEM_TASK:> Factory for making preprocessing functions that check a predicate on the <END_TASK> <USER_TASK:> Description: def make_check(exc_type, template, pred, actual, funcname): """ Factory for making preprocessing functions that check a predicate on the input value. Parameters ---------- exc_type : Exception The exception type to raise if the predicate fails. template : str A template string to use to create error messages. Should have %-style named template parameters for 'funcname', 'argname', and 'actual'. pred : function[object -> bool] A function to call on the argument being preprocessed. If the predicate returns `True`, we raise an instance of `exc_type`. actual : function[object -> object] A function to call on bad values to produce the value to display in the error message. funcname : str or callable Name to use in error messages, or function to call on decorated functions to produce a name. Passing an explicit name is useful when creating checks for __init__ or __new__ methods when you want the error to refer to the class name instead of the method name. """
if isinstance(funcname, str):
    def get_funcname(_):
        return funcname
else:
    get_funcname = funcname

def _check(func, argname, argvalue):
    if pred(argvalue):
        raise exc_type(
            template % {
                'funcname': get_funcname(func),
                'argname': argname,
                'actual': actual(argvalue),
            },
        )
    return argvalue
return _check
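An illustrative check built with this factory, assuming make_check and zipline's preprocess decorator are importable; the "positive value" requirement is invented for the example:

    from zipline.utils.preprocess import preprocess

    require_positive = make_check(
        exc_type=ValueError,
        template="%(funcname)s() expected a positive value for argument "
                 "'%(argname)s', but got %(actual)s instead.",
        pred=lambda v: v <= 0,           # fail when the value is not positive
        actual=repr,
        funcname='scale',
    )

    @preprocess(x=require_positive)
    def scale(x):
        return 2 * x

    scale(3)     # 6
    scale(-1)    # raises ValueError with the message above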
<SYSTEM_TASK:> Preprocessing decorator that verifies inputs are elements of some <END_TASK> <USER_TASK:> Description: def expect_element(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs are elements of some expected collection. Examples -------- >>> @expect_element(x=('a', 'b')) ... def foo(x): ... return x.upper() ... >>> foo('a') 'A' >>> foo('b') 'B' >>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. This uses the `in` operator (__contains__) to make the containment check. This allows us to use any custom container as long as the object supports the container protocol. """
def _expect_element(collection):
    if isinstance(collection, (set, frozenset)):
        # Special case the error message for set and frozen set to make it
        # less verbose.
        collection_for_error_message = tuple(sorted(collection))
    else:
        collection_for_error_message = collection

    template = (
        "%(funcname)s() expected a value in {collection} "
        "for argument '%(argname)s', but got %(actual)s instead."
    ).format(collection=collection_for_error_message)
    return make_check(
        ValueError,
        template,
        complement(op.contains(collection)),
        repr,
        funcname=__funcname,
    )
return preprocess(**valmap(_expect_element, named))
<SYSTEM_TASK:> Preprocessing decorator verifying that inputs fall INCLUSIVELY between <END_TASK> <USER_TASK:> Description: def expect_bounded(__funcname=_qualified_name, **named): """ Preprocessing decorator verifying that inputs fall INCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(1) 2 >>> foo(5) 6 >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value inclusively between 1 and 5 for argument 'x', but got 6 instead. >>> @expect_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value greater than or equal to 2 for argument 'x', but got 1 instead. >>> @expect_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value less than or equal to 5 for argument 'x', but got 6 instead. """
def _make_bounded_check(bounds):
    (lower, upper) = bounds
    if lower is None:
        def should_fail(value):
            return value > upper
        predicate_descr = "less than or equal to " + str(upper)
    elif upper is None:
        def should_fail(value):
            return value < lower
        predicate_descr = "greater than or equal to " + str(lower)
    else:
        def should_fail(value):
            return not (lower <= value <= upper)
        predicate_descr = "inclusively between %s and %s" % bounds

    template = (
        "%(funcname)s() expected a value {predicate}"
        " for argument '%(argname)s', but got %(actual)s instead."
    ).format(predicate=predicate_descr)

    return make_check(
        exc_type=ValueError,
        template=template,
        pred=should_fail,
        actual=repr,
        funcname=__funcname,
    )

return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
<SYSTEM_TASK:> Preprocessing decorator that verifies inputs are numpy arrays with a <END_TASK> <USER_TASK:> Description: def expect_dimensions(__funcname=_qualified_name, **dimensions): """ Preprocessing decorator that verifies inputs are numpy arrays with a specific dimensionality. Examples -------- >>> from numpy import array >>> @expect_dimensions(x=1, y=2) ... def foo(x, y): ... return x[0] + y[0, 0] ... >>> foo(array([1, 1]), array([[1, 1], [2, 2]])) 2 >>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a 2-D array for argument 'y', but got a 1-D array instead. """
if isinstance(__funcname, str): def get_funcname(_): return __funcname else: get_funcname = __funcname def _expect_dimension(expected_ndim): def _check(func, argname, argvalue): actual_ndim = argvalue.ndim if actual_ndim != expected_ndim: if actual_ndim == 0: actual_repr = 'scalar' else: actual_repr = "%d-D array" % actual_ndim raise ValueError( "{func}() expected a {expected:d}-D array" " for argument {argname!r}, but got a {actual}" " instead.".format( func=get_funcname(func), expected=expected_ndim, argname=argname, actual=actual_repr, ) ) return argvalue return _check return preprocess(**valmap(_expect_dimension, dimensions))
<SYSTEM_TASK:> A preprocessing decorator that coerces inputs of a given type by passing <END_TASK> <USER_TASK:> Description: def coerce(from_, to, **to_kwargs): """ A preprocessing decorator that coerces inputs of a given type by passing them to a callable. Parameters ---------- from_ : type or tuple of types Input types on which to call ``to``. to : function Coercion function to call on inputs. **to_kwargs Additional keywords to forward to every call to ``to``. Examples -------- >>> @preprocess(x=coerce(float, int), y=coerce(float, int)) ... def floordiff(x, y): ... return x - y ... >>> floordiff(3.2, 2.5) 1 >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2)) ... def add_binary_strings(x, y): ... return bin(x + y)[2:] ... >>> add_binary_strings('101', '001') '110' """
def preprocessor(func, argname, arg): if isinstance(arg, from_): return to(arg, **to_kwargs) return arg return preprocessor
<SYSTEM_TASK:> Preprocessing decorator that applies type coercions. <END_TASK> <USER_TASK:> Description: def coerce_types(**kwargs): """ Preprocessing decorator that applies type coercions. Parameters ---------- **kwargs : dict[str -> (type, callable)] Keyword arguments mapping function parameter names to pairs of (from_type, to_type). Examples -------- >>> @coerce_types(x=(float, int), y=(int, str)) ... def func(x, y): ... return (x, y) ... >>> func(1.0, 3) (1, '3') """
def _coerce(types): return coerce(*types) return preprocess(**valmap(_coerce, kwargs))
<SYSTEM_TASK:> Validate that a dictionary has an expected set of keys. <END_TASK> <USER_TASK:> Description: def validate_keys(dict_, expected, funcname): """Validate that a dictionary has an expected set of keys. """
expected = set(expected) received = set(dict_) missing = expected - received if missing: raise ValueError( "Missing keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) ) unexpected = received - expected if unexpected: raise ValueError( "Unexpected keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) )
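A rough usage sketch, assuming `validate_keys` is in scope as defined above; the config dict and the `'run_backtest'` function name are purely illustrative:

config = {'start': '2014-01-01', 'end': '2014-12-31'}

# Exactly the expected keys: passes silently.
validate_keys(config, expected={'start', 'end'}, funcname='run_backtest')

# A missing key raises a ValueError listing expected vs. received keys.
try:
    validate_keys({'start': '2014-01-01'}, {'start', 'end'}, 'run_backtest')
except ValueError as e:
    print(e)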
<SYSTEM_TASK:> Construct a new enum object. <END_TASK> <USER_TASK:> Description: def enum(option, *options): """ Construct a new enum object. Parameters ---------- *options : iterable of str The names of the fields for the enum. Returns ------- enum A new enum collection. Examples -------- >>> e = enum('a', 'b', 'c') >>> e <enum: ('a', 'b', 'c')> >>> e.a 0 >>> e.b 1 >>> e.a in e True >>> tuple(e) (0, 1, 2) Notes ----- Identity checking is not guaranteed to work with enum members, instead equality checks should be used. From CPython's documentation: "The current implementation keeps an array of integer objects for all integers between -5 and 256, when you create an int in that range you actually just get back a reference to the existing object. So it should be possible to change the value of 1. I suspect the behaviour of Python in this case is undefined. :-)" """
options = (option,) + options rangeob = range(len(options)) try: inttype = _inttypes[int(np.log2(len(options) - 1)) // 8] except IndexError: raise OverflowError( 'Cannot store enums with more than sys.maxsize elements, got %d' % len(options), ) class _enum(Structure): _fields_ = [(o, inttype) for o in options] def __iter__(self): return iter(rangeob) def __contains__(self, value): return 0 <= value < len(options) def __repr__(self): return '<enum: %s>' % ( ('%d fields' % len(options)) if len(options) > 10 else repr(options) ) return _enum(*rangeob)
<SYSTEM_TASK:> Resizes the buffer to make room for the given missing dates at the front of the window. <END_TASK> <USER_TASK:> Description: def extend_back(self, missing_dts): """ Resizes the buffer to make room for the given missing dates at the front of the window, growing the current window by ``len(missing_dts)`` and prepending NaN-filled values for those dates. """
delta = len(missing_dts) if not delta: raise ValueError( 'missing_dts must be a non-empty index', ) self._window += delta self._pos += delta self.date_buf = self.date_buf.copy() self.date_buf.resize(self.cap) self.date_buf = np.roll(self.date_buf, delta) old_vals = self.buffer.values shape = old_vals.shape nan_arr = np.empty((shape[0], delta, shape[2])) nan_arr.fill(np.nan) new_vals = np.column_stack( (nan_arr, old_vals, np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))), ) self.buffer = pd.Panel( data=new_vals, items=self.items, minor_axis=self.minor_axis, major_axis=np.arange(self.cap), dtype=self.dtype, ) # Fill the delta with the dates we calculated. where = slice(self._start_index, self._start_index + delta) self.date_buf[where] = missing_dts
<SYSTEM_TASK:> Set the values stored in our current in-view data to be values of the <END_TASK> <USER_TASK:> Description: def set_current(self, panel): """ Set the values stored in our current in-view data to be values of the passed panel. The passed panel must have the same indices as the panel that would be returned by self.get_current. """
where = slice(self._start_index, self._pos) self.buffer.values[:, where, :] = panel.values
<SYSTEM_TASK:> Roll window worth of data up to position zero. <END_TASK> <USER_TASK:> Description: def _roll_data(self): """ Roll window worth of data up to position zero. Save the effort of having to expensively roll at each iteration """
self.buffer.values[:, :self._window, :] = \ self.buffer.values[:, -self._window:, :] self.date_buf[:self._window] = self.date_buf[-self._window:] self._pos = self._window
<SYSTEM_TASK:> Update internal state based on price triggers and the <END_TASK> <USER_TASK:> Description: def check_triggers(self, price, dt): """ Update internal state based on price triggers and the trade event's price. """
stop_reached, limit_reached, sl_stop_reached = \ self.check_order_triggers(price) if (stop_reached, limit_reached) \ != (self.stop_reached, self.limit_reached): self.dt = dt self.stop_reached = stop_reached self.limit_reached = limit_reached if sl_stop_reached: # Change the STOP LIMIT order into a LIMIT order self.stop = None
<SYSTEM_TASK:> For a market order, True. <END_TASK> <USER_TASK:> Description: def triggered(self): """ For a market order, True. For a stop order, True IFF stop_reached. For a limit order, True IFF limit_reached. """
if self.stop is not None and not self.stop_reached: return False if self.limit is not None and not self.limit_reached: return False return True
<SYSTEM_TASK:> Define a unique string for any set of representable args. <END_TASK> <USER_TASK:> Description: def hash_args(*args, **kwargs): """Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args]) kwarg_string = '_'.join([str(key) + '=' + str(value) for key, value in iteritems(kwargs)]) combined = ':'.join([arg_string, kwarg_string]) hasher = md5() hasher.update(b(combined)) return hasher.hexdigest()
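As a quick illustration (assuming `hash_args` is importable as defined above): equal inputs hash to equal digests, and any change to the arguments changes the digest.

# Same args and kwargs -> same md5 hex digest.
assert hash_args(1, 'AAPL', freq='daily') == hash_args(1, 'AAPL', freq='daily')

# Changing any argument (or keyword value) produces a different digest.
assert hash_args(1, 'AAPL', freq='daily') != hash_args(1, 'AAPL', freq='minute')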
<SYSTEM_TASK:> Assert that an event meets the protocol for datasource outputs. <END_TASK> <USER_TASK:> Description: def assert_datasource_protocol(event): """Assert that an event meets the protocol for datasource outputs."""
assert event.type in DATASOURCE_TYPE # Done packets have no dt. if not event.type == DATASOURCE_TYPE.DONE: assert isinstance(event.dt, datetime) assert event.dt.tzinfo == pytz.utc
<SYSTEM_TASK:> Assert that an event meets the protocol for datasource TRADE outputs. <END_TASK> <USER_TASK:> Description: def assert_trade_protocol(event): """Assert that an event meets the protocol for datasource TRADE outputs."""
assert_datasource_protocol(event) assert event.type == DATASOURCE_TYPE.TRADE assert isinstance(event.price, numbers.Real) assert isinstance(event.volume, numbers.Integral) assert isinstance(event.dt, datetime)
<SYSTEM_TASK:> Takes an iterable of sources, generating namestrings and <END_TASK> <USER_TASK:> Description: def date_sorted_sources(*sources): """ Takes an iterable of sources, generating namestrings and piping their output into date_sort. """
sorted_stream = heapq.merge(*(_decorate_source(s) for s in sources)) # Strip out key decoration for _, message in sorted_stream: yield message
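zipline's `_decorate_source` is not shown in this excerpt, but the decorate-and-merge idea can be sketched on its own. The snippet below assumes each source yields events with a `dt` attribute and keys the merge on that attribute; the integer dts and ticker names are made up.

import heapq
from collections import namedtuple

Event = namedtuple('Event', ['dt', 'sid'])

def decorate(source):
    # Pair each event with its dt so heapq.merge can order events across sources.
    for event in source:
        yield event.dt, event

a = iter([Event(1, 'AAPL'), Event(3, 'AAPL')])
b = iter([Event(2, 'MSFT')])

merged = [event for _, event in heapq.merge(decorate(a), decorate(b))]
# merged is ordered by dt across both sources: dts 1, 2, 3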
<SYSTEM_TASK:> Creates trade_count trades for each sid in the sids list. <END_TASK> <USER_TASK:> Description: def create_daily_trade_source(sids, sim_params, asset_finder, trading_calendar): """ Creates trade_count trades for each sid in the sids list. The first trade will be on sim_params.start_session, and daily thereafter for each sid. Thus, two sids should result in two trades per day. """
return create_trade_source( sids, timedelta(days=1), sim_params, asset_finder, trading_calendar=trading_calendar, )
<SYSTEM_TASK:> Load data table from zip file provided by Quandl. <END_TASK> <USER_TASK:> Description: def load_data_table(file, index_col, show_progress=False): """ Load data table from zip file provided by Quandl. """
with ZipFile(file) as zip_file: file_names = zip_file.namelist() assert len(file_names) == 1, "Expected a single file from Quandl." wiki_prices = file_names.pop() with zip_file.open(wiki_prices) as table_file: if show_progress: log.info('Parsing raw data.') data_table = pd.read_csv( table_file, parse_dates=['date'], index_col=index_col, usecols=[ 'ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'ex-dividend', 'split_ratio', ], ) data_table.rename( columns={ 'ticker': 'symbol', 'ex-dividend': 'ex_dividend', }, inplace=True, copy=False, ) return data_table
<SYSTEM_TASK:> quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset. <END_TASK> <USER_TASK:> Description: def quandl_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): """ quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset. For more information on Quandl's API and how to obtain an API key, please visit https://docs.quandl.com/docs#section-authentication """
api_key = environ.get('QUANDL_API_KEY') if api_key is None: raise ValueError( "Please set your QUANDL_API_KEY environment variable and retry." ) raw_data = fetch_data_table( api_key, show_progress, environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5) ) asset_metadata = gen_asset_metadata( raw_data[['symbol', 'date']], show_progress ) asset_db_writer.write(asset_metadata) symbol_map = asset_metadata.symbol sessions = calendar.sessions_in_range(start_session, end_session) raw_data.set_index(['date', 'symbol'], inplace=True) daily_bar_writer.write( parse_pricing_and_vol( raw_data, sessions, symbol_map ), show_progress=show_progress ) raw_data.reset_index(inplace=True) raw_data['symbol'] = raw_data['symbol'].astype('category') raw_data['sid'] = raw_data.symbol.cat.codes adjustment_writer.write( splits=parse_splits( raw_data[[ 'sid', 'date', 'split_ratio', ]].loc[raw_data.split_ratio != 1], show_progress=show_progress ), dividends=parse_dividends( raw_data[[ 'sid', 'date', 'ex_dividend', ]].loc[raw_data.ex_dividend != 0], show_progress=show_progress ) )
<SYSTEM_TASK:> Download streaming data from a URL, printing progress information to the <END_TASK> <USER_TASK:> Description: def download_with_progress(url, chunk_size, **progress_kwargs): """ Download streaming data from a URL, printing progress information to the terminal. Parameters ---------- url : str A URL that can be understood by ``requests.get``. chunk_size : int Number of bytes to read at a time from requests. **progress_kwargs Forwarded to click.progressbar. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """
resp = requests.get(url, stream=True) resp.raise_for_status() total_size = int(resp.headers['content-length']) data = BytesIO() with progressbar(length=total_size, **progress_kwargs) as pbar: for chunk in resp.iter_content(chunk_size=chunk_size): data.write(chunk) pbar.update(len(chunk)) data.seek(0) return data
<SYSTEM_TASK:> Download data from a URL, returning a BytesIO containing the loaded data. <END_TASK> <USER_TASK:> Description: def download_without_progress(url): """ Download data from a URL, returning a BytesIO containing the loaded data. Parameters ---------- url : str A URL that can be understood by ``requests.get``. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """
resp = requests.get(url) resp.raise_for_status() return BytesIO(resp.content)
<SYSTEM_TASK:> Resample a DataFrame with minute data into the frame expected by a <END_TASK> <USER_TASK:> Description: def minute_frame_to_session_frame(minute_frame, calendar): """ Resample a DataFrame with minute data into the frame expected by a BcolzDailyBarWriter. Parameters ---------- minute_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `dt` (minute dts) calendar : trading_calendars.trading_calendar.TradingCalendar The trading calendar whose session labels are used to resample the minute data up to sessions. Returns ------- session_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `day` (datetime-like). """
how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c]) for c in minute_frame.columns) labels = calendar.minute_index_to_session_labels(minute_frame.index) return minute_frame.groupby(labels).agg(how)
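The per-column aggregation can be sketched with plain pandas. The `how` mapping below mirrors the usual OHLCV conventions and is an assumption about what `_MINUTE_TO_SESSION_OHCLV_HOW` contains; calendar dates stand in for real session labels.

import pandas as pd

minutes = pd.date_range('2014-01-02 14:31', periods=4, freq='T', tz='UTC')
minute_frame = pd.DataFrame(
    {
        'open': [10.0, 11.0, 12.0, 13.0],
        'high': [10.5, 12.0, 12.5, 14.0],
        'low': [9.5, 10.5, 11.5, 12.5],
        'close': [10.2, 11.8, 12.1, 13.9],
        'volume': [100, 200, 300, 400],
    },
    index=minutes,
)

# Assumed OHLCV rules: first open, max high, min low, last close, summed volume.
how = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'}
session_frame = minute_frame.groupby(minute_frame.index.date).agg(how)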
<SYSTEM_TASK:> Resample an array with minute data into an array with session data. <END_TASK> <USER_TASK:> Description: def minute_to_session(column, close_locs, data, out): """ Resample an array with minute data into an array with session data. This function assumes that the minute data is the exact length of all minutes in the sessions in the output. Parameters ---------- column : str The `open`, `high`, `low`, `close`, or `volume` column. close_locs : array[intp] The locations in `data` which are the market close minutes. data : array[float64|uint32] The minute data to be sampled into session data. The first value should align with the market open of the first session, containing values for all minutes for all sessions. With the last value being the market close of the last session. out : array[float64|uint32] The output array into which to write the sampled sessions. """
if column == 'open': _minute_to_session_open(close_locs, data, out) elif column == 'high': _minute_to_session_high(close_locs, data, out) elif column == 'low': _minute_to_session_low(close_locs, data, out) elif column == 'close': _minute_to_session_close(close_locs, data, out) elif column == 'volume': _minute_to_session_volume(close_locs, data, out) return out
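The compiled `_minute_to_session_*` helpers are not shown here, but the volume branch, for example, amounts to a segmented reduction over the minutes of each session. A plausible NumPy sketch of that idea (an illustration only, not zipline's implementation):

import numpy as np

# Two sessions of three minutes each; close_locs marks the last minute of each session.
data = np.array([100, 200, 300, 10, 20, 30], dtype=np.uint32)
close_locs = np.array([2, 5], dtype=np.intp)

# Each session starts at 0 or at the minute following the previous close.
starts = np.hstack([[0], close_locs[:-1] + 1])
session_volume = np.add.reduceat(data, starts)   # [600, 60]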
<SYSTEM_TASK:> The open field's aggregation returns the first value that occurs <END_TASK> <USER_TASK:> Description: def opens(self, assets, dt): """ The open field's aggregation returns the first value that occurs for the day. If there has been no data on or before the `dt`, the open is `nan`. Once the first non-nan open is seen, that value remains constant per asset for the remainder of the day. Returns ------- np.array with dtype=float64, in order of assets parameter. """
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open') opens = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): opens.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'open') entries[asset] = (dt_value, val) opens.append(val) continue else: try: last_visited_dt, first_open = entries[asset] if last_visited_dt == dt_value: opens.append(first_open) continue elif not pd.isnull(first_open): opens.append(first_open) entries[asset] = (dt_value, first_open) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['open'], after_last, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['open'], market_open, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue return np.array(opens)
<SYSTEM_TASK:> The high field's aggregation returns the largest high seen between <END_TASK> <USER_TASK:> Description: def highs(self, assets, dt): """ The high field's aggregation returns the largest high seen between the market open and the current dt. If there has been no data on or before the `dt` the high is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high') highs = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): highs.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'high') entries[asset] = (dt_value, val) highs.append(val) continue else: try: last_visited_dt, last_max = entries[asset] if last_visited_dt == dt_value: highs.append(last_max) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'high') if pd.isnull(curr_val): val = last_max elif pd.isnull(last_max): val = curr_val else: val = max(last_max, curr_val) entries[asset] = (dt_value, val) highs.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['high'], after_last, dt, [asset], )[0].T val = np.nanmax(np.append(window, last_max)) entries[asset] = (dt_value, val) highs.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['high'], market_open, dt, [asset], )[0].T val = np.nanmax(window) entries[asset] = (dt_value, val) highs.append(val) continue return np.array(highs)
<SYSTEM_TASK:> The low field's aggregation returns the smallest low seen between <END_TASK> <USER_TASK:> Description: def lows(self, assets, dt): """ The low field's aggregation returns the smallest low seen between the market open and the current dt. If there has been no data on or before the `dt` the low is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low') lows = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): lows.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'low') entries[asset] = (dt_value, val) lows.append(val) continue else: try: last_visited_dt, last_min = entries[asset] if last_visited_dt == dt_value: lows.append(last_min) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'low') val = np.nanmin([last_min, curr_val]) entries[asset] = (dt_value, val) lows.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['low'], after_last, dt, [asset], )[0].T val = np.nanmin(np.append(window, last_min)) entries[asset] = (dt_value, val) lows.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['low'], market_open, dt, [asset], )[0].T val = np.nanmin(window) entries[asset] = (dt_value, val) lows.append(val) continue return np.array(lows)
<SYSTEM_TASK:> The close field's aggregation returns the latest close at the given <END_TASK> <USER_TASK:> Description: def closes(self, assets, dt): """ The close field's aggregation returns the latest close at the given dt. If the close for the given dt is `nan`, the most recent non-nan `close` is used. If there has been no data on or before the `dt` the close is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close') closes = [] session_label = self._trading_calendar.minute_to_session_label(dt) def _get_filled_close(asset): """ Returns the most recent non-nan close for the asset in this session. If there has been no data in this session on or before the `dt`, returns `nan` """ window = self._minute_reader.load_raw_arrays( ['close'], market_open, dt, [asset], )[0] try: return window[~np.isnan(window)][-1] except IndexError: return np.NaN for asset in assets: if not asset.is_alive_for_session(session_label): closes.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'close') entries[asset] = (dt_value, val) closes.append(val) continue else: try: last_visited_dt, last_close = entries[asset] if last_visited_dt == dt_value: closes.append(last_close) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = last_close entries[asset] = (dt_value, val) closes.append(val) continue else: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = _get_filled_close(asset) entries[asset] = (dt_value, val) closes.append(val) continue except KeyError: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = _get_filled_close(asset) entries[asset] = (dt_value, val) closes.append(val) continue return np.array(closes)
<SYSTEM_TASK:> The volume field's aggregation returns the sum of all volumes <END_TASK> <USER_TASK:> Description: def volumes(self, assets, dt): """ The volume field's aggregation returns the sum of all volumes between the market open and the `dt`. If there has been no data on or before the `dt`, the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter. """
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume') volumes = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): volumes.append(0) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'volume') entries[asset] = (dt_value, val) volumes.append(val) continue else: try: last_visited_dt, last_total = entries[asset] if last_visited_dt == dt_value: volumes.append(last_total) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'volume') val += last_total entries[asset] = (dt_value, val) volumes.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['volume'], after_last, dt, [asset], )[0] val = np.nansum(window) + last_total entries[asset] = (dt_value, val) volumes.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['volume'], market_open, dt, [asset], )[0] val = np.nansum(window) entries[asset] = (dt_value, val) volumes.append(val) continue return np.array(volumes)
<SYSTEM_TASK:> Infer the domain from a collection of terms. <END_TASK> <USER_TASK:> Description: def infer_domain(terms): """ Infer the domain from a collection of terms. The algorithm for inferring domains is as follows: - If all input terms have a domain of GENERIC, the result is GENERIC. - If there is exactly one non-generic domain in the input terms, the result is that domain. - Otherwise, an AmbiguousDomain error is raised. Parameters ---------- terms : iterable[zipline.pipeline.term.Term] Returns ------- inferred : Domain or NotSpecified Raises ------ AmbiguousDomain Raised if more than one concrete domain is present in the input terms. """
domains = {t.domain for t in terms} num_domains = len(domains) if num_domains == 0: return GENERIC elif num_domains == 1: return domains.pop() elif num_domains == 2 and GENERIC in domains: domains.remove(GENERIC) return domains.pop() else: # Remove GENERIC if it's present before raising. Showing it to the user # is confusing because it doesn't contribute to the error. domains.discard(GENERIC) raise AmbiguousDomain(sorted(domains, key=repr))
<SYSTEM_TASK:> Given a date, align it to the calendar of the pipeline's domain. <END_TASK> <USER_TASK:> Description: def roll_forward(self, dt): """ Given a date, align it to the calendar of the pipeline's domain. Parameters ---------- dt : pd.Timestamp Returns ------- pd.Timestamp """
dt = pd.Timestamp(dt, tz='UTC') trading_days = self.all_sessions() try: return trading_days[trading_days.searchsorted(dt)] except IndexError: raise ValueError( "Date {} was past the last session for domain {}. " "The last session for this domain is {}.".format( dt.date(), self, trading_days[-1].date() ) )
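The `searchsorted` step can be illustrated with a plain DatetimeIndex standing in for `self.all_sessions()`; the session dates below are made up.

import pandas as pd

sessions = pd.DatetimeIndex(['2014-01-02', '2014-01-03', '2014-01-06'], tz='UTC')

dt = pd.Timestamp('2014-01-04', tz='UTC')      # a Saturday
loc = sessions.searchsorted(dt)                # position of the next session on/after dt
print(sessions[loc])                           # 2014-01-06 00:00:00+00:00

# A date past the last session falls off the end of the index, which is the
# IndexError case handled above.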
<SYSTEM_TASK:> Returns the date index and sid columns shared by a list of dataframes, <END_TASK> <USER_TASK:> Description: def days_and_sids_for_frames(frames): """ Returns the date index and sid columns shared by a list of dataframes, ensuring they all match. Parameters ---------- frames : list[pd.DataFrame] A list of dataframes indexed by day, with a column per sid. Returns ------- days : np.array[datetime64[ns]] The days in these dataframes. sids : np.array[int64] The sids in these dataframes. Raises ------ ValueError If the dataframes passed are not all indexed by the same days and sids. """
if not frames:
    days = np.array([], dtype='datetime64[ns]')
    sids = np.array([], dtype='int64')
    return days, sids

# Ensure the indices and columns all match.
check_indexes_all_same(
    [frame.index for frame in frames],
    message='Frames have mismatched days.',
)
check_indexes_all_same(
    [frame.columns for frame in frames],
    message='Frames have mismatched sids.',
)

return frames[0].index.values, frames[0].columns.values
<SYSTEM_TASK:> Write the OHLCV data for one country to the HDF5 file. <END_TASK> <USER_TASK:> Description: def write(self, country_code, frames, scaling_factors=None): """Write the OHLCV data for one country to the HDF5 file. Parameters ---------- country_code : str The ISO 3166 alpha-2 country code for this country. frames : dict[str, pd.DataFrame] A dict mapping each OHLCV field to a dataframe with a row for each date and a column for each sid. The dataframes need to have the same index and columns. scaling_factors : dict[str, float], optional A dict mapping each OHLCV field to a scaling factor, which is applied (as a multiplier) to the values of field to efficiently store them as uint32, while maintaining desired precision. These factors are written to the file as metadata, which is consumed by the reader to adjust back to the original float values. Default is None, in which case DEFAULT_SCALING_FACTORS is used. """
if scaling_factors is None:
    scaling_factors = DEFAULT_SCALING_FACTORS

with self.h5_file(mode='a') as h5_file:
    # ensure that the file version has been written
    h5_file.attrs['version'] = VERSION

    country_group = h5_file.create_group(country_code)

    data_group = country_group.create_group(DATA)
    index_group = country_group.create_group(INDEX)
    lifetimes_group = country_group.create_group(LIFETIMES)

    # Note that this function validates that all of the frames
    # share the same days and sids.
    days, sids = days_and_sids_for_frames(list(frames.values()))

    # Write sid and date indices.
    index_group.create_dataset(SID, data=sids)

    # h5py does not support datetimes, so they need to be stored
    # as integers.
    index_group.create_dataset(DAY, data=days.astype(np.int64))

    log.debug(
        'Wrote {} group to file {}',
        index_group.name,
        self._filename,
    )

    # Write start and end dates for each sid.
    start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames)

    lifetimes_group.create_dataset(START_DATE, data=start_date_ixs)
    lifetimes_group.create_dataset(END_DATE, data=end_date_ixs)

    if len(sids):
        chunks = (len(sids), min(self._date_chunk_size, len(days)))
    else:
        # h5py crashes if we provide chunks for empty data.
        chunks = None

    for field in FIELDS:
        frame = frames[field]

        # Sort rows by increasing sid, and columns by increasing date.
        frame.sort_index(inplace=True)
        frame.sort_index(axis='columns', inplace=True)

        data = coerce_to_uint32(
            frame.T.fillna(0).values,
            scaling_factors[field],
        )

        dataset = data_group.create_dataset(
            field,
            compression='lzf',
            shuffle=True,
            data=data,
            chunks=chunks,
        )

        dataset.attrs[SCALING_FACTOR] = scaling_factors[field]

        log.debug(
            'Writing dataset {} to file {}',
            dataset.name, self._filename
        )
<SYSTEM_TASK:> Construct from an h5py.File and a country code. <END_TASK> <USER_TASK:> Description: def from_file(cls, h5_file, country_code): """ Construct from an h5py.File and a country code. Parameters ---------- h5_file : h5py.File An HDF5 daily pricing file. country_code : str The ISO 3166 alpha-2 country code for the country to read. """
if h5_file.attrs['version'] != VERSION: raise ValueError( 'mismatched version: file is of version %s, expected %s' % ( h5_file.attrs['version'], VERSION, ), ) return cls(h5_file[country_code])
<SYSTEM_TASK:> Construct from a file path and a country code. <END_TASK> <USER_TASK:> Description: def from_path(cls, path, country_code): """ Construct from a file path and a country code. Parameters ---------- path : str The path to an HDF5 daily pricing file. country_code : str The ISO 3166 alpha-2 country code for the country to read. """
return cls.from_file(h5py.File(path), country_code)
<SYSTEM_TASK:> Build an indexer mapping ``self.sids`` to ``assets``. <END_TASK> <USER_TASK:> Description: def _make_sid_selector(self, assets): """ Build an indexer mapping ``self.sids`` to ``assets``. Parameters ---------- assets : list[int] List of assets requested by a caller of ``load_raw_arrays``. Returns ------- index : np.array[int64] Index array containing the index in ``self.sids`` for each location in ``assets``. Entries in ``assets`` for which we don't have a sid will contain -1. It is caller's responsibility to handle these values correctly. """
assets = np.array(assets) sid_selector = self.sids.searchsorted(assets) unknown = np.in1d(assets, self.sids, invert=True) sid_selector[unknown] = -1 return sid_selector
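A standalone illustration of the searchsorted/in1d pattern, with made-up sids:

import numpy as np

sids = np.array([1, 3, 5, 9])            # sorted sids known to this reader
assets = np.array([3, 4, 9])             # requested assets; 4 is unknown

selector = sids.searchsorted(assets)     # [1, 2, 3]
unknown = np.in1d(assets, sids, invert=True)
selector[unknown] = -1
print(selector)                          # [ 1 -1  3]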
<SYSTEM_TASK:> Validate that asset identifiers are contained in the daily bars. <END_TASK> <USER_TASK:> Description: def _validate_assets(self, assets): """Validate that asset identifiers are contained in the daily bars. Parameters ---------- assets : array-like[int] The asset identifiers to validate. Raises ------ NoDataForSid If one or more of the provided asset identifiers are not contained in the daily bars. """
missing_sids = np.setdiff1d(assets, self.sids) if len(missing_sids): raise NoDataForSid( 'Assets not contained in daily pricing file: {}'.format( missing_sids ) )
<SYSTEM_TASK:> Update dataframes in place to set identifier columns as indices. <END_TASK> <USER_TASK:> Description: def _normalize_index_columns_in_place(equities, equity_supplementary_mappings, futures, exchanges, root_symbols): """ Update dataframes in place to set identifier columns as indices. For each input frame, if the frame has a column with the same name as its associated index column, set that column as the index. Otherwise, assume the index already contains identifiers. If frames are passed as None, they're ignored. """
for frame, column_name in ((equities, 'sid'), (equity_supplementary_mappings, 'sid'), (futures, 'sid'), (exchanges, 'exchange'), (root_symbols, 'root_symbol')): if frame is not None and column_name in frame: frame.set_index(column_name, inplace=True)
<SYSTEM_TASK:> Takes in a symbol that may be delimited and splits it into a company <END_TASK> <USER_TASK:> Description: def split_delimited_symbol(symbol): """ Takes in a symbol that may be delimited and splits it into a company symbol and a share class symbol. Parameters ---------- symbol : str The possibly-delimited symbol to be split Returns ------- company_symbol : str The company part of the symbol. share_class_symbol : str The share class part of a symbol. """
# return blank strings for any bad fuzzy symbols, like NaN or None if symbol in _delimited_symbol_default_triggers: return '', '' symbol = symbol.upper() split_list = re.split( pattern=_delimited_symbol_delimiters_regex, string=symbol, maxsplit=1, ) # Break the list up in to its two components, the company symbol and the # share class symbol company_symbol = split_list[0] if len(split_list) > 1: share_class_symbol = split_list[1] else: share_class_symbol = '' return company_symbol, share_class_symbol
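Illustrative behavior, assuming the delimiter regex covers the usual '.', '-', '/' and '_' characters and that None/NaN are among the default triggers (neither `_delimited_symbol_delimiters_regex` nor `_delimited_symbol_default_triggers` is shown in this excerpt):

split_delimited_symbol('BRK.A')    # ('BRK', 'A')
split_delimited_symbol('brk_a')    # ('BRK', 'A') -- input is upper-cased first
split_delimited_symbol('AAPL')     # ('AAPL', '')
split_delimited_symbol(None)       # ('', '') -- assuming None is a default trigger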
<SYSTEM_TASK:> Generates an output dataframe from the given subset of user-provided <END_TASK> <USER_TASK:> Description: def _generate_output_dataframe(data_subset, defaults): """ Generates an output dataframe from the given subset of user-provided data, the given column names, and the given default values. Parameters ---------- data_subset : DataFrame A DataFrame, usually from an AssetData object, that contains the user's input metadata for the asset type being processed defaults : dict A dict where the keys are the names of the columns of the desired output DataFrame and the values are a function from dataframe and column name to the default values to insert in the DataFrame if no user data is provided Returns ------- DataFrame A DataFrame containing all user-provided metadata, and default values wherever user-provided metadata was missing """
# The columns provided. cols = set(data_subset.columns) desired_cols = set(defaults) # Drop columns with unrecognised headers. data_subset.drop(cols - desired_cols, axis=1, inplace=True) # Get those columns which we need but # for which no data has been supplied. for col in desired_cols - cols: # write the default value for any missing columns data_subset[col] = defaults[col](data_subset, col) return data_subset
<SYSTEM_TASK:> Check that there are no cases where multiple symbols resolve to the same <END_TASK> <USER_TASK:> Description: def _check_symbol_mappings(df, exchanges, asset_exchange): """Check that there are no cases where multiple symbols resolve to the same asset at the same time in the same country. Parameters ---------- df : pd.DataFrame The equity symbol mappings table. exchanges : pd.DataFrame The exchanges table. asset_exchange : pd.Series A series that maps sids to the exchange the asset is in. Raises ------ ValueError Raised when there are ambiguous symbol mappings. """
mappings = df.set_index('sid')[list(mapping_columns)].copy() mappings['country_code'] = exchanges['country_code'][ asset_exchange.loc[df['sid']] ].values ambigious = {} def check_intersections(persymbol): intersections = list(intersecting_ranges(map( from_tuple, zip(persymbol.start_date, persymbol.end_date), ))) if intersections: data = persymbol[ ['start_date', 'end_date'] ].astype('datetime64[ns]') # indent the dataframe string, also compute this early because # ``persymbol`` is a view and ``astype`` doesn't copy the index # correctly in pandas 0.22 msg_component = '\n '.join(str(data).splitlines()) ambigious[persymbol.name] = intersections, msg_component mappings.groupby(['symbol', 'country_code']).apply(check_intersections) if ambigious: raise ValueError( 'Ambiguous ownership for %d symbol%s, multiple assets held the' ' following symbols:\n%s' % ( len(ambigious), '' if len(ambigious) == 1 else 's', '\n'.join( '%s (%s):\n intersections: %s\n %s' % ( symbol, country_code, tuple(map(_format_range, intersections)), cs, ) for (symbol, country_code), (intersections, cs) in sorted( ambigious.items(), key=first, ), ), ) )
<SYSTEM_TASK:> Convert a timeseries into an Int64Index of nanoseconds since the epoch. <END_TASK> <USER_TASK:> Description: def _dt_to_epoch_ns(dt_series): """Convert a timeseries into an Int64Index of nanoseconds since the epoch. Parameters ---------- dt_series : pd.Series The timeseries to convert. Returns ------- idx : pd.Int64Index The index converted to nanoseconds since the epoch. """
index = pd.to_datetime(dt_series.values) if index.tzinfo is None: index = index.tz_localize('UTC') else: index = index.tz_convert('UTC') return index.view(np.int64)
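A quick check of the conversion on a tiny naive series, assuming `_dt_to_epoch_ns` is in scope as defined above; the epoch values shown are exact for these dates.

import pandas as pd

s = pd.Series(pd.to_datetime(['1970-01-01', '1970-01-02']))
print(_dt_to_epoch_ns(s))   # [0, 86400000000000] -- nanoseconds since the epoch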
<SYSTEM_TASK:> Checks for a version value in the version table. <END_TASK> <USER_TASK:> Description: def check_version_info(conn, version_table, expected_version): """ Checks for a version value in the version table. Parameters ---------- conn : sa.Connection The connection to use to perform the check. version_table : sa.Table The version table of the asset database expected_version : int The expected version of the asset database Raises ------ AssetDBVersionError If the version is in the table and not equal to ASSET_DB_VERSION. """
# Read the version out of the table version_from_table = conn.execute( sa.select((version_table.c.version,)), ).scalar() # A db without a version is considered v0 if version_from_table is None: version_from_table = 0 # Raise an error if the versions do not match if (version_from_table != expected_version): raise AssetDBVersionError(db_version=version_from_table, expected_version=expected_version)
<SYSTEM_TASK:> Inserts the version value in to the version table. <END_TASK> <USER_TASK:> Description: def write_version_info(conn, version_table, version_value): """ Inserts the version value in to the version table. Parameters ---------- conn : sa.Connection The connection to use to execute the insert. version_table : sa.Table The version table of the asset database version_value : int The version to write in to the database """
conn.execute(sa.insert(version_table, values={'version': version_value}))
<SYSTEM_TASK:> Write asset metadata to a sqlite database in the format that it is <END_TASK> <USER_TASK:> Description: def write_direct(self, equities=None, equity_symbol_mappings=None, equity_supplementary_mappings=None, futures=None, exchanges=None, root_symbols=None, chunk_size=DEFAULT_CHUNK_SIZE): """Write asset metadata to a sqlite database in the format that it is stored in the assets db. Parameters ---------- equities : pd.DataFrame, optional The equity metadata. The columns for this dataframe are: symbol : str The ticker symbol for this equity. asset_name : str The full name for this asset. start_date : datetime The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. auto_close_date : datetime, optional The date on which to close any positions in this asset. exchange : str The exchange where this asset is traded. The index of this dataframe should contain the sids. futures : pd.DataFrame, optional The future contract metadata. The columns for this dataframe are: symbol : str The ticker symbol for this futures contract. root_symbol : str The root symbol, or the symbol with the expiration stripped out. asset_name : str The full name for this asset. start_date : datetime, optional The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. exchange : str The exchange where this asset is traded. notice_date : datetime The date when the owner of the contract may be forced to take physical delivery of the contract's asset. expiration_date : datetime The date when the contract expires. auto_close_date : datetime The date when the broker will automatically close any positions in this contract. tick_size : float The minimum price movement of the contract. multiplier: float The amount of the underlying asset represented by this contract. exchanges : pd.DataFrame, optional The exchanges where assets can be traded. The columns of this dataframe are: exchange : str The full name of the exchange. canonical_name : str The canonical name of the exchange. country_code : str The ISO 3166 alpha-2 country code of the exchange. root_symbols : pd.DataFrame, optional The root symbols for the futures contracts. The columns for this dataframe are: root_symbol : str The root symbol name. root_symbol_id : int The unique id for this root symbol. sector : string, optional The sector of this root symbol. description : string, optional A short description of this root symbol. exchange : str The exchange where this root symbol is traded. equity_supplementary_mappings : pd.DataFrame, optional Additional mappings from values of abitrary type to assets. chunk_size : int, optional The amount of rows to write to the SQLite table at once. This defaults to the default number of bind params in sqlite. If you have compiled sqlite3 with more bind or less params you may want to pass that value here. """
if equities is not None:
    equities = _generate_output_dataframe(
        equities,
        _direct_equities_defaults,
    )
    if equity_symbol_mappings is None:
        raise ValueError(
            'equities provided with no symbol mapping data',
        )

    equity_symbol_mappings = _generate_output_dataframe(
        equity_symbol_mappings,
        _equity_symbol_mappings_defaults,
    )
    _check_symbol_mappings(
        equity_symbol_mappings,
        exchanges,
        equities['exchange'],
    )

if equity_supplementary_mappings is not None:
    equity_supplementary_mappings = _generate_output_dataframe(
        equity_supplementary_mappings,
        _equity_supplementary_mappings_defaults,
    )

if futures is not None:
    futures = _generate_output_dataframe(futures, _futures_defaults)

if exchanges is not None:
    exchanges = _generate_output_dataframe(
        exchanges.set_index('exchange'),
        _exchanges_defaults,
    )

if root_symbols is not None:
    root_symbols = _generate_output_dataframe(
        root_symbols,
        _root_symbols_defaults,
    )

# Set named identifier columns as indices, if provided.
_normalize_index_columns_in_place(
    equities=equities,
    equity_supplementary_mappings=equity_supplementary_mappings,
    futures=futures,
    exchanges=exchanges,
    root_symbols=root_symbols,
)

self._real_write(
    equities=equities,
    equity_symbol_mappings=equity_symbol_mappings,
    equity_supplementary_mappings=equity_supplementary_mappings,
    futures=futures,
    exchanges=exchanges,
    root_symbols=root_symbols,
    chunk_size=chunk_size,
)
<SYSTEM_TASK:> Write asset metadata to a sqlite database. <END_TASK> <USER_TASK:> Description: def write(self, equities=None, futures=None, exchanges=None, root_symbols=None, equity_supplementary_mappings=None, chunk_size=DEFAULT_CHUNK_SIZE): """Write asset metadata to a sqlite database. Parameters ---------- equities : pd.DataFrame, optional The equity metadata. The columns for this dataframe are: symbol : str The ticker symbol for this equity. asset_name : str The full name for this asset. start_date : datetime The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. auto_close_date : datetime, optional The date on which to close any positions in this asset. exchange : str The exchange where this asset is traded. The index of this dataframe should contain the sids. futures : pd.DataFrame, optional The future contract metadata. The columns for this dataframe are: symbol : str The ticker symbol for this futures contract. root_symbol : str The root symbol, or the symbol with the expiration stripped out. asset_name : str The full name for this asset. start_date : datetime, optional The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. exchange : str The exchange where this asset is traded. notice_date : datetime The date when the owner of the contract may be forced to take physical delivery of the contract's asset. expiration_date : datetime The date when the contract expires. auto_close_date : datetime The date when the broker will automatically close any positions in this contract. tick_size : float The minimum price movement of the contract. multiplier: float The amount of the underlying asset represented by this contract. exchanges : pd.DataFrame, optional The exchanges where assets can be traded. The columns of this dataframe are: exchange : str The full name of the exchange. canonical_name : str The canonical name of the exchange. country_code : str The ISO 3166 alpha-2 country code of the exchange. root_symbols : pd.DataFrame, optional The root symbols for the futures contracts. The columns for this dataframe are: root_symbol : str The root symbol name. root_symbol_id : int The unique id for this root symbol. sector : string, optional The sector of this root symbol. description : string, optional A short description of this root symbol. exchange : str The exchange where this root symbol is traded. equity_supplementary_mappings : pd.DataFrame, optional Additional mappings from values of abitrary type to assets. chunk_size : int, optional The amount of rows to write to the SQLite table at once. This defaults to the default number of bind params in sqlite. If you have compiled sqlite3 with more bind or less params you may want to pass that value here. See Also -------- zipline.assets.asset_finder """
if exchanges is None: exchange_names = [ df['exchange'] for df in (equities, futures, root_symbols) if df is not None ] if exchange_names: exchanges = pd.DataFrame({ 'exchange': pd.concat(exchange_names).unique(), }) data = self._load_data( equities if equities is not None else pd.DataFrame(), futures if futures is not None else pd.DataFrame(), exchanges if exchanges is not None else pd.DataFrame(), root_symbols if root_symbols is not None else pd.DataFrame(), ( equity_supplementary_mappings if equity_supplementary_mappings is not None else pd.DataFrame() ), ) self._real_write( equities=data.equities, equity_symbol_mappings=data.equities_mappings, equity_supplementary_mappings=data.equity_supplementary_mappings, futures=data.futures, root_symbols=data.root_symbols, exchanges=data.exchanges, chunk_size=chunk_size, )
<SYSTEM_TASK:> Checks if any tables are present in the current assets database. <END_TASK> <USER_TASK:> Description: def _all_tables_present(self, txn): """ Checks if any tables are present in the current assets database. Parameters ---------- txn : Transaction The open transaction to check in. Returns ------- has_tables : bool True if any tables are present, otherwise False. """
conn = txn.connect() for table_name in asset_db_table_names: if txn.dialect.has_table(conn, table_name): return True return False
<SYSTEM_TASK:> Connect to database and create tables. <END_TASK> <USER_TASK:> Description: def init_db(self, txn=None): """Connect to database and create tables. Parameters ---------- txn : sa.engine.Connection, optional The transaction to execute in. If this is not provided, a new transaction will be started with the engine provided. Returns ------- metadata : sa.MetaData The metadata that describes the new assets db. """
with ExitStack() as stack: if txn is None: txn = stack.enter_context(self.engine.begin()) tables_already_exist = self._all_tables_present(txn) # Create the SQL tables if they do not already exist. metadata.create_all(txn, checkfirst=True) if tables_already_exist: check_version_info(txn, version_info, ASSET_DB_VERSION) else: write_version_info(txn, version_info, ASSET_DB_VERSION)
<SYSTEM_TASK:> Given an expression representing data to load, perform normalization and <END_TASK> <USER_TASK:> Description: def load_raw_data(assets, data_query_cutoff_times, expr, odo_kwargs, checkpoints=None): """ Given an expression representing data to load, perform normalization and forward-filling and return the data, materialized. Only accepts data with a `sid` field. Parameters ---------- assets : pd.Int64Index The assets to load data for. data_query_cutoff_times : pd.DatetimeIndex The datetime when data should no longer be considered available for a session. expr : expr The expression representing the data to load. odo_kwargs : dict Extra keyword arguments to pass to odo when executing the expression. checkpoints : expr, optional The expression representing the checkpointed data for `expr`. Returns ------- raw : pd.DataFrame The result of computing expr and materializing the result as a dataframe. """
lower_dt, upper_dt = data_query_cutoff_times[[0, -1]] raw = ffill_query_in_range( expr, lower_dt, upper_dt, checkpoints=checkpoints, odo_kwargs=odo_kwargs, ) sids = raw[SID_FIELD_NAME] raw.drop( sids[~sids.isin(assets)].index, inplace=True ) return raw
<SYSTEM_TASK:> Convert a tuple into a range with error handling. <END_TASK> <USER_TASK:> Description: def from_tuple(tup): """Convert a tuple into a range with error handling. Parameters ---------- tup : tuple (len 2 or 3) The tuple to turn into a range. Returns ------- range : range The range from the tuple. Raises ------ ValueError Raised when the tuple length is not 2 or 3. """
if len(tup) not in (2, 3):
    raise ValueError(
        'tuple must contain 2 or 3 elements, not: %d (%r)' % (
            len(tup),
            tup,
        ),
    )
return range(*tup)
<SYSTEM_TASK:> Convert a tuple into a range but pass ranges through silently. <END_TASK> <USER_TASK:> Description: def maybe_from_tuple(tup_or_range): """Convert a tuple into a range but pass ranges through silently. This is useful to ensure that input is a range so that attributes may be accessed with `.start`, `.stop` or so that containment checks are constant time. Parameters ---------- tup_or_range : tuple or range A tuple to pass to from_tuple or a range to return. Returns ------- range : range The input to convert to a range. Raises ------ ValueError Raised when the input is not a tuple or a range. ValueError is also raised if the input is a tuple whose length is not 2 or 3. """
if isinstance(tup_or_range, tuple): return from_tuple(tup_or_range) elif isinstance(tup_or_range, range): return tup_or_range raise ValueError( 'maybe_from_tuple expects a tuple or range, got %r: %r' % ( type(tup_or_range).__name__, tup_or_range, ), )
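Usage is symmetric with `from_tuple` (assuming both are in scope as defined above):

maybe_from_tuple((2005, 2010))          # range(2005, 2010)
maybe_from_tuple(range(2005, 2010))     # passed through unchanged
maybe_from_tuple([2005, 2010])          # raises ValueError: not a tuple or range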
<SYSTEM_TASK:> Check that the steps of ``a`` and ``b`` are both 1. <END_TASK> <USER_TASK:> Description: def _check_steps(a, b): """Check that the steps of ``a`` and ``b`` are both 1. Parameters ---------- a : range The first range to check. b : range The second range to check. Raises ------ ValueError Raised when either step is not 1. """
if a.step != 1: raise ValueError('a.step must be equal to 1, got: %s' % a.step) if b.step != 1: raise ValueError('b.step must be equal to 1, got: %s' % b.step)
<SYSTEM_TASK:> Check if two ranges overlap. <END_TASK> <USER_TASK:> Description: def overlap(a, b): """Check if two ranges overlap. Parameters ---------- a : range The first range. b : range The second range. Returns ------- overlaps : bool Do these ranges overlap. Notes ----- This function does not support ranges with step != 1. """
_check_steps(a, b) return a.stop >= b.start and b.stop >= a.start
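A few concrete cases, assuming `overlap` as defined above (note that with the `>=` comparisons, ranges that merely touch end-to-start are also reported as overlapping):

overlap(range(1, 5), range(4, 8))    # True  -- they share 4
overlap(range(1, 5), range(5, 8))    # True  -- adjacent ranges also count
overlap(range(1, 5), range(6, 8))    # False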