<SYSTEM_TASK:> Merge two ranges with step == 1. <END_TASK> <USER_TASK:> Description: def merge(a, b): """Merge two ranges with step == 1. Parameters ---------- a : range The first range. b : range The second range. """
_check_steps(a, b)
return range(min(a.start, b.start), max(a.stop, b.stop))
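A brief doctest-style sketch of the behavior (assuming ``_check_steps`` accepts both inputs, i.e. both ranges have step 1). Note that ``merge`` spans the two ranges even when they do not overlap:

>>> merge(range(0, 5), range(3, 8))
range(0, 8)
>>> merge(range(10, 12), range(0, 3))
range(0, 12)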
<SYSTEM_TASK:> Return any ranges that intersect. <END_TASK> <USER_TASK:> Description: def intersecting_ranges(ranges): """Return any ranges that intersect. Parameters ---------- ranges : iterable[ranges] A sequence of ranges to check for intersections. Returns ------- intersections : iterable[ranges] A sequence of all of the ranges that intersected in ``ranges``. Examples -------- >>> ranges = [range(0, 1), range(2, 5), range(4, 7)] >>> list(intersecting_ranges(ranges)) [range(2, 5), range(4, 7)] >>> ranges = [range(0, 1), range(2, 3)] >>> list(intersecting_ranges(ranges)) [] >>> ranges = [range(0, 1), range(1, 2)] >>> list(intersecting_ranges(ranges)) [range(0, 1), range(1, 2)] """
ranges = sorted(ranges, key=op.attrgetter('start'))
return sorted_diff(ranges, group_ranges(ranges))
<SYSTEM_TASK:> Returns the path to a data file. <END_TASK> <USER_TASK:> Description: def get_data_filepath(name, environ=None): """ Returns the path to a data file. Creates the containing directory, if needed. """
dr = data_root(environ)

if not os.path.exists(dr):
    os.makedirs(dr)

return os.path.join(dr, name)
<SYSTEM_TASK:> Does `series_or_df` have data on or before first_date and on or after <END_TASK> <USER_TASK:> Description: def has_data_for_dates(series_or_df, first_date, last_date): """ Does `series_or_df` have data on or before first_date and on or after last_date? """
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
    raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
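A hypothetical check against a daily Series (the Series below is made up purely for illustration):

>>> import pandas as pd
>>> s = pd.Series(1.0, index=pd.date_range('2014-01-01', '2014-12-31'))
>>> has_data_for_dates(s, pd.Timestamp('2014-02-03'), pd.Timestamp('2014-11-28'))
True
>>> has_data_for_dates(s, pd.Timestamp('2013-12-31'), pd.Timestamp('2014-11-28'))
False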
<SYSTEM_TASK:> Load benchmark returns and treasury yield curves for the given calendar and <END_TASK> <USER_TASK:> Description: def load_market_data(trading_day=None, trading_days=None, bm_symbol='SPY', environ=None): """ Load benchmark returns and treasury yield curves for the given calendar and benchmark symbol. Benchmarks are downloaded as a Series from IEX Trading. Treasury curves are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov' by default. For Canadian exchanges, a loader for Canadian bonds from the Bank of Canada is also available. Results downloaded from the internet are cached in ~/.zipline/data. Subsequent loads will attempt to read from the cached files before falling back to redownload. Parameters ---------- trading_day : pandas.CustomBusinessDay, optional A trading_day used to determine the latest day for which we expect to have data. Defaults to an NYSE trading day. trading_days : pd.DatetimeIndex, optional A calendar of trading days. Also used for determining what cached dates we should expect to have cached. Defaults to the NYSE calendar. bm_symbol : str, optional Symbol for the benchmark index to load. Defaults to 'SPY', the ticker for the S&P 500, provided by IEX Trading. Returns ------- (benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame) Notes ----- Both return values are DatetimeIndexed with values dated to midnight in UTC of each stored date. The columns of `treasury_curves` are: '1month', '3month', '6month', '1year','2year','3year','5year','7year','10year','20year','30year' """
if trading_day is None:
    trading_day = get_calendar('XNYS').day
if trading_days is None:
    trading_days = get_calendar('XNYS').all_sessions

first_date = trading_days[0]
now = pd.Timestamp.utcnow()

# we will fill missing benchmark data through latest trading date
last_date = trading_days[trading_days.get_loc(now, method='ffill')]

br = ensure_benchmark_data(
    bm_symbol,
    first_date,
    last_date,
    now,
    # We need the trading_day to figure out the close prior to the first
    # date so that we can compute returns for the first date.
    trading_day,
    environ,
)
tc = ensure_treasury_data(
    bm_symbol,
    first_date,
    last_date,
    now,
    environ,
)

# combine dt indices and reindex using ffill then bfill
all_dt = br.index.union(tc.index)
br = br.reindex(all_dt, method='ffill').fillna(method='bfill')
tc = tc.reindex(all_dt, method='ffill').fillna(method='bfill')

benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves
<SYSTEM_TASK:> Ensure we have benchmark data for `symbol` from `first_date` to `last_date` <END_TASK> <USER_TASK:> Description: def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day, environ=None): """ Ensure we have benchmark data for `symbol` from `first_date` to `last_date` Parameters ---------- symbol : str The symbol for the benchmark to load. first_date : pd.Timestamp First required date for the cache. last_date : pd.Timestamp Last required date for the cache. now : pd.Timestamp The current time. This is used to prevent repeated attempts to re-download data that isn't available due to scheduling quirks or other failures. trading_day : pd.CustomBusinessDay A trading day delta. Used to find the day before first_date so we can get the close of the day prior to first_date. We attempt to download data unless we already have data stored at the data cache for `symbol` whose first entry is before or on `first_date` and whose last entry is on or after `last_date`. If we perform a download and the cache criteria are not satisfied, we wait at least one hour before attempting a redownload. This is determined by comparing the current time to the result of os.path.getmtime on the cache path. """
filename = get_benchmark_filename(symbol)
data = _load_cached_data(filename, first_date, last_date, now, 'benchmark',
                         environ)
if data is not None:
    return data

# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
    ('Downloading benchmark data for {symbol!r} '
     'from {first_date} to {last_date}'),
    symbol=symbol,
    first_date=first_date - trading_day,
    last_date=last_date
)

try:
    data = get_benchmark_returns(symbol)
    data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
    logger.exception('Failed to cache the new benchmark returns')
    raise
if not has_data_for_dates(data, first_date, last_date):
    logger.warn(
        ("Still don't have expected benchmark data for {symbol!r} "
         "from {first_date} to {last_date} after redownload!"),
        symbol=symbol,
        first_date=first_date - trading_day,
        last_date=last_date
    )
return data
<SYSTEM_TASK:> Ensure we have treasury data from treasury module associated with <END_TASK> <USER_TASK:> Description: def ensure_treasury_data(symbol, first_date, last_date, now, environ=None): """ Ensure we have treasury data from treasury module associated with `symbol`. Parameters ---------- symbol : str Benchmark symbol for which we're loading associated treasury curves. first_date : pd.Timestamp First date required to be in the cache. last_date : pd.Timestamp Last date required to be in the cache. now : pd.Timestamp The current time. This is used to prevent repeated attempts to re-download data that isn't available due to scheduling quirks or other failures. We attempt to download data unless we already have data stored in the cache for `module_name` whose first entry is before or on `first_date` and whose last entry is on or after `last_date`. If we perform a download and the cache criteria are not satisfied, we wait at least one hour before attempting a redownload. This is determined by comparing the current time to the result of os.path.getmtime on the cache path. """
loader_module, filename, source = INDEX_MAPPING.get(
    symbol,
    INDEX_MAPPING['SPY'],
)
first_date = max(first_date, loader_module.earliest_possible_date())

data = _load_cached_data(filename, first_date, last_date, now, 'treasury',
                         environ)
if data is not None:
    return data

# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
    ('Downloading treasury data for {symbol!r} '
     'from {first_date} to {last_date}'),
    symbol=symbol,
    first_date=first_date,
    last_date=last_date
)

try:
    data = loader_module.get_treasury_data(first_date, last_date)
    data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
    logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
    logger.warn(
        ("Still don't have expected treasury data for {symbol!r} "
         "from {first_date} to {last_date} after redownload!"),
        symbol=symbol,
        first_date=first_date,
        last_date=last_date
    )
return data
<SYSTEM_TASK:> Specialize a term if it's loadable. <END_TASK> <USER_TASK:> Description: def maybe_specialize(term, domain): """Specialize a term if it's loadable. """
if isinstance(term, LoadableTerm):
    return term.specialize(domain)
return term
<SYSTEM_TASK:> Add a term and all its children to ``graph``. <END_TASK> <USER_TASK:> Description: def _add_to_graph(self, term, parents): """ Add a term and all its children to ``graph``. ``parents`` is the set of all the parents of ``term`` that we've added so far. It is only used to detect dependency cycles. """
if self._frozen:
    raise ValueError(
        "Can't mutate %s after construction." % type(self).__name__
    )

# If we've seen this node already as a parent of the current traversal,
# it means we have an unsatisfiable dependency. This should only be
# possible if the term's inputs are mutated after construction.
if term in parents:
    raise CyclicDependency(term)

parents.add(term)

self.graph.add_node(term)

for dependency in term.dependencies:
    self._add_to_graph(dependency, parents)
    self.graph.add_edge(dependency, term)

parents.remove(term)
<SYSTEM_TASK:> Return a topologically-sorted iterator over the terms in ``self`` which <END_TASK> <USER_TASK:> Description: def execution_order(self, refcounts): """ Return a topologically-sorted iterator over the terms in ``self`` which need to be computed. """
return iter(nx.topological_sort(
    self.graph.subgraph(
        {term for term, refcount in refcounts.items() if refcount > 0},
    ),
))
<SYSTEM_TASK:> Calculate initial refcounts for execution of this graph. <END_TASK> <USER_TASK:> Description: def initial_refcounts(self, initial_terms): """ Calculate initial refcounts for execution of this graph. Parameters ---------- initial_terms : iterable[Term] An iterable of terms that were pre-computed before graph execution. Each node starts with a refcount equal to its outdegree, and output nodes get one extra reference to ensure that they're still in the graph at the end of execution. """
refcounts = self.graph.out_degree()
for t in self.outputs.values():
    refcounts[t] += 1

for t in initial_terms:
    self._decref_dependencies_recursive(t, refcounts, set())

return refcounts
<SYSTEM_TASK:> Decrement terms recursively. <END_TASK> <USER_TASK:> Description: def _decref_dependencies_recursive(self, term, refcounts, garbage): """ Decrement terms recursively. Notes ----- This should only be used to build the initial workspace, after that we should use: :meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies` """
# Edges are tuple of (from, to).
for parent, _ in self.graph.in_edges([term]):
    refcounts[parent] -= 1
    # No one else depends on this term. Remove it from the
    # workspace to conserve memory.
    if refcounts[parent] == 0:
        garbage.add(parent)
        self._decref_dependencies_recursive(parent, refcounts, garbage)
<SYSTEM_TASK:> Decrement in-edges for ``term`` after computation. <END_TASK> <USER_TASK:> Description: def decref_dependencies(self, term, refcounts): """ Decrement in-edges for ``term`` after computation. Parameters ---------- term : zipline.pipeline.Term The term whose parents should be decref'ed. refcounts : dict[Term -> int] Dictionary of refcounts. Return ------ garbage : set[Term] Terms whose refcounts hit zero after decrefing. """
garbage = set()
# Edges are tuple of (from, to).
for parent, _ in self.graph.in_edges([term]):
    refcounts[parent] -= 1
    # No one else depends on this term. Remove it from the
    # workspace to conserve memory.
    if refcounts[parent] == 0:
        garbage.add(parent)
return garbage
<SYSTEM_TASK:> Ensure that we're going to compute at least N extra rows of `term`. <END_TASK> <USER_TASK:> Description: def _ensure_extra_rows(self, term, N): """ Ensure that we're going to compute at least N extra rows of `term`. """
attrs = self.graph.node[term]
attrs['extra_rows'] = max(N, attrs.get('extra_rows', 0))
<SYSTEM_TASK:> Load mask and mask row labels for term. <END_TASK> <USER_TASK:> Description: def mask_and_dates_for_term(self, term, root_mask_term, workspace, all_dates): """ Load mask and mask row labels for term. Parameters ---------- term : Term The term to load the mask and labels for. root_mask_term : Term The term that represents the root asset exists mask. workspace : dict[Term, any] The values that have been computed for each term. all_dates : pd.DatetimeIndex All of the dates that are being computed for in the pipeline. Returns ------- mask : np.ndarray The correct mask for this term. dates : np.ndarray The slice of dates for this term. """
mask = term.mask
mask_offset = self.extra_rows[mask] - self.extra_rows[term]

# This offset is computed against root_mask_term because that is what
# determines the shape of the top-level dates array.
dates_offset = (
    self.extra_rows[root_mask_term] - self.extra_rows[term]
)

return workspace[mask][mask_offset:], all_dates[dates_offset:]
<SYSTEM_TASK:> Make sure that we've specialized all loadable terms in the graph. <END_TASK> <USER_TASK:> Description: def _assert_all_loadable_terms_specialized_to(self, domain): """Make sure that we've specialized all loadable terms in the graph. """
for term in self.graph.node:
    if isinstance(term, LoadableTerm):
        assert term.domain is domain
<SYSTEM_TASK:> Read a requirements.txt file, expressed as a path relative to Zipline root. <END_TASK> <USER_TASK:> Description: def read_requirements(path, strict_bounds, conda_format=False, filter_names=None): """ Read a requirements.txt file, expressed as a path relative to Zipline root. Returns requirements with the pinned versions as lower bounds if `strict_bounds` is falsey. """
real_path = join(dirname(abspath(__file__)), path)
with open(real_path) as f:
    reqs = _filter_requirements(f.readlines(), filter_names=filter_names,
                                filter_sys_version=not conda_format)

    if not strict_bounds:
        reqs = map(_with_bounds, reqs)

    if conda_format:
        reqs = map(_conda_format, reqs)

    return list(reqs)
<SYSTEM_TASK:> Normalize a time. If the time is tz-naive, assume it is UTC. <END_TASK> <USER_TASK:> Description: def ensure_utc(time, tz='UTC'): """ Normalize a time. If the time is tz-naive, assume it is UTC. """
if not time.tzinfo:
    time = time.replace(tzinfo=pytz.timezone(tz))
return time.replace(tzinfo=pytz.utc)
<SYSTEM_TASK:> Builds the offset argument for event rules. <END_TASK> <USER_TASK:> Description: def _build_offset(offset, kwargs, default): """ Builds the offset argument for event rules. """
if offset is None:
    if not kwargs:
        return default  # use the default.
    else:
        return _td_check(datetime.timedelta(**kwargs))
elif kwargs:
    raise ValueError('Cannot pass kwargs and an offset')
elif isinstance(offset, datetime.timedelta):
    return _td_check(offset)
else:
    raise TypeError("Must pass 'hours' and/or 'minutes' as keywords")
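An illustrative sketch of the accepted call patterns, assuming ``_td_check`` simply validates and returns the timedelta it is given:

import datetime
default = datetime.timedelta(minutes=1)
_build_offset(None, {}, default)                            # -> the default (1 minute)
_build_offset(None, {'minutes': 30}, default)               # -> timedelta(minutes=30)
_build_offset(datetime.timedelta(minutes=5), {}, default)   # -> timedelta(minutes=5)
_build_offset(datetime.timedelta(minutes=5), {'hours': 1}, default)  # -> raises ValueError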
<SYSTEM_TASK:> Builds the date argument for event rules. <END_TASK> <USER_TASK:> Description: def _build_date(date, kwargs): """ Builds the date argument for event rules. """
if date is None:
    if not kwargs:
        raise ValueError('Must pass a date or kwargs')
    else:
        return datetime.date(**kwargs)
elif kwargs:
    raise ValueError('Cannot pass kwargs and a date')
else:
    return date
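A short sketch of how the branches behave (the concrete dates are arbitrary examples):

import datetime
_build_date(None, {'year': 2015, 'month': 1, 'day': 5})   # -> datetime.date(2015, 1, 5)
_build_date(datetime.date(2015, 1, 5), {})                # -> the date, unchanged
_build_date(datetime.date(2015, 1, 5), {'year': 2016})    # -> raises ValueError
_build_date(None, {})                                     # -> raises ValueError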
<SYSTEM_TASK:> Builds the time argument for event rules. <END_TASK> <USER_TASK:> Description: def _build_time(time, kwargs): """ Builds the time argument for event rules. """
tz = kwargs.pop('tz', 'UTC')
if time:
    if kwargs:
        raise ValueError('Cannot pass kwargs and a time')
    else:
        return ensure_utc(time, tz)
elif not kwargs:
    raise ValueError('Must pass a time or kwargs')
else:
    return datetime.time(**kwargs)
<SYSTEM_TASK:> A preprocessor that coerces integral floats to ints. <END_TASK> <USER_TASK:> Description: def lossless_float_to_int(funcname, func, argname, arg): """ A preprocessor that coerces integral floats to ints. Receipt of non-integral floats raises a TypeError. """
if not isinstance(arg, float):
    return arg

arg_as_int = int(arg)
if arg == arg_as_int:
    warnings.warn(
        "{f} expected an int for argument {name!r}, but got float {arg}."
        " Coercing to int.".format(
            f=funcname,
            name=argname,
            arg=arg,
        ),
    )
    return arg_as_int

raise TypeError(arg)
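A hypothetical illustration of the three outcomes. Note that the ``func`` argument is not used by the body, so ``None`` is passed here purely as a placeholder:

lossless_float_to_int('order', None, 'amount', 100)    # -> 100 (non-floats pass through)
lossless_float_to_int('order', None, 'amount', 100.0)  # -> 100, with a coercion warning
lossless_float_to_int('order', None, 'amount', 100.5)  # -> raises TypeError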
<SYSTEM_TASK:> Adds an event to the manager. <END_TASK> <USER_TASK:> Description: def add_event(self, event, prepend=False): """ Adds an event to the manager. """
if prepend:
    self._events.insert(0, event)
else:
    self._events.append(event)
<SYSTEM_TASK:> Calls the callable only when the rule is triggered. <END_TASK> <USER_TASK:> Description: def handle_data(self, context, data, dt): """ Calls the callable only when the rule is triggered. """
if self.rule.should_trigger(dt):
    self.callback(context, data)
<SYSTEM_TASK:> Composes the two rules with a lazy composer. <END_TASK> <USER_TASK:> Description: def should_trigger(self, dt): """ Composes the two rules with a lazy composer. """
return self.composer(
    self.first.should_trigger,
    self.second.should_trigger,
    dt
)
<SYSTEM_TASK:> Zero out any values that would not fit into a uint32. <END_TASK> <USER_TASK:> Description: def winsorise_uint32(df, invalid_data_behavior, column, *columns): """Zero out any values that would not fit into a uint32. Parameters ---------- df : pd.DataFrame The dataframe to winsorise. invalid_data_behavior : {'warn', 'raise', 'ignore'} What to do when data is outside the bounds of a uint32. column, *columns : str The names of the columns to check. Returns ------- truncated : pd.DataFrame ``df`` with values that do not fit into a uint32 zeroed out. """
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX

if invalid_data_behavior != 'ignore':
    mask |= df[columns].isnull()
else:
    # we are not going to generate a warning or error for this so just use
    # nan_to_num
    df[columns] = np.nan_to_num(df[columns])

mv = mask.values
if mv.any():
    if invalid_data_behavior == 'raise':
        raise ValueError(
            '%d values out of bounds for uint32: %r' % (
                mv.sum(), df[mask.any(axis=1)],
            ),
        )
    if invalid_data_behavior == 'warn':
        warnings.warn(
            'Ignoring %d values because they are out of bounds for'
            ' uint32: %r' % (
                mv.sum(), df[mask.any(axis=1)],
            ),
            stacklevel=3,  # one extra frame for `expect_element`
        )

df[mask] = 0
return df
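A minimal sketch of calling this on a toy frame, assuming ``UINT32_MAX`` is the usual ``np.iinfo(np.uint32).max`` module constant:

import numpy as np
import pandas as pd

UINT32_MAX = np.iinfo(np.uint32).max  # assumed definition of the module constant

df = pd.DataFrame({'close': [10.0, UINT32_MAX + 1.0], 'volume': [100.0, 200.0]})
out = winsorise_uint32(df, 'warn', 'close', 'volume')
# emits a warning and returns the frame with the oversized 'close' value set to 0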
<SYSTEM_TASK:> Read CSVs as DataFrames from our asset map. <END_TASK> <USER_TASK:> Description: def write_csvs(self, asset_map, show_progress=False, invalid_data_behavior='warn'): """Read CSVs as DataFrames from our asset map. Parameters ---------- asset_map : dict[int -> str] A mapping from asset id to file path with the CSV data for that asset show_progress : bool Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'} What to do when data is encountered that is outside the range of a uint32. """
read = partial(
    read_csv,
    parse_dates=['day'],
    index_col='day',
    dtype=self._csv_dtypes,
)
return self.write(
    ((asset, read(path)) for asset, path in iteritems(asset_map)),
    assets=viewkeys(asset_map),
    show_progress=show_progress,
    invalid_data_behavior=invalid_data_behavior,
)
<SYSTEM_TASK:> Compute the raw row indices to load for each asset on a query for the <END_TASK> <USER_TASK:> Description: def _compute_slices(self, start_idx, end_idx, assets): """ Compute the raw row indices to load for each asset on a query for the given dates after applying a shift. Parameters ---------- start_idx : int Index of first date for which we want data. end_idx : int Index of last date for which we want data. assets : pandas.Int64Index Assets for which we want to compute row indices Returns ------- A 3-tuple of (first_rows, last_rows, offsets): first_rows : np.array[intp] Array with length == len(assets) containing the index of the first row to load for each asset in `assets`. last_rows : np.array[intp] Array with length == len(assets) containing the index of the last row to load for each asset in `assets`. offset : np.array[intp] Array with length == (len(asset) containing the index in a buffer of length `dates` corresponding to the first row of each asset. The value of offset[i] will be 0 if asset[i] existed at the start of a query. Otherwise, offset[i] will be equal to the number of entries in `dates` for which the asset did not yet exist. """
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
    self._first_rows,
    self._last_rows,
    self._calendar_offsets,
    start_idx,
    end_idx,
    assets,
)
<SYSTEM_TASK:> Get the colname from daily_bar_table and read all of it into memory, <END_TASK> <USER_TASK:> Description: def _spot_col(self, colname): """ Get the colname from daily_bar_table and read all of it into memory, caching the result. Parameters ---------- colname : string A name of a OHLCV carray in the daily_bar_table Returns ------- array (uint32) Full read array of the carray in the daily_bar_table with the given colname. """
try:
    col = self._spot_cols[colname]
except KeyError:
    col = self._spot_cols[colname] = self._table[colname]
return col
<SYSTEM_TASK:> Construct and store a PipelineEngine from loader. <END_TASK> <USER_TASK:> Description: def init_engine(self, get_loader): """ Construct and store a PipelineEngine from loader. If get_loader is None, constructs an ExplodingPipelineEngine """
if get_loader is not None:
    self.engine = SimplePipelineEngine(
        get_loader,
        self.asset_finder,
        self.default_pipeline_domain(self.trading_calendar),
    )
else:
    self.engine = ExplodingPipelineEngine()
<SYSTEM_TASK:> Call self._initialize with `self` made available to Zipline API <END_TASK> <USER_TASK:> Description: def initialize(self, *args, **kwargs): """ Call self._initialize with `self` made available to Zipline API functions. """
with ZiplineAPI(self):
    self._initialize(self, *args, **kwargs)
<SYSTEM_TASK:> If the clock property is not set, then create one based on frequency. <END_TASK> <USER_TASK:> Description: def _create_clock(self): """ If the clock property is not set, then create one based on frequency. """
trading_o_and_c = self.trading_calendar.schedule.ix[
    self.sim_params.sessions]
market_closes = trading_o_and_c['market_close']
minutely_emission = False

if self.sim_params.data_frequency == 'minute':
    market_opens = trading_o_and_c['market_open']
    minutely_emission = self.sim_params.emission_rate == "minute"

    # The calendar's execution times are the minutes over which we
    # actually want to run the clock. Typically the execution times
    # simply adhere to the market open and close times. In the case of
    # the futures calendar, for example, we only want to simulate over
    # a subset of the full 24 hour calendar, so the execution times
    # dictate a market open time of 6:31am US/Eastern and a close of
    # 5:00pm US/Eastern.
    execution_opens = \
        self.trading_calendar.execution_time_from_open(market_opens)
    execution_closes = \
        self.trading_calendar.execution_time_from_close(market_closes)
else:
    # in daily mode, we want to have one bar per session, timestamped
    # as the last minute of the session.
    execution_closes = \
        self.trading_calendar.execution_time_from_close(market_closes)
    execution_opens = execution_closes

# FIXME generalize these values
before_trading_start_minutes = days_at_time(
    self.sim_params.sessions,
    time(8, 45),
    "US/Eastern"
)

return MinuteSimulationClock(
    self.sim_params.sessions,
    execution_opens,
    execution_closes,
    before_trading_start_minutes,
    minute_emission=minutely_emission,
)
<SYSTEM_TASK:> Compute any pipelines attached with eager=True. <END_TASK> <USER_TASK:> Description: def compute_eager_pipelines(self): """ Compute any pipelines attached with eager=True. """
for name, pipe in self._pipelines.items():
    if pipe.eager:
        self.pipeline_output(name)
<SYSTEM_TASK:> If there is a capital change for a given dt, this means the change <END_TASK> <USER_TASK:> Description: def calculate_capital_changes(self, dt, emission_rate, is_interday, portfolio_value_adjustment=0.0): """ If there is a capital change for a given dt, this means the change occurs before `handle_data` on the given dt. In the case of the change being a target value, the change will be computed on the portfolio value according to prices at the given dt. `portfolio_value_adjustment`, if specified, will be removed from the portfolio_value of the cumulative performance when calculating deltas from target capital changes. """
try:
    capital_change = self.capital_changes[dt]
except KeyError:
    return

self._sync_last_sale_prices()
if capital_change['type'] == 'target':
    target = capital_change['value']
    capital_change_amount = (
        target -
        (
            self.portfolio.portfolio_value -
            portfolio_value_adjustment
        )
    )

    log.info('Processing capital change to target %s at %s. Capital '
             'change delta is %s' % (target, dt,
                                     capital_change_amount))
elif capital_change['type'] == 'delta':
    target = None
    capital_change_amount = capital_change['value']
    log.info('Processing capital change of delta %s at %s'
             % (capital_change_amount, dt))
else:
    log.error("Capital change %s does not indicate a valid type "
              "('target' or 'delta')" % capital_change)
    return

self.capital_change_deltas.update({dt: capital_change_amount})
self.metrics_tracker.capital_change(capital_change_amount)

yield {
    'capital_change':
        {'date': dt,
         'type': 'cash',
         'target': target,
         'delta': capital_change_amount}
}
<SYSTEM_TASK:> Query the execution environment. <END_TASK> <USER_TASK:> Description: def get_environment(self, field='platform'): """Query the execution environment. Parameters ---------- field : {'platform', 'arena', 'data_frequency', 'start', 'end', 'capital_base', 'platform', '*'} The field to query. The options have the following meanings: arena : str The arena from the simulation parameters. This will normally be ``'backtest'`` but some systems may use this distinguish live trading from backtesting. data_frequency : {'daily', 'minute'} data_frequency tells the algorithm if it is running with daily data or minute data. start : datetime The start date for the simulation. end : datetime The end date for the simulation. capital_base : float The starting capital for the simulation. platform : str The platform that the code is running on. By default this will be the string 'zipline'. This can allow algorithms to know if they are running on the Quantopian platform instead. * : dict[str -> any] Returns all of the fields in a dictionary. Returns ------- val : any The value for the field queried. See above for more information. Raises ------ ValueError Raised when ``field`` is not a valid option. """
env = {
    'arena': self.sim_params.arena,
    'data_frequency': self.sim_params.data_frequency,
    'start': self.sim_params.first_open,
    'end': self.sim_params.last_close,
    'capital_base': self.sim_params.capital_base,
    'platform': self._platform
}
if field == '*':
    return env
else:
    try:
        return env[field]
    except KeyError:
        raise ValueError(
            '%r is not a valid field for get_environment' % field,
        )
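A hypothetical usage snippet inside an algorithm, using the public ``zipline.api`` entry point:

from zipline.api import get_environment

def handle_data(context, data):
    if get_environment('data_frequency') == 'minute':
        pass  # minute-bar specific logic would go here
    sim_settings = get_environment('*')  # dict of all simulation fields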
<SYSTEM_TASK:> Fetch a csv from a remote url and register the data so that it is <END_TASK> <USER_TASK:> Description: def fetch_csv(self, url, pre_func=None, post_func=None, date_column='date', date_format=None, timezone=pytz.utc.zone, symbol=None, mask=True, symbol_column=None, special_params_checker=None, country_code=None, **kwargs): """Fetch a csv from a remote url and register the data so that it is queryable from the ``data`` object. Parameters ---------- url : str The url of the csv file to load. pre_func : callable[pd.DataFrame -> pd.DataFrame], optional A callback to allow preprocessing the raw data returned from fetch_csv before dates are paresed or symbols are mapped. post_func : callable[pd.DataFrame -> pd.DataFrame], optional A callback to allow postprocessing of the data after dates and symbols have been mapped. date_column : str, optional The name of the column in the preprocessed dataframe containing datetime information to map the data. date_format : str, optional The format of the dates in the ``date_column``. If not provided ``fetch_csv`` will attempt to infer the format. For information about the format of this string, see :func:`pandas.read_csv`. timezone : tzinfo or str, optional The timezone for the datetime in the ``date_column``. symbol : str, optional If the data is about a new asset or index then this string will be the name used to identify the values in ``data``. For example, one may use ``fetch_csv`` to load data for VIX, then this field could be the string ``'VIX'``. mask : bool, optional Drop any rows which cannot be symbol mapped. symbol_column : str If the data is attaching some new attribute to each asset then this argument is the name of the column in the preprocessed dataframe containing the symbols. This will be used along with the date information to map the sids in the asset finder. country_code : str, optional Country code to use to disambiguate symbol lookups. **kwargs Forwarded to :func:`pandas.read_csv`. Returns ------- csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV A requests source that will pull data from the url specified. """
if country_code is None:
    country_code = self.default_fetch_csv_country_code(
        self.trading_calendar,
    )

# Show all the logs every time fetcher is used.
csv_data_source = PandasRequestsCSV(
    url, pre_func, post_func,
    self.asset_finder,
    self.trading_calendar.day,
    self.sim_params.start_session,
    self.sim_params.end_session,
    date_column,
    date_format,
    timezone,
    symbol,
    mask,
    symbol_column,
    data_frequency=self.data_frequency,
    country_code=country_code,
    special_params_checker=special_params_checker,
    **kwargs
)

# ingest this into dataportal
self.data_portal.handle_extra_source(csv_data_source.df,
                                     self.sim_params)

return csv_data_source
<SYSTEM_TASK:> Adds an event to the algorithm's EventManager. <END_TASK> <USER_TASK:> Description: def add_event(self, rule, callback): """Adds an event to the algorithm's EventManager. Parameters ---------- rule : EventRule The rule for when the callback should be triggered. callback : callable[(context, data) -> None] The function to execute when the rule is triggered. """
self.event_manager.add_event(
    zipline.utils.events.Event(rule, callback),
)
<SYSTEM_TASK:> Schedules a function to be called according to some timed rules. <END_TASK> <USER_TASK:> Description: def schedule_function(self, func, date_rule=None, time_rule=None, half_days=True, calendar=None): """Schedules a function to be called according to some timed rules. Parameters ---------- func : callable[(context, data) -> None] The function to execute when the rule is triggered. date_rule : EventRule, optional The rule for the dates to execute this function. time_rule : EventRule, optional The rule for the times to execute this function. half_days : bool, optional Should this rule fire on half days? calendar : Sentinel, optional Calendar used to reconcile date and time rules. See Also -------- :class:`zipline.api.date_rules` :class:`zipline.api.time_rules` """
# When the user calls schedule_function(func, <time_rule>), assume that
# the user meant to specify a time rule but no date rule, instead of
# a date rule and no time rule as the signature suggests
if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule:
    warnings.warn('Got a time rule for the second positional argument '
                  'date_rule. You should use keyword argument '
                  'time_rule= when calling schedule_function without '
                  'specifying a date_rule', stacklevel=3)

date_rule = date_rule or date_rules.every_day()
time_rule = ((time_rule or time_rules.every_minute())
             if self.sim_params.data_frequency == 'minute' else
             # If we are in daily mode the time_rule is ignored.
             time_rules.every_minute())

# Check the type of the algorithm's schedule before pulling calendar
# Note that the ExchangeTradingSchedule is currently the only
# TradingSchedule class, so this is unlikely to be hit
if calendar is None:
    cal = self.trading_calendar
elif calendar is calendars.US_EQUITIES:
    cal = get_calendar('XNYS')
elif calendar is calendars.US_FUTURES:
    cal = get_calendar('us_futures')
else:
    raise ScheduleFunctionInvalidCalendar(
        given_calendar=calendar,
        allowed_calendars=(
            '[calendars.US_EQUITIES, calendars.US_FUTURES]'
        ),
    )

self.add_event(
    make_eventrule(date_rule, time_rule, cal, half_days),
    func,
)
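A sketch of typical usage from an algorithm's ``initialize``, assuming the standard ``zipline.api`` helpers; the ``rebalance`` callback is a made-up placeholder:

from zipline.api import schedule_function, date_rules, time_rules

def rebalance(context, data):
    pass  # trading logic would go here

def initialize(context):
    # run `rebalance` 30 minutes after the open on the first session of each week
    schedule_function(
        rebalance,
        date_rule=date_rules.week_start(),
        time_rule=time_rules.market_open(minutes=30),
    )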
<SYSTEM_TASK:> Create a specifier for a continuous contract. <END_TASK> <USER_TASK:> Description: def continuous_future(self, root_symbol_str, offset=0, roll='volume', adjustment='mul'): """Create a specifier for a continuous contract. Parameters ---------- root_symbol_str : str The root symbol for the future chain. offset : int, optional The distance from the primary contract. Default is 0. roll_style : str, optional How rolls are determined. Default is 'volume'. adjustment : str, optional Method for adjusting lookback prices between rolls. Options are 'mul', 'add', and None. Default is 'mul'. Returns ------- continuous_future : ContinuousFuture The continuous future specifier. """
return self.asset_finder.create_continuous_future(
    root_symbol_str,
    offset,
    roll,
    adjustment,
)
<SYSTEM_TASK:> Lookup an Equity by its ticker symbol. <END_TASK> <USER_TASK:> Description: def symbol(self, symbol_str, country_code=None): """Lookup an Equity by its ticker symbol. Parameters ---------- symbol_str : str The ticker symbol for the equity to lookup. country_code : str or None, optional A country to limit symbol searches to. Returns ------- equity : Equity The equity that held the ticker symbol on the current symbol lookup date. Raises ------ SymbolNotFound Raised when the symbols was not held on the current lookup date. See Also -------- :func:`zipline.api.set_symbol_lookup_date` """
# If the user has not set the symbol lookup date,
# use the end_session as the date for symbol->sid resolution.
_lookup_date = self._symbol_lookup_date \
    if self._symbol_lookup_date is not None \
    else self.sim_params.end_session

return self.asset_finder.lookup_symbol(
    symbol_str,
    as_of_date=_lookup_date,
    country_code=country_code,
)
<SYSTEM_TASK:> Lookup multiple Equities as a list. <END_TASK> <USER_TASK:> Description: def symbols(self, *args, **kwargs): """Lookup multiple Equities as a list. Parameters ---------- *args : iterable[str] The ticker symbols to lookup. country_code : str or None, optional A country to limit symbol searches to. Returns ------- equities : list[Equity] The equities that held the given ticker symbols on the current symbol lookup date. Raises ------ SymbolNotFound Raised when one of the symbols was not held on the current lookup date. See Also -------- :func:`zipline.api.set_symbol_lookup_date` """
return [self.symbol(identifier, **kwargs) for identifier in args]
<SYSTEM_TASK:> Helper method for validating parameters to the order API function. <END_TASK> <USER_TASK:> Description: def validate_order_params(self, asset, amount, limit_price, stop_price, style): """ Helper method for validating parameters to the order API function. Raises an UnsupportedOrderParameters if invalid arguments are found. """
if not self.initialized:
    raise OrderDuringInitialize(
        msg="order() can only be called from within handle_data()"
    )

if style:
    if limit_price:
        raise UnsupportedOrderParameters(
            msg="Passing both limit_price and style is not supported."
        )

    if stop_price:
        raise UnsupportedOrderParameters(
            msg="Passing both stop_price and style is not supported."
        )

for control in self.trading_controls:
    control.validate(asset,
                     amount,
                     self.portfolio,
                     self.get_datetime(),
                     self.trading_client.current_data)
<SYSTEM_TASK:> Helper method for converting deprecated limit_price and stop_price <END_TASK> <USER_TASK:> Description: def __convert_order_params_for_blotter(asset, limit_price, stop_price, style): """ Helper method for converting deprecated limit_price and stop_price arguments into ExecutionStyle instances. This function assumes that either style == None or (limit_price, stop_price) == (None, None). """
if style:
    assert (limit_price, stop_price) == (None, None)
    return style
if limit_price and stop_price:
    return StopLimitOrder(limit_price, stop_price, asset=asset)
if limit_price:
    return LimitOrder(limit_price, asset=asset)
if stop_price:
    return StopOrder(stop_price, asset=asset)
else:
    return MarketOrder()
<SYSTEM_TASK:> Place an order by desired value rather than desired number of <END_TASK> <USER_TASK:> Description: def order_value(self, asset, value, limit_price=None, stop_price=None, style=None): """Place an order by desired value rather than desired number of shares. Parameters ---------- asset : Asset The asset that this order is for. value : float If the requested asset exists, the requested value is divided by its price to imply the number of shares to transact. If the Asset being ordered is a Future, the 'value' calculated is actually the exposure, as Futures have no 'value'. value > 0 :: Buy/Cover value < 0 :: Sell/Short limit_price : float, optional The limit price for the order. stop_price : float, optional The stop price for the order. style : ExecutionStyle The execution style for the order. Returns ------- order_id : str The unique identifier for this order. Notes ----- See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style`` See Also -------- :class:`zipline.finance.execution.ExecutionStyle` :func:`zipline.api.order` :func:`zipline.api.order_percent` """
if not self._can_order_asset(asset):
    return None

amount = self._calculate_order_value_amount(asset, value)
return self.order(asset, amount,
                  limit_price=limit_price,
                  stop_price=stop_price,
                  style=style)
<SYSTEM_TASK:> Sync the last sale prices on the metrics tracker to a given <END_TASK> <USER_TASK:> Description: def _sync_last_sale_prices(self, dt=None): """Sync the last sale prices on the metrics tracker to a given datetime. Parameters ---------- dt : datetime The time to sync the prices to. Notes ----- This call is cached by the datetime. Repeated calls in the same bar are cheap. """
if dt is None:
    dt = self.datetime

if dt != self._last_sync_time:
    self.metrics_tracker.sync_last_sale_prices(
        dt,
        self.data_portal,
    )
    self._last_sync_time = dt
<SYSTEM_TASK:> Callback triggered by the simulation loop whenever the current dt <END_TASK> <USER_TASK:> Description: def on_dt_changed(self, dt): """ Callback triggered by the simulation loop whenever the current dt changes. Any logic that should happen exactly once at the start of each datetime group should happen here. """
self.datetime = dt
self.blotter.set_date(dt)
<SYSTEM_TASK:> Returns the current simulation datetime. <END_TASK> <USER_TASK:> Description: def get_datetime(self, tz=None): """ Returns the current simulation datetime. Parameters ---------- tz : tzinfo or str, optional The timezone to return the datetime in. This defaults to utc. Returns ------- dt : datetime The current simulation datetime converted to ``tz``. """
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
    dt = dt.astimezone(tz)
return dt
<SYSTEM_TASK:> Sets the order cancellation policy for the simulation. <END_TASK> <USER_TASK:> Description: def set_cancel_policy(self, cancel_policy): """Sets the order cancellation policy for the simulation. Parameters ---------- cancel_policy : CancelPolicy The cancellation policy to use. See Also -------- :class:`zipline.api.EODCancel` :class:`zipline.api.NeverCancel` """
if not isinstance(cancel_policy, CancelPolicy):
    raise UnsupportedCancelPolicy()

if self.initialized:
    raise SetCancelPolicyPostInit()

self.blotter.cancel_policy = cancel_policy
<SYSTEM_TASK:> Place an order in the specified asset corresponding to the given <END_TASK> <USER_TASK:> Description: def order_percent(self, asset, percent, limit_price=None, stop_price=None, style=None): """Place an order in the specified asset corresponding to the given percent of the current portfolio value. Parameters ---------- asset : Asset The asset that this order is for. percent : float The percentage of the portfolio value to allocate to ``asset``. This is specified as a decimal, for example: 0.50 means 50%. limit_price : float, optional The limit price for the order. stop_price : float, optional The stop price for the order. style : ExecutionStyle The execution style for the order. Returns ------- order_id : str The unique identifier for this order. Notes ----- See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style`` See Also -------- :class:`zipline.finance.execution.ExecutionStyle` :func:`zipline.api.order` :func:`zipline.api.order_value` """
if not self._can_order_asset(asset):
    return None

amount = self._calculate_order_percent_amount(asset, percent)
return self.order(asset, amount,
                  limit_price=limit_price,
                  stop_price=stop_price,
                  style=style)
<SYSTEM_TASK:> Place an order to adjust a position to a target number of shares. If <END_TASK> <USER_TASK:> Description: def order_target(self, asset, target, limit_price=None, stop_price=None, style=None): """Place an order to adjust a position to a target number of shares. If the position doesn't already exist, this is equivalent to placing a new order. If the position does exist, this is equivalent to placing an order for the difference between the target number of shares and the current number of shares. Parameters ---------- asset : Asset The asset that this order is for. target : int The desired number of shares of ``asset``. limit_price : float, optional The limit price for the order. stop_price : float, optional The stop price for the order. style : ExecutionStyle The execution style for the order. Returns ------- order_id : str The unique identifier for this order. Notes ----- ``order_target`` does not take into account any open orders. For example: .. code-block:: python order_target(sid(0), 10) order_target(sid(0), 10) This code will result in 20 shares of ``sid(0)`` because the first call to ``order_target`` will not have been filled when the second ``order_target`` call is made. See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style`` See Also -------- :class:`zipline.finance.execution.ExecutionStyle` :func:`zipline.api.order` :func:`zipline.api.order_target_percent` :func:`zipline.api.order_target_value` """
if not self._can_order_asset(asset):
    return None

amount = self._calculate_order_target_amount(asset, target)
return self.order(asset, amount,
                  limit_price=limit_price,
                  stop_price=stop_price,
                  style=style)
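A small illustrative sketch of the semantics described in the Notes above, assuming no other open orders exist for ``asset``:

# Suppose the portfolio currently holds 5 shares of `asset`.
order_target(asset, 10)   # places an order for +5 shares
order_target(asset, 0)    # would place an order for -5 shares (flatten the position)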
<SYSTEM_TASK:> Place an order to adjust a position to a target value. If <END_TASK> <USER_TASK:> Description: def order_target_value(self, asset, target, limit_price=None, stop_price=None, style=None): """Place an order to adjust a position to a target value. If the position doesn't already exist, this is equivalent to placing a new order. If the position does exist, this is equivalent to placing an order for the difference between the target value and the current value. If the Asset being ordered is a Future, the 'target value' calculated is actually the target exposure, as Futures have no 'value'. Parameters ---------- asset : Asset The asset that this order is for. target : float The desired total value of ``asset``. limit_price : float, optional The limit price for the order. stop_price : float, optional The stop price for the order. style : ExecutionStyle The execution style for the order. Returns ------- order_id : str The unique identifier for this order. Notes ----- ``order_target_value`` does not take into account any open orders. For example: .. code-block:: python order_target_value(sid(0), 10) order_target_value(sid(0), 10) This code will result in 20 dollars of ``sid(0)`` because the first call to ``order_target_value`` will not have been filled when the second ``order_target_value`` call is made. See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style`` See Also -------- :class:`zipline.finance.execution.ExecutionStyle` :func:`zipline.api.order` :func:`zipline.api.order_target` :func:`zipline.api.order_target_percent` """
if not self._can_order_asset(asset):
    return None

target_amount = self._calculate_order_value_amount(asset, target)
amount = self._calculate_order_target_amount(asset, target_amount)
return self.order(asset, amount,
                  limit_price=limit_price,
                  stop_price=stop_price,
                  style=style)
<SYSTEM_TASK:> Place an order to adjust a position to a target percent of the <END_TASK> <USER_TASK:> Description: def order_target_percent(self, asset, target, limit_price=None, stop_price=None, style=None): """Place an order to adjust a position to a target percent of the current portfolio value. If the position doesn't already exist, this is equivalent to placing a new order. If the position does exist, this is equivalent to placing an order for the difference between the target percent and the current percent. Parameters ---------- asset : Asset The asset that this order is for. target : float The desired percentage of the portfolio value to allocate to ``asset``. This is specified as a decimal, for example: 0.50 means 50%. limit_price : float, optional The limit price for the order. stop_price : float, optional The stop price for the order. style : ExecutionStyle The execution style for the order. Returns ------- order_id : str The unique identifier for this order. Notes ----- ``order_target_value`` does not take into account any open orders. For example: .. code-block:: python order_target_percent(sid(0), 10) order_target_percent(sid(0), 10) This code will result in 20% of the portfolio being allocated to sid(0) because the first call to ``order_target_percent`` will not have been filled when the second ``order_target_percent`` call is made. See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style`` See Also -------- :class:`zipline.finance.execution.ExecutionStyle` :func:`zipline.api.order` :func:`zipline.api.order_target` :func:`zipline.api.order_target_value` """
if not self._can_order_asset(asset):
    return None

amount = self._calculate_order_target_percent_amount(asset, target)
return self.order(asset, amount,
                  limit_price=limit_price,
                  stop_price=stop_price,
                  style=style)
<SYSTEM_TASK:> Place a batch market order for multiple assets. <END_TASK> <USER_TASK:> Description: def batch_market_order(self, share_counts): """Place a batch market order for multiple assets. Parameters ---------- share_counts : pd.Series[Asset -> int] Map from asset to number of shares to order for that asset. Returns ------- order_ids : pd.Index[str] Index of ids for newly-created orders. """
style = MarketOrder()
order_args = [
    (asset, amount, style)
    for (asset, amount) in iteritems(share_counts)
    if amount
]
return self.blotter.batch_order(order_args)
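A hypothetical usage sketch, assuming ``batch_market_order`` is exposed through ``zipline.api`` like the other order functions and that ``context.aapl`` and ``context.msft`` are assets stored on the context during ``initialize``:

import pandas as pd
from zipline.api import batch_market_order

def handle_data(context, data):
    share_counts = pd.Series({context.aapl: 100, context.msft: -50})
    order_ids = batch_market_order(share_counts)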
<SYSTEM_TASK:> Retrieve all of the current open orders. <END_TASK> <USER_TASK:> Description: def get_open_orders(self, asset=None): """Retrieve all of the current open orders. Parameters ---------- asset : Asset If passed and not None, return only the open orders for the given asset instead of all open orders. Returns ------- open_orders : dict[list[Order]] or list[Order] If no asset is passed this will return a dict mapping Assets to a list containing all the open orders for the asset. If an asset is passed then this will return a list of the open orders for this asset. """
if asset is None:
    return {
        key: [order.to_api_obj() for order in orders]
        for key, orders in iteritems(self.blotter.open_orders)
        if orders
    }
if asset in self.blotter.open_orders:
    orders = self.blotter.open_orders[asset]
    return [order.to_api_obj() for order in orders]
return []
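A brief usage sketch with the ``zipline.api`` entry points; ``context.aapl`` is a made-up asset attribute used only for illustration:

from zipline.api import get_open_orders, cancel_order

def handle_data(context, data):
    open_by_asset = get_open_orders()            # dict: Asset -> list[Order]
    for order in get_open_orders(context.aapl):  # list[Order] for one asset
        cancel_order(order)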
<SYSTEM_TASK:> Lookup an order based on the order id returned from one of the <END_TASK> <USER_TASK:> Description: def get_order(self, order_id): """Lookup an order based on the order id returned from one of the order functions. Parameters ---------- order_id : str The unique identifier for the order. Returns ------- order : Order The order object. """
if order_id in self.blotter.orders:
    return self.blotter.orders[order_id].to_api_obj()
<SYSTEM_TASK:> Cancel an open order. <END_TASK> <USER_TASK:> Description: def cancel_order(self, order_param): """Cancel an open order. Parameters ---------- order_param : str or Order The order_id or order object to cancel. """
order_id = order_param

if isinstance(order_param, zipline.protocol.Order):
    order_id = order_param.id

self.blotter.cancel(order_id)
<SYSTEM_TASK:> Register a new AccountControl to be checked on each bar. <END_TASK> <USER_TASK:> Description: def register_account_control(self, control): """ Register a new AccountControl to be checked on each bar. """
if self.initialized:
    raise RegisterAccountControlPostInit()
self.account_controls.append(control)
<SYSTEM_TASK:> Set a limit on the minimum leverage of the algorithm. <END_TASK> <USER_TASK:> Description: def set_min_leverage(self, min_leverage, grace_period): """Set a limit on the minimum leverage of the algorithm. Parameters ---------- min_leverage : float The minimum leverage for the algorithm. grace_period : pd.Timedelta The offset from the start date used to enforce a minimum leverage. """
deadline = self.sim_params.start_session + grace_period
control = MinLeverage(min_leverage, deadline)
self.register_account_control(control)
<SYSTEM_TASK:> Register a new TradingControl to be checked prior to order calls. <END_TASK> <USER_TASK:> Description: def register_trading_control(self, control): """ Register a new TradingControl to be checked prior to order calls. """
if self.initialized:
    raise RegisterTradingControlPostInit()
self.trading_controls.append(control)
<SYSTEM_TASK:> Set a limit on the number of orders that can be placed in a single <END_TASK> <USER_TASK:> Description: def set_max_order_count(self, max_count, on_error='fail'): """Set a limit on the number of orders that can be placed in a single day. Parameters ---------- max_count : int The maximum number of orders that can be placed on any single day. """
control = MaxOrderCount(on_error, max_count)
self.register_trading_control(control)
<SYSTEM_TASK:> Register a pipeline to be computed at the start of each day. <END_TASK> <USER_TASK:> Description: def attach_pipeline(self, pipeline, name, chunks=None, eager=True): """Register a pipeline to be computed at the start of each day. Parameters ---------- pipeline : Pipeline The pipeline to have computed. name : str The name of the pipeline. chunks : int or iterator, optional The number of days to compute pipeline results for. Increasing this number will make it longer to get the first results but may improve the total runtime of the simulation. If an iterator is passed, we will run in chunks based on values of the iterator. Default is True. eager : bool, optional Whether or not to compute this pipeline prior to before_trading_start. Returns ------- pipeline : Pipeline Returns the pipeline that was attached unchanged. See Also -------- :func:`zipline.api.pipeline_output` """
if chunks is None:
    # Make the first chunk smaller to get more immediate results:
    # (one week, then every half year)
    chunks = chain([5], repeat(126))
elif isinstance(chunks, int):
    chunks = repeat(chunks)

if name in self._pipelines:
    raise DuplicatePipelineName(name=name)

self._pipelines[name] = AttachedPipeline(pipeline, iter(chunks), eager)

# Return the pipeline to allow expressions like
# p = attach_pipeline(Pipeline(), 'name')
return pipeline
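A sketch of the usual attach/consume pattern using the ``zipline.api`` helpers; the empty ``Pipeline()`` stands in for a pipeline with real column definitions:

from zipline.api import attach_pipeline, pipeline_output
from zipline.pipeline import Pipeline

def initialize(context):
    attach_pipeline(Pipeline(), 'my_pipeline')

def before_trading_start(context, data):
    context.pipeline_data = pipeline_output('my_pipeline')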
<SYSTEM_TASK:> Compute `pipeline`, providing values for at least `start_date`. <END_TASK> <USER_TASK:> Description: def run_pipeline(self, pipeline, start_session, chunksize): """ Compute `pipeline`, providing values for at least `start_date`. Produces a DataFrame containing data for days between `start_date` and `end_date`, where `end_date` is defined by: `end_date = min(start_date + chunksize trading days, simulation_end)` Returns ------- (data, valid_until) : tuple (pd.DataFrame, pd.Timestamp) See Also -------- PipelineEngine.run_pipeline """
sessions = self.trading_calendar.all_sessions

# Load data starting from the previous trading day...
start_date_loc = sessions.get_loc(start_session)

# ...continuing until either the day before the simulation end, or
# until chunksize days of data have been loaded.
sim_end_session = self.sim_params.end_session

end_loc = min(
    start_date_loc + chunksize,
    sessions.get_loc(sim_end_session)
)

end_session = sessions[end_loc]

return \
    self.engine.run_pipeline(pipeline, start_session, end_session), \
    end_session
<SYSTEM_TASK:> Checks for the presence of an extra argument in the argument list. Raises exceptions <END_TASK> <USER_TASK:> Description: def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args): """ Checks for the presence of an extra argument in the argument list. Raises exceptions if this is unexpected or if it is missing and expected. """
if present:
    if not expected:
        raise exc_unexpected(*exc_args)
elif expected and expected is not Argument.ignore:
    raise exc_missing(*exc_args)
<SYSTEM_TASK:> An asset is restricted for all dts if it is in the static list. <END_TASK> <USER_TASK:> Description: def is_restricted(self, assets, dt): """ An asset is restricted for all dts if it is in the static list. """
if isinstance(assets, Asset):
    return assets in self._restricted_set

return pd.Series(
    index=pd.Index(assets),
    data=vectorized_is_element(assets, self._restricted_set)
)
<SYSTEM_TASK:> Returns whether or not an asset or iterable of assets is restricted <END_TASK> <USER_TASK:> Description: def is_restricted(self, assets, dt): """ Returns whether or not an asset or iterable of assets is restricted on a dt. """
if isinstance(assets, Asset):
    return self._is_restricted_for_asset(assets, dt)

is_restricted = partial(self._is_restricted_for_asset, dt=dt)
return pd.Series(
    index=pd.Index(assets),
    data=vectorize(is_restricted, otypes=[bool])(assets)
)
<SYSTEM_TASK:> Returns a cash payment based on the dividends that should be paid out <END_TASK> <USER_TASK:> Description: def pay_dividends(self, next_trading_day): """ Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends. """
net_cash_payment = 0.0

try:
    payments = self._unpaid_dividends[next_trading_day]
    # Mark these dividends as paid by dropping them from our unpaid
    del self._unpaid_dividends[next_trading_day]
except KeyError:
    payments = []

# representing the fact that we're required to reimburse the owner of
# the stock for any dividends paid while borrowing.
for payment in payments:
    net_cash_payment += payment['amount']

# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
try:
    stock_payments = self._unpaid_stock_dividends[next_trading_day]
except KeyError:
    stock_payments = []

for stock_payment in stock_payments:
    payment_asset = stock_payment['payment_asset']
    share_count = stock_payment['share_count']
    # note we create a Position for stock dividend if we don't
    # already own the asset
    if payment_asset in self.positions:
        position = self.positions[payment_asset]
    else:
        position = self.positions[payment_asset] = Position(
            payment_asset,
        )

    position.amount += share_count

return net_cash_payment
<SYSTEM_TASK:> The current status of the positions. <END_TASK> <USER_TASK:> Description: def stats(self): """The current status of the positions. Returns ------- stats : PositionStats The current position stats. Notes ----- This is cached; repeated access will not recompute the stats until the stats may have changed. """
if self._dirty_stats:
    calculate_position_tracker_stats(self.positions, self._stats)
    self._dirty_stats = False

return self._stats
<SYSTEM_TASK:> Add a transaction to ledger, updating the current state as needed. <END_TASK> <USER_TASK:> Description: def process_transaction(self, transaction): """Add a transaction to ledger, updating the current state as needed. Parameters ---------- transaction : zp.Transaction The transaction to execute. """
asset = transaction.asset
if isinstance(asset, Future):
    try:
        old_price = self._payout_last_sale_prices[asset]
    except KeyError:
        self._payout_last_sale_prices[asset] = transaction.price
    else:
        position = self.position_tracker.positions[asset]
        amount = position.amount
        price = transaction.price

        self._cash_flow(
            self._calculate_payout(
                asset.price_multiplier,
                amount,
                old_price,
                price,
            ),
        )

        if amount + transaction.amount == 0:
            del self._payout_last_sale_prices[asset]
        else:
            self._payout_last_sale_prices[asset] = price
else:
    self._cash_flow(-(transaction.price * transaction.amount))

self.position_tracker.execute_transaction(transaction)

# we only ever want the dict form from now on
transaction_dict = transaction.to_dict()
try:
    self._processed_transactions[transaction.dt].append(
        transaction_dict,
    )
except KeyError:
    self._processed_transactions[transaction.dt] = [transaction_dict]
<SYSTEM_TASK:> Keep track of an order that was placed. <END_TASK> <USER_TASK:> Description: def process_order(self, order): """Keep track of an order that was placed. Parameters ---------- order : zp.Order The order to record. """
try: dt_orders = self._orders_by_modified[order.dt] except KeyError: self._orders_by_modified[order.dt] = OrderedDict([ (order.id, order), ]) self._orders_by_id[order.id] = order else: self._orders_by_id[order.id] = dt_orders[order.id] = order # to preserve the order of the orders by modified date move_to_end(dt_orders, order.id, last=True) move_to_end(self._orders_by_id, order.id, last=True)
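A minimal sketch of the ordering bookkeeping, using Python 3's ``OrderedDict.move_to_end`` in place of the compatibility helper used above:

from collections import OrderedDict

orders_by_id = OrderedDict()

def record(order):
    order_id = order['id']
    orders_by_id[order_id] = order
    # Re-recording an existing id moves it to the back, preserving
    # most-recently-modified ordering.
    orders_by_id.move_to_end(order_id, last=True)

for oid in ['a', 'b', 'a', 'c']:
    record({'id': oid})
print(list(orders_by_id))  # ['b', 'a', 'c'] -- 'a' moved behind 'b' when touched again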
<SYSTEM_TASK:> Process the commission. <END_TASK> <USER_TASK:> Description: def process_commission(self, commission): """Process the commission. Parameters ---------- commission : zp.Event The commission being paid. """
asset = commission['asset'] cost = commission['cost'] self.position_tracker.handle_commission(asset, cost) self._cash_flow(-cost)
<SYSTEM_TASK:> Process dividends for the next session. <END_TASK> <USER_TASK:> Description: def process_dividends(self, next_session, asset_finder, adjustment_reader): """Process dividends for the next session. This will earn us any dividends whose ex-date is the next session as well as pay out any dividends whose pay-date is the next session. """
position_tracker = self.position_tracker # Earn dividends whose ex_date is the next trading day. We need to # check if we own any of these stocks so we know to pay them out when # the pay date comes. held_sids = set(position_tracker.positions) if held_sids: cash_dividends = adjustment_reader.get_dividends_with_ex_date( held_sids, next_session, asset_finder ) stock_dividends = ( adjustment_reader.get_stock_dividends_with_ex_date( held_sids, next_session, asset_finder ) ) # Earning a dividend just marks that we need to get paid out on # the dividend's pay-date. This does not affect our cash yet. position_tracker.earn_dividends( cash_dividends, stock_dividends, ) # Pay out the dividends whose pay-date is the next session. This does # affect our cash. self._cash_flow( position_tracker.pay_dividends( next_session, ), )
<SYSTEM_TASK:> Retrieve the dict-form of all of the transactions in a given bar or <END_TASK> <USER_TASK:> Description: def transactions(self, dt=None): """Retrieve the dict-form of all of the transactions in a given bar or for the whole simulation. Parameters ---------- dt : pd.Timestamp or None, optional The particular datetime to look up transactions for. If not passed, or None is explicitly passed, all of the transactions will be returned. Returns ------- transactions : list[dict] The transaction information. """
if dt is None: # flatten the by-day transactions return [ txn for by_day in itervalues(self._processed_transactions) for txn in by_day ] return self._processed_transactions.get(dt, [])
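A tiny standalone sketch of the flattening idiom: per-day transaction lists are chained into one list when no ``dt`` is given:

import pandas as pd

processed_transactions = {
    pd.Timestamp('2016-01-04'): [{'amount': 10}, {'amount': -5}],
    pd.Timestamp('2016-01-05'): [{'amount': 3}],
}

def transactions(dt=None):
    if dt is None:
        # flatten the by-day transactions
        return [txn for by_day in processed_transactions.values() for txn in by_day]
    return processed_transactions.get(dt, [])

print(len(transactions()))                       # 3
print(transactions(pd.Timestamp('2016-01-05')))  # [{'amount': 3}]
print(transactions(pd.Timestamp('2016-01-06')))  # []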
<SYSTEM_TASK:> Retrieve the dict-form of all of the orders in a given bar or for <END_TASK> <USER_TASK:> Description: def orders(self, dt=None): """Retrieve the dict-form of all of the orders in a given bar or for the whole simulation. Parameters ---------- dt : pd.Timestamp or None, optional The particular datetime to look up orders for. If not passed, or None is explicitly passed, all of the orders will be returned. Returns ------- orders : list[dict] The order information. """
if dt is None: # orders by id is already flattened return [o.to_dict() for o in itervalues(self._orders_by_id)] return [ o.to_dict() for o in itervalues(self._orders_by_modified.get(dt, {})) ]
<SYSTEM_TASK:> Force a computation of the current portfolio state. <END_TASK> <USER_TASK:> Description: def update_portfolio(self): """Force a computation of the current portfolio state. """
if not self._dirty_portfolio: return portfolio = self._portfolio pt = self.position_tracker portfolio.positions = pt.get_positions() position_stats = pt.stats portfolio.positions_value = position_value = ( position_stats.net_value ) portfolio.positions_exposure = position_stats.net_exposure self._cash_flow(self._get_payout_total(pt.positions)) start_value = portfolio.portfolio_value # update the new starting value portfolio.portfolio_value = end_value = portfolio.cash + position_value pnl = end_value - start_value if start_value != 0: returns = pnl / start_value else: returns = 0.0 portfolio.pnl += pnl portfolio.returns = ( (1 + portfolio.returns) * (1 + returns) - 1 ) # the portfolio has been fully synced self._dirty_portfolio = False
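The returns bookkeeping compounds each update's return into the cumulative portfolio return geometrically. A small numeric sketch with made-up portfolio values:

start_value = 100000.0
cumulative_returns = 0.0
for end_value in (101000.0, 99990.0, 102489.75):  # made-up end-of-update values
    pnl = end_value - start_value
    period_return = pnl / start_value if start_value != 0 else 0.0
    cumulative_returns = (1 + cumulative_returns) * (1 + period_return) - 1
    start_value = end_value

# Compounding telescopes, so this equals 102489.75 / 100000.0 - 1 (~0.0249).
print(cumulative_returns)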
<SYSTEM_TASK:> Creates or returns a dataset from a blaze expression. <END_TASK> <USER_TASK:> Description: def new_dataset(expr, missing_values, domain): """ Creates or returns a dataset from a blaze expression. Parameters ---------- expr : Expr The blaze expression representing the values. missing_values : frozenset of (name, value) pairs Pairs associating a column name with the missing_value for that column. This needs to be a frozenset rather than a dict or tuple of tuples because we want a collection that's unordered but still hashable. domain : zipline.pipeline.domain.Domain Domain of the dataset to be created. Returns ------- ds : type A new dataset type. Notes ----- This function is memoized. Repeated calls with the same inputs will return the same type. """
missing_values = dict(missing_values) class_dict = {'ndim': 2 if SID_FIELD_NAME in expr.fields else 1} for name, type_ in expr.dshape.measure.fields: # Don't generate a column for sid or timestamp, since they're # implicitly the labels of the arrays that will be passed to pipeline # Terms. if name in (SID_FIELD_NAME, TS_FIELD_NAME): continue type_ = datashape_type_to_numpy(type_) if can_represent_dtype(type_): col = Column( type_, missing_values.get(name, NotSpecified), ) else: col = NonPipelineField(name, type_) class_dict[name] = col if 'domain' in class_dict: raise ValueError("Got a column named 'domain' in new_dataset(). " "'domain' is reserved.") class_dict['domain'] = domain name = expr._name if name is None: name = next(_new_names) # unicode is a name error in py3 but the branch is only hit # when we are in python 2. if PY2 and isinstance(name, unicode): # pragma: no cover # noqa name = name.encode('utf-8') return type(name, (DataSet,), class_dict)
<SYSTEM_TASK:> Validate that the expression and resources passed match up. <END_TASK> <USER_TASK:> Description: def _check_resources(name, expr, resources): """Validate that the expression and resources passed match up. Parameters ---------- name : str The name of the argument we are checking. expr : Expr The potentially bound expr. resources The explicitly passed resources to compute expr. Raises ------ ValueError If the resources do not match for an expression. """
if expr is None: return bound = expr._resources() if not bound and resources is None: raise ValueError('no resources provided to compute %s' % name) if bound and resources: raise ValueError( 'explicit and implicit resources provided to compute %s' % name, )
<SYSTEM_TASK:> Check that a field is a datetime inside some measure. <END_TASK> <USER_TASK:> Description: def _check_datetime_field(name, measure): """Check that a field is a datetime inside some measure. Parameters ---------- name : str The name of the field to check. measure : Record The record to check the field of. Raises ------ TypeError If the field is not a datetime inside ``measure``. """
if not isinstance(measure[name], (Date, DateTime)): raise TypeError( "'{name}' field must be a '{dt}', not: '{dshape}'".format( name=name, dt=DateTime(), dshape=measure[name], ), )
<SYSTEM_TASK:> Find the correct metadata expression for the expression. <END_TASK> <USER_TASK:> Description: def _get_metadata(field, expr, metadata_expr, no_metadata_rule): """Find the correct metadata expression for the expression. Parameters ---------- field : {'deltas', 'checkpoints'} The kind of metadata expr to lookup. expr : Expr The baseline expression. metadata_expr : Expr, 'auto', or None The metadata argument. If this is 'auto', then the metadata table will be searched for by walking up the expression tree. If this cannot be reflected, then an action will be taken based on the ``no_metadata_rule``. no_metadata_rule : {'warn', 'raise', 'ignore'} How to handle the case where the metadata_expr='auto' but no expr could be found. Returns ------- metadata : Expr or None The deltas or metadata table to use. """
if isinstance(metadata_expr, bz.Expr) or metadata_expr is None: return metadata_expr try: return expr._child['_'.join(((expr._name or ''), field))] except (ValueError, AttributeError): if no_metadata_rule == 'raise': raise ValueError( "no %s table could be reflected for %s" % (field, expr) ) elif no_metadata_rule == 'warn': warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4) return None
<SYSTEM_TASK:> Verify that the baseline and deltas expressions have a timestamp field. <END_TASK> <USER_TASK:> Description: def _ensure_timestamp_field(dataset_expr, deltas, checkpoints): """Verify that the baseline and deltas expressions have a timestamp field. If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will be copied from the ``AD_FIELD_NAME``. If one is provided, then we will verify that it is the correct dshape. Parameters ---------- dataset_expr : Expr The baseline expression. deltas : Expr or None The deltas expression if any was provided. checkpoints : Expr or None The checkpoints expression if any was provided. Returns ------- dataset_expr, deltas : Expr The new baseline and deltas expressions to use. """
measure = dataset_expr.dshape.measure if TS_FIELD_NAME not in measure.names: dataset_expr = bz.transform( dataset_expr, **{TS_FIELD_NAME: dataset_expr[AD_FIELD_NAME]} ) deltas = _ad_as_ts(deltas) checkpoints = _ad_as_ts(checkpoints) else: _check_datetime_field(TS_FIELD_NAME, measure) return dataset_expr, deltas, checkpoints
<SYSTEM_TASK:> Bind a Blaze expression to resources. <END_TASK> <USER_TASK:> Description: def bind_expression_to_resources(expr, resources): """ Bind a Blaze expression to resources. Parameters ---------- expr : bz.Expr The expression to which we want to bind resources. resources : dict[bz.Symbol -> any] Mapping from the loadable terms of ``expr`` to actual data resources. Returns ------- bound_expr : bz.Expr ``expr`` with bound resources. """
# bind the resources into the expression if resources is None: resources = {} # _subs stands for substitute. It's not actually private, blaze just # prefixes symbol-manipulation methods with underscores to prevent # collisions with data column names. return expr._subs({ k: bz.data(v, dshape=k.dshape) for k, v in iteritems(resources) })
<SYSTEM_TASK:> Computes a lower bound and a DataFrame of checkpoints. <END_TASK> <USER_TASK:> Description: def get_materialized_checkpoints(checkpoints, colnames, lower_dt, odo_kwargs): """ Computes a lower bound and a DataFrame of checkpoints. Parameters ---------- checkpoints : Expr Bound blaze expression for a checkpoints table from which to get a computed lower bound. colnames : iterable of str The names of the columns for which checkpoints should be computed. lower_dt : pd.Timestamp The lower date being queried for, which serves as an upper bound for checkpoints. odo_kwargs : dict, optional The extra keyword arguments to pass to ``odo``. """
if checkpoints is not None: ts = checkpoints[TS_FIELD_NAME] checkpoints_ts = odo( ts[ts < lower_dt].max(), pd.Timestamp, **odo_kwargs ) if pd.isnull(checkpoints_ts): # We don't have a checkpoint for before our start date so just # don't constrain the lower date. materialized_checkpoints = pd.DataFrame(columns=colnames) lower = None else: materialized_checkpoints = odo( checkpoints[ts == checkpoints_ts][colnames], pd.DataFrame, **odo_kwargs ) lower = checkpoints_ts else: materialized_checkpoints = pd.DataFrame(columns=colnames) lower = None # we don't have a good lower date constraint return lower, materialized_checkpoints
<SYSTEM_TASK:> Query a blaze expression in a given time range properly forward filling <END_TASK> <USER_TASK:> Description: def ffill_query_in_range(expr, lower, upper, checkpoints=None, odo_kwargs=None, ts_field=TS_FIELD_NAME): """Query a blaze expression in a given time range properly forward filling from values that fall before the lower date. Parameters ---------- expr : Expr Bound blaze expression. lower : datetime The lower date to query for. upper : datetime The upper date to query for. checkpoints : Expr, optional Bound blaze expression for a checkpoints table from which to get a computed lower bound. odo_kwargs : dict, optional The extra keyword arguments to pass to ``odo``. ts_field : str, optional The name of the timestamp field in the given blaze expression. Returns ------- raw : pd.DataFrame A strict dataframe for the data in the given date range. This may start before the requested start date if a value is needed to ffill. """
odo_kwargs = odo_kwargs or {} computed_lower, materialized_checkpoints = get_materialized_checkpoints( checkpoints, expr.fields, lower, odo_kwargs, ) pred = expr[ts_field] <= upper if computed_lower is not None: # only constrain the lower date if we computed a new lower date pred &= expr[ts_field] >= computed_lower raw = pd.concat( ( materialized_checkpoints, odo( expr[pred], pd.DataFrame, **odo_kwargs ), ), ignore_index=True, ) raw.loc[:, ts_field] = raw.loc[:, ts_field].astype('datetime64[ns]') return raw
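A pandas-only sketch of the same idea, with no blaze or odo involved: the newest row strictly before the lower bound seeds the forward fill, and rows inside the range are appended after it. This is a conceptual illustration, not the loader's implementation:

import pandas as pd

data = pd.DataFrame({
    'timestamp': pd.to_datetime(['2015-12-01', '2016-01-10', '2016-02-01', '2016-03-01']),
    'value': [1.0, 2.0, 3.0, 4.0],
})

def ffill_query_in_range(df, lower, upper, ts_field='timestamp'):
    before = df[df[ts_field] < lower]
    # Use the newest row before the window as the checkpoint seeding the ffill.
    checkpoint = before.iloc[[-1]] if len(before) else df.iloc[:0]
    in_range = df[(df[ts_field] >= lower) & (df[ts_field] <= upper)]
    return pd.concat([checkpoint, in_range], ignore_index=True)

out = ffill_query_in_range(data, pd.Timestamp('2016-01-15'), pd.Timestamp('2016-02-15'))
print(out['value'].tolist())  # [2.0, 3.0] -- the 2016-01-10 row seeds the fill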
<SYSTEM_TASK:> Explicitly map a dataset to a collection of blaze expressions. <END_TASK> <USER_TASK:> Description: def register_dataset(self, dataset, expr, deltas=None, checkpoints=None, odo_kwargs=None): """Explicitly map a dataset to a collection of blaze expressions. Parameters ---------- dataset : DataSet The pipeline dataset to map to the given expressions. expr : Expr The baseline values. deltas : Expr, optional The deltas for the data. checkpoints : Expr, optional The forward fill checkpoints for the data. odo_kwargs : dict, optional The keyword arguments to forward to the odo calls internally. See Also -------- :func:`zipline.pipeline.loaders.blaze.from_blaze` """
expr_data = ExprData( expr, deltas, checkpoints, odo_kwargs, ) for column in dataset.columns: self._table_expressions[column] = expr_data
<SYSTEM_TASK:> Explicitly map a single bound column to a collection of blaze <END_TASK> <USER_TASK:> Description: def register_column(self, column, expr, deltas=None, checkpoints=None, odo_kwargs=None): """Explicitly map a single bound column to a collection of blaze expressions. The expressions need to have ``timestamp`` and ``as_of`` columns. Parameters ---------- column : BoundColumn The pipeline dataset to map to the given expressions. expr : Expr The baseline values. deltas : Expr, optional The deltas for the data. checkpoints : Expr, optional The forward fill checkpoints for the data. odo_kwargs : dict, optional The keyword arguments to forward to the odo calls internally. See Also -------- :func:`zipline.pipeline.loaders.blaze.from_blaze` """
self._table_expressions[column] = ExprData( expr, deltas, checkpoints, odo_kwargs, )
<SYSTEM_TASK:> Given a dict of mappings where the values are lists of <END_TASK> <USER_TASK:> Description: def merge_ownership_periods(mappings): """ Given a dict of mappings where the values are lists of OwnershipPeriod objects, returns a dict with the same structure whose OwnershipPeriod objects are adjusted so that the periods have no gaps. Orders the periods chronologically, and pushes forward the end date of each period to match the start date of the following period. The end date of the last period is pushed forward to the max Timestamp. """
return valmap( lambda v: tuple( OwnershipPeriod( a.start, b.start, a.sid, a.value, ) for a, b in sliding_window( 2, concatv( sorted(v), # concat with a fake ownership object to make the last # end date be max timestamp [OwnershipPeriod( pd.Timestamp.max.tz_localize('utc'), None, None, None, )], ), ) ), mappings, )
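A standalone sketch of the gap-closing step, with a namedtuple standing in for ``OwnershipPeriod`` and plain pairwise iteration in place of ``toolz.sliding_window``:

from collections import namedtuple

import pandas as pd

OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')

def close_gaps(periods):
    ordered = sorted(periods) + [
        # Fake trailing period so the last real period ends at the max Timestamp.
        OwnershipPeriod(pd.Timestamp.max.tz_localize('utc'), None, None, None),
    ]
    return [
        OwnershipPeriod(a.start, b.start, a.sid, a.value)
        for a, b in zip(ordered, ordered[1:])
    ]

raw = [
    OwnershipPeriod(pd.Timestamp('2014-01-01', tz='utc'),
                    pd.Timestamp('2014-06-01', tz='utc'), 1, 'AAPL'),
    OwnershipPeriod(pd.Timestamp('2015-01-01', tz='utc'),
                    pd.Timestamp('2015-06-01', tz='utc'), 2, 'AAPL'),
]
for period in close_gaps(raw):
    print(period.sid, period.start, '->', period.end)
# sid 1 now ends exactly where sid 2 begins; sid 2 runs to pd.Timestamp.max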
<SYSTEM_TASK:> Builds a dict mapping to lists of OwnershipPeriods, from a db table. <END_TASK> <USER_TASK:> Description: def build_ownership_map(table, key_from_row, value_from_row): """ Builds a dict mapping to lists of OwnershipPeriods, from a db table. """
return _build_ownership_map_from_rows( sa.select(table.c).execute().fetchall(), key_from_row, value_from_row, )
<SYSTEM_TASK:> Builds a dict mapping group keys to maps of keys to lists of <END_TASK> <USER_TASK:> Description: def build_grouped_ownership_map(table, key_from_row, value_from_row, group_key): """ Builds a dict mapping group keys to maps of keys to lists of OwnershipPeriods, from a db table. """
grouped_rows = groupby( group_key, sa.select(table.c).execute().fetchall(), ) return { key: _build_ownership_map_from_rows( rows, key_from_row, value_from_row, ) for key, rows in grouped_rows.items() }
<SYSTEM_TASK:> Filter out kwargs from a dictionary. <END_TASK> <USER_TASK:> Description: def _filter_kwargs(names, dict_): """Filter out kwargs from a dictionary. Parameters ---------- names : set[str] The names to select from ``dict_``. dict_ : dict[str, any] The dictionary to select from. Returns ------- kwargs : dict[str, any] ``dict_`` where the keys intersect with ``names`` and the values are not None. """
return {k: v for k, v in dict_.items() if k in names and v is not None}
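A tiny example of the filtering behaviour: unknown keys and ``None`` values are both dropped before a database row is splatted into an asset constructor (hypothetical row and field names):

def filter_kwargs(names, dict_):
    return {k: v for k, v in dict_.items() if k in names and v is not None}

row = {'sid': 24, 'symbol': 'AAPL', 'auto_close_date': None, 'extra_db_column': 'x'}
print(filter_kwargs({'sid', 'symbol', 'auto_close_date'}, row))
# {'sid': 24, 'symbol': 'AAPL'} -- None values and unknown keys are dropped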
<SYSTEM_TASK:> Takes in a dict of Asset init args and converts dates to pd.Timestamps <END_TASK> <USER_TASK:> Description: def _convert_asset_timestamp_fields(dict_): """ Takes in a dict of Asset init args and converts dates to pd.Timestamps """
for key in _asset_timestamp_fields & viewkeys(dict_): value = pd.Timestamp(dict_[key], tz='UTC') dict_[key] = None if isnull(value) else value return dict_
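A standalone sketch of the timestamp normalization, with a hypothetical field set in place of the module-level ``_asset_timestamp_fields``:

import pandas as pd
from pandas import isnull

# Hypothetical field set standing in for _asset_timestamp_fields.
timestamp_fields = {'start_date', 'end_date', 'auto_close_date'}

def convert_asset_timestamp_fields(dict_):
    for key in timestamp_fields & dict_.keys():
        value = pd.Timestamp(dict_[key], tz='UTC')
        dict_[key] = None if isnull(value) else value
    return dict_

row = {'sid': 24, 'start_date': '2002-01-01', 'auto_close_date': None}
print(convert_asset_timestamp_fields(row))
# start_date becomes Timestamp('2002-01-01 00:00:00+0000'), auto_close_date stays None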
<SYSTEM_TASK:> Whether or not `asset` was active at the time corresponding to <END_TASK> <USER_TASK:> Description: def was_active(reference_date_value, asset): """ Whether or not `asset` was active at the time corresponding to `reference_date_value`. Parameters ---------- reference_date_value : int Date, represented as nanoseconds since EPOCH, for which we want to know if `asset` was alive. This is generally the result of accessing the `value` attribute of a pandas Timestamp. asset : Asset The asset object to check. Returns ------- was_active : bool Whether or not the `asset` existed at the specified time. """
return ( asset.start_date.value <= reference_date_value <= asset.end_date.value )
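A tiny example using a namedtuple stand-in for ``Asset`` and the nanosecond ``.value`` of pandas Timestamps:

from collections import namedtuple

import pandas as pd

FakeAsset = namedtuple('FakeAsset', 'start_date end_date')
asset = FakeAsset(pd.Timestamp('2010-01-04', tz='UTC'), pd.Timestamp('2015-06-01', tz='UTC'))

def was_active(reference_date_value, asset):
    return asset.start_date.value <= reference_date_value <= asset.end_date.value

print(was_active(pd.Timestamp('2012-03-01', tz='UTC').value, asset))  # True
print(was_active(pd.Timestamp('2016-01-04', tz='UTC').value, asset))  # False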
<SYSTEM_TASK:> Retrieve asset types for a list of sids. <END_TASK> <USER_TASK:> Description: def lookup_asset_types(self, sids): """ Retrieve asset types for a list of sids. Parameters ---------- sids : list[int] Returns ------- types : dict[sid -> str or None] Asset types for the provided sids. """
found = {} missing = set() for sid in sids: try: found[sid] = self._asset_type_cache[sid] except KeyError: missing.add(sid) if not missing: return found router_cols = self.asset_router.c for assets in group_into_chunks(missing): query = sa.select((router_cols.sid, router_cols.asset_type)).where( self.asset_router.c.sid.in_(map(int, assets)) ) for sid, type_ in query.execute().fetchall(): missing.remove(sid) found[sid] = self._asset_type_cache[sid] = type_ for sid in missing: found[sid] = self._asset_type_cache[sid] = None return found
<SYSTEM_TASK:> Retrieve the most recent symbol for a set of sids. <END_TASK> <USER_TASK:> Description: def _select_most_recent_symbols_chunk(self, sid_group): """Retrieve the most recent symbol for a set of sids. Parameters ---------- sid_group : iterable[int] The sids to lookup. The length of this sequence must be less than or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be passed in as sql bind params. Returns ------- sel : Selectable The sqlalchemy selectable that will query for the most recent symbol for each sid. Notes ----- This is implemented as an inner select of the columns of interest ordered by the end date of the (sid, symbol) mapping. We then group that inner select on the sid with no aggregations to select the last row per group which gives us the most recently active symbol for all of the sids. """
cols = self.equity_symbol_mappings.c # These are the columns we actually want. data_cols = (cols.sid,) + tuple(cols[name] for name in symbol_columns) # Also select the max of end_date so that all non-grouped fields take # on the value associated with the max end_date. The SQLite docs say # this: # # When the min() or max() aggregate functions are used in an aggregate # query, all bare columns in the result set take values from the input # row which also contains the minimum or maximum. Only the built-in # min() and max() functions work this way. # # See https://www.sqlite.org/lang_select.html#resultset, for more info. to_select = data_cols + (sa.func.max(cols.end_date),) return sa.select( to_select, ).where( cols.sid.in_(map(int, sid_group)) ).group_by( cols.sid, )
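The query leans on the documented SQLite behaviour quoted above: when ``max()`` appears in an aggregate query, bare columns take their values from the row that held the maximum. A standalone demonstration with the in-memory sqlite3 module (hypothetical table and data):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE symbol_mappings (sid INTEGER, symbol TEXT, end_date INTEGER)')
conn.executemany(
    'INSERT INTO symbol_mappings VALUES (?, ?, ?)',
    [(1, 'OLD', 100), (1, 'NEW', 200), (2, 'XYZ', 150)],
)
rows = conn.execute(
    'SELECT sid, symbol, MAX(end_date) FROM symbol_mappings GROUP BY sid'
).fetchall()
print(rows)  # [(1, 'NEW', 200), (2, 'XYZ', 150)] -- the bare symbol comes from the max row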
<SYSTEM_TASK:> Internal function for loading assets from a table. <END_TASK> <USER_TASK:> Description: def _retrieve_assets(self, sids, asset_tbl, asset_type): """ Internal function for loading assets from a table. This should be the only method of `AssetFinder` that writes Assets into self._asset_cache. Parameters --------- sids : iterable of int Asset ids to look up. asset_tbl : sqlalchemy.Table Table from which to query assets. asset_type : type Type of asset to be constructed. Returns ------- assets : dict[int -> Asset] Dict mapping requested sids to the retrieved assets. """
# Fastpath for empty request. if not sids: return {} cache = self._asset_cache hits = {} querying_equities = issubclass(asset_type, Equity) filter_kwargs = ( _filter_equity_kwargs if querying_equities else _filter_future_kwargs ) rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities) for row in rows: sid = row['sid'] asset = asset_type(**filter_kwargs(row)) hits[sid] = cache[sid] = asset # If we get here, it means something in our code thought that a # particular sid was an equity/future and called this function with a # concrete type, but we couldn't actually resolve the asset. This is # an error in our code, not a user-input error. misses = tuple(set(sids) - viewkeys(hits)) if misses: if querying_equities: raise EquitiesNotFound(sids=misses) else: raise FutureContractsNotFound(sids=misses) return hits
<SYSTEM_TASK:> Resolve a symbol to an asset object without fuzzy matching. <END_TASK> <USER_TASK:> Description: def _lookup_symbol_strict(self, ownership_map, multi_country, symbol, as_of_date): """ Resolve a symbol to an asset object without fuzzy matching. Parameters ---------- ownership_map : dict[(str, str), list[OwnershipPeriod]] The mapping from split symbols to ownership periods. multi_country : bool Does this mapping span multiple countries? symbol : str The symbol to look up. as_of_date : datetime or None If multiple assets have held this symbol, which day should the resolution be checked against? If this value is None and multiple sids have held the ticker, then a MultipleSymbolsFound error will be raised. Returns ------- asset : Asset The asset that held the given symbol. Raises ------ SymbolNotFound Raised when the symbol or the symbol/as_of_date pair does not map to any assets. MultipleSymbolsFound Raised when multiple assets held the symbol. This happens if multiple assets held the symbol at disjoint times and ``as_of_date`` is None, or if multiple assets held the symbol at the same time and ``multi_country`` is True. Notes ----- The resolution algorithm is as follows: - Split the symbol into the company and share class components. - Do a dictionary lookup of the ``(company_symbol, share_class_symbol)`` in the provided ownership map. - If there is no entry in the dictionary, we don't know about this symbol so raise a ``SymbolNotFound`` error. - If ``as_of_date`` is None: - If there is more than one owner, raise ``MultipleSymbolsFound``. - Otherwise, because the list mapped to a symbol cannot be empty, return the single asset. - Iterate through all of the owners: - If the ``as_of_date`` is between the start and end of the ownership period: - If multi_country is False, return the found asset. - Otherwise, put the asset in a list. - At the end of the loop, if there are no candidate assets, raise a ``SymbolNotFound``. - If there is exactly one candidate, return it. - Otherwise, raise ``MultipleSymbolsFound`` because the ticker is not unique across countries. """
# split the symbol into the components, if there are no # company/share class parts then share_class_symbol will be empty company_symbol, share_class_symbol = split_delimited_symbol(symbol) try: owners = ownership_map[company_symbol, share_class_symbol] assert owners, 'empty owners list for %r' % symbol except KeyError: # no equity has ever held this symbol raise SymbolNotFound(symbol=symbol) if not as_of_date: # exactly one equity has ever held this symbol, we may resolve # without the date if len(owners) == 1: return self.retrieve_asset(owners[0].sid) options = {self.retrieve_asset(owner.sid) for owner in owners} if multi_country: country_codes = map(attrgetter('country_code'), options) if len(set(country_codes)) > 1: raise SameSymbolUsedAcrossCountries( symbol=symbol, options=dict(zip(country_codes, options)) ) # more than one equity has held this ticker, this # is ambiguous without the date raise MultipleSymbolsFound(symbol=symbol, options=options) options = [] country_codes = [] for start, end, sid, _ in owners: if start <= as_of_date < end: # find the equity that owned it on the given asof date asset = self.retrieve_asset(sid) # if this asset owned the symbol on this asof date and we are # only searching one country, return that asset if not multi_country: return asset else: options.append(asset) country_codes.append(asset.country_code) if not options: # no equity held the ticker on the given asof date raise SymbolNotFound(symbol=symbol) # if there is one valid option given the asof date, return that option if len(options) == 1: return options[0] # if there's more than one option given the asof date, a country code # must be passed to resolve the symbol to an asset raise SameSymbolUsedAcrossCountries( symbol=symbol, options=dict(zip(country_codes, options)) )
<SYSTEM_TASK:> Lookup an equity by symbol. <END_TASK> <USER_TASK:> Description: def lookup_symbol(self, symbol, as_of_date, fuzzy=False, country_code=None): """Lookup an equity by symbol. Parameters ---------- symbol : str The ticker symbol to resolve. as_of_date : datetime or None Look up the last owner of this symbol as of this datetime. If ``as_of_date`` is None, then this can only resolve the equity if exactly one equity has ever owned the ticker. fuzzy : bool, optional Should fuzzy symbol matching be used? Fuzzy symbol matching attempts to resolve differences in representations for shareclasses. For example, some people may represent the ``A`` shareclass of ``BRK`` as ``BRK.A``, where others could write ``BRK_A``. country_code : str or None, optional The country to limit searches to. If not provided, the search will span all countries which increases the likelihood of an ambiguous lookup. Returns ------- equity : Equity The equity that held ``symbol`` on the given ``as_of_date``, or the only equity to hold ``symbol`` if ``as_of_date`` is None. Raises ------ SymbolNotFound Raised when no equity has ever held the given symbol. MultipleSymbolsFound Raised when no ``as_of_date`` is given and more than one equity has held ``symbol``. This is also raised when ``fuzzy=True`` and there are multiple candidates for the given ``symbol`` on the ``as_of_date``. Also raised when no ``country_code`` is given and the symbol is ambiguous across multiple countries. """
if symbol is None: raise TypeError("Cannot lookup asset for symbol of None for " "as of date %s." % as_of_date) if fuzzy: f = self._lookup_symbol_fuzzy mapping = self._choose_fuzzy_symbol_ownership_map(country_code) else: f = self._lookup_symbol_strict mapping = self._choose_symbol_ownership_map(country_code) if mapping is None: raise SymbolNotFound(symbol=symbol) return f( mapping, country_code is None, symbol, as_of_date, )
<SYSTEM_TASK:> Lookup a list of equities by symbol. <END_TASK> <USER_TASK:> Description: def lookup_symbols(self, symbols, as_of_date, fuzzy=False, country_code=None): """ Lookup a list of equities by symbol. Equivalent to:: [finder.lookup_symbol(s, as_of, fuzzy) for s in symbols] but potentially faster because repeated lookups are memoized. Parameters ---------- symbols : sequence[str] Sequence of ticker symbols to resolve. as_of_date : pd.Timestamp Forwarded to ``lookup_symbol``. fuzzy : bool, optional Forwarded to ``lookup_symbol``. country_code : str or None, optional The country to limit searches to. If not provided, the search will span all countries which increases the likelihood of an ambiguous lookup. Returns ------- equities : list[Equity] """
if not symbols: return [] multi_country = country_code is None if fuzzy: f = self._lookup_symbol_fuzzy mapping = self._choose_fuzzy_symbol_ownership_map(country_code) else: f = self._lookup_symbol_strict mapping = self._choose_symbol_ownership_map(country_code) if mapping is None: raise SymbolNotFound(symbol=symbols[0]) memo = {} out = [] append_output = out.append for sym in symbols: if sym in memo: append_output(memo[sym]) else: equity = memo[sym] = f( mapping, multi_country, sym, as_of_date, ) append_output(equity) return out
<SYSTEM_TASK:> Lookup a future contract by symbol. <END_TASK> <USER_TASK:> Description: def lookup_future_symbol(self, symbol): """Lookup a future contract by symbol. Parameters ---------- symbol : str The symbol of the desired contract. Returns ------- future : Future The future contract referenced by ``symbol``. Raises ------ SymbolNotFound Raised when no contract named 'symbol' is found. """
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\ .execute().fetchone() # If no data found, raise an exception if not data: raise SymbolNotFound(symbol=symbol) return self.retrieve_asset(data['sid'])
<SYSTEM_TASK:> Get the value of a supplementary field for an asset. <END_TASK> <USER_TASK:> Description: def get_supplementary_field(self, sid, field_name, as_of_date): """Get the value of a supplementary field for an asset. Parameters ---------- sid : int The sid of the asset to query. field_name : str Name of the supplementary field. as_of_date : pd.Timestamp, None The last known value on this date is returned. If None, a value is returned only if we've only ever had one value for this sid. If None and we've had multiple values, MultipleValuesFoundForSid is raised. Raises ------ NoValueForSid If we have no values for this asset, or no value was known on this as_of_date. MultipleValuesFoundForSid If we have had multiple values for this asset over time, and None was passed for as_of_date. """
try: periods = self.equity_supplementary_map_by_sid[ field_name, sid, ] assert periods, 'empty periods list for %r' % ((field_name, sid),) except KeyError: raise NoValueForSid(field=field_name, sid=sid) if not as_of_date: if len(periods) > 1: # This equity has held more than one value; this is ambiguous # without the date raise MultipleValuesFoundForSid( field=field_name, sid=sid, options={p.value for p in periods}, ) # this equity has only ever held this value, we may resolve # without the date return periods[0].value for start, end, _, value in periods: if start <= as_of_date < end: return value # Could not find a value for this sid on the as_of_date. raise NoValueForSid(field=field_name, sid=sid)
<SYSTEM_TASK:> Convert asset_convertible to an asset. <END_TASK> <USER_TASK:> Description: def _lookup_generic_scalar(self, obj, as_of_date, country_code, matches, missing): """ Convert asset_convertible to an asset. On success, append to matches. On failure, append to missing. """
result = self._lookup_generic_scalar_helper( obj, as_of_date, country_code, ) if result is not None: matches.append(result) else: missing.append(obj)
<SYSTEM_TASK:> Convert an object into an Asset or sequence of Assets. <END_TASK> <USER_TASK:> Description: def lookup_generic(self, obj, as_of_date, country_code): """ Convert an object into an Asset or sequence of Assets. This method exists primarily as a convenience for implementing user-facing APIs that can handle multiple kinds of input. It should not be used for internal code where we already know the expected types of our inputs. Parameters ---------- obj : int, str, Asset, ContinuousFuture, or iterable The object to be converted into one or more Assets. Integers are interpreted as sids. Strings are interpreted as tickers. Assets and ContinuousFutures are returned unchanged. as_of_date : pd.Timestamp or None Timestamp to use to disambiguate ticker lookups. Has the same semantics as in `lookup_symbol`. country_code : str or None ISO-3166 country code to use to disambiguate ticker lookups. Has the same semantics as in `lookup_symbol`. Returns ------- matches, missing : tuple ``matches`` is the result of the conversion. ``missing`` is a list containing any values that couldn't be resolved. If ``obj`` is not an iterable, ``missing`` will be an empty list. """
matches = [] missing = [] # Interpret input as scalar. if isinstance(obj, (AssetConvertible, ContinuousFuture)): self._lookup_generic_scalar( obj=obj, as_of_date=as_of_date, country_code=country_code, matches=matches, missing=missing, ) try: return matches[0], missing except IndexError: if hasattr(obj, '__int__'): raise SidsNotFound(sids=[obj]) else: raise SymbolNotFound(symbol=obj) # Interpret input as iterable. try: iterator = iter(obj) except TypeError: raise NotAssetConvertible( "Input was not a AssetConvertible " "or iterable of AssetConvertible." ) for obj in iterator: self._lookup_generic_scalar( obj=obj, as_of_date=as_of_date, country_code=country_code, matches=matches, missing=missing, ) return matches, missing