text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
When the simulation is complete, run the full period risk report
<END_TASK>
<USER_TASK:>
Description:
def handle_simulation_end(self, data_portal):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
""" |
log.info(
'Simulated {} trading days\n'
'first open: {}\n'
'last close: {}',
self._session_count,
self._trading_calendar.session_open(self._first_session),
self._trading_calendar.session_close(self._last_session),
)
packet = {}
self.end_of_simulation(
packet,
self._ledger,
self._trading_calendar,
self._sessions,
data_portal,
self._benchmark_source,
)
return packet |
<SYSTEM_TASK:>
Encapsulates a set of custom command line arguments in key=value
<END_TASK>
<USER_TASK:>
Description:
def create_args(args, root):
"""
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
""" |
extension_args = {}
for arg in args:
parse_extension_arg(arg, extension_args)
for name in sorted(extension_args, key=len):
path = name.split('.')
update_namespace(root, path, extension_args[name]) |
<SYSTEM_TASK:>
Converts argument strings in key=value or key.namespace=value form
<END_TASK>
<USER_TASK:>
Description:
def parse_extension_arg(arg, arg_dict):
"""
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
""" |
match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
if match is None:
raise ValueError(
"invalid extension argument '%s', must be in key=value form" % arg
)
name = match.group(1)
value = match.group(4)
arg_dict[name] = value |
<SYSTEM_TASK:>
A recursive function that takes a root element, list of namespaces,
<END_TASK>
<USER_TASK:>
Description:
def update_namespace(namespace, path, name):
"""
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
""" |
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), six.string_types):
raise ValueError("Conflicting assignments at namespace"
" level '%s'" % path[0])
else:
a = Namespace()
setattr(namespace, path[0], a)
update_namespace(getattr(namespace, path[0]), path[1:], name) |
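Taken together, `create_args`, `parse_extension_arg`, and `update_namespace` turn strings like `broker.retry.count=3` into a chain of attribute lookups. Below is a minimal, self-contained sketch of the same idea using `argparse.Namespace` as a stand-in for zipline's `Namespace` class; the helper names and sample keys are illustrative only.

```python
import re
from argparse import Namespace

def parse_kv(arg, out):
    # Same key=value / key.sub=value pattern used above.
    m = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
    if m is None:
        raise ValueError("expected key=value form, got %r" % arg)
    out[m.group(1)] = m.group(4)

def assign(ns, path, value):
    # Walk (and create) intermediate Namespace objects, then set the leaf.
    if len(path) == 1:
        setattr(ns, path[0], value)
        return
    if not hasattr(ns, path[0]):
        setattr(ns, path[0], Namespace())
    assign(getattr(ns, path[0]), path[1:], value)

root, parsed = Namespace(), {}
for raw in ['capital_base=100000', 'broker.retry.count=3']:
    parse_kv(raw, parsed)
for key in sorted(parsed, key=len):          # shallow keys first, as above
    assign(root, key.split('.'), parsed[key])

print(root.capital_base)        # '100000' (values stay strings)
print(root.broker.retry.count)  # '3'
```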
<SYSTEM_TASK:>
Create a new registry for an extensible interface.
<END_TASK>
<USER_TASK:>
Description:
def create_registry(interface):
"""
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
""" |
if interface in custom_types:
raise ValueError('there is already a Registry instance '
'for the specified type')
custom_types[interface] = Registry(interface)
return interface |
<SYSTEM_TASK:>
Construct an object from a registered factory.
<END_TASK>
<USER_TASK:>
Description:
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
""" |
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
) |
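For orientation, `create_registry` plus `Registry.load` amount to a name-to-factory lookup: registering stores a zero-argument callable, and loading calls it, surfacing the available names on a miss. A minimal sketch of that pattern, not zipline's actual `Registry` class (all names below are illustrative):

```python
class SimpleRegistry(object):
    """Map names to zero-argument factories for a single interface."""

    def __init__(self, interface):
        self.interface = interface
        self._factories = {}

    def register(self, name, factory):
        self._factories[name] = factory
        return factory

    def load(self, name):
        try:
            return self._factories[name]()  # call the factory
        except KeyError:
            raise ValueError(
                "no %s factory registered under name %r, options are: %r"
                % (self.interface.__name__, name, sorted(self._factories))
            )

class Calendar(object):
    pass

registry = SimpleRegistry(Calendar)
registry.register('default', Calendar)
print(type(registry.load('default')).__name__)   # Calendar
# registry.load('missing') would raise ValueError listing ['default'].
```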
<SYSTEM_TASK:>
Pay commission based on dollar value of shares.
<END_TASK>
<USER_TASK:>
Description:
def calculate(self, order, transaction):
"""
Pay commission based on dollar value of shares.
""" |
cost_per_share = transaction.price * self.cost_per_dollar
return abs(transaction.amount) * cost_per_share |
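A quick worked example of the per-dollar commission above, with made-up numbers rather than zipline's defaults:

```python
# Illustrative figures: 200 shares filled at $10.00, 15 bps per dollar traded.
cost_per_dollar = 0.0015
price, amount = 10.00, 200

cost_per_share = price * cost_per_dollar   # $0.015 per share
commission = abs(amount) * cost_per_share
print(commission)                          # 3.0 dollars for the fill
```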
<SYSTEM_TASK:>
Creates a dictionary representing the state of the risk report.
<END_TASK>
<USER_TASK:>
Description:
def risk_metric_period(cls,
start_session,
end_session,
algorithm_returns,
benchmark_returns,
algorithm_leverages):
"""
Creates a dictionary representing the state of the risk report.
Parameters
----------
start_session : pd.Timestamp
Start of period (inclusive) to produce metrics on
end_session : pd.Timestamp
End of period (inclusive) to produce metrics on
algorithm_returns : pd.Series(pd.Timestamp -> float)
Series of algorithm returns as of the end of each session
benchmark_returns : pd.Series(pd.Timestamp -> float)
Series of benchmark returns as of the end of each session
algorithm_leverages : pd.Series(pd.Timestamp -> float)
Series of algorithm leverages as of the end of each session
Returns
-------
risk_metric : dict[str, any]
Dict of metrics with fields like:
{
'algorithm_period_return': 0.0,
'benchmark_period_return': 0.0,
'treasury_period_return': 0,
'excess_return': 0.0,
'alpha': 0.0,
'beta': 0.0,
'sharpe': 0.0,
'sortino': 0.0,
'period_label': '1970-01',
'trading_days': 0,
'algo_volatility': 0.0,
'benchmark_volatility': 0.0,
'max_drawdown': 0.0,
'max_leverage': 0.0,
}
""" |
algorithm_returns = algorithm_returns[
(algorithm_returns.index >= start_session) &
(algorithm_returns.index <= end_session)
]
# Benchmark needs to be masked to the same dates as the algo returns
benchmark_returns = benchmark_returns[
(benchmark_returns.index >= start_session) &
(benchmark_returns.index <= algorithm_returns.index[-1])
]
benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]
alpha, beta = ep.alpha_beta_aligned(
algorithm_returns.values,
benchmark_returns.values,
)
sharpe = ep.sharpe_ratio(algorithm_returns)
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(sharpe):
sharpe = 0.0
sortino = ep.sortino_ratio(
algorithm_returns.values,
_downside_risk=ep.downside_risk(algorithm_returns.values),
)
rval = {
'algorithm_period_return': algorithm_period_returns,
'benchmark_period_return': benchmark_period_returns,
'treasury_period_return': 0,
'excess_return': algorithm_period_returns,
'alpha': alpha,
'beta': beta,
'sharpe': sharpe,
'sortino': sortino,
'period_label': end_session.strftime("%Y-%m"),
'trading_days': len(benchmark_returns),
'algo_volatility': ep.annual_volatility(algorithm_returns),
'benchmark_volatility': ep.annual_volatility(benchmark_returns),
'max_drawdown': ep.max_drawdown(algorithm_returns.values),
'max_leverage': algorithm_leverages.max(),
}
# check if a field in rval is nan or inf, and replace it with None
# except period_label which is always a str
return {
k: (
None
if k != 'period_label' and not np.isfinite(v) else
v
)
for k, v in iteritems(rval)
} |
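Most of the work above is date-masking the two return series and compounding them; the ratios come from empyrical. A pandas-only sketch of just the masking and period-return steps, with invented dates and values:

```python
import pandas as pd

idx = pd.date_range('2016-01-04', periods=5, freq='B')
algo = pd.Series([0.010, -0.005, 0.002, 0.000, 0.003], index=idx)
bench = pd.Series([0.008, -0.002, 0.001, 0.001, 0.002], index=idx)

start, end = idx[1], idx[3]
algo_period = algo[(algo.index >= start) & (algo.index <= end)]
# The benchmark is masked to the same final date as the algo returns.
bench_period = bench[(bench.index >= start) &
                     (bench.index <= algo_period.index[-1])]

# Compounded period return, the quantity ep.cum_returns(...).iloc[-1] yields.
algo_period_return = (1 + algo_period).prod() - 1
bench_period_return = (1 + bench_period).prod() - 1
print(round(algo_period_return, 6), round(bench_period_return, 6))
```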
<SYSTEM_TASK:>
For the given root symbol, find the contract that is considered active
<END_TASK>
<USER_TASK:>
Description:
def _get_active_contract_at_offset(self, root_symbol, dt, offset):
"""
For the given root symbol, find the contract that is considered active
on a specific date at a specific offset.
""" |
oc = self.asset_finder.get_ordered_contracts(root_symbol)
session = self.trading_calendar.minute_to_session_label(dt)
front = oc.contract_before_auto_close(session.value)
back = oc.contract_at_offset(front, 1, dt.value)
if back is None:
return front
primary = self._active_contract(oc, front, back, session)
return oc.contract_at_offset(primary, offset, session.value) |
<SYSTEM_TASK:>
Get the rolls, i.e. the session at which to hop from contract to
<END_TASK>
<USER_TASK:>
Description:
def get_rolls(self, root_symbol, start, end, offset):
"""
Get the rolls, i.e. the session at which to hop from contract to
contract in the chain.
Parameters
----------
root_symbol : str
The root symbol for which to calculate rolls.
start : Timestamp
Start of the date range.
end : Timestamp
End of the date range.
offset : int
Offset from the primary.
Returns
-------
rolls : list[tuple(sid, roll_date)]
A list of rolls, where the first value is the active `sid` and the
second is the `roll_date` on which to hop to the next contract.
The last pair in the chain has a `roll_date` of `None` since the
roll is after the range.
""" |
oc = self.asset_finder.get_ordered_contracts(root_symbol)
front = self._get_active_contract_at_offset(root_symbol, end, 0)
back = oc.contract_at_offset(front, 1, end.value)
if back is not None:
end_session = self.trading_calendar.minute_to_session_label(end)
first = self._active_contract(oc, front, back, end_session)
else:
first = front
first_contract = oc.sid_to_contract[first]
rolls = [((first_contract >> offset).contract.sid, None)]
tc = self.trading_calendar
sessions = tc.sessions_in_range(tc.minute_to_session_label(start),
tc.minute_to_session_label(end))
freq = sessions.freq
if first == front:
# This is a bit tricky to grasp. Once we have the active contract
# on the given end date, we want to start walking backwards towards
# the start date and checking for rolls. For this, we treat the
# previous month's contract as the 'first' contract, and the
# contract we just found to be active as the 'back'. As we walk
# towards the start date, if the 'back' is no longer active, we add
# that date as a roll.
curr = first_contract << 1
else:
curr = first_contract << 2
session = sessions[-1]
while session > start and curr is not None:
front = curr.contract.sid
back = rolls[0][0]
prev_c = curr.prev
while session > start:
prev = session - freq
if prev_c is not None:
if prev < prev_c.contract.auto_close_date:
break
if back != self._active_contract(oc, front, back, prev):
# TODO: Instead of listing each contract with its roll date
# as tuples, create a series which maps every day to the
# active contract on that day.
rolls.insert(0, ((curr >> offset).contract.sid, session))
break
session = prev
curr = curr.prev
if curr is not None:
session = min(session, curr.contract.auto_close_date + freq)
return rolls |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def _active_contract(self, oc, front, back, dt):
r"""
Return the active contract based on the previous trading day's volume.
In the rare case that a double volume switch occurs we treat the first
switch as the roll. Take the following case for example:
| +++++ _____
| + __ / <--- 'G'
| ++/++\++++/++
| _/ \__/ +
| / +
| ____/ + <--- 'F'
|_________|__|___|________
a b c <--- Switches
We should treat 'a' as the roll date rather than 'c' because from the
perspective of 'a', if a switch happens and we are pretty close to the
auto-close date, we would probably assume it is time to roll. This
means that for every date after 'a', `data.current(cf, 'contract')`
should return the 'G' contract.
""" |
front_contract = oc.sid_to_contract[front].contract
back_contract = oc.sid_to_contract[back].contract
tc = self.trading_calendar
trading_day = tc.day
prev = dt - trading_day
get_value = self.session_reader.get_value
# If the front contract is past its auto close date it cannot be the
# active contract, so return the back contract. Similarly, if the back
# contract has not even started yet, just return the front contract.
# The reason for using 'prev' to see if the contracts are alive instead
# of using 'dt' is because we need to get each contract's volume on the
# previous day, so we need to make sure that each contract exists on
# 'prev' in order to call 'get_value' below.
if dt > min(front_contract.auto_close_date, front_contract.end_date):
return back
elif front_contract.start_date > prev:
return back
elif dt > min(back_contract.auto_close_date, back_contract.end_date):
return front
elif back_contract.start_date > prev:
return front
front_vol = get_value(front, prev, 'volume')
back_vol = get_value(back, prev, 'volume')
if back_vol > front_vol:
return back
gap_start = max(
back_contract.start_date,
front_contract.auto_close_date - (trading_day * self.GRACE_DAYS),
)
gap_end = prev - trading_day
if dt < gap_start:
return front
# If we are within `self.GRACE_DAYS` of the front contract's auto close
# date, and a volume flip happened during that period, return the back
# contract as the active one.
sessions = tc.sessions_in_range(
tc.minute_to_session_label(gap_start),
tc.minute_to_session_label(gap_end),
)
for session in sessions:
front_vol = get_value(front, session, 'volume')
back_vol = get_value(back, session, 'volume')
if back_vol > front_vol:
return back
return front |
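Stripped of the calendar and contract-chain plumbing, the rule above is: switch to the back contract once it out-trades the front on the prior session, and treat a flip that happened anywhere inside the grace window before the front's auto-close date as sticky. A toy sketch of just that volume scan (data and names are invented, not zipline's API):

```python
def active_contract(front_vols, back_vols):
    """Given per-session volumes inside the grace window (oldest first),
    return 'back' if the back contract out-traded the front on any session,
    otherwise 'front'."""
    for f, b in zip(front_vols, back_vols):
        if b > f:
            return 'back'   # first switch wins and sticks
    return 'front'

print(active_contract([1500, 1400], [900, 1600]))   # back: flipped on day 2
print(active_contract([1500, 1400], [900, 1000]))   # front: never flipped
```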
<SYSTEM_TASK:>
Coerce buffer data for an AdjustedArray into a standard scalar
<END_TASK>
<USER_TASK:>
Description:
def _normalize_array(data, missing_value):
"""
Coerce buffer data for an AdjustedArray into a standard scalar
representation, returning the coerced array and a dict of arguments to
pass to ``np.ndarray.view`` when providing a user-facing view of the
underlying data.
- float* data is coerced to float64 with viewtype float64.
- int32, int64, and uint32 are converted to int64 with viewtype int64.
- datetime[*] data is coerced to int64 with a viewtype of datetime64[ns].
- bool_ data is coerced to uint8 with a viewtype of bool_.
Parameters
----------
data : np.ndarray
Returns
-------
coerced, view_kwargs : (np.ndarray, dict)
""" |
if isinstance(data, LabelArray):
return data, {}
data_dtype = data.dtype
if data_dtype in BOOL_DTYPES:
return data.astype(uint8), {'dtype': dtype(bool_)}
elif data_dtype in FLOAT_DTYPES:
return data.astype(float64), {'dtype': dtype(float64)}
elif data_dtype in INT_DTYPES:
return data.astype(int64), {'dtype': dtype(int64)}
elif is_categorical(data_dtype):
if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES):
raise TypeError(
"Invalid missing_value for categorical array.\n"
"Expected None, bytes or unicode. Got %r." % missing_value,
)
return LabelArray(data, missing_value), {}
elif data_dtype.kind == 'M':
try:
outarray = data.astype('datetime64[ns]').view('int64')
return outarray, {'dtype': datetime64ns_dtype}
except OverflowError:
raise ValueError(
"AdjustedArray received a datetime array "
"not representable as datetime64[ns].\n"
"Min Date: %s\n"
"Max Date: %s\n"
% (data.min(), data.max())
)
else:
raise TypeError(
"Don't know how to construct AdjustedArray "
"on data of type %s." % data_dtype
) |
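The datetime branch above stores nanosecond integers internally and re-exposes them through a `datetime64[ns]` view. A small standalone numpy demonstration of that coerce-then-view round trip (not using AdjustedArray itself):

```python
import numpy as np

raw = np.array(['2016-01-04', '2016-01-05'], dtype='datetime64[D]')

# Coerce to int64 nanoseconds for storage...
stored = raw.astype('datetime64[ns]').view('int64')
print(stored.dtype)            # int64

# ...and hand back a datetime64[ns] view for user-facing access.
user_view = stored.view('datetime64[ns]')
print(user_view[0])            # 2016-01-04T00:00:00.000000000
```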
<SYSTEM_TASK:>
Merge lists of new and existing adjustments for a given index by appending
<END_TASK>
<USER_TASK:>
Description:
def _merge_simple(adjustment_lists, front_idx, back_idx):
"""
Merge lists of new and existing adjustments for a given index by appending
or prepending new adjustments to existing adjustments.
Notes
-----
This method is meant to be used with ``toolz.merge_with`` to merge
adjustment mappings. In case of a collision ``adjustment_lists`` contains
two lists, existing adjustments at index 0 and new adjustments at index 1.
When there are no collisions, ``adjustment_lists`` contains a single list.
Parameters
----------
adjustment_lists : list[list[Adjustment]]
List(s) of new and/or existing adjustments for a given index.
front_idx : int
Index of list in ``adjustment_lists`` that should be used as baseline
in case of a collision.
back_idx : int
Index of list in ``adjustment_lists`` that should extend baseline list
in case of a collision.
Returns
-------
adjustments : list[Adjustment]
List of merged adjustments for a given index.
""" |
if len(adjustment_lists) == 1:
return list(adjustment_lists[0])
else:
return adjustment_lists[front_idx] + adjustment_lists[back_idx] |
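`_merge_simple` is shaped to be the combining function for `toolz.merge_with`: keys present in only one dict arrive as a one-element list, while colliding keys arrive as `[existing, new]`. A small standalone illustration, with plain strings standing in for Adjustment objects:

```python
from toolz import merge_with

def merge_simple(adjustment_lists, front_idx=0, back_idx=1):
    # Same shape as _merge_simple above: single list -> copy,
    # collision -> baseline list extended by the other list.
    if len(adjustment_lists) == 1:
        return list(adjustment_lists[0])
    return adjustment_lists[front_idx] + adjustment_lists[back_idx]

existing = {0: ['adj_a'], 3: ['adj_b']}
new = {3: ['adj_c'], 5: ['adj_d']}

merged = merge_with(merge_simple, existing, new)
print(merged)   # {0: ['adj_a'], 3: ['adj_b', 'adj_c'], 5: ['adj_d']}
```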
<SYSTEM_TASK:>
Return the input as a numpy ndarray.
<END_TASK>
<USER_TASK:>
Description:
def ensure_ndarray(ndarray_or_adjusted_array):
"""
Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array
Returns
-------
out : The input, converted to an ndarray.
""" |
if isinstance(ndarray_or_adjusted_array, ndarray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array.data
else:
raise TypeError(
"Can't convert %s to ndarray" %
type(ndarray_or_adjusted_array).__name__
) |
<SYSTEM_TASK:>
Check that a window of length `window_length` is well-defined on `data`.
<END_TASK>
<USER_TASK:>
Description:
def _check_window_params(data, window_length):
"""
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`.
""" |
if window_length < 1:
raise WindowLengthNotPositive(window_length=window_length)
if window_length > data.shape[0]:
raise WindowLengthTooLong(
nrows=data.shape[0],
window_length=window_length,
) |
<SYSTEM_TASK:>
Merge ``adjustments`` with existing adjustments, handling index
<END_TASK>
<USER_TASK:>
Description:
def update_adjustments(self, adjustments, method):
"""
Merge ``adjustments`` with existing adjustments, handling index
collisions according to ``method``.
Parameters
----------
adjustments : dict[int -> list[Adjustment]]
The mapping of row indices to lists of adjustments that should be
appended to existing adjustments.
method : {'append', 'prepend'}
How to handle index collisions. If 'append', new adjustments will
be applied after previously-existing adjustments. If 'prepend', new
adjustments will be applied before previously-existing adjustments.
""" |
try:
merge_func = _merge_methods[method]
except KeyError:
raise ValueError(
"Invalid merge method %s\n"
"Valid methods are: %s" % (method, ', '.join(_merge_methods))
)
self.adjustments = merge_with(
merge_func,
self.adjustments,
adjustments,
) |
<SYSTEM_TASK:>
The iterator produced when `traverse` is called on this Array.
<END_TASK>
<USER_TASK:>
Description:
def _iterator_type(self):
"""
The iterator produced when `traverse` is called on this Array.
""" |
if isinstance(self._data, LabelArray):
return LabelWindow
return CONCRETE_WINDOW_TYPES[self._data.dtype] |
<SYSTEM_TASK:>
Produce an iterator of rolling windows over our data.
<END_TASK>
<USER_TASK:>
Description:
def traverse(self,
window_length,
offset=0,
perspective_offset=0):
"""
Produce an iterator of rolling windows over our data.
Each emitted window will have `window_length` rows.
Parameters
----------
window_length : int
The number of rows in each emitted window.
offset : int, optional
Number of rows to skip before the first window. Default is 0.
perspective_offset : int, optional
Number of rows past the end of the current window from which to
"view" the underlying data.
""" |
data = self._data.copy()
_check_window_params(data, window_length)
return self._iterator_type(
data,
self._view_kwargs,
self.adjustments,
offset,
window_length,
perspective_offset,
rounding_places=None,
) |
<SYSTEM_TASK:>
Return a string representation of the data stored in this array.
<END_TASK>
<USER_TASK:>
Description:
def inspect(self):
"""
Return a string representation of the data stored in this array.
""" |
return dedent(
"""\
Adjusted Array ({dtype}):
Data:
{data!r}
Adjustments:
{adjustments}
"""
).format(
dtype=self.dtype.name,
data=self.data,
adjustments=self.adjustments,
) |
<SYSTEM_TASK:>
Map a function over baseline and adjustment values in place.
<END_TASK>
<USER_TASK:>
Description:
def update_labels(self, func):
"""
Map a function over baseline and adjustment values in place.
Note that the baseline data values must be a LabelArray.
""" |
if not isinstance(self.data, LabelArray):
raise TypeError(
'update_labels only supported if data is of type LabelArray.'
)
# Map the baseline values.
self._data = self._data.map(func)
# Map each of the adjustments.
for _, row_adjustments in iteritems(self.adjustments):
for adjustment in row_adjustments:
adjustment.value = func(adjustment.value) |
<SYSTEM_TASK:>
Handle a TradingControlViolation, either by raising or logging an
<END_TASK>
<USER_TASK:>
Description:
def handle_violation(self, asset, amount, datetime, metadata=None):
"""
Handle a TradingControlViolation, either by raising or logging an
error with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`.
""" |
constraint = self._constraint_msg(metadata)
if self.on_error == 'fail':
raise TradingControlViolation(
asset=asset,
amount=amount,
datetime=datetime,
constraint=constraint)
elif self.on_error == 'log':
log.error("Order for {amount} shares of {asset} at {dt} "
"violates trading constraint {constraint}",
amount=amount, asset=asset, dt=datetime,
constraint=constraint) |
<SYSTEM_TASK:>
Fail if we've already placed self.max_count orders today.
<END_TASK>
<USER_TASK:>
Description:
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we've already placed self.max_count orders today.
""" |
algo_date = algo_datetime.date()
# Reset order count if it's a new day.
if self.current_date and self.current_date != algo_date:
self.orders_placed = 0
self.current_date = algo_date
if self.orders_placed >= self.max_count:
self.handle_violation(asset, amount, algo_datetime)
self.orders_placed += 1 |
<SYSTEM_TASK:>
Fail if the asset is in the restricted_list.
<END_TASK>
<USER_TASK:>
Description:
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the asset is in the restricted_list.
""" |
if self.restrictions.is_restricted(asset, algo_datetime):
self.handle_violation(asset, amount, algo_datetime) |
<SYSTEM_TASK:>
Fail if the magnitude of the given order exceeds either self.max_shares
<END_TASK>
<USER_TASK:>
Description:
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional.
""" |
if self.asset is not None and self.asset != asset:
return
if self.max_shares is not None and abs(amount) > self.max_shares:
self.handle_violation(asset, amount, algo_datetime)
current_asset_price = algo_current_data.current(asset, "price")
order_value = amount * current_asset_price
too_much_value = (self.max_notional is not None and
abs(order_value) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime) |
<SYSTEM_TASK:>
Fail if the given order would cause the magnitude of our position to be
<END_TASK>
<USER_TASK:>
Description:
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
""" |
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (self.max_shares is not None and
abs(shares_post_order) > self.max_shares)
if too_many_shares:
self.handle_violation(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, "price")
value_post_order = shares_post_order * current_price
too_much_value = (self.max_notional is not None and
abs(value_post_order) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime) |
<SYSTEM_TASK:>
Fail if we would hold negative shares of asset after completing this
<END_TASK>
<USER_TASK:>
Description:
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
""" |
if portfolio.positions[asset].amount + amount < 0:
self.handle_violation(asset, amount, algo_datetime) |
<SYSTEM_TASK:>
Fail if the algo has passed this Asset's end_date, or is before the
<END_TASK>
<USER_TASK:>
Description:
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the algo has passed this Asset's end_date, or is before the
Asset's start_date.
""" |
# If the order is for 0 shares, then silently pass through.
if amount == 0:
return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
# Fail if the algo is before this Asset's start_date
if asset.start_date:
normalized_start = pd.Timestamp(asset.start_date).normalize()
if normalized_algo_dt < normalized_start:
metadata = {
'asset_start_date': normalized_start
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata)
# Fail if the algo has passed this Asset's end_date
if asset.end_date:
normalized_end = pd.Timestamp(asset.end_date).normalize()
if normalized_algo_dt > normalized_end:
metadata = {
'asset_end_date': normalized_end
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata) |
<SYSTEM_TASK:>
Fail if the leverage is greater than the allowed leverage.
<END_TASK>
<USER_TASK:>
Description:
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
Fail if the leverage is greater than the allowed leverage.
""" |
if _account.leverage > self.max_leverage:
self.fail() |
<SYSTEM_TASK:>
Make validation checks if we are after the deadline.
<END_TASK>
<USER_TASK:>
Description:
def validate(self,
_portfolio,
account,
algo_datetime,
_algo_current_data):
"""
Make validation checks if we are after the deadline.
Fail if the leverage is less than the min leverage.
""" |
if (algo_datetime > self.deadline and
account.leverage < self.min_leverage):
self.fail() |
<SYSTEM_TASK:>
Alter columns from a table.
<END_TASK>
<USER_TASK:>
Description:
def alter_columns(op, name, *columns, **kwargs):
"""Alter columns from a table.
Parameters
----------
name : str
The name of the table.
*columns
The new columns to have.
selection_string : str, optional
The string to use in the selection. If not provided, it will select all
of the new columns from the old table.
Notes
-----
The columns are passed explicitly because this should only be used in a
downgrade where ``zipline.assets.asset_db_schema`` could change.
""" |
selection_string = kwargs.pop('selection_string', None)
if kwargs:
raise TypeError(
'alter_columns received extra arguments: %r' % sorted(kwargs),
)
if selection_string is None:
selection_string = ', '.join(column.name for column in columns)
tmp_name = '_alter_columns_' + name
op.rename_table(name, tmp_name)
for column in columns:
# Clear any indices that already exist on this table, otherwise we will
# fail to create the table because the indices will already be present.
# When we create the table below, the indices that we want to preserve
# will just get recreated.
for table in name, tmp_name:
try:
op.drop_index('ix_%s_%s' % (table, column.name))
except sa.exc.OperationalError:
pass
op.create_table(name, *columns)
op.execute(
'insert into %s select %s from %s' % (
name,
selection_string,
tmp_name,
),
)
op.drop_table(tmp_name) |
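Because SQLite cannot redefine columns in place, `alter_columns` uses the classic rename / recreate / copy / drop sequence. The same pattern written against raw `sqlite3`, outside alembic, with an invented table so the steps are visible:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE example (sid INTEGER, multiplier REAL)')
conn.execute('INSERT INTO example VALUES (1, 50.0)')

# 1. Move the old table out of the way.
conn.execute('ALTER TABLE example RENAME TO _alter_columns_example')
# 2. Recreate the table with the new column definitions.
conn.execute('CREATE TABLE example (sid INTEGER, contract_multiplier REAL)')
# 3. Copy the data across, selecting/renaming columns as needed.
conn.execute(
    'INSERT INTO example SELECT sid, multiplier FROM _alter_columns_example'
)
# 4. Drop the temporary table.
conn.execute('DROP TABLE _alter_columns_example')

print(conn.execute('SELECT * FROM example').fetchall())   # [(1, 50.0)]
```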
<SYSTEM_TASK:>
Downgrades the assets db at the given engine to the desired version.
<END_TASK>
<USER_TASK:>
Description:
def downgrade(engine, desired_version):
"""Downgrades the assets db at the given engine to the desired version.
Parameters
----------
engine : Engine
An SQLAlchemy engine to the assets database.
desired_version : int
The desired resulting version for the assets database.
""" |
# Check the version of the db at the engine
with engine.begin() as conn:
metadata = sa.MetaData(conn)
metadata.reflect()
version_info_table = metadata.tables['version_info']
starting_version = sa.select((version_info_table.c.version,)).scalar()
# Check for accidental upgrade
if starting_version < desired_version:
raise AssetDBImpossibleDowngrade(db_version=starting_version,
desired_version=desired_version)
# Check if the desired version is already the db version
if starting_version == desired_version:
# No downgrade needed
return
# Create alembic context
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
# Integer keys of downgrades to run
# E.g.: [5, 4, 3, 2] would downgrade v6 to v2
downgrade_keys = range(desired_version, starting_version)[::-1]
# Disable foreign keys until all downgrades are complete
_pragma_foreign_keys(conn, False)
# Execute the downgrades in order
for downgrade_key in downgrade_keys:
_downgrade_methods[downgrade_key](op, conn, version_info_table)
# Re-enable foreign keys
_pragma_foreign_keys(conn, True) |
<SYSTEM_TASK:>
Decorator for marking that a method is a downgrade from a version to the
<END_TASK>
<USER_TASK:>
Description:
def downgrades(src):
"""Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply.
""" |
def _(f):
destination = src - 1
@do(operator.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete()) # clear the version
f(op)
write_version_info(conn, version_info_table, destination)
return wrapper
return _ |
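`downgrades` is a decorator factory that files each wrapped function into `_downgrade_methods` keyed by the destination version (and, in the real code, rewrites the version row around the call). The registration half of that idea as a standalone sketch, leaving out the database bookkeeping; the names mirror the code above but the body is simplified:

```python
from functools import wraps

_downgrade_methods = {}

def downgrades(src):
    """Register the decorated function as the downgrade from ``src``
    to ``src - 1``, keyed by the destination version."""
    destination = src - 1

    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            # The real wrapper clears and rewrites version_info here.
            return f(*args, **kwargs)
        _downgrade_methods[destination] = wrapper
        return wrapper

    return decorator

@downgrades(src=2)
def _downgrade_v1():
    print('running downgrade to v1')

print(sorted(_downgrade_methods))   # [1]
_downgrade_methods[1]()             # running downgrade to v1
```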
<SYSTEM_TASK:>
Downgrade assets db by removing the 'tick_size' column and renaming the
<END_TASK>
<USER_TASK:>
Description:
def _downgrade_v1(op):
"""
Downgrade assets db by removing the 'tick_size' column and renaming the
'multiplier' column.
""" |
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_futures_contracts_root_symbol')
op.drop_index('ix_futures_contracts_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('futures_contracts') as batch_op:
# Rename 'multiplier'
batch_op.alter_column(column_name='multiplier',
new_column_name='contract_multiplier')
# Delete 'tick_size'
batch_op.drop_column('tick_size')
# Recreate indices after batch
op.create_index('ix_futures_contracts_root_symbol',
table_name='futures_contracts',
columns=['root_symbol'])
op.create_index('ix_futures_contracts_symbol',
table_name='futures_contracts',
columns=['symbol'],
unique=True) |
<SYSTEM_TASK:>
Downgrade assets db by removing the 'auto_close_date' column.
<END_TASK>
<USER_TASK:>
Description:
def _downgrade_v2(op):
"""
Downgrade assets db by removing the 'auto_close_date' column.
""" |
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('auto_close_date')
# Recreate indices after batch
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol']) |
<SYSTEM_TASK:>
Downgrade assets db by adding a not null constraint on
<END_TASK>
<USER_TASK:>
Description:
def _downgrade_v3(op):
"""
Downgrade assets db by adding a not null constraint on
``equities.first_traded``
""" |
op.create_table(
'_new_equities',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text),
)
op.execute(
"""
insert into _new_equities
select * from equities
where equities.first_traded is not null
""",
)
op.drop_table('equities')
op.rename_table('_new_equities', 'equities')
# we need to make sure the indices have the proper names after the rename
op.create_index(
'ix_equities_company_symbol',
'equities',
['company_symbol'],
)
op.create_index(
'ix_equities_fuzzy_symbol',
'equities',
['fuzzy_symbol'],
) |
<SYSTEM_TASK:>
Downgrades assets db by copying the `exchange_full` column to `exchange`,
<END_TASK>
<USER_TASK:>
Description:
def _downgrade_v4(op):
"""
Downgrades assets db by copying the `exchange_full` column to `exchange`,
then dropping the `exchange_full` column.
""" |
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
op.execute("UPDATE equities SET exchange = exchange_full")
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('exchange_full')
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol']) |
<SYSTEM_TASK:>
Create a family of metrics sets functions that read from the same
<END_TASK>
<USER_TASK:>
Description:
def _make_metrics_set_core():
"""Create a family of metrics sets functions that read from the same
metrics set mapping.
Returns
-------
metrics_sets : mappingproxy
The mapping of metrics sets to load functions.
register : callable
The function which registers new metrics sets in the ``metrics_sets``
mapping.
unregister : callable
The function which deregisters metrics sets from the ``metrics_sets``
mapping.
load : callable
The function which loads the ingested metrics sets back into memory.
""" |
_metrics_sets = {}
# Expose _metrics_sets through a proxy so that users cannot mutate this
# accidentally. Users may go through `register` to update this which will
# warn when trampling another metrics set.
metrics_sets = mappingproxy(_metrics_sets)
def register(name, function=None):
"""Register a new metrics set.
Parameters
----------
name : str
The name of the metrics set
function : callable
The callable which produces the metrics set.
Notes
-----
This may be used as a decorator if only ``name`` is passed.
See Also
--------
zipline.finance.metrics.get_metrics_set
zipline.finance.metrics.unregister_metrics_set
"""
if function is None:
# allow as decorator with just name.
return partial(register, name)
if name in _metrics_sets:
raise ValueError('metrics set %r is already registered' % name)
_metrics_sets[name] = function
return function
def unregister(name):
"""Unregister an existing metrics set.
Parameters
----------
name : str
The name of the metrics set
See Also
--------
zipline.finance.metrics.register_metrics_set
"""
try:
del _metrics_sets[name]
except KeyError:
raise ValueError(
'metrics set %r was not already registered' % name,
)
def load(name):
"""Return an instance of the metrics set registered with the given name.
Returns
-------
metrics : set[Metric]
A new instance of the metrics set.
Raises
------
ValueError
Raised when no metrics set is registered to ``name``
"""
try:
function = _metrics_sets[name]
except KeyError:
raise ValueError(
'no metrics set registered as %r, options are: %r' % (
name,
sorted(_metrics_sets),
),
)
return function()
return metrics_sets, register, unregister, load |
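Assuming the factory above is unpacked at import time the way zipline's metrics module does, `register` can be used as a decorator and `load` instantiates by name. A short usage sketch; the 'noop' metrics set is a made-up placeholder:

```python
metrics_sets, register, unregister, load = _make_metrics_set_core()

@register('noop')
def empty_metrics_set():
    # A metrics set is just a collection of metric objects; use an empty one.
    return set()

print('noop' in metrics_sets)   # True
print(load('noop'))             # set()
# load('does-not-exist') would raise ValueError listing ['noop'].
unregister('noop')
```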
<SYSTEM_TASK:>
Verify that the columns of ``events`` can be used by an
<END_TASK>
<USER_TASK:>
Description:
def validate_column_specs(events, columns):
"""
Verify that the columns of ``events`` can be used by an
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`.
""" |
required = required_estimates_fields(columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EarningsEstimatesLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
) |
<SYSTEM_TASK:>
Selects the requested data for each date.
<END_TASK>
<USER_TASK:>
Description:
def get_requested_quarter_data(self,
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates):
"""
Selects the requested data for each date.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
zeroth_quarter_idx : pd.Index
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next or previous earnings estimate.
stacked_last_per_qtr : pd.DataFrame
The latest estimate known with the dates, normalized quarter, and
sid as the index.
num_announcements : int
The number of announcements out that the user requested relative to
each date in the calendar dates.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
Returns
--------
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns; `dates` are the index and columns are a MultiIndex
with sids at the top level and the dataset columns on the bottom.
""" |
zero_qtr_data_idx = zero_qtr_data.index
requested_qtr_idx = pd.MultiIndex.from_arrays(
[
zero_qtr_data_idx.get_level_values(0),
zero_qtr_data_idx.get_level_values(1),
self.get_shifted_qtrs(
zeroth_quarter_idx.get_level_values(
NORMALIZED_QUARTERS,
),
num_announcements,
),
],
names=[
zero_qtr_data_idx.names[0],
zero_qtr_data_idx.names[1],
SHIFTED_NORMALIZED_QTRS,
],
)
requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx]
requested_qtr_data = requested_qtr_data.reset_index(
SHIFTED_NORMALIZED_QTRS,
)
# Calculate the actual year/quarter being requested and add those in
# as columns.
(requested_qtr_data[FISCAL_YEAR_FIELD_NAME],
requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \
split_normalized_quarters(
requested_qtr_data[SHIFTED_NORMALIZED_QTRS]
)
# Once we're left with just dates as the index, we can reindex by all
# dates so that we have a value for each calendar date.
return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates) |
<SYSTEM_TASK:>
Compute the index in `dates` where the split-adjusted-asof-date
<END_TASK>
<USER_TASK:>
Description:
def get_split_adjusted_asof_idx(self, dates):
"""
Compute the index in `dates` where the split-adjusted-asof-date
falls. This is the date up to and including which we will need to
un-apply all adjustments and then re-apply them as they come in.
After this date, adjustments are applied as normal.
Parameters
----------
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
Returns
-------
split_adjusted_asof_idx : int
The index in `dates` at which the data should be split.
""" |
split_adjusted_asof_idx = dates.searchsorted(
self._split_adjusted_asof
)
# The split-asof date is after the date index.
if split_adjusted_asof_idx == len(dates):
split_adjusted_asof_idx = len(dates) - 1
elif self._split_adjusted_asof < dates[0].tz_localize(None):
split_adjusted_asof_idx = -1
return split_adjusted_asof_idx |
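The boundary handling above clamps to the last position when the asof date falls after the whole window and returns the sentinel -1 when it falls before the first date. A small pandas illustration of the three cases, with invented dates and no tz handling:

```python
import pandas as pd

dates = pd.date_range('2015-01-05', periods=4, freq='B')   # Mon..Thu

def split_adjusted_asof_idx(asof, dates):
    idx = dates.searchsorted(asof)
    if idx == len(dates):        # asof is after the whole window
        return len(dates) - 1
    if asof < dates[0]:          # asof is before the window
        return -1
    return idx

print(split_adjusted_asof_idx(pd.Timestamp('2015-01-06'), dates))   # 1
print(split_adjusted_asof_idx(pd.Timestamp('2015-02-01'), dates))   # 3, clamped
print(split_adjusted_asof_idx(pd.Timestamp('2014-12-31'), dates))   # -1
```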
<SYSTEM_TASK:>
Given a sid, collect all overwrites that should be applied for this
<END_TASK>
<USER_TASK:>
Description:
def collect_overwrites_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_idx,
columns,
all_adjustments_for_sid,
sid):
"""
Given a sid, collect all overwrites that should be applied for this
sid at each quarter boundary.
Parameters
----------
group : pd.DataFrame
The data for `sid`.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_idx : int
The sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
all_adjustments_for_sid : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index for the given sid (`sid`). This dictionary is modified as
adjustments are collected.
sid : int
The sid for which overwrites should be computed.
""" |
# If data was requested for only 1 date, there can never be any
# overwrites, so skip the extra work.
if len(dates) == 1:
return
next_qtr_start_indices = dates.searchsorted(
group[EVENT_DATE_FIELD_NAME].values,
side=self.searchsorted_side,
)
qtrs_with_estimates = group.index.get_level_values(
NORMALIZED_QUARTERS
).values
for idx in next_qtr_start_indices:
if 0 < idx < len(dates):
# Find the quarter being requested in the quarter we're
# crossing into.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid,
].iloc[idx]
# Only add adjustments if the next quarter starts somewhere
# in our date index for this sid. Our 'next' quarter can
# never start at index 0; a starting index of 0 means that
# the next quarter's event date was NaT.
self.create_overwrites_for_quarter(
all_adjustments_for_sid,
idx,
last_per_qtr,
qtrs_with_estimates,
requested_quarter,
sid,
sid_idx,
columns
) |
<SYSTEM_TASK:>
Merge adjustments for a particular sid into a dictionary containing
<END_TASK>
<USER_TASK:>
Description:
def merge_into_adjustments_for_all_sids(self,
all_adjustments_for_sid,
col_to_all_adjustments):
"""
Merge adjustments for a particular sid into a dictionary containing
adjustments for all sids.
Parameters
----------
all_adjustments_for_sid : dict[int -> AdjustedArray]
All adjustments for a particular sid.
col_to_all_adjustments : dict[int -> AdjustedArray]
All adjustments for all sids.
""" |
for col_name in all_adjustments_for_sid:
if col_name not in col_to_all_adjustments:
col_to_all_adjustments[col_name] = {}
for ts in all_adjustments_for_sid[col_name]:
adjs = all_adjustments_for_sid[col_name][ts]
add_new_adjustments(col_to_all_adjustments,
adjs,
col_name,
ts) |
<SYSTEM_TASK:>
Creates an AdjustedArray from the given estimates data for the given
<END_TASK>
<USER_TASK:>
Description:
def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Creates an AdjustedArray from the given estimates data for the given
dates.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
assets : pd.Int64Index
An index of all the assets from the raw data.
columns : list of BoundColumn
The columns for which adjustments need to be calculated.
kwargs :
Additional keyword arguments that should be forwarded to
`get_adjustments_for_sid` and to be used in computing adjustments
for each sid.
Returns
-------
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of all adjustments that should be applied.
""" |
zero_qtr_data.sort_index(inplace=True)
# Here we want to get the LAST record from each group of records
# corresponding to a single quarter. This is to ensure that we select
# the most up-to-date event date in case the event date changes.
quarter_shifts = zero_qtr_data.groupby(
level=[SID_FIELD_NAME, NORMALIZED_QUARTERS]
).nth(-1)
col_to_all_adjustments = {}
sid_to_idx = dict(zip(assets, range(len(assets))))
quarter_shifts.groupby(level=SID_FIELD_NAME).apply(
self.get_adjustments_for_sid,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs
)
return col_to_all_adjustments |
<SYSTEM_TASK:>
Add entries to the dictionary of columns to adjustments for the given
<END_TASK>
<USER_TASK:>
Description:
def create_overwrites_for_quarter(self,
col_to_overwrites,
next_qtr_start_idx,
last_per_qtr,
quarters_with_estimates_for_sid,
requested_quarter,
sid,
sid_idx,
columns):
"""
Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites.
""" |
for col in columns:
column_name = self.name_map[col.name]
if column_name not in col_to_overwrites:
col_to_overwrites[column_name] = {}
# If there are estimates for the requested quarter,
# overwrite all values going up to the starting index of
# that quarter with estimates for that quarter.
if requested_quarter in quarters_with_estimates_for_sid:
adjs = self.create_overwrite_for_estimate(
col,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
)
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
# There are no estimates for the quarter. Overwrite all
# values going up to the starting index of that quarter
# with the missing value for this column.
else:
adjs = [self.overwrite_with_null(
col,
next_qtr_start_idx,
sid_idx)]
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx) |
<SYSTEM_TASK:>
Determine the last piece of information we know for each column on each
<END_TASK>
<USER_TASK:>
Description:
def get_last_data_per_qtr(self,
assets_with_data,
columns,
dates,
data_query_cutoff_times):
"""
Determine the last piece of information we know for each column on each
date in the index for each sid and quarter.
Parameters
----------
assets_with_data : pd.Index
Index of all assets that appear in the raw data given to the
loader.
columns : iterable of BoundColumn
The columns that need to be loaded from the raw data.
data_query_cutoff_times : pd.DatetimeIndex
The calendar of dates for which data should be loaded.
Returns
-------
stacked_last_per_qtr : pd.DataFrame
A DataFrame indexed by [dates, sid, normalized_quarters] that has
the latest information for each row of the index, sorted by event
date.
last_per_qtr : pd.DataFrame
A DataFrame with columns that are a MultiIndex of [
self.estimates.columns, normalized_quarters, sid].
""" |
# Get a DataFrame indexed by date with a MultiIndex of columns of
# [self.estimates.columns, normalized_quarters, sid], where each cell
# contains the latest data for that day.
last_per_qtr = last_in_date_group(
self.estimates,
data_query_cutoff_times,
assets_with_data,
reindex=True,
extra_groupers=[NORMALIZED_QUARTERS],
)
last_per_qtr.index = dates
# Forward fill values for each quarter/sid/dataset column.
ffill_across_cols(last_per_qtr, columns, self.name_map)
# Stack quarter and sid into the index.
stacked_last_per_qtr = last_per_qtr.stack(
[SID_FIELD_NAME, NORMALIZED_QUARTERS],
)
# Set date index name for ease of reference
stacked_last_per_qtr.index.set_names(
SIMULATION_DATES,
level=0,
inplace=True,
)
stacked_last_per_qtr = stacked_last_per_qtr.sort_values(
EVENT_DATE_FIELD_NAME,
)
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime(
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME]
)
return last_per_qtr, stacked_last_per_qtr |
<SYSTEM_TASK:>
Filters for releases that are on or after each simulation date and
<END_TASK>
<USER_TASK:>
Description:
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or after each simulation date and
determines the previous quarter by picking out the most recent
release relative to each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
previous_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a previous event.
""" |
previous_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(-1)
return previous_releases_per_date.index |
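The selection above leans on two facts: the frame has already been sorted by event date, and `.nth(-1)` keeps the last row per (simulation date, sid) group, i.e. the most recent release on or before each date. A compact pandas illustration with toy data and made-up column names:

```python
import pandas as pd

df = pd.DataFrame({
    'sim_date': pd.to_datetime(['2015-01-05'] * 3 + ['2015-01-06'] * 3),
    'sid': [1] * 6,
    'event_date': pd.to_datetime(
        ['2014-11-01', '2015-01-02', '2015-02-01'] * 2   # last one is future
    ),
    'estimate': [1.0, 1.1, 1.2, 1.0, 1.1, 1.2],
})

# Keep only releases on or before each simulation date...
known = df[df['event_date'] <= df['sim_date']]
# ...then, with rows sorted by event_date, take the last one per group.
latest = known.sort_values('event_date').groupby(['sim_date', 'sid']).nth(-1)
print(latest)   # one row per (sim_date, sid): the 2015-01-02 release
```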
<SYSTEM_TASK:>
Collects both overwrites and adjustments for a particular sid.
<END_TASK>
<USER_TASK:>
Description:
def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
split_adjusted_asof_idx=None,
split_adjusted_cols_for_group=None):
"""
Collects both overwrites and adjustments for a particular sid.
Parameters
----------
split_adjusted_asof_idx : int
The integer index of the date on which the data was split-adjusted.
split_adjusted_cols_for_group : list of str
The names of requested columns that should also be split-adjusted.
""" |
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
(pre_adjustments,
post_adjustments) = self.retrieve_split_adjustment_data_for_sid(
dates, sid, split_adjusted_asof_idx
)
sid_estimates = self.estimates[
self.estimates[SID_FIELD_NAME] == sid
]
# We might not have any overwrites but still have
# adjustments, and we will need to manually add columns if
# that is the case.
for col_name in split_adjusted_cols_for_group:
if col_name not in all_adjustments_for_sid:
all_adjustments_for_sid[col_name] = {}
self.collect_split_adjustments(
all_adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_to_idx[sid],
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
split_adjusted_cols_for_group
)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
) |
<SYSTEM_TASK:>
Calculates both split adjustments and overwrites for all sids.
<END_TASK>
<USER_TASK:>
Description:
def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Calculates both split adjustments and overwrites for all sids.
""" |
split_adjusted_cols_for_group = [
self.name_map[col.name]
for col in columns
if self.name_map[col.name] in self._split_adjusted_column_names
]
# Add all splits to the adjustment dict for this sid.
split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(
dates
)
return super(SplitAdjustedEstimatesLoader, self).get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
split_adjusted_cols_for_group=split_adjusted_cols_for_group,
split_adjusted_asof_idx=split_adjusted_asof_idx
) |
<SYSTEM_TASK:>
Determines the date until which the adjustment at the given date
<END_TASK>
<USER_TASK:>
Description:
def determine_end_idx_for_adjustment(self,
adjustment_ts,
dates,
upper_bound,
requested_quarter,
sid_estimates):
"""
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
""" |
end_idx = upper_bound
# Find the next newest kd that happens on or after
# the date of this adjustment
newest_kd_for_qtr = sid_estimates[
(sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) &
(sid_estimates[TS_FIELD_NAME] >= adjustment_ts)
][TS_FIELD_NAME].min()
if pd.notnull(newest_kd_for_qtr):
newest_kd_idx = dates.searchsorted(
newest_kd_for_qtr
)
# We have fresh information that comes in
# before the end of the overwrite and
# presumably is already split-adjusted to the
# current split. We should stop applying the
# adjustment the day before this new
# information comes in.
if newest_kd_idx <= upper_bound:
end_idx = newest_kd_idx - 1
return end_idx |
<SYSTEM_TASK:>
Collect split adjustments that occur before the
<END_TASK>
<USER_TASK:>
Description:
def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
The adjustment values and their indexes in `dates`, for
adjustments that happened on or before the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
""" |
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
col_to_split_adjustments[column_name][0] = [Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
1 / future_adjustment
) for future_adjustment in adjustment_values]
for adjustment, date_index in zip(adjustment_values,
date_indexes):
adj = Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(col_to_split_adjustments,
[adj],
column_name,
date_index)
return col_to_split_adjustments |
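The un-apply / re-apply idea above is easiest to see with numbers. Suppose the vendor's data is already adjusted for a 2-for-1 split (ratio 0.5) that happened before the asof date: viewed as of a session before the split, a point-in-time consumer should see the raw pre-split figure, and from the split session onward the adjusted one. A loose numeric sketch of that intent, ignoring the AdjustedArray row/perspective machinery (all values invented):

```python
ratio = 0.5              # 2-for-1 split that occurred before the asof date
vendor_value = 10.0      # per-share estimate as delivered, already halved
split_session_idx = 2    # position of the split inside the date window

raw_value = vendor_value * (1 / ratio)   # un-applied: 20.0, the pre-split figure

for session in range(5):
    # Re-apply the split only once the session has reached the split date.
    seen = raw_value * ratio if session >= split_session_idx else raw_value
    print(session, seen)   # 20.0 before the split, 10.0 on and after it
```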
<SYSTEM_TASK:>
Collect split adjustments that occur after the
<END_TASK>
<USER_TASK:>
Description:
def collect_post_asof_split_adjustments(self,
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns):
"""
Collect split adjustments that occur after the
split-adjusted-asof-date. Each adjustment needs to be applied to all
dates on which knowledge for the requested quarter was older than the
date of the adjustment.
Parameters
----------
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for this sid.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred after the
split-asof-date.
""" |
col_to_split_adjustments = {}
if post_adjustments:
# Get an integer index
requested_qtr_timeline = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS
][sid].reset_index()
requested_qtr_timeline = requested_qtr_timeline[
requested_qtr_timeline[sid].notnull()
]
# Split the data into range by quarter and determine which quarter
# was being requested in each range.
# Split integer indexes up by quarter range
qtr_ranges_idxs = np.split(
requested_qtr_timeline.index,
np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1
)
requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]]
for r in qtr_ranges_idxs]
# Try to apply each adjustment to each quarter range.
for i, qtr_range in enumerate(qtr_ranges_idxs):
for adjustment, date_index, timestamp in zip(
*post_adjustments
):
# In the default case, apply through the end of the quarter
upper_bound = qtr_range[-1]
# Find the smallest KD in estimates that is on or after the
# date of the given adjustment. Apply the given adjustment
# until that KD.
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
requested_qtr_data.index,
upper_bound,
requested_quarters_per_range[i],
sid_estimates
)
# In the default case, apply adjustment on the first day of
# the quarter.
start_idx = qtr_range[0]
# If the adjustment happens during this quarter, apply the
# adjustment on the day it happens.
if date_index > start_idx:
start_idx = date_index
# We only want to apply the adjustment if we have any stale
# data to apply it to.
if qtr_range[0] <= end_idx:
for column_name in requested_split_adjusted_columns:
if column_name not in col_to_split_adjustments:
col_to_split_adjustments[column_name] = {}
adj = Float64Multiply(
# Always apply from first day of qtr
qtr_range[0],
end_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(
col_to_split_adjustments,
[adj],
column_name,
start_idx
)
return col_to_split_adjustments |
<SYSTEM_TASK:>
Merge split adjustments with the dict containing overwrites.
<END_TASK>
<USER_TASK:>
Description:
def merge_split_adjustments_with_overwrites(
self,
pre,
post,
overwrites,
requested_split_adjusted_columns
):
"""
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
""" |
for column_name in requested_split_adjusted_columns:
# We can do a merge here because the timestamps in 'pre' and
# 'post' are guaranteed to not overlap.
if pre:
# Either empty or contains all columns.
for ts in pre[column_name]:
add_new_adjustments(
overwrites,
pre[column_name][ts],
column_name,
ts
)
if post:
# Either empty or contains all columns.
for ts in post[column_name]:
add_new_adjustments(
overwrites,
post[column_name][ts],
column_name,
ts
) |
<SYSTEM_TASK:>
Collect split adjustments for previous quarters and apply them to the
<END_TASK>
<USER_TASK:>
Description:
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for previous quarters and apply them to the
given dictionary of splits for the given sid. Since overwrites just
replace all estimates before the new quarter with NaN, we don't need to
worry about re-applying split adjustments.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int))
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
""" |
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
) |
<SYSTEM_TASK:>
Collect split adjustments for future quarters. Re-apply adjustments
<END_TASK>
<USER_TASK:>
Description:
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
pre_adjustments : tuple(list(float), list(int))
The adjustment values and indexes in `dates` for
adjustments that happened before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
""" |
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
for column_name in requested_split_adjusted_columns:
for overwrite_ts in adjustments_for_sid[column_name]:
# We need to cumulatively re-apply all adjustments up to the
# split-adjusted-asof-date. We might not have any
# pre-adjustments, so we should check for that.
if overwrite_ts <= split_adjusted_asof_idx \
and pre_adjustments_dict:
for split_ts in pre_adjustments_dict[column_name]:
# The split has to have occurred during the span of
# the overwrite.
if split_ts < overwrite_ts:
# Create new adjustments here so that we can
# re-apply all applicable adjustments to ONLY
# the dates being overwritten.
adjustments_for_sid[
column_name
][overwrite_ts].extend([
Float64Multiply(
0,
overwrite_ts - 1,
sid_idx,
sid_idx,
adjustment.value
)
for adjustment
in pre_adjustments_dict[
column_name
][split_ts]
])
# After the split-adjusted-asof-date, we need to re-apply all
# adjustments that occur after that date and within the
# bounds of the overwrite. They need to be applied starting
# from the first date and until an end date. The end date is
# the date of the newest information we get about
# `requested_quarter` that is >= `split_ts`, or if there is no
# new knowledge before `overwrite_ts`, then it is the date
# before `overwrite_ts`.
else:
# Overwrites happen at the first index of a new quarter,
# so determine here which quarter that is.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid
].iloc[overwrite_ts]
for adjustment_value, date_index, timestamp in zip(
*post_adjustments
):
if split_adjusted_asof_idx < date_index < overwrite_ts:
# Assume the entire overwrite contains stale data
upper_bound = overwrite_ts - 1
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
dates,
upper_bound,
requested_quarter,
sid_estimates
)
adjustments_for_sid[
column_name
][overwrite_ts].append(
Float64Multiply(
0,
end_idx,
sid_idx,
sid_idx,
adjustment_value
)
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
) |
<SYSTEM_TASK:>
Convenience constructor for passing `decay_rate` in terms of `span`.
<END_TASK>
<USER_TASK:>
Description:
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
""" |
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) |
<SYSTEM_TASK:>
Convenience constructor for passing ``decay_rate`` in terms of half
<END_TASK>
<USER_TASK:>
Description:
def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of half
life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[EquityPricing.close],
window_length=30,
halflife=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
""" |
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
) |
<SYSTEM_TASK:>
Convenience constructor for passing `decay_rate` in terms of center of
<END_TASK>
<USER_TASK:>
Description:
def from_center_of_mass(cls,
inputs,
window_length,
center_of_mass,
**kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / 16.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[EquityPricing.close],
window_length=30,
center_of_mass=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
""" |
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs
) |
<SYSTEM_TASK:>
Check if a and b are equal with some tolerance.
<END_TASK>
<USER_TASK:>
Description:
def tolerant_equals(a, b, atol=10e-7, rtol=10e-7, equal_nan=False):
"""Check if a and b are equal with some tolerance.
Parameters
----------
a, b : float
The floats to check for equality.
atol : float, optional
The absolute tolerance.
rtol : float, optional
The relative tolerance.
equal_nan : bool, optional
Should NaN compare equal?
See Also
--------
numpy.isclose
Notes
-----
This function is just a scalar version of numpy.isclose for performance.
See the docstring of ``isclose`` for more information about ``atol`` and
``rtol``.
""" |
if equal_nan and isnan(a) and isnan(b):
return True
return math.fabs(a - b) <= (atol + rtol * math.fabs(b)) |
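For example, with the helper above in scope, sums that differ only by floating-point error compare equal, while genuinely different values do not:
assert 0.1 + 0.2 != 0.3                   # exact comparison fails
assert tolerant_equals(0.1 + 0.2, 0.3)    # tolerant comparison succeeds
assert not tolerant_equals(1.0, 1.001)    # beyond the tolerances, still unequal
assert tolerant_equals(float('nan'), float('nan'), equal_nan=True)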
<SYSTEM_TASK:>
Round a to the nearest integer if that integer is within an epsilon
<END_TASK>
<USER_TASK:>
Description:
def round_if_near_integer(a, epsilon=1e-4):
"""
Round a to the nearest integer if that integer is within an epsilon
of a.
""" |
if abs(a - round(a)) <= epsilon:
return round(a)
else:
return a |
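For instance, with the helper above in scope, values within the default epsilon of an integer are snapped to that integer, while anything farther away is returned unchanged:
assert round_if_near_integer(2.99995) == 3
assert round_if_near_integer(100.00002) == 100
assert round_if_near_integer(2.9) == 2.9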
<SYSTEM_TASK:>
Compute the expected return dtype for the given binary operator.
<END_TASK>
<USER_TASK:>
Description:
def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
""" |
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype |
<SYSTEM_TASK:>
Factory function for making binary operator methods on a Factor subclass.
<END_TASK>
<USER_TASK:>
Description:
def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
""" |
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
@with_doc("Binary Operator: '%s'" % op)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = binop_return_type(op)
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator |
<SYSTEM_TASK:>
Factory function for making binary operator methods on a Factor.
<END_TASK>
<USER_TASK:>
Description:
def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
""" |
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator |
<SYSTEM_TASK:>
Factory function for making unary operator methods for Factors.
<END_TASK>
<USER_TASK:>
Description:
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
""" |
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator |
<SYSTEM_TASK:>
Factory function for producing function application methods for Factor
<END_TASK>
<USER_TASK:>
Description:
def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
""" |
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
@with_doc(func)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc |
<SYSTEM_TASK:>
This implementation is based on scipy.stats.mstats.winsorize
<END_TASK>
<USER_TASK:>
Description:
def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
""" |
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
return a |
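With the row helper above in scope, a concrete call on hypothetical values clips the bottom 20% of entries up to the 20th-percentile value and the top 20% down to the 80th-percentile value:
import numpy as np
row = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
print(winsorize(row, min_percentile=0.2, max_percentile=0.8))
# [2. 2. 3. 4. 4.]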
<SYSTEM_TASK:>
Construct a Factor that computes ``self`` and subtracts the mean from
<END_TASK>
<USER_TASK:>
Description:
def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will be
written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
""" |
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
) |
<SYSTEM_TASK:>
Construct a Factor that Z-Scores each day's results.
<END_TASK>
<USER_TASK:>
Description:
def zscore(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
A Factor that z-scores the output of ``self``.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with a factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.zscore(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``zscore()`` is only supported on Factors of dtype float64.
Examples
--------
See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby`
""" |
return GroupedRowTransform(
transform=zscore,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=True,
) |
<SYSTEM_TASK:>
Construct a new Factor representing the sorted rank of each column
<END_TASK>
<USER_TASK:>
Description:
def rank(self,
method='ordinal',
ascending=True,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`zipline.pipeline.factors.factor.Rank`
""" |
if groupby is NotSpecified:
return Rank(self, method=method, ascending=ascending, mask=mask)
return GroupedRowTransform(
transform=rankdata if ascending else rankdata_1d_descending,
transform_args=(method,),
factor=self,
groupby=groupby,
dtype=float64_dtype,
missing_value=nan,
mask=mask,
window_safe=True,
) |
<SYSTEM_TASK:>
Construct a new Factor that computes rolling pearson correlation
<END_TASK>
<USER_TASK:>
Description:
def pearsonr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling pearson correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingPearson
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr`
""" |
from .statistical import RollingPearson
return RollingPearson(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
) |
<SYSTEM_TASK:>
Construct a new Factor that computes rolling spearman rank correlation
<END_TASK>
<USER_TASK:>
Description:
def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
""" |
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
) |
<SYSTEM_TASK:>
Construct a new Factor that performs an ordinary least-squares
<END_TASK>
<USER_TASK:>
Description:
def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.factors.RollingLinearRegression
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
""" |
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
) |
<SYSTEM_TASK:>
Construct a new factor that winsorizes the result of this factor.
<END_TASK>
<USER_TASK:>
Description:
def winsorize(self,
min_percentile,
max_percentile,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, winsorization is applied separately to each
group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with columns, defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
""" |
if not 0.0 <= min_percentile < max_percentile <= 1.0:
raise BadPercentileBounds(
min_percentile=min_percentile,
max_percentile=max_percentile,
upper_bound=1.0,
)
return GroupedRowTransform(
transform=winsorize,
transform_args=(min_percentile, max_percentile),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=self.window_safe,
) |
<SYSTEM_TASK:>
Construct a Classifier computing quantiles of the output of ``self``.
<END_TASK>
<USER_TASK:>
Description:
def quantiles(self, bins, mask=NotSpecified):
"""
Construct a Classifier computing quantiles of the output of ``self``.
Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bin labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to (bins - 1).
""" |
if mask is NotSpecified:
mask = self.mask
return Quantiles(inputs=(self,), bins=bins, mask=mask) |
<SYSTEM_TASK:>
Construct a Filter matching the top N asset values of self each day.
<END_TASK>
<USER_TASK:>
Description:
def top(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the top N asset
values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.filters.Filter
""" |
if N == 1:
# Special case: if N == 1, we can avoid doing a full sort on every
# group, which is a big win.
return self._maximum(mask=mask, groupby=groupby)
return self.rank(ascending=False, mask=mask, groupby=groupby) <= N |
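As a usage sketch (assuming the built-in ``AverageDollarVolume`` factor and the standard ``Pipeline`` constructor), ``top`` is commonly used to build a liquidity screen:
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import AverageDollarVolume
adv = AverageDollarVolume(window_length=20)
most_liquid = adv.top(500)  # Filter passing the 500 highest-ADV assets each day
pipe = Pipeline(columns={'adv': adv}, screen=most_liquid)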
<SYSTEM_TASK:>
Construct a Filter matching the bottom N asset values of self each day.
<END_TASK>
<USER_TASK:>
Description:
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
""" |
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N |
<SYSTEM_TASK:>
Construct a new Filter representing entries from the output of this
<END_TASK>
<USER_TASK:>
Description:
def percentile_between(self,
min_percentile,
max_percentile,
mask=NotSpecified):
"""
Construct a new Filter representing entries from the output of this
Factor that fall within the percentile range defined by min_percentile
and max_percentile.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when calculating percentile
thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.filters.PercentileFilter
A new filter that will compute the specified percentile-range mask.
See Also
--------
zipline.pipeline.filters.filter.PercentileFilter
""" |
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
) |
<SYSTEM_TASK:>
Verify that the stored rank method is valid.
<END_TASK>
<USER_TASK:>
Description:
def _validate(self):
"""
Verify that the stored rank method is valid.
""" |
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate() |
<SYSTEM_TASK:>
For each row in the input, compute a like-shaped array of per-row
<END_TASK>
<USER_TASK:>
Description:
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
""" |
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
) |
<SYSTEM_TASK:>
Find the index of ``dt`` in ``dts``.
<END_TASK>
<USER_TASK:>
Description:
def find_in_sorted_index(dts, dt):
"""
Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
LookupError
If dt is not in ``dts``.
""" |
ix = dts.searchsorted(dt)
if ix == len(dts) or dts[ix] != dt:
raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
return ix |
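For example, with the helper above in scope and a sorted DatetimeIndex:
import pandas as pd
dts = pd.date_range('2014-01-06', periods=5, freq='D')
assert find_in_sorted_index(dts, pd.Timestamp('2014-01-08')) == 2
# Dates not present in the index raise LookupError.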
<SYSTEM_TASK:>
Find values in ``dts`` closest but not equal to ``dt``.
<END_TASK>
<USER_TASK:>
Description:
def nearest_unequal_elements(dts, dt):
"""
Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
When ``dt`` is greater than any element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds.
""" |
if not dts.is_unique:
raise ValueError("dts must be unique")
if not dts.is_monotonic_increasing:
raise ValueError("dts must be sorted in increasing order")
if not len(dts):
return None, None
sortpos = dts.searchsorted(dt, side='left')
try:
sortval = dts[sortpos]
except IndexError:
# dt is greater than any value in the array.
return dts[-1], None
if dt < sortval:
lower_ix = sortpos - 1
upper_ix = sortpos
elif dt == sortval:
lower_ix = sortpos - 1
upper_ix = sortpos + 1
else:
lower_ix = sortpos
upper_ix = sortpos + 1
lower_value = dts[lower_ix] if lower_ix >= 0 else None
upper_value = dts[upper_ix] if upper_ix < len(dts) else None
return lower_value, upper_value |
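For example, with the helper above in scope and hypothetical dates:
import pandas as pd
dts = pd.to_datetime(['2014-01-01', '2014-01-05', '2014-01-10'])
# dt present in the index: strictly unequal neighbors on both sides.
assert nearest_unequal_elements(dts, pd.Timestamp('2014-01-05')) == (pd.Timestamp('2014-01-01'), pd.Timestamp('2014-01-10'))
# dt earlier than everything in the index: nothing before it.
assert nearest_unequal_elements(dts, pd.Timestamp('2013-12-31')) == (None, pd.Timestamp('2014-01-01'))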
<SYSTEM_TASK:>
Prepare list of pandas DataFrames to be used as input to pd.concat.
<END_TASK>
<USER_TASK:>
Description:
def categorical_df_concat(df_list, inplace=False):
"""
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : pd.DataFrame
The concatenated DataFrame.
""" |
if not inplace:
df_list = deepcopy(df_list)
# Assert each dataframe has the same columns/dtypes
df = df_list[0]
if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]):
raise ValueError("Input DataFrames must have the same columns/dtypes.")
categorical_columns = df.columns[df.dtypes == 'category']
for col in categorical_columns:
new_categories = sorted(
set().union(
*(frame[col].cat.categories for frame in df_list)
)
)
with ignore_pandas_nan_categorical_warning():
for df in df_list:
df[col].cat.set_categories(new_categories, inplace=True)
return pd.concat(df_list) |
<SYSTEM_TASK:>
Check that a list of Index objects are all equal.
<END_TASK>
<USER_TASK:>
Description:
def check_indexes_all_same(indexes, message="Indexes are not equal."):
"""Check that a list of Index objects are all equal.
Parameters
----------
indexes : iterable[pd.Index]
Iterable of indexes to check.
Raises
------
ValueError
If the indexes are not all the same.
""" |
iterator = iter(indexes)
first = next(iterator)
for other in iterator:
same = (first == other)
if not same.all():
bad_loc = np.flatnonzero(~same)[0]
raise ValueError(
"{}\nFirst difference is at index {}: "
"{} != {}".format(
message, bad_loc, first[bad_loc], other[bad_loc]
),
) |
<SYSTEM_TASK:>
Compute the set of resource columns required to serve
<END_TASK>
<USER_TASK:>
Description:
def required_event_fields(next_value_columns, previous_value_columns):
"""
Compute the set of resource columns required to serve
``next_value_columns`` and ``previous_value_columns``.
""" |
# These metadata columns are used to align event indexers.
return {
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
}.union(
# We also expect any of the field names that our loadable columns
# are mapped to.
viewvalues(next_value_columns),
viewvalues(previous_value_columns),
) |
<SYSTEM_TASK:>
Verify that the columns of ``events`` can be used by an EventsLoader to
<END_TASK>
<USER_TASK:>
Description:
def validate_column_specs(events, next_value_columns, previous_value_columns):
"""
Verify that the columns of ``events`` can be used by an EventsLoader to
serve the BoundColumns described by ``next_value_columns`` and
``previous_value_columns``.
""" |
required = required_event_fields(next_value_columns,
previous_value_columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EventsLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
) |
<SYSTEM_TASK:>
Split requested columns into columns that should load the next known
<END_TASK>
<USER_TASK:>
Description:
def split_next_and_previous_event_columns(self, requested_columns):
"""
Split requested columns into columns that should load the next known
value and columns that should load the previous known value.
Parameters
----------
requested_columns : iterable[BoundColumn]
Returns
-------
next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]
``requested_columns``, partitioned into sub-sequences based on
whether the column should produce values from the next event or the
previous event
""" |
def next_or_previous(c):
if c in self.next_value_columns:
return 'next'
elif c in self.previous_value_columns:
return 'previous'
raise ValueError(
"{c} not found in next_value_columns "
"or previous_value_columns".format(c=c)
)
groups = groupby(next_or_previous, requested_columns)
return groups.get('next', ()), groups.get('previous', ()) |
<SYSTEM_TASK:>
Rehydrate a LabelArray from the codes and metadata.
<END_TASK>
<USER_TASK:>
Description:
def from_codes_and_metadata(cls,
codes,
categories,
reverse_categories,
missing_value):
"""
Rehydrate a LabelArray from the codes and metadata.
Parameters
----------
codes : np.ndarray[integral]
The codes for the label array.
categories : np.ndarray[object]
The unique string categories.
reverse_categories : dict[str, int]
The mapping from category to its code-index.
missing_value : any
The value used to represent missing data.
""" |
ret = codes.view(type=cls, dtype=np.void)
ret._categories = categories
ret._reverse_categories = reverse_categories
ret._missing_value = missing_value
return ret |
<SYSTEM_TASK:>
Convert self into a regular ndarray of ints.
<END_TASK>
<USER_TASK:>
Description:
def as_int_array(self):
"""
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
""" |
return self.view(
type=ndarray,
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
) |
<SYSTEM_TASK:>
Coerce self into a pandas categorical.
<END_TASK>
<USER_TASK:>
Description:
def as_categorical(self):
"""
Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports.
""" |
if len(self.shape) > 1:
raise ValueError("Can't convert a 2D array to a categorical.")
with ignore_pandas_nan_categorical_warning():
return pd.Categorical.from_codes(
self.as_int_array(),
# We need to make a copy because pandas >= 0.17 fails if this
# buffer isn't writeable.
self.categories.copy(),
ordered=False,
) |
<SYSTEM_TASK:>
Coerce self into a pandas DataFrame of Categoricals.
<END_TASK>
<USER_TASK:>
Description:
def as_categorical_frame(self, index, columns, name=None):
"""
Coerce self into a pandas DataFrame of Categoricals.
""" |
if len(self.shape) != 2:
raise ValueError(
"Can't convert a non-2D LabelArray into a DataFrame."
)
expected_shape = (len(index), len(columns))
if expected_shape != self.shape:
raise ValueError(
"Can't construct a DataFrame with provided indices:\n\n"
"LabelArray shape is {actual}, but index and columns imply "
"that shape should be {expected}.".format(
actual=self.shape,
expected=expected_shape,
)
)
return pd.Series(
index=pd.MultiIndex.from_product([index, columns]),
data=self.ravel().as_categorical(),
name=name,
).unstack() |
<SYSTEM_TASK:>
Set scalar value into the array.
<END_TASK>
<USER_TASK:>
Description:
def set_scalar(self, indexer, value):
"""
Set scalar value into the array.
Parameters
----------
indexer : any
The indexer to set the value at.
value : str
The value to assign at the given locations.
Raises
------
ValueError
Raised when ``value`` is not a valid element of this label
array.
""" |
try:
value_code = self.reverse_categories[value]
except KeyError:
raise ValueError("%r is not in LabelArray categories." % value)
self.as_int_array()[indexer] = value_code |
<SYSTEM_TASK:>
Make an empty LabelArray with the same categories as ``self``, filled
<END_TASK>
<USER_TASK:>
Description:
def empty_like(self, shape):
"""
Make an empty LabelArray with the same categories as ``self``, filled
with ``self.missing_value``.
""" |
return type(self).from_codes_and_metadata(
codes=np.full(
shape,
self.reverse_categories[self.missing_value],
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
),
categories=self.categories,
reverse_categories=self.reverse_categories,
missing_value=self.missing_value,
) |
<SYSTEM_TASK:>
Map a function from str -> bool element-wise over ``self``.
<END_TASK>
<USER_TASK:>
Description:
def map_predicate(self, f):
"""
Map a function from str -> bool element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always return False.
""" |
# Functions passed to this are of type str -> bool. Don't ever call
# them on None, which is the only non-str value we ever store in
# categories.
if self.missing_value is None:
def f_to_use(x):
return False if x is None else f(x)
else:
f_to_use = f
# Call f on each unique value in our categories.
results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)
# missing_value should produce False no matter what
results[self.reverse_categories[self.missing_value]] = False
# unpack the results from each unique value into their corresponding
# locations in our indices.
return results[self.as_int_array()] |
<SYSTEM_TASK:>
Map a function from str -> str element-wise over ``self``.
<END_TASK>
<USER_TASK:>
Description:
def map(self, f):
"""
Map a function from str -> str element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always map to ``self.missing_value``.
""" |
# f() should only return None if None is our missing value.
if self.missing_value is None:
allowed_outtypes = self.SUPPORTED_SCALAR_TYPES
else:
allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES
def f_to_use(x,
missing_value=self.missing_value,
otypes=allowed_outtypes):
# Don't call f on the missing value; those locations don't exist
# semantically. We return _sortable_sentinel rather than None
# because the np.unique call below sorts the categories array,
# which raises an error on Python 3 because None and str aren't
# comparable.
if x == missing_value:
return _sortable_sentinel
ret = f(x)
if not isinstance(ret, otypes):
raise TypeError(
"LabelArray.map expected function {f} to return a string"
" or None, but got {type} instead.\n"
"Value was {value}.".format(
f=f.__name__,
type=type(ret).__name__,
value=ret,
)
)
if ret == missing_value:
return _sortable_sentinel
return ret
new_categories_with_duplicates = (
np.vectorize(f_to_use, otypes=[object])(self.categories)
)
# If f() maps multiple inputs to the same output, then we can end up
# with the same code duplicated multiple times. Compress the categories
# by running them through np.unique, and then use the reverse lookup
# table to compress codes as well.
new_categories, bloated_inverse_index = np.unique(
new_categories_with_duplicates,
return_inverse=True
)
if new_categories[0] is _sortable_sentinel:
# f_to_use return _sortable_sentinel for locations that should be
# missing values in our output. Since np.unique returns the uniques
# in sorted order, and since _sortable_sentinel sorts before any
# string, we only need to check the first array entry.
new_categories[0] = self.missing_value
# `reverse_index` will always be a 64 bit integer even if we can hold a
# smaller array.
reverse_index = bloated_inverse_index.astype(
smallest_uint_that_can_hold(len(new_categories))
)
new_codes = np.take(reverse_index, self.as_int_array())
return self.from_codes_and_metadata(
new_codes,
new_categories,
dict(zip(new_categories, range(len(new_categories)))),
missing_value=self.missing_value,
) |
<SYSTEM_TASK:>
Asymmetric rounding function for adjusting prices to the specified number
<END_TASK>
<USER_TASK:>
Description:
def asymmetric_round_price(price, prefer_round_down, tick_size, diff=0.95):
"""
Asymmetric rounding function for adjusting prices to the specified number
of places in a way that "improves" the price. For limit prices, this means
preferring to round down on buys and preferring to round up on sells.
For stop prices, it means the reverse.
If prefer_round_down == True:
When .05 below to .95 above a specified decimal place, use it.
If prefer_round_down == False:
When .95 below to .05 above a specified decimal place, use it.
In math-speak:
If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01.
If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01.
""" |
precision = zp_math.number_of_decimal_places(tick_size)
multiplier = int(tick_size * (10 ** precision))
diff -= 0.5 # shift the difference down
diff *= (10 ** -precision) # adjust diff to precision of tick size
diff *= multiplier # adjust diff to value of tick_size
# Subtracting an epsilon from diff to enforce the open-ness of the upper
# bound on buys and the lower bound on sells. Using the actual system
# epsilon doesn't quite get there, so use a slightly less epsilon-ey value.
epsilon = float_info.epsilon * 10
diff = diff - epsilon
# relies on rounding half away from zero, unlike numpy's bankers' rounding
rounded = tick_size * consistent_round(
(price - (diff if prefer_round_down else -diff)) / tick_size
)
if zp_math.tolerant_equals(rounded, 0.0):
return 0.0
return rounded |
<SYSTEM_TASK:>
Build a zipline data bundle from the directory with csv files.
<END_TASK>
<USER_TASK:>
Description:
def csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
tframes=None,
csvdir=None):
"""
Build a zipline data bundle from the directory with csv files.
""" |
if not csvdir:
csvdir = environ.get('CSVDIR')
if not csvdir:
raise ValueError("CSVDIR environment variable is not set")
if not os.path.isdir(csvdir):
raise ValueError("%s is not a directory" % csvdir)
if not tframes:
tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir))
if not tframes:
raise ValueError("'daily' and 'minute' directories "
"not found in '%s'" % csvdir)
divs_splits = {'divs': DataFrame(columns=['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date']),
'splits': DataFrame(columns=['sid', 'ratio',
'effective_date'])}
for tframe in tframes:
ddir = os.path.join(csvdir, tframe)
symbols = sorted(item.split('.csv')[0]
for item in os.listdir(ddir)
if '.csv' in item)
if not symbols:
raise ValueError("no <symbol>.csv* files found in %s" % ddir)
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = DataFrame(empty(len(symbols), dtype=dtype))
if tframe == 'minute':
writer = minute_bar_writer
else:
writer = daily_bar_writer
writer.write(_pricing_iter(ddir, symbols, metadata,
divs_splits, show_progress),
show_progress=show_progress)
# Hardcode the exchange to "CSVDIR" for all assets and (elsewhere)
# register "CSVDIR" to resolve to the NYSE calendar, because these
# are all equities and thus can use the NYSE calendar.
metadata['exchange'] = "CSVDIR"
asset_db_writer.write(equities=metadata)
divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int)
divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int)
adjustment_writer.write(splits=divs_splits['splits'],
dividends=divs_splits['divs']) |
<SYSTEM_TASK:>
A factory for decorators that restrict Term methods to only be callable on
<END_TASK>
<USER_TASK:>
Description:
def restrict_to_dtype(dtype, message_template):
"""
A factory for decorators that restrict Term methods to only be callable on
Terms with a specific dtype.
This is conceptually similar to
zipline.utils.input_validation.expect_dtypes, but provides more flexibility
for providing error messages that are specifically targeting Term methods.
Parameters
----------
dtype : numpy.dtype
The dtype on which the decorated method may be called.
message_template : str
A template for the error message to be raised.
`message_template.format` will be called with keyword arguments
`method_name`, `expected_dtype`, and `received_dtype`.
Examples
--------
@restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}."
"{method_name}() requires factors of dtype{expected_dtype}."
),
)
def some_factor_method(self, ...):
self.stuff_that_requires_being_float64(...)
""" |
def processor(term_method, _, term_instance):
term_dtype = term_instance.dtype
if term_dtype != dtype:
raise TypeError(
message_template.format(
method_name=term_method.__name__,
expected_dtype=dtype.name,
received_dtype=term_dtype,
)
)
return term_instance
return preprocess(self=processor) |
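In practice this factory is specialized once and the resulting decorator is reused across Term methods; a hedged sketch of that pattern follows (the alias name and class are illustrative, not zipline's actual definitions).

# Illustrative specialization; the alias name is made up.
from zipline.utils.numpy_utils import float64_dtype

float64_only = restrict_to_dtype(
    dtype=float64_dtype,
    message_template=(
        "{method_name}() is only defined on Factors of dtype {expected_dtype},"
        " but it was called on a Factor of dtype {received_dtype}."
    ),
)

class MyFactor(object):
    dtype = float64_dtype

    @float64_only
    def zscore(self):
        # Reached only when self.dtype == float64_dtype; otherwise the
        # decorator raises TypeError with the formatted message.
        pass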
<SYSTEM_TASK:>
Returns the daily returns for the given period.
<END_TASK>
<USER_TASK:>
Description:
def daily_returns(self, start, end=None):
"""Returns the daily returns for the given period.
Parameters
----------
start : datetime
The inclusive starting session label.
end : datetime, optional
The inclusive ending session label. If not provided, treat
``start`` as a scalar key.
Returns
-------
returns : pd.Series or float
The returns in the given period. The index will be the trading
calendar in the range [start, end]. If just ``start`` is provided,
return the scalar value on that day.
""" |
if end is None:
return self._daily_returns[start]
return self._daily_returns[start:end] |
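A brief usage sketch; ``benchmark_source`` is a stand-in name for whatever object exposes this method, and the dates are arbitrary.

import pandas as pd

# Scalar lookup: a single float for one session.
one_day = benchmark_source.daily_returns(pd.Timestamp('2016-01-05', tz='UTC'))

# Range lookup: a pd.Series indexed by the sessions in [start, end].
window = benchmark_source.daily_returns(
    pd.Timestamp('2016-01-05', tz='UTC'),
    pd.Timestamp('2016-01-08', tz='UTC'),
)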
<SYSTEM_TASK:>
Internal method that pre-calculates the benchmark return series for
<END_TASK>
<USER_TASK:>
Description:
def _initialize_precalculated_series(self,
asset,
trading_calendar,
trading_days,
data_portal):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DatetimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
""" |
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return (
benchmark_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
benchmark_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
returns = benchmark_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
# Handle the case where the benchmark asset's data starts on the
# first session; in that case, use the open-to-close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
# compute the first day's return from that session's open and close spot values
first_open = data_portal.get_spot_value(
asset,
'open',
trading_days[0],
'daily',
)
first_close = data_portal.get_spot_value(
asset,
'close',
trading_days[0],
'daily',
)
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
'cannot set benchmark to asset that does not exist during'
' the simulation period (asset start date=%r)' % start_date
) |
<SYSTEM_TASK:>
Load all of the given extensions. This should be called by run_algo
<END_TASK>
<USER_TASK:>
Description:
def load_extensions(default, extensions, strict, environ, reload=False):
"""Load all of the given extensions. This should be called by run_algo
or the cli.
Parameters
----------
default : bool
Load the default extension (~/.zipline/extension.py)?
extensions : iterable[str]
The paths to the extensions to load. If the path ends in ``.py`` it is
treated as a script and executed. If it does not end in ``.py`` it is
treated as a module to be imported.
strict : bool
Should failure to load an extension raise an error? If this is false it will
still warn.
environ : mapping
The environment to use to find the default extension path.
reload : bool, optional
Reload any extensions that have already been loaded.
""" |
if default:
default_extension_path = pth.default_extension(environ=environ)
pth.ensure_file(default_extension_path)
# put the default extension first so other extensions can depend on
# the order they are loaded
extensions = concatv([default_extension_path], extensions)
for ext in extensions:
if ext in _loaded_extensions and not reload:
continue
try:
# load all of the zipline extensions
if ext.endswith('.py'):
with open(ext) as f:
ns = {}
six.exec_(compile(f.read(), ext, 'exec'), ns, ns)
else:
__import__(ext)
except Exception as e:
if strict:
# if `strict` we should raise the actual exception and fail
raise
# without `strict` we should just log the failure
warnings.warn(
'Failed to load extension: %r\n%s' % (ext, e),
stacklevel=2
)
else:
_loaded_extensions.add(ext) |
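A hedged call sketch; the script path and module name are hypothetical.

import os

load_extensions(
    default=True,                                   # also run ~/.zipline/extension.py
    extensions=['/path/to/my_extension.py',         # executed as a script
                'my_package.zipline_extension'],    # imported as a module
    strict=True,                                    # raise instead of warn on failure
    environ=os.environ,
)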
<SYSTEM_TASK:>
Run a trading algorithm.
<END_TASK>
<USER_TASK:>
Description:
def run_algorithm(start,
end,
initialize,
capital_base,
handle_data=None,
before_trading_start=None,
analyze=None,
data_frequency='daily',
bundle='quantopian-quandl',
bundle_timestamp=None,
trading_calendar=None,
metrics_set='default',
benchmark_returns=None,
default_extension=True,
extensions=(),
strict_extensions=True,
environ=os.environ,
blotter='default'):
"""
Run a trading algorithm.
Parameters
----------
start : datetime
The start date of the backtest.
end : datetime
The end date of the backtest.
initialize : callable[context -> None]
The initialize function to use for the algorithm. This is called once
at the very beginning of the backtest and should be used to set up
any state needed by the algorithm.
capital_base : float
The starting capital for the backtest.
handle_data : callable[(context, BarData) -> None], optional
The handle_data function to use for the algorithm. This is called
every minute when ``data_frequency == 'minute'`` or every day
when ``data_frequency == 'daily'``.
before_trading_start : callable[(context, BarData) -> None], optional
The before_trading_start function for the algorithm. This is called
once before each trading day (after initialize on the first day).
analyze : callable[(context, pd.DataFrame) -> None], optional
The analyze function to use for the algorithm. This function is called
once at the end of the backtest and is passed the context and the
performance data.
data_frequency : {'daily', 'minute'}, optional
The data frequency to run the algorithm at.
bundle : str, optional
The name of the data bundle to use to load the data to run the backtest
with. This defaults to 'quantopian-quandl'.
bundle_timestamp : datetime, optional
The datetime to lookup the bundle data for. This defaults to the
current time.
trading_calendar : TradingCalendar, optional
The trading calendar to use for your backtest.
metrics_set : iterable[Metric] or str, optional
The set of metrics to compute in the simulation. If a string is passed,
resolve the set with :func:`zipline.finance.metrics.load`.
default_extension : bool, optional
Should the default zipline extension be loaded? This is found at
``$ZIPLINE_ROOT/extension.py``
extensions : iterable[str], optional
The names of any other extensions to load. Each element may either be
a dotted module path like ``a.b.c`` or a path to a python file ending
in ``.py`` like ``a/b/c.py``.
strict_extensions : bool, optional
Should the run fail if any extensions fail to load? If this is false,
a warning will be raised instead.
environ : mapping[str -> str], optional
The os environment to use. Many extensions use this to get parameters.
This defaults to ``os.environ``.
blotter : str or zipline.finance.blotter.Blotter, optional
Blotter to use with this algorithm. If passed as a string, we look for
a blotter construction function registered with
``zipline.extensions.register`` and call it with no parameters.
Default is a :class:`zipline.finance.blotter.SimulationBlotter` that
never cancels orders.
Returns
-------
perf : pd.DataFrame
The daily performance of the algorithm.
See Also
--------
zipline.data.bundles.bundles : The available data bundles.
""" |
load_extensions(default_extension, extensions, strict_extensions, environ)
return _run(
handle_data=handle_data,
initialize=initialize,
before_trading_start=before_trading_start,
analyze=analyze,
algofile=None,
algotext=None,
defines=(),
data_frequency=data_frequency,
capital_base=capital_base,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=os.devnull,
trading_calendar=trading_calendar,
print_algo=False,
metrics_set=metrics_set,
local_namespace=False,
environ=environ,
blotter=blotter,
benchmark_returns=benchmark_returns,
) |
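A minimal end-to-end sketch, assuming the default 'quantopian-quandl' bundle has already been ingested and contains AAPL:

import pandas as pd
from zipline.api import order_target_percent, symbol

def initialize(context):
    context.asset = symbol('AAPL')

def handle_data(context, data):
    # Hold a 100% position in the asset for the whole backtest.
    order_target_percent(context.asset, 1.0)

perf = run_algorithm(
    start=pd.Timestamp('2016-01-04', tz='utc'),
    end=pd.Timestamp('2016-12-30', tz='utc'),
    initialize=initialize,
    handle_data=handle_data,
    capital_base=100000,
    data_frequency='daily',
    bundle='quantopian-quandl',
)
print(perf[['portfolio_value', 'returns']].tail())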