<SYSTEM_TASK:>
Remove a pip package in given environment by `name` or `prefix`.
<END_TASK>
<USER_TASK:>
Description:
def pip_remove(self, name=None, prefix=None, pkgs=None):
"""Remove a pip package in given environment by `name` or `prefix`.""" |
logger.debug(str((prefix, pkgs)))
if isinstance(pkgs, (list, tuple)):
pkg = ' '.join(pkgs)
else:
pkg = pkgs
extra_args = ['uninstall', '--yes', pkg]
return self._call_pip(name=name, prefix=prefix, extra_args=extra_args) |
<SYSTEM_TASK:>
Search for pip packages in PyPI matching `search_string`.
<END_TASK>
<USER_TASK:>
Description:
def pip_search(self, search_string=None):
"""Search for pip packages in PyPI matching `search_string`.""" |
extra_args = ['search', search_string]
return self._call_pip(name='root', extra_args=extra_args,
callback=self._pip_search) |
<SYSTEM_TASK:>
Add some moving points to the dependency resolution text.
<END_TASK>
<USER_TASK:>
Description:
def _timer_update(self):
"""Add some moving points to the dependency resolution text.""" |
self._timer_counter += 1
dot = self._timer_dots.pop(0)
self._timer_dots = self._timer_dots + [dot]
self._rows = [[_(u'Resolving dependencies') + dot, u'', u'', u'']]
index = self.createIndex(0, 0)
self.dataChanged.emit(index, index)
if self._timer_counter > 150:
self._timer.stop()
self._timer_counter = 0 |
<SYSTEM_TASK:>
Create a worker for this client to be run in a separate thread.
<END_TASK>
<USER_TASK:>
Description:
def _create_worker(self, method, *args, **kwargs):
"""Create a worker for this client to be run in a separate thread.""" |
# FIXME: this might be heavy...
thread = QThread()
worker = ClientWorker(method, args, kwargs)
worker.moveToThread(thread)
worker.sig_finished.connect(self._start)
worker.sig_finished.connect(thread.quit)
thread.started.connect(worker.start)
self._queue.append(thread)
self._threads.append(thread)
self._workers.append(worker)
self._start()
return worker |
<SYSTEM_TASK:>
Load all the available packages information.
<END_TASK>
<USER_TASK:>
Description:
def _load_repodata(filepaths, extra_data=None, metadata=None):
"""Load all the available pacakges information.
For downloaded repodata files (repo.continuum.io), additional
data provided (anaconda cloud), and additional metadata and merge into
a single set of packages and apps.
""" |
extra_data = extra_data if extra_data else {}
metadata = metadata if metadata else {}
repodata = []
for filepath in filepaths:
compressed = filepath.endswith('.bz2')
mode = 'rb' if filepath.endswith('.bz2') else 'r'
if os.path.isfile(filepath):
with open(filepath, mode) as f:
raw_data = f.read()
if compressed:
data = bz2.decompress(raw_data)
else:
data = raw_data
try:
data = json.loads(to_text_string(data, 'UTF-8'))
except Exception as error:
logger.error(str(error))
data = {}
repodata.append(data)
all_packages = {}
for data in repodata:
packages = data.get('packages', {})
for canonical_name in packages:
data = packages[canonical_name]
name, version, b = tuple(canonical_name.rsplit('-', 2))
if name not in all_packages:
all_packages[name] = {'versions': set(),
'size': {},
'type': {},
'app_entry': {},
'app_type': {},
}
elif name in metadata:
temp_data = all_packages[name]
temp_data['home'] = metadata[name].get('home', '')
temp_data['license'] = metadata[name].get('license', '')
temp_data['summary'] = metadata[name].get('summary', '')
temp_data['latest_version'] = metadata[name].get('version')
all_packages[name] = temp_data
all_packages[name]['versions'].add(version)
all_packages[name]['size'][version] = data.get('size', '')
# Only the latest builds will have the correct metadata for
# apps, so only store apps that have the app metadata
if data.get('type'):
all_packages[name]['type'][version] = data.get('type')
all_packages[name]['app_entry'][version] = data.get(
'app_entry')
all_packages[name]['app_type'][version] = data.get(
'app_type')
all_apps = {}
for name in all_packages:
versions = sort_versions(list(all_packages[name]['versions']))
all_packages[name]['versions'] = versions[:]
for version in versions:
has_type = all_packages[name].get('type')
# Has type in this case implies being an app
if has_type:
all_apps[name] = all_packages[name].copy()
# Remove all versions that are not apps!
versions = all_apps[name]['versions'][:]
types = all_apps[name]['type']
app_versions = [v for v in versions if v in types]
all_apps[name]['versions'] = app_versions
return all_packages, all_apps |
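For reference, a small standalone sketch of the canonical-name split used above; the package string is illustrative, not taken from any actual repodata file:

# Repodata 'packages' keys are canonical names of the form <name>-<version>-<build>,
# so rsplit('-', 2) recovers the three parts (example string is hypothetical).
name, version, build = 'numpy-1.11.3-py36_0'.rsplit('-', 2)
print(name, version, build)  # numpy 1.11.3 py36_0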
<SYSTEM_TASK:>
Logout from anaconda cloud.
<END_TASK>
<USER_TASK:>
Description:
def logout(self):
"""Logout from anaconda cloud.""" |
logger.debug('Logout')
method = self._anaconda_client_api.remove_authentication
return self._create_worker(method) |
<SYSTEM_TASK:>
Load all the available packages information for downloaded repodata.
<END_TASK>
<USER_TASK:>
Description:
def load_repodata(self, filepaths, extra_data=None, metadata=None):
"""
Load all the available packages information for downloaded repodata.
Merge repodata files (repo.continuum.io), additional data provided
(anaconda cloud), and additional metadata into a single set of packages
and apps.
""" |
logger.debug(str((filepaths)))
method = self._load_repodata
return self._create_worker(method, filepaths, extra_data=extra_data,
metadata=metadata) |
<SYSTEM_TASK:>
Prepare downloaded package info along with pip packages info.
<END_TASK>
<USER_TASK:>
Description:
def prepare_model_data(self, packages, linked, pip=None,
private_packages=None):
"""Prepare downloaded package info along with pip pacakges info.""" |
logger.debug('')
return self._prepare_model_data(packages, linked, pip=pip,
private_packages=private_packages) |
<SYSTEM_TASK:>
Return all the available packages for a given user.
<END_TASK>
<USER_TASK:>
Description:
def packages(self, login=None, platform=None, package_type=None,
type_=None, access=None):
"""Return all the available packages for a given user.
Parameters
----------
type_: Optional[str]
Only find packages that have this conda `type`, (i.e. 'app').
access : Optional[str]
Only find packages that have this access level (e.g. 'private',
'authenticated', 'public').
""" |
logger.debug('')
method = self._anaconda_client_api.user_packages
return self._create_worker(method, login=login, platform=platform,
package_type=package_type,
type_=type_, access=access) |
<SYSTEM_TASK:>
Creates and returns a mapper function to access country data.
<END_TASK>
<USER_TASK:>
Description:
def country(from_key='name', to_key='iso'):
"""Creates and returns a mapper function to access country data.
The mapper function that is returned must be called with one argument. In
the default case you call it with a name and it returns a 3-letter
ISO_3166-1 code, e.g. called with ``Spain`` it would return ``ESP``.
:param from_key: (optional) the country attribute you give as input.
Defaults to ``name``.
:param to_key: (optional) the country attribute you want as output.
Defaults to ``iso``.
:return: mapper
:rtype: function
""" |
gc = GeonamesCache()
dataset = gc.get_dataset_by_key(gc.get_countries(), from_key)
def mapper(input):
# For country name inputs take the names mapping into account.
if 'name' == from_key:
input = mappings.country_names.get(input, input)
# If there is a record return the demanded attribute.
item = dataset.get(input)
if item:
return item[to_key]
return mapper |
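A minimal usage sketch of the returned mapper; the 'capital' attribute used in the second mapper is an assumption about the geonamescache country records, not something stated above:

name_mapper = country()  # default: country name -> ISO code ('Spain' -> 'ESP' per the docstring)
print(name_mapper('Spain'))
capital_mapper = country(from_key='iso', to_key='capital')  # 'capital' key is assumed to exist
print(capital_mapper('ES'))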
<SYSTEM_TASK:>
Get a dictionary of cities keyed by geonameid.
<END_TASK>
<USER_TASK:>
Description:
def get_cities(self):
"""Get a dictionary of cities keyed by geonameid.""" |
if self.cities is None:
self.cities = self._load_data(self.cities, 'cities.json')
return self.cities |
<SYSTEM_TASK:>
Get a list of city dictionaries with the given name.
<END_TASK>
<USER_TASK:>
Description:
def get_cities_by_name(self, name):
"""Get a list of city dictionaries with the given name.
City names cannot be used as keys, as they are not unique.
""" |
if name not in self.cities_by_names:
if self.cities_items is None:
self.cities_items = list(self.get_cities().items())
self.cities_by_names[name] = [dict({gid: city})
for gid, city in self.cities_items if city['name'] == name]
return self.cities_by_names[name] |
<SYSTEM_TASK:>
Convert channels into normalized repodata urls including the platform.
<END_TASK>
<USER_TASK:>
Description:
def _set_repo_urls_from_channels(self, channels):
"""
Convert channels into normalized repodata urls including the platform.
Channels are assumed to be in normalized url form.
""" |
repos = []
sys_platform = self._conda_api.get_platform()
for channel in channels:
url = '{0}/{1}/repodata.json.bz2'.format(channel, sys_platform)
repos.append(url)
return repos |
<SYSTEM_TASK:>
Convert a `repo` url to a file path for local storage.
<END_TASK>
<USER_TASK:>
Description:
def _repo_url_to_path(self, repo):
"""Convert a `repo` url to a file path for local storage.""" |
repo = repo.replace('http://', '')
repo = repo.replace('https://', '')
repo = repo.replace('/', '_')
return os.sep.join([self._data_directory, repo]) |
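A standalone sketch of the same transformation, assuming a hypothetical data directory of '/tmp/data':

import os
repo = 'https://repo.continuum.io/pkgs/free/linux-64/repodata.json.bz2'
flat = repo.replace('http://', '').replace('https://', '').replace('/', '_')
print(os.sep.join(['/tmp/data', flat]))
# -> /tmp/data/repo.continuum.io_pkgs_free_linux-64_repodata.json.bz2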
<SYSTEM_TASK:>
Return the repodata paths based on `channels` and the `data_directory`.
<END_TASK>
<USER_TASK:>
Description:
def repodata_files(self, channels=None):
"""
Return the repodata paths based on `channels` and the `data_directory`.
There is no check for validity here.
""" |
if channels is None:
channels = self.conda_get_condarc_channels()
repodata_urls = self._set_repo_urls_from_channels(channels)
repopaths = []
for repourl in repodata_urls:
fullpath = os.sep.join([self._repo_url_to_path(repourl)])
repopaths.append(fullpath)
return repopaths |
<SYSTEM_TASK:>
Update the metadata available for packages in repo.continuum.io.
<END_TASK>
<USER_TASK:>
Description:
def update_metadata(self):
"""
Update the metadata available for packages in repo.continuum.io.
Returns a download worker.
""" |
if self._data_directory is None:
raise Exception('Need to call `api.set_data_directory` first.')
metadata_url = 'https://repo.continuum.io/pkgs/metadata.json'
filepath = os.sep.join([self._data_directory, 'metadata.json'])
worker = self.download_requests(metadata_url, filepath)
return worker |
<SYSTEM_TASK:>
Check if channel is valid.
<END_TASK>
<USER_TASK:>
Description:
def check_valid_channel(self,
channel,
conda_url='https://conda.anaconda.org'):
"""Check if channel is valid.""" |
if channel.startswith('https://') or channel.startswith('http://'):
url = channel
else:
url = "{0}/{1}".format(conda_url, channel)
if url[-1] == '/':
url = url[:-1]
plat = self.conda_platform()
repodata_url = "{0}/{1}/{2}".format(url, plat, 'repodata.json')
worker = self.download_is_valid_url(repodata_url)
worker.url = url
return worker |
<SYSTEM_TASK:>
Get all instances matching a tag.
<END_TASK>
<USER_TASK:>
Description:
def _aws_get_instance_by_tag(region, name, tag, raw):
"""Get all instances matching a tag.""" |
client = boto3.session.Session().client('ec2', region)
matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])
instances = []
[[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned
for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]
return instances |
<SYSTEM_TASK:>
Get instances from GCP and AWS by name.
<END_TASK>
<USER_TASK:>
Description:
def get_instances_by_name(name, sort_by_order=('cloud', 'name'), projects=None, raw=True, regions=None, gcp_credentials=None, clouds=SUPPORTED_CLOUDS):
"""Get intsances from GCP and AWS by name.""" |
matching_instances = all_clouds_get_instances_by_name(
name, projects, raw, credentials=gcp_credentials, clouds=clouds)
if regions:
matching_instances = [instance for instance in matching_instances if instance.region in regions]
matching_instances.sort(key=lambda instance: [getattr(instance, field) for field in sort_by_order])
return matching_instances |
<SYSTEM_TASK:>
Returns the public ip address of an instance.
<END_TASK>
<USER_TASK:>
Description:
def get_persistent_address(instance):
"""Returns the public ip address of an instance.""" |
if instance.cloud == 'aws':
client = boto3.client('ec2', instance.region)
try:
client.describe_addresses(PublicIps=[instance.ip_address])
return instance.ip_address
except botocore.client.ClientError as exc:
if exc.response.get('Error', {}).get('Code') != 'InvalidAddress.NotFound':
raise
# Address is not public
return None
if instance.cloud == 'gcp':
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
try:
return compute.addresses().get(address=instance.name, project=instance.project, region=instance.region).execute()['address']
except errors.HttpError as exc:
if 'was not found' in str(exc):
return None
raise
raise ValueError('Unknown cloud %s' % instance.cloud) |
<SYSTEM_TASK:>
Use pip to find pip installed packages in a given prefix.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Use pip to find pip installed packages in a given prefix.""" |
pip_packages = {}
for package in pip.get_installed_distributions():
name = package.project_name
version = package.version
full_name = "{0}-{1}-pip".format(name.lower(), version)
pip_packages[full_name] = {'version': version}
data = json.dumps(pip_packages)
print(data) |
<SYSTEM_TASK:>
Write all data to the given file, overwriting any previous contents.
<END_TASK>
<USER_TASK:>
Description:
def _save(file, data, mode='w+'):
"""
Write all data to the given file, overwriting any previous contents.
""" |
with open(file, mode) as fh:
fh.write(data) |
<SYSTEM_TASK:>
Merge contents.
<END_TASK>
<USER_TASK:>
Description:
def merge(obj):
"""
Merge contents.
It does a simple merge of all files defined under the 'static' key.
If you have JS or CSS files with embedded django tags like {% url ... %} or
{% static ... %} you should declare them under the 'template' key. This
function will render them and append the result to the merged output.
To use the render option you have to define both 'config' and 'path' on
the merger dictionary.
""" |
merge = ''
for f in obj.get('static', []):
print 'Merging: {}'. format(f)
merge += _read(f)
def doless(f):
print 'Compiling LESS: {}'.format(f)
ret, tmp = commands.getstatusoutput('lesscpy '+f)
if ret == 0:
return tmp
else:
print 'LESS to CSS failed for: {} (Do you have lesscpy installed?)'.format(f)
return ''
if merger.get('config'): #only imports django if we have a config file defined
import re
for p in merger['path']: sys.path.append(p)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", merger['config'])
try:
from django.template.loader import get_template_from_string
from django.template.base import Context
from django.utils.encoding import smart_str
from django.conf import settings
except:
print 'Do you really have django well installed?'
sys.exit(1)
for f in obj.get('template', []):
print 'Merging django template: {}'. format(f)
t = _read(f)
if settings.FORCE_SCRIPT_NAME:
t = re.sub(r'\{%\s+url\b', settings.FORCE_SCRIPT_NAME+'{% url ', t)
tmp = smart_str(get_template_from_string(t).render(Context({})))
if f.endswith('.less'):
pass
#TODO compile tmp to css
merge += tmp
for f in obj.get('less', []):
merge += doless(f)
return merge |
<SYSTEM_TASK:>
Minify JS data and save to file.
<END_TASK>
<USER_TASK:>
Description:
def jsMin(data, file):
"""
Minify JS data and save to file.
Data should be a string with the whole JS content, and file will be
overwritten if it exists.
""" |
print 'Minifying JS... ',
url = 'http://javascript-minifier.com/raw' #POST
req = urllib2.Request(url, urllib.urlencode({'input': data}))
try:
f = urllib2.urlopen(req)
response = f.read()
f.close()
print 'Final: {:.1f}%'.format(100.0*len(response)/len(data))
print 'Saving: {} ({:.2f}kB)'.format(file, len(response)/1024.0)
_save(file, response)
except:
print 'Oops!! Failed :('
return 1
return 0 |
<SYSTEM_TASK:>
Try to optimise a JPG file.
<END_TASK>
<USER_TASK:>
Description:
def jpgMin(file, force=False):
"""
Try to optimise a JPG file.
The original will be saved at the same place with '.original' appended to its name.
Once a .original exists the function will ignore this file unless force is True.
""" |
if not os.path.isfile(file+'.original') or force:
data = _read(file, 'rb')
_save(file+'.original', data, 'w+b')
print 'Optimising JPG {} - {:.2f}kB'.format(file, len(data)/1024.0),
url = 'http://jpgoptimiser.com/optimise'
parts, headers = encode_multipart({}, {'input': {'filename': 'wherever.jpg', 'content': data}})
req = urllib2.Request(url, data=parts, headers=headers)
try:
f = urllib2.urlopen(req)
response = f.read()
f.close()
print ' - {:.2f} - {:.1f}%'.format(len(response)/1024.0, 100.0*len(response)/len(data))
_save(file, response, 'w+b')
except:
print 'Oops!! Failed :('
return 1
else:
print 'Ignoring file: {}'.format(file)
return 0 |
<SYSTEM_TASK:>
Process each block of the merger object.
<END_TASK>
<USER_TASK:>
Description:
def process(obj):
"""
Process each block of the merger object.
""" |
#merge all static and templates and less files
merged = merge(obj)
#save the full file if name defined
if obj.get('full'):
print 'Saving: {} ({:.2f}kB)'.format(obj['full'], len(merged)/1024.0)
_save(obj['full'], merged)
else:
print 'Full merged size: {:.2f}kB'.format(len(merged)/1024.0)
#minify js and save to file
if obj.get('jsmin'):
jsMin(merged, obj['jsmin'])
#minify css and save to file
if obj.get('cssmin'):
cssMin(merged, obj['cssmin']) |
<SYSTEM_TASK:>
Return parameters for portfolio optimization.
<END_TASK>
<USER_TASK:>
Description:
def optimize(exp_rets, covs):
"""
Return parameters for portfolio optimization.
Parameters
----------
exp_rets : ndarray
Vector of expected returns for each investment.
covs : ndarray
Covariance matrix for the given investments.
Returns
---------
a : ndarray
The first vector (to be combined with target return as scalar)
in the linear equation for optimal weights.
b : ndarray
The second (constant) vector in the linear equation for
optimal weights.
least_risk_ret : float
The return achieved on the portfolio that combines the given
equities so as to achieve the lowest possible risk.
Notes
---------
* The length of `exp_rets` must match the number of rows
and columns in the `covs` matrix.
* The weights for an optimal portfolio with expected return
`ret` is given by the formula `w = ret * a + b` where `a`
and `b` are the vectors returned here. The weights `w` for
the portfolio with lowest risk are given by `w = least_risk_ret * a + b`.
* An exception will be raised if the covariance matrix
is singular or if each prospective investment has the
same expected return.
""" |
_cov_inv = np.linalg.inv(covs)
# unit vector
_u = np.ones((len(exp_rets)))
# compute some dot products one time only
_u_cov_inv = _u.dot(_cov_inv)
_rets_cov_inv = exp_rets.dot(_cov_inv)
# helper matrix for deriving Lagrange multipliers
_m = np.empty((2, 2))
_m[0, 0] = _rets_cov_inv.dot(exp_rets)
_m[0, 1] = _u_cov_inv.dot(exp_rets)
_m[1, 0] = _rets_cov_inv.dot(_u)
_m[1, 1] = _u_cov_inv.dot(_u)
# compute values to return
_m_inv = np.linalg.inv(_m)
a = _m_inv[0, 0] * _rets_cov_inv + _m_inv[1, 0] * _u_cov_inv
b = _m_inv[0, 1] * _rets_cov_inv + _m_inv[1, 1] * _u_cov_inv
least_risk_ret = _m[0, 1] / _m[1, 1]
return a, b, least_risk_ret |
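A usage sketch applying the weight formula from the Notes (w = ret * a + b); the expected returns and covariance values are illustrative only:

import numpy as np

exp_rets = np.array([0.08, 0.12, 0.05])
covs = np.array([[0.10, 0.02, 0.01], [0.02, 0.12, 0.03], [0.01, 0.03, 0.07]])
a, b, least_risk_ret = optimize(exp_rets, covs)
w = 0.09 * a + b                # weights for a 9% target return
w_min = least_risk_ret * a + b  # weights for the minimum-risk portfolio
print(w.sum(), w_min.sum())     # both sum to 1 by the budget constraint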
<SYSTEM_TASK:>
Annual growth given growth from start date to end date.
<END_TASK>
<USER_TASK:>
Description:
def growthfromrange(rangegrowth, startdate, enddate):
"""
Annual growth given growth from start date to end date.
""" |
_yrs = (pd.Timestamp(enddate) - pd.Timestamp(startdate)).total_seconds() /\
dt.timedelta(365.25).total_seconds()
return yrlygrowth(rangegrowth, _yrs) |
<SYSTEM_TASK:>
Return a DataFrame of current US equities.
<END_TASK>
<USER_TASK:>
Description:
def equities(country='US'):
"""
Return a DataFrame of current US equities.
.. versionadded:: 0.4.0
.. versionchanged:: 0.5.0
Return a DataFrame
Parameters
----------
country : str, optional
Country code for equities to return, defaults to 'US'.
Returns
-------
eqs : :class:`pandas.DataFrame`
DataFrame whose index is a list of all current ticker symbols.
Columns are 'Security Name' (e.g. 'Zynerba Pharmaceuticals, Inc. - Common Stock')
and 'Exchange' ('NASDAQ', 'NYSE', 'NYSE MKT', etc.)
Examples
--------
>>> eqs = pn.data.equities('US')
Notes
-----
Currently only US markets are supported.
""" |
nasdaqblob, otherblob = _getrawdata()
eq_triples = []
eq_triples.extend(_get_nas_triples(nasdaqblob))
eq_triples.extend(_get_other_triples(otherblob))
eq_triples.sort()
index = [triple[0] for triple in eq_triples]
data = [triple[1:] for triple in eq_triples]
return pd.DataFrame(data, index, columns=['Security Name', 'Exchange'], dtype=str) |
<SYSTEM_TASK:>
Metrics for evaluating a straddle.
<END_TASK>
<USER_TASK:>
Description:
def straddle(self, strike, expiry):
"""
Metrics for evaluating a straddle.
Parameters
------------
strike : numeric
Strike price.
expiry : date or date str (e.g. '2015-01-01')
Expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating straddle.
""" |
_rows = {}
_prices = {}
for _opttype in _constants.OPTTYPES:
_rows[_opttype] = _relevant_rows(self.data, (strike, expiry, _opttype,),
"No key for {} strike {} {}".format(expiry, strike, _opttype))
_prices[_opttype] = _getprice(_rows[_opttype])
_eq = _rows[_constants.OPTTYPES[0]].loc[:, 'Underlying_Price'].values[0]
_qt = _rows[_constants.OPTTYPES[0]].loc[:, 'Quote_Time'].values[0]
_index = ['Call', 'Put', 'Credit', 'Underlying_Price', 'Quote_Time']
_vals = np.array([_prices['call'], _prices['put'], _prices['call'] + _prices['put'], _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value']) |
<SYSTEM_TASK:>
Retrieve all current options chains for given equity.
<END_TASK>
<USER_TASK:>
Description:
def get(equity):
"""
Retrieve all current options chains for given equity.
.. versionchanged:: 0.5.0
Eliminate special exception handling.
Parameters
-------------
equity : str
Equity for which to retrieve options data.
Returns
-------------
optdata : :class:`~pynance.opt.core.Options`
All options data for given equity currently available
from Yahoo! Finance.
Examples
-------------
Basic usage::
>>> fopt = pn.opt.get('f')
To show useful information (expiration dates, stock price, quote time)
when retrieving options data, you can chain the call to
:func:`get` with :meth:`~pynance.opt.core.Options.info`::
>>> fopt = pn.opt.get('f').info()
Expirations:
...
Stock: 15.93
Quote time: 2015-03-07 16:00
""" |
_optmeta = pdr.data.Options(equity, 'yahoo')
_optdata = _optmeta.get_all_data()
return Options(_optdata) |
<SYSTEM_TASK:>
Return a transformed DataFrame.
<END_TASK>
<USER_TASK:>
Description:
def transform(data_frame, **kwargs):
"""
Return a transformed DataFrame.
Transform data_frame along the given axis. By default, each row will be normalized (axis=0).
Parameters
-----------
data_frame : DataFrame
Data to be normalized.
axis : int, optional
0 (default) to normalize each row, 1 to normalize each column.
method : str, optional
Valid methods are:
- "vector" : Default for normalization by row (axis=0).
Normalize along axis as a vector with norm `norm`
- "last" : Linear normalization setting last value along the axis to `norm`
- "first" : Default for normalization of columns (axis=1).
Linear normalization setting first value along the given axis to `norm`
- "mean" : Normalize so that the mean of each vector along the given axis is `norm`
norm : float, optional
Target value of normalization, defaults to 1.0.
labels : DataFrame, optional
Labels may be passed as keyword argument, in which
case the label values will also be normalized and returned.
Returns
-----------
df : DataFrame
Normalized data.
labels : DataFrame, optional
Normalized labels, if provided as input.
Notes
-----------
If labels are real-valued, they should also be normalized.
..
Having row_norms as a numpy array should be benchmarked against
using a DataFrame:
http://stackoverflow.com/questions/12525722/normalize-data-in-pandas
Note: This isn't a bottleneck. Using a feature set with 13k rows and 256
data_frame ('ge' from 1962 until now), the normalization was immediate.
""" |
norm = kwargs.get('norm', 1.0)
axis = kwargs.get('axis', 0)
if axis == 0:
norm_vector = _get_norms_of_rows(data_frame, kwargs.get('method', 'vector'))
else:
norm_vector = _get_norms_of_cols(data_frame, kwargs.get('method', 'first'))
if 'labels' in kwargs:
if axis == 0:
return data_frame.apply(lambda col: col * norm / norm_vector, axis=0), \
kwargs['labels'].apply(lambda col: col * norm / norm_vector, axis=0)
else:
raise ValueError("label normalization incompatible with normalization by column")
else:
if axis == 0:
return data_frame.apply(lambda col: col * norm / norm_vector, axis=0)
else:
return data_frame.apply(lambda row: row * norm / norm_vector, axis=1) |
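A short sketch of the two normalization directions described above; the frame contents are illustrative:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.array([[3., 4.], [6., 8.], [1., 2.]]), columns=['x1', 'x2'])
rows_unit = transform(df)                      # default: each row scaled to vector norm 1.0
cols_first = transform(df, axis=1, norm=100.)  # each column scaled so its first value is 100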
<SYSTEM_TASK:>
return a column vector containing the norm of each row
<END_TASK>
<USER_TASK:>
Description:
def _get_norms_of_rows(data_frame, method):
""" return a column vector containing the norm of each row """ |
if method == 'vector':
norm_vector = np.linalg.norm(data_frame.values, axis=1)
elif method == 'last':
norm_vector = data_frame.iloc[:, -1].values
elif method == 'mean':
norm_vector = np.mean(data_frame.values, axis=1)
elif method == 'first':
norm_vector = data_frame.iloc[:, 0].values
else:
raise ValueError("no normalization method '{0}'".format(method))
return norm_vector |
<SYSTEM_TASK:>
Price as midpoint between bid and ask.
<END_TASK>
<USER_TASK:>
Description:
def get(self, opttype, strike, expiry):
"""
Price as midpoint between bid and ask.
Parameters
----------
opttype : str
'call' or 'put'.
strike : numeric
Strike price.
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
-------
out : float
Examples
--------
>>> geopts = pn.opt.get('ge')
>>> geopts.price.get('call', 26., '2015-09-18')
0.94
""" |
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
"No key for {} strike {} {}".format(expiry, strike, opttype))
return _getprice(_optrow) |
<SYSTEM_TASK:>
Basic metrics for a specific option.
<END_TASK>
<USER_TASK:>
Description:
def metrics(self, opttype, strike, expiry):
"""
Basic metrics for a specific option.
Parameters
----------
opttype : str ('call' or 'put')
strike : numeric
Strike price.
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
-------
out : :class:`pandas.DataFrame`
""" |
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
"No key for {} strike {} {}".format(expiry, strike, opttype))
_index = ['Opt_Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int', 'Underlying_Price', 'Quote_Time']
_out = pd.DataFrame(index=_index, columns=['Value'])
_out.loc['Opt_Price', 'Value'] = _opt_price = _getprice(_optrow)
for _name in _index[2:]:
_out.loc[_name, 'Value'] = _optrow.loc[:, _name].values[0]
_eq_price = _out.loc['Underlying_Price', 'Value']
if opttype == 'put':
_out.loc['Time_Val'] = _get_put_time_val(_opt_price, strike, _eq_price)
else:
_out.loc['Time_Val'] = _get_call_time_val(_opt_price, strike, _eq_price)
return _out |
<SYSTEM_TASK:>
Retrieve option prices for all strikes of a given type with a given expiration.
<END_TASK>
<USER_TASK:>
Description:
def strikes(self, opttype, expiry):
"""
Retrieve option prices for all strikes of a given type with a given expiration.
Parameters
----------
opttype : str ('call' or 'put')
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
----------
df : :class:`pandas.DataFrame`
eq : float
Price of underlying.
qt : datetime.datetime
Time of quote.
See Also
--------
:meth:`exps`
""" |
_relevant = _relevant_rows(self.data, (slice(None), expiry, opttype,),
"No key for {} {}".format(expiry, opttype))
_index = _relevant.index.get_level_values('Strike')
_columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
_df = pd.DataFrame(index=_index, columns=_columns)
_underlying = _relevant.loc[:, 'Underlying_Price'].values[0]
_quotetime = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
for _col in _columns[2:]:
_df.loc[:, _col] = _relevant.loc[:, _col].values
_df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
_set_tv_strike_ix(_df, opttype, 'Price', 'Time_Val', _underlying)
return _df, _underlying, _quotetime |
<SYSTEM_TASK:>
Prices for given strike on all available dates.
<END_TASK>
<USER_TASK:>
Description:
def exps(self, opttype, strike):
"""
Prices for given strike on all available dates.
Parameters
----------
opttype : str ('call' or 'put')
strike : numeric
Returns
----------
df : :class:`pandas.DataFrame`
eq : float
Price of underlying.
qt : :class:`datetime.datetime`
Time of quote.
See Also
--------
:meth:`strikes`
""" |
_relevant = _relevant_rows(self.data, (strike, slice(None), opttype,),
"No key for {} {}".format(strike, opttype))
_index = _relevant.index.get_level_values('Expiry')
_columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
_df = pd.DataFrame(index=_index, columns=_columns)
_eq = _relevant.loc[:, 'Underlying_Price'].values[0]
_qt = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
for _col in _columns[2:]:
_df.loc[:, _col] = _relevant.loc[:, _col].values
_df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
_set_tv_other_ix(_df, opttype, 'Price', 'Time_Val', _eq, strike)
return _df, _eq, _qt |
<SYSTEM_TASK:>
Return features and labels for the given equity data.
<END_TASK>
<USER_TASK:>
Description:
def labeledfeatures(eqdata, featurefunc, labelfunc):
"""
Return features and labels for the given equity data.
Each row of the features returned contains `2 * n_sessions + 1` columns
(or 1 less if the constant feature is excluded). After the constant feature,
if present, there will be `n_sessions` columns derived from daily growth
of the given price column, which defaults to 'Adj Close'. There will then
follow another `n_sessions` columns representing current volume as
a multiple of average volume over the previous 252 (or other value determined
by the user) sessions.
The returned features are not centered or normalized because these
adjustments need to be made after test or cross-validation data has
been removed.
The constant feature is prepended by default.
The labels are derived from `eqdata` using `labelfunc`.
Parameters
----------
eqdata : DataFrame
Expected is a dataframe as return by the `get()` function. A column
labeled 'Volume' must be present.
featurefunc : function
Function taking a dataframe of simple equity data as argument
and returning a dataframe of features and an integer representing
the number of rows that had to be skipped at the beginning of
the index of the input dataframe. The rows skipped are used
to synchronize the indices of features and labels. For example,
if the features are composed of 4 successive daily returns, then
the date of row 0 of features would be the same as the date of row 3
(counting from 0) of input data. So the corresponding `featurefunc`
would return a dataframe and the value 3.
labelfunc : function
function for deriving labels from `eqdata`. `labelfunc` must
take a single argument: `df`, a dataframe to which `labelfunc` will be applied.
`labelfunc` should return a dataframe of labels followed by an int
specifying the number of feature rows to skip at the end of the feature
dataframe. For example, if features are relative prices 64 days out,
these features will only be known up until 64 days before the data
runs out. In order to properly align features and labels, the features
should not include the last 64 rows that would otherwise be possible.
Usage:
`labels, skipatend = labelfunc(eqdata)`
Returns
-------
features : DataFrame
The features derived from the given parameters.
labels : DataFrame
The labels derived from the given parameters.
""" |
_size = len(eqdata.index)
_labels, _skipatend = labelfunc(eqdata)
_features, _skipatstart = featurefunc(eqdata.iloc[:(_size - _skipatend), :])
return _features, _labels.iloc[_skipatstart:, :] |
<SYSTEM_TASK:>
Retrieve growth labels.
<END_TASK>
<USER_TASK:>
Description:
def growth(interval, pricecol, eqdata):
"""
Retrieve growth labels.
Parameters
--------------
interval : int
Number of sessions over which growth is measured. For example, if
the value of 32 is passed for `interval`, the data returned will
show the growth 32 sessions ahead for each data point.
eqdata : DataFrame
Data for evaluating growth.
pricecol : str
Column of `eqdata` to be used for prices (Normally 'Adj Close').
Returns
--------
labels : DataFrame
Growth labels for the specified period
skipatend : int
Number of rows skipped at the end of `eqdata` for the given labels.
Used to synchronize labels and features.
Examples
---------------
>>> from functools import partial
>>> features, labels = pn.data.labeledfeatures(eqdata, 256,
... partial(pn.data.lab.growth, 32, 'Adj Close'))
""" |
size = len(eqdata.index)
labeldata = eqdata.loc[:, pricecol].values[interval:] /\
eqdata.loc[:, pricecol].values[:(size - interval)]
df = pd.DataFrame(data=labeldata, index=eqdata.index[:(size - interval)],
columns=['Growth'], dtype='float64')
return df |
<SYSTEM_TASK:>
simple moving average
<END_TASK>
<USER_TASK:>
Description:
def sma(eqdata, **kwargs):
"""
simple moving average
Parameters
----------
eqdata : DataFrame
window : int, optional
Lookback period for sma. Defaults to 20.
outputcol : str, optional
Column to use for output. Defaults to 'SMA'.
selection : str, optional
Column of eqdata on which to calculate sma. If
`eqdata` has only 1 column, `selection` is ignored,
and sma is calculated on that column. Defaults
to 'Adj Close'.
""" |
if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
_selection = kwargs.get('selection', 'Adj Close')
_eqdata = eqdata.loc[:, _selection]
else:
_eqdata = eqdata
_window = kwargs.get('window', 20)
_outputcol = kwargs.get('outputcol', 'SMA')
ret = pd.DataFrame(index=_eqdata.index, columns=[_outputcol], dtype=np.float64)
ret.loc[:, _outputcol] = _eqdata.rolling(window=_window, center=False).mean().values.flatten()
return ret |
<SYSTEM_TASK:>
Exponential moving average with the given span.
<END_TASK>
<USER_TASK:>
Description:
def ema(eqdata, **kwargs):
"""
Exponential moving average with the given span.
Parameters
----------
eqdata : DataFrame
Must have exactly 1 column on which to calculate EMA
span : int, optional
Span for exponential moving average. Cf. `pandas.stats.moments.ewma
<http://pandas.pydata.org/pandas-docs/stable/generated/pandas.stats.moments.ewma.html>`_ and
`additional Pandas documentation
<http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions>`_.
outputcol : str, optional
Column to use for output. Defaults to 'EMA'.
selection : str, optional
Column of eqdata on which to calculate ema. If
`eqdata` has only 1 column, `selection` is ignored,
and ema is calculated on that column. Defaults
to 'Adj Close'.
Returns
---------
emadf : DataFrame
Exponential moving average using the given `span`.
""" |
if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
_selection = kwargs.get('selection', 'Adj Close')
_eqdata = eqdata.loc[:, _selection]
else:
_eqdata = eqdata
_span = kwargs.get('span', 20)
_col = kwargs.get('outputcol', 'EMA')
_emadf = pd.DataFrame(index=_eqdata.index, columns=[_col], dtype=np.float64)
_emadf.loc[:, _col] = _eqdata.ewm(span=_span, min_periods=0, adjust=True, ignore_na=False).mean().values.flatten()
return _emadf |
<SYSTEM_TASK:>
Growth of exponential moving average.
<END_TASK>
<USER_TASK:>
Description:
def ema_growth(eqdata, **kwargs):
"""
Growth of exponential moving average.
Parameters
----------
eqdata : DataFrame
span : int, optional
Span for exponential moving average. Defaults to 20.
outputcol : str, optional.
Column to use for output. Defaults to 'EMA Growth'.
selection : str, optional
Column of eqdata on which to calculate ema growth. If
`eqdata` has only 1 column, `selection` is ignored,
and ema growth is calculated on that column. Defaults
to 'Adj Close'.
Returns
---------
out : DataFrame
Growth of exponential moving average from one day to next
""" |
_growth_outputcol = kwargs.get('outputcol', 'EMA Growth')
_ema_outputcol = 'EMA'
kwargs['outputcol'] = _ema_outputcol
_emadf = ema(eqdata, **kwargs)
return simple.growth(_emadf, selection=_ema_outputcol, outputcol=_growth_outputcol) |
<SYSTEM_TASK:>
Return the volatility of growth.
<END_TASK>
<USER_TASK:>
Description:
def growth_volatility(eqdata, **kwargs):
"""
Return the volatility of growth.
Note that, like :func:`pynance.tech.simple.growth` but in contrast to
:func:`volatility`, :func:`growth_volatility`
applies directly to a dataframe like that returned by
:func:`pynance.data.retrieve.get`, not necessarily to a single-column dataframe.
Parameters
----------
eqdata : DataFrame
Data from which to extract growth volatility. An exception
will be raised if `eqdata` does not contain a column 'Adj Close'
or an optional name specified by the `selection` parameter.
window : int, optional
Window on which to calculate volatility. Defaults to 20.
selection : str, optional
Column of eqdata on which to calculate volatility of growth. Defaults
to 'Adj Close'
outputcol : str, optional
Column to use for output. Defaults to 'Growth Risk'.
Returns
---------
out : DataFrame
Dataframe showing the volatility of growth over the specified `window`.
""" |
_window = kwargs.get('window', 20)
_selection = kwargs.get('selection', 'Adj Close')
_outputcol = kwargs.get('outputcol', 'Growth Risk')
_growthdata = simple.growth(eqdata, selection=_selection)
return volatility(_growthdata, outputcol=_outputcol, window=_window) |
<SYSTEM_TASK:>
Return values expressed as ratios to the average over some number
<END_TASK>
<USER_TASK:>
Description:
def ratio_to_ave(window, eqdata, **kwargs):
"""
Return values expressed as ratios to the average over some number
of prior sessions.
Parameters
----------
eqdata : DataFrame
Must contain a column with name matching `selection`, or, if
`selection` is not specified, a column named 'Volume'
window : int
Interval over which to calculate the average. Normally 252 (1 year)
selection : str, optional
Column to select for calculating ratio. Defaults to 'Volume'
skipstartrows : int, optional
Rows to skip at beginning in addition to the `window` rows
that must be skipped to get the baseline volume. Defaults to 0.
skipendrows : int, optional
Rows to skip at end. Defaults to 0.
outputcol : str, optional
Name of column in output dataframe. Defaults to 'Ratio to Ave'
Returns
---------
out : DataFrame
""" |
_selection = kwargs.get('selection', 'Volume')
_skipstartrows = kwargs.get('skipstartrows', 0)
_skipendrows = kwargs.get('skipendrows', 0)
_outputcol = kwargs.get('outputcol', 'Ratio to Ave')
_size = len(eqdata.index)
_eqdata = eqdata.loc[:, _selection]
_sma = _eqdata.iloc[:-1 - _skipendrows].rolling(window=window, center=False).mean().values
_outdata = _eqdata.values[window + _skipstartrows:_size - _skipendrows] /\
_sma[window + _skipstartrows - 1:]
_index = eqdata.index[window + _skipstartrows:_size - _skipendrows]
return pd.DataFrame(_outdata, index=_index, columns=[_outputcol], dtype=np.float64) |
<SYSTEM_TASK:>
Run linear regression on the given data.
<END_TASK>
<USER_TASK:>
Description:
def run(features, labels, regularization=0., constfeat=True):
"""
Run linear regression on the given data.
.. versionadded:: 0.5.0
If a regularization parameter is provided, this function
is a simplification and specialization of ridge
regression, as implemented in `scikit-learn
<http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge>`_.
Setting `solver` to `'svd'` in :class:`sklearn.linear_model.Ridge` and equating
our `regularization` with their `alpha` will yield the same results.
Parameters
----------
features : ndarray
Features on which to run linear regression.
labels : ndarray
Labels for the given features. Multiple columns
of labels are allowed.
regularization : float, optional
Regularization parameter. Defaults to 0.
constfeat : bool, optional
Whether or not the first column of features is
the constant feature 1. If True, the first column
will be excluded from regularization. Defaults to True.
Returns
-------
model : ndarray
Regression model for the given data.
""" |
n_col = (features.shape[1] if len(features.shape) > 1 else 1)
reg_matrix = regularization * np.identity(n_col, dtype='float64')
if constfeat:
reg_matrix[0, 0] = 0.
# http://stackoverflow.com/questions/27476933/numpy-linear-regression-with-regularization
return np.linalg.lstsq(features.T.dot(features) + reg_matrix, features.T.dot(labels))[0] |
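A small sketch of fitting and predicting with the returned model; the synthetic data are illustrative only:

import numpy as np

x = np.linspace(0., 1., 50)
features = np.column_stack([np.ones_like(x), x])  # constant feature first (constfeat=True)
labels = 2. + 3. * x                              # noiseless toy target
model = run(features, labels, regularization=0.1)
predictions = features.dot(model)                 # model[0] ~ intercept, model[1] ~ slope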
<SYSTEM_TASK:>
Metrics for evaluating a calendar spread.
<END_TASK>
<USER_TASK:>
Description:
def cal(self, opttype, strike, exp1, exp2):
"""
Metrics for evaluating a calendar spread.
Parameters
------------
opttype : str ('call' or 'put')
Type of option on which to collect data.
strike : numeric
Strike price.
exp1 : date or date str (e.g. '2015-01-01')
Earlier expiration date.
exp2 : date or date str (e.g. '2015-01-01')
Later expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating spread.
""" |
assert pd.Timestamp(exp1) < pd.Timestamp(exp2)
_row1 = _relevant_rows(self.data, (strike, exp1, opttype,),
"No key for {} strike {} {}".format(exp1, strike, opttype))
_row2 = _relevant_rows(self.data, (strike, exp2, opttype,),
"No key for {} strike {} {}".format(exp2, strike, opttype))
_price1 = _getprice(_row1)
_price2 = _getprice(_row2)
_eq = _row1.loc[:, 'Underlying_Price'].values[0]
_qt = _row1.loc[:, 'Quote_Time'].values[0]
_index = ['Near', 'Far', 'Debit', 'Underlying_Price', 'Quote_Time']
_vals = np.array([_price1, _price2, _price2 - _price1, _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value']) |
<SYSTEM_TASK:>
Wrap a function applying to a single column to make a function
<END_TASK>
<USER_TASK:>
Description:
def expand(fn, col, inputtype=pd.DataFrame):
"""
Wrap a function applying to a single column to make a function
applying to a multi-dimensional dataframe or ndarray
Parameters
----------
fn : function
Function that applies to a series or vector.
col : str or int
Index of column to which to apply `fn`.
inputtype : class or type
Type of input to be expected by the wrapped function.
Normally pd.DataFrame or np.ndarray. Defaults to pd.DataFrame.
Returns
----------
wrapped : function
Function that takes an input of type `inputtype` and applies
`fn` to the specified `col`.
""" |
if inputtype == pd.DataFrame:
if isinstance(col, int):
def _wrapper(*args, **kwargs):
return fn(args[0].iloc[:, col], *args[1:], **kwargs)
return _wrapper
def _wrapper(*args, **kwargs):
return fn(args[0].loc[:, col], *args[1:], **kwargs)
return _wrapper
elif inputtype == np.ndarray:
def _wrapper(*args, **kwargs):
return fn(args[0][:, col], *args[1:], **kwargs)
return _wrapper
raise TypeError("invalid input type") |
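A usage sketch wrapping a single-column function for DataFrame and ndarray inputs; the column names and helper are illustrative:

import numpy as np
import pandas as pd

def col_mean(series):
    return series.mean()

df = pd.DataFrame({'Open': [1., 2., 3.], 'Close': [2., 4., 6.]})
close_mean = expand(col_mean, 'Close')                      # DataFrame input (default)
print(close_mean(df))                                       # 4.0
first_col_mean = expand(col_mean, 0, inputtype=np.ndarray)
print(first_col_mean(df.values))                            # mean of the first column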
<SYSTEM_TASK:>
Return false if `eqdata` contains no missing values.
<END_TASK>
<USER_TASK:>
Description:
def has_na(eqdata):
"""
Return false if `eqdata` contains no missing values.
Parameters
----------
eqdata : DataFrame or ndarray
Data to check for missing values (NaN, None)
Returns
----------
answer : bool
False iff `eqdata` contains no missing values.
""" |
if isinstance(eqdata, pd.DataFrame):
_values = eqdata.values
else:
_values = eqdata
return len(_values[pd.isnull(_values)]) > 0 |
<SYSTEM_TASK:>
Prepend the constant feature 1 as first feature and return the modified
<END_TASK>
<USER_TASK:>
Description:
def add_const(features):
"""
Prepend the constant feature 1 as first feature and return the modified
feature set.
Parameters
----------
features : ndarray or DataFrame
""" |
content = np.empty((features.shape[0], features.shape[1] + 1), dtype='float64')
content[:, 0] = 1.
if isinstance(features, np.ndarray):
content[:, 1:] = features
return content
content[:, 1:] = features.iloc[:, :].values
cols = ['Constant'] + features.columns.tolist()
return pd.DataFrame(data=content, index=features.index, columns=cols, dtype='float64') |
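A brief sketch of both input types; the frame is illustrative:

import pandas as pd

raw = pd.DataFrame({'Growth': [1.01, 0.99, 1.02]})
with_const = add_const(raw)       # columns become ['Constant', 'Growth'], first column all 1.0
as_array = add_const(raw.values)  # ndarray in, ndarray out, shape (3, 2)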
<SYSTEM_TASK:>
Generate features from selected columns of a dataframe.
<END_TASK>
<USER_TASK:>
Description:
def fromcols(selection, n_sessions, eqdata, **kwargs):
"""
Generate features from selected columns of a dataframe.
Parameters
----------
selection : list or tuple of str
Columns to be used as features.
n_sessions : int
Number of sessions over which to create features.
eqdata : DataFrame
Data from which to generate feature set. Must contain
as columns the values from which the features are to
be generated.
constfeat : bool, optional
Whether or not the returned features will have the constant
feature.
Returns
----------
features : DataFrame
""" |
_constfeat = kwargs.get('constfeat', True)
_outcols = ['Constant'] if _constfeat else []
_n_rows = len(eqdata.index)
for _col in selection:
_outcols += map(partial(_concat, strval=' ' + _col), range(-n_sessions + 1, 1))
_features = pd.DataFrame(index=eqdata.index[n_sessions - 1:], columns=_outcols, dtype=np.float64)
_offset = 0
if _constfeat:
_features.iloc[:, 0] = 1.
_offset += 1
for _col in selection:
_values = eqdata.loc[:, _col].values
for i in range(n_sessions):
_features.iloc[:, _offset + i] = _values[i:_n_rows - n_sessions + i + 1]
_offset += n_sessions
return _features |
<SYSTEM_TASK:>
Generate features using a list of functions to apply to input data
<END_TASK>
<USER_TASK:>
Description:
def fromfuncs(funcs, n_sessions, eqdata, **kwargs):
"""
Generate features using a list of functions to apply to input data
Parameters
----------
funcs : list of function
Functions to apply to eqdata. Each function is expected
to output a dataframe with index identical to a slice of `eqdata`.
The slice must include at least `eqdata.index[skipatstart + n_sessions - 1:]`.
Each function is also expected to have a function attribute
`title`, which is used to generate the column names of the
output features.
n_sessions : int
Number of sessions over which to create features.
eqdata : DataFrame
Data from which to generate features. The data will often
be retrieved using `pn.get()`.
constfeat : bool, optional
Whether or not the returned features will have the constant
feature.
skipatstart : int, optional
Number of rows to omit at the start of the output DataFrame.
This parameter is necessary if any of the functions requires
a rampup period before returning valid results, e.g. `sma()` or
functions calculating volume relative to a past baseline.
Defaults to 0.
Returns
----------
features : DataFrame
""" |
_skipatstart = kwargs.get('skipatstart', 0)
_constfeat = kwargs.get('constfeat', True)
_outcols = ['Constant'] if _constfeat else []
_n_allrows = len(eqdata.index)
_n_featrows = _n_allrows - _skipatstart - n_sessions + 1
for _func in funcs:
_outcols += map(partial(_concat, strval=' ' + _func.title), range(-n_sessions + 1, 1))
_features = pd.DataFrame(index=eqdata.index[_skipatstart + n_sessions - 1:],
columns=_outcols, dtype=np.float64)
_offset = 0
if _constfeat:
_features.iloc[:, 0] = 1.
_offset += 1
for _func in funcs:
_values = _func(eqdata).values
_n_values = len(_values)
for i in range(n_sessions):
_val_end = _n_values - n_sessions + i + 1
_features.iloc[:, _offset + i] = _values[_val_end - _n_featrows:_val_end]
_offset += n_sessions
return _features |
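A sketch of the `title` attribute convention described above, using `sma` and `simple.growth` from elsewhere in this document; the ticker, date range, and window are assumptions:

import pynance as pn

def sma_feat(eqdata):
    return sma(eqdata, window=20)
sma_feat.title = 'SMA'

def growth_feat(eqdata):
    return simple.growth(eqdata)
growth_feat.title = 'Growth'

eqdata = pn.data.get('ge', '2016')  # hypothetical retrieval of equity data
# skipatstart covers the 20-session sma rampup so every feature value is defined
features = fromfuncs([sma_feat, growth_feat], 3, eqdata, skipatstart=20)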
<SYSTEM_TASK:>
Return the natural log of growth.
<END_TASK>
<USER_TASK:>
Description:
def ln_growth(eqdata, **kwargs):
"""
Return the natural log of growth.
See also
--------
:func:`growth`
""" |
if 'outputcol' not in kwargs:
kwargs['outputcol'] = 'LnGrowth'
return np.log(growth(eqdata, **kwargs)) |
<SYSTEM_TASK:>
Mean squared error of predictions.
<END_TASK>
<USER_TASK:>
Description:
def mse(predicted, actual):
"""
Mean squared error of predictions.
.. versionadded:: 0.5.0
Parameters
----------
predicted : ndarray
Predictions on which to measure error. May
contain a single or multiple column but must
match `actual` in shape.
actual : ndarray
Actual values against which to measure predictions.
Returns
-------
err : ndarray
Mean squared error of predictions relative to actual
values.
""" |
diff = predicted - actual
return np.average(diff * diff, axis=0) |
<SYSTEM_TASK:>
Metrics for covered calls.
<END_TASK>
<USER_TASK:>
Description:
def get(eqprice, callprice, strike, shares=1, buycomm=0., excomm=0., dividend=0.):
"""
Metrics for covered calls.
Parameters
----------
eqprice : float
Price at which stock is purchased.
callprice : float
Price for which call is sold.
strike : float
Strike price of call sold.
shares : int, optional
Number of shares of stock. Defaults to 1.
buycomm : float, optional
Commission paid on total initial purchase.
excomm : float, optional
Commission to be paid if option is exercised.
dividend : float, optional
Total dividends per share expected between purchase and expiration.
Returns
----------
metrics : :class:`pandas.DataFrame`
Investment metrics
Notes
-----
Cf. Lawrence McMillan, Options as a Strategic Investment, 5th ed., p. 43
""" |
_index = ['Eq Cost', 'Option Premium', 'Commission', 'Total Invested', 'Dividends', 'Eq if Ex',
'Comm if Ex', 'Profit if Ex', 'Ret if Ex', 'Profit if Unch', 'Ret if Unch', 'Break_Even Price',
'Protection Pts', 'Protection Pct']
_metrics = pd.DataFrame(index=_index, columns=['Value'])
_shares = float(shares)
_dividends = _shares * dividend
_metrics.loc['Eq Cost', 'Value'] = _eqcost = _shares * eqprice
_metrics.loc['Option Premium', 'Value'] = _optprem = _shares * callprice
_metrics.loc['Commission', 'Value'] = float(buycomm)
_metrics.loc['Total Invested', 'Value'] = _invested = _eqcost - _optprem + buycomm
_metrics.loc['Dividends', 'Value'] = _dividends
_metrics.loc['Eq if Ex', 'Value'] = _eqsale = strike * _shares
_metrics.loc['Comm if Ex', 'Value'] = float(excomm)
_metrics.loc['Profit if Ex', 'Value'] = _profitex = _eqsale + _dividends - _invested - excomm
_metrics.loc['Ret if Ex', 'Value'] = round(_profitex / _invested, _constants.NDIGITS_SIG)
_metrics.loc['Profit if Unch', 'Value'] = _profitunch = _eqcost + _dividends - _invested
_metrics.loc['Ret if Unch', 'Value'] = round(_profitunch / _invested, _constants.NDIGITS_SIG)
_metrics.loc['Break_Even Price', 'Value'] = _breakeven = round((_invested - _dividends) / _shares,
_constants.NDIGITS_SIG)
_metrics.loc['Protection Pts', 'Value'] = _protpts = eqprice - _breakeven
_metrics.loc['Protection Pct', 'Value'] = round(_protpts / eqprice, _constants.NDIGITS_SIG)
return _metrics |
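A worked sketch with illustrative numbers (not taken from the McMillan reference above):

metrics = get(eqprice=43., callprice=3., strike=45., shares=100, buycomm=10., excomm=10., dividend=0.5)
print(metrics.loc['Total Invested', 'Value'])    # 43*100 - 3*100 + 10 = 4010.0
print(metrics.loc['Break_Even Price', 'Value'])  # (4010 - 50) / 100 = 39.6
print(metrics.loc['Protection Pts', 'Value'])    # 43 - 39.6 = 3.4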
<SYSTEM_TASK:>
Return true iff the given date is a business day.
<END_TASK>
<USER_TASK:>
Description:
def is_bday(date, bday=None):
"""
Return true iff the given date is a business day.
Parameters
----------
date : :class:`pandas.Timestamp`
Any value that can be converted to a pandas Timestamp--e.g.,
'2012-05-01', dt.datetime(2012, 5, 1, 3)
bday : :class:`pandas.tseries.offsets.CustomBusinessDay`
Defaults to `CustomBusinessDay(calendar=USFederalHolidayCalendar())`.
Pass this parameter in performance-sensitive contexts, such
as when calling this function in a loop. The creation of the `CustomBusinessDay`
object is the performance bottleneck of this function.
Cf. `pandas.tseries.offsets.CustomBusinessDay
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#custom-business-days-experimental>`_.
Returns
-------
val : bool
True iff `date` is a business day
""" |
_date = Timestamp(date)
if bday is None:
bday = CustomBusinessDay(calendar=USFederalHolidayCalendar())
return _date == (_date + bday) - bday |
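A usage sketch that reuses one `CustomBusinessDay`, following the performance note in the docstring; the dates are illustrative:

from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay

bday = CustomBusinessDay(calendar=USFederalHolidayCalendar())
print(is_bday('2012-05-01', bday=bday))  # True, a regular Tuesday
print(is_bday('2012-07-04', bday=bday))  # False, US federal holiday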
<SYSTEM_TASK:>
Get the relative performance of multiple equities.
<END_TASK>
<USER_TASK:>
Description:
def compare(eq_dfs, columns=None, selection='Adj Close'):
"""
Get the relative performance of multiple equities.
.. versionadded:: 0.5.0
Parameters
----------
eq_dfs : list or tuple of DataFrame
Performance data for multiple equities over
a consistent time frame.
columns : iterable of str, default None
Labels to use for the columns of the output DataFrame.
The labels, if provided, should normally be the names
of the equities whose performance is being compared.
selection : str, default 'Adj Close'
Column containing prices to be compared. Defaults
to 'Adj Close'.
Returns
-------
rel_perf : DataFrame
A DataFrame whose columns contain normalized data
for each equity represented in `eq_dfs`. The initial
price for each equity will be normalized to 1.0.
Examples
--------
.. code-block:: python
import pynance as pn
eqs = ('FSLR', 'SCTY', 'SPWR')
eq_dfs = []
for eq in eqs:
eq_dfs.append(pn.data.get(eq, '2016'))
rel_perf = pn.data.compare(eq_dfs, eqs)
Notes
-----
Each set of data passed in `eq_dfs` is assumed to have
the same start and end dates as the other data sets.
""" |
content = np.empty((eq_dfs[0].shape[0], len(eq_dfs)), dtype=np.float64)
rel_perf = pd.DataFrame(content, eq_dfs[0].index, columns, dtype=np.float64)
for i in range(len(eq_dfs)):
rel_perf.iloc[:, i] = eq_dfs[i].loc[:, selection] / eq_dfs[i].iloc[0].loc[selection]
return rel_perf |
<SYSTEM_TASK:>
Metrics for evaluating a diagonal butterfly spread.
<END_TASK>
<USER_TASK:>
Description:
def diagbtrfly(self, lowstrike, midstrike, highstrike, expiry1, expiry2):
"""
Metrics for evaluating a diagonal butterfly spread.
Parameters
------------
lowstrike : numeric
Lower strike price. To be used for far put.
midstrike : numeric
Middle strike price. To be used for near straddle.
Typically at the money.
highstrike : numeric
Higher strike price. To be used for far call.
expiry1 : date or date str (e.g. '2015-01-01')
Earlier expiration date.
expiry2 : date or date str (e.g. '2015-01-01')
Later expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating spread.
""" |
assert lowstrike < midstrike
assert midstrike < highstrike
assert pd.Timestamp(expiry1) < pd.Timestamp(expiry2)
_rows1 = {}
_rows2 = {}
_prices1 = {}
_prices2 = {}
_index = ['Straddle Call', 'Straddle Put', 'Straddle Total', 'Far Call', 'Far Put', 'Far Total',
'Straddle to Far Ratio', 'Credit', 'Underlying_Price', 'Quote_Time']
_metrics = pd.DataFrame(index=_index, columns=['Value'])
_errmsg = "No key for {} strike {} {}"
_opttype = 'call'
_rows1[_opttype] = _relevant_rows(self.data, (midstrike, expiry1, _opttype),
_errmsg.format(expiry1, midstrike, _opttype))
_prices1[_opttype] = _getprice(_rows1[_opttype])
_rows2[_opttype] = _relevant_rows(self.data, (highstrike, expiry2, _opttype),
_errmsg.format(expiry2, highstrike, _opttype))
_prices2[_opttype] = _getprice(_rows2[_opttype])
_metrics.loc['Straddle Call', 'Value'] = _prices1[_opttype]
_metrics.loc['Far Call', 'Value'] = _prices2[_opttype]
_metrics.loc['Underlying_Price', 'Value'], _metrics.loc['Quote_Time', 'Value'] =\
_getkeys(_rows1[_opttype], ['Underlying_Price', 'Quote_Time'])
_opttype = 'put'
_rows1[_opttype] = _relevant_rows(self.data, (midstrike, expiry1, _opttype),
_errmsg.format(expiry1, midstrike, _opttype))
_prices1[_opttype] = _getprice(_rows1[_opttype])
_rows2[_opttype] = _relevant_rows(self.data, (lowstrike, expiry2, _opttype),
_errmsg.format(expiry2, lowstrike, _opttype))
_prices2[_opttype] = _getprice(_rows2[_opttype])
_metrics.loc['Straddle Put', 'Value'] = _prices1[_opttype]
_metrics.loc['Far Put', 'Value'] = _prices2[_opttype]
_metrics.loc['Straddle Total', 'Value'] = _neartot = sum(_prices1.values())
_metrics.loc['Far Total', 'Value'] = _fartot = sum(_prices2.values())
_metrics.loc['Straddle to Far Ratio', 'Value'] = _neartot / _fartot
_metrics.loc['Credit', 'Value'] = _neartot - _fartot
return _metrics |
<SYSTEM_TASK:>
Show expiration dates, equity price, quote time.
<END_TASK>
<USER_TASK:>
Description:
def info(self):
"""
Show expiration dates, equity price, quote time.
Returns
-------
self : :class:`~pynance.opt.core.Options`
Returns a reference to the calling object to allow
chaining.
expiries : :class:`pandas.tseries.index.DatetimeIndex`
Examples
--------
>>> fopt, fexp = pn.opt.get('f').info()
Expirations:
...
Stock: 16.25
Quote time: 2015-03-01 16:00
""" |
print("Expirations:")
_i = 0
for _datetime in self.data.index.levels[1].to_pydatetime():
print("{:2d} {}".format(_i, _datetime.strftime('%Y-%m-%d')))
_i += 1
print("Stock: {:.2f}".format(self.data.iloc[0].loc['Underlying_Price']))
print("Quote time: {}".format(self.quotetime().strftime('%Y-%m-%d %H:%M%z')))
return self, self.exps() |
<SYSTEM_TASK:>
Return the array as a list of rows.
<END_TASK>
<USER_TASK:>
Description:
def tolist(self):
"""
Return the array as a list of rows.
Each row is a `dict` of values. Facilitates inserting data into a database.
.. versionadded:: 0.3.1
Returns
-------
quotes : list
A list in which each entry is a dictionary representing
a single options quote.
""" |
return [_todict(key, self.data.loc[key, :]) for key in self.data.index] |
<SYSTEM_TASK:>
Generate a unique username
<END_TASK>
<USER_TASK:>
Description:
def _generate_username(self):
""" Generate a unique username """ |
while True:
# Generate a UUID username, removing dashes and the last 2 chars
# to make it fit into the 30 char User.username field. Gracefully
# handle any unlikely, but possible duplicate usernames.
username = str(uuid.uuid4())
username = username.replace('-', '')
username = username[:-2]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username |
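A quick standalone check of the truncation logic above (illustration only, independent of Django):
import uuid
# A uuid4 rendered without dashes is 32 hex characters; dropping the last
# two leaves a 30-character candidate, matching the legacy 30-char limit
# of the Django User.username field.
candidate = str(uuid.uuid4()).replace('-', '')[:-2]
assert len(candidate) == 30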
<SYSTEM_TASK:>
Updates model cache by generating a new key for the model
<END_TASK>
<USER_TASK:>
Description:
def update_model_cache(table_name):
"""
Updates model cache by generating a new key for the model
""" |
model_cache_info = ModelCacheInfo(table_name, uuid.uuid4().hex)
model_cache_backend.share_model_cache_info(model_cache_info) |
<SYSTEM_TASK:>
Signal receiver for models to invalidate model cache of sender and related models.
<END_TASK>
<USER_TASK:>
Description:
def invalidate_model_cache(sender, instance, **kwargs):
"""
Signal receiver for models to invalidate model cache of sender and related models.
Model cache is invalidated by generating new key for each model.
Parameters
~~~~~~~~~~
sender
The model class
instance
The actual instance being saved.
""" |
logger.debug('Received post_save/post_delete signal from sender {0}'.format(sender))
if django.VERSION >= (1, 8):
related_tables = set(
[f.related_model._meta.db_table for f in sender._meta.get_fields()
if f.related_model is not None
and (((f.one_to_many or f.one_to_one) and f.auto_created)
or f.many_to_one or (f.many_to_many and not f.auto_created))])
else:
related_tables = set([rel.model._meta.db_table for rel in sender._meta.get_all_related_objects()])
# temporary fix for m2m relations with an intermediate model, goes away after better join caching
related_tables |= set([field.rel.to._meta.db_table for field in sender._meta.fields if issubclass(type(field), RelatedField)])
logger.debug('Related tables of sender {0} are {1}'.format(sender, related_tables))
update_model_cache(sender._meta.db_table)
for related_table in related_tables:
update_model_cache(related_table) |
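A minimal sketch of how such a receiver is typically wired up; where this project actually connects the signals is not shown in this excerpt.
from django.db.models.signals import post_save, post_delete
# Saving or deleting any instance regenerates the cache keys for its
# table and for the related tables computed in the receiver above.
post_save.connect(invalidate_model_cache)
post_delete.connect(invalidate_model_cache)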
<SYSTEM_TASK:>
Signal receiver for models to invalidate model cache for many-to-many relationship.
<END_TASK>
<USER_TASK:>
Description:
def invalidate_m2m_cache(sender, instance, model, **kwargs):
"""
Signal receiver for models to invalidate model cache for many-to-many relationship.
Parameters
~~~~~~~~~~
sender
The model class
instance
The instance whose many-to-many relation is updated.
model
The class of the objects that are added to, removed from or cleared from the relation.
""" |
logger.debug('Received m2m_changed signals from sender {0}'.format(sender))
update_model_cache(instance._meta.db_table)
update_model_cache(model._meta.db_table) |
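The many-to-many counterpart is connected the same way (again a sketch; the actual wiring may differ):
from django.db.models.signals import m2m_changed
# Regenerate keys for both sides of the relation whenever it changes.
m2m_changed.connect(invalidate_m2m_cache)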
<SYSTEM_TASK:>
Generate cache key for the current query. If a new key is created for the model it is
<END_TASK>
<USER_TASK:>
Description:
def generate_key(self):
"""
Generate cache key for the current query. If a new key is created for the model it is
then shared with other consumers.
""" |
sql = self.sql()
key, created = self.get_or_create_model_key()
if created:
db_table = self.model._meta.db_table
logger.debug('created new key {0} for model {1}'.format(key, db_table))
model_cache_info = ModelCacheInfo(db_table, key)
model_cache_backend.share_model_cache_info(model_cache_info)
query_key = u'{model_key}{qs}{db}'.format(model_key=key,
qs=sql,
db=self.db)
key = hashlib.md5(query_key.encode('utf-8')).hexdigest()
return key |
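The key scheme can be illustrated in isolation: any change to the per-model key, the SQL text, or the database alias yields a different digest. The inputs below are placeholders.
import hashlib
# Placeholder values standing in for the per-model key, the query SQL,
# and the database alias.
query_key = u'{model_key}{qs}{db}'.format(
    model_key='5f2c0a', qs='SELECT * FROM app_item', db='default')
cache_key = hashlib.md5(query_key.encode('utf-8')).hexdigest()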
<SYSTEM_TASK:>
Get or create key for the model.
<END_TASK>
<USER_TASK:>
Description:
def get_or_create_model_key(self):
"""
Get or create key for the model.
Returns
~~~~~~~
(model_key, boolean) tuple
""" |
model_cache_info = model_cache_backend.retrieve_model_cache_info(self.model._meta.db_table)
if not model_cache_info:
return uuid.uuid4().hex, True
return model_cache_info.table_key, False |
<SYSTEM_TASK:>
Invalidate model cache by generating new key for the model.
<END_TASK>
<USER_TASK:>
Description:
def invalidate_model_cache(self):
"""
Invalidate model cache by generating new key for the model.
""" |
logger.info('Invalidating cache for table {0}'.format(self.model._meta.db_table))
if django.VERSION >= (1, 8):
related_tables = set(
[f.related_model._meta.db_table for f in self.model._meta.get_fields()
if ((f.one_to_many or f.one_to_one) and f.auto_created)
or f.many_to_one or (f.many_to_many and not f.auto_created)])
else:
related_tables = set([rel.model._meta.db_table for rel in self.model._meta.get_all_related_objects()])
# temporary fix for m2m relations with an intermediate model, goes away after better join caching
related_tables |= set([field.rel.to._meta.db_table for field in self.model._meta.fields if issubclass(type(field), RelatedField)])
logger.debug('Related tables of model {0} are {1}'.format(self.model, related_tables))
update_model_cache(self.model._meta.db_table)
for related_table in related_tables:
update_model_cache(related_table) |
<SYSTEM_TASK:>
Import a file that will trigger the population of Orca.
<END_TASK>
<USER_TASK:>
Description:
def import_file(filename):
"""
Import a file that will trigger the population of Orca.
Parameters
----------
filename : str
""" |
pathname, filename = os.path.split(filename)
modname = re.match(
r'(?P<modname>\w+)\.py', filename).group('modname')
file, path, desc = imp.find_module(modname, [pathname])
try:
imp.load_module(modname, file, path, desc)
finally:
file.close() |
<SYSTEM_TASK:>
Decorator that will check whether the "table_name" keyword argument
<END_TASK>
<USER_TASK:>
Description:
def check_is_table(func):
"""
Decorator that will check whether the "table_name" keyword argument
to the wrapped function matches a registered Orca table.
""" |
@wraps(func)
def wrapper(**kwargs):
if not orca.is_table(kwargs['table_name']):
abort(404)
return func(**kwargs)
return wrapper |
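A hypothetical Flask view showing how the decorator guards a table lookup; the route path, the view name, and the assumption that the module exposes a Flask app object named app are all illustrative, not confirmed by this excerpt.
@app.route('/tables/<table_name>/columns')
@check_is_table
def table_columns(table_name):
    # Only reached when the table is registered; otherwise the decorator
    # has already aborted with a 404.
    return jsonify(columns=orca.get_table(table_name).columns)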
<SYSTEM_TASK:>
Decorator that will check whether the "table_name" and "col_name"
<END_TASK>
<USER_TASK:>
Description:
def check_is_column(func):
"""
Decorator that will check whether the "table_name" and "col_name"
keyword arguments to the wrapped function match a registered Orca
table and column.
""" |
@wraps(func)
def wrapper(**kwargs):
table_name = kwargs['table_name']
col_name = kwargs['col_name']
if not orca.is_table(table_name):
abort(404)
if col_name not in orca.get_table(table_name).columns:
abort(404)
return func(**kwargs)
return wrapper |
<SYSTEM_TASK:>
Decorator that will check whether the "inj_name" keyword argument to
<END_TASK>
<USER_TASK:>
Description:
def check_is_injectable(func):
"""
Decorator that will check whether the "inj_name" keyword argument to
the wrapped function matches a registered Orca injectable.
""" |
@wraps(func)
def wrapper(**kwargs):
name = kwargs['inj_name']
if not orca.is_injectable(name):
abort(404)
return func(**kwargs)
return wrapper |
<SYSTEM_TASK:>
All tables, columns, steps, injectables and broadcasts registered with
<END_TASK>
<USER_TASK:>
Description:
def schema():
"""
All tables, columns, steps, injectables and broadcasts registered with
Orca. Includes local columns on tables.
""" |
tables = orca.list_tables()
cols = {t: orca.get_table(t).columns for t in tables}
steps = orca.list_steps()
injectables = orca.list_injectables()
broadcasts = orca.list_broadcasts()
return jsonify(
tables=tables, columns=cols, steps=steps, injectables=injectables,
broadcasts=broadcasts) |
<SYSTEM_TASK:>
Returns the first five rows of a table as JSON. Includes all columns.
<END_TASK>
<USER_TASK:>
Description:
def table_preview(table_name):
"""
Returns the first five rows of a table as JSON. Includes all columns.
Uses Pandas' "split" JSON format.
""" |
preview = orca.get_table(table_name).to_frame().head()
return (
preview.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) |
<SYSTEM_TASK:>
Return summary statistics of a table as JSON. Includes all columns.
<END_TASK>
<USER_TASK:>
Description:
def table_describe(table_name):
"""
Return summary statistics of a table as JSON. Includes all columns.
Uses Pandas' "split" JSON format.
""" |
desc = orca.get_table(table_name).to_frame().describe()
return (
desc.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) |
<SYSTEM_TASK:>
Get the source of a table function.
<END_TASK>
<USER_TASK:>
Description:
def table_definition(table_name):
"""
Get the source of a table function.
If a table is a registered DataFrame and not a function then all that is
returned is {'type': 'dataframe'}.
If the table is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
""" |
if orca.table_type(table_name) == 'dataframe':
return jsonify(type='dataframe')
filename, lineno, source = \
orca.get_raw_table(table_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html) |
<SYSTEM_TASK:>
Perform a groupby on a table and return an aggregation on a single column.
<END_TASK>
<USER_TASK:>
Description:
def table_groupbyagg(table_name):
"""
Perform a groupby on a table and return an aggregation on a single column.
This depends on some request parameters in the URL.
"column" and "agg" must always be present, and one of "by" or "level"
must be present. "column" is the table column on which aggregation will
be performed, "agg" is the aggregation that will be performed, and
"by"/"level" define how to group the data.
Supported "agg" parameters are: mean, median, std, sum, and size.
""" |
table = orca.get_table(table_name)
# column to aggregate
column = request.args.get('column', None)
if not column or column not in table.columns:
abort(400)
# column or index level to group by
by = request.args.get('by', None)
level = request.args.get('level', None)
if (not by and not level) or (by and level):
abort(400)
# aggregation type
agg = request.args.get('agg', None)
if not agg or agg not in _GROUPBY_AGG_MAP:
abort(400)
column = table.get_column(column)
# level can either be an integer level number or a string level name.
# try converting to integer, but if that doesn't work
# we go ahead with the string.
if level:
try:
level = int(level)
except ValueError:
pass
gby = column.groupby(level=level)
else:
by = table.get_column(by)
gby = column.groupby(by)
result = _GROUPBY_AGG_MAP[agg](gby)
return (
result.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) |
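For instance, a client might request the mean of one column grouped by another. The host, route path, and table/column names below are assumptions for illustration only.
import requests
# Hypothetical request: mean household income grouped by zone_id.
resp = requests.get(
    'http://localhost:5000/tables/households/groupbyagg',
    params={'column': 'income', 'agg': 'mean', 'by': 'zone_id'})
print(resp.json())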
<SYSTEM_TASK:>
Return the first ten elements of a column as JSON in Pandas'
<END_TASK>
<USER_TASK:>
Description:
def column_preview(table_name, col_name):
"""
Return the first ten elements of a column as JSON in Pandas'
"split" format.
""" |
col = orca.get_table(table_name).get_column(col_name).head(10)
return (
col.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) |
<SYSTEM_TASK:>
Get the source of a column function.
<END_TASK>
<USER_TASK:>
Description:
def column_definition(table_name, col_name):
"""
Get the source of a column function.
If a column is a registered Series and not a function then all that is
returned is {'type': 'series'}.
If the column is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
""" |
col_type = orca.get_table(table_name).column_type(col_name)
if col_type != 'function':
return jsonify(type=col_type)
filename, lineno, source = \
orca.get_raw_column(table_name, col_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html) |
<SYSTEM_TASK:>
Returns the type and repr of an injectable. JSON response has
<END_TASK>
<USER_TASK:>
Description:
def injectable_repr(inj_name):
"""
Returns the type and repr of an injectable. JSON response has
"type" and "repr" keys.
""" |
i = orca.get_injectable(inj_name)
return jsonify(type=str(type(i)), repr=repr(i)) |
<SYSTEM_TASK:>
Get the source of an injectable function.
<END_TASK>
<USER_TASK:>
Description:
def injectable_definition(inj_name):
"""
Get the source of an injectable function.
If an injectable is a registered Python variable and not a function
then all that is returned is {'type': 'variable'}.
If the injectable is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
""" |
inj_type = orca.injectable_type(inj_name)
if inj_type == 'variable':
return jsonify(type='variable')
else:
filename, lineno, source = \
orca.get_injectable_func_source_data(inj_name)
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html) |
<SYSTEM_TASK:>
List all registered broadcasts as a list of objects with
<END_TASK>
<USER_TASK:>
Description:
def list_broadcasts():
"""
List all registered broadcasts as a list of objects with
keys "cast" and "onto".
""" |
casts = [{'cast': b[0], 'onto': b[1]} for b in orca.list_broadcasts()]
return jsonify(broadcasts=casts) |
<SYSTEM_TASK:>
Return the definition of a broadcast as an object with keys
<END_TASK>
<USER_TASK:>
Description:
def broadcast_definition(cast_name, onto_name):
"""
Return the definition of a broadcast as an object with keys
"cast", "onto", "cast_on", "onto_on", "cast_index", and "onto_index".
These are the same as the arguments to the ``broadcast`` function.
""" |
if not orca.is_broadcast(cast_name, onto_name):
abort(404)
b = orca.get_broadcast(cast_name, onto_name)
return jsonify(
cast=b.cast, onto=b.onto, cast_on=b.cast_on, onto_on=b.onto_on,
cast_index=b.cast_index, onto_index=b.onto_index) |
<SYSTEM_TASK:>
Get the source of a step function. Returned object has keys
<END_TASK>
<USER_TASK:>
Description:
def step_definition(step_name):
"""
Get the source of a step function. Returned object has keys
"filename", "lineno", "text" and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments.
""" |
if not orca.is_step(step_name):
abort(404)
filename, lineno, source = \
orca.get_step(step_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(filename=filename, lineno=lineno, text=source, html=html) |
<SYSTEM_TASK:>
Add a logging handler to Orca.
<END_TASK>
<USER_TASK:>
Description:
def _add_log_handler(
handler, level=None, fmt=None, datefmt=None, propagate=None):
"""
Add a logging handler to Orca.
Parameters
----------
handler : logging.Handler subclass
level : int, optional
An optional logging level that will apply only to this stream
handler.
fmt : str, optional
An optional format string that will be used for the log
messages.
datefmt : str, optional
An optional format string for formatting dates in the log
messages.
propagate : bool, optional
Whether the Orca logger should propagate. If None the
propagation will not be modified, otherwise it will be set
to this value.
""" |
if not fmt:
fmt = US_LOG_FMT
if not datefmt:
datefmt = US_LOG_DATE_FMT
handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
if level is not None:
handler.setLevel(level)
logger = logging.getLogger('orca')
logger.addHandler(handler)
if propagate is not None:
logger.propagate = propagate |
<SYSTEM_TASK:>
Send log messages to the console.
<END_TASK>
<USER_TASK:>
Description:
def log_to_stream(level=None, fmt=None, datefmt=None):
"""
Send log messages to the console.
Parameters
----------
level : int, optional
An optional logging level that will apply only to this stream
handler.
fmt : str, optional
An optional format string that will be used for the log
messages.
datefmt : str, optional
An optional format string for formatting dates in the log
messages.
""" |
_add_log_handler(
logging.StreamHandler(), fmt=fmt, datefmt=datefmt, propagate=False) |
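A minimal usage sketch:
import logging
# Route DEBUG-level Orca messages to the console with the default format.
log_to_stream(level=logging.DEBUG)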
<SYSTEM_TASK:>
Clear any and all stored state from Orca.
<END_TASK>
<USER_TASK:>
Description:
def clear_all():
"""
Clear any and all stored state from Orca.
""" |
_TABLES.clear()
_COLUMNS.clear()
_STEPS.clear()
_BROADCASTS.clear()
_INJECTABLES.clear()
_TABLE_CACHE.clear()
_COLUMN_CACHE.clear()
_INJECTABLE_CACHE.clear()
for m in _MEMOIZED.values():
m.value.clear_cached()
_MEMOIZED.clear()
logger.debug('pipeline state cleared') |
<SYSTEM_TASK:>
Map labels and expressions to registered variables.
<END_TASK>
<USER_TASK:>
Description:
def _collect_variables(names, expressions=None):
"""
Map labels and expressions to registered variables.
Handles argument matching.
Example:
_collect_variables(names=['zones', 'zone_id'],
expressions=['parcels.zone_id'])
Would return a dict representing:
{'zones': <DataFrameWrapper for zones>,
'zone_id': <pandas.Series for parcels.zone_id>}
Parameters
----------
names : list of str
List of registered variable names and/or labels.
If mixing names and labels, labels must come at the end.
expressions : list of str, optional
List of registered variable expressions for labels defined
at end of `names`. Length must match the number of labels.
Returns
-------
variables : dict
Keys match `names`. Values correspond to registered variables,
which may be wrappers or evaluated functions if appropriate.
""" |
# Map registered variable labels to expressions.
if not expressions:
expressions = []
offset = len(names) - len(expressions)
labels_map = dict(tz.concatv(
tz.compatibility.zip(names[:offset], names[:offset]),
tz.compatibility.zip(names[offset:], expressions)))
all_variables = tz.merge(_INJECTABLES, _TABLES)
variables = {}
for label, expression in labels_map.items():
# In the future, more registered variable expressions could be
# supported. Currently supports names of registered variables
# and references to table columns.
if '.' in expression:
# Registered variable expression refers to column.
table_name, column_name = expression.split('.')
table = get_table(table_name)
variables[label] = table.get_column(column_name)
else:
thing = all_variables[expression]
if isinstance(thing, (_InjectableFuncWrapper, TableFuncWrapper)):
# Registered variable object is function.
variables[label] = thing()
else:
variables[label] = thing
return variables |
<SYSTEM_TASK:>
Register a table with Orca.
<END_TASK>
<USER_TASK:>
Description:
def add_table(
table_name, table, cache=False, cache_scope=_CS_FOREVER,
copy_col=True):
"""
Register a table with Orca.
Parameters
----------
table_name : str
Should be globally unique to this table.
table : pandas.DataFrame or function
If a function, the function should return a DataFrame.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
cache : bool, optional
Whether to cache the results of a provided callable. Does not
apply if `table` is a DataFrame.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
copy_col : bool, optional
Whether to return copies when evaluating columns.
Returns
-------
wrapped : `DataFrameWrapper` or `TableFuncWrapper`
""" |
if isinstance(table, Callable):
table = TableFuncWrapper(table_name, table, cache=cache,
cache_scope=cache_scope, copy_col=copy_col)
else:
table = DataFrameWrapper(table_name, table, copy_col=copy_col)
# clear any cached data from a previously registered table
table.clear_cached()
logger.debug('registering table {!r}'.format(table_name))
_TABLES[table_name] = table
return table |
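A minimal registration sketch using a made-up table name:
import pandas as pd
# Register a plain DataFrame; evaluating its columns returns copies
# because copy_col defaults to True.
households = pd.DataFrame({'income': [40000, 90000, 65000]})
add_table('households', households)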
<SYSTEM_TASK:>
Decorates functions that return DataFrames.
<END_TASK>
<USER_TASK:>
Description:
def table(
table_name=None, cache=False, cache_scope=_CS_FOREVER, copy_col=True):
"""
Decorates functions that return DataFrames.
Decorator version of `add_table`. Table name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
""" |
def decorator(func):
if table_name:
name = table_name
else:
name = func.__name__
add_table(
name, func, cache=cache, cache_scope=cache_scope,
copy_col=copy_col)
return func
return decorator |
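The decorator form of the same registration (a sketch; the table name defaults to the function name):
import pandas as pd
@table(cache=True)
def zones():
    # Re-evaluated only when the cache is cleared, because cache=True.
    return pd.DataFrame({'area': [1.5, 2.0]})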
<SYSTEM_TASK:>
Returns the type of a registered table.
<END_TASK>
<USER_TASK:>
Description:
def table_type(table_name):
"""
Returns the type of a registered table.
The type can be either "dataframe" or "function".
Parameters
----------
table_name : str
Returns
-------
table_type : {'dataframe', 'function'}
""" |
table = get_raw_table(table_name)
if isinstance(table, DataFrameWrapper):
return 'dataframe'
elif isinstance(table, TableFuncWrapper):
return 'function' |
<SYSTEM_TASK:>
Add a new column to a table from a Series or callable.
<END_TASK>
<USER_TASK:>
Description:
def add_column(
table_name, column_name, column, cache=False, cache_scope=_CS_FOREVER):
"""
Add a new column to a table from a Series or callable.
Parameters
----------
table_name : str
Table with which the column will be associated.
column_name : str
Name for the column.
column : pandas.Series or callable
Series should have an index matching the table to which it
is being added. If a callable, the function's argument
names and keyword argument values will be matched to
registered variables when the function needs to be
evaluated by Orca. The function should return a Series.
cache : bool, optional
Whether to cache the results of a provided callable. Does not
apply if `column` is a Series.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
""" |
if isinstance(column, Callable):
column = \
_ColumnFuncWrapper(
table_name, column_name, column,
cache=cache, cache_scope=cache_scope)
else:
column = _SeriesWrapper(table_name, column_name, column)
# clear any cached data from a previously registered column
column.clear_cached()
logger.debug('registering column {!r} on table {!r}'.format(
column_name, table_name))
_COLUMNS[(table_name, column_name)] = column
return column |
<SYSTEM_TASK:>
Decorates functions that return a Series.
<END_TASK>
<USER_TASK:>
Description:
def column(table_name, column_name=None, cache=False, cache_scope=_CS_FOREVER):
"""
Decorates functions that return a Series.
Decorator version of `add_column`. Series index must match
the named table. Column name defaults to name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
The index of the returned Series must match the named table.
""" |
def decorator(func):
if column_name:
name = column_name
else:
name = func.__name__
add_column(
table_name, name, func, cache=cache, cache_scope=cache_scope)
return func
return decorator |
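A hedged example registering a derived column on the hypothetical 'households' table from the earlier sketch; the injected argument resolves to the registered table wrapper.
@column('households')
def income_doubled(households):
    # The returned Series shares its index with the 'households' table.
    return households.get_column('income') * 2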
<SYSTEM_TASK:>
Return all of the columns registered for a given table.
<END_TASK>
<USER_TASK:>
Description:
def _columns_for_table(table_name):
"""
Return all of the columns registered for a given table.
Parameters
----------
table_name : str
Returns
-------
columns : dict of column wrappers
Keys will be column names.
""" |
return {cname: col
for (tname, cname), col in _COLUMNS.items()
if tname == table_name} |
<SYSTEM_TASK:>
Get a wrapped, registered column.
<END_TASK>
<USER_TASK:>
Description:
def get_raw_column(table_name, column_name):
"""
Get a wrapped, registered column.
This function cannot return columns that are part of wrapped
DataFrames, it's only for columns registered directly through Orca.
Parameters
----------
table_name : str
column_name : str
Returns
-------
wrapped : _SeriesWrapper or _ColumnFuncWrapper
""" |
try:
return _COLUMNS[(table_name, column_name)]
except KeyError:
raise KeyError('column {!r} not found for table {!r}'.format(
column_name, table_name)) |
<SYSTEM_TASK:>
Wraps a function for memoization and ties its cache into the
<END_TASK>
<USER_TASK:>
Description:
def _memoize_function(f, name, cache_scope=_CS_FOREVER):
"""
Wraps a function for memoization and ties its cache into the
Orca caching system.
Parameters
----------
f : function
name : str
Name of injectable.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
""" |
cache = {}
@wraps(f)
def wrapper(*args, **kwargs):
try:
cache_key = (
args or None, frozenset(kwargs.items()) if kwargs else None)
in_cache = cache_key in cache
except TypeError:
raise TypeError(
'function arguments must be hashable for memoization')
if _CACHING and in_cache:
return cache[cache_key]
else:
result = f(*args, **kwargs)
cache[cache_key] = result
return result
wrapper.__wrapped__ = f
wrapper.cache = cache
wrapper.clear_cached = lambda: cache.clear()
_MEMOIZED[name] = CacheItem(name, wrapper, cache_scope)
return wrapper |
<SYSTEM_TASK:>
Add a value that will be injected into other functions.
<END_TASK>
<USER_TASK:>
Description:
def add_injectable(
name, value, autocall=True, cache=False, cache_scope=_CS_FOREVER,
memoize=False):
"""
Add a value that will be injected into other functions.
Parameters
----------
name : str
value
If a callable and `autocall` is True then the function's
argument names and keyword argument values will be matched
to registered variables when the function needs to be
evaluated by Orca. The return value will
be passed to any functions using this injectable. In all other
cases, `value` will be passed through untouched.
autocall : bool, optional
Set to True to have injectable functions automatically called
(with argument matching) and the result injected instead of
the function itself.
cache : bool, optional
Whether to cache the return value of an injectable function.
Only applies when `value` is a callable and `autocall` is True.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
memoize : bool, optional
If autocall is False it is still possible to cache function results
by setting this flag to True. Cached values are stored in a dictionary
keyed by argument values, so the argument values must be hashable.
Memoized functions have their caches cleared according to the same
rules as universal caching.
""" |
if isinstance(value, Callable):
if autocall:
value = _InjectableFuncWrapper(
name, value, cache=cache, cache_scope=cache_scope)
# clear any cached data from a previously registered value
value.clear_cached()
elif not autocall and memoize:
value = _memoize_function(value, name, cache_scope=cache_scope)
logger.debug('registering injectable {!r}'.format(name))
_INJECTABLES[name] = value |
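Two small sketches, one plain value and one automatically called function; the names and the 'households' table are illustrative.
add_injectable('settings', {'step_size': 1})
def total_income(households):
    # Called with argument matching: 'households' resolves to the
    # registered table of the same name.
    return households.get_column('income').sum()
add_injectable('total_income', total_income, cache=True)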
<SYSTEM_TASK:>
Decorates functions that will be injected into other functions.
<END_TASK>
<USER_TASK:>
Description:
def injectable(
name=None, autocall=True, cache=False, cache_scope=_CS_FOREVER,
memoize=False):
"""
Decorates functions that will be injected into other functions.
Decorator version of `add_injectable`. Name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
""" |
def decorator(func):
if name:
n = name
else:
n = func.__name__
add_injectable(
n, func, autocall=autocall, cache=cache, cache_scope=cache_scope,
memoize=memoize)
return func
return decorator |
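When autocall is disabled the function itself is injected; memoize=True still lets repeated calls with the same hashable arguments hit a cache, as in this sketch.
@injectable(autocall=False, memoize=True)
def distance(origin, destination):
    # Stand-in for an expensive computation; results are cached per
    # (origin, destination) pair until the memoization cache is cleared.
    return abs(origin - destination)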
<SYSTEM_TASK:>
Return data about an injectable function's source, including file name,
<END_TASK>
<USER_TASK:>
Description:
def get_injectable_func_source_data(name):
"""
Return data about an injectable function's source, including file name,
line number, and source code.
Parameters
----------
name : str
Returns
-------
filename : str
lineno : int
The line number on which the function starts.
source : str
""" |
if injectable_type(name) != 'function':
raise ValueError('injectable {!r} is not a function'.format(name))
inj = get_raw_injectable(name)
if isinstance(inj, _InjectableFuncWrapper):
return utils.func_source_data(inj._func)
elif hasattr(inj, '__wrapped__'):
return utils.func_source_data(inj.__wrapped__)
else:
return utils.func_source_data(inj) |
<SYSTEM_TASK:>
Add a step function to Orca.
<END_TASK>
<USER_TASK:>
Description:
def add_step(step_name, func):
"""
Add a step function to Orca.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
Parameters
----------
step_name : str
func : callable
""" |
if isinstance(func, Callable):
logger.debug('registering step {!r}'.format(step_name))
_STEPS[step_name] = _StepFuncWrapper(step_name, func)
else:
raise TypeError('func must be a callable') |
<SYSTEM_TASK:>
Decorates functions that will be called by the `run` function.
<END_TASK>
<USER_TASK:>
Description:
def step(step_name=None):
"""
Decorates functions that will be called by the `run` function.
Decorator version of `add_step`. step name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
""" |
def decorator(func):
if step_name:
name = step_name
else:
name = func.__name__
add_step(name, func)
return func
return decorator |
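A sketch of a step using the same hypothetical table; steps are later looked up and executed by name.
@step()
def report_income(households, iter_var):
    # 'iter_var' injects the current iteration value when running over a
    # sequence of iteration variables.
    print(iter_var, households.get_column('income').mean())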
<SYSTEM_TASK:>
Register a rule for merging two tables by broadcasting one onto
<END_TASK>
<USER_TASK:>
Description:
def broadcast(cast, onto, cast_on=None, onto_on=None,
cast_index=False, onto_index=False):
"""
Register a rule for merging two tables by broadcasting one onto
the other.
Parameters
----------
cast, onto : str
Names of registered tables.
cast_on, onto_on : str, optional
Column names used for merge, equivalent of ``left_on``/``right_on``
parameters of pandas.merge.
cast_index, onto_index : bool, optional
Whether to use table indexes for merge. Equivalent of
``left_index``/``right_index`` parameters of pandas.merge.
""" |
logger.debug(
'registering broadcast of table {!r} onto {!r}'.format(cast, onto))
_BROADCASTS[(cast, onto)] = \
Broadcast(cast, onto, cast_on, onto_on, cast_index, onto_index) |
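A hedged example pairing two hypothetical tables so they can later be merged: zone attributes are broadcast onto parcels via the parcels' zone_id column and the zones index.
# Each parcel row will pick up the attributes of its zone when the two
# tables are merged.
broadcast('zones', 'parcels', cast_index=True, onto_on='zone_id')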