code | docs |
---|---|
def login(self, username, password, application, application_url):
logger.debug(str((username, application, application_url)))
method = self._anaconda_client_api.authenticate
return self._create_worker(method, username, password, application,
application_url) | Login to anaconda cloud. |
def logout(self):
logger.debug('Logout')
method = self._anaconda_client_api.remove_authentication
return self._create_worker(method) | Logout from anaconda cloud. |
def load_repodata(self, filepaths, extra_data=None, metadata=None):
logger.debug(str((filepaths)))
method = self._load_repodata
return self._create_worker(method, filepaths, extra_data=extra_data,
metadata=metadata) | Load all the available package information from downloaded repodata.
Files include repo.continuum.io, additional data provided (anaconda
cloud), and additional metadata, merged into a single set of packages
and apps. |
def prepare_model_data(self, packages, linked, pip=None,
private_packages=None):
logger.debug('')
return self._prepare_model_data(packages, linked, pip=pip,
private_packages=private_packages) | Prepare downloaded package info along with pip packages info. |
def set_domain(self, domain='https://api.anaconda.org'):
logger.debug(str((domain)))
config = binstar_client.utils.get_config()
config['url'] = domain
binstar_client.utils.set_config(config)
self._anaconda_client_api = binstar_client.utils.get_server_api(
token=None, log_level=logging.NOTSET)
return self.user() | Reset current api domain. |
def packages(self, login=None, platform=None, package_type=None,
type_=None, access=None):
logger.debug('')
method = self._anaconda_client_api.user_packages
return self._create_worker(method, login=login, platform=platform,
package_type=package_type,
type_=type_, access=access) | Return all the available packages for a given user.
Parameters
----------
type_: Optional[str]
Only find packages that have this conda `type`, (i.e. 'app').
access : Optional[str]
Only find packages that have this access level (e.g. 'private',
'authenticated', 'public'). |
def _multi_packages(self, logins=None, platform=None, package_type=None,
type_=None, access=None, new_client=True):
private_packages = {}
if not new_client:
time.sleep(0.3)
return private_packages
for login in logins:
data = self._anaconda_client_api.user_packages(
login=login,
platform=platform,
package_type=package_type,
type_=type_,
access=access)
for item in data:
name = item.get('name', '')
public = item.get('public', True)
package_types = item.get('package_types', [])
latest_version = item.get('latest_version', '')
if name and not public and 'conda' in package_types:
if name in private_packages:
versions = private_packages[name].get('versions', [])
new_versions = item.get('versions', [])
vers = sort_versions(list(set(versions +
new_versions)))
private_packages[name]['versions'] = vers
private_packages[name]['latest_version'] = vers[-1]
else:
private_packages[name] = {
'versions': item.get('versions', []),
'app_entry': {},
'type': {},
'size': {},
'latest_version': latest_version, }
return private_packages | Return the private packages for a given set of usernames/logins. |
def multi_packages(self, logins=None, platform=None, package_type=None,
type_=None, access=None):
logger.debug('')
method = self._multi_packages
new_client = True
try:
# Only the newer versions have extra keywords like `access`
self._anaconda_client_api.user_packages(access='private')
except Exception:
new_client = False
return self._create_worker(method, logins=logins,
platform=platform,
package_type=package_type,
type_=type_, access=access,
new_client=new_client) | Return the private packages for a given set of usernames/logins. |
def country(from_key='name', to_key='iso'):
gc = GeonamesCache()
dataset = gc.get_dataset_by_key(gc.get_countries(), from_key)
def mapper(input):
# For country name inputs take the names mapping into account.
if 'name' == from_key:
input = mappings.country_names.get(input, input)
# If there is a record return the demanded attribute.
item = dataset.get(input)
if item:
return item[to_key]
return mapper | Creates and returns a mapper function to access country data.
The mapper function that is returned must be called with one argument. In
the default case you call it with a name and it returns a 3-letter
ISO_3166-1 code, e.g. called with ``Spain`` it would return ``ESP``.
:param from_key: (optional) the country attribute you give as input.
Defaults to ``name``.
:param to_key: (optional) the country attribute you want as output.
Defaults to ``iso``.
:return: mapper
:rtype: function |
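A brief usage sketch of the mapper factory above; the 'Spain' to 'ESP' output follows the docstring's own example, and unknown names fall through to None.

mapper = country(from_key='name', to_key='iso')
print(mapper('Spain'))      # 'ESP', per the docstring's example
print(mapper('Atlantis'))   # None: no matching record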
def get_cities(self):
if self.cities is None:
self.cities = self._load_data(self.cities, 'cities.json')
return self.cities | Get a dictionary of cities keyed by geonameid. |
def get_cities_by_name(self, name):
if name not in self.cities_by_names:
if self.cities_items is None:
self.cities_items = list(self.get_cities().items())
self.cities_by_names[name] = [dict({gid: city})
for gid, city in self.cities_items if city['name'] == name]
return self.cities_by_names[name] | Get a list of city dictionaries with the given name.
City names cannot be used as keys, as they are not unique. |
def _set_repo_urls_from_channels(self, channels):
repos = []
sys_platform = self._conda_api.get_platform()
for channel in channels:
url = '{0}/{1}/repodata.json.bz2'.format(channel, sys_platform)
repos.append(url)
return repos | Convert each channel into a repodata URL for the current platform.
Channels are assumed to be in normalized url form. |
def _check_repos(self, repos):
self._checking_repos = []
self._valid_repos = []
for repo in repos:
worker = self.download_is_valid_url(repo)
worker.sig_finished.connect(self._repos_checked)
worker.repo = repo
self._checking_repos.append(repo) | Check if repodata urls are valid. |
def _repos_checked(self, worker, output, error):
if worker.repo in self._checking_repos:
self._checking_repos.remove(worker.repo)
if output:
self._valid_repos.append(worker.repo)
if len(self._checking_repos) == 0:
self._download_repodata(self._valid_repos) | Callback for _check_repos. |
def _repo_url_to_path(self, repo):
repo = repo.replace('http://', '')
repo = repo.replace('https://', '')
repo = repo.replace('/', '_')
return os.sep.join([self._data_directory, repo]) | Convert a `repo` url to a file path for local storage. |
def _download_repodata(self, checked_repos):
self._files_downloaded = []
self._repodata_files = []
self.__counter = -1
if checked_repos:
for repo in checked_repos:
path = self._repo_url_to_path(repo)
self._files_downloaded.append(path)
self._repodata_files.append(path)
worker = self.download_async(repo, path)
worker.url = repo
worker.path = path
worker.sig_finished.connect(self._repodata_downloaded)
else:
# Empty, maybe there is no internet connection
# Load information from conda-meta and save that file
path = self._get_repodata_from_meta()
self._repodata_files = [path]
self._repodata_downloaded() | Download repodata. |
def _get_repodata_from_meta(self):
path = os.sep.join([self.ROOT_PREFIX, 'conda-meta'])
packages = os.listdir(path)
meta_repodata = {}
for pkg in packages:
if pkg.endswith('.json'):
filepath = os.sep.join([path, pkg])
with open(filepath, 'r') as f:
data = json.load(f)
if 'files' in data:
data.pop('files')
if 'icondata' in data:
data.pop('icondata')
name = pkg.replace('.json', '')
meta_repodata[name] = data
meta_repodata_path = os.sep.join([self._data_directory,
'offline.json'])
repodata = {'info': [],
'packages': meta_repodata}
with open(meta_repodata_path, 'w') as f:
json.dump(repodata, f, sort_keys=True,
indent=4, separators=(',', ': '))
return meta_repodata_path | Generate repodata from local meta files. |
def _repodata_downloaded(self, worker=None, output=None, error=None):
if worker:
self._files_downloaded.remove(worker.path)
if worker.path in self._files_downloaded:
self._files_downloaded.remove(worker.path)
if len(self._files_downloaded) == 0:
self.sig_repodata_updated.emit(list(set(self._repodata_files))) | Callback for _download_repodata. |
def repodata_files(self, channels=None):
if channels is None:
channels = self.conda_get_condarc_channels()
repodata_urls = self._set_repo_urls_from_channels(channels)
repopaths = []
for repourl in repodata_urls:
fullpath = os.sep.join([self._repo_url_to_path(repourl)])
repopaths.append(fullpath)
return repopaths | Return the repodata paths based on `channels` and the `data_directory`.
There is no check for validity here. |
def update_repodata(self, channels=None):
norm_channels = self.conda_get_condarc_channels(channels=channels,
normalize=True)
repodata_urls = self._set_repo_urls_from_channels(norm_channels)
self._check_repos(repodata_urls) | Update repodata from channels or use condarc channels if None. |
def update_metadata(self):
if self._data_directory is None:
raise Exception('Need to call `api.set_data_directory` first.')
metadata_url = 'https://repo.continuum.io/pkgs/metadata.json'
filepath = os.sep.join([self._data_directory, 'metadata.json'])
worker = self.download_requests(metadata_url, filepath)
return worker | Update the metadata available for packages in repo.continuum.io.
Returns a download worker. |
def check_valid_channel(self,
channel,
conda_url='https://conda.anaconda.org'):
if channel.startswith('https://') or channel.startswith('http://'):
url = channel
else:
url = "{0}/{1}".format(conda_url, channel)
if url[-1] == '/':
url = url[:-1]
plat = self.conda_platform()
repodata_url = "{0}/{1}/{2}".format(url, plat, 'repodata.json')
worker = self.download_is_valid_url(repodata_url)
worker.url = url
return worker | Check if channel is valid. |
def _aws_get_instance_by_tag(region, name, tag, raw):
client = boto3.session.Session().client('ec2', region)
matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])
instances = []
[[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned
for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]
return instances | Get all instances matching a tag. |
def aws_get_instances_by_id(region, instance_id, raw=True):
client = boto3.session.Session().client('ec2', region)
try:
matching_reservations = client.describe_instances(InstanceIds=[instance_id]).get('Reservations', [])
except ClientError as exc:
if exc.response.get('Error', {}).get('Code') != 'InvalidInstanceID.NotFound':
raise
return []
instances = []
[[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned
for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]
return instances | Return instances matching an id. |
def get_instances_by_name(name, sort_by_order=('cloud', 'name'), projects=None, raw=True, regions=None, gcp_credentials=None, clouds=SUPPORTED_CLOUDS):
matching_instances = all_clouds_get_instances_by_name(
name, projects, raw, credentials=gcp_credentials, clouds=clouds)
if regions:
matching_instances = [instance for instance in matching_instances if instance.region in regions]
matching_instances.sort(key=lambda instance: [getattr(instance, field) for field in sort_by_order])
return matching_instances | Get instances from GCP and AWS by name. |
def get_os_version(instance):
if instance.cloud == 'aws':
client = boto3.client('ec2', instance.region)
image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']
return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'
if instance.cloud == 'gcp':
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
for disk in compute.instances().get(instance=instance.name,
zone=instance.zone,
project=instance.project).execute()['disks']:
if not disk.get('boot'):
continue
for value in disk.get('licenses', []):
if '1604' in value:
return '16.04'
if '1404' in value:
return '14.04'
return '14.04'
return '14.04' | Get OS Version for instances. |
def get_volumes(instance):
if instance.cloud == 'aws':
client = boto3.client('ec2', instance.region)
devices = client.describe_instance_attribute(
InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])
volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']
for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])
return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}
if instance.cloud == 'gcp':
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
volumes = {}
for disk in compute.instances().get(instance=instance.id,
zone=instance.zone,
project=instance.project).execute()['disks']:
index = disk['index']
name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id
if 'local-ssd' in disk['deviceName']:
size = 375.0
disk_type = 'local-ssd'
else:
disk_data = compute.disks().get(disk=name,
zone=instance.zone,
project=instance.project).execute()
size = float(disk_data['sizeGb'])
disk_type = 'pd-ssd'
volumes[index] = {'size': size,
'type': disk['type'],
'deviceName': disk['deviceName'],
'interface': disk['interface'],
'diskType': disk_type}
return volumes
raise ValueError('Unknown cloud %s' % instance.cloud) | Returns all the volumes of an instance. |
def get_persistent_address(instance):
if instance.cloud == 'aws':
client = boto3.client('ec2', instance.region)
try:
client.describe_addresses(PublicIps=[instance.ip_address])
return instance.ip_address
except botocore.client.ClientError as exc:
if exc.response.get('Error', {}).get('Code') != 'InvalidAddress.NotFound':
raise
# Address is not public
return None
if instance.cloud == 'gcp':
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
try:
return compute.addresses().get(address=instance.name, project=instance.project, region=instance.region).execute()['address']
except errors.HttpError as exc:
if 'was not found' in str(exc):
return None
raise
raise ValueError('Unknown cloud %s' % instance.cloud) | Returns the public ip address of an instance. |
def main():
pip_packages = {}
for package in pip.get_installed_distributions():
name = package.project_name
version = package.version
full_name = "{0}-{1}-pip".format(name.lower(), version)
pip_packages[full_name] = {'version': version}
data = json.dumps(pip_packages)
print(data) | Use pip to find pip installed packages in a given prefix. |
def _save(file, data, mode='w+'):
with open(file, mode) as fh:
fh.write(data) | Write all data to the given file, overwriting any previous content. |
def merge(obj):
merge = ''
for f in obj.get('static', []):
print 'Merging: {}'.format(f)
merge += _read(f)
def doless(f):
print 'Compiling LESS: {}'.format(f)
ret, tmp = commands.getstatusoutput('lesscpy '+f)
if ret == 0:
return tmp
else:
print 'LESS to CSS failed for: {} (Do you have lesscpy installed?)'.format(f)
return ''
if merger.get('config'): #only imports django if we have a config file defined
import re
for p in merger['path']: sys.path.append(p)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", merger['config'])
try:
from django.template.loader import get_template_from_string
from django.template.base import Context
from django.utils.encoding import smart_str
from django.conf import settings
except:
print 'Do you really have django well installed?'
sys.exit(1)
for f in obj.get('template', []):
print 'Merging django template: {}'.format(f)
t = _read(f)
if settings.FORCE_SCRIPT_NAME:
t = re.sub(r'\{%\s+url\b', settings.FORCE_SCRIPT_NAME+'{% url ', t)
tmp = smart_str(get_template_from_string(t).render(Context({})))
if f.endswith('.less'):
pass
#TODO compilar tmp para css
merge += tmp
for f in obj.get('less', []):
merge += doless(f)
return merge | Merge contents.
It does a simple merge of all files defined under the 'static' key.
If you have JS or CSS files with embedded django tags like {% url ... %} or
{% static ... %} you should declare them under the 'template' key. This
function will render them and append the result to the merged output.
To use the render option you have to define both 'config' and 'path' in the
merger dictionary. |
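A hypothetical block illustrating the keys merge() reads; the file names are placeholders. Rendering the 'template' entries additionally requires the module-level merger dict to define 'config' and 'path'.

block = {
    'static': ['js/jquery.min.js', 'js/app.js'],   # merged verbatim
    'template': ['js/urls.js'],                    # rendered as django templates
    'less': ['css/style.less'],                    # compiled with lesscpy
}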
def jsMin(data, file):
print 'Minifying JS... ',
url = 'http://javascript-minifier.com/raw' #POST
req = urllib2.Request(url, urllib.urlencode({'input': data}))
try:
f = urllib2.urlopen(req)
response = f.read()
f.close()
print 'Final: {:.1f}%'.format(100.0*len(response)/len(data))
print 'Saving: {} ({:.2f}kB)'.format(file, len(response)/1024.0)
_save(file, response)
except:
print 'Oops!! Failed :('
return 1
return 0 | Minify JS data and saves to file.
Data should be a string with the whole JS content, and file will be
overwritten if it exists. |
def jpgMin(file, force=False):
if not os.path.isfile(file+'.original') or force:
data = _read(file, 'rb')
_save(file+'.original', data, 'w+b')
print 'Optimising JPG {} - {:.2f}kB'.format(file, len(data)/1024.0),
url = 'http://jpgoptimiser.com/optimise'
parts, headers = encode_multipart({}, {'input': {'filename': 'wherever.jpg', 'content': data}})
req = urllib2.Request(url, data=parts, headers=headers)
try:
f = urllib2.urlopen(req)
response = f.read()
f.close()
print ' - {:.2f} - {:.1f}%'.format(len(response)/1024.0, 100.0*len(response)/len(data))
_save(file, response, 'w+b')
except:
print 'Oops!! Failed :('
return 1
else:
print 'Ignoring file: {}'.format(file)
return 0 | Try to optimise a JPG file.
The original will be saved at the same place with '.original' appended to its name.
Once a .original exists the function will ignore this file unless force is True. |
def process(obj):
#merge all static and templates and less files
merged = merge(obj)
#save the full file if name defined
if obj.get('full'):
print 'Saving: {} ({:.2f}kB)'.format(obj['full'], len(merged)/1024.0)
_save(obj['full'], merged)
else:
print 'Full merged size: {:.2f}kB'.format(len(merged)/1024.0)
#minify js and save to file
if obj.get('jsmin'):
jsMin(merged, obj['jsmin'])
#minify css and save to file
if obj.get('cssmin'):
cssMin(merged, obj['cssmin']) | Process each block of the merger object. |
def growthfromrange(rangegrowth, startdate, enddate):
_yrs = (pd.Timestamp(enddate) - pd.Timestamp(startdate)).total_seconds() /\
dt.timedelta(365.25).total_seconds()
return yrlygrowth(rangegrowth, _yrs) | Annual growth given growth from start date to end date. |
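A worked example of the conversion, assuming the yrlygrowth helper (not shown here) annualizes as growth ** (1. / years):

import datetime as dt
import pandas as pd

# Doubling between 2010-01-01 and 2015-01-01 (~5 years) is about 14.9% per year.
total_growth = 2.0
yrs = (pd.Timestamp('2015-01-01') - pd.Timestamp('2010-01-01')).total_seconds() \
    / dt.timedelta(365.25).total_seconds()
print(total_growth ** (1. / yrs))   # ~1.1487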
def equities(country='US'):
nasdaqblob, otherblob = _getrawdata()
eq_triples = []
eq_triples.extend(_get_nas_triples(nasdaqblob))
eq_triples.extend(_get_other_triples(otherblob))
eq_triples.sort()
index = [triple[0] for triple in eq_triples]
data = [triple[1:] for triple in eq_triples]
return pd.DataFrame(data, index, columns=['Security Name', 'Exchange'], dtype=str) | Return a DataFrame of current US equities.
.. versionadded:: 0.4.0
.. versionchanged:: 0.5.0
Return a DataFrame
Parameters
----------
country : str, optional
Country code for equities to return, defaults to 'US'.
Returns
-------
eqs : :class:`pandas.DataFrame`
DataFrame whose index is a list of all current ticker symbols.
Columns are 'Security Name' (e.g. 'Zynerba Pharmaceuticals, Inc. - Common Stock')
and 'Exchange' ('NASDAQ', 'NYSE', 'NYSE MKT', etc.)
Examples
--------
>>> eqs = pn.data.equities('US')
Notes
-----
Currently only US markets are supported. |
def straddle(self, strike, expiry):
_rows = {}
_prices = {}
for _opttype in _constants.OPTTYPES:
_rows[_opttype] = _relevant_rows(self.data, (strike, expiry, _opttype,),
"No key for {} strike {} {}".format(expiry, strike, _opttype))
_prices[_opttype] = _getprice(_rows[_opttype])
_eq = _rows[_constants.OPTTYPES[0]].loc[:, 'Underlying_Price'].values[0]
_qt = _rows[_constants.OPTTYPES[0]].loc[:, 'Quote_Time'].values[0]
_index = ['Call', 'Put', 'Credit', 'Underlying_Price', 'Quote_Time']
_vals = np.array([_prices['call'], _prices['put'], _prices['call'] + _prices['put'], _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value']) | Metrics for evaluating a straddle.
Parameters
------------
strike : numeric
Strike price.
expiry : date or date str (e.g. '2015-01-01')
Expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating straddle. |
def get(equity):
_optmeta = pdr.data.Options(equity, 'yahoo')
_optdata = _optmeta.get_all_data()
return Options(_optdata) | Retrieve all current options chains for given equity.
.. versionchanged:: 0.5.0
Eliminate special exception handling.
Parameters
-------------
equity : str
Equity for which to retrieve options data.
Returns
-------------
optdata : :class:`~pynance.opt.core.Options`
All options data for given equity currently available
from Yahoo! Finance.
Examples
-------------
Basic usage::
>>> fopt = pn.opt.get('f')
To show useful information (expiration dates, stock price, quote time)
when retrieving options data, you can chain the call to
:func:`get` with :meth:`~pynance.opt.core.Options.info`::
>>> fopt = pn.opt.get('f').info()
Expirations:
...
Stock: 15.93
Quote time: 2015-03-07 16:00 |
def _get_norms_of_rows(data_frame, method):
if method == 'vector':
norm_vector = np.linalg.norm(data_frame.values, axis=1)
elif method == 'last':
norm_vector = data_frame.iloc[:, -1].values
elif method == 'mean':
norm_vector = np.mean(data_frame.values, axis=1)
elif method == 'first':
norm_vector = data_frame.iloc[:, 0].values
else:
raise ValueError("no normalization method '{0}'".format(method))
return norm_vector | return a column vector containing the norm of each row |
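A small illustration of the supported normalization methods on a toy frame, assuming the surrounding module's numpy/pandas imports:

import pandas as pd

frame = pd.DataFrame([[3., 4.], [1., 1.]], columns=['a', 'b'])
print(_get_norms_of_rows(frame, 'vector'))  # [5.0, 1.414...]
print(_get_norms_of_rows(frame, 'last'))    # [4., 1.]
print(_get_norms_of_rows(frame, 'first'))   # [3., 1.]
print(_get_norms_of_rows(frame, 'mean'))    # [3.5, 1.]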
def _candlestick_ax(df, ax):
quotes = df.reset_index()
quotes.loc[:, 'Date'] = mdates.date2num(quotes.loc[:, 'Date'].astype(dt.date))
fplt.candlestick_ohlc(ax, quotes.values) | # Alternatively: (but hard to get dates set up properly)
plt.xticks(range(len(df.index)), df.index, rotation=45)
fplt.candlestick2_ohlc(ax, df.loc[:, 'Open'].values, df.loc[:, 'High'].values,
df.loc[:, 'Low'].values, df.loc[:, 'Close'].values, width=0.2) |
def get(self, opttype, strike, expiry):
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
"No key for {} strike {} {}".format(expiry, strike, opttype))
return _getprice(_optrow) | Price as midpoint between bid and ask.
Parameters
----------
opttype : str
'call' or 'put'.
strike : numeric
Strike price.
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
-------
out : float
Examples
--------
>>> geopts = pn.opt.get('ge')
>>> geopts.price.get('call', 26., '2015-09-18')
0.94 |
def metrics(self, opttype, strike, expiry):
_optrow = _relevant_rows(self.data, (strike, expiry, opttype,),
"No key for {} strike {} {}".format(expiry, strike, opttype))
_index = ['Opt_Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int', 'Underlying_Price', 'Quote_Time']
_out = pd.DataFrame(index=_index, columns=['Value'])
_out.loc['Opt_Price', 'Value'] = _opt_price = _getprice(_optrow)
for _name in _index[2:]:
_out.loc[_name, 'Value'] = _optrow.loc[:, _name].values[0]
_eq_price = _out.loc['Underlying_Price', 'Value']
if opttype == 'put':
_out.loc['Time_Val'] = _get_put_time_val(_opt_price, strike, _eq_price)
else:
_out.loc['Time_Val'] = _get_call_time_val(_opt_price, strike, _eq_price)
return _out | Basic metrics for a specific option.
Parameters
----------
opttype : str ('call' or 'put')
strike : numeric
Strike price.
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
-------
out : :class:`pandas.DataFrame` |
def strikes(self, opttype, expiry):
_relevant = _relevant_rows(self.data, (slice(None), expiry, opttype,),
"No key for {} {}".format(expiry, opttype))
_index = _relevant.index.get_level_values('Strike')
_columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
_df = pd.DataFrame(index=_index, columns=_columns)
_underlying = _relevant.loc[:, 'Underlying_Price'].values[0]
_quotetime = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
for _col in _columns[2:]:
_df.loc[:, _col] = _relevant.loc[:, _col].values
_df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
_set_tv_strike_ix(_df, opttype, 'Price', 'Time_Val', _underlying)
return _df, _underlying, _quotetime | Retrieve option prices for all strikes of a given type with a given expiration.
Parameters
----------
opttype : str ('call' or 'put')
expiry : date-like
Expiration date. Can be a :class:`datetime.datetime` or
a string that :mod:`pandas` can interpret as such, e.g.
'2015-01-01'.
Returns
----------
df : :class:`pandas.DataFrame`
eq : float
Price of underlying.
qt : datetime.datetime
Time of quote.
See Also
--------
:meth:`exps` |
def exps(self, opttype, strike):
_relevant = _relevant_rows(self.data, (strike, slice(None), opttype,),
"No key for {} {}".format(strike, opttype))
_index = _relevant.index.get_level_values('Expiry')
_columns = ['Price', 'Time_Val', 'Last', 'Bid', 'Ask', 'Vol', 'Open_Int']
_df = pd.DataFrame(index=_index, columns=_columns)
_eq = _relevant.loc[:, 'Underlying_Price'].values[0]
_qt = pd.to_datetime(_relevant.loc[:, 'Quote_Time'].values[0], utc=True).to_datetime()
for _col in _columns[2:]:
_df.loc[:, _col] = _relevant.loc[:, _col].values
_df.loc[:, 'Price'] = (_df.loc[:, 'Bid'] + _df.loc[:, 'Ask']) / 2.
_set_tv_other_ix(_df, opttype, 'Price', 'Time_Val', _eq, strike)
return _df, _eq, _qt | Prices for given strike on all available dates.
Parameters
----------
opttype : str ('call' or 'put')
strike : numeric
Returns
----------
df : :class:`pandas.DataFrame`
eq : float
Price of underlying.
qt : :class:`datetime.datetime`
Time of quote.
See Also
--------
:meth:`strikes` |
def growth(interval, pricecol, eqdata):
size = len(eqdata.index)
labeldata = eqdata.loc[:, pricecol].values[interval:] /\
eqdata.loc[:, pricecol].values[:(size - interval)]
df = pd.DataFrame(data=labeldata, index=eqdata.index[:(size - interval)],
columns=['Growth'], dtype='float64')
return df | Retrieve growth labels.
Parameters
--------------
interval : int
Number of sessions over which growth is measured. For example, if
the value of 32 is passed for `interval`, the data returned will
show the growth 32 sessions ahead for each data point.
eqdata : DataFrame
Data for evaluating growth.
pricecol : str
Column of `eqdata` to be used for prices (Normally 'Adj Close').
Returns
--------
labels : DataFrame
Growth labels for the specified period
skipatend : int
Number of rows skipped at the end of `eqdata` for the given labels.
Used to synchronize labels and features.
Examples
---------------
>>> from functools import partial
>>> features, labels = pn.data.labeledfeatures(eqdata, 256,
... partial(pn.data.lab.growth, 32, 'Adj Close')) |
def sma(eqdata, **kwargs):
if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
_selection = kwargs.get('selection', 'Adj Close')
_eqdata = eqdata.loc[:, _selection]
else:
_eqdata = eqdata
_window = kwargs.get('window', 20)
_outputcol = kwargs.get('outputcol', 'SMA')
ret = pd.DataFrame(index=_eqdata.index, columns=[_outputcol], dtype=np.float64)
ret.loc[:, _outputcol] = _eqdata.rolling(window=_window, center=False).mean().values.flatten()
return ret | simple moving average
Parameters
----------
eqdata : DataFrame
window : int, optional
Lookback period for sma. Defaults to 20.
outputcol : str, optional
Column to use for output. Defaults to 'SMA'.
selection : str, optional
Column of eqdata on which to calculate sma. If
`eqdata` has only 1 column, `selection` is ignored,
and sma is calculated on that column. Defaults
to 'Adj Close'. |
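A minimal usage sketch on a synthetic frame; real input would be a price DataFrame with a DatetimeIndex and an 'Adj Close' column, as assumed here:

import numpy as np
import pandas as pd

idx = pd.date_range('2015-01-01', periods=30, freq='B')
eqdata = pd.DataFrame({'Adj Close': np.linspace(10., 12.9, 30)}, index=idx)
print(sma(eqdata, window=5).tail())   # last few 5-session averages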
def ema(eqdata, **kwargs):
if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
_selection = kwargs.get('selection', 'Adj Close')
_eqdata = eqdata.loc[:, _selection]
else:
_eqdata = eqdata
_span = kwargs.get('span', 20)
_col = kwargs.get('outputcol', 'EMA')
_emadf = pd.DataFrame(index=_eqdata.index, columns=[_col], dtype=np.float64)
_emadf.loc[:, _col] = _eqdata.ewm(span=_span, min_periods=0, adjust=True, ignore_na=False).mean().values.flatten()
return _emadf | Exponential moving average with the given span.
Parameters
----------
eqdata : DataFrame
Must have exactly 1 column on which to calculate EMA
span : int, optional
Span for exponential moving average. Cf. `pandas.stats.moments.ewma
<http://pandas.pydata.org/pandas-docs/stable/generated/pandas.stats.moments.ewma.html>`_ and
`additional Pandas documentation
<http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions>`_.
outputcol : str, optional
Column to use for output. Defaults to 'EMA'.
selection : str, optional
Column of eqdata on which to calculate ema. If
`eqdata` has only 1 column, `selection` is ignored,
and ema is calculated on that column. Defaults
to 'Adj Close'.
Returns
---------
emadf : DataFrame
Exponential moving average using the given `span`. |
def ema_growth(eqdata, **kwargs):
_growth_outputcol = kwargs.get('outputcol', 'EMA Growth')
_ema_outputcol = 'EMA'
kwargs['outputcol'] = _ema_outputcol
_emadf = ema(eqdata, **kwargs)
return simple.growth(_emadf, selection=_ema_outputcol, outputcol=_growth_outputcol) | Growth of exponential moving average.
Parameters
----------
eqdata : DataFrame
span : int, optional
Span for exponential moving average. Defaults to 20.
outputcol : str, optional.
Column to use for output. Defaults to 'EMA Growth'.
selection : str, optional
Column of eqdata on which to calculate ema growth. If
`eqdata` has only 1 column, `selection` is ignored,
and ema growth is calculated on that column. Defaults
to 'Adj Close'.
Returns
---------
out : DataFrame
Growth of exponential moving average from one day to next |
def volatility(eqdata, **kwargs):
if len(eqdata.shape) > 1 and eqdata.shape[1] != 1:
_selection = kwargs.get('selection', 'Adj Close')
_eqdata = eqdata.loc[:, _selection]
else:
_eqdata = eqdata
_window = kwargs.get('window', 20)
_colname = kwargs.get('outputcol', 'Risk')
_risk = pd.DataFrame(index=_eqdata.index, columns=[_colname], dtype=np.float64)
_risk.loc[:, _colname] = _eqdata.rolling(center=False, window=_window).std().values.flatten()
return _risk | Volatility (standard deviation) over the given window
Parameters
----------
eqdata : DataFrame
window : int, optional
Lookback period. Defaults to 20.
outputcol : str, optional
Name of column to be used in returned dataframe. Defaults to 'Risk'.
selection : str, optional
Column of eqdata on which to calculate volatility. If
`eqdata` has only 1 column, `selection` is ignored,
and volatility is calculated on that column. Defaults
to 'Adj Close'.
Returns
---------
risk : DataFrame
Moving volatility with the given lookback. |
def growth_volatility(eqdata, **kwargs):
_window = kwargs.get('window', 20)
_selection = kwargs.get('selection', 'Adj Close')
_outputcol = kwargs.get('outputcol', 'Growth Risk')
_growthdata = simple.growth(eqdata, selection=_selection)
return volatility(_growthdata, outputcol=_outputcol, window=_window) | Return the volatility of growth.
Note that, like :func:`pynance.tech.simple.growth` but in contrast to
:func:`volatility`, :func:`growth_volatility`
applies directly to a dataframe like that returned by
:func:`pynance.data.retrieve.get`, not necessarily to a single-column dataframe.
Parameters
----------
eqdata : DataFrame
Data from which to extract growth volatility. An exception
will be raised if `eqdata` does not contain a column 'Adj Close'
or an optional name specified by the `selection` parameter.
window : int, optional
Window on which to calculate volatility. Defaults to 20.
selection : str, optional
Column of eqdata on which to calculate volatility of growth. Defaults
to 'Adj Close'
outputcol : str, optional
Column to use for output. Defaults to 'Growth Risk'.
Returns
---------
out : DataFrame
Dataframe showing the volatility of growth over the specified `window`. |
def bollinger(eqdata, **kwargs):
_window = kwargs.get('window', 20)
_multiple = kwargs.get('multiple', 2.)
_selection = kwargs.get('selection', 'Adj Close')
# ensures correct name for output column of sma()
kwargs['outputcol'] = 'SMA'
_smadf = sma(eqdata, **kwargs)
_sigmas = eqdata.loc[:, _selection].rolling(center=False, window=_window).std().values.flatten()
_diff = _multiple * _sigmas
_bolldf = pd.DataFrame(index=eqdata.index, columns=['Upper', 'Lower'], dtype=np.float64)
_bolldf.loc[:, 'Upper'] = _smadf.iloc[:, 0].values + _diff
_bolldf.loc[:, 'Lower'] = _smadf.iloc[:, 0].values - _diff
return _bolldf, _smadf | Bollinger bands
Returns bolldf, smadf where bolldf is a DataFrame containing
Bollinger bands with columns 'Upper' and 'Lower' and smadf contains
the simple moving average.
Parameters
----------
eqdata : DataFrame
Must include a column specified in the `selection` parameter or,
if no `selection` parameter is given, a column 'Adj Close'.
window : int, optional
Lookback period.
multiple : float, optional
Multiple of standard deviation above and below sma to use
in calculating band value. Defaults to 2.0.
selection : str, optional
Column of `eqdata` on which to calculate bollinger bands.
Defaults to 'Adj Close'.
Returns
---------
bolldf : DataFrame
Dataframe containing columns 'Upper' and 'Lower' describing
the given multiple of standard deviations above and below
simple moving average for the lookback period.
smadf : DataFrame
Simple moving average given the specified lookback. |
def ratio_to_ave(window, eqdata, **kwargs):
_selection = kwargs.get('selection', 'Volume')
_skipstartrows = kwargs.get('skipstartrows', 0)
_skipendrows = kwargs.get('skipendrows', 0)
_outputcol = kwargs.get('outputcol', 'Ratio to Ave')
_size = len(eqdata.index)
_eqdata = eqdata.loc[:, _selection]
_sma = _eqdata.iloc[:-1 - _skipendrows].rolling(window=window, center=False).mean().values
_outdata = _eqdata.values[window + _skipstartrows:_size - _skipendrows] /\
_sma[window + _skipstartrows - 1:]
_index = eqdata.index[window + _skipstartrows:_size - _skipendrows]
return pd.DataFrame(_outdata, index=_index, columns=[_outputcol], dtype=np.float64) | Return values expressed as ratios to the average over some number
of prior sessions.
Parameters
----------
eqdata : DataFrame
Must contain a column with name matching `selection`, or, if
`selection` is not specified, a column named 'Volume'
window : int
Interval over which to calculate the average. Normally 252 (1 year)
selection : str, optional
Column to select for calculating ratio. Defaults to 'Volume'
skipstartrows : int, optional
Rows to skip at beginning in addition to the `window` rows
that must be skipped to get the baseline volume. Defaults to 0.
skipendrows : int, optional
Rows to skip at end. Defaults to 0.
outputcol : str, optional
Name of column in output dataframe. Defaults to 'Ratio to Ave'
Returns
---------
out : DataFrame |
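A small worked example of the windowing; the first `window` rows are consumed to form the baseline average, so the output starts `window` sessions in:

import pandas as pd

eqdata = pd.DataFrame({'Volume': [1., 2., 3., 4., 5., 6.]},
                      index=pd.date_range('2015-01-01', periods=6, freq='B'))
# Ratios are 4/2, 5/3 and 6/4: each value is divided by the mean of the
# 3 sessions ending one session earlier.
print(ratio_to_ave(3, eqdata))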
def run(features, labels, regularization=0., constfeat=True):
n_col = (features.shape[1] if len(features.shape) > 1 else 1)
reg_matrix = regularization * np.identity(n_col, dtype='float64')
if constfeat:
reg_matrix[0, 0] = 0.
# http://stackoverflow.com/questions/27476933/numpy-linear-regression-with-regularization
return np.linalg.lstsq(features.T.dot(features) + reg_matrix, features.T.dot(labels))[0] | Run linear regression on the given data.
.. versionadded:: 0.5.0
If a regularization parameter is provided, this function
is a simplification and specialization of ridge
regression, as implemented in `scikit-learn
<http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html#sklearn.linear_model.Ridge>`_.
Setting `solver` to `'svd'` in :class:`sklearn.linear_model.Ridge` and equating
our `regularization` with their `alpha` will yield the same results.
Parameters
----------
features : ndarray
Features on which to run linear regression.
labels : ndarray
Labels for the given features. Multiple columns
of labels are allowed.
regularization : float, optional
Regularization parameter. Defaults to 0.
constfeat : bool, optional
Whether or not the first column of features is
the constant feature 1. If True, the first column
will be excluded from regularization. Defaults to True.
Returns
-------
model : ndarray
Regression model for the given data. |
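A minimal sketch with no regularization: recover the coefficients of y = 1 + 2x, prepending the constant column by hand (cf. add_const elsewhere in this module):

import numpy as np

x = np.arange(10, dtype='float64')
features = np.column_stack([np.ones_like(x), x])   # constant feature first
labels = 1. + 2. * x
print(run(features, labels))   # approximately [1., 2.]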
def cal(self, opttype, strike, exp1, exp2):
assert pd.Timestamp(exp1) < pd.Timestamp(exp2)
_row1 = _relevant_rows(self.data, (strike, exp1, opttype,),
"No key for {} strike {} {}".format(exp1, strike, opttype))
_row2 = _relevant_rows(self.data, (strike, exp2, opttype,),
"No key for {} strike {} {}".format(exp2, strike, opttype))
_price1 = _getprice(_row1)
_price2 = _getprice(_row2)
_eq = _row1.loc[:, 'Underlying_Price'].values[0]
_qt = _row1.loc[:, 'Quote_Time'].values[0]
_index = ['Near', 'Far', 'Debit', 'Underlying_Price', 'Quote_Time']
_vals = np.array([_price1, _price2, _price2 - _price1, _eq, _qt])
return pd.DataFrame(_vals, index=_index, columns=['Value']) | Metrics for evaluating a calendar spread.
Parameters
------------
opttype : str ('call' or 'put')
Type of option on which to collect data.
strike : numeric
Strike price.
exp1 : date or date str (e.g. '2015-01-01')
Earlier expiration date.
exp2 : date or date str (e.g. '2015-01-01')
Later expiration date.
Returns
------------
metrics : DataFrame
Metrics for evaluating spread. |
def featurize(equity_data, n_sessions, **kwargs):
#Benchmarking
#>>> s = 'from __main__ import data\nimport datetime as dt\n'
#>>> timeit.timeit('data.featurize(data.get("ge", dt.date(1960, 1, 1),
# dt.date(2014, 12, 31)), 256)', setup=s, number=1)
#1.6771750450134277
columns = kwargs.get('columns', map(str, range(-n_sessions + 1, 1)))
selection = kwargs.get('selection', 'Adj Close')
# empty DataFrame with desired index and column labels
features = pd.DataFrame(index=equity_data.index[(n_sessions - 1):],
columns=columns, dtype='float64')
values = equity_data[selection].values
for i in range(n_sessions - 1):
features.iloc[:, i] = values[i:(-n_sessions + i + 1)]
features.iloc[:, n_sessions - 1] = values[(n_sessions - 1):]
return features | Generate a raw (unnormalized) feature set from the input data.
The value at `column` on the given date is taken
as a feature, and each row contains values for n_sessions
Parameters
-----------
equity_data : DataFrame
data from which to generate features
n_sessions : int
number of sessions to use as features
selection : str, default: 'Adj Close'
column of `equity_data` from which to generate features.
columns : list, default: ``map(str, range((-n_sessions + 1), 1))``
column names for output DataFrame. Default will look like:
['-5', '-4', '-3', '-2', '-1', '0'].
Returns
----------
out : DataFrame
Each row is a sequence of `n_sessions` session values where
the last column matches the value on the date specified by
the DataFrame index.
Examples
--------
>>> pn.featurize(equity_data, n_sessions, **kwargs) |
def decorate(fn, *args, **kwargs):
def _wrapper(*_args, **kwargs):
_ret = fn(*_args, **kwargs)
if isinstance(_ret, tuple):
return _ret + args
if len(args) == 0:
return _ret
return (_ret,) + args
for key, value in kwargs.items():
_wrapper.__dict__[key] = value
return _wrapper | Return a new function that replicates the behavior of the input
but also returns an additional value. Used for creating functions
of the proper type to pass to `labeledfeatures()`.
Parameters
----------
fn : function
*args : any
Additional parameters that the returned function will return
**kwargs : dict
Each element in `kwargs` will become an attribute of the output
function.
Returns
----------
wrapped : function
New function that acts like `fn` except that it also returns
an additional value.
Examples
----------
>>> from functools import partial
>>> forecast_interval = 32
>>> features, labels = pn.data.labeledfeatures(eqdata, 256, featurefn,
... decorate(partial(pn.data.lab.growth, forecast_interval, 'Adj Close'), forecast_interval))
>>> def f():
... return 0, 1
...
>>> pn.decorate(f, 3, 4, 5)()
(0, 1, 3, 4, 5)
>>> pn.decorate(lambda x: x * .5, 3, 4, 5)(1.)
(0.5, 3, 4, 5)
>>> pn.decorate(lambda x: x, 1, 2)('foo')
('foo', 1, 2)
>>> pn.decorate(f, 'foo')()
(0, 1, 'foo')
>>> pn.decorate(f, 0, foo='bar').foo
'bar'
Notes
----------
If `fn` returns multiple values, these will be returned in sequence
as the first values returned by `decorate(fn, arg0, arg1, arg2)`. See example
above. |
def expand(fn, col, inputtype=pd.DataFrame):
if inputtype == pd.DataFrame:
if isinstance(col, int):
def _wrapper(*args, **kwargs):
return fn(args[0].iloc[:, col], *args[1:], **kwargs)
return _wrapper
def _wrapper(*args, **kwargs):
return fn(args[0].loc[:, col], *args[1:], **kwargs)
return _wrapper
elif inputtype == np.ndarray:
def _wrapper(*args, **kwargs):
return fn(args[0][:, col], *args[1:], **kwargs)
return _wrapper
raise TypeError("invalid input type") | Wrap a function applying to a single column to make a function
applying to a multi-dimensional dataframe or ndarray
Parameters
----------
fn : function
Function that applies to a series or vector.
col : str or int
Index of column to which to apply `fn`.
inputtype : class or type
Type of input to be expected by the wrapped function.
Normally pd.DataFrame or np.ndarray. Defaults to pd.DataFrame.
Returns
----------
wrapped : function
Function that takes an input of type `inputtype` and applies
`fn` to the specified `col`. |
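A short sketch: wrap a single-series function so it applies to one named column of a frame.

import numpy as np
import pandas as pd

col_mean = expand(np.mean, 'b', inputtype=pd.DataFrame)
frame = pd.DataFrame({'a': [1., 2.], 'b': [3., 5.]})
print(col_mean(frame))   # 4.0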
def has_na(eqdata):
if isinstance(eqdata, pd.DataFrame):
_values = eqdata.values
else:
_values = eqdata
return len(_values[pd.isnull(_values)]) > 0 | Return false if `eqdata` contains no missing values.
Parameters
----------
eqdata : DataFrame or ndarray
Data to check for missing values (NaN, None)
Returns
----------
answer : bool
False iff `eqdata` contains no missing values. |
def add_const(features):
content = np.empty((features.shape[0], features.shape[1] + 1), dtype='float64')
content[:, 0] = 1.
if isinstance(features, np.ndarray):
content[:, 1:] = features
return content
content[:, 1:] = features.iloc[:, :].values
cols = ['Constant'] + features.columns.tolist()
return pd.DataFrame(data=content, index=features.index, columns=cols, dtype='float64') | Prepend the constant feature 1 as first feature and return the modified
feature set.
Parameters
----------
features : ndarray or DataFrame |
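A quick illustration of the ndarray branch; the constant column of ones is prepended ahead of the existing features:

import numpy as np

features = np.array([[2., 3.], [4., 5.]])
print(add_const(features))
# [[1. 2. 3.]
#  [1. 4. 5.]]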
def fromcols(selection, n_sessions, eqdata, **kwargs):
_constfeat = kwargs.get('constfeat', True)
_outcols = ['Constant'] if _constfeat else []
_n_rows = len(eqdata.index)
for _col in selection:
_outcols += map(partial(_concat, strval=' ' + _col), range(-n_sessions + 1, 1))
_features = pd.DataFrame(index=eqdata.index[n_sessions - 1:], columns=_outcols, dtype=np.float64)
_offset = 0
if _constfeat:
_features.iloc[:, 0] = 1.
_offset += 1
for _col in selection:
_values = eqdata.loc[:, _col].values
for i in range(n_sessions):
_features.iloc[:, _offset + i] = _values[i:_n_rows - n_sessions + i + 1]
_offset += n_sessions
return _features | Generate features from selected columns of a dataframe.
Parameters
----------
selection : list or tuple of str
Columns to be used as features.
n_sessions : int
Number of sessions over which to create features.
eqdata : DataFrame
Data from which to generate feature set. Must contain
as columns the values from which the features are to
be generated.
constfeat : bool, optional
Whether or not the returned features will have the constant
feature.
Returns
----------
features : DataFrame |
def ln_growth(eqdata, **kwargs):
if 'outputcol' not in kwargs:
kwargs['outputcol'] = 'LnGrowth'
return np.log(growth(eqdata, **kwargs)) | Return the natural log of growth.
See also
--------
:func:`growth` |
def ret(eqdata, **kwargs):
if 'outputcol' not in kwargs:
kwargs['outputcol'] = 'Return'
result = growth(eqdata, **kwargs)
result.values[:, :] -= 1.
return result | Generate a DataFrame where the sole column, 'Return',
is the return for the equity over the given number of sessions.
For example, if 'XYZ' has 'Adj Close' of `100.0` on 2014-12-15 and
`90.0` 4 *sessions* later on 2014-12-19, then the 'Return' value
for 2014-12-19 will be `-0.1`.
Parameters
----------
eqdata : DataFrame
Data such as that returned by `get()`
selection : str, optional
Column from which to determine growth values. Defaults to
'Adj Close'.
n_sessions : int
Number of sessions to count back for calculating today's
return. For example, if `n_sessions` is set to 4, return is
calculated relative to the price 4 sessions ago. Defaults
to 1 (price of previous session).
skipstartrows : int
Rows to skip at beginning of `eqdata` in addition to the 1 row that must
be skipped because the calculation relies on a prior data point.
Defaults to 0.
skipendrows : int
Rows to skip at end of `eqdata`. Defaults to 0.
outputcol : str, optional
Name for column of output dataframe. Defaults to 'Return'.
Returns
----------
out : DataFrame
See Also
--------
:func:`growth`
Notes
----------
The interval is the number of *sessions* between the 2 values
whose ratio is being measured, *not* the number of days (which
includes days on which the market is closed).
The percentage gain or loss is measured relative to the earlier
date, but the index date is the later date. The index is chosen because
that is the date on which the value is known. The percentage measure is used
because that is how percent profit and loss are calculated. |
def mse(predicted, actual):
diff = predicted - actual
return np.average(diff * diff, axis=0) | Mean squared error of predictions.
.. versionadded:: 0.5.0
Parameters
----------
predicted : ndarray
Predictions on which to measure error. May
contain a single or multiple column but must
match `actual` in shape.
actual : ndarray
Actual values against which to measure predictions.
Returns
-------
err : ndarray
Mean squared error of predictions relative to actual
values. |
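A worked example: element-wise errors of 1, -1 and 2 give a mean squared error of (1 + 1 + 4) / 3 = 2.0.

import numpy as np

predicted = np.array([1., 2., 5.])
actual = np.array([0., 3., 3.])
print(mse(predicted, actual))   # 2.0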
def is_bday(date, bday=None):
_date = Timestamp(date)
if bday is None:
bday = CustomBusinessDay(calendar=USFederalHolidayCalendar())
return _date == (_date + bday) - bday | Return true iff the given date is a business day.
Parameters
----------
date : :class:`pandas.Timestamp`
Any value that can be converted to a pandas Timestamp--e.g.,
'2012-05-01', dt.datetime(2012, 5, 1, 3)
bday : :class:`pandas.tseries.offsets.CustomBusinessDay`
Defaults to `CustomBusinessDay(calendar=USFederalHolidayCalendar())`.
Pass this parameter in performance-sensitive contexts, such
as when calling this function in a loop. The creation of the `CustomBusinessDay`
object is the performance bottleneck of this function.
Cf. `pandas.tseries.offsets.CustomBusinessDay
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#custom-business-days-experimental>`_.
Returns
-------
val : bool
True iff `date` is a business day |
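Usage in the performance-minded style the docstring suggests: build the CustomBusinessDay once and reuse it. The dates are illustrative (July 3, 2015 is the observed Independence Day holiday).

from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay

bday = CustomBusinessDay(calendar=USFederalHolidayCalendar())
for date in ('2015-07-03', '2015-07-06'):
    print(date, is_bday(date, bday=bday))   # False, then True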
def compare(eq_dfs, columns=None, selection='Adj Close'):
content = np.empty((eq_dfs[0].shape[0], len(eq_dfs)), dtype=np.float64)
rel_perf = pd.DataFrame(content, eq_dfs[0].index, columns, dtype=np.float64)
for i in range(len(eq_dfs)):
rel_perf.iloc[:, i] = eq_dfs[i].loc[:, selection] / eq_dfs[i].iloc[0].loc[selection]
return rel_perf | Get the relative performance of multiple equities.
.. versionadded:: 0.5.0
Parameters
----------
eq_dfs : list or tuple of DataFrame
Performance data for multiple equities over
a consistent time frame.
columns : iterable of str, default None
Labels to use for the columns of the output DataFrame.
The labels, if provided, should normally be the names
of the equities whose performance is being compared.
selection : str, default 'Adj Close'
Column containing prices to be compared. Defaults
to 'Adj Close'.
Returns
-------
rel_perf : DataFrame
A DataFrame whose columns contain normalized data
for each equity represented in `eq_dfs`. The initial
price for each equity will be normalized to 1.0.
Examples
--------
.. code-block:: python
import pynance as pn
eqs = ('FSLR', 'SCTY', 'SPWR')
eq_dfs = []
for eq in eqs:
eq_dfs.append(pn.data.get(eq, '2016'))
rel_perf = pn.data.compare(eq_dfs, eqs)
Notes
-----
Each set of data passed in `eq_dfs` is assumed to have
the same start and end dates as the other data sets. |
def info(self):
print("Expirations:")
_i = 0
for _datetime in self.data.index.levels[1].to_pydatetime():
print("{:2d} {}".format(_i, _datetime.strftime('%Y-%m-%d')))
_i += 1
print("Stock: {:.2f}".format(self.data.iloc[0].loc['Underlying_Price']))
print("Quote time: {}".format(self.quotetime().strftime('%Y-%m-%d %H:%M%z')))
return self, self.exps() | Show expiration dates, equity price, quote time.
Returns
-------
self : :class:`~pynance.opt.core.Options`
Returns a reference to the calling object to allow
chaining.
expiries : :class:`pandas.tseries.index.DatetimeIndex`
Examples
--------
>>> fopt, fexp = pn.opt.get('f').info()
Expirations:
...
Stock: 16.25
Quote time: 2015-03-01 16:00 |
def tolist(self):
return [_todict(key, self.data.loc[key, :]) for key in self.data.index] | Return the array as a list of rows.
Each row is a `dict` of values. Facilitates inserting data into a database.
.. versionadded:: 0.3.1
Returns
-------
quotes : list
A list in which each entry is a dictionary representing
a single options quote. |
def _generate_username(self):
while True:
# Generate a UUID username, removing dashes and the last 2 chars
# to make it fit into the 30 char User.username field. Gracefully
# handle any unlikely, but possible duplicate usernames.
username = str(uuid.uuid4())
username = username.replace('-', '')
username = username[:-2]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username | Generate a unique username |
def update_model_cache(table_name):
model_cache_info = ModelCacheInfo(table_name, uuid.uuid4().hex)
model_cache_backend.share_model_cache_info(model_cache_info) | Updates model cache by generating a new key for the model |
def invalidate_model_cache(sender, instance, **kwargs):
logger.debug('Received post_save/post_delete signal from sender {0}'.format(sender))
if django.VERSION >= (1, 8):
related_tables = set(
[f.related_model._meta.db_table for f in sender._meta.get_fields()
if f.related_model is not None
and (((f.one_to_many or f.one_to_one) and f.auto_created)
or f.many_to_one or (f.many_to_many and not f.auto_created))])
else:
related_tables = set([rel.model._meta.db_table for rel in sender._meta.get_all_related_objects()])
# temporary fix for m2m relations with an intermediate model, goes away after better join caching
related_tables |= set([field.rel.to._meta.db_table for field in sender._meta.fields if issubclass(type(field), RelatedField)])
logger.debug('Related tables of sender {0} are {1}'.format(sender, related_tables))
update_model_cache(sender._meta.db_table)
for related_table in related_tables:
update_model_cache(related_table) | Signal receiver for models to invalidate model cache of sender and related models.
Model cache is invalidated by generating new key for each model.
Parameters
~~~~~~~~~~
sender
The model class
instance
The actual instance being saved. |
def invalidate_m2m_cache(sender, instance, model, **kwargs):
logger.debug('Received m2m_changed signals from sender {0}'.format(sender))
update_model_cache(instance._meta.db_table)
update_model_cache(model._meta.db_table) | Signal receiver for models to invalidate model cache for many-to-many relationship.
Parameters
~~~~~~~~~~
sender
The model class
instance
The instance whose many-to-many relation is updated.
model
The class of the objects that are added to, removed from or cleared from the relation. |
def get_params(self):
value = self._get_lookup(self.operator, self.value)
self.params.append(value)
return self.params | returns a list |
def get_wheres(self):
self.wheres.append(u"%s %s"
% (lookup_cast(self.operator) % self.db_field,
self.operator))
return self.wheres | returns a list |
def generate_key(self):
sql = self.sql()
key, created = self.get_or_create_model_key()
if created:
db_table = self.model._meta.db_table
logger.debug('created new key {0} for model {1}'.format(key, db_table))
model_cache_info = ModelCacheInfo(db_table, key)
model_cache_backend.share_model_cache_info(model_cache_info)
query_key = u'{model_key}{qs}{db}'.format(model_key=key,
qs=sql,
db=self.db)
key = hashlib.md5(query_key.encode('utf-8')).hexdigest()
return key | Generate cache key for the current query. If a new key is created for the model it is
then shared with other consumers. |
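An illustration of the key layout built above; the SQL string and database alias are made-up placeholders.

import hashlib
import uuid

model_key = uuid.uuid4().hex
query_key = u'{model_key}{qs}{db}'.format(model_key=model_key,
                                          qs="SELECT ... FROM app_book WHERE id = 1",
                                          db='default')
print(hashlib.md5(query_key.encode('utf-8')).hexdigest())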
def sql(self):
clone = self.query.clone()
sql, params = clone.get_compiler(using=self.db).as_sql()
return sql % params | Get sql for the current query. |
def get_or_create_model_key(self):
model_cache_info = model_cache_backend.retrieve_model_cache_info(self.model._meta.db_table)
if not model_cache_info:
return uuid.uuid4().hex, True
return model_cache_info.table_key, False | Get or create key for the model.
Returns
~~~~~~~
(model_key, boolean) tuple |
def invalidate_model_cache(self):
logger.info('Invalidating cache for table {0}'.format(self.model._meta.db_table))
if django.VERSION >= (1, 8):
related_tables = set(
[f.related_model._meta.db_table for f in self.model._meta.get_fields()
if ((f.one_to_many or f.one_to_one) and f.auto_created)
or f.many_to_one or (f.many_to_many and not f.auto_created)])
else:
related_tables = set([rel.model._meta.db_table for rel in self.model._meta.get_all_related_objects()])
# temporary fix for m2m relations with an intermediate model, goes away after better join caching
related_tables |= set([field.rel.to._meta.db_table for field in self.model._meta.fields if issubclass(type(field), RelatedField)])
logger.debug('Related tables of model {0} are {1}'.format(self.model, related_tables))
update_model_cache(self.model._meta.db_table)
for related_table in related_tables:
update_model_cache(related_table) | Invalidate model cache by generating new key for the model. |
def cache_backend(self):
if not hasattr(self, '_cache_backend'):
if hasattr(django.core.cache, 'caches'):
self._cache_backend = django.core.cache.caches[_cache_name]
else:
self._cache_backend = django.core.cache.get_cache(_cache_name)
return self._cache_backend | Get the cache backend
Returns
~~~~~~~
Django cache backend |
def import_file(filename):
pathname, filename = os.path.split(filename)
modname = re.match(
r'(?P<modname>\w+)\.py', filename).group('modname')
file, path, desc = imp.find_module(modname, [pathname])
try:
imp.load_module(modname, file, path, desc)
finally:
file.close() | Import a file that will trigger the population of Orca.
Parameters
----------
filename : str |
def check_is_table(func):
@wraps(func)
def wrapper(**kwargs):
if not orca.is_table(kwargs['table_name']):
abort(404)
return func(**kwargs)
return wrapper | Decorator that will check whether the "table_name" keyword argument
to the wrapped function matches a registered Orca table. |
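A hypothetical route showing how the decorator is stacked; the Flask app object and URL rule here are assumptions for illustration, not part of the module shown.

from flask import Flask

app = Flask(__name__)

@app.route('/tables/<table_name>/exists')
@check_is_table
def table_exists(table_name):
    # Reached only when orca.is_table(table_name) is True; otherwise a 404 is raised.
    return 'ok'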
def check_is_column(func):
@wraps(func)
def wrapper(**kwargs):
table_name = kwargs['table_name']
col_name = kwargs['col_name']
if not orca.is_table(table_name):
abort(404)
if col_name not in orca.get_table(table_name).columns:
abort(404)
return func(**kwargs)
return wrapper | Decorator that will check whether the "table_name" and "col_name"
keyword arguments to the wrapped function match a registered Orca
table and column. |
def check_is_injectable(func):
@wraps(func)
def wrapper(**kwargs):
name = kwargs['inj_name']
if not orca.is_injectable(name):
abort(404)
return func(**kwargs)
return wrapper | Decorator that will check whether the "inj_name" keyword argument to
the wrapped function matches a registered Orca injectable. |
def schema():
tables = orca.list_tables()
cols = {t: orca.get_table(t).columns for t in tables}
steps = orca.list_steps()
injectables = orca.list_injectables()
broadcasts = orca.list_broadcasts()
return jsonify(
tables=tables, columns=cols, steps=steps, injectables=injectables,
broadcasts=broadcasts) | All tables, columns, steps, injectables and broadcasts registered with
Orca. Includes local columns on tables. |
def table_info(table_name):
table = orca.get_table(table_name).to_frame()
buf = StringIO()
table.info(verbose=True, buf=buf)
info = buf.getvalue()
return info, 200, {'Content-Type': 'text/plain'} | Return the text result of table.info(verbose=True). |
def table_preview(table_name):
preview = orca.get_table(table_name).to_frame().head()
return (
preview.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) | Returns the first five rows of a table as JSON. Includes all columns.
Uses Pandas' "split" JSON format. |
def table_describe(table_name):
desc = orca.get_table(table_name).to_frame().describe()
return (
desc.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) | Return summary statistics of a table as JSON. Includes all columns.
Uses Pandas' "split" JSON format. |
def table_definition(table_name):
if orca.table_type(table_name) == 'dataframe':
return jsonify(type='dataframe')
filename, lineno, source = \
orca.get_raw_table(table_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html) | Get the source of a table function.
If a table is a registered DataFrame and not a function then all that is
returned is {'type': 'dataframe'}.
If the table is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments. |
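The two possible payloads, illustrated as Python dicts with invented values:

# When the table was registered directly as a DataFrame:
dataframe_response = {'type': 'dataframe'}

# When the table was registered as a function (all values are illustrative):
function_response = {
    'type': 'function',
    'filename': '/path/to/models.py',
    'lineno': 12,
    'text': "@orca.table()\ndef households(store):\n    return store['households']\n",
    'html': '<div class="highlight">...</div>',   # Pygments-highlighted copy of "text"
}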
def table_groupbyagg(table_name):
table = orca.get_table(table_name)
# column to aggregate
column = request.args.get('column', None)
if not column or column not in table.columns:
abort(400)
# column or index level to group by
by = request.args.get('by', None)
level = request.args.get('level', None)
if (not by and not level) or (by and level):
abort(400)
# aggregation type
agg = request.args.get('agg', None)
if not agg or agg not in _GROUPBY_AGG_MAP:
abort(400)
column = table.get_column(column)
# level can either be an integer level number or a string level name.
# try converting to integer, but if that doesn't work
# we go ahead with the string.
if level:
try:
level = int(level)
except ValueError:
pass
gby = column.groupby(level=level)
else:
by = table.get_column(by)
gby = column.groupby(by)
result = _GROUPBY_AGG_MAP[agg](gby)
return (
result.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) | Perform a groupby on a table and return an aggregation on a single column.
This depends on some request parameters in the URL.
"column" and "agg" must always be present, and one of "by" or "level"
must be present. "column" is the table column on which aggregation will
be performed, "agg" is the aggregation that will be performed, and
"by"/"level" define how to group the data.
Supported "agg" parameters are: mean, median, std, sum, and size. |
def column_preview(table_name, col_name):
col = orca.get_table(table_name).get_column(col_name).head(10)
return (
col.to_json(orient='split', date_format='iso'),
200,
{'Content-Type': 'application/json'}) | Return the first ten elements of a column as JSON in Pandas'
"split" format. |
def column_definition(table_name, col_name):
col_type = orca.get_table(table_name).column_type(col_name)
if col_type != 'function':
return jsonify(type=col_type)
filename, lineno, source = \
orca.get_raw_column(table_name, col_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html) | Get the source of a column function.
If a column is a registered Series and not a function then all that is
returned is {'type': 'series'}.
If the column is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments. |
def column_describe(table_name, col_name):
col_desc = orca.get_table(table_name).get_column(col_name).describe()
return (
col_desc.to_json(orient='split'),
200,
{'Content-Type': 'application/json'}) | Return summary statistics of a column as JSON.
Uses Pandas' "split" JSON format. |
def column_csv(table_name, col_name):
csv = orca.get_table(table_name).get_column(col_name).to_csv(path=None)
return csv, 200, {'Content-Type': 'text/csv'} | Return a column as CSV using Pandas' default CSV output. |
def injectable_repr(inj_name):
i = orca.get_injectable(inj_name)
return jsonify(type=str(type(i)), repr=repr(i)) | Returns the type and repr of an injectable. JSON response has
"type" and "repr" keys. |
def injectable_definition(inj_name):
inj_type = orca.injectable_type(inj_name)
if inj_type == 'variable':
return jsonify(type='variable')
else:
filename, lineno, source = \
orca.get_injectable_func_source_data(inj_name)
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(
type='function', filename=filename, lineno=lineno, text=source,
html=html) | Get the source of an injectable function.
If an injectable is a registered Python variable and not a function
then all that is returned is {'type': 'variable'}.
If the injectable is a registered function then the JSON returned has keys
"type", "filename", "lineno", "text", and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments. |
def list_broadcasts():
casts = [{'cast': b[0], 'onto': b[1]} for b in orca.list_broadcasts()]
return jsonify(broadcasts=casts) | List all registered broadcasts as a list of objects with
keys "cast" and "onto". |
def broadcast_definition(cast_name, onto_name):
if not orca.is_broadcast(cast_name, onto_name):
abort(404)
b = orca.get_broadcast(cast_name, onto_name)
return jsonify(
cast=b.cast, onto=b.onto, cast_on=b.cast_on, onto_on=b.onto_on,
cast_index=b.cast_index, onto_index=b.onto_index) | Return the definition of a broadcast as an object with keys
"cast", "onto", "cast_on", "onto_on", "cast_index", and "onto_index".
These are the same as the arguments to the ``broadcast`` function. |
def step_definition(step_name):
if not orca.is_step(step_name):
abort(404)
filename, lineno, source = \
orca.get_step(step_name).func_source_data()
html = highlight(source, PythonLexer(), HtmlFormatter())
return jsonify(filename=filename, lineno=lineno, text=source, html=html) | Get the source of a step function. Returned object has keys
"filename", "lineno", "text" and "html". "text" is the raw
text of the function, "html" has been marked up by Pygments. |
def _add_log_handler(
handler, level=None, fmt=None, datefmt=None, propagate=None):
if not fmt:
fmt = US_LOG_FMT
if not datefmt:
datefmt = US_LOG_DATE_FMT
handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
if level is not None:
handler.setLevel(level)
logger = logging.getLogger('orca')
logger.addHandler(handler)
if propagate is not None:
logger.propagate = propagate | Add a logging handler to Orca.
Parameters
----------
handler : logging.Handler subclass
level : int, optional
An optional logging level that will apply only to this stream
handler.
fmt : str, optional
An optional format string that will be used for the log
messages.
datefmt : str, optional
An optional format string for formatting dates in the log
messages.
propagate : bool, optional
Whether the Orca logger should propagate. If None the
propagation will not be modified, otherwise it will be set
to this value. |
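US_LOG_FMT and US_LOG_DATE_FMT are referenced above but defined elsewhere in the module; the strings below are plausible defaults, not the confirmed values.

import logging

US_LOG_FMT = '%(asctime)s|%(levelname)s|%(name)s|%(message)s'   # assumed format
US_LOG_DATE_FMT = '%Y-%m-%d %H:%M:%S'                           # assumed date format

# Example: attach a console handler that only emits WARNING and above.
_add_log_handler(logging.StreamHandler(), level=logging.WARNING, propagate=False)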
def log_to_stream(level=None, fmt=None, datefmt=None):
    _add_log_handler(
        logging.StreamHandler(), level=level, fmt=fmt, datefmt=datefmt,
        propagate=False) | Send log messages to the console.
Parameters
----------
level : int, optional
An optional logging level that will apply only to this stream
handler.
fmt : str, optional
An optional format string that will be used for the log
messages.
datefmt : str, optional
An optional format string for formatting dates in the log
messages. |
def log_to_file(filename, level=None, fmt=None, datefmt=None):
    _add_log_handler(
        logging.FileHandler(filename), level=level, fmt=fmt,
        datefmt=datefmt) | Send log output to the given file.
Parameters
----------
filename : str
level : int, optional
An optional logging level that will apply only to this file
handler.
fmt : str, optional
An optional format string that will be used for the log
messages.
datefmt : str, optional
An optional format string for formatting dates in the log
messages. |
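A hedged end-to-end sketch (file name and levels are illustrative): the console receives INFO and above while the file captures full DEBUG output.

import logging

log_to_stream(level=logging.INFO)
log_to_file('orca_run.log', level=logging.DEBUG)

logging.getLogger('orca').setLevel(logging.DEBUG)
logging.getLogger('orca').debug('recorded in orca_run.log but filtered from the console')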