<SYSTEM_TASK:>
Return a list of containers tracked by this environment that are running
<END_TASK>
<USER_TASK:>
Description:
def containers_running(get_container_name):
"""
Return a list of containers tracked by this environment that are running
""" |
running = []
for n in ['web', 'postgres', 'solr', 'datapusher', 'redis']:
info = docker.inspect_container(get_container_name(n))
if info and not info['State']['Running']:
running.append(n + '(halted)')
elif info:
running.append(n)
return running |
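For orientation, a hypothetical usage sketch follows (the environment name 'mysite' is illustrative and not taken from the source; `Environment.load` appears later in this section):

env = Environment.load('mysite')
# Containers are listed by role name; stopped ones get a '(halted)' suffix,
# e.g. ['web', 'solr', 'postgres(halted)']
print(env.containers_running())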
<SYSTEM_TASK:>
Gets the names of all of the sites from the datadir and stores them
<END_TASK>
<USER_TASK:>
Description:
def _load_sites(self):
"""
Gets the names of all of the sites from the datadir and stores them
in self.sites. Also returns this list.
""" |
if not self.sites:
self.sites = task.list_sites(self.datadir)
return self.sites |
<SYSTEM_TASK:>
Save environment settings in the directory that need to be saved
<END_TASK>
<USER_TASK:>
Description:
def save_site(self, create=True):
"""
Save environment settings in the directory that need to be saved
even when creating only a new sub-site env.
""" |
self._load_sites()
if create:
self.sites.append(self.site_name)
task.save_new_site(self.site_name, self.sitedir, self.target, self.port,
self.address, self.site_url, self.passwords) |
<SYSTEM_TASK:>
Save environment settings into environment directory, overwriting
<END_TASK>
<USER_TASK:>
Description:
def save(self):
"""
Save environment settings into environment directory, overwriting
any existing configuration and discarding site config
""" |
task.save_new_environment(self.name, self.datadir, self.target,
self.ckan_version, self.deploy_target, self.always_prod) |
<SYSTEM_TASK:>
Return an Environment object with settings for a new project.
<END_TASK>
<USER_TASK:>
Description:
def new(cls, path, ckan_version, site_name, **kwargs):
"""
Return an Environment object with settings for a new project.
No directories or containers are created by this call.
:param path: location for new project directory, may be relative
:param ckan_version: release of CKAN to install
:param site_name: name of the site whose database and Solr core will
eventually be installed
For additional keyword arguments see the __init__ method.
Raises DatacatsError if a directory or project with the same
name already exists.
""" |
if ckan_version == 'master':
ckan_version = 'latest'
name, datadir, srcdir = task.new_environment_check(path, site_name, ckan_version)
environment = cls(name, srcdir, datadir, site_name, ckan_version, **kwargs)
environment._generate_passwords()
return environment |
<SYSTEM_TASK:>
Return an Environment object based on an existing environment+site.
<END_TASK>
<USER_TASK:>
Description:
def load(cls, environment_name=None, site_name='primary', data_only=False, allow_old=False):
"""
Return an Environment object based on an existing environment+site.
:param environment_name: existing environment name, path or None to
look in current or parent directories for project
:param data_only: set to True to only load from data dir, not
the project dir; Used for purging environment data.
:param allow_old: load a very minimal subset of what we usually
load. This will only work for purging environment data on an old site.
Raises DatacatsError if environment can't be found or if there is an
error parsing the environment information.
""" |
srcdir, extension_dir, datadir = task.find_environment_dirs(
environment_name, data_only)
if datadir and data_only:
return cls(environment_name, None, datadir, site_name)
(datadir, name, ckan_version, always_prod, deploy_target,
remote_server_key, extra_containers) = task.load_environment(srcdir, datadir, allow_old)
if not allow_old:
(port, address, site_url, passwords) = task.load_site(srcdir, datadir, site_name)
else:
(port, address, site_url, passwords) = (None, None, None, None)
environment = cls(name, srcdir, datadir, site_name, ckan_version=ckan_version,
port=port, deploy_target=deploy_target, site_url=site_url,
always_prod=always_prod, address=address,
extension_dir=extension_dir,
remote_server_key=remote_server_key,
extra_containers=extra_containers)
if passwords:
environment.passwords = passwords
else:
environment._generate_passwords()
if not allow_old:
environment._load_sites()
return environment |
<SYSTEM_TASK:>
Return True if all the expected datadir files are present
<END_TASK>
<USER_TASK:>
Description:
def data_complete(self):
"""
Return True if all the expected datadir files are present
""" |
return task.data_complete(self.datadir, self.sitedir,
self._get_container_name) |
<SYSTEM_TASK:>
raise a DatacatsError if the datadir or volumes are missing or damaged
<END_TASK>
<USER_TASK:>
Description:
def require_data(self):
"""
raise a DatacatsError if the datadir or volumes are missing or damaged
""" |
files = task.source_missing(self.target)
if files:
raise DatacatsError('Missing files in source directory:\n' +
'\n'.join(files))
if not self.data_exists():
raise DatacatsError('Environment datadir missing. '
'Try "datacats init".')
if not self.data_complete():
raise DatacatsError('Environment datadir damaged or volumes '
'missing. '
'To reset and discard all data use '
'"datacats reset"') |
<SYSTEM_TASK:>
Call once for new projects to create the initial project directories.
<END_TASK>
<USER_TASK:>
Description:
def create_directories(self, create_project_dir=True):
"""
Call once for new projects to create the initial project directories.
""" |
return task.create_directories(self.datadir, self.sitedir,
self.target if create_project_dir else None) |
<SYSTEM_TASK:>
Populate ckan directory from preloaded image and copy
<END_TASK>
<USER_TASK:>
Description:
def create_source(self, datapusher=True):
"""
Populate ckan directory from preloaded image and copy
who.ini and schema.xml into the conf directory
""" |
task.create_source(self.target, self._preload_image(), datapusher) |
<SYSTEM_TASK:>
Use make-config to generate an initial development.ini file
<END_TASK>
<USER_TASK:>
Description:
def create_ckan_ini(self):
"""
Use make-config to generate an initial development.ini file
""" |
self.run_command(
command='/scripts/run_as_user.sh /usr/lib/ckan/bin/paster make-config'
' ckan /project/development.ini',
rw_project=True,
ro={scripts.get_script_path('run_as_user.sh'): '/scripts/run_as_user.sh'},
) |
<SYSTEM_TASK:>
Use config-tool to update development.ini with our environment settings
<END_TASK>
<USER_TASK:>
Description:
def update_ckan_ini(self, skin=True):
"""
Use config-tool to update development.ini with our environment settings
:param skin: use environment template skin plugin True/False
""" |
command = [
'/usr/lib/ckan/bin/paster', '--plugin=ckan', 'config-tool',
'/project/development.ini', '-e',
'sqlalchemy.url = postgresql://<hidden>',
'ckan.datastore.read_url = postgresql://<hidden>',
'ckan.datastore.write_url = postgresql://<hidden>',
'ckan.datapusher.url = http://datapusher:8800',
'solr_url = http://solr:8080/solr',
'ckan.storage_path = /var/www/storage',
'ckan.plugins = datastore resource_proxy text_view ' +
('datapusher ' if exists(self.target + '/datapusher') else '')
+ 'recline_grid_view recline_graph_view'
+ (' {0}_theme'.format(self.name) if skin else ''),
'ckan.site_title = ' + self.name,
'ckan.site_logo =',
'ckan.auth.create_user_via_web = false',
]
self.run_command(command=command, rw_project=True) |
<SYSTEM_TASK:>
Create an example ckan extension for this environment and install it
<END_TASK>
<USER_TASK:>
Description:
def create_install_template_skin(self):
"""
Create an example ckan extension for this environment and install it
""" |
ckan_extension_template(self.name, self.target)
self.install_package_develop('ckanext-' + self.name + 'theme') |
<SYSTEM_TASK:>
Run db init to create all ckan tables
<END_TASK>
<USER_TASK:>
Description:
def ckan_db_init(self, retry_seconds=DB_INIT_RETRY_SECONDS):
"""
Run db init to create all ckan tables
:param retry_seconds: how long to retry waiting for db to start
""" |
# XXX workaround for not knowing how long we need to wait
# for postgres to be ready. fix this by changing the postgres
# entrypoint, or possibly running once with command=/bin/true
started = time.time()
while True:
try:
self.run_command(
'/usr/lib/ckan/bin/paster --plugin=ckan db init '
'-c /project/development.ini',
db_links=True,
clean_up=True,
)
break
except WebCommandError:
if time.time() - started > retry_seconds:
raise
time.sleep(DB_INIT_RETRY_DELAY) |
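The loop above is an instance of a generic retry-until-deadline pattern. A minimal, self-contained sketch of that pattern (names are illustrative, not part of datacats):

import time

def retry_until(action, retry_seconds, delay, exceptions=(Exception,)):
    """Call action() until it succeeds or retry_seconds have elapsed."""
    deadline = time.time() + retry_seconds
    while True:
        try:
            return action()
        except exceptions:
            if time.time() > deadline:
                raise
            time.sleep(delay)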
<SYSTEM_TASK:>
Start the apache server or paster serve
<END_TASK>
<USER_TASK:>
Description:
def start_ckan(self, production=False, log_syslog=False, paster_reload=True,
interactive=False):
"""
Start the apache server or paster serve
:param log_syslog: A flag to redirect all container logs to host's syslog
:param production: True for apache, False for paster serve + debug on
:param paster_reload: Instruct paster to watch for file changes
""" |
self.stop_ckan()
address = self.address or '127.0.0.1'
port = self.port
# in prod we always use log_syslog driver
log_syslog = True if self.always_prod else log_syslog
production = production or self.always_prod
# We only override the site URL with the docker URL on three conditions
override_site_url = (self.address is None
and not is_boot2docker()
and not self.site_url)
command = ['/scripts/web.sh', str(production), str(override_site_url), str(paster_reload)]
# XXX nasty hack, remove this once we have a lessc command
# for users (not just for building our preload image)
if not production:
css = self.target + '/ckan/ckan/public/base/css'
if not exists(css + '/main.debug.css'):
from shutil import copyfile
copyfile(css + '/main.css', css + '/main.debug.css')
ro = {
self.target: '/project',
scripts.get_script_path('datapusher.sh'): '/scripts/datapusher.sh'
}
if not is_boot2docker():
ro[self.datadir + '/venv'] = '/usr/lib/ckan'
datapusher = self.needs_datapusher()
if datapusher:
run_container(
self._get_container_name('datapusher'),
'datacats/web',
'/scripts/datapusher.sh',
ro=ro,
volumes_from=(self._get_container_name('venv') if is_boot2docker() else None),
log_syslog=log_syslog)
while True:
self._create_run_ini(port, production)
try:
self._run_web_container(port, command, address, log_syslog=log_syslog,
datapusher=datapusher, interactive=interactive)
if not is_boot2docker():
self.address = address
except PortAllocatedError:
port = self._next_port(port)
continue
break |
<SYSTEM_TASK:>
Start web container on port with command
<END_TASK>
<USER_TASK:>
Description:
def _run_web_container(self, port, command, address, log_syslog=False,
datapusher=True, interactive=False):
"""
Start web container on port with command
""" |
if is_boot2docker():
ro = {}
volumes_from = self._get_container_name('venv')
else:
ro = {self.datadir + '/venv': '/usr/lib/ckan'}
volumes_from = None
links = {
self._get_container_name('solr'): 'solr',
self._get_container_name('postgres'): 'db'
}
links.update({self._get_container_name(container): container
for container in self.extra_containers})
if datapusher:
if 'datapusher' not in self.containers_running():
raise DatacatsError(container_logs(self._get_container_name('datapusher'), "all",
False, False))
links[self._get_container_name('datapusher')] = 'datapusher'
ro = dict({
self.target: '/project/',
scripts.get_script_path('web.sh'): '/scripts/web.sh',
scripts.get_script_path('adjust_devini.py'): '/scripts/adjust_devini.py'},
**ro)
rw = {
self.sitedir + '/files': '/var/www/storage',
self.sitedir + '/run/development.ini': '/project/development.ini'
}
try:
if not interactive:
run_container(
name=self._get_container_name('web'),
image='datacats/web',
rw=rw,
ro=ro,
links=links,
volumes_from=volumes_from,
command=command,
port_bindings={
5000: port if is_boot2docker() else (address, port)},
log_syslog=log_syslog
)
else:
# FIXME: share more code with interactive_shell
if is_boot2docker():
switches = ['--volumes-from',
self._get_container_name('pgdata'), '--volumes-from',
self._get_container_name('venv')]
else:
switches = []
switches += ['--volume={}:{}:ro'.format(vol, ro[vol]) for vol in ro]
switches += ['--volume={}:{}'.format(vol, rw[vol]) for vol in rw]
links = ['--link={}:{}'.format(link, links[link]) for link in links]
args = ['docker', 'run', '-it', '--name', self._get_container_name('web'),
'-p', '{}:5000'.format(port) if is_boot2docker()
else '{}:{}:5000'.format(address, port)] + \
switches + links + ['datacats/web', ] + command
subprocess.call(args)
except APIError as e:
if '409' in str(e):
raise DatacatsError('Web container already running. '
'Please stop_web before running.')
else:
raise |
<SYSTEM_TASK:>
Wait for the web server to become available or raise DatacatsError
<END_TASK>
<USER_TASK:>
Description:
def wait_for_web_available(self):
"""
Wait for the web server to become available or raise DatacatsError
if it fails to start.
""" |
try:
if not wait_for_service_available(
self._get_container_name('web'),
self.web_address(),
WEB_START_TIMEOUT_SECONDS):
raise DatacatsError('Error while starting web container:\n' +
container_logs(self._get_container_name('web'), "all",
False, None))
except ServiceTimeout:
raise DatacatsError('Timeout while starting web container. Logs:' +
container_logs(self._get_container_name('web'), "all", False, None)) |
<SYSTEM_TASK:>
Return a port number from 5000-5999 based on the environment name
<END_TASK>
<USER_TASK:>
Description:
def _choose_port(self):
"""
Return a port number from 5000-5999 based on the environment name
to be used as a default when the user hasn't selected one.
""" |
# instead of random let's base it on the name chosen (and the site name)
return 5000 + unpack('Q',
sha((self.name + self.site_name)
.decode('ascii')).digest()[:8])[0] % 1000 |
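For reference, the same name-to-port derivation as a self-contained Python 3 sketch (the helper name is illustrative; the code above is Python 2 and decodes the name before hashing):

from hashlib import sha1
from struct import unpack

def choose_default_port(name, site_name):
    """Derive a stable port in the 5000-5999 range from the environment and site names."""
    digest = sha1((name + site_name).encode('ascii')).digest()
    return 5000 + unpack('Q', digest[:8])[0] % 1000

choose_default_port('myenv', 'primary')  # identical inputs always give the same port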
<SYSTEM_TASK:>
Return another port from the 5000-5999 range
<END_TASK>
<USER_TASK:>
Description:
def _next_port(self, port):
"""
Return another port from the 5000-5999 range
""" |
port = 5000 + (port + 1) % 1000
if port == self.port:
raise DatacatsError('Too many instances running')
return port |
<SYSTEM_TASK:>
Stop and remove the web container
<END_TASK>
<USER_TASK:>
Description:
def stop_ckan(self):
"""
Stop and remove the web container
""" |
remove_container(self._get_container_name('web'), force=True)
remove_container(self._get_container_name('datapusher'), force=True) |
<SYSTEM_TASK:>
return just the port number for the web container, or None if
<END_TASK>
<USER_TASK:>
Description:
def _current_web_port(self):
"""
return just the port number for the web container, or None if
not running
""" |
info = inspect_container(self._get_container_name('web'))
if info is None:
return None
try:
if not info['State']['Running']:
return None
return info['NetworkSettings']['Ports']['5000/tcp'][0]['HostPort']
except TypeError:
return None |
<SYSTEM_TASK:>
Return the url of the web server or None if not running
<END_TASK>
<USER_TASK:>
Description:
def web_address(self):
"""
Return the url of the web server or None if not running
""" |
port = self._current_web_port()
address = self.address or '127.0.0.1'
if port is None:
return None
return 'http://{0}:{1}/'.format(
address if address and not is_boot2docker() else docker_host(),
port) |
<SYSTEM_TASK:>
create 'admin' account with given password
<END_TASK>
<USER_TASK:>
Description:
def create_admin_set_password(self, password):
"""
create 'admin' account with given password
""" |
with open(self.sitedir + '/run/admin.json', 'w') as out:
json.dump({
'name': 'admin',
'email': 'none',
'password': password,
'sysadmin': True},
out)
self.user_run_script(
script=scripts.get_script_path('update_add_admin.sh'),
args=[],
db_links=True,
ro={
self.sitedir + '/run/admin.json': '/input/admin.json'
},
)
remove(self.sitedir + '/run/admin.json') |
<SYSTEM_TASK:>
launch interactive shell session with all writable volumes
<END_TASK>
<USER_TASK:>
Description:
def interactive_shell(self, command=None, paster=False, detach=False):
"""
launch interactive shell session with all writable volumes
:param command: list of strings to execute instead of bash
""" |
if not exists(self.target + '/.bash_profile'):
# this file is required for activating the virtualenv
self.create_bash_profile()
if not command:
command = []
use_tty = sys.stdin.isatty() and sys.stdout.isatty()
background = environ.get('CIRCLECI', False) or detach
if is_boot2docker():
venv_volumes = ['--volumes-from', self._get_container_name('venv')]
else:
venv_volumes = ['-v', self.datadir + '/venv:/usr/lib/ckan:rw']
self._create_run_ini(self.port, production=False, output='run.ini')
self._create_run_ini(self.port, production=True, output='test.ini',
source='ckan/test-core.ini', override_site_url=False)
script = scripts.get_script_path('shell.sh')
if paster:
script = scripts.get_script_path('paster.sh')
if command and command != ['help'] and command != ['--help']:
command += ['--config=/project/development.ini']
command = [self.extension_dir] + command
proxy_settings = self._proxy_settings()
if proxy_settings:
venv_volumes += ['-v',
self.sitedir + '/run/proxy-environment:/etc/environment:ro']
links = {self._get_container_name('solr'): 'solr',
self._get_container_name('postgres'): 'db'}
links.update({self._get_container_name(container): container for container
in self.extra_containers})
link_params = []
for link in links:
link_params.append('--link')
link_params.append(link + ':' + links[link])
if 'datapusher' in self.containers_running():
link_params.append('--link')
link_params.append(self._get_container_name('datapusher') + ':datapusher')
# FIXME: consider switching this to dockerpty
# using subprocess for docker client's interactive session
return subprocess.call([
DOCKER_EXE, 'run',
] + (['--rm'] if not background else []) + [
'-t' if use_tty else '',
'-d' if detach else '-i',
] + venv_volumes + [
'-v', self.target + ':/project:rw',
'-v', self.sitedir + '/files:/var/www/storage:rw',
'-v', script + ':/scripts/shell.sh:ro',
'-v', scripts.get_script_path('paster_cd.sh') + ':/scripts/paster_cd.sh:ro',
'-v', self.sitedir + '/run/run.ini:/project/development.ini:ro',
'-v', self.sitedir +
'/run/test.ini:/project/ckan/test-core.ini:ro'] +
link_params +
['--hostname', self.name,
'datacats/web', '/scripts/shell.sh'] + command) |
<SYSTEM_TASK:>
Install from requirements.txt file found in psrc
<END_TASK>
<USER_TASK:>
Description:
def install_package_requirements(self, psrc, stream_output=None):
"""
Install from requirements.txt file found in psrc
:param psrc: name of directory in environment directory
""" |
package = self.target + '/' + psrc
assert isdir(package), package
reqname = '/requirements.txt'
if not exists(package + reqname):
reqname = '/pip-requirements.txt'
if not exists(package + reqname):
return
return self.user_run_script(
script=scripts.get_script_path('install_reqs.sh'),
args=['/project/' + psrc + reqname],
rw_venv=True,
rw_project=True,
stream_output=stream_output
) |
<SYSTEM_TASK:>
Remove uploaded files, postgres db, solr index, venv
<END_TASK>
<USER_TASK:>
Description:
def purge_data(self, which_sites=None, never_delete=False):
"""
Remove uploaded files, postgres db, solr index, venv
""" |
# Default to the set of all sites
if not exists(self.datadir + '/.version'):
format_version = 1
else:
with open(self.datadir + '/.version') as f:
format_version = int(f.read().strip())
if format_version == 1:
print 'WARNING: Defaulting to old purge for version 1.'
datadirs = ['files', 'solr']
if is_boot2docker():
remove_container('datacats_pgdata_{}'.format(self.name))
remove_container('datacats_venv_{}'.format(self.name))
else:
datadirs += ['postgres', 'venv']
web_command(
command=['/scripts/purge.sh']
+ ['/project/data/' + d for d in datadirs],
ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
rw={self.datadir: '/project/data'},
)
shutil.rmtree(self.datadir)
elif format_version == 2:
if not which_sites:
which_sites = self.sites
datadirs = []
boot2docker = is_boot2docker()
if which_sites:
if self.target:
cp = SafeConfigParser()
cp.read([self.target + '/.datacats-environment'])
for site in which_sites:
if boot2docker:
remove_container(self._get_container_name('pgdata'))
else:
datadirs += [site + '/postgres']
# Always rm the site dir & solr & files
datadirs += [site, site + '/files', site + '/solr']
if self.target:
cp.remove_section('site_' + site)
self.sites.remove(site)
if self.target:
with open(self.target + '/.datacats-environment', 'w') as conf:
cp.write(conf)
datadirs = ['sites/' + datadir for datadir in datadirs]
if not self.sites and not never_delete:
datadirs.append('venv')
web_command(
command=['/scripts/purge.sh']
+ ['/project/data/' + d for d in datadirs],
ro={scripts.get_script_path('purge.sh'): '/scripts/purge.sh'},
rw={self.datadir: '/project/data'},
)
if not self.sites and not never_delete:
shutil.rmtree(self.datadir)
else:
raise DatacatsError('Unknown format version {}'.format(format_version)) |
<SYSTEM_TASK:>
Recompiles less files in an environment.
<END_TASK>
<USER_TASK:>
Description:
def less(environment, opts):
# pylint: disable=unused-argument
"""Recompiles less files in an environment.
Usage:
datacats less [ENVIRONMENT]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
require_extra_image(LESSC_IMAGE)
print 'Converting .less files to .css...'
for log in environment.compile_less():
print log |
<SYSTEM_TASK:>
Decorator applied to a dataset conversion function that converts acquired
<END_TASK>
<USER_TASK:>
Description:
def fetch_and_convert_dataset(source_files, target_filename):
"""
Decorator applied to a dataset conversion function that converts acquired
source files into a dataset file that BatchUp can use.
Parameters
----------
source_files: list of `AbstractSourceFile` instances
A list of files to be acquired
target_filename: str or callable
The name of the target file in which to store the converted data
either as a string or as a function of the form `fn() -> str`
that returns it.
The conversion function is of the form `fn(source_paths, target_path)`.
It should return `target_path` if successful, `None` otherwise.
After the conversion function is successfully applied, the temporary
source files that were downloaded or copied into BatchUp's temporary
directory are deleted, unless the conversion function moved or deleted
them in which case no action is taken.
Example
-------
In this example, we will show how to acquire the USPS dataset from an
online source. USPS is provided as an HDF5 file anyway, so the
conversion function simply moves it to the target path:
>>> import shutil
>>>
>>> _USPS_SRC_ONLINE = DownloadSourceFile(
... filename='usps.h5',
... url='https://github.com/Britefury/usps_dataset/raw/master/'
... 'usps.h5',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_ONLINE], 'usps.h5')
... def usps_data_online(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it:
>>> usps_path = usps_data_online() #doctest: +SKIP
In this example, the USPS dataset will be acquired from a file on the
filesystem. Note that the source path is fixed; the next example
shows how we can determine the source path dynamically:
>>> _USPS_SRC_OFFLINE_FIXED = CopySourceFile(
... filename='usps.h5',
... source_path='some/path/to/usps.h5',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_OFFLINE_FIXED], 'usps.h5')
... def usps_data_offline_fixed(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it:
>>> usps_path = usps_data_offline_fixed() #doctest: +SKIP
The source path is provided as an argument to the decorated fetch
function:
>>> _USPS_SRC_OFFLINE_DYNAMIC = CopySourceFile(
... filename='usps.h5',
... arg_name='usps_path',
... sha256='ba768d9a9b11e79b31c1e40130647c4fc04e6afc1fb41a0d4b9f11'
... '76065482b4'
... )
>>>
>>> @fetch_and_convert_dataset([_USPS_SRC_OFFLINE_DYNAMIC], 'usps.h5')
... def usps_data_offline_dynamic(source_paths, target_path):
... usps_path = source_paths[0]
... # For other datasets, you would convert the data here
... # In this case, we move the file
... shutil.move(usps_path, target_path)
... # Return the target path indicating success
... return target_path
>>>
>>> # Now use it (note that the KW-arg `usps_path` is the same
>>> # as the `arg_name` parameter given to `CopySourceFile` above):
>>> usps_path = usps_data_offline_dynamic(
... usps_path=get_config('mypath')) #doctest: +SKIP
""" |
if not isinstance(target_filename, six.string_types) and \
not callable(target_filename):
raise TypeError(
'target_filename must either be a string or be callable (it is '
'a {})'.format(type(target_filename)))
for src in source_files:
if not isinstance(src, AbstractSourceFile):
raise TypeError('source_files should contain '
'`AbstractSourceFile` instances, '
'not {}'.format(type(src)))
def decorate_fetcher(convert_function):
def fetch(**kwargs):
target_fn = path_string(target_filename)
target_path = config.get_data_path(target_fn)
# If the target file does not exist, we need to acquire the
# source files and convert them
if not os.path.exists(target_path):
# Acquire the source files
source_paths = []
for src in source_files:
p = src.acquire(**kwargs)
if p is not None:
if p in source_paths:
raise ValueError(
'Duplicate source file {}'.format(p))
source_paths.append(p)
else:
print('Failed to acquire {}'.format(src))
return None
# Got the source files
# Convert
converted_path = convert_function(source_paths, target_path)
# If successful, delete the source files
if converted_path is not None:
for src in source_files:
src.clean_up()
return converted_path
else:
# Target file already exists
return target_path
fetch.__name__ = convert_function.__name__
return fetch
return decorate_fetcher |
<SYSTEM_TASK:>
Download the file and return its path
<END_TASK>
<USER_TASK:>
Description:
def acquire(self, **kwargs):
"""
Download the file and return its path
Returns
-------
str or None
The path of the file in BatchUp's temporary directory or None if
the download failed.
""" |
return config.download_data(self.temp_filename, self.url,
self.sha256) |
<SYSTEM_TASK:>
Retrieve a result from executing a task. Note that tasks are executed
<END_TASK>
<USER_TASK:>
Description:
def retrieve(self):
"""
Retrieve a result from executing a task. Note that tasks are executed
in order and that if the next task has not yet completed, this call
will block until the result is available.
Returns
-------
A result from the result buffer.
""" |
if len(self.__result_buffer) > 0:
res = self.__result_buffer.popleft()
value = res.get()
else:
return None
self.__populate_buffer()
return value |
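The buffer-and-refill behaviour described above can be illustrated with concurrent.futures; this is a minimal sketch of the retrieve-in-order idea, not the library's actual worker-pool implementation (all names are illustrative):

from collections import deque
from concurrent.futures import ThreadPoolExecutor

class OrderedResults(object):
    """Submit tasks to a pool and hand results back in submission order."""
    def __init__(self, tasks, buffer_size=4, workers=2):
        self._tasks = iter(tasks)           # iterable of (fn, args) pairs
        self._pool = ThreadPoolExecutor(max_workers=workers)
        self._buffer = deque()
        self._buffer_size = buffer_size
        self._fill()

    def _fill(self):
        while len(self._buffer) < self._buffer_size:
            try:
                fn, args = next(self._tasks)
            except StopIteration:
                break
            self._buffer.append(self._pool.submit(fn, *args))

    def retrieve(self):
        if not self._buffer:
            return None
        value = self._buffer.popleft().result()   # blocks until the next task finishes
        self._fill()
        return value

squares = OrderedResults((pow, (i, 2)) for i in range(5))
print(squares.retrieve())   # 0, then 1, 4, 9, 16 on subsequent calls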
<SYSTEM_TASK:>
Install or reinstall Python packages within this environment
<END_TASK>
<USER_TASK:>
Description:
def install(environment, opts):
"""Install or reinstall Python packages within this environment
Usage:
datacats install [-q] [--address=IP] [ENVIRONMENT [PACKAGE ...]]
datacats install -c [-q] [--address=IP] [ENVIRONMENT]
Options:
--address=IP The address to bind to when reloading after install
-c --clean Reinstall packages into a clean virtualenv
-q --quiet Do not show output from installing packages and requirements.
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
environment.require_data()
install_all(environment, opts['--clean'], verbose=not opts['--quiet'],
packages=opts['PACKAGE'])
for site in environment.sites:
environment = Environment.load(environment.name, site)
if 'web' in environment.containers_running():
# FIXME: reload without changing debug setting?
manage.reload_(environment, {
'--address': opts['--address'],
'--background': False,
'--no-watch': False,
'--production': False,
'PORT': None,
'--syslog': False,
'--site-url': None,
'--interactive': False
}) |
<SYSTEM_TASK:>
Migrate an environment to a given revision of the datadir format.
<END_TASK>
<USER_TASK:>
Description:
def migrate(opts):
"""Migrate an environment to a given revision of the datadir format.
Usage:
datacats migrate [-y] [-r VERSION] [ENVIRONMENT_DIR]
Options:
-r --revision=VERSION The version of the datadir format you want
to convert to [default: 2]
-y --yes Answer yes to all questions.
Defaults to '.' if ENVIRONMENT_DIR isn't specified.
""" |
try:
version = int(opts['--revision'])
except (ValueError, TypeError):
raise DatacatsError('--revision parameter must be an integer.')
always_yes = opts['--yes']
if 'ENVIRONMENT_DIR' not in opts or not opts['ENVIRONMENT_DIR']:
cwd = getcwd()
# Get the dirname
opts['ENVIRONMENT_DIR'] = split(cwd if cwd[-1] != '/' else cwd[:-1])[1]
datadir = expanduser('~/.datacats/' + opts['ENVIRONMENT_DIR'])
if needs_format_conversion(datadir, version):
convert_environment(datadir, version, always_yes)
print 'Successfully converted datadir {} to format version {}'.format(datadir, version)
else:
print 'datadir {} is already at version {}.'.format(datadir, version) |
<SYSTEM_TASK:>
Trim the mini-batch `batch` to the size `length`.
<END_TASK>
<USER_TASK:>
Description:
def _trim_batch(batch, length):
"""Trim the mini-batch `batch` to the size `length`.
`batch` can be:
- a NumPy array, in which case it's first axis will be trimmed to size
`length`
- a tuple, in which case `_trim_batch` applied recursively to
each element and the resulting tuple returned
As a consequence, mini-batches can be structured; lists and tuples can
be nested arbitrarily deep.
Parameters
----------
batch: tuple or NumPy array
the mini-batch to trim
length: int
the size to which `batch` is to be trimmed
Returns
-------
tuple or NumPy array of same structure as `batch`
The trimmed mini-batch
""" |
if isinstance(batch, tuple):
return tuple([_trim_batch(b, length) for b in batch])
else:
return batch[:length] |
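A quick demonstration of the recursive trimming on a structured mini-batch, using the `_trim_batch` function above (assumes NumPy; the data is illustrative):

import numpy as np

X = np.arange(10).reshape((5, 2))
y = np.arange(5)
batch = (X, (y, y * 10))            # arbitrarily nested structure

trimmed = _trim_batch(batch, 3)     # keep only the first 3 samples throughout
assert trimmed[0].shape == (3, 2)
assert trimmed[1][0].shape == (3,) and trimmed[1][1].shape == (3,)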
<SYSTEM_TASK:>
Apply a function to all the samples that are accessed as mini-batches
<END_TASK>
<USER_TASK:>
Description:
def batch_map_concat(func, batch_iter, progress_iter_func=None,
n_batches=None, prepend_args=None):
"""
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the per-sample results.
The function `func` should return the result for each sample in the
mini-batch as an array. To return multiple results (e.g. loss and errors)
return a tuple of arrays (e.g. `(loss_array, error_array)`)
`batch_iter` must be an iterator that generates mini-batches that
contain samples
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
n_batches: [optional] integer
Process at most this number of batches before returning.
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The per-sample results of the function `func`, concatenated across mini-batches, e.g.
`(batch_A, batch_B, ...)`
Returns an empty tuple if there were 0 samples in the data set.
Examples
--------
In these examples we will demonstrate the use of `batch_map` to apply
a function (e.g. a Theano function that runs on the GPU) to samples
in a data set. We construct an iterator that generates mini-batches from
the data set and pass it to `batch_map` along with the function that
we wish to apply. The function will receive the batches and process them.
Define a function to apply to samples:
>>> def sqr_sum(x):
... # Ensure that we receive batches of the expected size:
... assert len(x) in {5, 2}
... return (x ** 2).sum(axis=1)
Construct data to process and create a data source:
>>> X = np.random.normal(size=(7, 10))
>>> ds = ArrayDataSource([X])
Apply the function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> X_sqr_sum = batch_map_concat(sqr_sum, batch_iter)
>>> assert np.allclose(X_sqr_sum[0], (X ** 2).sum(axis=1))
There are also cases where we wish to limit the number of batches that
will be processed:
- when the iterator generates an infinite number of samples
- when the data set is huge and we wish to show results as we go
Use the `n_batches` argument to limit the number of batches to process:
>>> X_large = np.random.normal(size=(100, 10))
>>> ds_large = ArrayDataSource([X_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_result = batch_map_concat(sqr_sum, iter_large, n_batches=2)
... # Should have 10 samples per partial result
... assert len(partial_result[0]) == 10
... j = i * 10
... assert np.allclose(partial_result[0],
... (X_large[j:j + 10]**2).sum(axis=1))
""" |
# Accumulator for results and number of samples
results = []
# If `progress_iter_func` is not `None`, apply it
if progress_iter_func is not None:
batch_iter = progress_iter_func(batch_iter, total=n_batches,
leave=False)
# Apply `func` to each batch
n_processed = 0
for batch in batch_iter:
# Apply on batch and check the type of the results
if prepend_args is not None:
batch_results = func(*(prepend_args + tuple(batch)))
else:
batch_results = func(*batch)
if batch_results is None:
pass
elif isinstance(batch_results, np.ndarray):
batch_results = (batch_results,)
elif isinstance(batch_results, tuple):
pass
else:
raise TypeError(
'Batch function should return a tuple of results, a '
'single result as a NumPy array, or None, '
'not {}'.format(type(batch_results)))
# Accumulate training results
if batch_results is not None:
results.append(batch_results)
n_processed += 1
if n_batches is not None and n_processed >= n_batches:
break
# Concatenate result arrays
if len(results) > 0:
results = zip(*results)
results = tuple([np.concatenate(list(r), axis=0) for r in results])
return results
else:
return None |
<SYSTEM_TASK:>
Apply a function to all the samples that are accessed as mini-batches
<END_TASK>
<USER_TASK:>
Description:
def batch_map_mean(func, batch_iter, progress_iter_func=None, sum_axis=None,
n_batches=None, prepend_args=None):
"""
Apply a function to all the samples that are accessed as mini-batches
obtained from an iterator.
Returns the across-samples mean of the results returned by `func`
The `sum_axis` argument tells `mean_batch_map` how to process the
results of `func` before accumulating them:
- If `sum_axis` is `None`, `func` should return the
across-samples SUM of the results of operating on the mini-batch,
e.g. for loss and error it should
return `(sum([loss0, loss1, ... lossN]), sum([err0, err1, ... errN]))`
- Otherwise, `sum_axis` should specify the axis or axes over which
the batch results should be summed, e.g. if `func` returns the
per-sample loss and error in two arrays
`[[loss0, loss1, ... lossN], [err0, err1, ... errN]]`, give `sum_axis`
a value of `0` to sum over axis 0 to get the per-batch loss and error.
These results will be accumulated and divided by the number of samples
at the end to get the mean.
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_iter: data set iterator
Iterator that generates mini-batches of data
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
sum_axis: (default=`None`) int, tuple of ints or None
If an integer or a tuple of integers, the results returned by `func`
will be summed across this axis / these axes before being accumulated;
e.g. if `func` returns an array of per-sample losses, with axis 0
being the sample dimension, passing a value of `0` as `sum_axis`
will cause these results to be summed along axis 0 to get the
per-batch sum before accumulating the losses. The total summed loss
will be divided by the number of samples at the end in order to
compute the mean loss.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The sum of the results of the function `func` divided by the number of
samples processed, e.g.
`(sum(outA_per_batch) / n_samples,
sum(outB_per_batch) / n_samples,
...)`
Examples
--------
The following examples will demonstrate the use of `mean_batch_map`
to compute binary cross entropy loss over a data set.
A few variants will be demonstrated:
- the default behaviour in which the function being applied should
return the sum over the batch sample axis
- having the function return per-sample results and having
`mean_batch_map` perform the sum operation. This is easier to
understand but less efficient as a Theano function would have to
move more data back from the GPU.
- limiting the number of batches that will be processed in order to get
partial results when dealing with a large data set
Define a function to compute the per-sample binary cross entropy
loss:
>>> def binary_crossentropy_loss(pred, target):
... e = -target * np.log(pred) - (1 - target) * np.log(1 - pred)
... return e.mean(axis=1)
Now define a function that computes the *SUM* of the binary cross
entropy losses over the sample axis (axis 0), as the default
behaviour of `mean_batch_map` will sum them up and divide by the
number of samples at the end:
>>> def binary_crossentropy_loss_sum(pred, target):
... return binary_crossentropy_loss(pred, target).sum()
Construct prediction and target data
>>> pred = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> tgt = np.random.uniform(0.1, 0.9, size=(7, 10))
>>> ds = ArrayDataSource([pred, tgt])
Apply the loss sum function defined above:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss_sum, batch_iter)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
Have `mean_batch_map` sum over axis 0:
>>> batch_iter = ds.batch_iterator(batch_size=5)
>>> loss = batch_map_mean(binary_crossentropy_loss, batch_iter,
... sum_axis=0)
>>> assert np.allclose(
... loss, binary_crossentropy_loss(pred, tgt).mean())
Construct a large data set and process it in chunks by limiting the number of batches with the `n_batches` argument:
>>> pred_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> tgt_large = np.random.uniform(0.1, 0.9, size=(100, 10))
>>> ds_large = ArrayDataSource([pred_large, tgt_large])
>>> iter_large = ds_large.batch_iterator(batch_size=5)
>>> for i in range(10):
... partial_loss = batch_map_mean(binary_crossentropy_loss_sum,
... iter_large, n_batches=2)
... j = i * 10
... assert np.allclose(
... partial_loss, binary_crossentropy_loss(
... pred_large[j:j + 10], tgt_large[j:j + 10]).mean())
""" |
# Accumulator for results and number of samples
results_accum = None
n_samples_accum = 0
# If `progress_iter_func` is not `None`, apply it
if progress_iter_func is not None:
batch_iter = progress_iter_func(batch_iter, total=n_batches,
leave=False)
# Train on each batch
n_processed = 0
for batch in batch_iter:
# Get number of samples in batch; can vary
batch_n = _length_of_batch(batch)
# Apply on batch and check the type of the results
if prepend_args is not None:
batch_results = func(*(prepend_args + tuple(batch)))
else:
batch_results = func(*batch)
if batch_results is None:
pass
elif isinstance(batch_results, (np.ndarray, float)):
batch_results = (batch_results,)
elif isinstance(batch_results, tuple):
pass
else:
raise TypeError(
'Batch function should return a tuple of results, a '
'single result as a NumPy array or float, or None, '
'not {}'.format(type(batch_results)))
# Accumulate results and number of samples
if results_accum is None:
# Initialise the accumulator to the batch results if `func`
# returns summed results or if it returned None;
# don't attempt to iterate over None and sum each item
if batch_results is None:
pass
elif sum_axis is None:
results_accum = list(batch_results)
else:
results_accum = [br.sum(axis=sum_axis) for br in batch_results]
else:
if batch_results is not None:
for i in range(len(results_accum)):
br = batch_results[i]
if sum_axis is not None:
br = br.sum(axis=sum_axis)
results_accum[i] += br
n_samples_accum += batch_n
n_processed += 1
if n_batches is not None and n_processed >= n_batches:
break
# Divide by the number of training examples used to compute mean
if results_accum is not None:
results_accum = tuple([np.array(r).astype(float) / n_samples_accum
for r in results_accum])
return results_accum |
<SYSTEM_TASK:>
Helper function to coerce an object into a data source, selecting the
<END_TASK>
<USER_TASK:>
Description:
def coerce_data_source(x):
"""
Helper function to coerce an object into a data source, selecting the
appropriate data source class for the given object. If `x` is already
a data source it is returned as is.
Parameters
----------
x: any
The object to coerce. If `x` is a data source, it is returned as is.
If it is a list or tuple of array-like objects they will be wrapped
in an `ArrayDataSource` that will be returned. If `x` is an iterator
it will be wrapped in an `IteratorDataSource`. If it is a callable
it will be wrapped in a `CallableDataSource`.
Returns
-------
`x` coerced into a data source
Raises
------
`TypeError` if `x` is not a data source, a list or tuple of array-like
objects, an iterator or a callable.
""" |
if isinstance(x, AbstractDataSource):
return x
elif isinstance(x, (list, tuple)):
# Sequence of array-likes
items = []
for item in x:
if _is_array_like(item):
items.append(item)
else:
raise TypeError(
'Cannot convert x to a data source; x is a sequence and '
'one of the elements is not an array-like object, rather '
'a {}'.format(type(item)))
if len(items) == 0:
raise ValueError('Cannot convert x to a data source; x is an '
'empty sequence')
return ArrayDataSource(items)
elif isinstance(x, collections.Iterator):
return IteratorDataSource(x)
elif callable(x):
return CallableDataSource(x)
else:
raise TypeError('Cannot convert x to a data source; can only handle '
'iterators, callables, non-empty sequences of '
'array-like objects; cannot '
'handle {}'.format(type(x))) |
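A short usage sketch based on the branches above (assumes NumPy and that `ArrayDataSource` from the same module is in scope):

import numpy as np

X = np.random.normal(size=(20, 3))
y = np.arange(20)

ds = coerce_data_source([X, y])     # sequence of array-likes -> ArrayDataSource
assert isinstance(ds, ArrayDataSource)

ds2 = coerce_data_source(ds)        # already a data source: returned unchanged
assert ds2 is ds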
<SYSTEM_TASK:>
A batch oriented implementation of `map`.
<END_TASK>
<USER_TASK:>
Description:
def batch_map_concat(self, func, batch_size, progress_iter_func=None,
n_batches=None, prepend_args=None, **kwargs):
"""A batch oriented implementation of `map`.
Applies a function to all the samples in this data source by breaking
the data into mini-batches and applying the function to each
mini-batch.
Returns the per-sample results.
This method is a wrapper around the :func:`batch_map` function;
please see its documentation for more information and examples.
The function `func` should return the result for each sample in the
mini-batch as an array. To return multiple results (e.g. loss and
errors) return a tuple of arrays (e.g. `(loss_array, error_array)`)
Parameters
----------
func: callable `func(*batch) -> results`
The function to call on each mini-batch. Note that the results
must be `None`, a tuple or a NumPy array
batch_size: int
The mini-batch size
progress_iter_func: [optional] callable
`progress_iter_func(iterator, total=total, leave=leave)`
A `tqdm` style function that will be passed the iterator that
generates training batches along with the total number of batches
and `False` for the `leave` parameter. By passing either
`tqdm.tqdm` or `tqdm.tqdm_notebook` as this argument you can have
the training loop display a progress bar.
n_batches: [optional] integer that specifies the number of mini-batches
to process before returning
prepend_args: [optional] tuple
Arguments to prepend to the arguments passed to `func`
Returns
-------
tuple
The per-sample results of the function `func`, concatenated across mini-batches, e.g.
`(batch_A, batch_B, ...)`
Returns an empty tuple if there were 0 samples in the data set.
Examples
--------
Define a function to apply to samples:
>>> def sqr_sum(x):
... return (x ** 2).sum(axis=1)
Construct data to process and create a data source:
>>> X = np.random.normal(size=(7, 10))
>>> ds = ArrayDataSource([X])
Apply the function defined above:
>>> X_sqr_sum = ds.batch_map_concat(sqr_sum, batch_size=5)
>>> assert (X_sqr_sum[0] == (X ** 2).sum(axis=1)).all()
""" |
if n_batches is None:
n = self.num_samples(**kwargs)
if n == np.inf:
raise ValueError('Data set has infinite size or sampler will '
'generate infinite samples but no n_batches '
'limit specified')
elif n is not None:
n_batches = sampling.num_batches(n, batch_size)
batch_iter = self.batch_iterator(batch_size, **kwargs)
return batch_map_concat(func, batch_iter, progress_iter_func,
n_batches, prepend_args) |
<SYSTEM_TASK:>
Create an iterator that generates mini-batch sample indices.
<END_TASK>
<USER_TASK:>
Description:
def batch_indices_iterator(self, batch_size, shuffle=None, **kwargs):
"""
Create an iterator that generates mini-batch sample indices.
The batches will have `batch_size` elements, with the exception
of the final batch which will have less if there are insufficient
elements left to make a complete batch.
If `shuffle` is `None` or `False` elements will be extracted in
order. If it is a `numpy.random.RandomState`, it will be used to
randomise the order in which elements are extracted from the data.
If it is `True`, NumPy's default random number generator will be
used to shuffle elements.
If an array of indices was provided to the constructor, the subset of
samples identified in that array is used, rather than the complete
set of samples.
The generated mini-batches indices take the form of 1D NumPy integer
arrays.
Parameters
----------
batch_size: int
Mini-batch size
shuffle: `numpy.random.RandomState` or `True` or `None`
Used to randomise element order. If `None`, elements will be
extracted in order. If it is a `RandomState` instance, that
RNG will be used to shuffle elements. If it is `True`, NumPy's
default RNG will be used.
Returns
-------
iterator
An iterator that generates mini-batches in the form of 1D NumPy
integer arrays.
""" |
shuffle_rng = self._get_shuffle_rng(shuffle)
if shuffle_rng is not None:
return self.sampler.shuffled_indices_batch_iterator(
batch_size, shuffle_rng)
else:
return self.sampler.in_order_indices_batch_iterator(batch_size) |
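The index batches produced by this iterator behave like the following self-contained sketch (not the library's sampler implementation; names are illustrative):

import numpy as np

def index_batches(n, batch_size, shuffle_rng=None):
    """Yield 1D index arrays covering n samples, optionally shuffled."""
    indices = np.arange(n)
    if shuffle_rng is not None:
        shuffle_rng.shuffle(indices)
    for i in range(0, n, batch_size):
        yield indices[i:i + batch_size]

batches = list(index_batches(7, batch_size=5))
assert [len(b) for b in batches] == [5, 2]   # the final batch is smaller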
<SYSTEM_TASK:>
Create an iterator that generates mini-batches extracted from
<END_TASK>
<USER_TASK:>
Description:
def batch_iterator(self, batch_size, shuffle=None, **kwargs):
"""
Create an iterator that generates mini-batches extracted from
this data source. The batches will have `batch_size` elements, with
the exception of the final batch which will have less if there are
insufficient elements left to make a complete batch.
If `shuffle` is `None` or `False` elements will be extracted in
order. If it is a `numpy.random.RandomState`, it will be used to
randomise the order in which elements are extracted from the data.
If it is `True`, NumPy's default random number generator will be
used to shuffle elements.
If an array of indices was provided to the constructor, the subset of
samples identified in that array is used, rather than the complete
set of samples.
The generated mini-batches take the form `[batch_x, batch_y, ...]`.
Parameters
----------
batch_size: int
Mini-batch size
shuffle: `numpy.random.RandomState` or `True` or `None`
Used to randomise element order. If `None`, elements will be
extracted in order. If it is a `RandomState` instance, that
RNG will be used to shuffle elements. If it is `True`, NumPy's
default RNG will be used.
Returns
-------
iterator
An iterator that generates items of type `[batch_x, batch_y, ...]`
where `batch_x`, `batch_y`, etc are themselves arrays.
""" |
for batch_ndx in self.batch_indices_iterator(
batch_size, shuffle=shuffle, **kwargs):
yield self.samples_by_indices_nomapping(batch_ndx) |
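A brief usage sketch of the iterator defined above, assuming the `ArrayDataSource` class used in the earlier doctests:

import numpy as np

X = np.random.normal(size=(100, 10))
y = np.random.randint(0, 2, size=(100,))
ds = ArrayDataSource([X, y])

rng = np.random.RandomState(12345)
for batch_X, batch_y in ds.batch_iterator(batch_size=32, shuffle=rng):
    assert len(batch_X) == len(batch_y)   # the final batch may hold fewer than 32 samples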
<SYSTEM_TASK:>
Get the number of samples in this data source.
<END_TASK>
<USER_TASK:>
Description:
def num_samples(self, **kwargs):
"""
Get the number of samples in this data source.
Returns
-------
int, `np.inf` or `None`.
An int if the number of samples is known, `np.inf` if it is
infinite or `None` if the number of samples is unknown.
""" |
if self.num_samples_fn is None:
return None
elif callable(self.num_samples_fn):
return self.num_samples_fn(**kwargs)
else:
return self.num_samples_fn |
<SYSTEM_TASK:>
Gather a batch of samples by indices, applying any index
<END_TASK>
<USER_TASK:>
Description:
def samples_by_indices(self, indices):
"""
Gather a batch of samples by indices, applying any index
mapping defined by the underlying data sources.
Parameters
----------
indices: 1D-array of ints or slice
An index array or a slice that selects the samples to retrieve
Returns
-------
nested list of arrays
A mini-batch
""" |
if not self._random_access:
raise TypeError('samples_by_indices method not supported as one '
'or more of the underlying data sources does '
'not support random access')
batch = self.source.samples_by_indices(indices)
return self.fn(*batch) |
<SYSTEM_TASK:>
Purge environment database and uploaded files
<END_TASK>
<USER_TASK:>
Description:
def purge(opts):
"""Purge environment database and uploaded files
Usage:
datacats purge [-s NAME | --delete-environment] [-y] [ENVIRONMENT]
Options:
--delete-environment Delete environment directory as well as its data, as
well as the data for **all** sites.
-s --site=NAME Specify the site to be purged [default: primary]
-y --yes Respond yes to all prompts (i.e. force)
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
old = False
try:
environment = Environment.load(opts['ENVIRONMENT'], opts['--site'])
except DatacatsError:
environment = Environment.load(opts['ENVIRONMENT'], opts['--site'], data_only=True)
if get_format_version(environment.datadir) == 1:
old = True
environment = Environment.load(opts['ENVIRONMENT'], opts['--site'], allow_old=True)
# We need a valid site if they don't want to blow away everything.
if not opts['--delete-environment'] and not old:
environment.require_valid_site()
sites = [opts['--site']] if not opts['--delete-environment'] else environment.sites
if not opts['--yes']:
y_or_n_prompt('datacats purge will delete all stored data')
environment.stop_ckan()
environment.stop_supporting_containers()
environment.purge_data(sites)
if opts['--delete-environment']:
if environment.target:
rmtree(environment.target)
else:
DatacatsError(("Unable to find the environment source"
" directory so that it can be deleted.\n"
"Chances are it's because it already does not exist")) |
<SYSTEM_TASK:>
Print the error message to stdout with colors and borders
<END_TASK>
<USER_TASK:>
Description:
def pretty_print(self):
"""
Print the error message to stdout with colors and borders
""" |
print colored.blue("-" * 40)
print colored.red("datacats: problem was encountered:")
print self.message
print colored.blue("-" * 40) |
<SYSTEM_TASK:>
Return a 16-character alphanumeric random string generated by the
<END_TASK>
<USER_TASK:>
Description:
def generate_password():
"""
Return a 16-character alphanumeric random string generated by the
operating system's secure pseudo random number generator
""" |
chars = uppercase + lowercase + digits
return ''.join(SystemRandom().choice(chars) for x in xrange(16)) |
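The names `uppercase`, `lowercase`, `digits` and `xrange` above are Python 2 spellings (from the `string` module and builtins). An equivalent Python 3 sketch for reference:

from random import SystemRandom
from string import ascii_uppercase, ascii_lowercase, digits

def generate_password(length=16):
    """Alphanumeric password drawn from the OS's secure PRNG."""
    chars = ascii_uppercase + ascii_lowercase + digits
    rng = SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(length))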
<SYSTEM_TASK:>
This method calls to docker-machine on the command line and
<END_TASK>
<USER_TASK:>
Description:
def _machine_check_connectivity():
"""
This method calls to docker-machine on the command line and
makes sure that it is up and ready.
Potential improvements to be made:
- Support multiple machine names (run a `docker-machine ls` and then
see which machines are active. Use a priority list)
""" |
with open(devnull, 'w') as devnull_f:
try:
status = subprocess.check_output(
['docker-machine', 'status', 'dev'],
stderr=devnull_f).strip()
if status == 'Stopped':
raise DatacatsError('Please start your docker-machine '
'VM with "docker-machine start dev"')
# XXX HACK: This exists because of
# http://github.com/datacats/datacats/issues/63,
# as a temporary fix.
if 'tls' in _docker_kwargs:
# It will print out messages to the user otherwise.
_docker_kwargs['tls'].assert_hostname = False
except subprocess.CalledProcessError:
raise DatacatsError('Please create a docker-machine named "dev" with '
'"docker-machine create"')
<SYSTEM_TASK:>
Run a single command in a web image optionally preloaded with the ckan
<END_TASK>
<USER_TASK:>
Description:
def web_command(command, ro=None, rw=None, links=None,
image='datacats/web', volumes_from=None, commit=False,
clean_up=False, stream_output=None, entrypoint=None):
"""
Run a single command in a web image optionally preloaded with the ckan
source and virtual envrionment.
:param command: command to execute
:param ro: {localdir: binddir} dict for read-only volumes
:param rw: {localdir: binddir} dict for read-write volumes
:param links: links passed to start
:param image: docker image name to use
:param volumes_from:
:param commit: True to create a new image based on result
:param clean_up: True to remove container even on error
:param stream_output: file to write stderr+stdout from command
:param entrypoint: override entrypoint (script that runs command)
:returns: image id if commit=True
""" |
binds = ro_rw_to_binds(ro, rw)
c = _get_docker().create_container(
image=image,
command=command,
volumes=binds_to_volumes(binds),
detach=False,
host_config=_get_docker().create_host_config(binds=binds, volumes_from=volumes_from, links=links),
entrypoint=entrypoint)
_get_docker().start(
container=c['Id'],
)
if stream_output:
for output in _get_docker().attach(
c['Id'], stdout=True, stderr=True, stream=True):
stream_output.write(output)
if _get_docker().wait(c['Id']):
# Before the (potential) cleanup, grab the logs!
logs = _get_docker().logs(c['Id'])
if clean_up:
remove_container(c['Id'])
raise WebCommandError(command, c['Id'][:12], logs)
if commit:
rval = _get_docker().commit(c['Id'])
if not remove_container(c['Id']):
# circle ci doesn't let us remove containers, quiet the warnings
if not environ.get('CIRCLECI', False):
warn('failed to remove container: {0}'.format(c['Id']))
if commit:
return rval['Id'] |
<SYSTEM_TASK:>
Wrapper for docker create_container, start calls
<END_TASK>
<USER_TASK:>
Description:
def run_container(name, image, command=None, environment=None,
ro=None, rw=None, links=None, detach=True, volumes_from=None,
port_bindings=None, log_syslog=False):
"""
Wrapper for docker create_container, start calls
:param log_syslog: bool flag to redirect container's logs to host's syslog
:returns: container info dict or None if container couldn't be created
Raises PortAllocatedError if container couldn't start on the
requested port.
""" |
binds = ro_rw_to_binds(ro, rw)
log_config = LogConfig(type=LogConfig.types.JSON)
if log_syslog:
log_config = LogConfig(
type=LogConfig.types.SYSLOG,
config={'syslog-tag': name})
host_config = _get_docker().create_host_config(binds=binds, log_config=log_config, links=links, volumes_from=volumes_from, port_bindings=port_bindings)
c = _get_docker().create_container(
name=name,
image=image,
command=command,
environment=environment,
volumes=binds_to_volumes(binds),
detach=detach,
stdin_open=False,
tty=False,
ports=list(port_bindings) if port_bindings else None,
host_config=host_config)
try:
_get_docker().start(
container=c['Id'],
)
except APIError as e:
if 'address already in use' in e.explanation:
try:
_get_docker().remove_container(name, force=True)
except APIError:
pass
raise PortAllocatedError()
raise
return c |
<SYSTEM_TASK:>
Wrapper for docker remove_container
<END_TASK>
<USER_TASK:>
Description:
def remove_container(name, force=False):
"""
Wrapper for docker remove_container
:returns: True if container was found and removed
""" |
try:
if not force:
_get_docker().stop(name)
except APIError:
pass
try:
_get_docker().remove_container(name, force=True)
return True
except APIError:
return False |
<SYSTEM_TASK:>
Wrapper for docker logs, attach commands.
<END_TASK>
<USER_TASK:>
Description:
def container_logs(name, tail, follow, timestamps):
"""
Wrapper for docker logs, attach commands.
""" |
if follow:
return _get_docker().attach(
name,
stdout=True,
stderr=True,
stream=True
)
    return _get_docker().logs(
name,
stdout=True,
stderr=True,
tail=tail,
timestamps=timestamps,
) |
<SYSTEM_TASK:>
Returns a string representation of the logs from a container.
<END_TASK>
<USER_TASK:>
Description:
def collect_logs(name):
"""
Returns a string representation of the logs from a container.
This is similar to container_logs but uses the `follow` option
and flattens the logs into a string instead of a generator.
:param name: The container name to grab logs for
:return: A string representation of the logs
""" |
logs = container_logs(name, "all", True, None)
string = ""
for s in logs:
string += s
return string |
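A short, hypothetical example tying the two log helpers together; the container name is illustrative only. Note that collect_logs streams with follow=True, so it blocks until the container exits and is best used on short-lived containers.

name = 'datacats_web_example'
print(container_logs(name, 50, False, True))   # last 50 lines, with timestamps
full_log = collect_logs(name)                  # blocks until the container stops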
<SYSTEM_TASK:>
create "data-only container" if it doesn't already exist.
<END_TASK>
<USER_TASK:>
Description:
def data_only_container(name, volumes):
"""
create "data-only container" if it doesn't already exist.
We'd like to avoid these, but postgres + boot2docker make
it difficult, see issue #5
""" |
info = inspect_container(name)
if info:
return
c = _get_docker().create_container(
name=name,
image='datacats/postgres', # any image will do
command='true',
volumes=volumes,
detach=True)
return c |
<SYSTEM_TASK:>
The main entry point for datacats cli tool
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
The main entry point for datacats cli tool
(as defined in setup.py's entry_points)
It parses the cli arguments for corresponding options
and runs the corresponding command
""" |
# pylint: disable=bare-except
try:
command_fn, opts = _parse_arguments(sys.argv[1:])
# purge handles loading differently
# 1 - Bail and just call the command if it doesn't have ENVIRONMENT.
if command_fn == purge.purge or 'ENVIRONMENT' not in opts:
return command_fn(opts)
environment = Environment.load(
opts['ENVIRONMENT'] or '.',
opts['--site'] if '--site' in opts else 'primary')
if command_fn not in COMMANDS_THAT_USE_SSH:
return command_fn(environment, opts)
# for commands that communicate with a remote server
# we load UserProfile and test our communication
user_profile = UserProfile()
user_profile.test_ssh_key(environment)
return command_fn(environment, opts, user_profile)
except DatacatsError as e:
_error_exit(e)
except SystemExit:
raise
except:
exc_info = "\n".join([line.rstrip()
for line in traceback.format_exception(*sys.exc_info())])
user_message = ("Something that should not"
" have happened happened when attempting"
" to run this command:\n"
" datacats {args}\n\n"
"It is seems to be a bug.\n"
"Please report this issue to us by"
" creating an issue ticket at\n\n"
" https://github.com/datacats/datacats/issues\n\n"
"so that we would be able to look into that "
"and fix the issue."
).format(args=" ".join(sys.argv[1:]))
_error_exit(DatacatsError(user_message,
parent_exception=UndocumentedError(exc_info))) |
<SYSTEM_TASK:>
Create containers and start serving environment
<END_TASK>
<USER_TASK:>
Description:
def start(environment, opts):
"""Create containers and start serving environment
Usage:
datacats start [-b] [--site-url SITE_URL] [-p|--no-watch] [-s NAME]
[-i] [--syslog] [--address=IP] [ENVIRONMENT [PORT]]
datacats start -r [-b] [--site-url SITE_URL] [-s NAME] [--syslog]
[-i] [--address=IP] [ENVIRONMENT]
Options:
--address=IP Address to listen on (Linux-only)
-b --background Don't wait for response from web server
--no-watch Do not automatically reload templates and .py files on change
-i --interactive Calls out to docker via the command line, allowing
for interactivity with the web image.
-p --production Start with apache and debug=false
-s --site=NAME Specify a site to start [default: primary]
--syslog Log to the syslog
--site-url SITE_URL The site_url to use in API responses. Defaults to old setting or
will attempt to determine it. (e.g. http://example.org:{port}/)
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
environment.require_data()
if environment.fully_running():
print 'Already running at {0}'.format(environment.web_address())
return
reload_(environment, opts) |
<SYSTEM_TASK:>
Reload environment source and configuration
<END_TASK>
<USER_TASK:>
Description:
def reload_(environment, opts):
"""Reload environment source and configuration
Usage:
datacats reload [-b] [-p|--no-watch] [--syslog] [-s NAME] [--site-url=SITE_URL]
[-i] [--address=IP] [ENVIRONMENT [PORT]]
datacats reload -r [-b] [--syslog] [-s NAME] [--address=IP] [--site-url=SITE_URL]
[-i] [ENVIRONMENT]
Options:
--address=IP Address to listen on (Linux-only)
-i --interactive Calls out to docker via the command line, allowing
for interactivity with the web image.
--site-url=SITE_URL The site_url to use in API responses. Can use Python template syntax
to insert the port and address (e.g. http://example.org:{port}/)
-b --background Don't wait for response from web server
--no-watch Do not automatically reload templates and .py files on change
-p --production Reload with apache and debug=false
-s --site=NAME Specify a site to reload [default: primary]
--syslog Log to the syslog
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
if opts['--interactive']:
# We can't wait for the server if we're tty'd
opts['--background'] = True
if opts['--address'] and is_boot2docker():
raise DatacatsError('Cannot specify address on boot2docker.')
environment.require_data()
environment.stop_ckan()
if opts['PORT'] or opts['--address'] or opts['--site-url']:
if opts['PORT']:
environment.port = int(opts['PORT'])
if opts['--address']:
environment.address = opts['--address']
if opts['--site-url']:
site_url = opts['--site-url']
# TODO: Check it against a regex or use urlparse
try:
site_url = site_url.format(address=environment.address, port=environment.port)
environment.site_url = site_url
environment.save_site(False)
except (KeyError, IndexError, ValueError) as e:
raise DatacatsError('Could not parse site_url: {}'.format(e))
environment.save()
for container in environment.extra_containers:
require_extra_image(EXTRA_IMAGE_MAPPING[container])
environment.stop_supporting_containers()
environment.start_supporting_containers()
environment.start_ckan(
production=opts['--production'],
paster_reload=not opts['--no-watch'],
log_syslog=opts['--syslog'],
interactive=opts['--interactive'])
write('Starting web server at {0} ...'.format(environment.web_address()))
if opts['--background']:
write('\n')
return
try:
environment.wait_for_web_available()
finally:
write('\n') |
<SYSTEM_TASK:>
Display information about environment and running containers
<END_TASK>
<USER_TASK:>
Description:
def info(environment, opts):
"""Display information about environment and running containers
Usage:
datacats info [-qr] [ENVIRONMENT]
Options:
-q --quiet Echo only the web URL or nothing if not running
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
damaged = False
sites = environment.sites
if not environment.sites:
sites = []
damaged = True
if opts['--quiet']:
if damaged:
raise DatacatsError('Damaged datadir: cannot get address.')
for site in sites:
environment.site_name = site
print '{}: {}'.format(site, environment.web_address())
return
datadir = environment.datadir
if not environment.data_exists():
datadir = ''
elif damaged:
datadir += ' (damaged)'
print 'Environment name: ' + environment.name
print ' Environment dir: ' + environment.target
print ' Data dir: ' + datadir
print ' Sites: ' + ' '.join(environment.sites)
for site in environment.sites:
print
environment.site_name = site
print ' Site: ' + site
print ' Containers: ' + ' '.join(environment.containers_running())
sitedir = environment.sitedir + (' (damaged)' if not environment.data_complete() else '')
print ' Site dir: ' + sitedir
addr = environment.web_address()
if addr:
print ' Available at: ' + addr |
<SYSTEM_TASK:>
Display or follow container logs
<END_TASK>
<USER_TASK:>
Description:
def logs(environment, opts):
"""Display or follow container logs
Usage:
datacats logs [--postgres | --solr | --datapusher] [-s NAME] [-tr] [--tail=LINES] [ENVIRONMENT]
datacats logs -f [--postgres | --solr | --datapusher] [-s NAME] [-r] [ENVIRONMENT]
Options:
--datapusher Show logs for datapusher instead of web logs
--postgres Show postgres database logs instead of web logs
-f --follow Follow logs instead of exiting immediately
--solr Show solr search logs instead of web logs
-t --timestamps Add timestamps to log lines
-s --site=NAME Specify a site for logs if needed [default: primary]
--tail=LINES Number of lines to show [default: all]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
container = 'web'
if opts['--solr']:
container = 'solr'
if opts['--postgres']:
container = 'postgres'
if opts['--datapusher']:
container = 'datapusher'
tail = opts['--tail']
if tail != 'all':
tail = int(tail)
l = environment.logs(container, tail, opts['--follow'],
opts['--timestamps'])
if not opts['--follow']:
print l
return
try:
for message in l:
write(message)
except KeyboardInterrupt:
print |
<SYSTEM_TASK:>
Open web browser window to this environment
<END_TASK>
<USER_TASK:>
Description:
def open_(environment, opts):
# pylint: disable=unused-argument
"""Open web browser window to this environment
Usage:
datacats open [-r] [-s NAME] [ENVIRONMENT]
Options:
-s --site=NAME Choose a site to open [default: primary]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
environment.require_data()
addr = environment.web_address()
if not addr:
print "Site not currently running"
else:
webbrowser.open(addr) |
<SYSTEM_TASK:>
Commands operating on environment data
<END_TASK>
<USER_TASK:>
Description:
def tweak(environment, opts):
"""Commands operating on environment data
Usage:
datacats tweak --install-postgis [ENVIRONMENT]
datacats tweak --add-redis [ENVIRONMENT]
datacats tweak --admin-password [ENVIRONMENT]
Options:
--install-postgis Install postgis in ckan database
--add-redis Adds redis next time this environment reloads
-s --site=NAME Choose a site to tweak [default: primary]
-p --admin-password Prompt to change the admin password
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
""" |
environment.require_data()
if opts['--install-postgis']:
print "Installing postgis"
environment.install_postgis_sql()
if opts['--add-redis']:
# Let the user know if they are trying to add it and it is already there
print ('Adding redis extra container... Please note that you will have '
'to reload your environment for these changes to take effect ("datacats reload {}")'
.format(environment.name))
environment.add_extra_container('redis', error_on_exists=True)
if opts['--admin-password']:
environment.create_admin_set_password(confirm_password()) |
<SYSTEM_TASK:>
Fetch the history of a flight by its number.
<END_TASK>
<USER_TASK:>
Description:
def get_history_by_flight_number(self, flight_number, page=1, limit=100):
"""Fetch the history of a flight by its number.
This method can be used to get the history of a flight route by the number.
It checks the user authentication and returns the data accordingly.
Args:
flight_number (str): The flight number, e.g. AI101
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_history_by_flight_number('AI101')
f.get_history_by_flight_number('AI101',page=1,limit=10)
""" |
url = FLT_BASE.format(flight_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_data(url) |
<SYSTEM_TASK:>
Fetch the history of a particular aircraft by its tail number.
<END_TASK>
<USER_TASK:>
Description:
def get_history_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the history of a particular aircraft by its tail number.
This method can be used to get the history of a particular aircraft by its tail number.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
            f.get_history_by_tail_number('VT-ANL')
            f.get_history_by_tail_number('VT-ANL',page=1,limit=10)
""" |
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_data(url, True) |
<SYSTEM_TASK:>
Returns a list of all the airports
<END_TASK>
<USER_TASK:>
Description:
def get_airports(self, country):
"""Returns a list of all the airports
For a given country this returns a list of dicts, one for each airport, with information like the iata code of the airport etc
Args:
country (str): The country for which the airports will be fetched
Example::
from pyflightdata import FlightData
f=FlightData()
f.get_airports('India')
""" |
url = AIRPORT_BASE.format(country.replace(" ", "-"))
return self._fr24.get_airports_data(url) |
<SYSTEM_TASK:>
Fetch the details of a particular aircraft by its tail number.
<END_TASK>
<USER_TASK:>
Description:
def get_info_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
            f.get_info_by_tail_number('VT-ANL')
            f.get_info_by_tail_number('VT-ANL',page=1,limit=10)
""" |
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_aircraft_data(url) |
<SYSTEM_TASK:>
Get the fleet for a particular airline.
<END_TASK>
<USER_TASK:>
Description:
def get_fleet(self, airline_key):
"""Get the fleet for a particular airline.
Given a airline code form the get_airlines() method output, this method returns the fleet for the airline.
Args:
airline_key (str): The code for the airline on flightradar24
Returns:
A list of dicts, one for each aircraft in the airlines fleet
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_fleet('ai-aic')
""" |
url = AIRLINE_FLEET_BASE.format(airline_key)
return self._fr24.get_airline_fleet_data(url, self.AUTH_TOKEN != '') |
<SYSTEM_TASK:>
Get the flights for a particular airline.
<END_TASK>
<USER_TASK:>
Description:
def get_flights(self, search_key):
"""Get the flights for a particular airline.
Given a full or partial flight number string, this method returns the first 100 flights matching that string.
Please note this method was different in earlier versions. The older versions took an airline code and returned all scheduled flights for that airline
Args:
search_key (str): Full or partial flight number for any airline e.g. MI47 to get all SilkAir flights starting with MI47
Returns:
A list of dicts, one for each scheduled flight in the airlines network
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_flights('MI47')
""" |
# assume limit 100 to return first 100 of any wild card search
url = AIRLINE_FLT_BASE.format(search_key, 100)
return self._fr24.get_airline_flight_data(url) |
<SYSTEM_TASK:>
Get the flights for a particular origin and destination.
<END_TASK>
<USER_TASK:>
Description:
def get_flights_from_to(self, origin, destination):
"""Get the flights for a particular origin and destination.
Given an origin and destination this method returns the upcoming scheduled flights between these two points.
The data returned has the airline, airport and schedule information - this is subject to change in future.
Args:
origin (str): The origin airport code
destination (str): The destination airport code
Returns:
A list of dicts, one for each scheduled flight between the two points.
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_flights_from_to('SIN','HYD')
""" |
# assume limit 100 to return first 100 of any wild card search
url = AIRLINE_FLT_BASE_POINTS.format(origin, destination)
return self._fr24.get_airline_flight_data(url, by_airports=True) |
<SYSTEM_TASK:>
Retrieve the weather at an airport
<END_TASK>
<USER_TASK:>
Description:
def get_airport_weather(self, iata, page=1, limit=100):
"""Retrieve the weather at an airport
Given the IATA code of an airport, this method returns the weather information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_weather('HYD')
f.get_airport_weather('HYD',page=1,limit=10)
""" |
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
weather = self._fr24.get_airport_weather(url)
mi = weather['sky']['visibility']['mi']
if (mi is not None) and (mi != "None"):
mi = float(mi)
km = mi * 1.6094
weather['sky']['visibility']['km'] = km
return weather |
<SYSTEM_TASK:>
Retrieve the metar data at the current time
<END_TASK>
<USER_TASK:>
Description:
def get_airport_metars(self, iata, page=1, limit=100):
"""Retrieve the metar data at the current time
Given the IATA code of an airport, this method returns the metar information.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars('HYD')
""" |
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
w = self._fr24.get_airport_weather(url)
return w['metar'] |
<SYSTEM_TASK:>
Retrieve the metar data for past 72 hours. The data will not be parsed to readable format.
<END_TASK>
<USER_TASK:>
Description:
def get_airport_metars_hist(self, iata):
"""Retrieve the metar data for past 72 hours. The data will not be parsed to readable format.
Given the IATA code of an airport, this method returns the metar information for last 72 hours.
Args:
iata (str): The IATA code for an airport, e.g. HYD
Returns:
The metar data for the airport
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_metars_hist('HYD')
""" |
url = AIRPORT_BASE.format(iata) + "/weather"
return self._fr24.get_airport_metars_hist(url) |
<SYSTEM_TASK:>
Retrieve the performance statistics at an airport
<END_TASK>
<USER_TASK:>
Description:
def get_airport_stats(self, iata, page=1, limit=100):
"""Retrieve the performance statistics at an airport
Given the IATA code of an airport, this method returns the performance statistics for the airport.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_stats('HYD')
f.get_airport_stats('HYD',page=1,limit=10)
""" |
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_airport_stats(url) |
<SYSTEM_TASK:>
Retrieve the details of an airport
<END_TASK>
<USER_TASK:>
Description:
def get_airport_details(self, iata, page=1, limit=100):
"""Retrieve the details of an airport
Given the IATA code of an airport, this method returns the detailed information like lat lon, full name, URL, codes etc.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_airport_details('HYD')
f.get_airport_details('HYD',page=1,limit=10)
""" |
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
details = self._fr24.get_airport_details(url)
weather = self._fr24.get_airport_weather(url)
# weather has more correct and standard elevation details in feet and meters
details['position']['elevation'] = weather['elevation']
return details |
<SYSTEM_TASK:>
Fetch the images of a particular aircraft by its tail number.
<END_TASK>
<USER_TASK:>
Description:
def get_images_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the images of a particular aircraft by its tail number.
This method can be used to get the images of the aircraft. The images are in 3 sizes and you can use what suits your need.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A dict with the images of the aircraft in various sizes
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
            f.get_images_by_tail_number('VT-ANL')
            f.get_images_by_tail_number('VT-ANL',page=1,limit=10)
""" |
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_aircraft_image_data(url) |
<SYSTEM_TASK:>
Login to the flightradar24 session
<END_TASK>
<USER_TASK:>
Description:
def login(self, email, password):
"""Login to the flightradar24 session
The API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans.
For users who have signed up for a plan, this method allows to login with the credentials from flightradar24. The API obtains
a token that will be passed on all the requests; this obtains the data as per the plan limits.
Args:
email (str): The email ID which is used to login to flightradar24
password (str): The password for the user ID
Example::
from pyflightdata import FlightData
f=FlightData()
f.login(myemail,mypassword)
""" |
response = FlightData.session.post(
url=LOGIN_URL,
data={
'email': email,
'password': password,
'remember': 'true',
'type': 'web'
},
headers={
'Origin': 'https://www.flightradar24.com',
'Referer': 'https://www.flightradar24.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0'
}
)
response = self._fr24.json_loads_byteified(
response.content) if response.status_code == 200 else None
if response:
token = response['userData']['subscriptionKey']
self.AUTH_TOKEN = token |
<SYSTEM_TASK:>
Simple method that decodes a given metar string.
<END_TASK>
<USER_TASK:>
Description:
def decode_metar(self, metar):
"""
Simple method that decodes a given metar string.
Args:
metar (str): The metar data
Returns:
The metar data in readable format
Example::
from pyflightdata import FlightData
f=FlightData()
f.decode_metar('WSSS 181030Z 04009KT 010V080 9999 FEW018TCU BKN300 29/22 Q1007 NOSIG')
""" |
try:
from metar import Metar
        except ImportError:
return "Unable to parse metars. Please install parser from https://github.com/tomp/python-metar."
m = Metar.Metar(metar)
return m.string() |
<SYSTEM_TASK:>
Perform the actual radius authentication by passing the given packet
<END_TASK>
<USER_TASK:>
Description:
def _perform_radius_auth(self, client, packet):
"""
Perform the actual radius authentication by passing the given packet
to the server which `client` is bound to.
Returns True or False depending on whether the user is authenticated
successfully.
""" |
try:
reply = client.SendPacket(packet)
except Timeout as e:
logging.error("RADIUS timeout occurred contacting %s:%s" % (
client.server, client.authport))
return False
except Exception as e:
logging.error("RADIUS error: %s" % e)
return False
if reply.code == AccessReject:
logging.warning("RADIUS access rejected for user '%s'" % (
packet['User-Name']))
return False
elif reply.code != AccessAccept:
logging.error("RADIUS access error for user '%s' (code %s)" % (
packet['User-Name'], reply.code))
return False
logging.info("RADIUS access granted for user '%s'" % (
packet['User-Name']))
return True |
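A hedged sketch of how the client and packet handed to this method might be built with pyrad; the server address, shared secret and dictionary path are placeholders, not values taken from this project.

from pyrad.client import Client
from pyrad.dictionary import Dictionary
from pyrad.packet import AccessRequest

client = Client(server='127.0.0.1', authport=1812, secret=b'testing123',
                dict=Dictionary('dictionary'))          # path to a RADIUS dictionary file
packet = client.CreateAuthPacket(code=AccessRequest, User_Name='alice')
packet['User-Password'] = packet.PwCrypt('wonderland')
# backend._perform_radius_auth(client, packet) then returns True or False,
# where `backend` is an instance of the auth backend class above.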
<SYSTEM_TASK:>
Check credentials against RADIUS server and return a User object or
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self, request, username=None, password=None):
"""
Check credentials against RADIUS server and return a User object or
None.
""" |
if isinstance(username, basestring):
username = username.encode('utf-8')
if isinstance(password, basestring):
password = password.encode('utf-8')
server = self._get_server_from_settings()
result = self._radius_auth(server, username, password)
if result:
return self.get_django_user(username, password)
return None |
<SYSTEM_TASK:>
Remove fluctuating data points based on magnitudes.
<END_TASK>
<USER_TASK:>
Description:
def sigma_clipping(date, mag, err, threshold=3, iteration=1):
"""
    Remove fluctuating data points based on magnitudes.
Parameters
----------
date : array_like
An array of dates.
mag : array_like
An array of magnitudes.
err : array_like
An array of magnitude errors.
threshold : float, optional
Threshold for sigma-clipping.
iteration : int, optional
        The number of sigma-clipping iterations.
Returns
-------
date : array_like
Sigma-clipped dates.
mag : array_like
Sigma-clipped magnitudes.
err : array_like
Sigma-clipped magnitude errors.
""" |
# Check length.
if (len(date) != len(mag)) \
or (len(date) != len(err)) \
or (len(mag) != len(err)):
raise RuntimeError('The length of date, mag, and err must be same.')
# By magnitudes
for i in range(int(iteration)):
mean = np.median(mag)
std = np.std(mag)
index = (mag >= mean - threshold*std) & (mag <= mean + threshold*std)
date = date[index]
mag = mag[index]
err = err[index]
return date, mag, err |
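A quick self-contained illustration with synthetic data, assuming sigma_clipping above is importable: a single 3-sigma pass removes the one obvious outlier.

import numpy as np

date = np.arange(10, dtype=float)
mag = np.array([15.0, 15.1, 14.9, 15.0, 15.2, 14.8, 15.1, 15.0, 19.0, 15.1])
err = np.full(10, 0.05)
d, m, e = sigma_clipping(date, mag, err, threshold=3, iteration=1)
print('{0} -> {1} points'.format(len(mag), len(m)))   # the 19.0 outlier is clipped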
<SYSTEM_TASK:>
Return a schema object from a spec.
<END_TASK>
<USER_TASK:>
Description:
def from_spec(spec):
"""Return a schema object from a spec.
A spec is either a string for a scalar type, or a list of 0 or 1 specs,
or a dictionary with two elements: {'fields': { ... }, required: [...]}.
""" |
if spec == '':
return any_schema
if framework.is_str(spec):
# Scalar type
if spec not in SCALAR_TYPES:
raise exceptions.SchemaError('Not a valid schema type: %r' % spec)
return ScalarSchema(spec)
if framework.is_list(spec):
return ListSchema(spec[0] if len(spec) else any_schema)
if framework.is_tuple(spec):
return TupleSchema(spec.get('fields', {}), spec.get('required', []))
raise exceptions.SchemaError('Not valid schema spec; %r' % spec) |
<SYSTEM_TASK:>
Validate an object according to its own AND an externally imposed schema.
<END_TASK>
<USER_TASK:>
Description:
def validate(obj, schema):
"""Validate an object according to its own AND an externally imposed schema.""" |
if not framework.EvaluationContext.current().validate:
# Short circuit evaluation when disabled
return obj
# Validate returned object according to its own schema
if hasattr(obj, 'tuple_schema'):
obj.tuple_schema.validate(obj)
# Validate object according to externally imposed schema
if schema:
schema.validate(obj)
return obj |
<SYSTEM_TASK:>
Attach the given schema to the given object.
<END_TASK>
<USER_TASK:>
Description:
def attach(obj, schema):
"""Attach the given schema to the given object.""" |
# We have a silly exception for lists, since they have no 'attach_schema'
# method, and I don't feel like making a subclass for List just to add it.
# So, we recursively search the list for tuples and attach the schema in
# there.
if framework.is_list(obj) and isinstance(schema, ListSchema):
for x in obj:
attach(x, schema.element_schema)
return
# Otherwise, the object should be able to handle its own schema attachment.
getattr(obj, 'attach_schema', nop)(schema) |
<SYSTEM_TASK:>
Return a list of entire features.
<END_TASK>
<USER_TASK:>
Description:
def get_feature_set_all():
"""
Return a list of entire features.
A set of entire features regardless of being used to train a model or
predict a class.
Returns
-------
feature_names : list
A list of features' names.
""" |
features = get_feature_set()
features.append('cusum')
features.append('eta')
features.append('n_points')
features.append('period_SNR')
features.append('period_log10FAP')
features.append('period_uncertainty')
features.append('weighted_mean')
features.append('weighted_std')
features.sort()
return features |
<SYSTEM_TASK:>
A property that returns all of the model's parameters.
<END_TASK>
<USER_TASK:>
Description:
def parameters(self):
""" A property that returns all of the model's parameters. """ |
parameters = []
for hl in self.hidden_layers:
parameters.extend(hl.parameters)
parameters.extend(self.top_layer.parameters)
return parameters |
<SYSTEM_TASK:>
Used to set all of the model's parameters to new values.
<END_TASK>
<USER_TASK:>
Description:
def parameters(self, value):
""" Used to set all of the model's parameters to new values.
**Parameters:**
value : array_like
New values for the model parameters. Must be of length
``self.n_parameters``.
""" |
if len(value) != self.n_parameters:
raise ValueError("Incorrect length of parameter vector. "
"Model has %d parameters, but got %d" %
(self.n_parameters, len(value)))
i = 0
for hl in self.hidden_layers:
hl.parameters = value[i:i + hl.n_parameters]
i += hl.n_parameters
self.top_layer.parameters = value[-self.top_layer.n_parameters:] |
<SYSTEM_TASK:>
Returns an MD5 digest of the model.
<END_TASK>
<USER_TASK:>
Description:
def checksum(self):
""" Returns an MD5 digest of the model.
This can be used to easily identify whether two models have the
same architecture.
""" |
m = md5()
for hl in self.hidden_layers:
m.update(str(hl.architecture))
m.update(str(self.top_layer.architecture))
return m.hexdigest() |
<SYSTEM_TASK:>
Evaluate the loss function without computing gradients.
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self, input_data, targets,
return_cache=False, prediction=True):
""" Evaluate the loss function without computing gradients.
**Parameters:**
input_data : GPUArray
Data to evaluate
targets: GPUArray
Targets
return_cache : bool, optional
Whether to return intermediary variables from the
computation and the hidden activations.
prediction : bool, optional
Whether to use prediction model. Only relevant when using
dropout. If true, then weights are multiplied by
1 - dropout if the layer uses dropout.
**Returns:**
loss : float
The value of the loss function.
hidden_cache : list, only returned if ``return_cache == True``
Cache as returned by :meth:`hebel.models.NeuralNet.feed_forward`.
activations : list, only returned if ``return_cache == True``
Hidden activations as returned by
:meth:`hebel.models.NeuralNet.feed_forward`.
""" |
# Forward pass
activations, hidden_cache = self.feed_forward(
input_data, return_cache=True, prediction=prediction)
loss = self.top_layer.train_error(None,
targets, average=False, cache=activations,
prediction=prediction)
for hl in self.hidden_layers:
if hl.l1_penalty_weight: loss += hl.l1_penalty
if hl.l2_penalty_weight: loss += hl.l2_penalty
if self.top_layer.l1_penalty_weight: loss += self.top_layer.l1_penalty
if self.top_layer.l2_penalty_weight: loss += self.top_layer.l2_penalty
if not return_cache:
return loss
else:
return loss, hidden_cache, activations |
<SYSTEM_TASK:>
Perform a full forward and backward pass through the model.
<END_TASK>
<USER_TASK:>
Description:
def training_pass(self, input_data, targets):
""" Perform a full forward and backward pass through the model.
**Parameters:**
input_data : GPUArray
Data to train the model with.
targets : GPUArray
Training targets.
**Returns:**
loss : float
Value of loss function as evaluated on the data and targets.
gradients : list of GPUArray
Gradients obtained from backpropagation in the backward pass.
""" |
# Forward pass
loss, hidden_cache, logistic_cache = self.evaluate(
input_data, targets, return_cache=True, prediction=False)
if not np.isfinite(loss):
raise ValueError('Infinite activations!')
# Backpropagation
if self.hidden_layers:
hidden_activations = hidden_cache[-1][0]
else:
hidden_activations = input_data
df_top_layer = \
self.top_layer.backprop(hidden_activations, targets,
cache=logistic_cache)
gradients = list(df_top_layer[0][::-1])
df_hidden = df_top_layer[1]
if self.hidden_layers:
hidden_inputs = [input_data] + [c[0] for c in hidden_cache[:-1]]
for hl, hc, hi in \
zip(self.hidden_layers[::-1], hidden_cache[::-1],
hidden_inputs[::-1]):
g, df_hidden = hl.backprop(hi, df_hidden, cache=hc)
gradients.extend(g[::-1])
gradients.reverse()
return loss, gradients |
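A hedged sketch of a single plain-SGD update built on the methods above; `model` is assumed to be a constructed NeuralNet and `X`, `y` GPUArrays of compatible shape, none of which are defined here.

learning_rate = 0.01
loss, gradients = model.training_pass(X, y)
# one parameter array per gradient array, matching the parameters setter above
model.parameters = [p - learning_rate * g
                    for p, g in zip(model.parameters, gradients)]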
<SYSTEM_TASK:>
Run data forward through the model.
<END_TASK>
<USER_TASK:>
Description:
def feed_forward(self, input_data, return_cache=False, prediction=True):
""" Run data forward through the model.
**Parameters:**
input_data : GPUArray
Data to run through the model.
return_cache : bool, optional
Whether to return the intermediary results.
prediction : bool, optional
Whether to run in prediction mode. Only relevant when
using dropout. If true, weights are multiplied by 1 - dropout.
If false, then half of hidden units are randomly dropped and
the dropout mask is returned in case ``return_cache==True``.
**Returns:**
prediction : GPUArray
Predictions from the model.
cache : list of GPUArray, only returned if ``return_cache == True``
Results of intermediary computations.
""" |
hidden_cache = None # Create variable in case there are no hidden layers
if self.hidden_layers:
# Forward pass
hidden_cache = []
for i in range(len(self.hidden_layers)):
hidden_activations = hidden_cache[i - 1][0] if i else input_data
# Use dropout predict if previous layer has dropout
hidden_cache.append(self.hidden_layers[i]
.feed_forward(hidden_activations,
prediction=prediction))
hidden_activations = hidden_cache[-1][0]
else:
hidden_activations = input_data
# Use dropout_predict if last hidden layer has dropout
activations = \
self.top_layer.feed_forward(hidden_activations,
prediction=False)
if return_cache:
return activations, hidden_cache
return activations |
<SYSTEM_TASK:>
Derive not-period-based features.
<END_TASK>
<USER_TASK:>
Description:
def shallow_run(self):
"""Derive not-period-based features.""" |
# Number of data points
self.n_points = len(self.date)
# Weight calculation.
# All zero values.
if not self.err.any():
self.err = np.ones(len(self.mag)) * np.std(self.mag)
# Some zero values.
elif not self.err.all():
np.putmask(self.err, self.err==0, np.median(self.err))
self.weight = 1. / self.err
self.weighted_sum = np.sum(self.weight)
# Simple statistics, mean, median and std.
self.mean = np.mean(self.mag)
self.median = np.median(self.mag)
self.std = np.std(self.mag)
# Weighted mean and std.
self.weighted_mean = np.sum(self.mag * self.weight) / self.weighted_sum
self.weighted_std = np.sqrt(np.sum((self.mag - self.weighted_mean) ** 2 \
* self.weight) / self.weighted_sum)
# Skewness and kurtosis.
self.skewness = ss.skew(self.mag)
self.kurtosis = ss.kurtosis(self.mag)
# Normalization-test. Shapiro-Wilk test.
shapiro = ss.shapiro(self.mag)
self.shapiro_w = shapiro[0]
# self.shapiro_log10p = np.log10(shapiro[1])
# Percentile features.
self.quartile31 = np.percentile(self.mag, 75) \
- np.percentile(self.mag, 25)
# Stetson K.
self.stetson_k = self.get_stetson_k(self.mag, self.median, self.err)
# Ratio between higher and lower amplitude than average.
self.hl_amp_ratio = self.half_mag_amplitude_ratio(
self.mag, self.median, self.weight)
# This second function's value is very similar with the above one.
# self.hl_amp_ratio2 = self.half_mag_amplitude_ratio2(
# self.mag, self.median)
# Cusum
self.cusum = self.get_cusum(self.mag)
# Eta
self.eta = self.get_eta(self.mag, self.weighted_std) |
<SYSTEM_TASK:>
Period finding using the Lomb-Scargle algorithm.
<END_TASK>
<USER_TASK:>
Description:
def get_period_LS(self, date, mag, n_threads, min_period):
"""
Period finding using the Lomb-Scargle algorithm.
Finding two periods. The second period is estimated after whitening
the first period. Calculating various other features as well
using derived periods.
Parameters
----------
date : array_like
An array of observed date, in days.
mag : array_like
An array of observed magnitude.
n_threads : int
The number of threads to use.
min_period : float
The minimum period to calculate.
""" |
# DO NOT CHANGE THESE PARAMETERS.
oversampling = 3.
hifac = int((max(date) - min(date)) / len(date) / min_period * 2.)
# Minimum hifac
if hifac < 100:
hifac = 100
# Lomb-Scargle.
fx, fy, nout, jmax, prob = pLS.fasper(date, mag, oversampling, hifac,
n_threads)
self.f = fx[jmax]
self.period = 1. / self.f
self.period_uncertainty = self.get_period_uncertainty(fx, fy, jmax)
self.period_log10FAP = \
np.log10(pLS.getSignificance(fx, fy, nout, oversampling)[jmax])
# self.f_SNR1 = fy[jmax] / np.median(fy)
self.period_SNR = (fy[jmax] - np.median(fy)) / np.std(fy)
# Fit Fourier Series of order 3.
order = 3
# Initial guess of Fourier coefficients.
p0 = np.ones(order * 2 + 1)
date_period = (date % self.period) / self.period
p1, success = leastsq(self.residuals, p0,
args=(date_period, mag, order))
# fitted_y = self.FourierSeries(p1, date_period, order)
# print p1, self.mean, self.median
# plt.plot(date_period, self.mag, 'b+')
# plt.show()
# Derive Fourier features for the first period.
# Petersen, J. O., 1986, A&A
self.amplitude = np.sqrt(p1[1] ** 2 + p1[2] ** 2)
self.r21 = np.sqrt(p1[3] ** 2 + p1[4] ** 2) / self.amplitude
self.r31 = np.sqrt(p1[5] ** 2 + p1[6] ** 2) / self.amplitude
self.f_phase = np.arctan(-p1[1] / p1[2])
self.phi21 = np.arctan(-p1[3] / p1[4]) - 2. * self.f_phase
self.phi31 = np.arctan(-p1[5] / p1[6]) - 3. * self.f_phase
"""
# Derive a second period.
# Whitening a light curve.
residual_mag = mag - fitted_y
# Lomb-Scargle again to find the second period.
omega_top, power_top = search_frequencies(date, residual_mag, err,
#LS_kwargs={'generalized':True, 'subtract_mean':True},
n_eval=5000, n_retry=3, n_save=50)
self.period2 = 2*np.pi/omega_top[np.where(power_top==np.max(power_top))][0]
self.f2 = 1. / self.period2
self.f2_SNR = power_top[np.where(power_top==np.max(power_top))][0] \
* (len(self.date) - 1) / 2.
# Fit Fourier Series again.
p0 = [1.] * order * 2
date_period = (date % self.period) / self.period
p2, success = leastsq(self.residuals, p0,
args=(date_period, residual_mag, order))
fitted_y = self.FourierSeries(p2, date_period, order)
#plt.plot(date%self.period2, residual_mag, 'b+')
#plt.show()
# Derive Fourier features for the first second.
self.f2_amp = 2. * np.sqrt(p2[1]**2 + p2[2]**2)
self.f2_R21 = np.sqrt(p2[3]**2 + p2[4]**2) / self.f2_amp
self.f2_R31 = np.sqrt(p2[5]**2 + p2[6]**2) / self.f2_amp
self.f2_R41 = np.sqrt(p2[7]**2 + p2[8]**2) / self.f2_amp
self.f2_R51 = np.sqrt(p2[9]**2 + p2[10]**2) / self.f2_amp
self.f2_phase = np.arctan(-p2[1] / p2[2])
self.f2_phi21 = np.arctan(-p2[3] / p2[4]) - 2. * self.f2_phase
self.f2_phi31 = np.arctan(-p2[5] / p2[6]) - 3. * self.f2_phase
self.f2_phi41 = np.arctan(-p2[7] / p2[8]) - 4. * self.f2_phase
self.f2_phi51 = np.arctan(-p2[9] / p2[10]) - 5. * self.f2_phase
# Calculate features using the first and second periods.
self.f12_ratio = self.f2 / self.f1
self.f12_remain = self.f1 % self.f2 \
if self.f1 > self.f2 else self.f2 % self.f1
self.f12_amp = self.f2_amp / self.f1_amp
self.f12_phase = self.f2_phase - self.f1_phase
""" |
<SYSTEM_TASK:>
Get uncertainty of a period.
<END_TASK>
<USER_TASK:>
Description:
def get_period_uncertainty(self, fx, fy, jmax, fx_width=100):
"""
Get uncertainty of a period.
The uncertainty is defined as the half width of the frequencies
around the peak, that becomes lower than average + standard deviation
of the power spectrum.
Since we may not have fine resolution around the peak,
we do not assume it is gaussian. So, no scaling factor of
2.355 (= 2 * sqrt(2 * ln2)) is applied.
Parameters
----------
fx : array_like
An array of frequencies.
fy : array_like
An array of amplitudes.
jmax : int
An index at the peak frequency.
fx_width : int, optional
Width of power spectrum to calculate uncertainty.
Returns
-------
p_uncertain : float
Period uncertainty.
""" |
# Get subset
start_index = jmax - fx_width
end_index = jmax + fx_width
if start_index < 0:
start_index = 0
if end_index > len(fx) - 1:
end_index = len(fx) - 1
fx_subset = fx[start_index:end_index]
fy_subset = fy[start_index:end_index]
fy_mean = np.median(fy_subset)
fy_std = np.std(fy_subset)
# Find peak
max_index = np.argmax(fy_subset)
# Find list whose powers become lower than average + std.
index = np.where(fy_subset <= fy_mean + fy_std)[0]
# Find the edge at left and right. This is the full width.
left_index = index[(index < max_index)]
if len(left_index) == 0:
left_index = 0
else:
left_index = left_index[-1]
right_index = index[(index > max_index)]
if len(right_index) == 0:
right_index = len(fy_subset) - 1
else:
right_index = right_index[0]
# We assume the half of the full width is the period uncertainty.
half_width = (1. / fx_subset[left_index]
- 1. / fx_subset[right_index]) / 2.
period_uncertainty = half_width
return period_uncertainty |
<SYSTEM_TASK:>
Residual of Fourier Series.
<END_TASK>
<USER_TASK:>
Description:
def residuals(self, pars, x, y, order):
"""
Residual of Fourier Series.
Parameters
----------
pars : array_like
Fourier series parameters.
x : array_like
An array of date.
y : array_like
An array of true values to fit.
order : int
An order of Fourier Series.
""" |
return y - self.fourier_series(pars, x, order) |
<SYSTEM_TASK:>
Function to fit Fourier Series.
<END_TASK>
<USER_TASK:>
Description:
def fourier_series(self, pars, x, order):
"""
Function to fit Fourier Series.
Parameters
----------
x : array_like
An array of date divided by period. It doesn't need to be sorted.
pars : array_like
Fourier series parameters.
order : int
An order of Fourier series.
""" |
sum = pars[0]
for i in range(order):
sum += pars[i * 2 + 1] * np.sin(2 * np.pi * (i + 1) * x) \
+ pars[i * 2 + 2] * np.cos(2 * np.pi * (i + 1) * x)
return sum |
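Written out, the series evaluated above is f(x) = pars[0] + sum_{i=1}^{order} ( pars[2*i - 1] * sin(2*pi*i*x) + pars[2*i] * cos(2*pi*i*x) ), which is the coefficient layout assumed when the Fourier features (amplitude, r21, r31, phi21, phi31) are derived in get_period_LS.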
<SYSTEM_TASK:>
Return 10% and 90% percentile of slope.
<END_TASK>
<USER_TASK:>
Description:
def slope_percentile(self, date, mag):
"""
Return 10% and 90% percentile of slope.
Parameters
----------
date : array_like
An array of phase-folded date. Sorted.
mag : array_like
An array of phase-folded magnitudes. Sorted by date.
Returns
-------
per_10 : float
10% percentile values of slope.
per_90 : float
90% percentile values of slope.
""" |
date_diff = date[1:] - date[:len(date) - 1]
mag_diff = mag[1:] - mag[:len(mag) - 1]
# Remove zero mag_diff.
index = np.where(mag_diff != 0.)
date_diff = date_diff[index]
mag_diff = mag_diff[index]
# Derive slope.
slope = date_diff / mag_diff
percentile_10 = np.percentile(slope, 10.)
percentile_90 = np.percentile(slope, 90.)
return percentile_10, percentile_90 |
<SYSTEM_TASK:>
Return max - min of cumulative sum.
<END_TASK>
<USER_TASK:>
Description:
def get_cusum(self, mag):
"""
Return max - min of cumulative sum.
Parameters
----------
mag : array_like
An array of magnitudes.
Returns
-------
mm_cusum : float
Max - min of cumulative sum.
""" |
c = np.cumsum(mag - self.weighted_mean) / len(mag) / self.weighted_std
return np.max(c) - np.min(c) |
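Written out, c_k = sum_{j=1}^{k} (m_j - weighted_mean) / (N * weighted_std), using the weighted mean and weighted standard deviation computed in shallow_run, and the returned feature is max(c) - min(c).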
<SYSTEM_TASK:>
Initialize Hebel.
<END_TASK>
<USER_TASK:>
Description:
def init(device_id=None, random_seed=None):
"""Initialize Hebel.
This function creates a CUDA context, CUBLAS context and
initializes and seeds the pseudo-random number generator.
**Parameters:**
device_id : integer, optional
The ID of the GPU device to use. If this is omitted, PyCUDA's
default context is used, which by default uses the fastest
available device on the system. Alternatively, you can put the
device id in the environment variable ``CUDA_DEVICE`` or into
the file ``.cuda-device`` in the user's home directory.
random_seed : integer, optional
The seed to use for the pseudo-random number generator. If
this is omitted, the seed is taken from the environment
variable ``RANDOM_SEED`` and if that is not defined, a random
integer is used as a seed.
""" |
if device_id is None:
        device_id = _os.environ.get('CUDA_DEVICE')
if random_seed is None:
random_seed = _os.environ.get('RANDOM_SEED')
global is_initialized
if not is_initialized:
is_initialized = True
global context
context.init_context(device_id)
from pycuda import gpuarray, driver, curandom
# Initialize memory pool
global memory_pool
memory_pool.init()
# Initialize PRG
global sampler
sampler.set_seed(random_seed)
# Initialize pycuda_ops
from hebel import pycuda_ops
pycuda_ops.init() |
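A minimal usage sketch, assuming this function is exposed as hebel.init; the device id and seed are arbitrary example values.

import hebel
hebel.init(device_id=0, random_seed=42)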
<SYSTEM_TASK:>
Instantiate a Tuple from a TupleNode.
<END_TASK>
<USER_TASK:>
Description:
def inflate_context_tuple(ast_rootpath, root_env):
"""Instantiate a Tuple from a TupleNode.
Walking the AST tree upwards, evaluate from the root down again.
""" |
with util.LogTime('inflate_context_tuple'):
# We only need to look at tuple members going down.
inflated = ast_rootpath[0].eval(root_env)
current = inflated
env = root_env
try:
for node in ast_rootpath[1:]:
if is_tuple_member_node(node):
assert framework.is_tuple(current)
with util.LogTime('into tuple'):
thunk, env = inflated.get_thunk_env(node.name)
current = framework.eval(thunk, env)
elif framework.is_list(current):
with util.LogTime('eval thing'):
current = framework.eval(node, env)
if framework.is_tuple(current):
inflated = current
except (gcl.EvaluationError, ast.UnparseableAccess):
# Eat evaluation error, probably means the rightmost tuplemember wasn't complete.
# Return what we have so far.
pass
return inflated |
<SYSTEM_TASK:>
Return whether the cursor is in identifier-position in a member declaration.
<END_TASK>
<USER_TASK:>
Description:
def is_identifier_position(rootpath):
"""Return whether the cursor is in identifier-position in a member declaration.""" |
if len(rootpath) >= 2 and is_tuple_member_node(rootpath[-2]) and is_identifier(rootpath[-1]):
return True
if len(rootpath) >= 1 and is_tuple_node(rootpath[-1]):
# No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode.
return True
return False |
<SYSTEM_TASK:>
Find completions at the cursor.
<END_TASK>
<USER_TASK:>
Description:
def find_completions_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env):
"""Find completions at the cursor.
Return a dict of { name => Completion } objects.
""" |
q = gcl.SourceQuery(filename, line, col - 1)
rootpath = ast_tree.find_tokens(q)
if is_identifier_position(rootpath):
return find_inherited_key_completions(rootpath, root_env)
try:
ret = find_deref_completions(rootpath, root_env) or enumerate_scope(rootpath, root_env=root_env)
assert isinstance(ret, dict)
return ret
except gcl.EvaluationError:
# Probably an unbound value or something--just return an empty list
return {} |
<SYSTEM_TASK:>
Return completion keys from INHERITED tuples.
<END_TASK>
<USER_TASK:>
Description:
def find_inherited_key_completions(rootpath, root_env):
"""Return completion keys from INHERITED tuples.
Easiest way to get those is to evaluate the tuple, check if it is a CompositeTuple,
then enumerate the keys that are NOT in the rightmost tuple.
""" |
tup = inflate_context_tuple(rootpath, root_env)
if isinstance(tup, runtime.CompositeTuple):
keys = set(k for t in tup.tuples[:-1] for k in t.keys())
return {n: get_completion(tup, n) for n in keys}
return {} |
<SYSTEM_TASK:>
Find the value of the object under the cursor.
<END_TASK>
<USER_TASK:>
Description:
def find_value_at_cursor(ast_tree, filename, line, col, root_env=gcl.default_env):
"""Find the value of the object under the cursor.""" |
q = gcl.SourceQuery(filename, line, col)
rootpath = ast_tree.find_tokens(q)
rootpath = path_until(rootpath, is_thunk)
if len(rootpath) <= 1:
# Just the file tuple itself, or some non-thunk element at the top level
return None
tup = inflate_context_tuple(rootpath, root_env)
try:
if isinstance(rootpath[-1], ast.Inherit):
# Special case handling of 'Inherit' nodes, show the value that's being
# inherited.
return tup[rootpath[-1].name]
return rootpath[-1].eval(tup.env(tup))
except gcl.EvaluationError as e:
return e |
<SYSTEM_TASK:>
Add a vector to a matrix
<END_TASK>
<USER_TASK:>
Description:
def add_vec_to_mat(mat, vec, axis=None, inplace=False,
target=None, substract=False):
""" Add a vector to a matrix
""" |
assert mat.flags.c_contiguous
if axis is None:
if vec.shape[0] == mat.shape[0]:
axis = 0
elif vec.shape[0] == mat.shape[1]:
axis = 1
else:
raise ValueError('Vector length must be equal '
'to one side of the matrix')
n, m = mat.shape
block = (_compilation_constants['add_vec_block_size'],
_compilation_constants['add_vec_block_size'], 1)
gridx = ceil_div(n, block[0])
gridy = ceil_div(m, block[1])
grid = (gridx, gridy, 1)
if inplace:
target = mat
elif target is None:
target = gpuarray.empty_like(mat)
if axis == 0:
assert vec.shape[0] == mat.shape[0]
add_col_vec_kernel.prepared_call(
grid, block,
mat.gpudata,
vec.gpudata,
target.gpudata,
np.uint32(n),
np.uint32(m),
np.int32(substract))
elif axis == 1:
assert vec.shape[0] == mat.shape[1]
add_row_vec_kernel.prepared_call(
grid, block,
mat.gpudata,
vec.gpudata,
target.gpudata,
np.uint32(n),
np.uint32(m),
np.int32(substract))
return target |
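A hedged usage sketch: broadcast-add a row vector to every row of a GPU matrix. It assumes a CUDA context has already been set up (e.g. via the init() shown earlier) so the kernels above are compiled, and that float32 arrays are used.

import numpy as np
from pycuda import gpuarray

mat = gpuarray.to_gpu(np.ones((4, 3), dtype=np.float32))
vec = gpuarray.to_gpu(np.arange(3, dtype=np.float32))
out = add_vec_to_mat(mat, vec, axis=1)   # vec length matches mat.shape[1]
print(out.get())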