_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---
q278800
|
stream_command_dicts
|
test
|
def stream_command_dicts(commands, parallel=False):
"""
Takes a list of dictionaries with keys corresponding to ``stream_command``
arguments, and runs them all concurrently.
:param commands: A list of dictionaries, the keys of which should line up
with the arguments to the ``stream_command`` function.
:type commands: ``list`` of ``dict``
:param parallel: If true, commands will be run in parallel.
:type parallel: ``bool``
"""
if parallel is True:
threads = []
for command in commands:
|
python
|
{
"resource": ""
}
|
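The snippet above is truncated at the loop body. A minimal sketch of how the parallel branch could continue, assuming the ``stream_command`` function named in the docstring and the standard ``threading`` module:

import threading

def stream_command_dicts(commands, parallel=False):
    """Run ``stream_command`` for each dict, optionally in parallel."""
    if parallel is True:
        threads = []
        for command in commands:
            # stream_command is the helper named in the docstring (defined elsewhere);
            # each dict's keys are assumed to match its keyword arguments.
            thread = threading.Thread(target=stream_command, kwargs=command)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()  # wait for all streams to finish
    else:
        for command in commands:
            stream_command(**command)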
q278801
|
stream_commands
|
test
|
def stream_commands(commands, hash_colors=True, parallel=False):
"""
Runs multiple commands, optionally in parallel. Each command should be
a dictionary with a 'command' key and optionally 'description' and
'write_stdin' keys.
"""
def _get_color(string):
if hash_colors is True:
return get_color_hash(string)
else:
return DEFAULT_COLOR
fixed_commands = []
for command in commands:
cmd_text = command['command']
description = command.get('description')
color = _get_color(description or '')
write_stdin = command.get('write_stdin')
description = color(description) if color is not None else
|
python
|
{
"resource": ""
}
|
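The snippet cuts off mid-ternary. One plausible completion of the loop body, with ``get_color_hash``, ``DEFAULT_COLOR`` and the final hand-off to ``stream_command_dicts`` taken from the surrounding rows; the exact dict shape is a guess:

# ...continuing inside the for-loop of stream_commands:
description = color(description) if color is not None else description
fixed_commands.append({
    'command': cmd_text,
    'description': description,
    'write_stdin': write_stdin,
})
# After the loop, the batch would plausibly be dispatched as:
# stream_command_dicts(fixed_commands, parallel=parallel)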
q278802
|
networkdays
|
test
|
def networkdays(from_date, to_date, locale='en-US'):
""" Return the net work days according to RH's calendar. """
|
python
|
{
"resource": ""
}
|
q278803
|
_get_path
|
test
|
def _get_path(cmd):
"""Queries bash to find the path to a commmand on the system."""
if cmd in _PATHS:
|
python
|
{
"resource": ""
}
|
q278804
|
_build_ssh_command
|
test
|
def _build_ssh_command(hostname, username, idfile, ssh_command, tunnel):
"""Uses hostname and other info to construct an SSH command."""
command = [_get_path('ssh'),
'-o', 'StrictHostKeyChecking=no',
'-o', 'ConnectTimeout=5']
if idfile is not None:
command.extend(['-i', idfile])
if tunnel is not None:
# If there's a tunnel, run the ssh command on
|
python
|
{
"resource": ""
}
|
q278805
|
_build_scp_command
|
test
|
def _build_scp_command(hostname, username, idfile, is_get,
local_path, remote_path):
"""
Uses hostname and other info to construct an SCP command.
:param hostname: The hostname of the remote machine.
:type hostname: ``str``
:param username: The username to use on the remote machine.
:type username: ``str``
:param idfile: A path to the identity file to use.
:type idfile: ``str``
:param is_get: If true, we are getting a file rather than putting a file.
:type is_get: ``bool``
:param local_path: The path on the local file system.
:type local_path: ``str``
:param remote_path: The path on the remote file system.
:type remote_path: ``str``
"""
if hostname is None or hostname.strip() == '':
raise ValueError('Empty hostname')
command = [_get_path('scp'),
|
python
|
{
"resource": ""
}
|
q278806
|
_copy_to
|
test
|
def _copy_to(entries, remote_path, local_path, profile):
"""
Performs an SCP command where the remote_path is the target and the
local_path is the source.
:param entries: A list of entries.
:type entries: ``list`` of :py:class:`HostEntry`
:param remote_path: The target path on the remote machine(s).
:type remote_path: ``str``
:param local_path: The source path on the local machine.
:type local_path: ``str``
:param profile: The profile, holding username/idfile info, etc.
:type profile: :py:class:`Profile`
"""
commands = []
for entry in entries:
hname = entry.hostname or entry.public_ip
cmd = _build_scp_command(hname, profile.username,
|
python
|
{
"resource": ""
}
|
q278807
|
_copy_from
|
test
|
def _copy_from(entries, remote_path, local_path, profile):
"""
Performs an SCP command where the remote_path is the source and the
local_path is a format string, formatted individually for each host
being copied from so as to create one or more distinct paths on the
local system.
:param entries: A list of entries.
:type entries: ``list`` of :py:class:`HostEntry`
:param remote_path: The source path on the remote machine(s).
:type remote_path: ``str``
:param local_path: A format string for the path on the local machine.
:type local_path: ``str``
:param profile: The profile, holding username/idfile info, etc.
:type profile: :py:class:`Profile`
"""
commands = []
paths = set()
for entry in entries:
hname = entry.hostname or entry.public_ip
_local_path = entry.format_string(local_path)
if _local_path in paths:
raise ValueError('Duplicate local paths: one or more paths '
'had value {} after formatting.'
.format(local_path))
paths.add(_local_path)
# If the path references a folder, create
|
python
|
{
"resource": ""
}
|
q278808
|
_run_ssh_command
|
test
|
def _run_ssh_command(entries, username, idfile, command, tunnel,
parallel=False):
"""
Runs the given command over SSH in parallel on all hosts in `entries`.
:param entries: The host entries to pull the hostnames from.
:type entries: ``list`` of :py:class:`HostEntry`
:param username: To use a specific username.
:type username: ``str`` or ``NoneType``
:param idfile: The SSH identity file to use, or none.
:type idfile: ``str`` or ``NoneType``
:param command: The command to run.
:type command: ``str``
:param parallel: If true, commands will be run in parallel.
:type parallel: ``bool``
"""
if len(entries) == 0:
print('(No hosts to run command on)')
return 1
if command.strip() == ''
|
python
|
{
"resource": ""
}
|
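The body stops at the empty-command guard. A hedged sketch of how the function could proceed, reusing ``_build_ssh_command`` and ``stream_commands`` from neighbouring rows; the dict keys and return values are assumptions:

if command.strip() == '':
    print('(No command to run)')
    return 1
# Hypothetical continuation: one SSH invocation per host entry.
shell_commands = []
for entry in entries:
    hostname = entry.hostname or entry.public_ip
    cmd = _build_ssh_command(hostname, username, idfile, command, tunnel)
    shell_commands.append({'command': cmd, 'description': hostname})
stream_commands(shell_commands, parallel=parallel)
return 0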
q278809
|
_connect_ssh
|
test
|
def _connect_ssh(entry, username, idfile, tunnel=None):
"""
SSH into a host.
:param entry: The host entry to pull the hostname from.
:type entry: :py:class:`HostEntry`
:param username: To use a specific username.
:type username: ``str`` or ``NoneType``
:param idfile: The SSH identity file to use, if supplying a username.
:type idfile: ``str`` or ``NoneType``
:param tunnel: Host to tunnel SSH command through.
:type tunnel: ``str`` or ``NoneType``
:return: An exit status code.
:rtype: ``int``
"""
if entry.hostname != "" and entry.hostname is not None:
_host = entry.hostname
elif entry.public_ip != "" and entry.public_ip is not None:
_host = entry.public_ip
elif entry.private_ip != "" and entry.private_ip is not None:
if tunnel is None:
raise ValueError("Entry does not have a hostname or public IP. "
"You can connect via private IP if you use a "
|
python
|
{
"resource": ""
}
|
q278810
|
LsiProfile.load
|
test
|
def load(cls, profile_name=None):
"""Loads the user's LSI profile, or provides a default."""
lsi_location = os.path.expanduser('~/.lsi')
if not os.path.exists(lsi_location):
return LsiProfile()
cfg_parser = ConfigParser()
cfg_parser.read(lsi_location)
if profile_name is None:
# Load the default profile if one exists; otherwise return empty.
if cfg_parser.has_section('default'):
profile_name = 'default'
else:
return cls()
elif not cfg_parser.has_section(profile_name):
raise cls.LoadError('No such profile {}'.format(profile_name))
|
python
|
{
"resource": ""
}
|
q278811
|
LsiProfile.from_args
|
test
|
def from_args(args):
"""Takes arguments parsed from argparse and returns a profile."""
# If the args specify a username explicitly, don't load from file.
if args.username is not None or args.identity_file is not None:
profile = LsiProfile()
else:
profile = LsiProfile.load(args.profile)
profile.override('username', args.username)
profile.override('identity_file', args.identity_file)
profile.override('command', args.command)
|
python
|
{
"resource": ""
}
|
q278812
|
Relational.relate
|
test
|
def relate(self, part, id=None):
"""Relate this package component to the supplied part."""
assert part.name.startswith(self.base)
name = part.name[len(self.base):].lstrip('/')
rel =
|
python
|
{
"resource": ""
}
|
q278813
|
Relational.related
|
test
|
def related(self, reltype):
"""Return a list of parts related to this one via reltype."""
parts = []
package = getattr(self, 'package', None) or self
for rel
|
python
|
{
"resource": ""
}
|
q278814
|
Relational._load_rels
|
test
|
def _load_rels(self, source):
"""Load relationships from source XML."""
# don't get confused here
|
python
|
{
"resource": ""
}
|
q278815
|
Package.add
|
test
|
def add(self, part, override=True):
"""Add a part to the package.
It will also add a content-type - by default an override. If
override is False then it will add a content-type for the extension
if one isn't already present.
"""
|
python
|
{
"resource": ""
}
|
q278816
|
Package._load_part
|
test
|
def _load_part(self, rel_type, name, data):
"""
Load a part into this package based on its relationship type
"""
if self.content_types.find_for(name) is None:
log.warning('no content type found for part %(name)s' % vars())
return
|
python
|
{
"resource": ""
}
|
q278817
|
ContentTypes.find_for
|
test
|
def find_for(self, name):
"""
Get the correct content type for a given name
"""
map = self.items
# first search the overrides (by name)
# then fall back to the defaults (by extension)
#
|
python
|
{
"resource": ""
}
|
q278818
|
ContentType.from_element
|
test
|
def from_element(cls, element):
"given an element, parse out the proper ContentType"
# disambiguate the subclass
ns, class_name = parse_tag(element.tag)
class_ = getattr(ContentType, class_name)
if not class_:
msg = 'Invalid Types child element: %(class_name)s' % vars()
|
python
|
{
"resource": ""
}
|
q278819
|
parse
|
test
|
def parse(input_string, prefix=''):
"""Parses the given DSL string and returns parsed results.
Args:
input_string (str): DSL string
prefix (str): Optional prefix to add to every element name, useful to namespace things
Returns:
|
python
|
{
"resource": ""
}
|
q278820
|
ProjectTokenBuilder.build
|
test
|
def build(self, secret_key):
"""Builds a final copy of the token using the given secret key.
:param secret_key(string): The secret key that corresponds to this builder's access key.
"""
key = jwk.JWK(
kty='oct',
k=base64url_encode(uuid.UUID(secret_key).bytes),
)
header = {
'alg': 'dir',
'enc': 'A128GCM',
'zip': 'DEF',
'cty': 'JWT',
'kid': self._access_key,
}
now = int(time.time())
payload = {
'iat': now,
'nbf': now,
}
if self._expiration is not None:
payload['exp'] = int(calendar.timegm(self._expiration.utctimetuple()))
if len(self._view_identifiers) > 0:
payload[VIEW_IDENTIFIERS_CLAIM_NAME] = self._view_identifiers
if len(self._parameters) > 0:
parameters = []
for parameter in self._parameters:
serialized = {
'field': parameter.field,
|
python
|
{
"resource": ""
}
|
q278821
|
assign_force_field
|
test
|
def assign_force_field(ampal_obj, ff):
"""Assigns force field parameters to Atoms in the AMPAL object.
Parameters
----------
ampal_obj : AMPAL Object
Any AMPAL object with a `get_atoms` method.
ff: BuffForceField
The force field to be used for scoring.
"""
if hasattr(ampal_obj, 'ligands'):
atoms = ampal_obj.get_atoms(ligands=True, inc_alt_states=True)
else:
atoms = ampal_obj.get_atoms(inc_alt_states=True)
for atom in atoms:
w_str = None
a_ff_id = None
if atom.element == 'H':
continue
elif atom.parent.mol_code.upper() in ff:
if atom.res_label.upper() in ff[atom.parent.mol_code]:
a_ff_id = (atom.parent.mol_code.upper(),
atom.res_label.upper())
elif atom.res_label.upper() in ff['WLD']:
a_ff_id = ('WLD', atom.res_label.upper())
else:
w_str = ('{} atom is not parameterised in the selected '
|
python
|
{
"resource": ""
}
|
q278822
|
BuffForceField.find_max_rad_npnp
|
test
|
def find_max_rad_npnp(self):
"""Finds the maximum radius and npnp in the force field.
Returns
-------
(max_rad, max_npnp): (float, float)
Maximum radius and npnp distance in the loaded force field.
"""
max_rad = 0
max_npnp = 0
for res, _ in self.items():
if res != 'KEY':
for _, ff_params in self[res].items():
|
python
|
{
"resource": ""
}
|
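A self-contained guess at the truncated inner loop; the parameter indices (radius at position 1, npnp at position 4) are assumptions about the force-field tuple layout:

def find_max_rad_npnp(self):
    """Find the maximum radius and npnp distance in the force field."""
    max_rad = 0
    max_npnp = 0
    for res, _ in self.items():
        if res != 'KEY':
            for _, ff_params in self[res].items():
                # Assumed tuple layout: radius at index 1, npnp at index 4.
                if ff_params[1] > max_rad:
                    max_rad = ff_params[1]
                if ff_params[4] > max_npnp:
                    max_npnp = ff_params[4]
    return max_rad, max_npnp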
q278823
|
BuffForceField._make_ff_params_dict
|
test
|
def _make_ff_params_dict(self):
"""Makes a dictionary containing PyAtomData for the force field.
Returns
-------
ff_params_struct_dict: dict
Dictionary containing PyAtomData structs for the force field
parameters for each atom in the force field.
"""
try:
ff_params_struct_dict = {}
for res in self.keys():
|
python
|
{
"resource": ""
}
|
q278824
|
ZipPackage.as_stream
|
test
|
def as_stream(self):
"""
Return a zipped package as a readable stream
"""
stream = io.BytesIO()
|
python
|
{
"resource": ""
}
|
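The method breaks off right after creating the buffer. A plausible finish, assuming the package exposes a serializer (here called ``_store``, a hypothetical name) that writes zip bytes into a file-like object:

import io

def as_stream(self):
    """Return the zipped package as a readable stream."""
    stream = io.BytesIO()
    self._store(stream)  # hypothetical: serialize the zip into the buffer
    stream.seek(0)       # rewind so callers read from the beginning
    return stream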
q278825
|
ZipPackage._get_matching_segments
|
test
|
def _get_matching_segments(self, zf, name):
"""
Return a generator yielding each of
|
python
|
{
"resource": ""
}
|
q278826
|
copy_dir
|
test
|
def copy_dir(bucket_name, src_path, dest_path,
aws_access_key_id=None, aws_secret_access_key=None,
aws_profile=None,
surrogate_key=None, cache_control=None,
surrogate_control=None,
create_directory_redirect_object=True):
"""Copy objects from one directory in a bucket to another directory in
the same bucket.
Object metadata is preserved while copying, with the following exceptions:
- If a new surrogate key is provided it will replace the original one.
- If ``cache_control`` and ``surrogate_control`` values are provided they
will replace the old one.
Parameters
----------
bucket_name : `str`
Name of an S3 bucket.
src_path : `str`
Source directory in the S3 bucket. The ``src_path`` should ideally end
in a trailing `'/'`. E.g. `'dir/dir2/'`.
dest_path : `str`
Destination directory in the S3 bucket. The ``dest_path`` should
ideally end in a trailing `'/'`. E.g. `'dir/dir2/'`. The destination
path cannot contain the source path.
aws_access_key_id : `str`
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
surrogate_key : `str`, optional
The surrogate key to insert in the header of all objects in the
``x-amz-meta-surrogate-key`` field. This key is used to purge
builds from the Fastly CDN when Editions change.
If `None` then no header will be set.
If the object already has a ``x-amz-meta-surrogate-key`` header then
it will be replaced.
cache_control : `str`, optional
This sets (and overrides) the ``Cache-Control`` header on the copied
files. The ``Cache-Control`` header specifically dictates how content
is cached by the browser (if ``surrogate_control`` is also set).
surrogate_control : `str`, optional
This sets (and overrides) the ``x-amz-meta-surrogate-control`` header
on the copied files. The ``Surrogate-Control``
or ``x-amz-meta-surrogate-control`` header is used in priority by
Fastly to govern its caching. This caching policy is *not* passed
to the browser.
create_directory_redirect_object : `bool`, optional
Create a directory redirect object for the root directory. The
directory redirect object is an empty S3 object named after the
directory (without a trailing slash) that contains a
``x-amz-meta-dir-redirect=true`` HTTP header. LSST the Docs' Fastly
VCL is configured to redirect requests for a directory path to the
directory's ``index.html`` (known as *courtesy redirects*).
Raises
------
ltdconveyor.s3.S3Error
Thrown by any unexpected faults from the S3 API.
RuntimeError
Thrown when the source and destination directories are the same.
"""
if not src_path.endswith('/'):
src_path += '/'
if not dest_path.endswith('/'):
dest_path += '/'
# Ensure the src_path and dest_path don't contain each other
common_prefix = os.path.commonprefix([src_path, dest_path])
if common_prefix == src_path:
|
python
|
{
"resource": ""
}
|
q278827
|
open_bucket
|
test
|
def open_bucket(bucket_name,
aws_access_key_id=None, aws_secret_access_key=None,
aws_profile=None):
"""Open an S3 Bucket resource.
Parameters
----------
bucket_name : `str`
Name of the S3 bucket.
aws_access_key_id : `str`, optional
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`, optional
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
Returns
|
python
|
{
"resource": ""
}
|
q278828
|
upload_dir
|
test
|
def upload_dir(bucket_name, path_prefix, source_dir,
upload_dir_redirect_objects=True,
surrogate_key=None,
surrogate_control=None, cache_control=None,
acl=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_profile=None):
"""Upload a directory of files to S3.
This function places the contents of the Sphinx HTML build directory
into the ``/path_prefix/`` directory of an *existing* S3 bucket.
Existing files on S3 are overwritten; files that no longer exist in the
``source_dir`` are deleted from S3.
Parameters
----------
bucket_name : `str`
Name of the S3 bucket where documentation is uploaded.
path_prefix : `str`
The root directory in the bucket where documentation is stored.
source_dir : `str`
Path of the Sphinx HTML build directory on the local file system.
The contents of this directory are uploaded into the ``/path_prefix/``
directory of the S3 bucket.
upload_dir_redirect_objects : `bool`, optional
A feature flag to enable uploading objects to S3 for every directory.
These objects contain ``x-amz-meta-dir-redirect=true`` HTTP headers
that tell Fastly to issue a 301 redirect from the directory object to
the ``index.html`` in that directory.
surrogate_key : `str`, optional
The surrogate key to insert in the header of all objects
in the ``x-amz-meta-surrogate-key`` field. This key is used to purge
builds from the Fastly CDN when Editions change.
If `None` then no header will be set.
cache_control : `str`, optional
This sets the ``Cache-Control`` header on the uploaded
files. The ``Cache-Control`` header specifically dictates how content
is cached by the browser (if ``surrogate_control`` is also set).
surrogate_control : `str`, optional
This sets the ``x-amz-meta-surrogate-control`` header
on the uploaded files. The ``Surrogate-Control``
or ``x-amz-meta-surrogate-control`` header is used in priority by
Fastly to govern its caching. This caching policy is *not* passed
to the browser.
acl : `str`, optional
The pre-canned AWS access control list to apply to this upload.
Can be ``'public-read'``, which allow files to be downloaded
over HTTP by the public. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
for an overview of S3's pre-canned ACL lists. Note that ACL settings
are not validated locally. Default is `None`, meaning that no ACL
is applied to an individual object. In this case, use ACLs applied
to the bucket itself.
aws_access_key_id : `str`, optional
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`, optional
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
Notes
-----
``cache_control`` and ``surrogate_control`` can be used together.
``surrogate_control`` takes priority in setting Fastly's POP caching,
while ``cache_control`` then sets the browser's caching. For example:
- ``cache_control='no-cache'``
- ``surrogate_control='max-age=31536000'``
together will ensure that the browser always does an ETAG server query,
but that Fastly will cache the content for one year (or until purged).
This configuration is good for files that are frequently changed in place.
For immutable uploads simply using ``cache_control`` is more efficient
since it allows the browser to also locally cache content.
.. seealso::
|
python
|
{
"resource": ""
}
|
q278829
|
upload_file
|
test
|
def upload_file(local_path, bucket_path, bucket,
metadata=None, acl=None, cache_control=None):
"""Upload a file to the S3 bucket.
This function uses the mimetypes module to guess and then set the
Content-Type and Encoding-Type headers.
Parameters
----------
local_path : `str`
Full path to a file on the local file system.
bucket_path : `str`
Destination path (also known as the key name) of the file in the
S3 bucket.
bucket : boto3 Bucket instance
S3 bucket.
metadata : `dict`, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : `str`, optional
A pre-canned access control list. See
|
python
|
{
"resource": ""
}
|
q278830
|
upload_object
|
test
|
def upload_object(bucket_path, bucket, content='',
metadata=None, acl=None, cache_control=None,
content_type=None):
"""Upload an arbitrary object to an S3 bucket.
Parameters
----------
bucket_path : `str`
Destination path (also known as the key name) of the file in the
S3 bucket.
content : `str` or `bytes`, optional
Object content.
bucket : boto3 Bucket instance
S3 bucket.
metadata : `dict`, optional
Header metadata values. These keys will appear in headers as
``x-amz-meta-*``.
acl : `str`, optional
A pre-canned access control list. See
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Default is `None`, meaning that no ACL is applied to the object.
cache_control : `str`, optional
The cache-control header value. For example, ``'max-age=31536000'``.
content_type : `str`, optional
The object's content
|
python
|
{
"resource": ""
}
|
q278831
|
ObjectManager.list_filenames_in_directory
|
test
|
def list_filenames_in_directory(self, dirname):
"""List all file-type object names that exist at the root of this
bucket directory.
Parameters
----------
dirname : `str`
Directory name in the bucket relative to ``bucket_root/``.
Returns
-------
filenames : `list`
List of file names (`str`), relative to ``bucket_root/``, that
exist at the root of ``dirname``.
"""
prefix = self._create_prefix(dirname)
filenames = []
for obj in self._bucket.objects.filter(Prefix=prefix):
|
python
|
{
"resource": ""
}
|
q278832
|
ObjectManager.list_dirnames_in_directory
|
test
|
def list_dirnames_in_directory(self, dirname):
"""List all names of directories that exist at the root of this
bucket directory.
Note that *directories* don't exist in S3; rather directories are
inferred from path names.
Parameters
----------
dirname : `str`
Directory name in the bucket relative to ``bucket_root``.
Returns
-------
dirnames : `list`
List of directory names (`str`), relative to ``bucket_root/``,
that exist at the root of ``dirname``.
"""
prefix = self._create_prefix(dirname)
dirnames = []
for obj in self._bucket.objects.filter(Prefix=prefix):
# get directory name of every object under this path prefix
dirname = os.path.dirname(obj.key)
# dirname is empty if the object happens to be the directory
# redirect object for the prefix directory (directory
# redirect objects are named after directories and have metadata
# that tells Fastly to redirect the browser to the index.html
# contained in the directory).
if dirname == '':
dirname = obj.key + '/'
|
python
|
{
"resource": ""
}
|
q278833
|
ObjectManager._create_prefix
|
test
|
def _create_prefix(self, dirname):
"""Make an absolute directory path in the bucker for dirname,
which is is assumed relative to the self._bucket_root prefix directory.
"""
if dirname in ('.', '/'):
dirname = ''
# Strips trailing slash from dir prefix
|
python
|
{
"resource": ""
}
|
q278834
|
ObjectManager.delete_file
|
test
|
def delete_file(self, filename):
"""Delete a file from the bucket.
Parameters
----------
filename : `str`
Name of the file, relative to ``bucket_root/``.
"""
|
python
|
{
"resource": ""
}
|
q278835
|
ensure_login
|
test
|
def ensure_login(ctx):
"""Ensure a token is in the Click context object or authenticate and obtain
the token from LTD Keeper.
Parameters
----------
ctx : `click.Context`
The Click context. ``ctx.obj`` must be a `dict` that contains keys:
``keeper_hostname``, ``username``, ``password``, ``token``. This
context object is prepared by the main Click group,
`ltdconveyor.cli.main.main`.
"""
logger = logging.getLogger(__name__)
logger.info('utils name %r', __name__)
if ctx.obj['token'] is None:
if ctx.obj['username'] is None or ctx.obj['password'] is None:
raise click.UsageError(
'Use `ltd -u <username> -p <password> COMMAND` to '
'authenticate to the LTD Keeper server.')
sys.exit(1)
logger.debug(
'About to get token for user %s at %s',
ctx.obj['username'],
|
python
|
{
"resource": ""
}
|
q278836
|
Five.loud
|
test
|
def loud(self, lang='english'):
"""Speak loudly! FIVE! Use upper case!"""
lang_method = getattr(self, lang, None)
if lang_method:
|
python
|
{
"resource": ""
}
|
q278837
|
delete_dir
|
test
|
def delete_dir(bucket_name, root_path,
aws_access_key_id=None, aws_secret_access_key=None,
aws_profile=None):
"""Delete all objects in the S3 bucket named ``bucket_name`` that are
found in the ``root_path`` directory.
Parameters
----------
bucket_name : `str`
Name of an S3 bucket.
root_path : `str`
Directory in the S3 bucket that will be deleted.
aws_access_key_id : `str`
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
Raises
------
ltdconveyor.s3.S3Error
Thrown by any unexpected faults from the S3 API.
"""
logger = logging.getLogger(__name__)
session = boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
s3 = session.resource('s3')
client = s3.meta.client
# Normalize the directory path for searching path prefixes of objects
if not root_path.endswith('/'):
root_path += '/'
paginator = client.get_paginator('list_objects_v2')
pages = paginator.paginate(Bucket=bucket_name, Prefix=root_path)
keys = dict(Objects=[])
for item in pages.search('Contents'):
try:
keys['Objects'].append({'Key': item['Key']})
except TypeError: # item is None; nothing to delete
|
python
|
{
"resource": ""
}
|
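The pagination loop is cut short. One way it could finish, batching deletions through boto3's ``delete_objects``; the batching detail is an assumption, though S3 does cap that call at 1000 keys per request:

for item in pages.search('Contents'):
    try:
        keys['Objects'].append({'Key': item['Key']})
    except TypeError:  # item is None; nothing to delete
        continue
    # Assumed batching: flush once the 1000-key S3 limit is reached.
    if len(keys['Objects']) >= 1000:
        client.delete_objects(Bucket=bucket_name, Delete=keys)
        keys = dict(Objects=[])
if len(keys['Objects']) > 0:
    client.delete_objects(Bucket=bucket_name, Delete=keys)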
q278838
|
home_url
|
test
|
def home_url():
"""Get project's home URL based on settings.PROJECT_HOME_NAMESPACE.
Returns None if PROJECT_HOME_NAMESPACE is not defined in settings.
"""
try:
return reverse(home_namespace)
except Exception:
url = home_namespace
try:
|
python
|
{
"resource": ""
}
|
q278839
|
silence_without_namespace
|
test
|
def silence_without_namespace(f):
"""Decorator to silence template tags if 'PROJECT_HOME_NAMESPACE' is
not defined in settings.
Usage Example:
from django import template
register = template.Library()
@register.simple_tag
@silence_without_namespace
def a_template_tag(*args):
|
python
|
{
"resource": ""
}
|
q278840
|
project_home_breadcrumb_bs3
|
test
|
def project_home_breadcrumb_bs3(label):
"""A template tag to return the project's home URL and label
formatted as a Bootstrap 3 breadcrumb.
PROJECT_HOME_NAMESPACE must be defined in settings, for example:
PROJECT_HOME_NAMESPACE = 'project_name:index_view'
Usage Example:
{% load project_home_tags %}
<ol class="breadcrumb">
{% project_home_breadcrumb_bs3 %} {# <--- #}
<li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class="active">Object Detail</li>
</ol>
This gets converted into:
<ol class="breadcrumb">
<li><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
<li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
|
python
|
{
"resource": ""
}
|
q278841
|
project_home_breadcrumb_bs4
|
test
|
def project_home_breadcrumb_bs4(label):
"""A template tag to return the project's home URL and label
formatted as a Bootstrap 4 breadcrumb.
PROJECT_HOME_NAMESPACE must be defined in settings, for example:
PROJECT_HOME_NAMESPACE = 'project_name:index_view'
Usage Example:
{% load project_home_tags %}
<ol class="breadcrumb">
{% project_home_breadcrumb_bs4 %} {# <--- #}
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class=" breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
</ol>
This gets converted into:
<ol class="breadcrumb">
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
<li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class=" breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
|
python
|
{
"resource": ""
}
|
q278842
|
get_interaction_energy
|
test
|
def get_interaction_energy(ampal_objs, ff=None, assign_ff=True):
"""Calculates the interaction energy between AMPAL objects.
Parameters
----------
ampal_objs: [AMPAL Object]
A list of any AMPAL objects with `get_atoms` methods.
ff: BuffForceField, optional
The force field to be used for scoring. If no force field is
provided then the most current version of the BUDE force field
will be used.
assign_ff: bool, optional
If true, then force field assignment on the AMPAL object will be
updated.
Returns
-------
BUFF_score: BUFFScore
|
python
|
{
"resource": ""
}
|
q278843
|
get_internal_energy
|
test
|
def get_internal_energy(ampal_obj, ff=None, assign_ff=True):
"""Calculates the internal energy of the AMPAL object.
Parameters
----------
ampal_obj: AMPAL Object
Any AMPAL object with a `get_atoms` method.
ff: BuffForceField, optional
The force field to be used for scoring. If no force field is
provided then the most current version of the BUDE force field
will be used.
assign_ff: bool, optional
If true, then force field assignment on the AMPAL object will be
updated.
Returns
-------
BUFF_score: BUFFScore
|
python
|
{
"resource": ""
}
|
q278844
|
_BaseSampler.hotspots
|
test
|
def hotspots(self):
'''
Get lines sampled across all threads, in order
from most to least sampled.
'''
rooted_leaf_samples, _ = self.live_data_copy()
line_samples = {}
for _, counts in rooted_leaf_samples.items():
for key, count in counts.items():
|
python
|
{
"resource": ""
}
|
q278845
|
get_keeper_token
|
test
|
def get_keeper_token(host, username, password):
"""Get a temporary auth token from LTD Keeper.
Parameters
----------
host : `str`
Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``).
username : `str`
Username.
password : `str`
Password.
Returns
-------
token : `str`
LTD Keeper API token.
Raises
------
KeeperError
Raised if the LTD Keeper API cannot return a token.
"""
token_endpoint = urljoin(host, '/token')
|
python
|
{
"resource": ""
}
|
q278846
|
upload
|
test
|
def upload(ctx, product, git_ref, dirname, aws_id, aws_secret, ci_env,
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron,
skip_upload):
"""Upload a new site build to LSST the Docs.
"""
logger = logging.getLogger(__name__)
if skip_upload:
click.echo('Skipping ltd upload.')
sys.exit(0)
logger.debug('CI environment: %s', ci_env)
logger.debug('Travis events settings. '
'On Push: %r, PR: %r, API: %r, Cron: %r',
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron)
# Abort upload on Travis CI under certain events
if ci_env == 'travis' and \
_should_skip_travis_event(
on_travis_push, on_travis_pr, on_travis_api, on_travis_cron):
sys.exit(0)
# Authenticate to LTD Keeper host
ensure_login(ctx)
# Detect git refs
git_refs = _get_git_refs(ci_env, git_ref)
build_resource = register_build(
ctx.obj['keeper_hostname'],
ctx.obj['token'],
product,
git_refs
)
logger.debug('Created build resource %r', build_resource)
# Do the upload.
# This cache_control is appropriate for builds since they're immutable.
# The LTD Keeper server changes the cache settings when copying the build
# over
|
python
|
{
"resource": ""
}
|
q278847
|
_should_skip_travis_event
|
test
|
def _should_skip_travis_event(on_travis_push, on_travis_pr, on_travis_api,
on_travis_cron):
"""Detect if the upload should be skipped based on the
``TRAVIS_EVENT_TYPE`` environment variable.
Returns
-------
should_skip : `bool`
True if the upload should be skipped based on the combination of
``TRAVIS_EVENT_TYPE`` and user settings.
"""
travis_event = os.getenv('TRAVIS_EVENT_TYPE')
if travis_event is None:
raise click.UsageError(
'Using --travis but the TRAVIS_EVENT_TYPE '
'environment variable is not detected.')
if travis_event == 'push' and on_travis_push is False:
click.echo('Skipping upload on Travis push event.')
return True
|
python
|
{
"resource": ""
}
|
q278848
|
purge_key
|
test
|
def purge_key(surrogate_key, service_id, api_key):
"""Instant purge URLs with a given surrogate key from the Fastly caches.
Parameters
----------
surrogate_key : `str`
Surrogate key header (``x-amz-meta-surrogate-key``) value of objects
to purge from the Fastly cache.
service_id : `str`
Fastly service ID.
api_key : `str`
Fastly API key.
Raises
------
FastlyError
Error with the Fastly API usage.
Notes
-----
This
|
python
|
{
"resource": ""
}
|
q278849
|
register_build
|
test
|
def register_build(host, keeper_token, product, git_refs):
"""Register a new build for a product on LSST the Docs.
Wraps ``POST /products/{product}/builds/``.
Parameters
----------
host : `str`
Hostname of LTD Keeper API server.
keeper_token : `str`
Auth token (`ltdconveyor.keeper.get_keeper_token`).
product : `str`
Name of the product in the LTD Keeper service.
git_refs : `list` of `str`
List of Git refs that correspond to the version of the build. Git refs
can be tags or branches.
Returns
-------
build_info : `dict`
LTD Keeper build resource.
Raises
------
ltdconveyor.keeper.KeeperError
|
python
|
{
"resource": ""
}
|
q278850
|
confirm_build
|
test
|
def confirm_build(build_url, keeper_token):
"""Confirm a build upload is complete.
Wraps ``PATCH /builds/{build}``.
Parameters
----------
build_url : `str`
URL of the build resource. Given a build resource, this URL is
available from the ``self_url`` field.
keeper_token : `str`
Auth token (`ltdconveyor.keeper.get_keeper_token`).
Raises
------
ltdconveyor.keeper.KeeperError
|
python
|
{
"resource": ""
}
|
q278851
|
deep_update
|
test
|
def deep_update(d, u):
"""Deeply updates a dictionary. List values are concatenated.
Args:
d (dict): First dictionary which will be updated
u (dict): Second dictionary used to extend the first one
Returns:
dict: The merged dictionary
"""
for k, v in u.items():
if isinstance(v, Mapping):
d[k] = deep_update(d.get(k,
|
python
|
{
"resource": ""
}
|
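The recursion is truncated mid-call. A self-contained sketch that matches the docstring (mappings merged recursively, lists concatenated, scalars overwritten):

from collections.abc import Mapping

def deep_update(d, u):
    """Deeply update dict ``d`` with ``u``; list values are concatenated."""
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = deep_update(d.get(k, {}), v)
        elif isinstance(v, list):
            d[k] = d.get(k, []) + v  # concatenate lists per the docstring
        else:
            d[k] = v
    return d

For example, deep_update({'a': [1]}, {'a': [2]}) yields {'a': [1, 2]}.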
q278852
|
main
|
test
|
def main(ctx, log_level, keeper_hostname, username, password):
"""ltd is a command-line client for LSST the Docs.
Use ltd to upload new site builds, and to work with the LTD Keeper API.
"""
ch = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(levelname)8s %(name)s | %(message)s')
ch.setFormatter(formatter)
logger = logging.getLogger('ltdconveyor')
logger.addHandler(ch)
logger.setLevel(log_level.upper())
# Subcommands should use the
|
python
|
{
"resource": ""
}
|
q278853
|
part_edit_cmd
|
test
|
def part_edit_cmd():
'Edit a part from an OOXML Package without unzipping it'
parser = argparse.ArgumentParser(description=inspect.getdoc(part_edit_cmd))
parser.add_argument(
'path',
help='Path to part (including path to zip file, i.e. ./file.zipx/part)',
)
parser.add_argument(
'--reformat-xml',
action='store_true',
|
python
|
{
"resource": ""
}
|
q278854
|
pack_dir_cmd
|
test
|
def pack_dir_cmd():
'List the contents of a subdirectory of a zipfile'
parser = argparse.ArgumentParser(description=inspect.getdoc(pack_dir_cmd))
parser.add_argument(
'path',
help=(
'Path to list (including path to zip file, '
|
python
|
{
"resource": ""
}
|
q278855
|
split_all
|
test
|
def split_all(path):
"""
recursively call os.path.split until we have all of the components
of a pathname suitable for passing back to os.path.join.
|
python
|
{
"resource": ""
}
|
q278856
|
find_file
|
test
|
def find_file(path):
"""
Given a path to a part in a zip file, return a path to the file and
the path to the part.
Assuming /foo.zipx exists as a file,
>>> find_file('/foo.zipx/dir/part') # doctest: +SKIP
('/foo.zipx', '/dir/part')
>>> find_file('/foo.zipx') # doctest: +SKIP
('/foo.zipx', '')
"""
path_components = split_all(path)
def get_assemblies():
"""
Enumerate the various combinations of file paths and part paths
"""
for n in range(len(path_components), 0,
|
python
|
{
"resource": ""
}
|
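A self-contained guess at the truncated generator and search loop, assuming the ``split_all`` helper from row q278855; it reproduces the doctest behaviour shown above:

import os

def find_file(path):
    """Return (zip_file_path, part_path) for a path into a zip file."""
    path_components = split_all(path)  # helper from row q278855

    def get_assemblies():
        # Try the longest candidate file path first, shifting one
        # component at a time from the file side to the part side.
        for n in range(len(path_components), 0, -1):
            file_path = os.path.join(*path_components[:n])
            part = path_components[n:]
            part_path = '/' + '/'.join(part) if part else ''
            yield file_path, part_path

    for file_path, part_path in get_assemblies():
        if os.path.isfile(file_path):
            return file_path, part_path
    raise ValueError('No file found in path: %s' % path)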
q278857
|
EditableFile.get_editor
|
test
|
def get_editor(filepath):
"""
Give preference to an XML_EDITOR or EDITOR defined in the
environment. Otherwise use notepad on Windows and
|
python
|
{
"resource": ""
}
|
q278858
|
FileHeaderChecker.process_module
|
test
|
def process_module(self, node):
"""Process the astroid node stream."""
if self.config.file_header:
if sys.version_info[0] < 3:
pattern = re.compile(
r'\A' + self.config.file_header, re.LOCALE | re.MULTILINE)
else:
# The use of re.LOCALE is discouraged in python 3
pattern = re.compile(
r'\A' + self.config.file_header, re.MULTILINE)
content = None
|
python
|
{
"resource": ""
}
|
q278859
|
ChartsGenerator.gen
|
test
|
def gen(self, slug, name, dataobj, xfield, yfield, time_unit=None,
chart_type="line", width=800,
height=300, color=Color(), size=Size(),
scale=Scale(zero=False), shape=Shape(), filepath=None,
html_before="", html_after=""):
"""
Generates an html chart from either a pandas dataframe, a dictionary,
a list or an Altair Data object and optionally write it to a file
|
python
|
{
"resource": ""
}
|
q278860
|
ChartsGenerator.html
|
test
|
def html(self, slug, name, chart_obj, filepath=None,
html_before="", html_after=""):
"""
Generate html from an Altair chart object and optionally write it to a file
"""
try:
html = ""
if name:
html = "<h3>" + name + "</h3>"
json_data = chart_obj.to_json()
json_data = self._patch_json(json_data)
html = html_before + html +\
self._json_to_html(slug, json_data) + html_after
|
python
|
{
"resource": ""
}
|
q278861
|
ChartsGenerator.serialize
|
test
|
def serialize(self, dataobj, xfield, yfield, time_unit=None,
chart_type="line", width=800,
height=300, color=None, size=None,
scale=Scale(zero=False), shape=None, options={}):
"""
Serialize to an Altair chart object from either a pandas dataframe, a dictionary,
a list or an Altair Data object
"""
dataset = dataobj
if self._is_dict(dataobj) is True:
dataset = self._dict_to_df(dataobj, xfield, yfield)
elif isinstance(dataobj, list):
dataset = Data(values=dataobj)
xencode, yencode = self._encode_fields(
xfield, yfield, time_unit)
|
python
|
{
"resource": ""
}
|
q278862
|
ChartsGenerator._patch_json
|
test
|
def _patch_json(self, json_data):
"""
Patch the Altair generated json to the newest Vega Lite spec
"""
json_data = json.loads(json_data)
# add schema
json_data["$schema"] = "https://vega.github.io/schema/vega-lite/2.0.0-beta.15.json"
# add top level width and height
|
python
|
{
"resource": ""
}
|
q278863
|
ChartsGenerator._json_to_html
|
test
|
def _json_to_html(self, slug, json_data):
"""
Generates html from Vega lite data
"""
html = '<div id="chart-' + slug + '"></div>'
html += '<script>'
html += 'var s' + slug + ' = ' + json_data + ';'
html += 'vega.embed("#chart-' +
|
python
|
{
"resource": ""
}
|
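The string building stops inside the ``vega.embed`` call. A plausible completion; the ``(selector, spec)`` argument form mirrors vega-embed's documented usage, and any extra options are omitted as unknown:

def _json_to_html(self, slug, json_data):
    """Generate html from Vega-Lite data."""
    html = '<div id="chart-' + slug + '"></div>'
    html += '<script>'
    html += 'var s' + slug + ' = ' + json_data + ';'
    # Plausible completion of the truncated embed call.
    html += 'vega.embed("#chart-' + slug + '", s' + slug + ');'
    html += '</script>'
    return html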
q278864
|
ChartsGenerator._dict_to_df
|
test
|
def _dict_to_df(self, dictobj, xfield, yfield):
"""
Converts a dictionary to a pandas dataframe
"""
x = []
y = []
for datapoint in dictobj:
|
python
|
{
"resource": ""
}
|
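The loop body is missing. A hedged sketch that reads the dict's keys as the x column and its values as the y column; treating index 0 of ``xfield``/``yfield`` as the column name is an assumption consistent with ``_encode_fields`` reading the type from index 1:

import pandas as pd

def _dict_to_df(self, dictobj, xfield, yfield):
    """Convert a dictionary to a pandas dataframe."""
    x = []
    y = []
    for datapoint in dictobj:
        x.append(datapoint)           # assumed: keys become the x column
        y.append(dictobj[datapoint])  # assumed: values become the y column
    return pd.DataFrame({xfield[0]: x, yfield[0]: y})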
q278865
|
ChartsGenerator._write_file
|
test
|
def _write_file(self, slug, folderpath, html):
"""
Writes a chart's html to a file
"""
# check directories
if not os.path.isdir(folderpath):
try:
os.makedirs(folderpath)
|
python
|
{
"resource": ""
}
|
q278866
|
ChartsGenerator._chart_class
|
test
|
def _chart_class(self, df, chart_type, **kwargs):
"""
Get the right chart class from a string
"""
if chart_type == "bar":
return Chart(df).mark_bar(**kwargs)
elif chart_type == "circle":
return Chart(df).mark_circle(**kwargs)
elif chart_type == "line":
return Chart(df).mark_line(**kwargs)
elif chart_type == "point":
return Chart(df).mark_point(**kwargs)
elif chart_type == "area":
return Chart(df).mark_area(**kwargs)
elif chart_type == "tick":
|
python
|
{
"resource": ""
}
|
q278867
|
ChartsGenerator._encode_fields
|
test
|
def _encode_fields(self, xfield, yfield, time_unit=None,
scale=Scale(zero=False)):
"""
Encode the fields in Altair format
"""
if scale is None:
scale = Scale()
xfieldtype = xfield[1]
yfieldtype = yfield[1]
x_options = None
if len(xfield) > 2:
x_options = xfield[2]
y_options = None
if len(yfield) > 2:
y_options = yfield[2]
if time_unit is not None:
if x_options is None:
xencode = X(xfieldtype, timeUnit=time_unit)
else:
xencode = X(
xfieldtype,
axis=Axis(**x_options),
timeUnit=time_unit,
scale=scale
|
python
|
{
"resource": ""
}
|
q278868
|
ghuser_role
|
test
|
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to a GitHub user.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The
|
python
|
{
"resource": ""
}
|
q278869
|
_infer_tarball_url
|
test
|
def _infer_tarball_url():
"""Returns the tarball URL inferred from an app.json, if present."""
try:
with click.open_file('app.json', 'r') as f:
contents = f.read()
app_json = json.loads(contents)
except IOError:
return None
repository
|
python
|
{
"resource": ""
}
|
q278870
|
up
|
test
|
def up(tarball_url, auth_token, env, app_name):
"""Brings up a Heroku app."""
tarball_url = tarball_url or _infer_tarball_url()
if not tarball_url:
click.echo('No tarball URL found.')
sys.exit(1)
if env:
# Split ["KEY=value", ...] into {"KEY": "value", ...}
env = {
arg.split('=')[0]: arg.split('=')[1]
|
python
|
{
"resource": ""
}
|
q278871
|
down
|
test
|
def down(auth_token, force, app_name):
"""Brings down a Heroku app."""
if not app_name:
click.echo(
'WARNING: Inferring the app name when deleting is deprecated. '
'Starting with happy 2.0, the app_name parameter will be required.'
)
app_name = app_name or _read_app_name()
if not app_name:
click.echo('No app name given.')
sys.exit(1)
if not force:
click.confirm(
'Are you sure you want to delete %s?' % app_name,
|
python
|
{
"resource": ""
}
|
q278872
|
iter_attribute
|
test
|
def iter_attribute(iterable_name) -> Union[Iterable, Callable]:
"""Decorator implementing Iterator interface with nicer manner.
Example
-------
@iter_attribute('my_attr')
class DecoratedClass:
...
Warning:
========
When using PyCharm or MYPY you'll probably see issues with the decorated class not being recognized as an Iterator.
That's an issue which could not be overcome yet; it is probably because the object is interpreted
statically rather than dynamically. MYPY checks for methods defined in the class code, which
changes at runtime. Since __iter__ and __next__ are added dynamically, MYPY cannot find them
in objects before an object of the class is created. Possible workarounds for this issue are:
1. Define ``dummy`` __iter__ class like:
@iter_attribute('attr')
class Test:
def __init__(self) -> None:
self.attr = [1, 2, 3]
def __iter__(self):
pass
2. After creating object use cast or assert function denoting that particular instance inherits
from collections.Iterator:
assert isinstance(my_object, collections.Iterator)
:param iterable_name: string representing attribute name which has to be iterated
:return: DecoratedClass with implemented '__iter__' and '__next__' methods.
"""
|
python
|
{
"resource": ""
}
|
q278873
|
binary
|
test
|
def binary(length):
"""
returns a random string representing a binary number
:param length: number of bits
"""
num = randint(1, 999999)
|
python
|
{
"resource": ""
}
|
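The body ends right after drawing a random integer. One plausible reading of how the bits could be produced from ``num``; the pad-and-trim step is a guess at how ``length`` is honoured:

from random import randint

def binary(length):
    """Return a random string representing a binary number of ``length`` bits."""
    num = randint(1, 999999)
    bits = format(num, 'b').zfill(length)  # base-2 digits, left-padded (assumed)
    return bits[-length:]                  # keep exactly `length` bits (assumed)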
q278874
|
ipaddress
|
test
|
def ipaddress(not_valid=None):
"""
returns a string representing a random ip address
:param not_valid: if passed, must be a list of integers representing class A networks that must be ignored
"""
not_valid_class_A = not_valid or []
class_a = [r for r in range(1, 256) if r not in not_valid_class_A]
|
python
|
{
"resource": ""
}
|
q278875
|
date
|
test
|
def date(start, end):
"""Get a random date between two dates"""
stime = date_to_timestamp(start)
|
python
|
{
"resource": ""
}
|
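The snippet stops after converting the start date. A symmetric sketch of the rest, assuming ``date_to_timestamp`` (referenced on the first line) converts both endpoints:

from datetime import datetime
from random import uniform

def date(start, end):
    """Get a random date between two dates."""
    stime = date_to_timestamp(start)  # helper referenced by the snippet
    etime = date_to_timestamp(end)    # assumed symmetric conversion
    return datetime.fromtimestamp(uniform(stime, etime))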
q278876
|
Heroku._get_session
|
test
|
def _get_session(self):
"""Returns a prepared ``Session`` instance."""
session = Session()
session.headers = {
'Content-type': 'application/json',
'Accept': 'application/vnd.heroku+json; version=3',
}
if self._auth_token:
|
python
|
{
"resource": ""
}
|
q278877
|
Heroku.api_request
|
test
|
def api_request(self, method, endpoint, data=None, *args, **kwargs):
"""Sends an API request to Heroku.
:param method: HTTP method.
:param endpoint: API endpoint, e.g. ``/apps``.
:param data: A dict sent as JSON in the body of the request.
:returns: A dict representation of the JSON response.
"""
session = self._get_session()
api_root = 'https://api.heroku.com'
url = api_root + endpoint
if data:
data = json.dumps(data)
|
python
|
{
"resource": ""
}
|
q278878
|
Heroku.create_build
|
test
|
def create_build(self, tarball_url, env=None, app_name=None):
"""Creates an app-setups build. Returns response data as a dict.
:param tarball_url: URL of a tarball containing an ``app.json``.
:param env: Dict containing environment variable overrides.
:param app_name: Name of the Heroku app to create.
:returns: Response data as a ``dict``.
"""
data = {
'source_blob': {
|
python
|
{
"resource": ""
}
|
q278879
|
Heroku.check_build_status
|
test
|
def check_build_status(self, build_id):
"""Checks the status of an app-setups build.
:param build_id: ID of the build to check.
:returns: ``True`` if succeeded, ``False`` if pending.
"""
data = self.api_request('GET', '/app-setups/%s' % build_id)
status = data.get('status')
|
python
|
{
"resource": ""
}
|
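The status handling is truncated. A hedged sketch of the remaining mapping from the Heroku status string to the documented boolean; the failure branch and its error type are assumptions:

def check_build_status(self, build_id):
    """Return True if the app-setups build succeeded, False if pending."""
    data = self.api_request('GET', '/app-setups/%s' % build_id)
    status = data.get('status')
    if status == 'failed':
        # Hypothetical error type; the real one is not shown in the row.
        raise RuntimeError(data.get('failure_message'))
    return status == 'succeeded'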
q278880
|
sequence
|
test
|
def sequence(prefix, cache=None):
"""
generator that returns a unique string
:param prefix: prefix of string
:param cache: cache used to store the last used number
>>> next(sequence('abc'))
'abc-0'
>>> next(sequence('abc'))
'abc-1'
"""
if cache is None:
|
python
|
{
"resource": ""
}
|
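The body is cut at the cache default. A self-contained sketch; the module-level default cache is an assumed name, patterned on ``_cache_unique`` in row q278882:

from itertools import count

_cache_sequence = {}  # assumed module-level default cache

def sequence(prefix, cache=None):
    """Yield unique strings '<prefix>-0', '<prefix>-1', ..."""
    if cache is None:
        cache = _cache_sequence
    cache.setdefault(prefix, count())
    while True:
        yield '%s-%d' % (prefix, next(cache[prefix]))

Sharing the counter through the cache is what makes the doctest work: two separate next(sequence('abc')) calls create fresh generators but draw from the same counter, giving 'abc-0' then 'abc-1'.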
q278881
|
memoize
|
test
|
def memoize(func):
"""Decorator that stores function results in a dictionary to be used on the
next time that the same arguments were informed."""
func._cache_dict = {}
@wraps(func)
|
python
|
{
"resource": ""
}
|
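The wrapper body is missing. A plausible, self-contained completion that keys the cache on the call arguments, as the docstring describes:

from functools import wraps

def memoize(func):
    """Cache results keyed by the positional and keyword arguments."""
    func._cache_dict = {}

    @wraps(func)
    def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))  # arguments must be hashable
        if key not in func._cache_dict:
            func._cache_dict[key] = func(*args, **kwargs)
        return func._cache_dict[key]

    return wrapper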
q278882
|
unique
|
test
|
def unique(func, num_args=0, max_attempts=100, cache=None):
"""
wraps a function so that it produces unique results
:param func:
:param num_args:
>>> import random
>>> choices = [1,2]
>>> a = unique(random.choice, 1)
>>> a,b = a(choices), a(choices)
>>> a == b
False
"""
if cache is None:
cache = _cache_unique
@wraps(func)
def wrapper(*args):
key = "%s_%s" % (str(func.__name__), str(args[:num_args]))
attempt = 0
while attempt < max_attempts:
attempt
|
python
|
{
"resource": ""
}
|
q278883
|
BaseCommand.register_sub_commands
|
test
|
def register_sub_commands(self, parser):
"""
Add any sub commands to the argument parser.
:param parser: The argument parser object
"""
sub_commands = self.get_sub_commands()
if sub_commands:
sub_parsers = parser.add_subparsers(dest=self.sub_parser_dest_name)
for name, cls in sub_commands.items():
|
python
|
{
"resource": ""
}
|
q278884
|
BaseCommand.get_root_argparser
|
test
|
def get_root_argparser(self):
"""
Gets the root argument parser object.
"""
return
|
python
|
{
"resource": ""
}
|
q278885
|
BaseCommand.get_description
|
test
|
def get_description(self):
"""
Gets the description of the command. If it's not supplied, the first sentence of the doc string is used.
"""
if self.description:
return self.description
|
python
|
{
"resource": ""
}
|
q278886
|
BaseCommand.get_help
|
test
|
def get_help(self):
"""
Gets the help text for the command. If it's not supplied, the doc string is used.
"""
if self.help:
return self.help
|
python
|
{
"resource": ""
}
|
q278887
|
BaseCommand.run
|
test
|
def run(self, args=None):
"""
Runs the command passing in the parsed arguments.
:param args: The arguments to run the command with. If ``None`` the arguments
are gathered from the argument parser. This is automatically set when calling
sub commands and in most cases should not be
|
python
|
{
"resource": ""
}
|
q278888
|
Encoder.encode
|
test
|
def encode(self, *args, **kwargs):
"""Encode wrapper for a dataset with maximum value
Datasets can be one or two dimensional
Strings are ignored as ordinal encoding"""
if isinstance(args[0], str):
return self.encode([args[0]],**kwargs)
elif isinstance(args[0], int) or isinstance(args[0], float):
return self.encode([[args[0]]],**kwargs)
if len(args)>1:
dataset = args
else:
dataset = args[0]
typemap = list(map(type,dataset))
code = self.encoding[0]
if type('') in typemap:
data = ','.join(map(str,dataset))
elif type([]) in typemap or type(()) in typemap:
|
python
|
{
"resource": ""
}
|
q278889
|
GoldenCheetahClient.get_athletes
|
test
|
def get_athletes(self):
"""Get all available athletes
This method is cached to prevent unnecessary calls to GC.
"""
|
python
|
{
"resource": ""
}
|
q278890
|
GoldenCheetahClient.get_last_activities
|
test
|
def get_last_activities(self, n):
"""Get all activity data for the last activity
Keyword arguments:
"""
filenames = self.get_activity_list().iloc[-n:].filename.tolist()
|
python
|
{
"resource": ""
}
|
q278891
|
GoldenCheetahClient._request_activity_list
|
test
|
def _request_activity_list(self, athlete):
"""Actually do the request for activity list
This call is slow and therefore this method is memory cached.
Keyword arguments:
athlete -- Full name of athlete
"""
response = self._get_request(self._athlete_endpoint(athlete))
response_buffer = StringIO(response.text)
activity_list = pd.read_csv(
filepath_or_buffer=response_buffer,
parse_dates={'datetime': ['date', 'time']},
sep=r',\s*',
engine='python'
)
activity_list.rename(columns=lambda x: x.lower(), inplace=True)
activity_list.rename(
|
python
|
{
"resource": ""
}
|
q278892
|
GoldenCheetahClient._request_activity_data
|
test
|
def _request_activity_data(self, athlete, filename):
"""Actually do the request for activity filename
This call is slow and therefore this method is memory cached.
Keyword arguments:
athlete -- Full name of athlete
filename -- filename of the requested activity (e.g. \'2015_04_29_09_03_16.json\')
"""
response = self._get_request(self._activity_endpoint(athlete, filename)).json()
activity = pd.DataFrame(response['RIDE']['SAMPLES'])
|
python
|
{
"resource": ""
}
|
q278893
|
GoldenCheetahClient._athlete_endpoint
|
test
|
def _athlete_endpoint(self, athlete):
"""Construct athlete endpoint from host and athlete name
Keyword arguments:
|
python
|
{
"resource": ""
}
|
q278894
|
GoldenCheetahClient._activity_endpoint
|
test
|
def _activity_endpoint(self, athlete, filename):
"""Construct activity endpoint from host, athlete name and filename
Keyword arguments:
athlete -- Full athlete name
|
python
|
{
"resource": ""
}
|
q278895
|
GoldenCheetahClient._get_request
|
test
|
def _get_request(self, endpoint):
"""Do actual GET request to GC REST API
Also validates responses.
Keyword arguments:
endpoint -- full endpoint for GET request
"""
try:
response = requests.get(endpoint)
except requests.exceptions.RequestException:
raise GoldenCheetahNotAvailable(endpoint)
if response.text.startswith('unknown athlete'):
match = re.match(
pattern='unknown athlete (?P<athlete>.+)',
string=response.text)
|
python
|
{
"resource": ""
}
|
q278896
|
Happy.create
|
test
|
def create(self, tarball_url, env=None, app_name=None):
"""Creates a Heroku app-setup build.
:param tarball_url: URL of a tarball containing an ``app.json``.
:param env: (optional) Dict containing environment variable overrides.
:param app_name: (optional) Name of the Heroku app to create.
:returns: A tuple with ``(build_id, app_name)``.
|
python
|
{
"resource": ""
}
|
q278897
|
url_with_auth
|
test
|
def url_with_auth(regex, view, kwargs=None, name=None, prefix=''):
"""
if view is string-based, it must be a full path
"""
from djapiauth.auth import api_auth
if isinstance(view, six.string_types): #
|
python
|
{
"resource": ""
}
|
q278898
|
title
|
test
|
def title(languages=None, genders=None):
"""
returns a random title
.. code-block:: python
>>> d.title()
u'Mrs.'
>>> d.title(['es'])
u'El Sr.'
>>> d.title(None, [GENDER_FEMALE])
u'Mrs.'
:param languages: list of allowed languages. ['en'] if None
:param genders: list of allowed genders. (GENDER_FEMALE, GENDER_MALE) if None
"""
languages =
|
python
|
{
"resource": ""
}
|
q278899
|
person
|
test
|
def person(languages=None, genders=None):
"""
returns a random tuple representing person information
.. code-block:: python
>>> d.person()
(u'Derren', u'Powell', 'm')
>>> d.person(genders=['f'])
(u'Marge', u'Rodriguez', u'Mrs.', 'f')
>>> d.person(['es'],['m'])
(u'Jacinto', u'Delgado', u'El Sr.', 'm')
:param language:
|
python
|
{
"resource": ""
}
|