Dataset columns: _id (string, 2-7 chars); title (string, 1-88 chars); partition (string, 3 classes); text (string, 75-19.8k chars); language (string, 1 class); meta_information (dict)
q278800
stream_command_dicts
test
def stream_command_dicts(commands, parallel=False):
    """
    Takes a list of dictionaries with keys corresponding to ``stream_command``
    arguments, and runs all concurrently.

    :param commands: A list of dictionaries, the keys of which should line up
                     with the arguments to the ``stream_command`` function.
    :type commands: ``list`` of ``dict``
    :param parallel: If true, commands will be run in parallel.
    :type parallel: ``bool``
    """
    if parallel is True:
        threads = []
        for command in commands:
            # Bind the current command as a default argument; a bare closure
            # would capture the loop variable by reference and could race
            # with the next iteration rebinding it.
            target = lambda cmd=command: stream_command(**cmd)
            thread = Thread(target=target)
            thread.start()
            threads.append(thread)
        for t in threads:
            t.join()
    else:
        for command in commands:
            stream_command(**command)
python
{ "resource": "" }
q278801
stream_commands
test
def stream_commands(commands, hash_colors=True, parallel=False):
    """
    Runs multiple commands, optionally in parallel. Each command should be a
    dictionary with a 'command' key and optionally 'description' and
    'write_stdin' keys.
    """
    def _get_color(string):
        if hash_colors is True:
            return get_color_hash(string)
        else:
            return DEFAULT_COLOR

    fixed_commands = []
    for command in commands:
        cmd_text = command['command']
        description = command.get('description')
        color = _get_color(description or '')
        write_stdin = command.get('write_stdin')
        description = color(description) if color is not None else description
        formatter = _format_with_description(description)
        fixed_commands.append({
            'command': cmd_text,
            'formatter': formatter,
            'write_stdin': write_stdin,
            'ignore_empty': True
        })
    stream_command_dicts(fixed_commands, parallel=parallel)
python
{ "resource": "" }
q278802
networkdays
test
def networkdays(from_date, to_date, locale='en-US'):
    """Return the net work days according to RH's calendar."""
    holidays = locales[locale]
    return workdays.networkdays(from_date, to_date, holidays)
python
{ "resource": "" }
q278803
_get_path
test
def _get_path(cmd):
    """Queries bash to find the path to a command on the system."""
    if cmd in _PATHS:
        return _PATHS[cmd]
    out = subprocess.check_output('which {}'.format(cmd), shell=True)
    _PATHS[cmd] = out.decode("utf-8").strip()
    return _PATHS[cmd]
python
{ "resource": "" }
q278804
_build_ssh_command
test
def _build_ssh_command(hostname, username, idfile, ssh_command, tunnel):
    """Uses hostname and other info to construct an SSH command."""
    command = [_get_path('ssh'),
               '-o', 'StrictHostKeyChecking=no',
               '-o', 'ConnectTimeout=5']
    if idfile is not None:
        command.extend(['-i', idfile])
    if tunnel is not None:
        # If there's a tunnel, run the ssh command on the tunneled host.
        command.extend(['-A', '-t', tunnel, 'ssh', '-A', '-t'])
    if username is not None:
        command.append('{}@{}'.format(username, hostname))
    else:
        command.append(hostname)
    if ssh_command is not None:
        command.append(repr(ssh_command))
    return ' '.join(command)
python
{ "resource": "" }
q278805
_build_scp_command
test
def _build_scp_command(hostname, username, idfile, is_get,
                       local_path, remote_path):
    """Uses hostname and other info to construct an SCP command.

    :param hostname: The hostname of the remote machine.
    :type hostname: ``str``
    :param username: The username to use on the remote machine.
    :type username: ``str``
    :param idfile: A path to the identity file to use.
    :type idfile: ``str``
    :param is_get: If true, we are getting a file rather than putting a file.
    :type is_get: ``bool``
    :param local_path: The path on the local file system.
    :type local_path: ``str``
    :param remote_path: The path on the remote file system.
    :type remote_path: ``str``
    """
    # Check for None before calling .strip(); the reverse order would raise
    # AttributeError on a None hostname instead of the intended ValueError.
    if hostname is None or hostname.strip() == '':
        raise ValueError('Empty hostname')
    command = [_get_path('scp'),
               '-o', 'StrictHostKeyChecking=no',
               '-o', 'ConnectTimeout=5',
               '-o', 'UserKnownHostsFile={}'.format(_KNOWN_HOSTS_FILE)]
    if idfile is not None:
        command.extend(['-i', idfile])
    if username is not None:
        hostname = '%s@%s' % (username, hostname)
    remote_path = '{}:{}'.format(hostname, remote_path)
    if is_get:
        command.extend([remote_path, local_path])
    else:
        command.extend([local_path, remote_path])
    return ' '.join(command)
python
{ "resource": "" }
q278806
_copy_to
test
def _copy_to(entries, remote_path, local_path, profile):
    """
    Performs an SCP command where the remote_path is the target and the
    local_path is the source.

    :param entries: A list of entries.
    :type entries: ``list`` of :py:class:`HostEntry`
    :param remote_path: The target path on the remote machine(s).
    :type remote_path: ``str``
    :param local_path: The source path on the local machine.
    :type local_path: ``str``
    :param profile: The profile, holding username/idfile info, etc.
    :type profile: :py:class:`Profile`
    """
    commands = []
    for entry in entries:
        hname = entry.hostname or entry.public_ip
        cmd = _build_scp_command(hname, profile.username,
                                 profile.identity_file,
                                 is_get=False,
                                 local_path=local_path,
                                 remote_path=remote_path)
        print('Command:', cmd)
        commands.append({
            'command': cmd,
            'description': entry.display()
        })
    stream_commands(commands)
    print(green('Finished copying'))
python
{ "resource": "" }
q278807
_copy_from
test
def _copy_from(entries, remote_path, local_path, profile):
    """
    Performs an SCP command where the remote_path is the source and the
    local_path is a format string, formatted individually for each host being
    copied from so as to create one or more distinct paths on the local
    system.

    :param entries: A list of entries.
    :type entries: ``list`` of :py:class:`HostEntry`
    :param remote_path: The source path on the remote machine(s).
    :type remote_path: ``str``
    :param local_path: A format string for the path on the local machine.
    :type local_path: ``str``
    :param profile: The profile, holding username/idfile info, etc.
    :type profile: :py:class:`Profile`
    """
    commands = []
    paths = set()
    for entry in entries:
        hname = entry.hostname or entry.public_ip
        _local_path = entry.format_string(local_path)
        if _local_path in paths:
            # Report the formatted path, which is what actually collided.
            raise ValueError('Duplicate local paths: one or more paths '
                             'had value {} after formatting.'
                             .format(_local_path))
        paths.add(_local_path)
        # If the path references a folder, create the folder if it doesn't
        # exist.
        _folder = os.path.split(_local_path)[0]
        if len(_folder) > 0:
            if not os.path.exists(_folder):
                print('Creating directory ' + _folder)
                os.makedirs(_folder)
        cmd = _build_scp_command(hname, profile.username,
                                 profile.identity_file,
                                 is_get=True,
                                 local_path=_local_path,
                                 remote_path=remote_path)
        print('Command:', cmd)
        commands.append({
            'command': cmd,
            'description': entry.display()
        })
    stream_commands(commands)
    print(green('Finished copying'))
python
{ "resource": "" }
q278808
_run_ssh_command
test
def _run_ssh_command(entries, username, idfile, command, tunnel,
                     parallel=False):
    """
    Runs the given command over SSH in parallel on all hosts in `entries`.

    :param entries: The host entries to pull the hostnames from.
    :type entries: ``list`` of :py:class:`HostEntry`
    :param username: To use a specific username.
    :type username: ``str`` or ``NoneType``
    :param idfile: The SSH identity file to use, or none.
    :type idfile: ``str`` or ``NoneType``
    :param command: The command to run.
    :type command: ``str``
    :param parallel: If true, commands will be run in parallel.
    :type parallel: ``bool``
    """
    if len(entries) == 0:
        print('(No hosts to run command on)')
        return 1
    # Check for None before calling .strip() to avoid an AttributeError.
    if command is None or command.strip() == '':
        raise ValueError('No command given')
    print('Running command {0} on {1} matching hosts'
          .format(green(repr(command)), len(entries)))
    shell_cmds = []
    for entry in entries:
        hname = entry.hostname or entry.public_ip
        cmd = _build_ssh_command(hname, username, idfile, command, tunnel)
        shell_cmds.append({
            'command': cmd,
            'description': entry.display()
        })
    stream_commands(shell_cmds, parallel=parallel)
    print(green('All commands finished'))
python
{ "resource": "" }
q278809
_connect_ssh
test
def _connect_ssh(entry, username, idfile, tunnel=None):
    """
    SSH into a host.

    :param entry: The host entry to pull the hostname from.
    :type entry: :py:class:`HostEntry`
    :param username: To use a specific username.
    :type username: ``str`` or ``NoneType``
    :param idfile: The SSH identity file to use, if supplying a username.
    :type idfile: ``str`` or ``NoneType``
    :param tunnel: Host to tunnel SSH command through.
    :type tunnel: ``str`` or ``NoneType``

    :return: An exit status code.
    :rtype: ``int``
    """
    if entry.hostname != "" and entry.hostname is not None:
        _host = entry.hostname
    elif entry.public_ip != "" and entry.public_ip is not None:
        _host = entry.public_ip
    elif entry.private_ip != "" and entry.private_ip is not None:
        if tunnel is None:
            raise ValueError("Entry does not have a hostname or public IP. "
                             "You can connect via private IP if you use a "
                             "tunnel.")
        _host = entry.private_ip
    else:
        raise ValueError("No hostname, public IP or private IP information "
                         "found on host entry. I don't know how to connect.")
    command = _build_ssh_command(_host, username, idfile, None, tunnel)
    print('Connecting to %s...' % cyan(entry.display()))
    print('SSH command: %s' % green(command))
    proc = subprocess.Popen(command, shell=True)
    return proc.wait()
python
{ "resource": "" }
q278810
LsiProfile.load
test
def load(cls, profile_name=None):
    """Loads the user's LSI profile, or provides a default."""
    lsi_location = os.path.expanduser('~/.lsi')
    if not os.path.exists(lsi_location):
        return LsiProfile()
    cfg_parser = ConfigParser()
    cfg_parser.read(lsi_location)
    if profile_name is None:
        # Load the default profile if one exists; otherwise return empty.
        if cfg_parser.has_section('default'):
            profile_name = 'default'
        else:
            return cls()
    elif not cfg_parser.has_section(profile_name):
        raise cls.LoadError('No such profile {}'.format(profile_name))

    def _get(option, alt=None):
        """Gets an option if it exists; else returns `alt`."""
        if cfg_parser.has_option(profile_name, option):
            return cfg_parser.get(profile_name, option)
        else:
            return alt

    if cfg_parser.has_option(profile_name, 'inherit'):
        profile = cls.load(cfg_parser.get(profile_name, 'inherit'))
    else:
        profile = cls()
    profile.override('username', _get('username'))
    profile.override('identity_file', _get('identity file'))
    profile.override('command', _get('command'))
    filters = [s for s in _get('filters', '').split(',') if len(s) > 0]
    exclude = [s for s in _get('exclude', '').split(',') if len(s) > 0]
    profile.filters.extend(filters)
    profile.exclude.extend(exclude)
    return profile
python
{ "resource": "" }
q278811
LsiProfile.from_args
test
def from_args(args):
    """Takes arguments parsed from argparse and returns a profile."""
    # If the args specify a username explicitly, don't load from file.
    if args.username is not None or args.identity_file is not None:
        profile = LsiProfile()
    else:
        profile = LsiProfile.load(args.profile)
    profile.override('username', args.username)
    profile.override('identity_file', args.identity_file)
    profile.override('command', args.command)
    profile.no_prompt = args.no_prompt
    profile.filters.extend(args.filters)
    profile.exclude.extend(args.exclude)
    if profile.identity_file is not None:
        profile.identity_file = os.path.expanduser(profile.identity_file)
    return profile
python
{ "resource": "" }
q278812
Relational.relate
test
def relate(self, part, id=None):
    """Relate this package component to the supplied part."""
    assert part.name.startswith(self.base)
    name = part.name[len(self.base):].lstrip('/')
    rel = Relationship(self, name, part.rel_type, id=id)
    self.relationships.add(rel)
    return rel
python
{ "resource": "" }
q278813
Relational.related
test
def related(self, reltype):
    """Return a list of parts related to this one via reltype."""
    parts = []
    package = getattr(self, 'package', None) or self
    for rel in self.relationships.types.get(reltype, []):
        parts.append(package[posixpath.join(self.base, rel.target)])
    return parts
python
{ "resource": "" }
q278814
Relational._load_rels
test
def _load_rels(self, source):
    """Load relationships from source XML."""
    # don't get confused here - the original source is string data;
    # the parameter source below is a Part object
    self.relationships.load(source=self, data=source)
python
{ "resource": "" }
q278815
Package.add
test
def add(self, part, override=True):
    """Add a part to the package.

    It will also add a content-type - by default an override. If override
    is False then it will add a content-type for the extension if one
    isn't already present.
    """
    # Boolean indexing: override=False selects add_default,
    # override=True selects add_override.
    ct_add_method = [
        self.content_types.add_default,
        self.content_types.add_override,
    ][override]
    self[part.name] = part
    ct_add_method(part)
python
{ "resource": "" }
q278816
Package._load_part
test
def _load_part(self, rel_type, name, data):
    """Load a part into this package based on its relationship type."""
    if self.content_types.find_for(name) is None:
        log.warning('no content type found for part %(name)s' % vars())
        return
    cls = Part.classes_by_rel_type[rel_type]
    part = cls(self, name)
    part.load(data)
    self[name] = part
    return part
python
{ "resource": "" }
q278817
ContentTypes.find_for
test
def find_for(self, name):
    """Get the correct content type for a given name."""
    mapping = self.items
    # first search the overrides (by name)
    # then fall back to the defaults (by extension)
    # finally, return None if unmatched
    return mapping.get(name, None) or mapping.get(get_ext(name) or None, None)
python
{ "resource": "" }
q278818
ContentType.from_element
test
def from_element(cls, element):
    "given an element, parse out the proper ContentType"
    # disambiguate the subclass; use a getattr default so an unknown
    # class name falls through to the ValueError below instead of
    # raising AttributeError
    ns, class_name = parse_tag(element.tag)
    class_ = getattr(ContentType, class_name, None)
    if not class_:
        msg = 'Invalid Types child element: %(class_name)s' % vars()
        raise ValueError(msg)
    # construct the subclass
    key = element.get(class_.key_name)
    name = element.get('ContentType')
    return class_(name, key)
python
{ "resource": "" }
q278819
parse
test
def parse(input_string, prefix=''):
    """Parses the given DSL string and returns parsed results.

    Args:
        input_string (str): DSL string
        prefix (str): Optional prefix to add to every element name,
            useful to namespace things

    Returns:
        dict: Parsed content
    """
    tree = parser.parse(input_string)
    visitor = ChatlVisitor(prefix)
    visit_parse_tree(tree, visitor)
    return visitor.parsed
python
{ "resource": "" }
q278820
ProjectTokenBuilder.build
test
def build(self, secret_key):
    """Builds a final copy of the token using the given secret key.

    :param secret_key(string): The secret key that corresponds to this
        builder's access key.
    """
    key = jwk.JWK(
        kty='oct',
        k=base64url_encode(uuid.UUID(secret_key).bytes),
    )
    header = {
        'alg': 'dir',
        'enc': 'A128GCM',
        'zip': 'DEF',
        'cty': 'JWT',
        'kid': self._access_key,
    }
    now = int(time.time())
    payload = {
        'iat': now,
        'nbf': now,
    }
    if self._expiration is not None:
        payload['exp'] = int(
            calendar.timegm(self._expiration.utctimetuple()))
    if len(self._view_identifiers) > 0:
        payload[VIEW_IDENTIFIERS_CLAIM_NAME] = self._view_identifiers
    if len(self._parameters) > 0:
        parameters = []
        for parameter in self._parameters:
            serialized = {
                'field': parameter.field,
                'op': parameter.op,
            }
            # An iterable value becomes an 'any' clause, a scalar a 'value'
            # clause. Testing parameter.value (rather than the parameter
            # object itself) is assumed to be the intent here, since the
            # iterable branch lists parameter.value.
            if hasattr(parameter.value, '__iter__'):
                serialized['any'] = list(parameter.value)
            else:
                serialized['value'] = parameter.value
            parameters.append(serialized)
        payload[PARAMETERS_CLAIM_NAME] = parameters
    if len(self._attributes) > 0:
        payload[ATTRIBUTES_CLAIM_NAME] = self._attributes
    tok = jwe.JWE(json_encode(payload), protected=header)
    tok.add_recipient(key)
    return tok.serialize(compact=True)
python
{ "resource": "" }
q278821
assign_force_field
test
def assign_force_field(ampal_obj, ff):
    """Assigns force field parameters to Atoms in the AMPAL object.

    Parameters
    ----------
    ampal_obj : AMPAL Object
        Any AMPAL object with a `get_atoms` method.
    ff: BuffForceField
        The force field to be used for scoring.
    """
    if hasattr(ampal_obj, 'ligands'):
        atoms = ampal_obj.get_atoms(ligands=True, inc_alt_states=True)
    else:
        atoms = ampal_obj.get_atoms(inc_alt_states=True)
    for atom in atoms:
        w_str = None
        a_ff_id = None
        if atom.element == 'H':
            continue
        elif atom.parent.mol_code.upper() in ff:
            # Index with the upper-cased code, matching the membership
            # test on the line above.
            if atom.res_label.upper() in ff[atom.parent.mol_code.upper()]:
                a_ff_id = (atom.parent.mol_code.upper(),
                           atom.res_label.upper())
            elif atom.res_label.upper() in ff['WLD']:
                a_ff_id = ('WLD', atom.res_label.upper())
            else:
                w_str = ('{} atom is not parameterised in the selected '
                         'force field for {} residues, this will be '
                         'ignored.').format(atom.res_label,
                                            atom.parent.mol_code)
        elif atom.res_label.upper() in ff['WLD']:
            a_ff_id = ('WLD', atom.res_label.upper())
        else:
            w_str = ('{} ({}) atom is not parameterised in the selected'
                     ' residue force field.').format(
                         atom.res_label, atom.parent.mol_code)
        if w_str:
            warnings.warn(w_str, NotParameterisedWarning)
        atom.tags['_buff_ff_id'] = a_ff_id
    return
python
{ "resource": "" }
q278822
BuffForceField.find_max_rad_npnp
test
def find_max_rad_npnp(self):
    """Finds the maximum radius and npnp in the force field.

    Returns
    -------
    (max_rad, max_npnp): (float, float)
        Maximum radius and npnp distance in the loaded force field.
    """
    max_rad = 0
    max_npnp = 0
    for res, _ in self.items():
        if res != 'KEY':
            for _, ff_params in self[res].items():
                if max_rad < ff_params[1]:
                    max_rad = ff_params[1]
                if max_npnp < ff_params[4]:
                    max_npnp = ff_params[4]
    return max_rad, max_npnp
python
{ "resource": "" }
q278823
BuffForceField._make_ff_params_dict
test
def _make_ff_params_dict(self):
    """Makes a dictionary containing PyAtomData for the force field.

    Returns
    -------
    ff_params_struct_dict: dict
        Dictionary containing PyAtomData structs for the force field
        parameters for each atom in the force field.
    """
    try:
        ff_params_struct_dict = {}
        for res in self.keys():
            if res == 'KEY':
                continue
            if res not in ff_params_struct_dict:
                ff_params_struct_dict[res] = {}
            for atom, params in self[res].items():
                ff_params_struct_dict[res][atom] = PyAtomData(
                    atom.encode(), params[0].encode(), *params[1:])
    except TypeError:
        raise ForceFieldParameterError(
            'Badly formatted force field parameters: {}'.format(params))
    return ff_params_struct_dict
python
{ "resource": "" }
q278824
ZipPackage.as_stream
test
def as_stream(self):
    """Return a zipped package as a readable stream."""
    stream = io.BytesIO()
    self._store(stream)
    stream.seek(0)
    return stream
python
{ "resource": "" }
q278825
ZipPackage._get_matching_segments
test
def _get_matching_segments(self, zf, name):
    """
    Return a generator yielding each of the segments whose names match
    name.
    """
    for n in zf.namelist():
        if n.startswith(name):
            yield zf.read(n)
python
{ "resource": "" }
q278826
copy_dir
test
def copy_dir(bucket_name, src_path, dest_path,
             aws_access_key_id=None, aws_secret_access_key=None,
             aws_profile=None, surrogate_key=None, cache_control=None,
             surrogate_control=None, create_directory_redirect_object=True):
    """Copy objects from one directory in a bucket to another directory in
    the same bucket.

    Object metadata is preserved while copying, with the following exceptions:

    - If a new surrogate key is provided it will replace the original one.
    - If ``cache_control`` and ``surrogate_control`` values are provided they
      will replace the old ones.

    Parameters
    ----------
    bucket_name : `str`
        Name of an S3 bucket.
    src_path : `str`
        Source directory in the S3 bucket. The ``src_path`` should ideally
        end in a trailing `'/'`. E.g. `'dir/dir2/'`.
    dest_path : `str`
        Destination directory in the S3 bucket. The ``dest_path`` should
        ideally end in a trailing `'/'`. E.g. `'dir/dir2/'`. The destination
        path cannot contain the source path.
    aws_access_key_id : `str`
        The access key for your AWS account. Also set
        ``aws_secret_access_key``.
    aws_secret_access_key : `str`
        The secret key for your AWS account.
    aws_profile : `str`, optional
        Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
        of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
        credentials.
    surrogate_key : `str`, optional
        The surrogate key to insert in the header of all objects in the
        ``x-amz-meta-surrogate-key`` field. This key is used to purge builds
        from the Fastly CDN when Editions change. If `None` then no header
        will be set. If the object already has a
        ``x-amz-meta-surrogate-key`` header then it will be replaced.
    cache_control : `str`, optional
        This sets (and overrides) the ``Cache-Control`` header on the copied
        files. The ``Cache-Control`` header specifically dictates how content
        is cached by the browser (if ``surrogate_control`` is also set).
    surrogate_control : `str`, optional
        This sets (and overrides) the ``x-amz-meta-surrogate-control`` header
        on the copied files. The ``Surrogate-Control`` or
        ``x-amz-meta-surrogate-control`` header is used in priority by Fastly
        to govern its caching. This caching policy is *not* passed to the
        browser.
    create_directory_redirect_object : `bool`, optional
        Create a directory redirect object for the root directory. The
        directory redirect object is an empty S3 object named after the
        directory (without a trailing slash) that contains a
        ``x-amz-meta-dir-redirect=true`` HTTP header. LSST the Docs' Fastly
        VCL is configured to redirect requests for a directory path to the
        directory's ``index.html`` (known as *courtesy redirects*).

    Raises
    ------
    ltdconveyor.s3.S3Error
        Thrown by any unexpected faults from the S3 API.
    RuntimeError
        Thrown when the source and destination directories are the same.
    """
    if not src_path.endswith('/'):
        src_path += '/'
    if not dest_path.endswith('/'):
        dest_path += '/'

    # Ensure the src_path and dest_path don't contain each other
    common_prefix = os.path.commonprefix([src_path, dest_path])
    if common_prefix == src_path:
        msg = 'Common prefix {0} is same as source dir {1}'.format(
            common_prefix, src_path)
        raise RuntimeError(msg)
    if common_prefix == dest_path:
        msg = 'Common prefix {0} is same as dest dir {1}'.format(
            common_prefix, dest_path)
        raise RuntimeError(msg)

    # Delete any existing objects in the destination
    delete_dir(bucket_name, dest_path,
               aws_access_key_id, aws_secret_access_key)

    session = boto3.session.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        profile_name=aws_profile)
    s3 = session.resource('s3')
    bucket = s3.Bucket(bucket_name)

    # Copy each object from source to destination
    for src_obj in bucket.objects.filter(Prefix=src_path):
        src_rel_path = os.path.relpath(src_obj.key, start=src_path)
        dest_key_path = os.path.join(dest_path, src_rel_path)

        # the src_obj (ObjectSummary) doesn't include headers afaik
        head = s3.meta.client.head_object(Bucket=bucket_name,
                                          Key=src_obj.key)
        metadata = head['Metadata']
        content_type = head['ContentType']

        # Try to use the original Cache-Control header if a new one is not
        # set. Track it per object so that one object's header does not
        # leak into later loop iterations.
        obj_cache_control = cache_control
        if obj_cache_control is None and 'CacheControl' in head:
            obj_cache_control = head['CacheControl']

        if surrogate_control is not None:
            metadata['surrogate-control'] = surrogate_control

        if surrogate_key is not None:
            metadata['surrogate-key'] = surrogate_key

        s3.meta.client.copy_object(
            Bucket=bucket_name,
            Key=dest_key_path,
            CopySource={'Bucket': bucket_name, 'Key': src_obj.key},
            MetadataDirective='REPLACE',
            Metadata=metadata,
            ACL='public-read',
            CacheControl=obj_cache_control,
            ContentType=content_type)

    if create_directory_redirect_object:
        dest_dirname = dest_path.rstrip('/')
        obj = bucket.Object(dest_dirname)
        metadata = {'dir-redirect': 'true'}
        obj.put(Body='',
                ACL='public-read',
                Metadata=metadata,
                CacheControl=cache_control)
python
{ "resource": "" }
q278827
open_bucket
test
def open_bucket(bucket_name,
                aws_access_key_id=None, aws_secret_access_key=None,
                aws_profile=None):
    """Open an S3 Bucket resource.

    Parameters
    ----------
    bucket_name : `str`
        Name of the S3 bucket.
    aws_access_key_id : `str`, optional
        The access key for your AWS account. Also set
        ``aws_secret_access_key``.
    aws_secret_access_key : `str`, optional
        The secret key for your AWS account.
    aws_profile : `str`, optional
        Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
        of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
        credentials.

    Returns
    -------
    bucket : Boto3 S3 Bucket instance
        The S3 bucket as a Boto3 instance.
    """
    session = boto3.session.Session(
        profile_name=aws_profile,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    s3 = session.resource('s3')
    bucket = s3.Bucket(bucket_name)
    return bucket
python
{ "resource": "" }
q278828
upload_dir
test
def upload_dir(bucket_name, path_prefix, source_dir,
               upload_dir_redirect_objects=True, surrogate_key=None,
               surrogate_control=None, cache_control=None, acl=None,
               aws_access_key_id=None, aws_secret_access_key=None,
               aws_profile=None):
    """Upload a directory of files to S3.

    This function places the contents of the Sphinx HTML build directory
    into the ``/path_prefix/`` directory of an *existing* S3 bucket.
    Existing files on S3 are overwritten; files that no longer exist in the
    ``source_dir`` are deleted from S3.

    Parameters
    ----------
    bucket_name : `str`
        Name of the S3 bucket where documentation is uploaded.
    path_prefix : `str`
        The root directory in the bucket where documentation is stored.
    source_dir : `str`
        Path of the Sphinx HTML build directory on the local file system.
        The contents of this directory are uploaded into the
        ``/path_prefix/`` directory of the S3 bucket.
    upload_dir_redirect_objects : `bool`, optional
        A feature flag to enable uploading objects to S3 for every
        directory. These objects contain ``x-amz-meta-dir-redirect=true``
        HTTP headers that tell Fastly to issue a 301 redirect from the
        directory object to the ``index.html`` in that directory.
    surrogate_key : `str`, optional
        The surrogate key to insert in the header of all objects in the
        ``x-amz-meta-surrogate-key`` field. This key is used to purge builds
        from the Fastly CDN when Editions change. If `None` then no header
        will be set.
    cache_control : `str`, optional
        This sets the ``Cache-Control`` header on the uploaded files.
        The ``Cache-Control`` header specifically dictates how content is
        cached by the browser (if ``surrogate_control`` is also set).
    surrogate_control : `str`, optional
        This sets the ``x-amz-meta-surrogate-control`` header on the
        uploaded files. The ``Surrogate-Control`` or
        ``x-amz-meta-surrogate-control`` header is used in priority by
        Fastly to govern its caching. This caching policy is *not* passed
        to the browser.
    acl : `str`, optional
        The pre-canned AWS access control list to apply to this upload. Can
        be ``'public-read'``, which allows files to be downloaded over HTTP
        by the public. See
        https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
        for an overview of S3's pre-canned ACL lists. Note that ACL settings
        are not validated locally. Default is `None`, meaning that no ACL is
        applied to an individual object. In this case, use ACLs applied to
        the bucket itself.
    aws_access_key_id : `str`, optional
        The access key for your AWS account. Also set
        ``aws_secret_access_key``.
    aws_secret_access_key : `str`, optional
        The secret key for your AWS account.
    aws_profile : `str`, optional
        Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
        of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
        credentials.

    Notes
    -----
    ``cache_control`` and ``surrogate_control`` can be used together.
    ``surrogate_control`` takes priority in setting Fastly's POP caching,
    while ``cache_control`` then sets the browser's caching. For example:

    - ``cache_control='no-cache'``
    - ``surrogate_control='max-age=31536000'``

    together will ensure that the browser always does an ETAG server query,
    but that Fastly will cache the content for one year (or until purged).
    This configuration is good for files that are frequently changed in
    place. For immutable uploads simply using ``cache_control`` is more
    efficient since it allows the browser to also locally cache content.

    .. seealso::

       - `Fastly: Cache control tutorial
         <https://docs.fastly.com/guides/tutorials/cache-control-tutorial>`_.
       - `Google: HTTP caching <http://ls.st/39v>`_.
    """
    logger = logging.getLogger(__name__)
    logger.debug('s3upload.upload({0}, {1}, {2})'.format(
        bucket_name, path_prefix, source_dir))

    session = boto3.session.Session(
        profile_name=aws_profile,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    s3 = session.resource('s3')
    bucket = s3.Bucket(bucket_name)

    metadata = {}
    if surrogate_key is not None:
        metadata['surrogate-key'] = surrogate_key
    if surrogate_control is not None:
        metadata['surrogate-control'] = surrogate_control

    manager = ObjectManager(session, bucket_name, path_prefix)

    for (rootdir, dirnames, filenames) in os.walk(source_dir):
        # name of root directory on S3 bucket
        bucket_root = os.path.relpath(rootdir, start=source_dir)
        if bucket_root in ('.', '/'):
            bucket_root = ''

        # Delete bucket directories that no longer exist in source
        bucket_dirnames = manager.list_dirnames_in_directory(bucket_root)
        for bucket_dirname in bucket_dirnames:
            if bucket_dirname not in dirnames:
                logger.debug('Deleting bucket directory {0}'.format(
                    bucket_dirname))
                manager.delete_directory(bucket_dirname)

        # Delete files that no longer exist in source
        bucket_filenames = manager.list_filenames_in_directory(bucket_root)
        for bucket_filename in bucket_filenames:
            if bucket_filename not in filenames:
                bucket_filename = os.path.join(bucket_root, bucket_filename)
                logger.debug(
                    'Deleting bucket file {0}'.format(bucket_filename))
                manager.delete_file(bucket_filename)

        # Upload files in directory
        for filename in filenames:
            local_path = os.path.join(rootdir, filename)
            bucket_path = os.path.join(path_prefix, bucket_root, filename)
            logger.debug('Uploading to {0}'.format(bucket_path))
            upload_file(local_path, bucket_path, bucket,
                        metadata=metadata, acl=acl,
                        cache_control=cache_control)

        # Upload a directory redirect object
        if upload_dir_redirect_objects is True:
            bucket_dir_path = os.path.join(path_prefix, bucket_root)
            create_dir_redirect_object(
                bucket_dir_path, bucket,
                metadata=metadata, acl=acl,
                cache_control=cache_control)
python
{ "resource": "" }
q278829
upload_file
test
def upload_file(local_path, bucket_path, bucket,
                metadata=None, acl=None, cache_control=None):
    """Upload a file to the S3 bucket.

    This function uses the mimetypes module to guess and then set the
    Content-Type and Encoding-Type headers.

    Parameters
    ----------
    local_path : `str`
        Full path to a file on the local file system.
    bucket_path : `str`
        Destination path (also known as the key name) of the file in the
        S3 bucket.
    bucket : boto3 Bucket instance
        S3 bucket.
    metadata : `dict`, optional
        Header metadata values. These keys will appear in headers as
        ``x-amz-meta-*``.
    acl : `str`, optional
        A pre-canned access control list. See
        https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
        Default is `None`, meaning that no ACL is applied to the object.
    cache_control : `str`, optional
        The cache-control header value. For example, ``'max-age=31536000'``.
    """
    logger = logging.getLogger(__name__)

    extra_args = {}
    if acl is not None:
        extra_args['ACL'] = acl
    if metadata is not None and len(metadata) > 0:  # avoid empty Metadata
        extra_args['Metadata'] = metadata
    if cache_control is not None:
        extra_args['CacheControl'] = cache_control

    # guess_type returns None if it cannot detect a type
    content_type, content_encoding = mimetypes.guess_type(local_path,
                                                          strict=False)
    if content_type is not None:
        extra_args['ContentType'] = content_type

    logger.debug(str(extra_args))

    obj = bucket.Object(bucket_path)
    # no return status from the upload_file api
    obj.upload_file(local_path, ExtraArgs=extra_args)
python
{ "resource": "" }
q278830
upload_object
test
def upload_object(bucket_path, bucket, content='',
                  metadata=None, acl=None, cache_control=None,
                  content_type=None):
    """Upload an arbitrary object to an S3 bucket.

    Parameters
    ----------
    bucket_path : `str`
        Destination path (also known as the key name) of the file in the
        S3 bucket.
    content : `str` or `bytes`, optional
        Object content.
    bucket : boto3 Bucket instance
        S3 bucket.
    metadata : `dict`, optional
        Header metadata values. These keys will appear in headers as
        ``x-amz-meta-*``.
    acl : `str`, optional
        A pre-canned access control list. See
        https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
        Default is `None`, meaning that no ACL is applied to the object.
    cache_control : `str`, optional
        The cache-control header value. For example, ``'max-age=31536000'``.
    content_type : `str`, optional
        The object's content type (such as ``text/html``). If left unset,
        no MIME type is passed to boto3 (which defaults to
        ``binary/octet-stream``).
    """
    obj = bucket.Object(bucket_path)

    # Object.put seems to be sensitive to None-type kwargs, so we filter
    # them out first
    args = {}
    if metadata is not None and len(metadata) > 0:  # avoid empty Metadata
        args['Metadata'] = metadata
    if acl is not None:
        args['ACL'] = acl
    if cache_control is not None:
        args['CacheControl'] = cache_control
    if content_type is not None:
        args['ContentType'] = content_type

    obj.put(Body=content, **args)
python
{ "resource": "" }
q278831
ObjectManager.list_filenames_in_directory
test
def list_filenames_in_directory(self, dirname):
    """List all file-type object names that exist at the root of this
    bucket directory.

    Parameters
    ----------
    dirname : `str`
        Directory name in the bucket relative to ``bucket_root/``.

    Returns
    -------
    filenames : `list`
        List of file names (`str`), relative to ``bucket_root/``, that
        exist at the root of ``dirname``.
    """
    prefix = self._create_prefix(dirname)
    filenames = []
    for obj in self._bucket.objects.filter(Prefix=prefix):
        if obj.key.endswith('/'):
            # a directory redirect object, not a file
            continue
        obj_dirname = os.path.dirname(obj.key)
        if obj_dirname == prefix:
            # object is at root of directory
            filenames.append(os.path.relpath(obj.key, start=prefix))
    return filenames
python
{ "resource": "" }
q278832
ObjectManager.list_dirnames_in_directory
test
def list_dirnames_in_directory(self, dirname):
    """List all names of directories that exist at the root of this bucket
    directory.

    Note that *directories* don't exist in S3; rather, directories are
    inferred from path names.

    Parameters
    ----------
    dirname : `str`
        Directory name in the bucket relative to ``bucket_root``.

    Returns
    -------
    dirnames : `list`
        List of directory names (`str`), relative to ``bucket_root/``,
        that exist at the root of ``dirname``.
    """
    prefix = self._create_prefix(dirname)
    dirnames = []
    for obj in self._bucket.objects.filter(Prefix=prefix):
        # get directory name of every object under this path prefix
        dirname = os.path.dirname(obj.key)

        # dirname is empty if the object happens to be the directory
        # redirect object for the prefix directory (directory redirect
        # objects are named after directories and have metadata that
        # tells Fastly to redirect the browser to the index.html
        # contained in the directory).
        if dirname == '':
            dirname = obj.key + '/'

        # Strip out the path prefix from the directory name
        rel_dirname = os.path.relpath(dirname, start=prefix)

        # If there's only one part then this directory is at the root
        # relative to the prefix. We want this.
        dir_parts = rel_dirname.split('/')
        if len(dir_parts) == 1:
            dirnames.append(dir_parts[0])

    # The above algorithm finds root directories for all *files* in
    # subdirectories; trim down to the unique set.
    dirnames = list(set(dirnames))

    # Remove posix-like relative directory names that can appear
    # in the bucket listing.
    for filtered_dir in ('.', '..'):
        if filtered_dir in dirnames:
            dirnames.remove(filtered_dir)

    return dirnames
python
{ "resource": "" }
q278833
ObjectManager._create_prefix
test
def _create_prefix(self, dirname):
    """Make an absolute directory path in the bucket for dirname, which
    is assumed relative to the self._bucket_root prefix directory.
    """
    if dirname in ('.', '/'):
        dirname = ''
    # Strip the trailing slash from the prefix for comparisons;
    # os.path.dirname() returns directory names without a trailing /
    prefix = os.path.join(self._bucket_root, dirname)
    prefix = prefix.rstrip('/')
    return prefix
python
{ "resource": "" }
q278834
ObjectManager.delete_file
test
def delete_file(self, filename):
    """Delete a file from the bucket.

    Parameters
    ----------
    filename : `str`
        Name of the file, relative to ``bucket_root/``.
    """
    key = os.path.join(self._bucket_root, filename)
    objects = list(self._bucket.objects.filter(Prefix=key))
    for obj in objects:
        obj.delete()
python
{ "resource": "" }
q278835
ensure_login
test
def ensure_login(ctx):
    """Ensure a token is in the Click context object or authenticate and
    obtain the token from LTD Keeper.

    Parameters
    ----------
    ctx : `click.Context`
        The Click context. ``ctx.obj`` must be a `dict` that contains keys:
        ``keeper_hostname``, ``username``, ``password``, ``token``. This
        context object is prepared by the main Click group,
        `ltdconveyor.cli.main.main`.
    """
    logger = logging.getLogger(__name__)
    logger.info('utils name %r', __name__)
    if ctx.obj['token'] is None:
        if ctx.obj['username'] is None or ctx.obj['password'] is None:
            # UsageError aborts the command with a usage message, so no
            # explicit exit call is needed after the raise.
            raise click.UsageError(
                'Use `ltd -u <username> -p <password> COMMAND` to '
                'authenticate to the LTD Keeper server.')
        logger.debug(
            'About to get token for user %s at %s',
            ctx.obj['username'], ctx.obj['keeper_hostname'])
        token = get_keeper_token(
            ctx.obj['keeper_hostname'],
            ctx.obj['username'],
            ctx.obj['password'])
        ctx.obj['token'] = token
        logger.debug(
            'Got token for user %s at %s',
            ctx.obj['username'], ctx.obj['keeper_hostname'])
    else:
        logger.debug('Token already exists.')
python
{ "resource": "" }
q278836
Five.loud
test
def loud(self, lang='english'):
    """Speak loudly! FIVE! Use upper case!"""
    lang_method = getattr(self, lang, None)
    if lang_method:
        return lang_method().upper()
    else:
        return self.english().upper()
python
{ "resource": "" }
q278837
delete_dir
test
def delete_dir(bucket_name, root_path,
               aws_access_key_id=None, aws_secret_access_key=None,
               aws_profile=None):
    """Delete all objects in the S3 bucket named ``bucket_name`` that are
    found in the ``root_path`` directory.

    Parameters
    ----------
    bucket_name : `str`
        Name of an S3 bucket.
    root_path : `str`
        Directory in the S3 bucket that will be deleted.
    aws_access_key_id : `str`
        The access key for your AWS account. Also set
        ``aws_secret_access_key``.
    aws_secret_access_key : `str`
        The secret key for your AWS account.
    aws_profile : `str`, optional
        Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
        of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
        credentials.

    Raises
    ------
    ltdconveyor.s3.S3Error
        Thrown by any unexpected faults from the S3 API.
    """
    logger = logging.getLogger(__name__)

    session = boto3.session.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        # honour the documented aws_profile argument
        profile_name=aws_profile)
    s3 = session.resource('s3')
    client = s3.meta.client

    # Normalize the directory path into a trailing-slash prefix so that
    # only objects inside the directory are matched.
    if not root_path.endswith('/'):
        root_path += '/'

    paginator = client.get_paginator('list_objects_v2')
    pages = paginator.paginate(Bucket=bucket_name, Prefix=root_path)

    keys = dict(Objects=[])
    for item in pages.search('Contents'):
        try:
            keys['Objects'].append({'Key': item['Key']})
        except TypeError:
            # item is None; nothing to delete
            continue
        # Delete immediately when 1000 objects are listed;
        # the delete_objects method can only take a maximum of 1000 keys
        if len(keys['Objects']) >= 1000:
            try:
                client.delete_objects(Bucket=bucket_name, Delete=keys)
            except Exception:
                message = 'Error deleting objects from %r' % root_path
                logger.exception(message)
                raise S3Error(message)
            keys = dict(Objects=[])

    # Delete remaining keys
    if len(keys['Objects']) > 0:
        try:
            client.delete_objects(Bucket=bucket_name, Delete=keys)
        except Exception:
            message = 'Error deleting objects from %r' % root_path
            logger.exception(message)
            raise S3Error(message)
python
{ "resource": "" }
q278838
home_url
test
def home_url():
    """Get project's home URL based on settings.PROJECT_HOME_NAMESPACE.

    Returns None if PROJECT_HOME_NAMESPACE is not defined in settings.
    """
    try:
        return reverse(home_namespace)
    except Exception:
        url = home_namespace
        try:
            validate_url = URLValidator()
            if '://' not in url:
                url = 'http://' + url
            validate_url(url)
            return url
        except ValidationError:
            return None
python
{ "resource": "" }
q278839
silence_without_namespace
test
def silence_without_namespace(f):
    """Decorator to silence template tags if 'PROJECT_HOME_NAMESPACE' is not
    defined in settings.

    Usage Example:

        from django import template

        register = template.Library()

        @register.simple_tag
        @silence_without_namespace
        def a_template_tag(*args):
            ...
    """
    @wraps(f)
    def wrapped(label=None):
        if not home_namespace:
            return ''
        if label:
            return f(label)
        else:
            return f(home_label)
    return wrapped
python
{ "resource": "" }
q278840
project_home_breadcrumb_bs3
test
def project_home_breadcrumb_bs3(label):
    """A template tag to return the project's home URL and label formatted
    as a Bootstrap 3 breadcrumb.

    PROJECT_HOME_NAMESPACE must be defined in settings, for example:

        PROJECT_HOME_NAMESPACE = 'project_name:index_view'

    Usage Example:

        {% load project_home_tags %}

        <ol class="breadcrumb">
          {% project_home_breadcrumb_bs3 %} {# <--- #}
          <li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
          <li class="active">Object Detail</li>
        </ol>

    This gets converted into:

        <ol class="breadcrumb">
          <li><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
          <li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
          <li class="active">Object Detail</li>
        </ol>

    By default, the link's text is 'Home'. A project-wide label can be
    defined with PROJECT_HOME_LABEL in settings. Both the default and the
    project-wide label can be overridden by passing a string to the
    template tag. For example:

        {% project_home_breadcrumb_bs3 'Custom Label' %}
    """
    url = home_url()
    if url:
        return format_html(
            '<li><a href="{}">{}</a></li>', url, label)
    else:
        return format_html('<li>{}</li>', label)
python
{ "resource": "" }
q278841
project_home_breadcrumb_bs4
test
def project_home_breadcrumb_bs4(label):
    """A template tag to return the project's home URL and label formatted
    as a Bootstrap 4 breadcrumb.

    PROJECT_HOME_NAMESPACE must be defined in settings, for example:

        PROJECT_HOME_NAMESPACE = 'project_name:index_view'

    Usage Example:

        {% load project_home_tags %}

        <ol class="breadcrumb">
          {% project_home_breadcrumb_bs4 %} {# <--- #}
          <li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
          <li class="breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
        </ol>

    This gets converted into:

        <ol class="breadcrumb">
          <li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
          <li class="breadcrumb-item" aria-label="breadcrumb"><a href="{% url 'app:namespace' %}">List of Objects</a></li>
          <li class="breadcrumb-item active" aria-label="breadcrumb" aria-current="page">Object Detail</li>
        </ol>

    By default, the link's text is 'Home'. A project-wide label can be
    defined with PROJECT_HOME_LABEL in settings. Both the default and the
    project-wide label can be overridden by passing a string to the
    template tag. For example:

        {% project_home_breadcrumb_bs4 'Custom Label' %}
    """
    url = home_url()
    if url:
        return format_html(
            '<li class="breadcrumb-item" aria-label="breadcrumb">'
            '<a href="{}">{}</a></li>',
            url, label)
    else:
        return format_html(
            '<li class="breadcrumb-item" aria-label="breadcrumb">{}</li>',
            label)
python
{ "resource": "" }
q278842
get_interaction_energy
test
def get_interaction_energy(ampal_objs, ff=None, assign_ff=True):
    """Calculates the interaction energy between AMPAL objects.

    Parameters
    ----------
    ampal_objs: [AMPAL Object]
        A list of any AMPAL objects with `get_atoms` methods.
    ff: BuffForceField, optional
        The force field to be used for scoring. If no force field is
        provided then the most current version of the BUDE force field
        will be used.
    assign_ff: bool, optional
        If true, then force field assignment on the AMPAL object will be
        updated.

    Returns
    -------
    BUFF_score: BUFFScore
        A BUFFScore object with information about each of the interactions
        and the atoms involved.
    """
    if ff is None:
        ff = FORCE_FIELDS['bude_2016v1']
    if assign_ff:
        for ampal_obj in ampal_objs:
            assign_force_field(ampal_obj, ff)
    interactions = find_inter_ampal(ampal_objs, ff.distance_cutoff)
    buff_score = score_interactions(interactions, ff)
    return buff_score
python
{ "resource": "" }
q278843
get_internal_energy
test
def get_internal_energy(ampal_obj, ff=None, assign_ff=True):
    """Calculates the internal energy of the AMPAL object.

    Parameters
    ----------
    ampal_obj: AMPAL Object
        Any AMPAL object with a `get_atoms` method.
    ff: BuffForceField, optional
        The force field to be used for scoring. If no force field is
        provided then the most current version of the BUDE force field
        will be used.
    assign_ff: bool, optional
        If true, then force field assignment on the AMPAL object will be
        updated.

    Returns
    -------
    BUFF_score: BUFFScore
        A BUFFScore object with information about each of the interactions
        and the atoms involved.
    """
    if ff is None:
        ff = FORCE_FIELDS['bude_2016v1']
    if assign_ff:
        assign_force_field(ampal_obj, ff)
    interactions = find_intra_ampal(ampal_obj, ff.distance_cutoff)
    buff_score = score_interactions(interactions, ff)
    return buff_score
python
{ "resource": "" }
q278844
_BaseSampler.hotspots
test
def hotspots(self):
    """
    Get lines sampled across all threads, in order from most to least
    sampled.
    """
    rooted_leaf_samples, _ = self.live_data_copy()
    line_samples = {}
    for _, counts in rooted_leaf_samples.items():
        for key, count in counts.items():
            line_samples.setdefault(key, 0)
            line_samples[key] += count
    return sorted(
        line_samples.items(), key=lambda v: v[1], reverse=True)
python
{ "resource": "" }
q278845
get_keeper_token
test
def get_keeper_token(host, username, password):
    """Get a temporary auth token from LTD Keeper.

    Parameters
    ----------
    host : `str`
        Hostname of the LTD Keeper API (e.g.,
        ``'https://keeper.lsst.codes'``).
    username : `str`
        Username.
    password : `str`
        Password.

    Returns
    -------
    token : `str`
        LTD Keeper API token.

    Raises
    ------
    KeeperError
        Raised if the LTD Keeper API cannot return a token.
    """
    token_endpoint = urljoin(host, '/token')
    r = requests.get(token_endpoint, auth=(username, password))
    if r.status_code != 200:
        raise KeeperError('Could not authenticate to {0}: error {1:d}\n{2}'
                          .format(host, r.status_code, r.json()))
    return r.json()['token']
python
{ "resource": "" }
q278846
upload
test
def upload(ctx, product, git_ref, dirname, aws_id, aws_secret, ci_env,
           on_travis_push, on_travis_pr, on_travis_api, on_travis_cron,
           skip_upload):
    """Upload a new site build to LSST the Docs."""
    logger = logging.getLogger(__name__)

    if skip_upload:
        click.echo('Skipping ltd upload.')
        sys.exit(0)

    logger.debug('CI environment: %s', ci_env)
    logger.debug('Travis events settings. '
                 'On Push: %r, PR: %r, API: %r, Cron: %r',
                 on_travis_push, on_travis_pr, on_travis_api,
                 on_travis_cron)

    # Abort upload on Travis CI under certain events
    if ci_env == 'travis' and \
            _should_skip_travis_event(
                on_travis_push, on_travis_pr,
                on_travis_api, on_travis_cron):
        sys.exit(0)

    # Authenticate to LTD Keeper host
    ensure_login(ctx)

    # Detect git refs
    git_refs = _get_git_refs(ci_env, git_ref)

    build_resource = register_build(
        ctx.obj['keeper_hostname'],
        ctx.obj['token'],
        product,
        git_refs)
    logger.debug('Created build resource %r', build_resource)

    # Do the upload.
    # This cache_control is appropriate for builds since they're immutable.
    # The LTD Keeper server changes the cache settings when copying the
    # build over to be a mutable edition.
    upload_dir(
        build_resource['bucket_name'],
        build_resource['bucket_root_dir'],
        dirname,
        aws_access_key_id=aws_id,
        aws_secret_access_key=aws_secret,
        surrogate_key=build_resource['surrogate_key'],
        cache_control='max-age=31536000',
        surrogate_control=None,
        upload_dir_redirect_objects=True)
    logger.debug('Upload complete for %r', build_resource['self_url'])

    # Confirm upload
    confirm_build(build_resource['self_url'], ctx.obj['token'])
    logger.debug('Build %r complete', build_resource['self_url'])
python
{ "resource": "" }
q278847
_should_skip_travis_event
test
def _should_skip_travis_event(on_travis_push, on_travis_pr, on_travis_api,
                              on_travis_cron):
    """Detect if the upload should be skipped based on the
    ``TRAVIS_EVENT_TYPE`` environment variable.

    Returns
    -------
    should_skip : `bool`
        True if the upload should be skipped based on the combination of
        ``TRAVIS_EVENT_TYPE`` and user settings.
    """
    travis_event = os.getenv('TRAVIS_EVENT_TYPE')
    if travis_event is None:
        raise click.UsageError(
            'Using --travis but the TRAVIS_EVENT_TYPE '
            'environment variable is not detected.')

    if travis_event == 'push' and on_travis_push is False:
        click.echo('Skipping upload on Travis push event.')
        return True
    elif travis_event == 'pull_request' and on_travis_pr is False:
        click.echo('Skipping upload on Travis pull request event.')
        return True
    elif travis_event == 'api' and on_travis_api is False:
        click.echo('Skipping upload on Travis API event.')
        return True
    elif travis_event == 'cron' and on_travis_cron is False:
        click.echo('Skipping upload on Travis cron event.')
        return True
    else:
        return False
python
{ "resource": "" }
q278848
purge_key
test
def purge_key(surrogate_key, service_id, api_key):
    """Instant purge URLs with a given surrogate key from the Fastly caches.

    Parameters
    ----------
    surrogate_key : `str`
        Surrogate key header (``x-amz-meta-surrogate-key``) value of objects
        to purge from the Fastly cache.
    service_id : `str`
        Fastly service ID.
    api_key : `str`
        Fastly API key.

    Raises
    ------
    FastlyError
        Error with the Fastly API usage.

    Notes
    -----
    This function uses Fastly's ``/service/{service}/purge/{key}`` endpoint.
    See the `Fastly Purge documentation <http://ls.st/jxg>`_ for more
    information.

    For other Fastly APIs, consider using
    `fastly-py <https://github.com/fastly/fastly-py>`_.
    """
    logger = logging.getLogger(__name__)

    api_root = 'https://api.fastly.com'
    path = '/service/{service}/purge/{surrogate_key}'.format(
        service=service_id, surrogate_key=surrogate_key)
    logger.info('Fastly purge {0}'.format(path))
    r = requests.post(api_root + path,
                      headers={'Fastly-Key': api_key,
                               'Accept': 'application/json'})
    if r.status_code != 200:
        # call .json() so the error carries the response body, not the
        # bound method
        raise FastlyError(r.json())
python
{ "resource": "" }
q278849
register_build
test
def register_build(host, keeper_token, product, git_refs):
    """Register a new build for a product on LSST the Docs.

    Wraps ``POST /products/{product}/builds/``.

    Parameters
    ----------
    host : `str`
        Hostname of LTD Keeper API server.
    keeper_token : `str`
        Auth token (`ltdconveyor.keeper.get_keeper_token`).
    product : `str`
        Name of the product in the LTD Keeper service.
    git_refs : `list` of `str`
        List of Git refs that correspond to the version of the build. Git
        refs can be tags or branches.

    Returns
    -------
    build_info : `dict`
        LTD Keeper build resource.

    Raises
    ------
    ltdconveyor.keeper.KeeperError
        Raised if there is an error communicating with the LTD Keeper API.
    """
    data = {'git_refs': git_refs}
    endpoint_url = uritemplate.expand(
        urljoin(host, '/products/{p}/builds/'), p=product)
    r = requests.post(
        endpoint_url,
        auth=(keeper_token, ''),
        json=data)
    if r.status_code != 201:
        raise KeeperError(r.json())
    build_info = r.json()
    return build_info
python
{ "resource": "" }
q278850
confirm_build
test
def confirm_build(build_url, keeper_token):
    """Confirm a build upload is complete.

    Wraps ``PATCH /builds/{build}``.

    Parameters
    ----------
    build_url : `str`
        URL of the build resource. Given a build resource, this URL is
        available from the ``self_url`` field.
    keeper_token : `str`
        Auth token (`ltdconveyor.keeper.get_keeper_token`).

    Raises
    ------
    ltdconveyor.keeper.KeeperError
        Raised if there is an error communicating with the LTD Keeper API.
    """
    data = {'uploaded': True}
    r = requests.patch(
        build_url,
        auth=(keeper_token, ''),
        json=data)
    if r.status_code != 200:
        raise KeeperError(r)
python
{ "resource": "" }
q278851
deep_update
test
def deep_update(d, u):
    """Deeply updates a dictionary. List values are concatenated.

    Args:
        d (dict): First dictionary which will be updated
        u (dict): Second dictionary used to extend the first one

    Returns:
        dict: The merged dictionary
    """
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = deep_update(d.get(k, {}), v)
        elif isinstance(v, list):
            existing_elements = d.get(k, [])
            d[k] = existing_elements + [ele for ele in v
                                        if ele not in existing_elements]
        else:
            d[k] = v
    return d
python
{ "resource": "" }
q278852
main
test
def main(ctx, log_level, keeper_hostname, username, password):
    """ltd is a command-line client for LSST the Docs.

    Use ltd to upload new site builds, and to work with the LTD Keeper API.
    """
    ch = logging.StreamHandler()
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)8s %(name)s | %(message)s')
    ch.setFormatter(formatter)
    logger = logging.getLogger('ltdconveyor')
    logger.addHandler(ch)
    logger.setLevel(log_level.upper())

    # Subcommands should use the click.pass_obj decorator to get this
    # ctx.obj object as the first argument.
    ctx.obj = {
        'keeper_hostname': keeper_hostname,
        'username': username,
        'password': password,
        'token': None
    }
python
{ "resource": "" }
q278853
part_edit_cmd
test
def part_edit_cmd():
    'Edit a part from an OOXML Package without unzipping it'
    parser = argparse.ArgumentParser(
        description=inspect.getdoc(part_edit_cmd))
    parser.add_argument(
        'path',
        help='Path to part (including path to zip file, i.e. '
             './file.zipx/part)',
    )
    parser.add_argument(
        '--reformat-xml', action='store_true',
        help=(
            'run the content through an XML pretty-printer '
            'first for improved editability'
        ),
    )
    args = parser.parse_args()
    part_edit(args.path, args.reformat_xml)
python
{ "resource": "" }
q278854
pack_dir_cmd
test
def pack_dir_cmd():
    'List the contents of a subdirectory of a zipfile'
    # use this command's own docstring, not part_edit_cmd's
    parser = argparse.ArgumentParser(
        description=inspect.getdoc(pack_dir_cmd))
    parser.add_argument(
        'path',
        help=(
            'Path to list (including path to zip file, '
            'i.e. ./file.zipx or ./file.zipx/subdir)'
        ),
    )
    args = parser.parse_args()
    for item, is_file in sorted(list_contents(args.path)):
        prefix = 'd ' if not is_file else ' '
        msg = prefix + item
        print(msg)
python
{ "resource": "" }
q278855
split_all
test
def split_all(path):
    """
    Recursively call os.path.split until we have all of the components
    of a pathname suitable for passing back to os.path.join.
    """
    drive, path = os.path.splitdrive(path)
    head, tail = os.path.split(path)
    terminators = [os.path.sep, os.path.altsep, '']
    parts = split_all(head) if head not in terminators else [head]
    return [drive] + parts + [tail]
python
{ "resource": "" }
q278856
find_file
test
def find_file(path):
    """
    Given a path to a part in a zip file, return a path to the file and
    the path to the part.

    Assuming /foo.zipx exists as a file,

    >>> find_file('/foo.zipx/dir/part')  # doctest: +SKIP
    ('/foo.zipx', '/dir/part')
    >>> find_file('/foo.zipx')  # doctest: +SKIP
    ('/foo.zipx', '')
    """
    path_components = split_all(path)

    def get_assemblies():
        """
        Enumerate the various combinations of file paths and part paths.
        """
        for n in range(len(path_components), 0, -1):
            file_c = path_components[:n]
            part_c = path_components[n:] or ['']
            yield (os.path.join(*file_c), posixpath.join(*part_c))

    for file_path, part_path in get_assemblies():
        if os.path.isfile(file_path):
            return file_path, part_path
python
{ "resource": "" }
q278857
EditableFile.get_editor
test
def get_editor(filepath):
    """
    Give preference to an XML_EDITOR or EDITOR defined in the environment.
    Otherwise use notepad on Windows and edit on other platforms.
    """
    default_editor = ['edit', 'notepad'][sys.platform.startswith('win32')]
    return os.environ.get(
        'XML_EDITOR',
        os.environ.get('EDITOR', default_editor),
    )
python
{ "resource": "" }
q278858
FileHeaderChecker.process_module
test
def process_module(self, node):
    """Process the astroid node stream."""
    if self.config.file_header:
        # use raw strings for the regex so '\A' is not treated as an
        # escape sequence
        if sys.version_info[0] < 3:
            pattern = re.compile(
                r'\A' + self.config.file_header,
                re.LOCALE | re.MULTILINE)
        else:
            # The use of re.LOCALE is discouraged in python 3
            pattern = re.compile(
                r'\A' + self.config.file_header,
                re.MULTILINE)
        content = None
        with node.stream() as stream:
            # Explicit decoding required by python 3
            content = stream.read().decode('utf-8')
        matches = pattern.findall(content)
        if len(matches) != 1:
            self.add_message('invalid-file-header', 1,
                             args=self.config.file_header)
python
{ "resource": "" }
q278859
ChartsGenerator.gen
test
def gen(self, slug, name, dataobj, xfield, yfield, time_unit=None,
        chart_type="line", width=800, height=300,
        color=Color(), size=Size(), scale=Scale(zero=False), shape=Shape(),
        filepath=None, html_before="", html_after=""):
    """
    Generates an html chart from either a pandas dataframe, a dictionary,
    a list or an Altair Data object, and optionally writes it to a file.
    """
    chart_obj = self.serialize(dataobj, xfield, yfield, time_unit,
                               chart_type, width, height, color, size,
                               scale, shape)
    html = self.html(slug, name, chart_obj, filepath,
                     html_before, html_after)
    return html
python
{ "resource": "" }
q278860
ChartsGenerator.html
test
def html(self, slug, name, chart_obj, filepath=None,
         html_before="", html_after=""):
    """
    Generate html from an Altair chart object and optionally write it
    to a file.
    """
    try:
        html = ""
        if name:
            html = "<h3>" + name + "</h3>"
        json_data = chart_obj.to_json()
        json_data = self._patch_json(json_data)
        html = html_before + html + \
            self._json_to_html(slug, json_data) + html_after
    except Exception as e:
        tr.new(e)
        tr.check()
    # generate file
    if filepath is not None:
        self._write_file(slug, filepath, html)
        return None
    else:
        return html
python
{ "resource": "" }
q278861
ChartsGenerator.serialize
test
def serialize(self, dataobj, xfield, yfield, time_unit=None,
              chart_type="line", width=800,
              height=300, color=None, size=None,
              scale=Scale(zero=False), shape=None, options={}):
    """
    Serialize to an Altair chart object from either a pandas dataframe,
    a dictionary, a list or an Altair Data object
    """
    dataset = dataobj
    if self._is_dict(dataobj) is True:
        dataset = self._dict_to_df(dataobj, xfield, yfield)
    elif isinstance(dataobj, list):
        dataset = Data(values=dataobj)
    # forward scale so the parameter is not silently ignored
    xencode, yencode = self._encode_fields(
        xfield, yfield, time_unit, scale)
    opts = dict(x=xencode, y=yencode)
    if color is not None:
        opts["color"] = color
    if size is not None:
        opts["size"] = size
    if shape is not None:
        opts["shape"] = shape
    chart = self._chart_class(dataset, chart_type, **options).encode(
        **opts
    ).configure_cell(
        width=width,
        height=height,
    )
    return chart
python
{ "resource": "" }
q278862
ChartsGenerator._patch_json
test
def _patch_json(self, json_data):
    """
    Patch the Altair generated json to the newest Vega Lite spec
    """
    json_data = json.loads(json_data)
    # add schema
    json_data["$schema"] = "https://vega.github.io/schema/vega-lite/2.0.0-beta.15.json"
    # add top level width and height
    json_data["width"] = json_data["config"]["cell"]["width"]
    json_data["height"] = json_data["config"]["cell"]["height"]
    del json_data["config"]["cell"]
    return json.dumps(json_data)
python
{ "resource": "" }
q278863
ChartsGenerator._json_to_html
test
def _json_to_html(self, slug, json_data):
    """
    Generates html from Vega lite data
    """
    html = '<div id="chart-' + slug + '"></div>'
    html += '<script>'
    html += 'var s' + slug + ' = ' + json_data + ';'
    html += 'vega.embed("#chart-' + slug + '", s' + slug + ');'
    html += '</script>'
    return html
python
{ "resource": "" }
q278864
ChartsGenerator._dict_to_df
test
def _dict_to_df(self, dictobj, xfield, yfield):
    """
    Converts a dictionary to a pandas dataframe
    """
    x = []
    y = []
    for datapoint in dictobj:
        x.append(datapoint)
        y.append(dictobj[datapoint])
    df = pd.DataFrame({xfield[0]: x, yfield[0]: y})
    return df
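# Conversion sketch: dict keys become the x column, values the y column;
# only position 0 of each field tuple is consulted here. `charts` is a
# ChartsGenerator instance as in the sketch further up (an assumption).
df = charts._dict_to_df(
    {'Jan': 10, 'Feb': 14},
    ('month', 'month:O'), ('sales', 'sales:Q'))
# df now has columns 'month' and 'sales', one row per key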
python
{ "resource": "" }
q278865
ChartsGenerator._write_file
test
def _write_file(self, slug, folderpath, html):
    """
    Writes a chart's html to a file
    """
    # check directories
    if not os.path.isdir(folderpath):
        try:
            os.makedirs(folderpath)
        except Exception as e:
            tr.err(e)
    # construct file path
    filepath = folderpath + "/" + slug + ".html"
    # write the file; the context manager closes it even on error
    try:
        with open(filepath, "w") as filex:
            filex.write(html)
    except Exception as e:
        tr.err(e)
python
{ "resource": "" }
q278866
ChartsGenerator._chart_class
test
def _chart_class(self, df, chart_type, **kwargs): """ Get the right chart class from a string """ if chart_type == "bar": return Chart(df).mark_bar(**kwargs) elif chart_type == "circle": return Chart(df).mark_circle(**kwargs) elif chart_type == "line": return Chart(df).mark_line(**kwargs) elif chart_type == "point": return Chart(df).mark_point(**kwargs) elif chart_type == "area": return Chart(df).mark_area(**kwargs) elif chart_type == "tick": return Chart(df).mark_tick(**kwargs) elif chart_type == "text": return Chart(df).mark_text(**kwargs) elif chart_type == "square": return Chart(df).mark_square(**kwargs) elif chart_type == "rule": return Chart(df).mark_rule(**kwargs) return None
python
{ "resource": "" }
q278867
ChartsGenerator._encode_fields
test
def _encode_fields(self, xfield, yfield, time_unit=None, scale=Scale(zero=False)): """ Encode the fields in Altair format """ if scale is None: scale = Scale() xfieldtype = xfield[1] yfieldtype = yfield[1] x_options = None if len(xfield) > 2: x_options = xfield[2] y_options = None if len(yfield) > 2: y_options = yfield[2] if time_unit is not None: if x_options is None: xencode = X(xfieldtype, timeUnit=time_unit) else: xencode = X( xfieldtype, axis=Axis(**x_options), timeUnit=time_unit, scale=scale ) else: if x_options is None: xencode = X(xfieldtype) else: xencode = X( xfieldtype, axis=Axis(**x_options), scale=scale ) if y_options is None: yencode = Y(yfieldtype, scale=scale) else: yencode = Y( yfieldtype, axis=Axis(**y_options), scale=scale ) return xencode, yencode
python
{ "resource": "" }
q278868
ghuser_role
test
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]): """Link to a GitHub user. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization. """ # app = inliner.document.settings.env.app #app.info('user link %r' % text) ref = 'https://www.github.com/' + text node = nodes.reference(rawtext, text, refuri=ref, **options) return [node], []
python
{ "resource": "" }
q278869
_infer_tarball_url
test
def _infer_tarball_url():
    """Returns the tarball URL inferred from an app.json, if present."""
    try:
        with click.open_file('app.json', 'r') as f:
            contents = f.read()

        app_json = json.loads(contents)
    except IOError:
        return None

    repository = app_json.get('repository')

    if not repository:
        return None

    return repository + '/tarball/master/'
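# Illustrative round trip (repository URL is hypothetical): with this
# app.json in the working directory, the helper appends the tarball suffix.
with click.open_file('app.json', 'w') as f:
    f.write(json.dumps({'repository': 'https://github.com/example/app'}))
assert _infer_tarball_url() == 'https://github.com/example/app/tarball/master/'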
python
{ "resource": "" }
q278870
up
test
def up(tarball_url, auth_token, env, app_name):
    """Brings up a Heroku app."""
    tarball_url = tarball_url or _infer_tarball_url()

    if not tarball_url:
        click.echo('No tarball URL found.')
        sys.exit(1)

    if env:
        # Split ["KEY=value", ...] into {"KEY": "value", ...};
        # maxsplit=1 keeps values that themselves contain '=' intact
        env = dict(arg.split('=', 1) for arg in env)

    happy = Happy(auth_token=auth_token)

    click.echo('Creating app... ', nl=False)

    build_id, app_name = happy.create(
        tarball_url=tarball_url,
        env=env,
        app_name=app_name,
    )

    click.echo(app_name)

    click.echo('Building... ', nl=False)

    happy.wait(build_id)

    _write_app_name(app_name)

    click.echo('done')

    click.echo("It's up! :) https://%s.herokuapp.com" % app_name)
python
{ "resource": "" }
q278871
down
test
def down(auth_token, force, app_name): """Brings down a Heroku app.""" if not app_name: click.echo( 'WARNING: Inferring the app name when deleting is deprecated. ' 'Starting with happy 2.0, the app_name parameter will be required.' ) app_name = app_name or _read_app_name() if not app_name: click.echo('No app name given.') sys.exit(1) if not force: click.confirm( 'Are you sure you want to delete %s?' % app_name, abort=True, ) happy = Happy(auth_token=auth_token) click.echo('Destroying app %s... ' % app_name, nl=False) happy.delete(app_name=app_name) _delete_app_name_file() click.echo('done') click.echo("It's down. :(")
python
{ "resource": "" }
q278872
iter_attribute
test
def iter_attribute(iterable_name) -> Union[Iterable, Callable]:
    """Decorator implementing the Iterator interface in a nicer manner.

    Example
    -------
    @iter_attribute('my_attr')
    class DecoratedClass:
        ...

    Warning:
    ========
    When using PyCharm or MYPY you'll probably see issues with the decorated
    class not being recognized as an Iterator. That's an issue which I could
    not overcome yet; it's probably due to the fact that interpretation of
    the object is done statically rather than dynamically. MYPY checks for
    definitions of methods in class code, which changes at runtime. Since
    __iter__ and __next__ are added dynamically, MYPY cannot find those
    defined in objects before an object of the class is created.

    Possible workarounds for this issue are:

    1. Define a ``dummy`` __iter__ method like:

        @iter_attribute('attr')
        class Test:

            def __init__(self) -> None:
                self.attr = [1, 2, 3]

            def __iter__(self):
                pass

    2. After creating the object use cast or an assert denoting that the
       particular instance inherits from collections.abc.Iterator:

        assert isinstance(my_object, collections.abc.Iterator)

    :param iterable_name: string representing the attribute name which has
        to be iterated
    :return: DecoratedClass with implemented '__iter__' and '__next__'
        methods.
    """
    def create_new_class(decorated_class) -> Union[Iterable, Callable]:
        """Class extender implementing __next__ and __iter__ methods.

        :param decorated_class: class to be extended with the iterator
            interface
        :return: new class
        """
        assert inspect.isclass(decorated_class), 'You can only decorate class objects!'
        assert isinstance(iterable_name, str), 'Please provide attribute name string'
        decorated_class.iterator_attr_index = 0

        def __iter__(instance) -> Iterable:
            """Implement __iter__ method

            :param instance: __iter__ uses instance of class which is being extended
            :return: instance of decorated_class
            """
            return instance

        def __next__(instance) -> Any:
            """Implement __next__ method

            :param instance: __next__ uses instance of class which is being extended
            :return: next value from the iterated attribute
            """
            assert hasattr(instance, iterable_name), \
                'Decorated object does not have attribute named {}'.format(iterable_name)
            # collections.Iterable was removed in Python 3.10; use the abc module
            assert isinstance(getattr(instance, iterable_name), collections.abc.Iterable), \
                '{} of object {} is not iterable'.format(iterable_name, instance.__class__.__name__)

            ind = instance.iterator_attr_index
            while ind < len(getattr(instance, iterable_name)):
                val = getattr(instance, iterable_name)[ind]
                instance.iterator_attr_index += 1
                return val
            instance.iterator_attr_index = 0
            raise StopIteration

        dct = dict(decorated_class.__dict__)
        dct['__iter__'] = __iter__
        dct['__next__'] = __next__
        dct['iterator_attr_index'] = decorated_class.iterator_attr_index

        return type(decorated_class.__name__, (collections.abc.Iterable,), dct)
    return create_new_class
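# Minimal end-to-end sketch mirroring the docstring example: the decorated
# class iterates over its 'attr' list and the index resets on exhaustion,
# so the instance can be iterated again.
@iter_attribute('attr')
class Bag:
    def __init__(self) -> None:
        self.attr = [1, 2, 3]

bag = Bag()
assert list(bag) == [1, 2, 3]
assert list(bag) == [1, 2, 3]  # index was reset by StopIteration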
python
{ "resource": "" }
q278873
binary
test
def binary(length):
    """
    returns a random string that represents a binary number

    :param length: number of bits
    """
    num = randint(1, 999999)
    mask = '0' * length
    return (mask + ''.join([str(num >> i & 1) for i in range(7, -1, -1)]))[-length:]
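# Quick property check: the result is `length` characters of '0'/'1'
# (left-padded with zeros once length exceeds the 8 random bits drawn).
bits = binary(8)
assert len(bits) == 8 and set(bits) <= {'0', '1'}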
python
{ "resource": "" }
q278874
ipaddress
test
def ipaddress(not_valid=None):
    """
    returns a string representing a random ip address

    :param not_valid: if passed, must be a list of integers representing
        class A networks to be excluded
    """
    not_valid_class_A = not_valid or []

    class_a = [r for r in range(1, 256) if r not in not_valid_class_A]
    shuffle(class_a)
    first = class_a.pop()

    return ".".join([str(first), str(randrange(1, 256)),
                     str(randrange(1, 256)), str(randrange(1, 256))])
python
{ "resource": "" }
q278875
date
test
def date(start, end): """Get a random date between two dates""" stime = date_to_timestamp(start) etime = date_to_timestamp(end) ptime = stime + random.random() * (etime - stime) return datetime.date.fromtimestamp(ptime)
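# Hedged sketch: bounds are datetime.date values, which the module-level
# date_to_timestamp helper (not shown here) is assumed to accept.
d = date(datetime.date(2020, 1, 1), datetime.date(2020, 12, 31))
assert datetime.date(2020, 1, 1) <= d <= datetime.date(2020, 12, 31)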
python
{ "resource": "" }
q278876
Heroku._get_session
test
def _get_session(self): """Returns a prepared ``Session`` instance.""" session = Session() session.headers = { 'Content-type': 'application/json', 'Accept': 'application/vnd.heroku+json; version=3', } if self._auth_token: session.trust_env = False # Effectively disable netrc auth session.headers['Authorization'] = 'Bearer %s' % self._auth_token return session
python
{ "resource": "" }
q278877
Heroku.api_request
test
def api_request(self, method, endpoint, data=None, *args, **kwargs):
    """Sends an API request to Heroku.

    :param method: HTTP method.
    :param endpoint: API endpoint, e.g. ``/apps``.
    :param data: A dict sent as JSON in the body of the request.

    :returns: A dict representation of the JSON response.
    """
    session = self._get_session()

    api_root = 'https://api.heroku.com'
    url = api_root + endpoint

    if data:
        data = json.dumps(data)

    response = session.request(method, url, data=data, *args, **kwargs)

    if not response.ok:
        try:
            message = response.json().get('message')
        except ValueError:
            message = response.content

        raise APIError(message)

    return response.json()
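# Hedged usage sketch -- this performs a real HTTP call, the token is a
# placeholder, and the Heroku() constructor shape is assumed from how
# self._auth_token is used in _get_session above.
heroku = Heroku(auth_token='<your-token>')
apps = heroku.api_request('GET', '/apps')  # list of app dicts on success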
python
{ "resource": "" }
q278878
Heroku.create_build
test
def create_build(self, tarball_url, env=None, app_name=None): """Creates an app-setups build. Returns response data as a dict. :param tarball_url: URL of a tarball containing an ``app.json``. :param env: Dict containing environment variable overrides. :param app_name: Name of the Heroku app to create. :returns: Response data as a ``dict``. """ data = { 'source_blob': { 'url': tarball_url } } if env: data['overrides'] = {'env': env} if app_name: data['app'] = {'name': app_name} return self.api_request('POST', '/app-setups', data=data)
python
{ "resource": "" }
q278879
Heroku.check_build_status
test
def check_build_status(self, build_id): """Checks the status of an app-setups build. :param build_id: ID of the build to check. :returns: ``True`` if succeeded, ``False`` if pending. """ data = self.api_request('GET', '/app-setups/%s' % build_id) status = data.get('status') if status == 'pending': return False elif status == 'succeeded': return True else: raise BuildError(str(data))
python
{ "resource": "" }
q278880
sequence
test
def sequence(prefix, cache=None): """ generator that returns an unique string :param prefix: prefix of string :param cache: cache used to store the last used number >>> next(sequence('abc')) 'abc-0' >>> next(sequence('abc')) 'abc-1' """ if cache is None: cache = _sequence_counters if cache == -1: cache = {} if prefix not in cache: cache[prefix] = infinite() while cache[prefix]: yield "{0}-{1}".format(prefix, next(cache[prefix]))
python
{ "resource": "" }
q278881
memoize
test
def memoize(func): """Decorator that stores function results in a dictionary to be used on the next time that the same arguments were informed.""" func._cache_dict = {} @wraps(func) def _inner(*args, **kwargs): return _get_memoized_value(func, args, kwargs) return _inner
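# Classic demonstration (relies on the module's _get_memoized_value
# helper): exponential fib becomes linear because repeated calls hit
# func._cache_dict.
@memoize
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(30)  # computed once per distinct n
fib(30)  # answered from the cache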
python
{ "resource": "" }
q278882
unique
test
def unique(func, num_args=0, max_attempts=100, cache=None): """ wraps a function so that produce unique results :param func: :param num_args: >>> import random >>> choices = [1,2] >>> a = unique(random.choice, 1) >>> a,b = a(choices), a(choices) >>> a == b False """ if cache is None: cache = _cache_unique @wraps(func) def wrapper(*args): key = "%s_%s" % (str(func.__name__), str(args[:num_args])) attempt = 0 while attempt < max_attempts: attempt += 1 drawn = cache.get(key, []) result = func(*args) if result not in drawn: drawn.append(result) cache[key] = drawn return result raise MaxAttemptException() return wrapper
python
{ "resource": "" }
q278883
BaseCommand.register_sub_commands
test
def register_sub_commands(self, parser): """ Add any sub commands to the argument parser. :param parser: The argument parser object """ sub_commands = self.get_sub_commands() if sub_commands: sub_parsers = parser.add_subparsers(dest=self.sub_parser_dest_name) for name, cls in sub_commands.items(): cmd = cls(name) sub_parser = sub_parsers.add_parser(name, help=name, description=cmd.get_help(), formatter_class=cmd.get_formatter_class()) cmd.add_args(sub_parser) cmd.register_sub_commands(sub_parser)
python
{ "resource": "" }
q278884
BaseCommand.get_root_argparser
test
def get_root_argparser(self): """ Gets the root argument parser object. """ return self.arg_parse_class(description=self.get_help(), formatter_class=self.get_formatter_class())
python
{ "resource": "" }
q278885
BaseCommand.get_description
test
def get_description(self):
    """
    Gets the description of the command. If it's not supplied the first
    sentence of the doc string is used.
    """
    if self.description:
        return self.description
    elif self.__doc__ and self.__doc__.strip():
        return self.__doc__.strip().split('.')[0] + '.'
    else:
        return ''
python
{ "resource": "" }
q278886
BaseCommand.get_help
test
def get_help(self):
    """
    Gets the help text for the command. If it's not supplied the doc
    string is used.
    """
    if self.help:
        return self.help
    elif self.__doc__ and self.__doc__.strip():
        return self.__doc__.strip()
    else:
        return ''
python
{ "resource": "" }
q278887
BaseCommand.run
test
def run(self, args=None): """ Runs the command passing in the parsed arguments. :param args: The arguments to run the command with. If ``None`` the arguments are gathered from the argument parser. This is automatically set when calling sub commands and in most cases should not be set for the root command. :return: The status code of the action (0 on success) """ args = args or self.parse_args() sub_command_name = getattr(args, self.sub_parser_dest_name, None) if sub_command_name: sub_commands = self.get_sub_commands() cmd_cls = sub_commands[sub_command_name] return cmd_cls(sub_command_name).run(args) return self.action(args) or 0
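# Hedged sketch of the dispatch flow: a root command routing to one sub
# command. Constructor and action() signatures are inferred from how run()
# and register_sub_commands() use them above; nothing else is assumed.
class Greet(BaseCommand):
    """Print a greeting."""
    def action(self, args):
        print('hello')
        return 0

class Tool(BaseCommand):
    """Root command."""
    def get_sub_commands(self):
        return {'greet': Greet}

# `mytool greet` on the command line ends up in Greet.action
exit_code = Tool('mytool').run()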
python
{ "resource": "" }
q278888
Encoder.encode
test
def encode(self, *args, **kwargs):
    """Encode wrapper for a dataset with maximum value.

    Datasets can be one or two dimensional.
    Strings are ignored as ordinal encoding."""
    if isinstance(args[0], str):
        return self.encode([args[0]], **kwargs)
    elif isinstance(args[0], int) or isinstance(args[0], float):
        return self.encode([[args[0]]], **kwargs)
    if len(args) > 1:
        dataset = args
    else:
        dataset = args[0]
    typemap = list(map(type, dataset))
    code = self.encoding[0]
    if type('') in typemap:
        data = ','.join(map(str, dataset))
    elif type([]) in typemap or type(()) in typemap:
        data = self.codeset['char'].join(map(self.encodedata, dataset))
    elif len(dataset) == 1 and hasattr(dataset[0], '__iter__'):
        data = self.encodedata(dataset[0])
    else:
        try:
            data = self.encodedata(dataset)
        except ValueError:
            # `unicode` only exists on Python 2; str covers both here
            data = self.encodedata(','.join(map(str, dataset)))
    if '.' not in data and code == 't':
        code = 'e'
    return '%s%s:%s' % (code, self.series, data)
python
{ "resource": "" }
q278889
GoldenCheetahClient.get_athletes
test
def get_athletes(self): """Get all available athletes This method is cached to prevent unnecessary calls to GC. """ response = self._get_request(self.host) response_buffer = StringIO(response.text) return pd.read_csv(response_buffer)
python
{ "resource": "" }
q278890
GoldenCheetahClient.get_last_activities
test
def get_last_activities(self, n):
    """Get all activity data for the last n activities

    Keyword arguments:
    n -- number of most recent activities to retrieve
    """
    filenames = self.get_activity_list().iloc[-n:].filename.tolist()
    last_activities = [self.get_activity(f) for f in filenames]
    return last_activities
python
{ "resource": "" }
q278891
GoldenCheetahClient._request_activity_list
test
def _request_activity_list(self, athlete):
    """Actually do the request for activity list

    This call is slow and therefore this method is memory cached.

    Keyword arguments:
    athlete -- Full name of athlete
    """
    response = self._get_request(self._athlete_endpoint(athlete))
    response_buffer = StringIO(response.text)

    activity_list = pd.read_csv(
        filepath_or_buffer=response_buffer,
        parse_dates={'datetime': ['date', 'time']},
        sep=r',\s*',
        engine='python'
    )

    activity_list.rename(columns=lambda x: x.lower(), inplace=True)
    activity_list.rename(
        columns=lambda x: '_' + x if x[0].isdigit() else x, inplace=True)

    activity_list['has_hr'] = activity_list.average_heart_rate.map(bool)
    activity_list['has_spd'] = activity_list.average_speed.map(bool)
    activity_list['has_pwr'] = activity_list.average_power.map(bool)
    # was average_heart_rate, an apparent copy-paste slip; assumes the CSV
    # exposes an average_cadence column
    activity_list['has_cad'] = activity_list.average_cadence.map(bool)

    activity_list['data'] = pd.Series(dtype=np.dtype("object"))

    return activity_list
python
{ "resource": "" }
q278892
GoldenCheetahClient._request_activity_data
test
def _request_activity_data(self, athlete, filename):
    """Actually do the request for activity filename

    This call is slow and therefore this method is memory cached.

    Keyword arguments:
    athlete -- Full name of athlete
    filename -- filename of requested activity (e.g. '2015_04_29_09_03_16.json')
    """
    response = self._get_request(self._activity_endpoint(athlete, filename)).json()

    activity = pd.DataFrame(response['RIDE']['SAMPLES'])
    activity = activity.rename(columns=ACTIVITY_COLUMN_TRANSLATION)

    activity.index = pd.to_timedelta(activity.time, unit='s')
    activity.drop('time', axis=1, inplace=True)

    return activity[[i for i in ACTIVITY_COLUMN_ORDER if i in activity.columns]]
python
{ "resource": "" }
q278893
GoldenCheetahClient._athlete_endpoint
test
def _athlete_endpoint(self, athlete): """Construct athlete endpoint from host and athlete name Keyword arguments: athlete -- Full athlete name """ return '{host}{athlete}'.format( host=self.host, athlete=quote_plus(athlete) )
python
{ "resource": "" }
q278894
GoldenCheetahClient._activity_endpoint
test
def _activity_endpoint(self, athlete, filename):
    """Construct activity endpoint from host, athlete name and filename

    Keyword arguments:
    athlete -- Full athlete name
    filename -- filename of requested activity (e.g. '2015_04_29_09_03_16.json')
    """
    return '{host}{athlete}/activity/{filename}'.format(
        host=self.host,
        athlete=quote_plus(athlete),
        filename=filename
    )
python
{ "resource": "" }
q278895
GoldenCheetahClient._get_request
test
def _get_request(self, endpoint): """Do actual GET request to GC REST API Also validates responses. Keyword arguments: endpoint -- full endpoint for GET request """ try: response = requests.get(endpoint) except requests.exceptions.RequestException: raise GoldenCheetahNotAvailable(endpoint) if response.text.startswith('unknown athlete'): match = re.match( pattern='unknown athlete (?P<athlete>.+)', string=response.text) raise AthleteDoesNotExist( athlete=match.groupdict()['athlete']) elif response.text == 'file not found': match = re.match( pattern='.+/activity/(?P<filename>.+)', string=endpoint) raise ActivityDoesNotExist( filename=match.groupdict()['filename']) return response
python
{ "resource": "" }
q278896
Happy.create
test
def create(self, tarball_url, env=None, app_name=None): """Creates a Heroku app-setup build. :param tarball_url: URL of a tarball containing an ``app.json``. :param env: (optional) Dict containing environment variable overrides. :param app_name: (optional) Name of the Heroku app to create. :returns: A tuple with ``(build_id, app_name)``. """ data = self._api.create_build( tarball_url=tarball_url, env=env, app_name=app_name, ) return (data['id'], data['app']['name'])
python
{ "resource": "" }
q278897
url_with_auth
test
def url_with_auth(regex, view, kwargs=None, name=None, prefix=''):
    """
    Wrap a URL pattern with API authentication. If view is a string, it
    must be a full dotted path.
    """
    from djapiauth.auth import api_auth
    if isinstance(view, six.string_types):  # view is a string, must be full path
        return url(regex, api_auth(import_by_path(prefix + "." + view if prefix else view)))
    elif isinstance(view, (list, tuple)):  # include
        # pass the arguments through by keyword; the original positional
        # call slotted `name` into url()'s `kwargs` parameter
        return url(regex, view, kwargs=kwargs, name=name, prefix=prefix)
    else:  # view is an object
        return url(regex, api_auth(view))
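# Hedged urlconf sketch (old-style Django, matching the url() calls above;
# 'myapp.views.ping' is a hypothetical dotted path):
urlpatterns = [
    url_with_auth(r'^ping/$', 'myapp.views.ping'),
    url_with_auth(r'^echo/$', ping_view),  # ping_view: a plain view object
]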
python
{ "resource": "" }
q278898
title
test
def title(languages=None, genders=None): """ returns a random title .. code-block:: python >>> d.title() u'Mrs.' >>> d.title(['es']) u'El Sr.' >>> d.title(None, [GENDER_FEMALE]) u'Mrs.' :param languages: list of allowed languages. ['en'] if None :param genders: list of allowed genders. (GENDER_FEMALE, GENDER_MALE) if None """ languages = languages or ['en'] genders = genders or (GENDER_FEMALE, GENDER_MALE) choices = _get_titles(languages) gender = {'m':0, 'f':1}[random.choice(genders)] return random.choice(choices)[gender]
python
{ "resource": "" }
q278899
person
test
def person(languages=None, genders=None):
    """
    returns a random tuple representing person information

    .. code-block:: python

        >>> d.person()
        (u'Derren', u'Powell', u'Mr.', 'm')

        >>> d.person(genders=['f'])
        (u'Marge', u'Rodriguez', u'Mrs.', 'f')

        >>> d.person(['es'],['m'])
        (u'Jacinto', u'Delgado', u'El Sr.', 'm')

    :param languages: list of allowed languages. ['en'] if None
    :param genders: list of allowed genders. (GENDER_FEMALE, GENDER_MALE) if None
    """
    languages = languages or ['en']
    genders = genders or (GENDER_FEMALE, GENDER_MALE)
    lang = random.choice(languages)
    g = random.choice(genders)
    t = title([lang], [g])
    return first_name([lang], [g]), last_name([lang]), t, g
python
{ "resource": "" }