Dataset columns:
    repo: string (length 7 to 54)
    path: string (length 4 to 192)
    url: string (length 87 to 284)
    code: string (length 78 to 104k)
    code_tokens: list
    docstring: string (length 1 to 46.9k)
    docstring_tokens: list
    language: string (1 distinct value)
    partition: string (3 distinct values)
mitsei/dlkit
dlkit/json_/repository/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L274-L299
def get_assets_by_genus_type(self, asset_genus_type):
    """Gets an ``AssetList`` corresponding to the given asset genus ``Type``
    which does not include assets of types derived from the specified ``Type``.

    In plenary mode, the returned list contains all known assets or an error
    results. Otherwise, the returned list may contain only those assets that
    are accessible through this session.

    arg:    asset_genus_type (osid.type.Type): an asset genus type
    return: (osid.repository.AssetList) - the returned ``Asset list``
    raise:  NullArgument - ``asset_genus_type`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_genus_type
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('repository',
                                     collection='Asset',
                                     runtime=self._runtime)
    result = collection.find(
        dict({'genusTypeId': str(asset_genus_type)},
             **self._view_filter())).sort('_id', DESCENDING)
    return objects.AssetList(result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_assets_by_genus_type", "(", "self", ",", "asset_genus_type", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resources_by_genus_type", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidated", "(", "'repository'", ",", "collection", "=", "'Asset'", ",", "runtime", "=", "self", ".", "_runtime", ")", "result", "=", "collection", ".", "find", "(", "dict", "(", "{", "'genusTypeId'", ":", "str", "(", "asset_genus_type", ")", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", ".", "sort", "(", "'_id'", ",", "DESCENDING", ")", "return", "objects", ".", "AssetList", "(", "result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets an ``AssetList`` corresponding to the given asset genus ``Type`` which does not include assets of types derived from the specified ``Type``. In plenary mode, the returned list contains all known assets or an error results. Otherwise, the returned list may contain only those assets that are accessible through this session. arg: asset_genus_type (osid.type.Type): an asset genus type return: (osid.repository.AssetList) - the returned ``Asset list`` raise: NullArgument - ``asset_genus_type`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "an", "AssetList", "corresponding", "to", "the", "given", "asset", "genus", "Type", "which", "does", "not", "include", "assets", "of", "types", "derived", "from", "the", "specified", "Type", "." ]
python
train
pydata/xarray
xarray/backends/netcdf3.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/backends/netcdf3.py#L26-L45
def coerce_nc3_dtype(arr):
    """Coerce an array to a data type that can be stored in a netCDF-3 file

    This function performs the following dtype conversions:
        int64 -> int32
        bool -> int8

    Data is checked for equality, or equivalence (non-NaN values) with
    `np.allclose` with the default keyword arguments.
    """
    dtype = str(arr.dtype)
    if dtype in _nc3_dtype_coercions:
        new_dtype = _nc3_dtype_coercions[dtype]
        # TODO: raise a warning whenever casting the data-type instead?
        cast_arr = arr.astype(new_dtype)
        if not (cast_arr == arr).all():
            raise ValueError('could not safely cast array from dtype %s to %s'
                             % (dtype, new_dtype))
        arr = cast_arr
    return arr
[ "def", "coerce_nc3_dtype", "(", "arr", ")", ":", "dtype", "=", "str", "(", "arr", ".", "dtype", ")", "if", "dtype", "in", "_nc3_dtype_coercions", ":", "new_dtype", "=", "_nc3_dtype_coercions", "[", "dtype", "]", "# TODO: raise a warning whenever casting the data-type instead?", "cast_arr", "=", "arr", ".", "astype", "(", "new_dtype", ")", "if", "not", "(", "cast_arr", "==", "arr", ")", ".", "all", "(", ")", ":", "raise", "ValueError", "(", "'could not safely cast array from dtype %s to %s'", "%", "(", "dtype", ",", "new_dtype", ")", ")", "arr", "=", "cast_arr", "return", "arr" ]
Coerce an array to a data type that can be stored in a netCDF-3 file This function performs the following dtype conversions: int64 -> int32 bool -> int8 Data is checked for equality, or equivalence (non-NaN values) with `np.allclose` with the default keyword arguments.
[ "Coerce", "an", "array", "to", "a", "data", "type", "that", "can", "be", "stored", "in", "a", "netCDF", "-", "3", "file" ]
python
train
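A minimal usage sketch for the coerce_nc3_dtype record above, assuming xarray and numpy are installed and that the function is importable from the module path shown in the record (behavior may vary across xarray versions). Per the docstring, an int64 array whose values survive the cast comes back as int32, and a ValueError is raised otherwise.

import numpy as np
from xarray.backends.netcdf3 import coerce_nc3_dtype  # module path taken from the record above

arr = np.arange(3, dtype="int64")
out = coerce_nc3_dtype(arr)   # values fit in int32, so a cast copy is returned
print(out.dtype)              # int32, per the int64 -> int32 rule in the docstring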
saltstack/salt
salt/states/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L431-L556
def _gen_recurse_managed_files(
        name,
        source,
        keep_symlinks=False,
        include_pat=None,
        exclude_pat=None,
        maxdepth=None,
        include_empty=False,
        **kwargs):
    '''
    Generate the list of files managed by a recurse state
    '''
    # Convert a relative path generated from salt master paths to an OS path
    # using "name" as the base directory
    def full_path(master_relpath):
        return os.path.join(name, _salt_to_os_path(master_relpath))

    # Process symlinks and return the updated filenames list
    def process_symlinks(filenames, symlinks):
        for lname, ltarget in six.iteritems(symlinks):
            srelpath = posixpath.relpath(lname, srcpath)
            if not _is_valid_relpath(srelpath, maxdepth=maxdepth):
                continue
            if not salt.utils.stringutils.check_include_exclude(
                    srelpath, include_pat, exclude_pat):
                continue
            # Check for all paths that begin with the symlink
            # and axe it leaving only the dirs/files below it.
            # This needs to use list() otherwise they reference
            # the same list.
            _filenames = list(filenames)
            for filename in _filenames:
                if filename.startswith(lname):
                    log.debug(
                        '** skipping file ** %s, it intersects a symlink',
                        filename
                    )
                    filenames.remove(filename)
            # Create the symlink along with the necessary dirs.
            # The dir perms/ownership will be adjusted later
            # if needed
            managed_symlinks.add((srelpath, ltarget))

            # Add the path to the keep set in case clean is set to True
            keep.add(full_path(srelpath))
        vdir.update(keep)
        return filenames

    managed_files = set()
    managed_directories = set()
    managed_symlinks = set()
    keep = set()
    vdir = set()

    srcpath, senv = salt.utils.url.parse(source)
    if senv is None:
        senv = __env__
    if not srcpath.endswith(posixpath.sep):
        # we're searching for things that start with this *directory*.
        srcpath = srcpath + posixpath.sep
    fns_ = __salt__['cp.list_master'](senv, srcpath)

    # If we are instructed to keep symlinks, then process them.
    if keep_symlinks:
        # Make this global so that emptydirs can use it if needed.
        symlinks = __salt__['cp.list_master_symlinks'](senv, srcpath)
        fns_ = process_symlinks(fns_, symlinks)

    for fn_ in fns_:
        if not fn_.strip():
            continue

        # fn_ here is the absolute (from file_roots) source path of
        # the file to copy from; it is either a normal file or an
        # empty dir(if include_empty==true).
        relname = salt.utils.data.decode(posixpath.relpath(fn_, srcpath))
        if not _is_valid_relpath(relname, maxdepth=maxdepth):
            continue

        # Check if it is to be excluded. Match only part of the path
        # relative to the target directory
        if not salt.utils.stringutils.check_include_exclude(
                relname, include_pat, exclude_pat):
            continue
        dest = full_path(relname)
        dirname = os.path.dirname(dest)
        keep.add(dest)

        if dirname not in vdir:
            # verify the directory perms if they are set
            managed_directories.add(dirname)
            vdir.add(dirname)

        src = salt.utils.url.create(fn_, saltenv=senv)
        managed_files.add((dest, src))

    if include_empty:
        mdirs = __salt__['cp.list_master_dirs'](senv, srcpath)
        for mdir in mdirs:
            relname = posixpath.relpath(mdir, srcpath)
            if not _is_valid_relpath(relname, maxdepth=maxdepth):
                continue
            if not salt.utils.stringutils.check_include_exclude(
                    relname, include_pat, exclude_pat):
                continue
            mdest = full_path(relname)
            # Check for symlinks that happen to point to an empty dir.
            if keep_symlinks:
                islink = False
                for link in symlinks:
                    if mdir.startswith(link, 0):
                        log.debug(
                            '** skipping empty dir ** %s, it intersects a '
                            'symlink', mdir
                        )
                        islink = True
                        break
                if islink:
                    continue
            managed_directories.add(mdest)
            keep.add(mdest)

    return managed_files, managed_directories, managed_symlinks, keep
[ "def", "_gen_recurse_managed_files", "(", "name", ",", "source", ",", "keep_symlinks", "=", "False", ",", "include_pat", "=", "None", ",", "exclude_pat", "=", "None", ",", "maxdepth", "=", "None", ",", "include_empty", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Convert a relative path generated from salt master paths to an OS path", "# using \"name\" as the base directory", "def", "full_path", "(", "master_relpath", ")", ":", "return", "os", ".", "path", ".", "join", "(", "name", ",", "_salt_to_os_path", "(", "master_relpath", ")", ")", "# Process symlinks and return the updated filenames list", "def", "process_symlinks", "(", "filenames", ",", "symlinks", ")", ":", "for", "lname", ",", "ltarget", "in", "six", ".", "iteritems", "(", "symlinks", ")", ":", "srelpath", "=", "posixpath", ".", "relpath", "(", "lname", ",", "srcpath", ")", "if", "not", "_is_valid_relpath", "(", "srelpath", ",", "maxdepth", "=", "maxdepth", ")", ":", "continue", "if", "not", "salt", ".", "utils", ".", "stringutils", ".", "check_include_exclude", "(", "srelpath", ",", "include_pat", ",", "exclude_pat", ")", ":", "continue", "# Check for all paths that begin with the symlink", "# and axe it leaving only the dirs/files below it.", "# This needs to use list() otherwise they reference", "# the same list.", "_filenames", "=", "list", "(", "filenames", ")", "for", "filename", "in", "_filenames", ":", "if", "filename", ".", "startswith", "(", "lname", ")", ":", "log", ".", "debug", "(", "'** skipping file ** %s, it intersects a symlink'", ",", "filename", ")", "filenames", ".", "remove", "(", "filename", ")", "# Create the symlink along with the necessary dirs.", "# The dir perms/ownership will be adjusted later", "# if needed", "managed_symlinks", ".", "add", "(", "(", "srelpath", ",", "ltarget", ")", ")", "# Add the path to the keep set in case clean is set to True", "keep", ".", "add", "(", "full_path", "(", "srelpath", ")", ")", "vdir", ".", "update", "(", "keep", ")", "return", "filenames", "managed_files", "=", "set", "(", ")", "managed_directories", "=", "set", "(", ")", "managed_symlinks", "=", "set", "(", ")", "keep", "=", "set", "(", ")", "vdir", "=", "set", "(", ")", "srcpath", ",", "senv", "=", "salt", ".", "utils", ".", "url", ".", "parse", "(", "source", ")", "if", "senv", "is", "None", ":", "senv", "=", "__env__", "if", "not", "srcpath", ".", "endswith", "(", "posixpath", ".", "sep", ")", ":", "# we're searching for things that start with this *directory*.", "srcpath", "=", "srcpath", "+", "posixpath", ".", "sep", "fns_", "=", "__salt__", "[", "'cp.list_master'", "]", "(", "senv", ",", "srcpath", ")", "# If we are instructed to keep symlinks, then process them.", "if", "keep_symlinks", ":", "# Make this global so that emptydirs can use it if needed.", "symlinks", "=", "__salt__", "[", "'cp.list_master_symlinks'", "]", "(", "senv", ",", "srcpath", ")", "fns_", "=", "process_symlinks", "(", "fns_", ",", "symlinks", ")", "for", "fn_", "in", "fns_", ":", "if", "not", "fn_", ".", "strip", "(", ")", ":", "continue", "# fn_ here is the absolute (from file_roots) source path of", "# the file to copy from; it is either a normal file or an", "# empty dir(if include_empty==true).", "relname", "=", "salt", ".", "utils", ".", "data", ".", "decode", "(", "posixpath", ".", "relpath", "(", "fn_", ",", "srcpath", ")", ")", "if", "not", "_is_valid_relpath", "(", "relname", ",", "maxdepth", "=", "maxdepth", ")", ":", "continue", "# Check if it is to be excluded. 
Match only part of the path", "# relative to the target directory", "if", "not", "salt", ".", "utils", ".", "stringutils", ".", "check_include_exclude", "(", "relname", ",", "include_pat", ",", "exclude_pat", ")", ":", "continue", "dest", "=", "full_path", "(", "relname", ")", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "dest", ")", "keep", ".", "add", "(", "dest", ")", "if", "dirname", "not", "in", "vdir", ":", "# verify the directory perms if they are set", "managed_directories", ".", "add", "(", "dirname", ")", "vdir", ".", "add", "(", "dirname", ")", "src", "=", "salt", ".", "utils", ".", "url", ".", "create", "(", "fn_", ",", "saltenv", "=", "senv", ")", "managed_files", ".", "add", "(", "(", "dest", ",", "src", ")", ")", "if", "include_empty", ":", "mdirs", "=", "__salt__", "[", "'cp.list_master_dirs'", "]", "(", "senv", ",", "srcpath", ")", "for", "mdir", "in", "mdirs", ":", "relname", "=", "posixpath", ".", "relpath", "(", "mdir", ",", "srcpath", ")", "if", "not", "_is_valid_relpath", "(", "relname", ",", "maxdepth", "=", "maxdepth", ")", ":", "continue", "if", "not", "salt", ".", "utils", ".", "stringutils", ".", "check_include_exclude", "(", "relname", ",", "include_pat", ",", "exclude_pat", ")", ":", "continue", "mdest", "=", "full_path", "(", "relname", ")", "# Check for symlinks that happen to point to an empty dir.", "if", "keep_symlinks", ":", "islink", "=", "False", "for", "link", "in", "symlinks", ":", "if", "mdir", ".", "startswith", "(", "link", ",", "0", ")", ":", "log", ".", "debug", "(", "'** skipping empty dir ** %s, it intersects a '", "'symlink'", ",", "mdir", ")", "islink", "=", "True", "break", "if", "islink", ":", "continue", "managed_directories", ".", "add", "(", "mdest", ")", "keep", ".", "add", "(", "mdest", ")", "return", "managed_files", ",", "managed_directories", ",", "managed_symlinks", ",", "keep" ]
Generate the list of files managed by a recurse state
[ "Generate", "the", "list", "of", "files", "managed", "by", "a", "recurse", "state" ]
python
train
inveniosoftware/kwalitee
kwalitee/kwalitee.py
https://github.com/inveniosoftware/kwalitee/blob/9124f8f55b15547fef08c6c43cabced314e70674/kwalitee/kwalitee.py#L193-L245
def _check_signatures(lines, **kwargs):
    """Check that the signatures are valid.

    There should be at least three signatures. If not, one of them should be a
    trusted developer/reviewer.

    Formatting supported being: [signature] full name <email@address>

    :param lines: lines (lineno, content) to verify.
    :type lines: list
    :param signatures: list of supported signature
    :type signatures: list
    :param alt_signatures: list of alternative signatures, not counted
    :type alt_signatures: list
    :param trusted: list of trusted reviewers, the e-mail address.
    :type trusted: list
    :param min_reviewers: minimal number of reviewers needed. (Default 3)
    :type min_reviewers: int
    :return: errors as in (code, line number, *args)
    :rtype: list

    """
    trusted = kwargs.get("trusted", ())
    signatures = tuple(kwargs.get("signatures", ()))
    alt_signatures = tuple(kwargs.get("alt_signatures", ()))
    min_reviewers = kwargs.get("min_reviewers", 3)

    matching = []
    errors = []

    signatures += alt_signatures

    test_signatures = re.compile("^({0})".format("|".join(signatures)))
    test_alt_signatures = re.compile("^({0})".format("|".join(alt_signatures)))
    for i, line in lines:
        if signatures and test_signatures.search(line):
            if line.endswith("."):
                errors.append(("M191", i))
            if not alt_signatures or not test_alt_signatures.search(line):
                matching.append(line)
        else:
            errors.append(("M102", i))

    if not matching:
        errors.append(("M101", 1))
        errors.append(("M100", 1))
    elif len(matching) < min_reviewers:
        pattern = re.compile('|'.join(map(lambda x: '<' + re.escape(x) + '>',
                                          trusted)))
        trusted_matching = list(filter(None, map(pattern.search, matching)))
        if len(trusted_matching) == 0:
            errors.append(("M100", 1))

    return errors
[ "def", "_check_signatures", "(", "lines", ",", "*", "*", "kwargs", ")", ":", "trusted", "=", "kwargs", ".", "get", "(", "\"trusted\"", ",", "(", ")", ")", "signatures", "=", "tuple", "(", "kwargs", ".", "get", "(", "\"signatures\"", ",", "(", ")", ")", ")", "alt_signatures", "=", "tuple", "(", "kwargs", ".", "get", "(", "\"alt_signatures\"", ",", "(", ")", ")", ")", "min_reviewers", "=", "kwargs", ".", "get", "(", "\"min_reviewers\"", ",", "3", ")", "matching", "=", "[", "]", "errors", "=", "[", "]", "signatures", "+=", "alt_signatures", "test_signatures", "=", "re", ".", "compile", "(", "\"^({0})\"", ".", "format", "(", "\"|\"", ".", "join", "(", "signatures", ")", ")", ")", "test_alt_signatures", "=", "re", ".", "compile", "(", "\"^({0})\"", ".", "format", "(", "\"|\"", ".", "join", "(", "alt_signatures", ")", ")", ")", "for", "i", ",", "line", "in", "lines", ":", "if", "signatures", "and", "test_signatures", ".", "search", "(", "line", ")", ":", "if", "line", ".", "endswith", "(", "\".\"", ")", ":", "errors", ".", "append", "(", "(", "\"M191\"", ",", "i", ")", ")", "if", "not", "alt_signatures", "or", "not", "test_alt_signatures", ".", "search", "(", "line", ")", ":", "matching", ".", "append", "(", "line", ")", "else", ":", "errors", ".", "append", "(", "(", "\"M102\"", ",", "i", ")", ")", "if", "not", "matching", ":", "errors", ".", "append", "(", "(", "\"M101\"", ",", "1", ")", ")", "errors", ".", "append", "(", "(", "\"M100\"", ",", "1", ")", ")", "elif", "len", "(", "matching", ")", "<", "min_reviewers", ":", "pattern", "=", "re", ".", "compile", "(", "'|'", ".", "join", "(", "map", "(", "lambda", "x", ":", "'<'", "+", "re", ".", "escape", "(", "x", ")", "+", "'>'", ",", "trusted", ")", ")", ")", "trusted_matching", "=", "list", "(", "filter", "(", "None", ",", "map", "(", "pattern", ".", "search", ",", "matching", ")", ")", ")", "if", "len", "(", "trusted_matching", ")", "==", "0", ":", "errors", ".", "append", "(", "(", "\"M100\"", ",", "1", ")", ")", "return", "errors" ]
Check that the signatures are valid. There should be at least three signatures. If not, one of them should be a trusted developer/reviewer. Formatting supported being: [signature] full name <email@address> :param lines: lines (lineno, content) to verify. :type lines: list :param signatures: list of supported signature :type signatures: list :param alt_signatures: list of alternative signatures, not counted :type alt_signatures: list :param trusted: list of trusted reviewers, the e-mail address. :type trusted: list :param min_reviewers: minimal number of reviewers needed. (Default 3) :type min_reviewers: int :return: errors as in (code, line number, *args) :rtype: list
[ "Check", "that", "the", "signatures", "are", "valid", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/abstractcpu.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/abstractcpu.py#L897-L949
def execute(self):
    """
    Decode, and execute one instruction pointed by register PC
    """
    if issymbolic(self.PC):
        raise ConcretizeRegister(self, 'PC', policy='ALL')

    if not self.memory.access_ok(self.PC, 'x'):
        raise InvalidMemoryAccess(self.PC, 'x')

    self._publish('will_decode_instruction', self.PC)

    insn = self.decode_instruction(self.PC)
    self._last_pc = self.PC

    self._publish('will_execute_instruction', self.PC, insn)

    # FIXME (theo) why just return here?
    if insn.address != self.PC:
        return

    name = self.canonicalize_instruction_name(insn)

    if logger.level == logging.DEBUG:
        logger.debug(self.render_instruction(insn))
        for l in self.render_registers():
            register_logger.debug(l)

    try:
        if self._concrete and 'SYSCALL' in name:
            self.emu.sync_unicorn_to_manticore()
        if self._concrete and 'SYSCALL' not in name:
            self.emulate(insn)
            if self.PC == self._break_unicorn_at:
                logger.debug("Switching from Unicorn to Manticore")
                self._break_unicorn_at = None
                self._concrete = False
        else:
            implementation = getattr(self, name, None)

            if implementation is not None:
                implementation(*insn.operands)
            else:
                text_bytes = ' '.join('%02x' % x for x in insn.bytes)
                logger.warning("Unimplemented instruction: 0x%016x:\t%s\t%s\t%s",
                               insn.address, text_bytes,
                               insn.mnemonic, insn.op_str)
                self.backup_emulate(insn)
    except (Interruption, Syscall) as e:
        e.on_handled = lambda: self._publish_instruction_as_executed(insn)
        raise e
    else:
        self._publish_instruction_as_executed(insn)
[ "def", "execute", "(", "self", ")", ":", "if", "issymbolic", "(", "self", ".", "PC", ")", ":", "raise", "ConcretizeRegister", "(", "self", ",", "'PC'", ",", "policy", "=", "'ALL'", ")", "if", "not", "self", ".", "memory", ".", "access_ok", "(", "self", ".", "PC", ",", "'x'", ")", ":", "raise", "InvalidMemoryAccess", "(", "self", ".", "PC", ",", "'x'", ")", "self", ".", "_publish", "(", "'will_decode_instruction'", ",", "self", ".", "PC", ")", "insn", "=", "self", ".", "decode_instruction", "(", "self", ".", "PC", ")", "self", ".", "_last_pc", "=", "self", ".", "PC", "self", ".", "_publish", "(", "'will_execute_instruction'", ",", "self", ".", "PC", ",", "insn", ")", "# FIXME (theo) why just return here?", "if", "insn", ".", "address", "!=", "self", ".", "PC", ":", "return", "name", "=", "self", ".", "canonicalize_instruction_name", "(", "insn", ")", "if", "logger", ".", "level", "==", "logging", ".", "DEBUG", ":", "logger", ".", "debug", "(", "self", ".", "render_instruction", "(", "insn", ")", ")", "for", "l", "in", "self", ".", "render_registers", "(", ")", ":", "register_logger", ".", "debug", "(", "l", ")", "try", ":", "if", "self", ".", "_concrete", "and", "'SYSCALL'", "in", "name", ":", "self", ".", "emu", ".", "sync_unicorn_to_manticore", "(", ")", "if", "self", ".", "_concrete", "and", "'SYSCALL'", "not", "in", "name", ":", "self", ".", "emulate", "(", "insn", ")", "if", "self", ".", "PC", "==", "self", ".", "_break_unicorn_at", ":", "logger", ".", "debug", "(", "\"Switching from Unicorn to Manticore\"", ")", "self", ".", "_break_unicorn_at", "=", "None", "self", ".", "_concrete", "=", "False", "else", ":", "implementation", "=", "getattr", "(", "self", ",", "name", ",", "None", ")", "if", "implementation", "is", "not", "None", ":", "implementation", "(", "*", "insn", ".", "operands", ")", "else", ":", "text_bytes", "=", "' '", ".", "join", "(", "'%02x'", "%", "x", "for", "x", "in", "insn", ".", "bytes", ")", "logger", ".", "warning", "(", "\"Unimplemented instruction: 0x%016x:\\t%s\\t%s\\t%s\"", ",", "insn", ".", "address", ",", "text_bytes", ",", "insn", ".", "mnemonic", ",", "insn", ".", "op_str", ")", "self", ".", "backup_emulate", "(", "insn", ")", "except", "(", "Interruption", ",", "Syscall", ")", "as", "e", ":", "e", ".", "on_handled", "=", "lambda", ":", "self", ".", "_publish_instruction_as_executed", "(", "insn", ")", "raise", "e", "else", ":", "self", ".", "_publish_instruction_as_executed", "(", "insn", ")" ]
Decode, and execute one instruction pointed by register PC
[ "Decode", "and", "execute", "one", "instruction", "pointed", "by", "register", "PC" ]
python
valid
mwouts/jupytext
jupytext/contentsmanager.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/contentsmanager.py#L162-L171
def drop_paired_notebook(self, path):
    """Remove the current notebook from the list of paired notebooks"""
    if path not in self.paired_notebooks:
        return

    fmt, formats = self.paired_notebooks.pop(path)
    prev_paired_paths = paired_paths(path, fmt, formats)
    for alt_path, _ in prev_paired_paths:
        if alt_path in self.paired_notebooks:
            self.drop_paired_notebook(alt_path)
[ "def", "drop_paired_notebook", "(", "self", ",", "path", ")", ":", "if", "path", "not", "in", "self", ".", "paired_notebooks", ":", "return", "fmt", ",", "formats", "=", "self", ".", "paired_notebooks", ".", "pop", "(", "path", ")", "prev_paired_paths", "=", "paired_paths", "(", "path", ",", "fmt", ",", "formats", ")", "for", "alt_path", ",", "_", "in", "prev_paired_paths", ":", "if", "alt_path", "in", "self", ".", "paired_notebooks", ":", "self", ".", "drop_paired_notebook", "(", "alt_path", ")" ]
Remove the current notebook from the list of paired notebooks
[ "Remove", "the", "current", "notebook", "from", "the", "list", "of", "paired", "notebooks" ]
python
train
adamcharnock/swiftwind
swiftwind/costs/models.py
https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L196-L205
def disable_if_done(self, commit=True):
    """Set disabled=True if we have billed all we need to

    Will only have an effect on one-off costs.
    """
    if self._is_billing_complete() and not self.disabled:
        self.disabled = True

        if commit:
            self.save()
[ "def", "disable_if_done", "(", "self", ",", "commit", "=", "True", ")", ":", "if", "self", ".", "_is_billing_complete", "(", ")", "and", "not", "self", ".", "disabled", ":", "self", ".", "disabled", "=", "True", "if", "commit", ":", "self", ".", "save", "(", ")" ]
Set disabled=True if we have billed all we need to Will only have an effect on one-off costs.
[ "Set", "disabled", "=", "True", "if", "we", "have", "billed", "all", "we", "need", "to" ]
python
train
HDI-Project/MLPrimitives
mlprimitives/utils.py
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/utils.py#L18-L65
def image_transform(X, function, reshape_before=False, reshape_after=False,
                    width=None, height=None, **kwargs):
    """Apply a function image by image.

    Args:
        reshape_before: whether 1d array needs to be reshaped to a 2d image
        reshape_after: whether the returned values need to be reshaped back to a 1d array
        width: image width used to rebuild the 2d images. Required if the image is not square.
        height: image height used to rebuild the 2d images. Required if the image is not square.
    """
    if not callable(function):
        function = import_object(function)
    elif not callable(function):
        raise ValueError("function must be a str or a callable")

    flat_image = len(X[0].shape) == 1

    if reshape_before and flat_image:
        if not (width and height):
            side_length = math.sqrt(X.shape[1])
            if side_length.is_integer():
                side_length = int(side_length)
                width = side_length
                height = side_length
            else:
                raise ValueError("Image sizes must be given for non-square images")
    else:
        reshape_before = False

    new_X = []
    for image in X:
        if reshape_before:
            image = image.reshape((width, height))

        features = function(
            image,
            **kwargs
        )

        if reshape_after:
            features = np.reshape(features, X.shape[1])

        new_X.append(features)

    return np.array(new_X)
[ "def", "image_transform", "(", "X", ",", "function", ",", "reshape_before", "=", "False", ",", "reshape_after", "=", "False", ",", "width", "=", "None", ",", "height", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "callable", "(", "function", ")", ":", "function", "=", "import_object", "(", "function", ")", "elif", "not", "callable", "(", "function", ")", ":", "raise", "ValueError", "(", "\"function must be a str or a callable\"", ")", "flat_image", "=", "len", "(", "X", "[", "0", "]", ".", "shape", ")", "==", "1", "if", "reshape_before", "and", "flat_image", ":", "if", "not", "(", "width", "and", "height", ")", ":", "side_length", "=", "math", ".", "sqrt", "(", "X", ".", "shape", "[", "1", "]", ")", "if", "side_length", ".", "is_integer", "(", ")", ":", "side_length", "=", "int", "(", "side_length", ")", "width", "=", "side_length", "height", "=", "side_length", "else", ":", "raise", "ValueError", "(", "\"Image sizes must be given for non-square images\"", ")", "else", ":", "reshape_before", "=", "False", "new_X", "=", "[", "]", "for", "image", "in", "X", ":", "if", "reshape_before", ":", "image", "=", "image", ".", "reshape", "(", "(", "width", ",", "height", ")", ")", "features", "=", "function", "(", "image", ",", "*", "*", "kwargs", ")", "if", "reshape_after", ":", "features", "=", "np", ".", "reshape", "(", "features", ",", "X", ".", "shape", "[", "1", "]", ")", "new_X", ".", "append", "(", "features", ")", "return", "np", ".", "array", "(", "new_X", ")" ]
Apply a function image by image. Args: reshape_before: whether 1d array needs to be reshaped to a 2d image reshape_after: whether the returned values need to be reshaped back to a 1d array width: image width used to rebuild the 2d images. Required if the image is not square. height: image height used to rebuild the 2d images. Required if the image is not square.
[ "Apply", "a", "function", "image", "by", "image", "." ]
python
train
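A small usage sketch for the image_transform record above, assuming mlprimitives and numpy are installed and the module path in the record is importable; np.fliplr stands in for an arbitrary per-image function, and the 2x2 image size is inferred from the flat row length as the docstring describes.

import numpy as np
from mlprimitives.utils import image_transform  # module path taken from the record above

X = np.arange(8).reshape(2, 4)   # two flattened 2x2 "images"
out = image_transform(X, np.fliplr, reshape_before=True, reshape_after=True)
print(out.shape)                 # (2, 4): each row was reshaped, flipped, and flattened back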
amperser/proselint
proselint/checks/terms/venery.py
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/terms/venery.py#L20-L89
def check(text):
    """Check the text."""
    err = "oxford.venery_terms"
    msg = "The venery term is '{}'."

    term_list = [
        ["alligators", "congregation"],
        ["antelopes", "herd"],
        ["baboons", "troop"],
        ["badgers", "cete"],
        ["bats", "colony"],
        ["bears", "sloth"],
        ["buffalo", "herd"],
        ["bullfinches", "bellowing"],
        ["caribou", "herd"],
        ["cats", "glaring"],
        ["caterpillars", "army"],
        ["cockroaches", "intrusion"],
        ["coyotes", "pack"],
        ["crows", "murder"],
        ["dogs", "pack"],
        ["eagles", "convocation"],
        ["emus", "mob"],
        ["flamingos", "stand"],
        ["frogs", "army"],
        ["goldfinches", "charm"],
        ["gorillas", "band"],
        ["guineafowl", "rasp"],
        ["hedgehogs", "array"],
        ["herons", "siege"],
        ["hogs", "parcel"],
        ["hyenas", "cackle"],
        ["ibex", "herd"],
        ["iguanas", "mess"],
        ["lions", "pride"],
        ["locusts", "plague"],
        ["mackerel", "shoal"],
        ["mares", "stud"],
        ["minnows", "shoal"],
        ["moose", "herd"],
        ["mosquitoes", "scourge"],
        ["nightingales", "watch"],
        ["oysters", "bed"],
        ["partridges", "covey"],
        ["pelicans", "pod"],
        ["raccoons", "gaze"],
        ["ravens", "unkindness"],
        ["rhinoceroses", "crash"],
        ["sea urchins", "sea"],
        ["starlings", "murmuration"],
        ["toads", "knot"],
        ["wombats", "wisdom"],
        ["woodcocks", "fall"],
        ["woodpeckers", "descent"],
        ["wrens", "herd"],
    ]

    generic_terms = [
        "group",
        "bunch",
    ]

    list = []
    for term_pair in term_list:
        for generic in generic_terms:
            wrong = "a {} of {}".format(generic, term_pair[0])
            right = "a {} of {}".format(term_pair[1], term_pair[0])
            list += [[right, [wrong]]]

    return preferred_forms_check(text, list, err, msg)
[ "def", "check", "(", "text", ")", ":", "err", "=", "\"oxford.venery_terms\"", "msg", "=", "\"The venery term is '{}'.\"", "term_list", "=", "[", "[", "\"alligators\"", ",", "\"congregation\"", "]", ",", "[", "\"antelopes\"", ",", "\"herd\"", "]", ",", "[", "\"baboons\"", ",", "\"troop\"", "]", ",", "[", "\"badgers\"", ",", "\"cete\"", "]", ",", "[", "\"bats\"", ",", "\"colony\"", "]", ",", "[", "\"bears\"", ",", "\"sloth\"", "]", ",", "[", "\"buffalo\"", ",", "\"herd\"", "]", ",", "[", "\"bullfinches\"", ",", "\"bellowing\"", "]", ",", "[", "\"caribou\"", ",", "\"herd\"", "]", ",", "[", "\"cats\"", ",", "\"glaring\"", "]", ",", "[", "\"caterpillars\"", ",", "\"army\"", "]", ",", "[", "\"cockroaches\"", ",", "\"intrusion\"", "]", ",", "[", "\"coyotes\"", ",", "\"pack\"", "]", ",", "[", "\"crows\"", ",", "\"murder\"", "]", ",", "[", "\"dogs\"", ",", "\"pack\"", "]", ",", "[", "\"eagles\"", ",", "\"convocation\"", "]", ",", "[", "\"emus\"", ",", "\"mob\"", "]", ",", "[", "\"flamingos\"", ",", "\"stand\"", "]", ",", "[", "\"frogs\"", ",", "\"army\"", "]", ",", "[", "\"goldfinches\"", ",", "\"charm\"", "]", ",", "[", "\"gorillas\"", ",", "\"band\"", "]", ",", "[", "\"guineafowl\"", ",", "\"rasp\"", "]", ",", "[", "\"hedgehogs\"", ",", "\"array\"", "]", ",", "[", "\"herons\"", ",", "\"siege\"", "]", ",", "[", "\"hogs\"", ",", "\"parcel\"", "]", ",", "[", "\"hyenas\"", ",", "\"cackle\"", "]", ",", "[", "\"ibex\"", ",", "\"herd\"", "]", ",", "[", "\"iguanas\"", ",", "\"mess\"", "]", ",", "[", "\"lions\"", ",", "\"pride\"", "]", ",", "[", "\"locusts\"", ",", "\"plague\"", "]", ",", "[", "\"mackerel\"", ",", "\"shoal\"", "]", ",", "[", "\"mares\"", ",", "\"stud\"", "]", ",", "[", "\"minnows\"", ",", "\"shoal\"", "]", ",", "[", "\"moose\"", ",", "\"herd\"", "]", ",", "[", "\"mosquitoes\"", ",", "\"scourge\"", "]", ",", "[", "\"nightingales\"", ",", "\"watch\"", "]", ",", "[", "\"oysters\"", ",", "\"bed\"", "]", ",", "[", "\"partridges\"", ",", "\"covey\"", "]", ",", "[", "\"pelicans\"", ",", "\"pod\"", "]", ",", "[", "\"raccoons\"", ",", "\"gaze\"", "]", ",", "[", "\"ravens\"", ",", "\"unkindness\"", "]", ",", "[", "\"rhinoceroses\"", ",", "\"crash\"", "]", ",", "[", "\"sea urchins\"", ",", "\"sea\"", "]", ",", "[", "\"starlings\"", ",", "\"murmuration\"", "]", ",", "[", "\"toads\"", ",", "\"knot\"", "]", ",", "[", "\"wombats\"", ",", "\"wisdom\"", "]", ",", "[", "\"woodcocks\"", ",", "\"fall\"", "]", ",", "[", "\"woodpeckers\"", ",", "\"descent\"", "]", ",", "[", "\"wrens\"", ",", "\"herd\"", "]", ",", "]", "generic_terms", "=", "[", "\"group\"", ",", "\"bunch\"", ",", "]", "list", "=", "[", "]", "for", "term_pair", "in", "term_list", ":", "for", "generic", "in", "generic_terms", ":", "wrong", "=", "\"a {} of {}\"", ".", "format", "(", "generic", ",", "term_pair", "[", "0", "]", ")", "right", "=", "\"a {} of {}\"", ".", "format", "(", "term_pair", "[", "1", "]", ",", "term_pair", "[", "0", "]", ")", "list", "+=", "[", "[", "right", ",", "[", "wrong", "]", "]", "]", "return", "preferred_forms_check", "(", "text", ",", "list", ",", "err", ",", "msg", ")" ]
Check the text.
[ "Check", "the", "text", "." ]
python
train
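A hedged usage sketch for the venery-terms check above, assuming proselint is installed and the module path in the record is importable; the exact layout of proselint's error tuples is not spelled out here, only that the generic phrase should be flagged in favor of "a pride of lions".

from proselint.checks.terms.venery import check  # module path taken from the record above

# "a bunch of lions" is one of the generated wrong forms, so the check should flag it.
print(check("We saw a bunch of lions on the trip."))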
idlesign/django-etc
etc/templatetags/gravatar.py
https://github.com/idlesign/django-etc/blob/dbfc7e9dfc4fdfe69547f71ba4921989f9e97dbe/etc/templatetags/gravatar.py#L56-L72
def gravatar_get_img(obj, size=65, default='identicon'):
    """Returns Gravatar image HTML tag for a given string or UserModel.

    Example:

        {% load gravatar %}
        {% gravatar_get_img user_model %}

    :param UserModel, str obj:
    :param int size:
    :param str default:
    :return:
    """
    url = get_gravatar_url(obj, size=size, default=default)
    if url:
        return safe('<img src="%s" class="gravatar">' % url)
    return ''
[ "def", "gravatar_get_img", "(", "obj", ",", "size", "=", "65", ",", "default", "=", "'identicon'", ")", ":", "url", "=", "get_gravatar_url", "(", "obj", ",", "size", "=", "size", ",", "default", "=", "default", ")", "if", "url", ":", "return", "safe", "(", "'<img src=\"%s\" class=\"gravatar\">'", "%", "url", ")", "return", "''" ]
Returns Gravatar image HTML tag for a given string or UserModel. Example: {% load gravatar %} {% gravatar_get_img user_model %} :param UserModel, str obj: :param int size: :param str default: :return:
[ "Returns", "Gravatar", "image", "HTML", "tag", "for", "a", "given", "string", "or", "UserModel", "." ]
python
test
boppreh/keyboard
keyboard/_generic.py
https://github.com/boppreh/keyboard/blob/dbb73dfff484f733d5fed8dbc53301af5b6c7f50/keyboard/_generic.py#L28-L46
def start_if_necessary(self):
    """
    Starts the listening thread if it wans't already.
    """
    self.lock.acquire()
    try:
        if not self.listening:
            self.init()

            self.listening = True

            self.listening_thread = Thread(target=self.listen)
            self.listening_thread.daemon = True
            self.listening_thread.start()

            self.processing_thread = Thread(target=self.process)
            self.processing_thread.daemon = True
            self.processing_thread.start()
    finally:
        self.lock.release()
[ "def", "start_if_necessary", "(", "self", ")", ":", "self", ".", "lock", ".", "acquire", "(", ")", "try", ":", "if", "not", "self", ".", "listening", ":", "self", ".", "init", "(", ")", "self", ".", "listening", "=", "True", "self", ".", "listening_thread", "=", "Thread", "(", "target", "=", "self", ".", "listen", ")", "self", ".", "listening_thread", ".", "daemon", "=", "True", "self", ".", "listening_thread", ".", "start", "(", ")", "self", ".", "processing_thread", "=", "Thread", "(", "target", "=", "self", ".", "process", ")", "self", ".", "processing_thread", ".", "daemon", "=", "True", "self", ".", "processing_thread", ".", "start", "(", ")", "finally", ":", "self", ".", "lock", ".", "release", "(", ")" ]
Starts the listening thread if it wans't already.
[ "Starts", "the", "listening", "thread", "if", "it", "wans", "t", "already", "." ]
python
train
pyviz/holoviews
holoviews/plotting/bokeh/element.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/element.py#L519-L539
def _set_active_tools(self, plot):
    "Activates the list of active tools"
    for tool in self.active_tools:
        if isinstance(tool, util.basestring):
            tool_type = TOOL_TYPES[tool]
            matching = [t for t in plot.toolbar.tools
                        if isinstance(t, tool_type)]
            if not matching:
                self.param.warning('Tool of type %r could not be found '
                                   'and could not be activated by default.'
                                   % tool)
                continue
            tool = matching[0]
        if isinstance(tool, tools.Drag):
            plot.toolbar.active_drag = tool
        if isinstance(tool, tools.Scroll):
            plot.toolbar.active_scroll = tool
        if isinstance(tool, tools.Tap):
            plot.toolbar.active_tap = tool
        if isinstance(tool, tools.Inspection):
            plot.toolbar.active_inspect.append(tool)
[ "def", "_set_active_tools", "(", "self", ",", "plot", ")", ":", "for", "tool", "in", "self", ".", "active_tools", ":", "if", "isinstance", "(", "tool", ",", "util", ".", "basestring", ")", ":", "tool_type", "=", "TOOL_TYPES", "[", "tool", "]", "matching", "=", "[", "t", "for", "t", "in", "plot", ".", "toolbar", ".", "tools", "if", "isinstance", "(", "t", ",", "tool_type", ")", "]", "if", "not", "matching", ":", "self", ".", "param", ".", "warning", "(", "'Tool of type %r could not be found '", "'and could not be activated by default.'", "%", "tool", ")", "continue", "tool", "=", "matching", "[", "0", "]", "if", "isinstance", "(", "tool", ",", "tools", ".", "Drag", ")", ":", "plot", ".", "toolbar", ".", "active_drag", "=", "tool", "if", "isinstance", "(", "tool", ",", "tools", ".", "Scroll", ")", ":", "plot", ".", "toolbar", ".", "active_scroll", "=", "tool", "if", "isinstance", "(", "tool", ",", "tools", ".", "Tap", ")", ":", "plot", ".", "toolbar", ".", "active_tap", "=", "tool", "if", "isinstance", "(", "tool", ",", "tools", ".", "Inspection", ")", ":", "plot", ".", "toolbar", ".", "active_inspect", ".", "append", "(", "tool", ")" ]
Activates the list of active tools
[ "Activates", "the", "list", "of", "active", "tools" ]
python
train
pyamg/pyamg
pyamg/multilevel.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/multilevel.py#L177-L247
def cycle_complexity(self, cycle='V'):
    """Cycle complexity of V, W, AMLI, and F(1,1) cycle with simple relaxation.

    Cycle complexity is an approximate measure of the number of
    floating point operations (FLOPs) required to perform a single
    multigrid cycle relative to the cost a single smoothing operation.

    Parameters
    ----------
    cycle : {'V','W','F','AMLI'}
        Type of multigrid cycle to perform in each iteration.

    Returns
    -------
    cc : float
        Defined as F_sum / F_0, where F_sum is the total number of
        nonzeros in the matrix on all levels encountered during a cycle
        and F_0 is the number of nonzeros in the matrix on the finest
        level.

    Notes
    -----
    This is only a rough estimate of the true cycle complexity. The
    estimate assumes that the cost of pre and post-smoothing are
    (each) equal to the number of nonzeros in the matrix on that level.
    This assumption holds for smoothers like Jacobi and Gauss-Seidel.
    However, the true cycle complexity of cycle using more expensive
    methods, like block Gauss-Seidel will be underestimated.

    Additionally, if the cycle used in practice isn't a (1,1)-cycle,
    then this cost estimate will be off.

    """
    cycle = str(cycle).upper()
    nnz = [level.A.nnz for level in self.levels]

    def V(level):
        if len(self.levels) == 1:
            return nnz[0]
        elif level == len(self.levels) - 2:
            return 2 * nnz[level] + nnz[level + 1]
        else:
            return 2 * nnz[level] + V(level + 1)

    def W(level):
        if len(self.levels) == 1:
            return nnz[0]
        elif level == len(self.levels) - 2:
            return 2 * nnz[level] + nnz[level + 1]
        else:
            return 2 * nnz[level] + 2 * W(level + 1)

    def F(level):
        if len(self.levels) == 1:
            return nnz[0]
        elif level == len(self.levels) - 2:
            return 2 * nnz[level] + nnz[level + 1]
        else:
            return 2 * nnz[level] + F(level + 1) + V(level + 1)

    if cycle == 'V':
        flops = V(0)
    elif (cycle == 'W') or (cycle == 'AMLI'):
        flops = W(0)
    elif cycle == 'F':
        flops = F(0)
    else:
        raise TypeError('Unrecognized cycle type (%s)' % cycle)

    return float(flops) / float(nnz[0])
[ "def", "cycle_complexity", "(", "self", ",", "cycle", "=", "'V'", ")", ":", "cycle", "=", "str", "(", "cycle", ")", ".", "upper", "(", ")", "nnz", "=", "[", "level", ".", "A", ".", "nnz", "for", "level", "in", "self", ".", "levels", "]", "def", "V", "(", "level", ")", ":", "if", "len", "(", "self", ".", "levels", ")", "==", "1", ":", "return", "nnz", "[", "0", "]", "elif", "level", "==", "len", "(", "self", ".", "levels", ")", "-", "2", ":", "return", "2", "*", "nnz", "[", "level", "]", "+", "nnz", "[", "level", "+", "1", "]", "else", ":", "return", "2", "*", "nnz", "[", "level", "]", "+", "V", "(", "level", "+", "1", ")", "def", "W", "(", "level", ")", ":", "if", "len", "(", "self", ".", "levels", ")", "==", "1", ":", "return", "nnz", "[", "0", "]", "elif", "level", "==", "len", "(", "self", ".", "levels", ")", "-", "2", ":", "return", "2", "*", "nnz", "[", "level", "]", "+", "nnz", "[", "level", "+", "1", "]", "else", ":", "return", "2", "*", "nnz", "[", "level", "]", "+", "2", "*", "W", "(", "level", "+", "1", ")", "def", "F", "(", "level", ")", ":", "if", "len", "(", "self", ".", "levels", ")", "==", "1", ":", "return", "nnz", "[", "0", "]", "elif", "level", "==", "len", "(", "self", ".", "levels", ")", "-", "2", ":", "return", "2", "*", "nnz", "[", "level", "]", "+", "nnz", "[", "level", "+", "1", "]", "else", ":", "return", "2", "*", "nnz", "[", "level", "]", "+", "F", "(", "level", "+", "1", ")", "+", "V", "(", "level", "+", "1", ")", "if", "cycle", "==", "'V'", ":", "flops", "=", "V", "(", "0", ")", "elif", "(", "cycle", "==", "'W'", ")", "or", "(", "cycle", "==", "'AMLI'", ")", ":", "flops", "=", "W", "(", "0", ")", "elif", "cycle", "==", "'F'", ":", "flops", "=", "F", "(", "0", ")", "else", ":", "raise", "TypeError", "(", "'Unrecognized cycle type (%s)'", "%", "cycle", ")", "return", "float", "(", "flops", ")", "/", "float", "(", "nnz", "[", "0", "]", ")" ]
Cycle complexity of V, W, AMLI, and F(1,1) cycle with simple relaxation. Cycle complexity is an approximate measure of the number of floating point operations (FLOPs) required to perform a single multigrid cycle relative to the cost a single smoothing operation. Parameters ---------- cycle : {'V','W','F','AMLI'} Type of multigrid cycle to perform in each iteration. Returns ------- cc : float Defined as F_sum / F_0, where F_sum is the total number of nonzeros in the matrix on all levels encountered during a cycle and F_0 is the number of nonzeros in the matrix on the finest level. Notes ----- This is only a rough estimate of the true cycle complexity. The estimate assumes that the cost of pre and post-smoothing are (each) equal to the number of nonzeros in the matrix on that level. This assumption holds for smoothers like Jacobi and Gauss-Seidel. However, the true cycle complexity of cycle using more expensive methods, like block Gauss-Seidel will be underestimated. Additionally, if the cycle used in practice isn't a (1,1)-cycle, then this cost estimate will be off.
[ "Cycle", "complexity", "of", "V", "W", "AMLI", "and", "F", "(", "1", "1", ")", "cycle", "with", "simple", "relaxation", "." ]
python
train
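A short usage sketch for the cycle_complexity record above, assuming pyamg is installed and that its gallery and Ruge-Stuben solver helpers are available to build a multilevel hierarchy whose method this is.

import pyamg

A = pyamg.gallery.poisson((100, 100), format='csr')  # standard 2D Poisson test matrix
ml = pyamg.ruge_stuben_solver(A)                      # multilevel solver with several levels
print(ml.cycle_complexity('V'))                       # V-cycle cost relative to finest-level nnz
print(ml.cycle_complexity('W'))                       # W-cycle costs more, per the recursion above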
Jajcus/pyxmpp2
pyxmpp2/error.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/error.py#L232-L275
def _from_xml(self, element):
    """Initialize an ErrorElement object from an XML element.

    :Parameters:
        - `element`: XML element to be decoded.
    :Types:
        - `element`: :etree:`ElementTree.Element`
    """
    # pylint: disable-msg=R0912
    if element.tag != self.error_qname:
        raise ValueError(u"{0!r} is not a {1!r} element".format(
                                                element, self.error_qname))
    lang = element.get(XML_LANG_QNAME, None)
    if lang:
        self.language = lang
    self.condition = None
    for child in element:
        if child.tag.startswith(self.cond_qname_prefix):
            if self.condition is not None:
                logger.warning("Multiple conditions in XMPP error element.")
                continue
            self.condition = deepcopy(child)
        elif child.tag == self.text_qname:
            lang = child.get(XML_LANG_QNAME, None)
            if lang:
                self.language = lang
            self.text = child.text.strip()
        else:
            bad = False
            for prefix in (STREAM_QNP, STANZA_CLIENT_QNP, STANZA_SERVER_QNP,
                           STANZA_ERROR_QNP, STREAM_ERROR_QNP):
                if child.tag.startswith(prefix):
                    logger.warning("Unexpected stream-namespaced"
                                   " element in error.")
                    bad = True
                    break
            if not bad:
                self.custom_condition.append(deepcopy(child))
    if self.condition is None:
        self.condition = ElementTree.Element(self.cond_qname_prefix
                                             + "undefined-condition")
    if self.condition.tag in OBSOLETE_CONDITIONS:
        new_cond_name = OBSOLETE_CONDITIONS[self.condition.tag]
        self.condition = ElementTree.Element(new_cond_name)
[ "def", "_from_xml", "(", "self", ",", "element", ")", ":", "# pylint: disable-msg=R0912", "if", "element", ".", "tag", "!=", "self", ".", "error_qname", ":", "raise", "ValueError", "(", "u\"{0!r} is not a {1!r} element\"", ".", "format", "(", "element", ",", "self", ".", "error_qname", ")", ")", "lang", "=", "element", ".", "get", "(", "XML_LANG_QNAME", ",", "None", ")", "if", "lang", ":", "self", ".", "language", "=", "lang", "self", ".", "condition", "=", "None", "for", "child", "in", "element", ":", "if", "child", ".", "tag", ".", "startswith", "(", "self", ".", "cond_qname_prefix", ")", ":", "if", "self", ".", "condition", "is", "not", "None", ":", "logger", ".", "warning", "(", "\"Multiple conditions in XMPP error element.\"", ")", "continue", "self", ".", "condition", "=", "deepcopy", "(", "child", ")", "elif", "child", ".", "tag", "==", "self", ".", "text_qname", ":", "lang", "=", "child", ".", "get", "(", "XML_LANG_QNAME", ",", "None", ")", "if", "lang", ":", "self", ".", "language", "=", "lang", "self", ".", "text", "=", "child", ".", "text", ".", "strip", "(", ")", "else", ":", "bad", "=", "False", "for", "prefix", "in", "(", "STREAM_QNP", ",", "STANZA_CLIENT_QNP", ",", "STANZA_SERVER_QNP", ",", "STANZA_ERROR_QNP", ",", "STREAM_ERROR_QNP", ")", ":", "if", "child", ".", "tag", ".", "startswith", "(", "prefix", ")", ":", "logger", ".", "warning", "(", "\"Unexpected stream-namespaced\"", "\" element in error.\"", ")", "bad", "=", "True", "break", "if", "not", "bad", ":", "self", ".", "custom_condition", ".", "append", "(", "deepcopy", "(", "child", ")", ")", "if", "self", ".", "condition", "is", "None", ":", "self", ".", "condition", "=", "ElementTree", ".", "Element", "(", "self", ".", "cond_qname_prefix", "+", "\"undefined-condition\"", ")", "if", "self", ".", "condition", ".", "tag", "in", "OBSOLETE_CONDITIONS", ":", "new_cond_name", "=", "OBSOLETE_CONDITIONS", "[", "self", ".", "condition", ".", "tag", "]", "self", ".", "condition", "=", "ElementTree", ".", "Element", "(", "new_cond_name", ")" ]
Initialize an ErrorElement object from an XML element. :Parameters: - `element`: XML element to be decoded. :Types: - `element`: :etree:`ElementTree.Element`
[ "Initialize", "an", "ErrorElement", "object", "from", "an", "XML", "element", "." ]
python
valid
loli/medpy
medpy/filter/binary.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/filter/binary.py#L118-L134
def bounding_box(img):
    r"""
    Return the bounding box incorporating all non-zero values in the image.

    Parameters
    ----------
    img : array_like
        An array containing non-zero objects.

    Returns
    -------
    bbox : a list of slicer objects defining the bounding box
    """
    locations = numpy.argwhere(img)
    mins = locations.min(0)
    maxs = locations.max(0) + 1
    return [slice(x, y) for x, y in zip(mins, maxs)]
[ "def", "bounding_box", "(", "img", ")", ":", "locations", "=", "numpy", ".", "argwhere", "(", "img", ")", "mins", "=", "locations", ".", "min", "(", "0", ")", "maxs", "=", "locations", ".", "max", "(", "0", ")", "+", "1", "return", "[", "slice", "(", "x", ",", "y", ")", "for", "x", ",", "y", "in", "zip", "(", "mins", ",", "maxs", ")", "]" ]
r""" Return the bounding box incorporating all non-zero values in the image. Parameters ---------- img : array_like An array containing non-zero objects. Returns ------- bbox : a list of slicer objects defining the bounding box
[ "r", "Return", "the", "bounding", "box", "incorporating", "all", "non", "-", "zero", "values", "in", "the", "image", ".", "Parameters", "----------", "img", ":", "array_like", "An", "array", "containing", "non", "-", "zero", "objects", ".", "Returns", "-------", "bbox", ":", "a", "list", "of", "slicer", "objects", "defining", "the", "bounding", "box" ]
python
train
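A minimal usage sketch for the bounding_box record above, assuming medpy and numpy are installed and the module path from the record is importable; the expected output follows directly from the argwhere/min/max logic shown.

import numpy
from medpy.filter.binary import bounding_box  # module path taken from the record above

img = numpy.zeros((5, 5), dtype=bool)
img[1:3, 2:4] = True
bbox = bounding_box(img)
print(bbox)               # [slice(1, 3), slice(2, 4)]
print(img[tuple(bbox)])   # the slices crop exactly the non-zero block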
wal-e/wal-e
wal_e/piper.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/piper.py#L94-L122
def pipe(*args):
    """ Takes as parameters several dicts, each with the same
    parameters passed to popen.

    Runs the various processes in a pipeline, connecting
    the stdout of every process except the last with the stdin
    of the next process.

    Adapted from http://www.enricozini.org/2009/debian/python-pipes/
    """
    if len(args) < 2:
        raise ValueError("pipe needs at least 2 processes")

    # Set stdout=PIPE in every subprocess except the last
    for i in args[:-1]:
        i["stdout"] = subprocess.PIPE

    # Runs all subprocesses connecting stdins and stdouts to create the
    # pipeline. Closes stdouts to avoid deadlocks.
    popens = [popen_sp(**args[0])]
    for i in range(1, len(args)):
        args[i]["stdin"] = popens[i - 1].stdout
        popens.append(popen_sp(**args[i]))
        popens[i - 1].stdout.close()

    # Returns the array of subprocesses just created
    return popens
[ "def", "pipe", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "<", "2", ":", "raise", "ValueError", "(", "\"pipe needs at least 2 processes\"", ")", "# Set stdout=PIPE in every subprocess except the last", "for", "i", "in", "args", "[", ":", "-", "1", "]", ":", "i", "[", "\"stdout\"", "]", "=", "subprocess", ".", "PIPE", "# Runs all subprocesses connecting stdins and stdouts to create the", "# pipeline. Closes stdouts to avoid deadlocks.", "popens", "=", "[", "popen_sp", "(", "*", "*", "args", "[", "0", "]", ")", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "args", ")", ")", ":", "args", "[", "i", "]", "[", "\"stdin\"", "]", "=", "popens", "[", "i", "-", "1", "]", ".", "stdout", "popens", ".", "append", "(", "popen_sp", "(", "*", "*", "args", "[", "i", "]", ")", ")", "popens", "[", "i", "-", "1", "]", ".", "stdout", ".", "close", "(", ")", "# Returns the array of subprocesses just created", "return", "popens" ]
Takes as parameters several dicts, each with the same parameters passed to popen. Runs the various processes in a pipeline, connecting the stdout of every process except the last with the stdin of the next process. Adapted from http://www.enricozini.org/2009/debian/python-pipes/
[ "Takes", "as", "parameters", "several", "dicts", "each", "with", "the", "same", "parameters", "passed", "to", "popen", "." ]
python
train
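The pipe record above chains subprocesses stdout-to-stdin. The following standalone sketch shows the same pattern with plain subprocess.Popen rather than wal-e's popen_sp wrapper, assuming a POSIX system where echo and tr are available.

import subprocess

p1 = subprocess.Popen(["echo", "hello world"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["tr", "a-z", "A-Z"], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()              # drop our handle so p1 can receive SIGPIPE if p2 exits early
print(p2.communicate()[0])     # b'HELLO WORLD\n'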
ckan/deadoralive
deadoralive/deadoralive.py
https://github.com/ckan/deadoralive/blob/82eed6c73e17b9884476311a7a8fae9d2b379600/deadoralive/deadoralive.py#L71-L121
def check_url(url):
    """Check whether the given URL is dead or alive.

    Returns a dict with four keys:

        "url": The URL that was checked (string)
        "alive": Whether the URL was working, True or False
        "status": The HTTP status code of the response from the URL,
            e.g. 200, 401, 500 (int)
        "reason": The reason for the success or failure of the check,
            e.g. "OK", "Unauthorized", "Internal Server Error" (string)

    The "status" may be None if we did not get a valid HTTP response,
    e.g. in the event of a timeout, DNS failure or invalid HTTP response.

    The "reason" will always be a string, but may be a requests library
    exception string rather than an HTTP reason string if we did not get a
    valid HTTP response.

    """
    result = {"url": url}
    try:
        response = requests.get(url)
        result["status"] = response.status_code
        result["reason"] = response.reason
        response.raise_for_status()  # Raise if status_code is not OK.
        result["alive"] = True
    except AttributeError as err:
        if err.message == "'NoneType' object has no attribute 'encode'":
            # requests seems to throw these for some invalid URLs.
            result["alive"] = False
            result["reason"] = "Invalid URL"
            result["status"] = None
        else:
            raise
    except requests.exceptions.RequestException as err:
        result["alive"] = False
        if "reason" not in result:
            result["reason"] = str(err)
        if "status" not in result:
            # This can happen if the response is invalid HTTP, if we get a DNS
            # failure, or a timeout, etc.
            result["status"] = None

    # We should always have these four fields in the result.
    assert "url" in result
    assert result.get("alive") in (True, False)
    assert "status" in result
    assert "reason" in result

    return result
[ "def", "check_url", "(", "url", ")", ":", "result", "=", "{", "\"url\"", ":", "url", "}", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ")", "result", "[", "\"status\"", "]", "=", "response", ".", "status_code", "result", "[", "\"reason\"", "]", "=", "response", ".", "reason", "response", ".", "raise_for_status", "(", ")", "# Raise if status_code is not OK.", "result", "[", "\"alive\"", "]", "=", "True", "except", "AttributeError", "as", "err", ":", "if", "err", ".", "message", "==", "\"'NoneType' object has no attribute 'encode'\"", ":", "# requests seems to throw these for some invalid URLs.", "result", "[", "\"alive\"", "]", "=", "False", "result", "[", "\"reason\"", "]", "=", "\"Invalid URL\"", "result", "[", "\"status\"", "]", "=", "None", "else", ":", "raise", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "err", ":", "result", "[", "\"alive\"", "]", "=", "False", "if", "\"reason\"", "not", "in", "result", ":", "result", "[", "\"reason\"", "]", "=", "str", "(", "err", ")", "if", "\"status\"", "not", "in", "result", ":", "# This can happen if the response is invalid HTTP, if we get a DNS", "# failure, or a timeout, etc.", "result", "[", "\"status\"", "]", "=", "None", "# We should always have these four fields in the result.", "assert", "\"url\"", "in", "result", "assert", "result", ".", "get", "(", "\"alive\"", ")", "in", "(", "True", ",", "False", ")", "assert", "\"status\"", "in", "result", "assert", "\"reason\"", "in", "result", "return", "result" ]
Check whether the given URL is dead or alive. Returns a dict with four keys: "url": The URL that was checked (string) "alive": Whether the URL was working, True or False "status": The HTTP status code of the response from the URL, e.g. 200, 401, 500 (int) "reason": The reason for the success or failure of the check, e.g. "OK", "Unauthorized", "Internal Server Error" (string) The "status" may be None if we did not get a valid HTTP response, e.g. in the event of a timeout, DNS failure or invalid HTTP response. The "reason" will always be a string, but may be a requests library exception string rather than an HTTP reason string if we did not get a valid HTTP response.
[ "Check", "whether", "the", "given", "URL", "is", "dead", "or", "alive", "." ]
python
train
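A brief usage sketch for the check_url record above, assuming the deadoralive package is installed, the module path in the record is importable, and network access is available.

from deadoralive.deadoralive import check_url  # module path taken from the record above

result = check_url("https://example.com")
# Per the docstring, the result always carries exactly these four keys.
print(result["alive"], result["status"], result["reason"], result["url"])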
hobson/aima
aima/nlp.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/nlp.py#L123-L137
def parses(self, words, S='S'):
    """Return a list of parses; words can be a list or string.
    >>> chart = Chart(E_NP_)
    >>> chart.parses('happy man', 'NP')
    [[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
    """
    if isinstance(words, str):
        words = words.split()
    self.parse(words, S)
    # Return all the parses that span the whole input
    # 'span the whole input' => begin at 0, end at len(words)
    return [[i, j, S, found, []]
            for (i, j, lhs, found, expects) in self.chart[len(words)]
            # assert j == len(words)
            if i == 0 and lhs == S and expects == []]
[ "def", "parses", "(", "self", ",", "words", ",", "S", "=", "'S'", ")", ":", "if", "isinstance", "(", "words", ",", "str", ")", ":", "words", "=", "words", ".", "split", "(", ")", "self", ".", "parse", "(", "words", ",", "S", ")", "# Return all the parses that span the whole input", "# 'span the whole input' => begin at 0, end at len(words)", "return", "[", "[", "i", ",", "j", ",", "S", ",", "found", ",", "[", "]", "]", "for", "(", "i", ",", "j", ",", "lhs", ",", "found", ",", "expects", ")", "in", "self", ".", "chart", "[", "len", "(", "words", ")", "]", "# assert j == len(words)", "if", "i", "==", "0", "and", "lhs", "==", "S", "and", "expects", "==", "[", "]", "]" ]
Return a list of parses; words can be a list or string. >>> chart = Chart(E_NP_) >>> chart.parses('happy man', 'NP') [[0, 2, 'NP', [('Adj', 'happy'), [1, 2, 'NP', [('N', 'man')], []]], []]]
[ "Return", "a", "list", "of", "parses", ";", "words", "can", "be", "a", "list", "or", "string", ".", ">>>", "chart", "=", "Chart", "(", "E_NP_", ")", ">>>", "chart", ".", "parses", "(", "happy", "man", "NP", ")", "[[", "0", "2", "NP", "[", "(", "Adj", "happy", ")", "[", "1", "2", "NP", "[", "(", "N", "man", ")", "]", "[]", "]]", "[]", "]]" ]
python
valid
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L712-L715
def cropped(self, t0, t1): """returns a cropped copy of this segment which starts at self.point(t0) and ends at self.point(t1).""" return Line(self.point(t0), self.point(t1))
[ "def", "cropped", "(", "self", ",", "t0", ",", "t1", ")", ":", "return", "Line", "(", "self", ".", "point", "(", "t0", ")", ",", "self", ".", "point", "(", "t1", ")", ")" ]
returns a cropped copy of this segment which starts at self.point(t0) and ends at self.point(t1).
[ "returns", "a", "cropped", "copy", "of", "this", "segment", "which", "starts", "at", "self", ".", "point", "(", "t0", ")", "and", "ends", "at", "self", ".", "point", "(", "t1", ")", "." ]
python
train
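A small usage sketch for the cropped() method above, assuming svgpathtools exposes Line at the package level (endpoints are complex numbers); the coordinates are invented.

from svgpathtools import Line

seg = Line(0 + 0j, 10 + 10j)          # straight segment from (0,0) to (10,10)
middle = seg.cropped(0.25, 0.75)      # keep the middle half of the segment

# cropped() returns a new Line whose endpoints are seg.point(0.25) and seg.point(0.75)
print(middle.start, middle.end)       # expected: (2.5+2.5j) (7.5+7.5j)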
daboth/pagan
tools/webserver/webserver.py
https://github.com/daboth/pagan/blob/1e6d31f78e312d242751e70566ca9a6278784915/tools/webserver/webserver.py#L63-L105
def index(): """main functionality of webserver""" default = ["pagan", "python", "avatar", "github"] slogan = request.forms.get("slogan") if not slogan: if request.get_cookie("hist1"): slogan = request.get_cookie("hist1") else: slogan = "pagan" if not request.get_cookie("hist1"): hist1, hist2, hist3, hist4 = default[:] else: hist1 = request.get_cookie("hist1") hist2 = request.get_cookie("hist2") hist3 = request.get_cookie("hist3") hist4 = request.get_cookie("hist4") if slogan in (hist1, hist2, hist3, hist4): history = [hist1, hist2, hist3, hist4] history.remove(slogan) hist1, hist2, hist3 = history[0], history[1], history[2] response.set_cookie("hist1", slogan, max_age=60*60*24*30, httponly=True) response.set_cookie("hist2", hist1, max_age=60*60*24*30, httponly=True) response.set_cookie("hist3", hist2, max_age=60*60*24*30, httponly=True) response.set_cookie("hist4", hist3, max_age=60*60*24*30, httponly=True) # slogan, hist1, hist2, hist3 = escape(slogan), escape(hist1),\ # escape(hist2), escape(hist3) md5 = hashlib.md5() md5.update(slogan) slogan_hash = md5.hexdigest() md5.update(hist1) hist1_hash = md5.hexdigest() md5.update(hist2) hist2_hash = md5.hexdigest() md5.update(hist3) hist3_hash = md5.hexdigest() return template(TEMPLATEINDEX, slogan=slogan, hist1=hist1, hist2=hist2, hist3=hist3, sloganHash=slogan_hash, hist1Hash=hist1_hash, hist2Hash=hist2_hash, hist3Hash=hist3_hash)
[ "def", "index", "(", ")", ":", "default", "=", "[", "\"pagan\"", ",", "\"python\"", ",", "\"avatar\"", ",", "\"github\"", "]", "slogan", "=", "request", ".", "forms", ".", "get", "(", "\"slogan\"", ")", "if", "not", "slogan", ":", "if", "request", ".", "get_cookie", "(", "\"hist1\"", ")", ":", "slogan", "=", "request", ".", "get_cookie", "(", "\"hist1\"", ")", "else", ":", "slogan", "=", "\"pagan\"", "if", "not", "request", ".", "get_cookie", "(", "\"hist1\"", ")", ":", "hist1", ",", "hist2", ",", "hist3", ",", "hist4", "=", "default", "[", ":", "]", "else", ":", "hist1", "=", "request", ".", "get_cookie", "(", "\"hist1\"", ")", "hist2", "=", "request", ".", "get_cookie", "(", "\"hist2\"", ")", "hist3", "=", "request", ".", "get_cookie", "(", "\"hist3\"", ")", "hist4", "=", "request", ".", "get_cookie", "(", "\"hist4\"", ")", "if", "slogan", "in", "(", "hist1", ",", "hist2", ",", "hist3", ",", "hist4", ")", ":", "history", "=", "[", "hist1", ",", "hist2", ",", "hist3", ",", "hist4", "]", "history", ".", "remove", "(", "slogan", ")", "hist1", ",", "hist2", ",", "hist3", "=", "history", "[", "0", "]", ",", "history", "[", "1", "]", ",", "history", "[", "2", "]", "response", ".", "set_cookie", "(", "\"hist1\"", ",", "slogan", ",", "max_age", "=", "60", "*", "60", "*", "24", "*", "30", ",", "httponly", "=", "True", ")", "response", ".", "set_cookie", "(", "\"hist2\"", ",", "hist1", ",", "max_age", "=", "60", "*", "60", "*", "24", "*", "30", ",", "httponly", "=", "True", ")", "response", ".", "set_cookie", "(", "\"hist3\"", ",", "hist2", ",", "max_age", "=", "60", "*", "60", "*", "24", "*", "30", ",", "httponly", "=", "True", ")", "response", ".", "set_cookie", "(", "\"hist4\"", ",", "hist3", ",", "max_age", "=", "60", "*", "60", "*", "24", "*", "30", ",", "httponly", "=", "True", ")", "# slogan, hist1, hist2, hist3 = escape(slogan), escape(hist1),\\", "# escape(hist2), escape(hist3)", "md5", "=", "hashlib", ".", "md5", "(", ")", "md5", ".", "update", "(", "slogan", ")", "slogan_hash", "=", "md5", ".", "hexdigest", "(", ")", "md5", ".", "update", "(", "hist1", ")", "hist1_hash", "=", "md5", ".", "hexdigest", "(", ")", "md5", ".", "update", "(", "hist2", ")", "hist2_hash", "=", "md5", ".", "hexdigest", "(", ")", "md5", ".", "update", "(", "hist3", ")", "hist3_hash", "=", "md5", ".", "hexdigest", "(", ")", "return", "template", "(", "TEMPLATEINDEX", ",", "slogan", "=", "slogan", ",", "hist1", "=", "hist1", ",", "hist2", "=", "hist2", ",", "hist3", "=", "hist3", ",", "sloganHash", "=", "slogan_hash", ",", "hist1Hash", "=", "hist1_hash", ",", "hist2Hash", "=", "hist2_hash", ",", "hist3Hash", "=", "hist3_hash", ")" ]
main functionality of webserver
[ "main", "functionality", "of", "webserver" ]
python
train
gem/oq-engine
openquake/baselib/hdf5.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/hdf5.py#L53-L74
def create(hdf5, name, dtype, shape=(None,), compression=None, fillvalue=0, attrs=None): """ :param hdf5: a h5py.File object :param name: an hdf5 key string :param dtype: dtype of the dataset (usually composite) :param shape: shape of the dataset (can be extendable) :param compression: None or 'gzip' are recommended :param attrs: dictionary of attributes of the dataset :returns: a HDF5 dataset """ if shape[0] is None: # extendable dataset dset = hdf5.create_dataset( name, (0,) + shape[1:], dtype, chunks=True, maxshape=shape, compression=compression) else: # fixed-shape dataset dset = hdf5.create_dataset(name, shape, dtype, fillvalue=fillvalue, compression=compression) if attrs: for k, v in attrs.items(): dset.attrs[k] = maybe_encode(v) return dset
[ "def", "create", "(", "hdf5", ",", "name", ",", "dtype", ",", "shape", "=", "(", "None", ",", ")", ",", "compression", "=", "None", ",", "fillvalue", "=", "0", ",", "attrs", "=", "None", ")", ":", "if", "shape", "[", "0", "]", "is", "None", ":", "# extendable dataset", "dset", "=", "hdf5", ".", "create_dataset", "(", "name", ",", "(", "0", ",", ")", "+", "shape", "[", "1", ":", "]", ",", "dtype", ",", "chunks", "=", "True", ",", "maxshape", "=", "shape", ",", "compression", "=", "compression", ")", "else", ":", "# fixed-shape dataset", "dset", "=", "hdf5", ".", "create_dataset", "(", "name", ",", "shape", ",", "dtype", ",", "fillvalue", "=", "fillvalue", ",", "compression", "=", "compression", ")", "if", "attrs", ":", "for", "k", ",", "v", "in", "attrs", ".", "items", "(", ")", ":", "dset", ".", "attrs", "[", "k", "]", "=", "maybe_encode", "(", "v", ")", "return", "dset" ]
:param hdf5: a h5py.File object :param name: an hdf5 key string :param dtype: dtype of the dataset (usually composite) :param shape: shape of the dataset (can be extendable) :param compression: None or 'gzip' are recommended :param attrs: dictionary of attributes of the dataset :returns: a HDF5 dataset
[ ":", "param", "hdf5", ":", "a", "h5py", ".", "File", "object", ":", "param", "name", ":", "an", "hdf5", "key", "string", ":", "param", "dtype", ":", "dtype", "of", "the", "dataset", "(", "usually", "composite", ")", ":", "param", "shape", ":", "shape", "of", "the", "dataset", "(", "can", "be", "extendable", ")", ":", "param", "compression", ":", "None", "or", "gzip", "are", "recommended", ":", "param", "attrs", ":", "dictionary", "of", "attributes", "of", "the", "dataset", ":", "returns", ":", "a", "HDF5", "dataset" ]
python
train
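A usage sketch for the create() helper above. The import path is inferred from the record's URL and the file name, dataset key and attribute are placeholders; h5py and numpy are assumed to be installed.

import numpy
import h5py
from openquake.baselib.hdf5 import create  # assumed import path, taken from the record above

with h5py.File("example.h5", "w") as f:
    # shape=(None,) requests an extendable dataset: initial length 0, chunked, maxshape=(None,)
    dset = create(f, "values", numpy.float64, shape=(None,),
                  compression="gzip", attrs={"units": "m"})
    print(dset.shape, dset.maxshape)   # (0,) (None,)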
maxfischer2781/chainlet
chainlet/protolink.py
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/protolink.py#L158-L183
def printlet(flatten=False, **kwargs): """ Print chunks of data from a chain :param flatten: whether to flatten data chunks :param kwargs: keyword arguments as for :py:func:`print` If ``flatten`` is :py:const:`True`, every chunk received is unpacked. This is useful when passing around connected data, e.g. from :py:func:`~.enumeratelet`. Keyword arguments via ``kwargs`` are equivalent to those of :py:func:`print`. For example, passing ``file=sys.stderr`` is a simple way of creating a debugging element in a chain: .. code:: debug_chain = chain[:i] >> printlet(file=sys.stderr) >> chain[i:] """ chunk = yield if flatten: while True: print(*chunk, **kwargs) chunk = yield chunk else: while True: print(chunk, **kwargs) chunk = yield chunk
[ "def", "printlet", "(", "flatten", "=", "False", ",", "*", "*", "kwargs", ")", ":", "chunk", "=", "yield", "if", "flatten", ":", "while", "True", ":", "print", "(", "*", "chunk", ",", "*", "*", "kwargs", ")", "chunk", "=", "yield", "chunk", "else", ":", "while", "True", ":", "print", "(", "chunk", ",", "*", "*", "kwargs", ")", "chunk", "=", "yield", "chunk" ]
Print chunks of data from a chain :param flatten: whether to flatten data chunks :param kwargs: keyword arguments as for :py:func:`print` If ``flatten`` is :py:const:`True`, every chunk received is unpacked. This is useful when passing around connected data, e.g. from :py:func:`~.enumeratelet`. Keyword arguments via ``kwargs`` are equivalent to those of :py:func:`print`. For example, passing ``file=sys.stderr`` is a simple way of creating a debugging element in a chain: .. code:: debug_chain = chain[:i] >> printlet(file=sys.stderr) >> chain[i:]
[ "Print", "chunks", "of", "data", "from", "a", "chain" ]
python
train
Fantomas42/django-blog-zinnia
zinnia/views/trackback.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/views/trackback.py#L46-L97
def post(self, request, *args, **kwargs): """ Check if an URL is provided and if trackbacks are enabled on the Entry. If so the URL is registered one time as a trackback. """ url = request.POST.get('url') if not url: return self.get(request, *args, **kwargs) entry = self.get_object() site = Site.objects.get_current() if not entry.trackbacks_are_open: return self.render_to_response( {'error': 'Trackback is not enabled for %s' % entry.title}) title = request.POST.get('title') or url excerpt = request.POST.get('excerpt') or title blog_name = request.POST.get('blog_name') or title ip_address = request.META.get('REMOTE_ADDR', None) trackback_klass = comments.get_model() trackback_datas = { 'content_type': ContentType.objects.get_for_model(Entry), 'object_pk': entry.pk, 'site': site, 'user_url': url, 'user_name': blog_name, 'ip_address': ip_address, 'comment': excerpt } trackback = trackback_klass(**trackback_datas) if check_is_spam(trackback, entry, request): return self.render_to_response( {'error': 'Trackback considered like spam'}) trackback_defaults = {'comment': trackback_datas.pop('comment')} trackback, created = trackback_klass.objects.get_or_create( defaults=trackback_defaults, **trackback_datas) if created: trackback.flags.create(user=get_user_flagger(), flag=TRACKBACK) trackback_was_posted.send(trackback.__class__, trackback=trackback, entry=entry) else: return self.render_to_response( {'error': 'Trackback is already registered'}) return self.render_to_response({})
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "url", "=", "request", ".", "POST", ".", "get", "(", "'url'", ")", "if", "not", "url", ":", "return", "self", ".", "get", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "entry", "=", "self", ".", "get_object", "(", ")", "site", "=", "Site", ".", "objects", ".", "get_current", "(", ")", "if", "not", "entry", ".", "trackbacks_are_open", ":", "return", "self", ".", "render_to_response", "(", "{", "'error'", ":", "'Trackback is not enabled for %s'", "%", "entry", ".", "title", "}", ")", "title", "=", "request", ".", "POST", ".", "get", "(", "'title'", ")", "or", "url", "excerpt", "=", "request", ".", "POST", ".", "get", "(", "'excerpt'", ")", "or", "title", "blog_name", "=", "request", ".", "POST", ".", "get", "(", "'blog_name'", ")", "or", "title", "ip_address", "=", "request", ".", "META", ".", "get", "(", "'REMOTE_ADDR'", ",", "None", ")", "trackback_klass", "=", "comments", ".", "get_model", "(", ")", "trackback_datas", "=", "{", "'content_type'", ":", "ContentType", ".", "objects", ".", "get_for_model", "(", "Entry", ")", ",", "'object_pk'", ":", "entry", ".", "pk", ",", "'site'", ":", "site", ",", "'user_url'", ":", "url", ",", "'user_name'", ":", "blog_name", ",", "'ip_address'", ":", "ip_address", ",", "'comment'", ":", "excerpt", "}", "trackback", "=", "trackback_klass", "(", "*", "*", "trackback_datas", ")", "if", "check_is_spam", "(", "trackback", ",", "entry", ",", "request", ")", ":", "return", "self", ".", "render_to_response", "(", "{", "'error'", ":", "'Trackback considered like spam'", "}", ")", "trackback_defaults", "=", "{", "'comment'", ":", "trackback_datas", ".", "pop", "(", "'comment'", ")", "}", "trackback", ",", "created", "=", "trackback_klass", ".", "objects", ".", "get_or_create", "(", "defaults", "=", "trackback_defaults", ",", "*", "*", "trackback_datas", ")", "if", "created", ":", "trackback", ".", "flags", ".", "create", "(", "user", "=", "get_user_flagger", "(", ")", ",", "flag", "=", "TRACKBACK", ")", "trackback_was_posted", ".", "send", "(", "trackback", ".", "__class__", ",", "trackback", "=", "trackback", ",", "entry", "=", "entry", ")", "else", ":", "return", "self", ".", "render_to_response", "(", "{", "'error'", ":", "'Trackback is already registered'", "}", ")", "return", "self", ".", "render_to_response", "(", "{", "}", ")" ]
Check if an URL is provided and if trackbacks are enabled on the Entry. If so the URL is registered one time as a trackback.
[ "Check", "if", "an", "URL", "is", "provided", "and", "if", "trackbacks", "are", "enabled", "on", "the", "Entry", ".", "If", "so", "the", "URL", "is", "registered", "one", "time", "as", "a", "trackback", "." ]
python
train
pytroll/python-geotiepoints
geotiepoints/__init__.py
https://github.com/pytroll/python-geotiepoints/blob/7c5cc8a887f8534cc2839c716c2c560aeaf77659/geotiepoints/__init__.py#L99-L119
def _multi(fun, lons, lats, chunk_size, cores=1): """Work on multiple cores. """ pool = Pool(processes=cores) splits = get_scene_splits(lons.shape[0], chunk_size, cores) lons_parts = np.vsplit(lons, splits) lats_parts = np.vsplit(lats, splits) results = [pool.apply_async(fun, (lons_parts[i], lats_parts[i])) for i in range(len(lons_parts))] pool.close() pool.join() lons, lats = zip(*(res.get() for res in results)) return np.vstack(lons), np.vstack(lats)
[ "def", "_multi", "(", "fun", ",", "lons", ",", "lats", ",", "chunk_size", ",", "cores", "=", "1", ")", ":", "pool", "=", "Pool", "(", "processes", "=", "cores", ")", "splits", "=", "get_scene_splits", "(", "lons", ".", "shape", "[", "0", "]", ",", "chunk_size", ",", "cores", ")", "lons_parts", "=", "np", ".", "vsplit", "(", "lons", ",", "splits", ")", "lats_parts", "=", "np", ".", "vsplit", "(", "lats", ",", "splits", ")", "results", "=", "[", "pool", ".", "apply_async", "(", "fun", ",", "(", "lons_parts", "[", "i", "]", ",", "lats_parts", "[", "i", "]", ")", ")", "for", "i", "in", "range", "(", "len", "(", "lons_parts", ")", ")", "]", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "lons", ",", "lats", "=", "zip", "(", "*", "(", "res", ".", "get", "(", ")", "for", "res", "in", "results", ")", ")", "return", "np", ".", "vstack", "(", "lons", ")", ",", "np", ".", "vstack", "(", "lats", ")" ]
Work on multiple cores.
[ "Work", "on", "multiple", "cores", "." ]
python
train
peri-source/peri
peri/opt/addsubtract.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/addsubtract.py#L59-L82
def _feature_guess(im, rad, minmass=None, use_tp=False, trim_edge=False): """Workhorse of feature_guess""" if minmass is None: # we use 1% of the feature size mass as a cutoff; # it's easier to remove than to add minmass = rad**3 * 4/3.*np.pi * 0.01 # 0.03 is a magic number; works well if use_tp: diameter = np.ceil(2*rad) diameter += 1-(diameter % 2) df = peri.trackpy.locate(im, int(diameter), minmass=minmass) npart = np.array(df['mass']).size guess = np.zeros([npart, 3]) guess[:, 0] = df['z'] guess[:, 1] = df['y'] guess[:, 2] = df['x'] mass = df['mass'] else: guess, mass = initializers.local_max_featuring( im, radius=rad, minmass=minmass, trim_edge=trim_edge) npart = guess.shape[0] # I want to return these sorted by mass: inds = np.argsort(mass)[::-1] # biggest mass first return guess[inds].copy(), npart
[ "def", "_feature_guess", "(", "im", ",", "rad", ",", "minmass", "=", "None", ",", "use_tp", "=", "False", ",", "trim_edge", "=", "False", ")", ":", "if", "minmass", "is", "None", ":", "# we use 1% of the feature size mass as a cutoff;", "# it's easier to remove than to add", "minmass", "=", "rad", "**", "3", "*", "4", "/", "3.", "*", "np", ".", "pi", "*", "0.01", "# 0.03 is a magic number; works well", "if", "use_tp", ":", "diameter", "=", "np", ".", "ceil", "(", "2", "*", "rad", ")", "diameter", "+=", "1", "-", "(", "diameter", "%", "2", ")", "df", "=", "peri", ".", "trackpy", ".", "locate", "(", "im", ",", "int", "(", "diameter", ")", ",", "minmass", "=", "minmass", ")", "npart", "=", "np", ".", "array", "(", "df", "[", "'mass'", "]", ")", ".", "size", "guess", "=", "np", ".", "zeros", "(", "[", "npart", ",", "3", "]", ")", "guess", "[", ":", ",", "0", "]", "=", "df", "[", "'z'", "]", "guess", "[", ":", ",", "1", "]", "=", "df", "[", "'y'", "]", "guess", "[", ":", ",", "2", "]", "=", "df", "[", "'x'", "]", "mass", "=", "df", "[", "'mass'", "]", "else", ":", "guess", ",", "mass", "=", "initializers", ".", "local_max_featuring", "(", "im", ",", "radius", "=", "rad", ",", "minmass", "=", "minmass", ",", "trim_edge", "=", "trim_edge", ")", "npart", "=", "guess", ".", "shape", "[", "0", "]", "# I want to return these sorted by mass:", "inds", "=", "np", ".", "argsort", "(", "mass", ")", "[", ":", ":", "-", "1", "]", "# biggest mass first", "return", "guess", "[", "inds", "]", ".", "copy", "(", ")", ",", "npart" ]
Workhorse of feature_guess
[ "Workhorse", "of", "feature_guess" ]
python
valid
pydanny/dj-webhooks
djwebhooks/models.py
https://github.com/pydanny/dj-webhooks/blob/88e245bfe2020e96279af261d88bf8469ba469e5/djwebhooks/models.py#L16-L27
def event_choices(events): """ Get the possible events from settings """ if events is None: msg = "Please add some events in settings.WEBHOOK_EVENTS." raise ImproperlyConfigured(msg) try: choices = [(x, x) for x in events] except TypeError: """ Not a valid iterator, so we raise an exception """ msg = "settings.WEBHOOK_EVENTS must be an iterable object." raise ImproperlyConfigured(msg) return choices
[ "def", "event_choices", "(", "events", ")", ":", "if", "events", "is", "None", ":", "msg", "=", "\"Please add some events in settings.WEBHOOK_EVENTS.\"", "raise", "ImproperlyConfigured", "(", "msg", ")", "try", ":", "choices", "=", "[", "(", "x", ",", "x", ")", "for", "x", "in", "events", "]", "except", "TypeError", ":", "\"\"\" Not a valid iterator, so we raise an exception \"\"\"", "msg", "=", "\"settings.WEBHOOK_EVENTS must be an iterable object.\"", "raise", "ImproperlyConfigured", "(", "msg", ")", "return", "choices" ]
Get the possible events from settings
[ "Get", "the", "possible", "events", "from", "settings" ]
python
valid
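A usage sketch for event_choices() above; the event names are invented placeholders, and the point is only that each event becomes a (value, value) pair suitable for a Django choices argument.

WEBHOOK_EVENTS = ["purchase.confirmed", "user.signup"]   # hypothetical settings value

choices = event_choices(WEBHOOK_EVENTS)
print(choices)   # [('purchase.confirmed', 'purchase.confirmed'), ('user.signup', 'user.signup')]

# Passing None (or any non-iterable) raises django.core.exceptions.ImproperlyConfigured.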
ewels/MultiQC
multiqc/modules/peddy/peddy.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/peddy/peddy.py#L118-L150
def parse_peddy_csv(self, f, pattern): """ Parse csv output from peddy """ parsed_data = dict() headers = None s_name_idx = None for l in f['f'].splitlines(): s = l.split(",") if headers is None: headers = s try: s_name_idx = [headers.index("sample_id")] except ValueError: try: s_name_idx = [headers.index("sample_a"), headers.index("sample_b")] except ValueError: log.warn("Could not find sample name in Peddy output: {}".format(f['fn'])) return None else: s_name = '-'.join([s[idx] for idx in s_name_idx]) parsed_data[s_name] = dict() for i, v in enumerate(s): if i not in s_name_idx: if headers[i] == "error" and pattern == "sex_check": v = "True" if v == "False" else "False" try: # add the pattern as a suffix to key parsed_data[s_name][headers[i] + "_" + pattern] = float(v) except ValueError: # add the pattern as a suffix to key parsed_data[s_name][headers[i] + "_" + pattern] = v if len(parsed_data) == 0: return None return parsed_data
[ "def", "parse_peddy_csv", "(", "self", ",", "f", ",", "pattern", ")", ":", "parsed_data", "=", "dict", "(", ")", "headers", "=", "None", "s_name_idx", "=", "None", "for", "l", "in", "f", "[", "'f'", "]", ".", "splitlines", "(", ")", ":", "s", "=", "l", ".", "split", "(", "\",\"", ")", "if", "headers", "is", "None", ":", "headers", "=", "s", "try", ":", "s_name_idx", "=", "[", "headers", ".", "index", "(", "\"sample_id\"", ")", "]", "except", "ValueError", ":", "try", ":", "s_name_idx", "=", "[", "headers", ".", "index", "(", "\"sample_a\"", ")", ",", "headers", ".", "index", "(", "\"sample_b\"", ")", "]", "except", "ValueError", ":", "log", ".", "warn", "(", "\"Could not find sample name in Peddy output: {}\"", ".", "format", "(", "f", "[", "'fn'", "]", ")", ")", "return", "None", "else", ":", "s_name", "=", "'-'", ".", "join", "(", "[", "s", "[", "idx", "]", "for", "idx", "in", "s_name_idx", "]", ")", "parsed_data", "[", "s_name", "]", "=", "dict", "(", ")", "for", "i", ",", "v", "in", "enumerate", "(", "s", ")", ":", "if", "i", "not", "in", "s_name_idx", ":", "if", "headers", "[", "i", "]", "==", "\"error\"", "and", "pattern", "==", "\"sex_check\"", ":", "v", "=", "\"True\"", "if", "v", "==", "\"False\"", "else", "\"False\"", "try", ":", "# add the pattern as a suffix to key", "parsed_data", "[", "s_name", "]", "[", "headers", "[", "i", "]", "+", "\"_\"", "+", "pattern", "]", "=", "float", "(", "v", ")", "except", "ValueError", ":", "# add the pattern as a suffix to key", "parsed_data", "[", "s_name", "]", "[", "headers", "[", "i", "]", "+", "\"_\"", "+", "pattern", "]", "=", "v", "if", "len", "(", "parsed_data", ")", "==", "0", ":", "return", "None", "return", "parsed_data" ]
Parse csv output from peddy
[ "Parse", "csv", "output", "from", "peddy" ]
python
train
dls-controls/pymalcolm
setup.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/setup.py#L12-L27
def get_version(): """Extracts the version number from the version.py file. """ VERSION_FILE = os.path.join(module_name, 'version.py') txt = open(VERSION_FILE).read() mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', txt, re.M) if mo: version = mo.group(1) bs_version = os.environ.get('MODULEVER', '0.0') assert bs_version == "0.0" or bs_version == version, \ "Version {} specified by the build system doesn't match {} in " \ "version.py".format(bs_version, version) return version else: raise RuntimeError('Unable to find version string in {0}.' .format(VERSION_FILE))
[ "def", "get_version", "(", ")", ":", "VERSION_FILE", "=", "os", ".", "path", ".", "join", "(", "module_name", ",", "'version.py'", ")", "txt", "=", "open", "(", "VERSION_FILE", ")", ".", "read", "(", ")", "mo", "=", "re", ".", "search", "(", "r'^__version__ = [\\'\"]([^\\'\"]*)[\\'\"]'", ",", "txt", ",", "re", ".", "M", ")", "if", "mo", ":", "version", "=", "mo", ".", "group", "(", "1", ")", "bs_version", "=", "os", ".", "environ", ".", "get", "(", "'MODULEVER'", ",", "'0.0'", ")", "assert", "bs_version", "==", "\"0.0\"", "or", "bs_version", "==", "version", ",", "\"Version {} specified by the build system doesn't match {} in \"", "\"version.py\"", ".", "format", "(", "bs_version", ",", "version", ")", "return", "version", "else", ":", "raise", "RuntimeError", "(", "'Unable to find version string in {0}.'", ".", "format", "(", "VERSION_FILE", ")", ")" ]
Extracts the version number from the version.py file.
[ "Extracts", "the", "version", "number", "from", "the", "version", ".", "py", "file", "." ]
python
train
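A small sketch of the regular expression used by get_version() above, run on a stand-alone string so the extraction step can be checked without the surrounding package; the file contents are invented.

import re

txt = "__version__ = '1.2.3'\n"   # hypothetical contents of version.py
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', txt, re.M)
print(mo.group(1))                # 1.2.3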
boronine/discipline
discipline/models.py
https://github.com/boronine/discipline/blob/68bea9bc2198cc91cee49a6e2d0f3333cc9bf476/discipline/models.py#L621-L633
def _get_modcommit(self, key): """Return the last modcommit of the given field. If no modcommit exists (for example after a migration that created new fields) returns None. """ try: return ModificationCommit.objects.filter( object_uid = self.uid, key = key, action__id__lte = self.step ).order_by("-action__id")[0] except IndexError: return None
[ "def", "_get_modcommit", "(", "self", ",", "key", ")", ":", "try", ":", "return", "ModificationCommit", ".", "objects", ".", "filter", "(", "object_uid", "=", "self", ".", "uid", ",", "key", "=", "key", ",", "action__id__lte", "=", "self", ".", "step", ")", ".", "order_by", "(", "\"-action__id\"", ")", "[", "0", "]", "except", "IndexError", ":", "return", "None" ]
Return the last modcommit of the given field. If no modcommit exists (for example after a migration that created new fields) returns None.
[ "Return", "the", "last", "modcommit", "of", "the", "given", "field", ".", "If", "no", "modcommit", "exists", "(", "for", "example", "after", "a", "migration", "that", "created", "new", "fields", ")", "returns", "None", "." ]
python
train
quintusdias/glymur
glymur/lib/openjpeg.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/lib/openjpeg.py#L477-L484
def decode(dinfo, cio): """Wrapper for opj_decode. """ argtypes = [ctypes.POINTER(DecompressionInfoType), ctypes.POINTER(CioType)] OPENJPEG.opj_decode.argtypes = argtypes OPENJPEG.opj_decode.restype = ctypes.POINTER(ImageType) image = OPENJPEG.opj_decode(dinfo, cio) return image
[ "def", "decode", "(", "dinfo", ",", "cio", ")", ":", "argtypes", "=", "[", "ctypes", ".", "POINTER", "(", "DecompressionInfoType", ")", ",", "ctypes", ".", "POINTER", "(", "CioType", ")", "]", "OPENJPEG", ".", "opj_decode", ".", "argtypes", "=", "argtypes", "OPENJPEG", ".", "opj_decode", ".", "restype", "=", "ctypes", ".", "POINTER", "(", "ImageType", ")", "image", "=", "OPENJPEG", ".", "opj_decode", "(", "dinfo", ",", "cio", ")", "return", "image" ]
Wrapper for opj_decode.
[ "Wrapper", "for", "opj_decode", "." ]
python
train
rosenbrockc/fortpy
fortpy/scripts/analyze.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L645-L651
def _print_map_dict(self, argkey, filename, append): """Prints a dictionary that has variable => value mappings.""" result = [] skeys = list(sorted(self.curargs[argkey].keys())) for key in skeys: result.append("'{}' => {}".format(key, self.curargs[argkey][key])) self._redirect_output('\n'.join(result), filename, append, msg.info)
[ "def", "_print_map_dict", "(", "self", ",", "argkey", ",", "filename", ",", "append", ")", ":", "result", "=", "[", "]", "skeys", "=", "list", "(", "sorted", "(", "self", ".", "curargs", "[", "argkey", "]", ".", "keys", "(", ")", ")", ")", "for", "key", "in", "skeys", ":", "result", ".", "append", "(", "\"'{}' => {}\"", ".", "format", "(", "key", ",", "self", ".", "curargs", "[", "argkey", "]", "[", "key", "]", ")", ")", "self", ".", "_redirect_output", "(", "'\\n'", ".", "join", "(", "result", ")", ",", "filename", ",", "append", ",", "msg", ".", "info", ")" ]
Prints a dictionary that has variable => value mappings.
[ "Prints", "a", "dictionary", "that", "has", "variable", "=", ">", "value", "mappings", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/schema.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/schema.py#L1139-L1151
def index_lists_equal(a: List[Index], b: List[Index]) -> bool: """ Are all indexes in list ``a`` equal to their counterparts in list ``b``, as per :func:`indexes_equal`? """ n = len(a) if len(b) != n: return False for i in range(n): if not indexes_equal(a[i], b[i]): log.debug("Mismatch: {!r} != {!r}", a[i], b[i]) return False return True
[ "def", "index_lists_equal", "(", "a", ":", "List", "[", "Index", "]", ",", "b", ":", "List", "[", "Index", "]", ")", "->", "bool", ":", "n", "=", "len", "(", "a", ")", "if", "len", "(", "b", ")", "!=", "n", ":", "return", "False", "for", "i", "in", "range", "(", "n", ")", ":", "if", "not", "indexes_equal", "(", "a", "[", "i", "]", ",", "b", "[", "i", "]", ")", ":", "log", ".", "debug", "(", "\"Mismatch: {!r} != {!r}\"", ",", "a", "[", "i", "]", ",", "b", "[", "i", "]", ")", "return", "False", "return", "True" ]
Are all indexes in list ``a`` equal to their counterparts in list ``b``, as per :func:`indexes_equal`?
[ "Are", "all", "indexes", "in", "list", "a", "equal", "to", "their", "counterparts", "in", "list", "b", "as", "per", ":", "func", ":", "indexes_equal", "?" ]
python
train
annoviko/pyclustering
pyclustering/cluster/fcm.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/fcm.py#L204-L213
def __process_by_ccore(self): """! @brief Performs cluster analysis using C/C++ implementation. """ result = wrapper.fcm_algorithm(self.__data, self.__centers, self.__m, self.__tolerance, self.__itermax) self.__clusters = result[wrapper.fcm_package_indexer.INDEX_CLUSTERS] self.__centers = result[wrapper.fcm_package_indexer.INDEX_CENTERS] self.__membership = result[wrapper.fcm_package_indexer.INDEX_MEMBERSHIP]
[ "def", "__process_by_ccore", "(", "self", ")", ":", "result", "=", "wrapper", ".", "fcm_algorithm", "(", "self", ".", "__data", ",", "self", ".", "__centers", ",", "self", ".", "__m", ",", "self", ".", "__tolerance", ",", "self", ".", "__itermax", ")", "self", ".", "__clusters", "=", "result", "[", "wrapper", ".", "fcm_package_indexer", ".", "INDEX_CLUSTERS", "]", "self", ".", "__centers", "=", "result", "[", "wrapper", ".", "fcm_package_indexer", ".", "INDEX_CENTERS", "]", "self", ".", "__membership", "=", "result", "[", "wrapper", ".", "fcm_package_indexer", ".", "INDEX_MEMBERSHIP", "]" ]
! @brief Performs cluster analysis using C/C++ implementation.
[ "!" ]
python
valid
fastly/fastly-py
fastly/models.py
https://github.com/fastly/fastly-py/blob/f336551c368b3ae44d6b5690913f9e6799d1a83f/fastly/models.py#L85-L97
def version(self): """ Create a new version under this service. """ ver = Version() ver.conn = self.conn ver.attrs = { # Parent params 'service_id': self.attrs['id'], } ver.save() return ver
[ "def", "version", "(", "self", ")", ":", "ver", "=", "Version", "(", ")", "ver", ".", "conn", "=", "self", ".", "conn", "ver", ".", "attrs", "=", "{", "# Parent params", "'service_id'", ":", "self", ".", "attrs", "[", "'id'", "]", ",", "}", "ver", ".", "save", "(", ")", "return", "ver" ]
Create a new version under this service.
[ "Create", "a", "new", "version", "under", "this", "service", "." ]
python
valid
IDSIA/sacred
sacred/arg_parser.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/arg_parser.py#L78-L109
def _format_options_usage(options): """ Format the Options-part of the usage text. Parameters ---------- options : list[sacred.commandline_options.CommandLineOption] A list of all supported commandline options. Returns ------- str Text formatted as a description for the commandline options """ options_usage = "" for op in options: short, long = op.get_flags() if op.arg: flag = "{short} {arg} {long}={arg}".format( short=short, long=long, arg=op.arg) else: flag = "{short} {long}".format(short=short, long=long) wrapped_description = textwrap.wrap(inspect.cleandoc(op.__doc__), width=79, initial_indent=' ' * 32, subsequent_indent=' ' * 32) wrapped_description = "\n".join(wrapped_description).strip() options_usage += " {0:28} {1}\n".format(flag, wrapped_description) return options_usage
[ "def", "_format_options_usage", "(", "options", ")", ":", "options_usage", "=", "\"\"", "for", "op", "in", "options", ":", "short", ",", "long", "=", "op", ".", "get_flags", "(", ")", "if", "op", ".", "arg", ":", "flag", "=", "\"{short} {arg} {long}={arg}\"", ".", "format", "(", "short", "=", "short", ",", "long", "=", "long", ",", "arg", "=", "op", ".", "arg", ")", "else", ":", "flag", "=", "\"{short} {long}\"", ".", "format", "(", "short", "=", "short", ",", "long", "=", "long", ")", "wrapped_description", "=", "textwrap", ".", "wrap", "(", "inspect", ".", "cleandoc", "(", "op", ".", "__doc__", ")", ",", "width", "=", "79", ",", "initial_indent", "=", "' '", "*", "32", ",", "subsequent_indent", "=", "' '", "*", "32", ")", "wrapped_description", "=", "\"\\n\"", ".", "join", "(", "wrapped_description", ")", ".", "strip", "(", ")", "options_usage", "+=", "\" {0:28} {1}\\n\"", ".", "format", "(", "flag", ",", "wrapped_description", ")", "return", "options_usage" ]
Format the Options-part of the usage text. Parameters ---------- options : list[sacred.commandline_options.CommandLineOption] A list of all supported commandline options. Returns ------- str Text formatted as a description for the commandline options
[ "Format", "the", "Options", "-", "part", "of", "the", "usage", "text", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/ClientFactory.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ClientFactory.py#L376-L382
def create_equipamento_roteiro(self): """Get an instance of equipamento_roteiro services facade.""" return EquipamentoRoteiro( self.networkapi_url, self.user, self.password, self.user_ldap)
[ "def", "create_equipamento_roteiro", "(", "self", ")", ":", "return", "EquipamentoRoteiro", "(", "self", ".", "networkapi_url", ",", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "user_ldap", ")" ]
Get an instance of equipamento_roteiro services facade.
[ "Get", "an", "instance", "of", "equipamento_roteiro", "services", "facade", "." ]
python
train
b3j0f/schema
b3j0f/schema/lang/factory.py
https://github.com/b3j0f/schema/blob/1c88c23337f5fef50254e65bd407112c43396dd9/b3j0f/schema/lang/factory.py#L129-L151
def getschemacls(self, resource, besteffort=True): """Get schema class related to input resource. :param resource: resource from which get schema class. :param bool besteffort: if True (default) try a best effort in parsing the inheritance tree of resource if resource is a class. :rtype: type """ result = None if hasattr(resource, 'mro') and besteffort and isclass(resource): resources = list(resource.mro()) else: resources = [resource] for _resource in resources: if _resource in self._schemasbyresource: result = self._schemasbyresource[_resource] break return result
[ "def", "getschemacls", "(", "self", ",", "resource", ",", "besteffort", "=", "True", ")", ":", "result", "=", "None", "if", "hasattr", "(", "resource", ",", "'mro'", ")", "and", "besteffort", "and", "isclass", "(", "resource", ")", ":", "resources", "=", "list", "(", "resource", ".", "mro", "(", ")", ")", "else", ":", "resources", "=", "[", "resource", "]", "for", "_resource", "in", "resources", ":", "if", "_resource", "in", "self", ".", "_schemasbyresource", ":", "result", "=", "self", ".", "_schemasbyresource", "[", "_resource", "]", "break", "return", "result" ]
Get schema class related to input resource. :param resource: resource from which get schema class. :param bool besteffort: if True (default) try a best effort in parsing the inheritance tree of resource if resource is a class. :rtype: type
[ "Get", "schema", "class", "related", "to", "input", "resource", "." ]
python
train
bwohlberg/sporco
sporco/admm/ccmod.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/ccmod.py#L768-L802
def xstep(self): r"""Minimise Augmented Lagrangian with respect to block vector :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T & \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`. """ # This test reflects empirical evidence that two slightly # different implementations are faster for single or # multi-channel data. This kludge is intended to be temporary. if self.cri.Cd > 1: for i in range(self.Nb): self.xistep(i) else: self.YU[:] = self.Y[..., np.newaxis] - self.U b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \ + self.rho*sl.rfftn(self.YU, None, self.cri.axisN) for i in range(self.Nb): self.Xf[..., i] = sl.solvedbi_sm( self.Zf[..., [i], :], self.rho, b[..., i], axis=self.cri.axisM) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True) YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1) b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN) Xf = self.swapaxes(self.Xf) Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM) ZHop = lambda x: np.conj(self.Zf) * x ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK, keepdims=True) self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
[ "def", "xstep", "(", "self", ")", ":", "# This test reflects empirical evidence that two slightly", "# different implementations are faster for single or", "# multi-channel data. This kludge is intended to be temporary.", "if", "self", ".", "cri", ".", "Cd", ">", "1", ":", "for", "i", "in", "range", "(", "self", ".", "Nb", ")", ":", "self", ".", "xistep", "(", "i", ")", "else", ":", "self", ".", "YU", "[", ":", "]", "=", "self", ".", "Y", "[", "...", ",", "np", ".", "newaxis", "]", "-", "self", ".", "U", "b", "=", "np", ".", "swapaxes", "(", "self", ".", "ZSf", "[", "...", ",", "np", ".", "newaxis", "]", ",", "self", ".", "cri", ".", "axisK", ",", "-", "1", ")", "+", "self", ".", "rho", "*", "sl", ".", "rfftn", "(", "self", ".", "YU", ",", "None", ",", "self", ".", "cri", ".", "axisN", ")", "for", "i", "in", "range", "(", "self", ".", "Nb", ")", ":", "self", ".", "Xf", "[", "...", ",", "i", "]", "=", "sl", ".", "solvedbi_sm", "(", "self", ".", "Zf", "[", "...", ",", "[", "i", "]", ",", ":", "]", ",", "self", ".", "rho", ",", "b", "[", "...", ",", "i", "]", ",", "axis", "=", "self", ".", "cri", ".", "axisM", ")", "self", ".", "X", "=", "sl", ".", "irfftn", "(", "self", ".", "Xf", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")", "if", "self", ".", "opt", "[", "'LinSolveCheck'", "]", ":", "ZSfs", "=", "np", ".", "sum", "(", "self", ".", "ZSf", ",", "axis", "=", "self", ".", "cri", ".", "axisK", ",", "keepdims", "=", "True", ")", "YU", "=", "np", ".", "sum", "(", "self", ".", "Y", "[", "...", ",", "np", ".", "newaxis", "]", "-", "self", ".", "U", ",", "axis", "=", "-", "1", ")", "b", "=", "ZSfs", "+", "self", ".", "rho", "*", "sl", ".", "rfftn", "(", "YU", ",", "None", ",", "self", ".", "cri", ".", "axisN", ")", "Xf", "=", "self", ".", "swapaxes", "(", "self", ".", "Xf", ")", "Zop", "=", "lambda", "x", ":", "sl", ".", "inner", "(", "self", ".", "Zf", ",", "x", ",", "axis", "=", "self", ".", "cri", ".", "axisM", ")", "ZHop", "=", "lambda", "x", ":", "np", ".", "conj", "(", "self", ".", "Zf", ")", "*", "x", "ax", "=", "np", ".", "sum", "(", "ZHop", "(", "Zop", "(", "Xf", ")", ")", "+", "self", ".", "rho", "*", "Xf", ",", "axis", "=", "self", ".", "cri", ".", "axisK", ",", "keepdims", "=", "True", ")", "self", ".", "xrrs", "=", "sl", ".", "rrs", "(", "ax", ",", "b", ")", "else", ":", "self", ".", "xrrs", "=", "None" ]
r"""Minimise Augmented Lagrangian with respect to block vector :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T & \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
[ "r", "Minimise", "Augmented", "Lagrangian", "with", "respect", "to", "block", "vector", ":", "math", ":", "\\", "mathbf", "{", "x", "}", "=", "\\", "left", "(", "\\", "begin", "{", "array", "}", "{", "ccc", "}", "\\", "mathbf", "{", "x", "}", "_0^T", "&", "\\", "mathbf", "{", "x", "}", "_1^T", "&", "\\", "ldots", "\\", "end", "{", "array", "}", "\\", "right", ")", "^T", "\\", ";", "." ]
python
train
cloud-custodian/cloud-custodian
c7n/utils.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/utils.py#L449-L466
def reformat_schema(model): """ Reformat schema to be in a more displayable format. """ if not hasattr(model, 'schema'): return "Model '{}' does not have a schema".format(model) if 'properties' not in model.schema: return "Schema in unexpected format." ret = copy.deepcopy(model.schema['properties']) if 'type' in ret: del(ret['type']) for key in model.schema.get('required', []): if key in ret: ret[key]['required'] = True return ret
[ "def", "reformat_schema", "(", "model", ")", ":", "if", "not", "hasattr", "(", "model", ",", "'schema'", ")", ":", "return", "\"Model '{}' does not have a schema\"", ".", "format", "(", "model", ")", "if", "'properties'", "not", "in", "model", ".", "schema", ":", "return", "\"Schema in unexpected format.\"", "ret", "=", "copy", ".", "deepcopy", "(", "model", ".", "schema", "[", "'properties'", "]", ")", "if", "'type'", "in", "ret", ":", "del", "(", "ret", "[", "'type'", "]", ")", "for", "key", "in", "model", ".", "schema", ".", "get", "(", "'required'", ",", "[", "]", ")", ":", "if", "key", "in", "ret", ":", "ret", "[", "key", "]", "[", "'required'", "]", "=", "True", "return", "ret" ]
Reformat schema to be in a more displayable format.
[ "Reformat", "schema", "to", "be", "in", "a", "more", "displayable", "format", "." ]
python
train
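A usage sketch for reformat_schema() above, using a throw-away stand-in object instead of a real Cloud Custodian filter/action model and assuming the function is importable (e.g. from c7n.utils); the schema dict is invented but follows the structure the function expects (properties plus an optional required list).

class FakeModel:
    # hypothetical model: only the .schema attribute matters to reformat_schema()
    schema = {
        "type": "object",
        "properties": {
            "type": {"enum": ["mark-for-op"]},
            "op": {"type": "string"},
            "days": {"type": "number"},
        },
        "required": ["op"],
    }

print(reformat_schema(FakeModel))
# -> {'op': {'type': 'string', 'required': True}, 'days': {'type': 'number'}}
#    (the property literally named 'type' is dropped, and required keys are flagged)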
reorx/torext
torext/handlers/base.py
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L56-L68
def log_request(handler): """ Logging request is opposite to response, sometime its necessary, feel free to enable it. """ block = 'Request Infomations:\n' + _format_headers_log(handler.request.headers) if handler.request.arguments: block += '+----Arguments----+\n' for k, v in handler.request.arguments.items(): block += '| {0:<15} | {1:<15} \n'.format(repr(k), repr(v)) app_log.info(block)
[ "def", "log_request", "(", "handler", ")", ":", "block", "=", "'Request Infomations:\\n'", "+", "_format_headers_log", "(", "handler", ".", "request", ".", "headers", ")", "if", "handler", ".", "request", ".", "arguments", ":", "block", "+=", "'+----Arguments----+\\n'", "for", "k", ",", "v", "in", "handler", ".", "request", ".", "arguments", ".", "items", "(", ")", ":", "block", "+=", "'| {0:<15} | {1:<15} \\n'", ".", "format", "(", "repr", "(", "k", ")", ",", "repr", "(", "v", ")", ")", "app_log", ".", "info", "(", "block", ")" ]
Logging request is opposite to response, sometime its necessary, feel free to enable it.
[ "Logging", "request", "is", "opposite", "to", "response", "sometime", "its", "necessary", "feel", "free", "to", "enable", "it", "." ]
python
train
datacats/datacats
datacats/docker.py
https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/docker.py#L318-L334
def remove_container(name, force=False): """ Wrapper for docker remove_container :returns: True if container was found and removed """ try: if not force: _get_docker().stop(name) except APIError: pass try: _get_docker().remove_container(name, force=True) return True except APIError: return False
[ "def", "remove_container", "(", "name", ",", "force", "=", "False", ")", ":", "try", ":", "if", "not", "force", ":", "_get_docker", "(", ")", ".", "stop", "(", "name", ")", "except", "APIError", ":", "pass", "try", ":", "_get_docker", "(", ")", ".", "remove_container", "(", "name", ",", "force", "=", "True", ")", "return", "True", "except", "APIError", ":", "return", "False" ]
Wrapper for docker remove_container :returns: True if container was found and removed
[ "Wrapper", "for", "docker", "remove_container" ]
python
train
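A usage sketch for remove_container() above; it assumes a Docker daemon is reachable and uses a placeholder container name — the return value only reports whether removal succeeded.

# force=False first asks the daemon to stop the container, then removes it
if remove_container("datacats_web_myproject", force=False):   # placeholder name
    print("container removed")
else:
    print("container not found or could not be removed")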
ska-sa/purr
Purr/Plugins/local_pychart/font.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/font.py#L475-L478
def get_align(text): "Return (halign, valign, angle) of the <text>." (x1, x2, h, v, a) = unaligned_get_dimension(text) return (h, v, a)
[ "def", "get_align", "(", "text", ")", ":", "(", "x1", ",", "x2", ",", "h", ",", "v", ",", "a", ")", "=", "unaligned_get_dimension", "(", "text", ")", "return", "(", "h", ",", "v", ",", "a", ")" ]
Return (halign, valign, angle) of the <text>.
[ "Return", "(", "halign", "valign", "angle", ")", "of", "the", "<text", ">", "." ]
python
train
Saledddar/pyunet
pyunet/util.py
https://github.com/Saledddar/pyunet/blob/ca5ccc32588fae8da43f968e7747d3f3da509507/pyunet/util.py#L20-L31
def add_to_path(p): ''' Adds a path to python paths and removes it after the 'with' block ends ''' old_path = sys.path if p not in sys.path: sys.path = sys.path[:] sys.path.insert(0, p) try: yield finally: sys.path = old_path
[ "def", "add_to_path", "(", "p", ")", ":", "old_path", "=", "sys", ".", "path", "if", "p", "not", "in", "sys", ".", "path", ":", "sys", ".", "path", "=", "sys", ".", "path", "[", ":", "]", "sys", ".", "path", ".", "insert", "(", "0", ",", "p", ")", "try", ":", "yield", "finally", ":", "sys", ".", "path", "=", "old_path" ]
Adds a path to python paths and removes it after the 'with' block ends
[ "Adds", "a", "path", "to", "python", "paths", "and", "removes", "it", "after", "the", "with", "block", "ends" ]
python
train
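A usage sketch for add_to_path() above. As written the function is a generator; the docstring's 'with' semantics suggest it is wrapped with contextlib.contextmanager in the original module, so the sketch applies that decorator explicitly as an assumption, and the path is a placeholder.

import sys
from contextlib import contextmanager

add_to_path_cm = contextmanager(add_to_path)   # assumption: decorator applied elsewhere in the source

with add_to_path_cm("/tmp/extra_libs"):
    print("/tmp/extra_libs" in sys.path)   # True inside the block
print("/tmp/extra_libs" in sys.path)       # False again afterwards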
sampottinger/pycotracer
pycotracer/retrieval.py
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/retrieval.py#L115-L137
def get_report_interpreted(year, report_type): """Download, exract, and interpret a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. @rtype: Iterable over dict """ if not is_valid_report_type(report_type): msg = '%s is not a valid report type.' % report_type raise ValueError(msg) raw_report = get_report_raw(year, report_type) interpreter = REPORT_TYPE_INTERPRETERS[report_type] return interpreter(raw_report)
[ "def", "get_report_interpreted", "(", "year", ",", "report_type", ")", ":", "if", "not", "is_valid_report_type", "(", "report_type", ")", ":", "msg", "=", "'%s is not a valid report type.'", "%", "report_type", "raise", "ValueError", "(", "msg", ")", "raw_report", "=", "get_report_raw", "(", "year", ",", "report_type", ")", "interpreter", "=", "REPORT_TYPE_INTERPRETERS", "[", "report_type", "]", "return", "interpreter", "(", "raw_report", ")" ]
Download, exract, and interpret a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. @rtype: Iterable over dict
[ "Download", "exract", "and", "interpret", "a", "CO", "-", "TRACER", "report", "." ]
python
train
KnowledgeLinks/rdfframework
rdfframework/connections/fedoracommons.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/connections/fedoracommons.py#L70-L100
def check_status(self): """ tests both the ext_url and local_url to see if the database is running returns: True if a connection can be made False if the connection cannot me made """ log = logging.getLogger("%s.%s" % (self.log_name, inspect.stack()[0][3])) log.setLevel(self.log_level) if self.url: return True try: result = requests.get(self.ext_url) self.url = self.ext_url return True except requests.exceptions.ConnectionError: pass try: result = requests.get(self.local_url) log.warning("Url '%s' not connecting. Using local_url '%s'" % \ (self.ext_url, self.local_url)) self.url = self.local_url return True except requests.exceptions.ConnectionError: self.url = None log.warning("Unable to connect using urls: %s" % set([self.ext_url, self.local_url])) return False
[ "def", "check_status", "(", "self", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "\"%s.%s\"", "%", "(", "self", ".", "log_name", ",", "inspect", ".", "stack", "(", ")", "[", "0", "]", "[", "3", "]", ")", ")", "log", ".", "setLevel", "(", "self", ".", "log_level", ")", "if", "self", ".", "url", ":", "return", "True", "try", ":", "result", "=", "requests", ".", "get", "(", "self", ".", "ext_url", ")", "self", ".", "url", "=", "self", ".", "ext_url", "return", "True", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "pass", "try", ":", "result", "=", "requests", ".", "get", "(", "self", ".", "local_url", ")", "log", ".", "warning", "(", "\"Url '%s' not connecting. Using local_url '%s'\"", "%", "(", "self", ".", "ext_url", ",", "self", ".", "local_url", ")", ")", "self", ".", "url", "=", "self", ".", "local_url", "return", "True", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "self", ".", "url", "=", "None", "log", ".", "warning", "(", "\"Unable to connect using urls: %s\"", "%", "set", "(", "[", "self", ".", "ext_url", ",", "self", ".", "local_url", "]", ")", ")", "return", "False" ]
tests both the ext_url and local_url to see if the database is running returns: True if a connection can be made False if the connection cannot me made
[ "tests", "both", "the", "ext_url", "and", "local_url", "to", "see", "if", "the", "database", "is", "running" ]
python
train
nickmckay/LiPD-utilities
Python/lipd/jsons.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/jsons.py#L357-L380
def _export_section(sections, pc): """ Switch chron data to index-by-number :param dict sections: Metadata :return list _sections: Metadata """ logger_jsons.info("enter export_data: {}".format(pc)) _sections = [] for name, section in sections.items(): # Process chron models if "model" in section: section["model"] = _export_model(section["model"]) # Process the chron measurement table if "measurementTable" in section: section["measurementTable"] = _idx_table_by_num(section["measurementTable"]) # Add only the table to the output list _sections.append(section) logger_jsons.info("exit export_data: {}".format(pc)) return _sections
[ "def", "_export_section", "(", "sections", ",", "pc", ")", ":", "logger_jsons", ".", "info", "(", "\"enter export_data: {}\"", ".", "format", "(", "pc", ")", ")", "_sections", "=", "[", "]", "for", "name", ",", "section", "in", "sections", ".", "items", "(", ")", ":", "# Process chron models", "if", "\"model\"", "in", "section", ":", "section", "[", "\"model\"", "]", "=", "_export_model", "(", "section", "[", "\"model\"", "]", ")", "# Process the chron measurement table", "if", "\"measurementTable\"", "in", "section", ":", "section", "[", "\"measurementTable\"", "]", "=", "_idx_table_by_num", "(", "section", "[", "\"measurementTable\"", "]", ")", "# Add only the table to the output list", "_sections", ".", "append", "(", "section", ")", "logger_jsons", ".", "info", "(", "\"exit export_data: {}\"", ".", "format", "(", "pc", ")", ")", "return", "_sections" ]
Switch chron data to index-by-number :param dict sections: Metadata :return list _sections: Metadata
[ "Switch", "chron", "data", "to", "index", "-", "by", "-", "number", ":", "param", "dict", "sections", ":", "Metadata", ":", "return", "list", "_sections", ":", "Metadata" ]
python
train
KelSolaar/Umbra
umbra/ui/widgets/codeEditor_QPlainTextEdit.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/codeEditor_QPlainTextEdit.py#L1066-L1102
def get_matching_symbols_pairs(self, cursor, opening_symbol, closing_symbol, backward=False): """ Returns the cursor for matching given symbols pairs. :param cursor: Cursor to match from. :type cursor: QTextCursor :param opening_symbol: Opening symbol. :type opening_symbol: unicode :param closing_symbol: Closing symbol to match. :type closing_symbol: unicode :return: Matching cursor. :rtype: QTextCursor """ if cursor.hasSelection(): start_position = cursor.selectionEnd() if backward else cursor.selectionStart() else: start_position = cursor.position() flags = QTextDocument.FindFlags() if backward: flags = flags | QTextDocument.FindBackward start_cursor = previous_start_cursor = cursor.document().find(opening_symbol, start_position, flags) end_cursor = previous_end_cursor = cursor.document().find(closing_symbol, start_position, flags) if backward: while start_cursor > end_cursor: start_cursor = cursor.document().find(opening_symbol, start_cursor.selectionStart(), flags) if start_cursor > end_cursor: end_cursor = cursor.document().find(closing_symbol, end_cursor.selectionStart(), flags) else: while start_cursor < end_cursor: start_cursor = cursor.document().find(opening_symbol, start_cursor.selectionEnd(), flags) if start_cursor < end_cursor: end_cursor = cursor.document().find(closing_symbol, end_cursor.selectionEnd(), flags) return end_cursor if end_cursor.position() != -1 else previous_end_cursor
[ "def", "get_matching_symbols_pairs", "(", "self", ",", "cursor", ",", "opening_symbol", ",", "closing_symbol", ",", "backward", "=", "False", ")", ":", "if", "cursor", ".", "hasSelection", "(", ")", ":", "start_position", "=", "cursor", ".", "selectionEnd", "(", ")", "if", "backward", "else", "cursor", ".", "selectionStart", "(", ")", "else", ":", "start_position", "=", "cursor", ".", "position", "(", ")", "flags", "=", "QTextDocument", ".", "FindFlags", "(", ")", "if", "backward", ":", "flags", "=", "flags", "|", "QTextDocument", ".", "FindBackward", "start_cursor", "=", "previous_start_cursor", "=", "cursor", ".", "document", "(", ")", ".", "find", "(", "opening_symbol", ",", "start_position", ",", "flags", ")", "end_cursor", "=", "previous_end_cursor", "=", "cursor", ".", "document", "(", ")", ".", "find", "(", "closing_symbol", ",", "start_position", ",", "flags", ")", "if", "backward", ":", "while", "start_cursor", ">", "end_cursor", ":", "start_cursor", "=", "cursor", ".", "document", "(", ")", ".", "find", "(", "opening_symbol", ",", "start_cursor", ".", "selectionStart", "(", ")", ",", "flags", ")", "if", "start_cursor", ">", "end_cursor", ":", "end_cursor", "=", "cursor", ".", "document", "(", ")", ".", "find", "(", "closing_symbol", ",", "end_cursor", ".", "selectionStart", "(", ")", ",", "flags", ")", "else", ":", "while", "start_cursor", "<", "end_cursor", ":", "start_cursor", "=", "cursor", ".", "document", "(", ")", ".", "find", "(", "opening_symbol", ",", "start_cursor", ".", "selectionEnd", "(", ")", ",", "flags", ")", "if", "start_cursor", "<", "end_cursor", ":", "end_cursor", "=", "cursor", ".", "document", "(", ")", ".", "find", "(", "closing_symbol", ",", "end_cursor", ".", "selectionEnd", "(", ")", ",", "flags", ")", "return", "end_cursor", "if", "end_cursor", ".", "position", "(", ")", "!=", "-", "1", "else", "previous_end_cursor" ]
Returns the cursor for matching given symbols pairs. :param cursor: Cursor to match from. :type cursor: QTextCursor :param opening_symbol: Opening symbol. :type opening_symbol: unicode :param closing_symbol: Closing symbol to match. :type closing_symbol: unicode :return: Matching cursor. :rtype: QTextCursor
[ "Returns", "the", "cursor", "for", "matching", "given", "symbols", "pairs", "." ]
python
train
scdoshi/django-bits
bits/gis.py
https://github.com/scdoshi/django-bits/blob/0a2f4fd9374d2a8acb8df9a7b83eebcf2782256f/bits/gis.py#L55-L60
def change_in_longitude(lat, miles): """Given a latitude and a distance west, return the change in longitude.""" # Find the radius of a circle around the earth at given latitude. r = earth_radius * math.cos(lat * degrees_to_radians) return (miles / r) * radians_to_degrees
[ "def", "change_in_longitude", "(", "lat", ",", "miles", ")", ":", "# Find the radius of a circle around the earth at given latitude.", "r", "=", "earth_radius", "*", "math", ".", "cos", "(", "lat", "*", "degrees_to_radians", ")", "return", "(", "miles", "/", "r", ")", "*", "radians_to_degrees" ]
Given a latitude and a distance west, return the change in longitude.
[ "Given", "a", "latitude", "and", "a", "distance", "west", "return", "the", "change", "in", "longitude", "." ]
python
train
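A back-of-the-envelope sketch for change_in_longitude() above. The module-level constants are not shown in the record, so the sketch supplies plausible stand-ins (earth radius in miles and degree/radian conversion factors) purely to make the arithmetic runnable.

import math

# assumed module constants (not shown in the record above)
earth_radius = 3960.0                    # miles, approximate
degrees_to_radians = math.pi / 180.0
radians_to_degrees = 180.0 / math.pi

def change_in_longitude(lat, miles):
    """Given a latitude and a distance west, return the change in longitude."""
    r = earth_radius * math.cos(lat * degrees_to_radians)
    return (miles / r) * radians_to_degrees

print(round(change_in_longitude(45.0, 50.0), 2))   # roughly 1.02 degrees at 45 N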
pytroll/trollimage
trollimage/colormap.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/colormap.py#L30-L54
def colorize(arr, colors, values): """Colorize a monochromatic array *arr*, based *colors* given for *values*. Interpolation is used. *values* must be in ascending order. """ hcolors = np.array([rgb2hcl(*i[:3]) for i in colors]) # unwrap colormap in hcl space hcolors[:, 0] = np.rad2deg(np.unwrap(np.deg2rad(np.array(hcolors)[:, 0]))) channels = [np.interp(arr, np.array(values), np.array(hcolors)[:, i]) for i in range(3)] channels = list(hcl2rgb(*channels)) rest = [np.interp(arr, np.array(values), np.array(colors)[:, i + 3]) for i in range(np.array(colors).shape[1] - 3)] channels.extend(rest) try: return [np.ma.array(channel, mask=arr.mask) for channel in channels] except AttributeError: return channels
[ "def", "colorize", "(", "arr", ",", "colors", ",", "values", ")", ":", "hcolors", "=", "np", ".", "array", "(", "[", "rgb2hcl", "(", "*", "i", "[", ":", "3", "]", ")", "for", "i", "in", "colors", "]", ")", "# unwrap colormap in hcl space", "hcolors", "[", ":", ",", "0", "]", "=", "np", ".", "rad2deg", "(", "np", ".", "unwrap", "(", "np", ".", "deg2rad", "(", "np", ".", "array", "(", "hcolors", ")", "[", ":", ",", "0", "]", ")", ")", ")", "channels", "=", "[", "np", ".", "interp", "(", "arr", ",", "np", ".", "array", "(", "values", ")", ",", "np", ".", "array", "(", "hcolors", ")", "[", ":", ",", "i", "]", ")", "for", "i", "in", "range", "(", "3", ")", "]", "channels", "=", "list", "(", "hcl2rgb", "(", "*", "channels", ")", ")", "rest", "=", "[", "np", ".", "interp", "(", "arr", ",", "np", ".", "array", "(", "values", ")", ",", "np", ".", "array", "(", "colors", ")", "[", ":", ",", "i", "+", "3", "]", ")", "for", "i", "in", "range", "(", "np", ".", "array", "(", "colors", ")", ".", "shape", "[", "1", "]", "-", "3", ")", "]", "channels", ".", "extend", "(", "rest", ")", "try", ":", "return", "[", "np", ".", "ma", ".", "array", "(", "channel", ",", "mask", "=", "arr", ".", "mask", ")", "for", "channel", "in", "channels", "]", "except", "AttributeError", ":", "return", "channels" ]
Colorize a monochromatic array *arr*, based *colors* given for *values*. Interpolation is used. *values* must be in ascending order.
[ "Colorize", "a", "monochromatic", "array", "*", "arr", "*", "based", "*", "colors", "*", "given", "for", "*", "values", "*", ".", "Interpolation", "is", "used", ".", "*", "values", "*", "must", "be", "in", "ascending", "order", "." ]
python
train
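The full implementation round-trips through HCL space (rgb2hcl / hcl2rgb, which are trollimage internals) to avoid hue artifacts and re-applies any mask at the end. A stripped-down sketch of the core idea, interpolating each channel directly with numpy.interp:

import numpy as np

def colorize_rgb(arr, colors, values):
    # Interpolate each colour channel independently over the control values.
    colors = np.asarray(colors, dtype=float)
    values = np.asarray(values, dtype=float)
    return [np.interp(arr, values, colors[:, i]) for i in range(colors.shape[1])]

data = np.array([0.0, 0.25, 0.5, 1.0])
channels = colorize_rgb(data, colors=[(0, 0, 0), (1, 1, 1)], values=[0.0, 1.0])
print(channels[0])  # red channel ramps linearly from 0 to 1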
Dallinger/Dallinger
dallinger/data.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/data.py#L53-L92
def find_experiment_export(app_id): """Attempt to find a zipped export of an experiment with the ID provided and return its path. Returns None if not found. Search order: 1. local "data" subdirectory 2. user S3 bucket 3. Dallinger S3 bucket """ # Check locally first cwd = os.getcwd() data_filename = "{}-data.zip".format(app_id) path_to_data = os.path.join(cwd, "data", data_filename) if os.path.exists(path_to_data): try: Data(path_to_data) except IOError: from dallinger import logger logger.exception( "Error reading local data file {}, checking remote.".format( path_to_data ) ) else: return path_to_data # Get remote file instead path_to_data = os.path.join(tempfile.mkdtemp(), data_filename) buckets = [user_s3_bucket(), dallinger_s3_bucket()] for bucket in buckets: try: bucket.download_file(data_filename, path_to_data) except botocore.exceptions.ClientError: pass else: return path_to_data
[ "def", "find_experiment_export", "(", "app_id", ")", ":", "# Check locally first", "cwd", "=", "os", ".", "getcwd", "(", ")", "data_filename", "=", "\"{}-data.zip\"", ".", "format", "(", "app_id", ")", "path_to_data", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "\"data\"", ",", "data_filename", ")", "if", "os", ".", "path", ".", "exists", "(", "path_to_data", ")", ":", "try", ":", "Data", "(", "path_to_data", ")", "except", "IOError", ":", "from", "dallinger", "import", "logger", "logger", ".", "exception", "(", "\"Error reading local data file {}, checking remote.\"", ".", "format", "(", "path_to_data", ")", ")", "else", ":", "return", "path_to_data", "# Get remote file instead", "path_to_data", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "mkdtemp", "(", ")", ",", "data_filename", ")", "buckets", "=", "[", "user_s3_bucket", "(", ")", ",", "dallinger_s3_bucket", "(", ")", "]", "for", "bucket", "in", "buckets", ":", "try", ":", "bucket", ".", "download_file", "(", "data_filename", ",", "path_to_data", ")", "except", "botocore", ".", "exceptions", ".", "ClientError", ":", "pass", "else", ":", "return", "path_to_data" ]
Attempt to find a zipped export of an experiment with the ID provided and return its path. Returns None if not found. Search order: 1. local "data" subdirectory 2. user S3 bucket 3. Dallinger S3 bucket
[ "Attempt", "to", "find", "a", "zipped", "export", "of", "an", "experiment", "with", "the", "ID", "provided", "and", "return", "its", "path", ".", "Returns", "None", "if", "not", "found", "." ]
python
train
MartinThoma/hwrt
hwrt/serve.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/serve.py#L122-L161
def worker(): """Implement a worker for write-math.com.""" global n global use_segmenter_flag if request.method == 'POST': raw_data_json = request.form['classify'] try: secret_uuid = request.form['secret'] except: logging.info("No secret uuid given. Create one.") secret_uuid = str(uuid.uuid4()) # Check recording try: json.loads(raw_data_json) except ValueError: return "Invalid JSON string: %s" % raw_data_json # Classify if use_segmenter_flag: strokelist = json.loads(raw_data_json) beam = utils.get_beam(secret_uuid) if beam is None: beam = se.Beam() for stroke in strokelist: beam.add_stroke(stroke) results = beam.get_results() utils.store_beam(beam, secret_uuid) else: stroke = strokelist[-1] beam.add_stroke(stroke) results = beam.get_results() utils.store_beam(beam, secret_uuid) else: results = classify.classify_segmented_recording(raw_data_json) return get_json_result(results, n=n) else: # Page where the user can enter a recording return "Classification Worker (Version %s)" % hwrt.__version__
[ "def", "worker", "(", ")", ":", "global", "n", "global", "use_segmenter_flag", "if", "request", ".", "method", "==", "'POST'", ":", "raw_data_json", "=", "request", ".", "form", "[", "'classify'", "]", "try", ":", "secret_uuid", "=", "request", ".", "form", "[", "'secret'", "]", "except", ":", "logging", ".", "info", "(", "\"No secret uuid given. Create one.\"", ")", "secret_uuid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "# Check recording", "try", ":", "json", ".", "loads", "(", "raw_data_json", ")", "except", "ValueError", ":", "return", "\"Invalid JSON string: %s\"", "%", "raw_data_json", "# Classify", "if", "use_segmenter_flag", ":", "strokelist", "=", "json", ".", "loads", "(", "raw_data_json", ")", "beam", "=", "utils", ".", "get_beam", "(", "secret_uuid", ")", "if", "beam", "is", "None", ":", "beam", "=", "se", ".", "Beam", "(", ")", "for", "stroke", "in", "strokelist", ":", "beam", ".", "add_stroke", "(", "stroke", ")", "results", "=", "beam", ".", "get_results", "(", ")", "utils", ".", "store_beam", "(", "beam", ",", "secret_uuid", ")", "else", ":", "stroke", "=", "strokelist", "[", "-", "1", "]", "beam", ".", "add_stroke", "(", "stroke", ")", "results", "=", "beam", ".", "get_results", "(", ")", "utils", ".", "store_beam", "(", "beam", ",", "secret_uuid", ")", "else", ":", "results", "=", "classify", ".", "classify_segmented_recording", "(", "raw_data_json", ")", "return", "get_json_result", "(", "results", ",", "n", "=", "n", ")", "else", ":", "# Page where the user can enter a recording", "return", "\"Classification Worker (Version %s)\"", "%", "hwrt", ".", "__version__" ]
Implement a worker for write-math.com.
[ "Implement", "a", "worker", "for", "write", "-", "math", ".", "com", "." ]
python
train
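On POST the view reads a JSON stroke list from the classify form field plus an optional secret UUID. A hedged client-side sketch using the requests package; the host, port, and route path are assumptions, since the record shows only the view function and not its route decorator:

import json
import requests  # assumes the requests package is installed

url = "http://localhost:5000/worker"   # assumed route; not shown in the record
strokes = [[{"x": 10, "y": 20, "time": 0}, {"x": 12, "y": 25, "time": 30}]]
payload = {
    "classify": json.dumps(strokes),   # must be valid JSON or the server echoes an error
    "secret": "0f8fad5b-d9cb-469f-a165-70867728950e",  # optional; the server creates one if absent
}
response = requests.post(url, data=payload)
print(response.text)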
cmap/cmapPy
cmapPy/pandasGEXpress/parse_gctx.py
https://github.com/cmap/cmapPy/blob/59d833b64fd2c3a494cdf67fe1eb11fc8008bf76/cmapPy/pandasGEXpress/parse_gctx.py#L23-L126
def parse(gctx_file_path, convert_neg_666=True, rid=None, cid=None, ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False): """ Primary method of script. Reads in path to a gctx file and parses into GCToo object. Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not (see Note below for more details on this). Default = False. - rid (list of strings): list of row ids to specifically keep from gctx. Default=None. - cid (list of strings): list of col ids to specifically keep from gctx. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Output: - myGCToo (GCToo): A GCToo instance containing content of parsed gctx file. Note: if meta_only = True, this will be a GCToo instance where the data_df is empty, i.e. data_df = pd.DataFrame(index=rids, columns = cids) Note: why does convert_neg_666 exist? - In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.NaN values, the pandas default. """ full_path = os.path.expanduser(gctx_file_path) # Verify that the path exists if not os.path.exists(full_path): err_msg = "The given path to the gctx file cannot be found. 
full_path: {}" logger.error(err_msg.format(full_path)) raise Exception(err_msg.format(full_path)) logger.info("Reading GCTX: {}".format(full_path)) # open file gctx_file = h5py.File(full_path, "r") if row_meta_only: # read in row metadata row_dset = gctx_file[row_meta_group_node] row_meta = parse_metadata_df("row", row_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, None) gctx_file.close() # subset if specified, then return row_meta = row_meta.iloc[sorted_ridx] return row_meta elif col_meta_only: # read in col metadata col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, None, col_meta) gctx_file.close() # subset if specified, then return col_meta = col_meta.iloc[sorted_cidx] return col_meta else: # read in row metadata row_dset = gctx_file[row_meta_group_node] row_meta = parse_metadata_df("row", row_dset, convert_neg_666) # read in col metadata col_dset = gctx_file[col_meta_group_node] col_meta = parse_metadata_df("col", col_dset, convert_neg_666) # validate optional input ids & get indexes to subset by (sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, col_meta) data_dset = gctx_file[data_node] data_df = parse_data_df(data_dset, sorted_ridx, sorted_cidx, row_meta, col_meta) # (if subsetting) subset metadata row_meta = row_meta.iloc[sorted_ridx] col_meta = col_meta.iloc[sorted_cidx] # get version my_version = gctx_file.attrs[version_node] if type(my_version) == np.ndarray: my_version = my_version[0] gctx_file.close() # make GCToo instance my_gctoo = GCToo.GCToo(data_df=data_df, row_metadata_df=row_meta, col_metadata_df=col_meta, src=full_path, version=my_version, make_multiindex=make_multiindex) return my_gctoo
[ "def", "parse", "(", "gctx_file_path", ",", "convert_neg_666", "=", "True", ",", "rid", "=", "None", ",", "cid", "=", "None", ",", "ridx", "=", "None", ",", "cidx", "=", "None", ",", "row_meta_only", "=", "False", ",", "col_meta_only", "=", "False", ",", "make_multiindex", "=", "False", ")", ":", "full_path", "=", "os", ".", "path", ".", "expanduser", "(", "gctx_file_path", ")", "# Verify that the path exists", "if", "not", "os", ".", "path", ".", "exists", "(", "full_path", ")", ":", "err_msg", "=", "\"The given path to the gctx file cannot be found. full_path: {}\"", "logger", ".", "error", "(", "err_msg", ".", "format", "(", "full_path", ")", ")", "raise", "Exception", "(", "err_msg", ".", "format", "(", "full_path", ")", ")", "logger", ".", "info", "(", "\"Reading GCTX: {}\"", ".", "format", "(", "full_path", ")", ")", "# open file", "gctx_file", "=", "h5py", ".", "File", "(", "full_path", ",", "\"r\"", ")", "if", "row_meta_only", ":", "# read in row metadata", "row_dset", "=", "gctx_file", "[", "row_meta_group_node", "]", "row_meta", "=", "parse_metadata_df", "(", "\"row\"", ",", "row_dset", ",", "convert_neg_666", ")", "# validate optional input ids & get indexes to subset by", "(", "sorted_ridx", ",", "sorted_cidx", ")", "=", "check_and_order_id_inputs", "(", "rid", ",", "ridx", ",", "cid", ",", "cidx", ",", "row_meta", ",", "None", ")", "gctx_file", ".", "close", "(", ")", "# subset if specified, then return", "row_meta", "=", "row_meta", ".", "iloc", "[", "sorted_ridx", "]", "return", "row_meta", "elif", "col_meta_only", ":", "# read in col metadata", "col_dset", "=", "gctx_file", "[", "col_meta_group_node", "]", "col_meta", "=", "parse_metadata_df", "(", "\"col\"", ",", "col_dset", ",", "convert_neg_666", ")", "# validate optional input ids & get indexes to subset by", "(", "sorted_ridx", ",", "sorted_cidx", ")", "=", "check_and_order_id_inputs", "(", "rid", ",", "ridx", ",", "cid", ",", "cidx", ",", "None", ",", "col_meta", ")", "gctx_file", ".", "close", "(", ")", "# subset if specified, then return", "col_meta", "=", "col_meta", ".", "iloc", "[", "sorted_cidx", "]", "return", "col_meta", "else", ":", "# read in row metadata", "row_dset", "=", "gctx_file", "[", "row_meta_group_node", "]", "row_meta", "=", "parse_metadata_df", "(", "\"row\"", ",", "row_dset", ",", "convert_neg_666", ")", "# read in col metadata", "col_dset", "=", "gctx_file", "[", "col_meta_group_node", "]", "col_meta", "=", "parse_metadata_df", "(", "\"col\"", ",", "col_dset", ",", "convert_neg_666", ")", "# validate optional input ids & get indexes to subset by", "(", "sorted_ridx", ",", "sorted_cidx", ")", "=", "check_and_order_id_inputs", "(", "rid", ",", "ridx", ",", "cid", ",", "cidx", ",", "row_meta", ",", "col_meta", ")", "data_dset", "=", "gctx_file", "[", "data_node", "]", "data_df", "=", "parse_data_df", "(", "data_dset", ",", "sorted_ridx", ",", "sorted_cidx", ",", "row_meta", ",", "col_meta", ")", "# (if subsetting) subset metadata", "row_meta", "=", "row_meta", ".", "iloc", "[", "sorted_ridx", "]", "col_meta", "=", "col_meta", ".", "iloc", "[", "sorted_cidx", "]", "# get version", "my_version", "=", "gctx_file", ".", "attrs", "[", "version_node", "]", "if", "type", "(", "my_version", ")", "==", "np", ".", "ndarray", ":", "my_version", "=", "my_version", "[", "0", "]", "gctx_file", ".", "close", "(", ")", "# make GCToo instance", "my_gctoo", "=", "GCToo", ".", "GCToo", "(", "data_df", "=", "data_df", ",", "row_metadata_df", "=", "row_meta", ",", "col_metadata_df", "=", "col_meta", ",", "src", "=", 
"full_path", ",", "version", "=", "my_version", ",", "make_multiindex", "=", "make_multiindex", ")", "return", "my_gctoo" ]
Primary method of script. Reads in path to a gctx file and parses into GCToo object. Input: Mandatory: - gctx_file_path (str): full path to gctx file you want to parse. Optional: - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not (see Note below for more details on this). Default = False. - rid (list of strings): list of row ids to specifically keep from gctx. Default=None. - cid (list of strings): list of col ids to specifically keep from gctx. Default=None. - ridx (list of integers): only read the rows corresponding to this list of integer ids. Default=None. - cidx (list of integers): only read the columns corresponding to this list of integer ids. Default=None. - row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True) as pandas DataFrame - col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True) as pandas DataFrame - make_multiindex (bool): whether to create a multi-index df combining the 3 component dfs Output: - myGCToo (GCToo): A GCToo instance containing content of parsed gctx file. Note: if meta_only = True, this will be a GCToo instance where the data_df is empty, i.e. data_df = pd.DataFrame(index=rids, columns = cids) Note: why does convert_neg_666 exist? - In CMap--for somewhat obscure historical reasons--we use "-666" as our null value for metadata. However (so that users can take full advantage of pandas' methods, including those for filtering nan's etc) we provide the option of converting these into numpy.NaN values, the pandas default.
[ "Primary", "method", "of", "script", ".", "Reads", "in", "path", "to", "a", "gctx", "file", "and", "parses", "into", "GCToo", "object", "." ]
python
train
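A usage sketch that follows the docstring above; the file name and probe-set ids are hypothetical, used only to show the keyword arguments in context:

from cmapPy.pandasGEXpress.parse_gctx import parse

# Read a subset of rows and convert -666 metadata values to NaN.
my_gctoo = parse("example_n10x5.gctx",
                 rid=["200814_at", "218597_s_at"],
                 convert_neg_666=True)
print(my_gctoo.data_df.shape)

# Metadata-only reads skip the data matrix entirely and return a DataFrame.
col_meta = parse("example_n10x5.gctx", col_meta_only=True)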
jjjake/internetarchive
internetarchive/session.py
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/session.py#L247-L268
def get_metadata(self, identifier, request_kwargs=None): """Get an item's metadata from the `Metadata API <http://blog.archive.org/2013/07/04/metadata-api/>`__ :type identifier: str :param identifier: Globally unique Archive.org identifier. :rtype: dict :returns: Metadat API response. """ request_kwargs = {} if not request_kwargs else request_kwargs url = '{0}//archive.org/metadata/{1}'.format(self.protocol, identifier) if 'timeout' not in request_kwargs: request_kwargs['timeout'] = 12 try: resp = self.get(url, **request_kwargs) resp.raise_for_status() except Exception as exc: error_msg = 'Error retrieving metadata from {0}, {1}'.format(url, exc) logger.error(error_msg) raise type(exc)(error_msg) return resp.json()
[ "def", "get_metadata", "(", "self", ",", "identifier", ",", "request_kwargs", "=", "None", ")", ":", "request_kwargs", "=", "{", "}", "if", "not", "request_kwargs", "else", "request_kwargs", "url", "=", "'{0}//archive.org/metadata/{1}'", ".", "format", "(", "self", ".", "protocol", ",", "identifier", ")", "if", "'timeout'", "not", "in", "request_kwargs", ":", "request_kwargs", "[", "'timeout'", "]", "=", "12", "try", ":", "resp", "=", "self", ".", "get", "(", "url", ",", "*", "*", "request_kwargs", ")", "resp", ".", "raise_for_status", "(", ")", "except", "Exception", "as", "exc", ":", "error_msg", "=", "'Error retrieving metadata from {0}, {1}'", ".", "format", "(", "url", ",", "exc", ")", "logger", ".", "error", "(", "error_msg", ")", "raise", "type", "(", "exc", ")", "(", "error_msg", ")", "return", "resp", ".", "json", "(", ")" ]
Get an item's metadata from the `Metadata API <http://blog.archive.org/2013/07/04/metadata-api/>`__ :type identifier: str :param identifier: Globally unique Archive.org identifier. :rtype: dict :returns: Metadata API response.
[ "Get", "an", "item", "s", "metadata", "from", "the", "Metadata", "API", "<http", ":", "//", "blog", ".", "archive", ".", "org", "/", "2013", "/", "07", "/", "04", "/", "metadata", "-", "api", "/", ">", "__" ]
python
train
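A hedged usage sketch assuming an ArchiveSession obtained through the package's get_session helper; 'nasa' is a well-known public identifier used purely for illustration:

from internetarchive import get_session

session = get_session()
# request_kwargs is forwarded to the underlying HTTP call, as in the record.
metadata = session.get_metadata("nasa", request_kwargs={"timeout": 30})
print(metadata.get("metadata", {}).get("title"))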
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L1695-L1704
def collection(self, attribute): """Returns the collection corresponding the attribute name.""" return { "dependencies": self.dependencies, "publics": self.publics, "members": self.members, "types": self.types, "executables": self.executables, "interfaces": self.interfaces }[attribute]
[ "def", "collection", "(", "self", ",", "attribute", ")", ":", "return", "{", "\"dependencies\"", ":", "self", ".", "dependencies", ",", "\"publics\"", ":", "self", ".", "publics", ",", "\"members\"", ":", "self", ".", "members", ",", "\"types\"", ":", "self", ".", "types", ",", "\"executables\"", ":", "self", ".", "executables", ",", "\"interfaces\"", ":", "self", ".", "interfaces", "}", "[", "attribute", "]" ]
Returns the collection corresponding the attribute name.
[ "Returns", "the", "collection", "corresponding", "the", "attribute", "name", "." ]
python
train
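The method is a plain dict dispatch: the mapping is rebuilt on every call and indexing it with an unknown name raises KeyError. The same pattern in isolation, with hypothetical attributes:

class Module:
    def __init__(self):
        self.members = ["a", "b"]
        self.types = ["T"]

    def collection(self, attribute):
        # Unknown attribute names fall through to a KeyError.
        return {
            "members": self.members,
            "types": self.types,
        }[attribute]

print(Module().collection("types"))  # ['T']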
nikcub/floyd
floyd/db/__init__.py
https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/db/__init__.py#L201-L205
def sort_by(self, sb): """Sort results""" self._dataset = self._dataset.sort(key=lambda x: x.pubdate, reverse=True) return self
[ "def", "sort_by", "(", "self", ",", "sb", ")", ":", "self", ".", "_dataset", "=", "self", ".", "_dataset", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "pubdate", ",", "reverse", "=", "True", ")", "return", "self" ]
Sort results
[ "Sort", "results" ]
python
train
PedalPi/Raspberry-Physical
physical/liquidcristal/liquid_crystal.py
https://github.com/PedalPi/Raspberry-Physical/blob/3dc71b6997ef36d0de256c5db7a1b38178937fd5/physical/liquidcristal/liquid_crystal.py#L185-L188
def blink(self, status): """Turn blink cursor visibility on/off""" self._display_control = ByteUtil.apply_flag(self._display_control, Command.BLINKING_ON, status) self.command(self._display_control)
[ "def", "blink", "(", "self", ",", "status", ")", ":", "self", ".", "_display_control", "=", "ByteUtil", ".", "apply_flag", "(", "self", ".", "_display_control", ",", "Command", ".", "BLINKING_ON", ",", "status", ")", "self", ".", "command", "(", "self", ".", "_display_control", ")" ]
Turn blink cursor visibility on/off
[ "Turn", "blink", "cursor", "visibility", "on", "/", "off" ]
python
train
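ByteUtil.apply_flag and Command.BLINKING_ON are library internals not shown in the record. A plausible stand-in that sets or clears one bit of the display-control byte, using conventional HD44780 flag values purely for illustration:

BLINKING_ON = 0x01          # illustrative flag value from the HD44780 command set

def apply_flag(register, flag, status):
    # Set the flag bit when status is truthy, clear it otherwise.
    return register | flag if status else register & ~flag

control = 0x0C              # display on, cursor off, blink off
control = apply_flag(control, BLINKING_ON, True)
print(bin(control))         # 0b1101 -> blink bit now set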
dmlc/gluon-nlp
scripts/question_answering/data_processing.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/question_answering/data_processing.py#L130-L142
def get_word_level_vocab(self): """Provides word level vocabulary Returns ------- Vocab Word level vocabulary """ def simple_tokenize(source_str, token_delim=' ', seq_delim='\n'): return list(filter(None, re.split(token_delim + '|' + seq_delim, source_str))) return VocabProvider._create_squad_vocab(simple_tokenize, self._dataset)
[ "def", "get_word_level_vocab", "(", "self", ")", ":", "def", "simple_tokenize", "(", "source_str", ",", "token_delim", "=", "' '", ",", "seq_delim", "=", "'\\n'", ")", ":", "return", "list", "(", "filter", "(", "None", ",", "re", ".", "split", "(", "token_delim", "+", "'|'", "+", "seq_delim", ",", "source_str", ")", ")", ")", "return", "VocabProvider", ".", "_create_squad_vocab", "(", "simple_tokenize", ",", "self", ".", "_dataset", ")" ]
Provides word level vocabulary Returns ------- Vocab Word level vocabulary
[ "Provides", "word", "level", "vocabulary" ]
python
train
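The inner tokenizer is reproduced verbatim below so its behaviour can be seen in isolation: it splits on the token and sequence delimiters and drops the empty strings produced by consecutive delimiters.

import re

def simple_tokenize(source_str, token_delim=' ', seq_delim='\n'):
    # Split on spaces and newlines; filter(None, ...) removes empty tokens.
    return list(filter(None, re.split(token_delim + '|' + seq_delim, source_str)))

print(simple_tokenize("Where is  the\nquestion mark?"))
# ['Where', 'is', 'the', 'question', 'mark?']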
nicolargo/glances
glances/plugins/glances_memswap.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_memswap.py#L60-L128
def update(self): """Update swap memory stats using the input method.""" # Init new stats stats = self.get_init_value() if self.input_method == 'local': # Update stats using the standard system lib # Grab SWAP using the psutil swap_memory method sm_stats = psutil.swap_memory() # Get all the swap stats (copy/paste of the psutil documentation) # total: total swap memory in bytes # used: used swap memory in bytes # free: free swap memory in bytes # percent: the percentage usage # sin: the number of bytes the system has swapped in from disk (cumulative) # sout: the number of bytes the system has swapped out from disk # (cumulative) for swap in ['total', 'used', 'free', 'percent', 'sin', 'sout']: if hasattr(sm_stats, swap): stats[swap] = getattr(sm_stats, swap) elif self.input_method == 'snmp': # Update stats using SNMP if self.short_system_name == 'windows': # Mem stats for Windows OS are stored in the FS table try: fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name], bulk=True) except KeyError: self.reset() else: for fs in fs_stat: # The virtual memory concept is used by the operating # system to extend (virtually) the physical memory and # thus to run more programs by swapping unused memory # zone (page) to a disk file. if fs == 'Virtual Memory': stats['total'] = int( fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit']) stats['used'] = int( fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit']) stats['percent'] = float( stats['used'] * 100 / stats['total']) stats['free'] = stats['total'] - stats['used'] break else: stats = self.get_stats_snmp(snmp_oid=snmp_oid['default']) if stats['total'] == '': self.reset() return stats for key in iterkeys(stats): if stats[key] != '': stats[key] = float(stats[key]) * 1024 # used=total-free stats['used'] = stats['total'] - stats['free'] # percent: the percentage usage calculated as (total - # available) / total * 100. stats['percent'] = float( (stats['total'] - stats['free']) / stats['total'] * 100) # Update the stats self.stats = stats return self.stats
[ "def", "update", "(", "self", ")", ":", "# Init new stats", "stats", "=", "self", ".", "get_init_value", "(", ")", "if", "self", ".", "input_method", "==", "'local'", ":", "# Update stats using the standard system lib", "# Grab SWAP using the psutil swap_memory method", "sm_stats", "=", "psutil", ".", "swap_memory", "(", ")", "# Get all the swap stats (copy/paste of the psutil documentation)", "# total: total swap memory in bytes", "# used: used swap memory in bytes", "# free: free swap memory in bytes", "# percent: the percentage usage", "# sin: the number of bytes the system has swapped in from disk (cumulative)", "# sout: the number of bytes the system has swapped out from disk", "# (cumulative)", "for", "swap", "in", "[", "'total'", ",", "'used'", ",", "'free'", ",", "'percent'", ",", "'sin'", ",", "'sout'", "]", ":", "if", "hasattr", "(", "sm_stats", ",", "swap", ")", ":", "stats", "[", "swap", "]", "=", "getattr", "(", "sm_stats", ",", "swap", ")", "elif", "self", ".", "input_method", "==", "'snmp'", ":", "# Update stats using SNMP", "if", "self", ".", "short_system_name", "==", "'windows'", ":", "# Mem stats for Windows OS are stored in the FS table", "try", ":", "fs_stat", "=", "self", ".", "get_stats_snmp", "(", "snmp_oid", "=", "snmp_oid", "[", "self", ".", "short_system_name", "]", ",", "bulk", "=", "True", ")", "except", "KeyError", ":", "self", ".", "reset", "(", ")", "else", ":", "for", "fs", "in", "fs_stat", ":", "# The virtual memory concept is used by the operating", "# system to extend (virtually) the physical memory and", "# thus to run more programs by swapping unused memory", "# zone (page) to a disk file.", "if", "fs", "==", "'Virtual Memory'", ":", "stats", "[", "'total'", "]", "=", "int", "(", "fs_stat", "[", "fs", "]", "[", "'size'", "]", ")", "*", "int", "(", "fs_stat", "[", "fs", "]", "[", "'alloc_unit'", "]", ")", "stats", "[", "'used'", "]", "=", "int", "(", "fs_stat", "[", "fs", "]", "[", "'used'", "]", ")", "*", "int", "(", "fs_stat", "[", "fs", "]", "[", "'alloc_unit'", "]", ")", "stats", "[", "'percent'", "]", "=", "float", "(", "stats", "[", "'used'", "]", "*", "100", "/", "stats", "[", "'total'", "]", ")", "stats", "[", "'free'", "]", "=", "stats", "[", "'total'", "]", "-", "stats", "[", "'used'", "]", "break", "else", ":", "stats", "=", "self", ".", "get_stats_snmp", "(", "snmp_oid", "=", "snmp_oid", "[", "'default'", "]", ")", "if", "stats", "[", "'total'", "]", "==", "''", ":", "self", ".", "reset", "(", ")", "return", "stats", "for", "key", "in", "iterkeys", "(", "stats", ")", ":", "if", "stats", "[", "key", "]", "!=", "''", ":", "stats", "[", "key", "]", "=", "float", "(", "stats", "[", "key", "]", ")", "*", "1024", "# used=total-free", "stats", "[", "'used'", "]", "=", "stats", "[", "'total'", "]", "-", "stats", "[", "'free'", "]", "# percent: the percentage usage calculated as (total -", "# available) / total * 100.", "stats", "[", "'percent'", "]", "=", "float", "(", "(", "stats", "[", "'total'", "]", "-", "stats", "[", "'free'", "]", ")", "/", "stats", "[", "'total'", "]", "*", "100", ")", "# Update the stats", "self", ".", "stats", "=", "stats", "return", "self", ".", "stats" ]
Update swap memory stats using the input method.
[ "Update", "swap", "memory", "stats", "using", "the", "input", "method", "." ]
python
train
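For the 'local' input method the whole update reduces to copying attributes off psutil's swap_memory() named tuple; a minimal standalone check of that branch:

import psutil

# The 'local' branch above boils down to copying these attributes.
sm = psutil.swap_memory()
for field in ("total", "used", "free", "percent", "sin", "sout"):
    if hasattr(sm, field):
        print(field, getattr(sm, field))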
sosreport/sos
sos/plugins/networking.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/plugins/networking.py#L58-L64
def collect_ip6table(self, tablename): """ Same as function above, but for ipv6 """ modname = "ip6table_"+tablename if self.check_ext_prog("grep -q %s /proc/modules" % modname): cmd = "ip6tables -t "+tablename+" -nvL" self.add_cmd_output(cmd)
[ "def", "collect_ip6table", "(", "self", ",", "tablename", ")", ":", "modname", "=", "\"ip6table_\"", "+", "tablename", "if", "self", ".", "check_ext_prog", "(", "\"grep -q %s /proc/modules\"", "%", "modname", ")", ":", "cmd", "=", "\"ip6tables -t \"", "+", "tablename", "+", "\" -nvL\"", "self", ".", "add_cmd_output", "(", "cmd", ")" ]
Same as function above, but for ipv6
[ "Same", "as", "function", "above", "but", "for", "ipv6" ]
python
train
nchopin/particles
particles/hilbert.py
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/hilbert.py#L17-L33
def hilbert_array(xint): """Compute Hilbert indices. Parameters ---------- xint: (N, d) int numpy.ndarray Returns ------- h: (N,) int numpy.ndarray Hilbert indices """ N, d = xint.shape h = np.zeros(N, int64) for n in range(N): h[n] = Hilbert_to_int(xint[n, :]) return h
[ "def", "hilbert_array", "(", "xint", ")", ":", "N", ",", "d", "=", "xint", ".", "shape", "h", "=", "np", ".", "zeros", "(", "N", ",", "int64", ")", "for", "n", "in", "range", "(", "N", ")", ":", "h", "[", "n", "]", "=", "Hilbert_to_int", "(", "xint", "[", "n", ",", ":", "]", ")", "return", "h" ]
Compute Hilbert indices. Parameters ---------- xint: (N, d) int numpy.ndarray Returns ------- h: (N,) int numpy.ndarray Hilbert indices
[ "Compute", "Hilbert", "indices", "." ]
python
train
klen/graphite-beacon
graphite_beacon/alerts.py
https://github.com/klen/graphite-beacon/blob/c1f071e9f557693bc90f6acbc314994985dc3b77/graphite_beacon/alerts.py#L184-L199
def evaluate_rule(self, rule, value, target): """Calculate the value.""" def evaluate(expr): if expr in LOGICAL_OPERATORS.values(): return expr rvalue = self.get_value_for_expr(expr, target) if rvalue is None: return False # ignore this result return expr['op'](value, rvalue) evaluated = [evaluate(expr) for expr in rule['exprs']] while len(evaluated) > 1: lhs, logical_op, rhs = (evaluated.pop(0) for _ in range(3)) evaluated.insert(0, logical_op(lhs, rhs)) return evaluated[0]
[ "def", "evaluate_rule", "(", "self", ",", "rule", ",", "value", ",", "target", ")", ":", "def", "evaluate", "(", "expr", ")", ":", "if", "expr", "in", "LOGICAL_OPERATORS", ".", "values", "(", ")", ":", "return", "expr", "rvalue", "=", "self", ".", "get_value_for_expr", "(", "expr", ",", "target", ")", "if", "rvalue", "is", "None", ":", "return", "False", "# ignore this result", "return", "expr", "[", "'op'", "]", "(", "value", ",", "rvalue", ")", "evaluated", "=", "[", "evaluate", "(", "expr", ")", "for", "expr", "in", "rule", "[", "'exprs'", "]", "]", "while", "len", "(", "evaluated", ")", ">", "1", ":", "lhs", ",", "logical_op", ",", "rhs", "=", "(", "evaluated", ".", "pop", "(", "0", ")", "for", "_", "in", "range", "(", "3", ")", ")", "evaluated", ".", "insert", "(", "0", ",", "logical_op", "(", "lhs", ",", "rhs", ")", ")", "return", "evaluated", "[", "0", "]" ]
Calculate the value.
[ "Calculate", "the", "value", "." ]
python
train
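After every expression is evaluated, the list alternates booleans with logical operators and is reduced left to right, three items at a time. A standalone demo of that reduction, using the operator module in place of the alert's LOGICAL_OPERATORS:

import operator

# exprs already evaluated: (True AND False) OR True
evaluated = [True, operator.and_, False, operator.or_, True]
while len(evaluated) > 1:
    lhs, logical_op, rhs = (evaluated.pop(0) for _ in range(3))
    evaluated.insert(0, logical_op(lhs, rhs))
print(evaluated[0])   # True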
miLibris/flask-rest-jsonapi
flask_rest_jsonapi/querystring.py
https://github.com/miLibris/flask-rest-jsonapi/blob/ecc8f2cd2b54cc0bfae7acd6cffcda0ba1140c43/flask_rest_jsonapi/querystring.py#L159-L185
def sorting(self): """Return fields to sort by including sort name for SQLAlchemy and row sort parameter for other ORMs :return list: a list of sorting information Example of return value:: [ {'field': 'created_at', 'order': 'desc'}, ] """ if self.qs.get('sort'): sorting_results = [] for sort_field in self.qs['sort'].split(','): field = sort_field.replace('-', '') if field not in self.schema._declared_fields: raise InvalidSort("{} has no attribute {}".format(self.schema.__name__, field)) if field in get_relationships(self.schema): raise InvalidSort("You can't sort on {} because it is a relationship field".format(field)) field = get_model_field(self.schema, field) order = 'desc' if sort_field.startswith('-') else 'asc' sorting_results.append({'field': field, 'order': order}) return sorting_results return []
[ "def", "sorting", "(", "self", ")", ":", "if", "self", ".", "qs", ".", "get", "(", "'sort'", ")", ":", "sorting_results", "=", "[", "]", "for", "sort_field", "in", "self", ".", "qs", "[", "'sort'", "]", ".", "split", "(", "','", ")", ":", "field", "=", "sort_field", ".", "replace", "(", "'-'", ",", "''", ")", "if", "field", "not", "in", "self", ".", "schema", ".", "_declared_fields", ":", "raise", "InvalidSort", "(", "\"{} has no attribute {}\"", ".", "format", "(", "self", ".", "schema", ".", "__name__", ",", "field", ")", ")", "if", "field", "in", "get_relationships", "(", "self", ".", "schema", ")", ":", "raise", "InvalidSort", "(", "\"You can't sort on {} because it is a relationship field\"", ".", "format", "(", "field", ")", ")", "field", "=", "get_model_field", "(", "self", ".", "schema", ",", "field", ")", "order", "=", "'desc'", "if", "sort_field", ".", "startswith", "(", "'-'", ")", "else", "'asc'", "sorting_results", ".", "append", "(", "{", "'field'", ":", "field", ",", "'order'", ":", "order", "}", ")", "return", "sorting_results", "return", "[", "]" ]
Return fields to sort by including sort name for SQLAlchemy and row sort parameter for other ORMs :return list: a list of sorting information Example of return value:: [ {'field': 'created_at', 'order': 'desc'}, ]
[ "Return", "fields", "to", "sort", "by", "including", "sort", "name", "for", "SQLAlchemy", "and", "row", "sort", "parameter", "for", "other", "ORMs" ]
python
train
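The parsing step on its own, stripped of the schema and relationship validation, shows how a query string such as sort=-created_at,name becomes the documented list of dicts:

def parse_sort(qs_value):
    # A leading '-' means descending order for that field.
    results = []
    for sort_field in qs_value.split(','):
        field = sort_field.replace('-', '')
        order = 'desc' if sort_field.startswith('-') else 'asc'
        results.append({'field': field, 'order': order})
    return results

print(parse_sort("-created_at,name"))
# [{'field': 'created_at', 'order': 'desc'}, {'field': 'name', 'order': 'asc'}]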
Clinical-Genomics/housekeeper
housekeeper/cli/core.py
https://github.com/Clinical-Genomics/housekeeper/blob/a7d10d327dc9f06274bdef5504ed1b9413f2c8c1/housekeeper/cli/core.py#L17-L22
def base(context, config, database, root, log_level): """Housekeeper - Access your files!""" coloredlogs.install(level=log_level) context.obj = ruamel.yaml.safe_load(config) if config else {} context.obj['database'] = database if database else context.obj['database'] context.obj['root'] = root if root else context.obj['root']
[ "def", "base", "(", "context", ",", "config", ",", "database", ",", "root", ",", "log_level", ")", ":", "coloredlogs", ".", "install", "(", "level", "=", "log_level", ")", "context", ".", "obj", "=", "ruamel", ".", "yaml", ".", "safe_load", "(", "config", ")", "if", "config", "else", "{", "}", "context", ".", "obj", "[", "'database'", "]", "=", "database", "if", "database", "else", "context", ".", "obj", "[", "'database'", "]", "context", ".", "obj", "[", "'root'", "]", "=", "root", "if", "root", "else", "context", ".", "obj", "[", "'root'", "]" ]
Housekeeper - Access your files!
[ "Housekeeper", "-", "Access", "your", "files!" ]
python
train
angr/angr
angr/analyses/forward_analysis.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/forward_analysis.py#L370-L383
def add_job(self, job, merged=False, widened=False): """ Appended a new job to this JobInfo node. :param job: The new job to append. :param bool merged: Whether it is a merged job or not. :param bool widened: Whether it is a widened job or not. """ job_type = '' if merged: job_type = 'merged' elif widened: job_type = 'widened' self.jobs.append((job, job_type))
[ "def", "add_job", "(", "self", ",", "job", ",", "merged", "=", "False", ",", "widened", "=", "False", ")", ":", "job_type", "=", "''", "if", "merged", ":", "job_type", "=", "'merged'", "elif", "widened", ":", "job_type", "=", "'widened'", "self", ".", "jobs", ".", "append", "(", "(", "job", ",", "job_type", ")", ")" ]
Appended a new job to this JobInfo node. :param job: The new job to append. :param bool merged: Whether it is a merged job or not. :param bool widened: Whether it is a widened job or not.
[ "Appended", "a", "new", "job", "to", "this", "JobInfo", "node", ".", ":", "param", "job", ":", "The", "new", "job", "to", "append", ".", ":", "param", "bool", "merged", ":", "Whether", "it", "is", "a", "merged", "job", "or", "not", ".", ":", "param", "bool", "widened", ":", "Whether", "it", "is", "a", "widened", "job", "or", "not", "." ]
python
train
daler/trackhub
trackhub/assembly.py
https://github.com/daler/trackhub/blob/e4655f79177822529f80b923df117e38e28df702/trackhub/assembly.py#L138-L153
def add_params(self, **kw): """ Add [possibly many] parameters to the Assembly. Parameters will be checked against known UCSC parameters and their supported formats. """ for k, v in kw.items(): if k not in self.params: raise ValidationError( '"%s" is not a valid parameter for %s' % (k, self.__class__.__name__)) self.params[k].validate(v) self._orig_kwargs.update(kw) self.kwargs = self._orig_kwargs.copy()
[ "def", "add_params", "(", "self", ",", "*", "*", "kw", ")", ":", "for", "k", ",", "v", "in", "kw", ".", "items", "(", ")", ":", "if", "k", "not", "in", "self", ".", "params", ":", "raise", "ValidationError", "(", "'\"%s\" is not a valid parameter for %s'", "%", "(", "k", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "params", "[", "k", "]", ".", "validate", "(", "v", ")", "self", ".", "_orig_kwargs", ".", "update", "(", "kw", ")", "self", ".", "kwargs", "=", "self", ".", "_orig_kwargs", ".", "copy", "(", ")" ]
Add [possibly many] parameters to the Assembly. Parameters will be checked against known UCSC parameters and their supported formats.
[ "Add", "[", "possibly", "many", "]", "parameters", "to", "the", "Assembly", "." ]
python
train
CodeReclaimers/neat-python
neat/threaded.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/neat/threaded.py#L85-L98
def evaluate(self, genomes, config): """Evaluate the genomes""" if not self.working: self.start() p = 0 for genome_id, genome in genomes: p += 1 self.inqueue.put((genome_id, genome, config)) # assign the fitness back to each genome while p > 0: p -= 1 ignored_genome_id, genome, fitness = self.outqueue.get() genome.fitness = fitness
[ "def", "evaluate", "(", "self", ",", "genomes", ",", "config", ")", ":", "if", "not", "self", ".", "working", ":", "self", ".", "start", "(", ")", "p", "=", "0", "for", "genome_id", ",", "genome", "in", "genomes", ":", "p", "+=", "1", "self", ".", "inqueue", ".", "put", "(", "(", "genome_id", ",", "genome", ",", "config", ")", ")", "# assign the fitness back to each genome", "while", "p", ">", "0", ":", "p", "-=", "1", "ignored_genome_id", ",", "genome", ",", "fitness", "=", "self", ".", "outqueue", ".", "get", "(", ")", "genome", ".", "fitness", "=", "fitness" ]
Evaluate the genomes
[ "Evaluate", "the", "genomes" ]
python
train
gmr/tredis
tredis/client.py
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/client.py#L194-L212
def _on_connected(self, stream_future, connect_future): """Invoked when the socket stream has connected, setting up the stream callbacks and invoking the on connect callback if set. :param stream_future: The connection socket future :type stream_future: :class:`~tornado.concurrent.Future` :param stream_future: The connection response future :type stream_future: :class:`~tornado.concurrent.Future` :raises: :exc:`tredis.exceptions.ConnectError` """ if stream_future.exception(): connect_future.set_exception( exceptions.ConnectError(stream_future.exception())) else: self._stream = stream_future.result() self._stream.set_close_callback(self._on_closed) self.connected = True connect_future.set_result(self)
[ "def", "_on_connected", "(", "self", ",", "stream_future", ",", "connect_future", ")", ":", "if", "stream_future", ".", "exception", "(", ")", ":", "connect_future", ".", "set_exception", "(", "exceptions", ".", "ConnectError", "(", "stream_future", ".", "exception", "(", ")", ")", ")", "else", ":", "self", ".", "_stream", "=", "stream_future", ".", "result", "(", ")", "self", ".", "_stream", ".", "set_close_callback", "(", "self", ".", "_on_closed", ")", "self", ".", "connected", "=", "True", "connect_future", ".", "set_result", "(", "self", ")" ]
Invoked when the socket stream has connected, setting up the stream callbacks and invoking the on connect callback if set. :param stream_future: The connection socket future :type stream_future: :class:`~tornado.concurrent.Future` :param stream_future: The connection response future :type stream_future: :class:`~tornado.concurrent.Future` :raises: :exc:`tredis.exceptions.ConnectError`
[ "Invoked", "when", "the", "socket", "stream", "has", "connected", "setting", "up", "the", "stream", "callbacks", "and", "invoking", "the", "on", "connect", "callback", "if", "set", "." ]
python
train
Azure/azure-cli-extensions
src/sqlvm-preview/azext_sqlvm_preview/custom.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/sqlvm-preview/azext_sqlvm_preview/custom.py#L271-L342
def sqlvm_update(instance, sql_server_license_type=None, enable_auto_patching=None, day_of_week=None, maintenance_window_starting_hour=None, maintenance_window_duration=None, enable_auto_backup=None, enable_encryption=False, retention_period=None, storage_account_url=None, storage_access_key=None, backup_password=None, backup_system_dbs=False, backup_schedule_type=None, full_backup_frequency=None, full_backup_start_time=None, full_backup_window_hours=None, log_backup_frequency=None, enable_key_vault_credential=None, credential_name=None, azure_key_vault_url=None, service_principal_name=None, service_principal_secret=None, connectivity_type=None, port=None, sql_workload_type=None, enable_r_services=None, tags=None): ''' Updates a SQL virtual machine. ''' if tags is not None: instance.tags = tags if sql_server_license_type is not None: instance.sql_server_license_type = sql_server_license_type if (enable_auto_patching is not None or day_of_week is not None or maintenance_window_starting_hour is not None or maintenance_window_duration is not None): enable_auto_patching = enable_auto_patching if enable_auto_patching is False else True instance.auto_patching_settings = AutoPatchingSettings(enable=enable_auto_patching, day_of_week=day_of_week, maintenance_window_starting_hour=maintenance_window_starting_hour, maintenance_window_duration=maintenance_window_duration) if (enable_auto_backup is not None or enable_encryption or retention_period is not None or storage_account_url is not None or storage_access_key is not None or backup_password is not None or backup_system_dbs or backup_schedule_type is not None or full_backup_frequency is not None or full_backup_start_time is not None or full_backup_window_hours is not None or log_backup_frequency is not None): enable_auto_backup = enable_auto_backup if enable_auto_backup is False else True instance.auto_backup_settings = AutoBackupSettings(enable=enable_auto_backup, enable_encryption=enable_encryption if enable_auto_backup else None, retention_period=retention_period, storage_account_url=storage_account_url, storage_access_key=storage_access_key, password=backup_password, backup_system_dbs=backup_system_dbs if enable_auto_backup else None, backup_schedule_type=backup_schedule_type, full_backup_frequency=full_backup_frequency, full_backup_start_time=full_backup_start_time, full_backup_window_hours=full_backup_window_hours, log_backup_frequency=log_backup_frequency) if (enable_key_vault_credential is not None or credential_name is not None or azure_key_vault_url is not None or service_principal_name is not None or service_principal_secret is not None): enable_key_vault_credential = enable_key_vault_credential if enable_key_vault_credential is False else True instance.key_vault_credential_settings = KeyVaultCredentialSettings(enable=enable_key_vault_credential, credential_name=credential_name, service_principal_name=service_principal_name, service_principal_secret=service_principal_secret, azure_key_vault_url=azure_key_vault_url) instance.server_configurations_management_settings = ServerConfigurationsManagementSettings() if (connectivity_type is not None or port is not None): instance.server_configurations_management_settings.sql_connectivity_update_settings = SqlConnectivityUpdateSettings(connectivity_type=connectivity_type, port=port) if sql_workload_type is not None: instance.server_configurations_management_settings.sql_workload_type_update_settings = SqlWorkloadTypeUpdateSettings(sql_workload_type=sql_workload_type) if enable_r_services is not 
None: instance.server_configurations_management_settings.additional_features_server_configurations = AdditionalFeaturesServerConfigurations(is_rservices_enabled=enable_r_services) # If none of the settings was modified, reset server_configurations_management_settings to be null if (instance.server_configurations_management_settings.sql_connectivity_update_settings is None and instance.server_configurations_management_settings.sql_workload_type_update_settings is None and instance.server_configurations_management_settings.sql_storage_update_settings is None and instance.server_configurations_management_settings.additional_features_server_configurations is None): instance.server_configurations_management_settings = None return instance
[ "def", "sqlvm_update", "(", "instance", ",", "sql_server_license_type", "=", "None", ",", "enable_auto_patching", "=", "None", ",", "day_of_week", "=", "None", ",", "maintenance_window_starting_hour", "=", "None", ",", "maintenance_window_duration", "=", "None", ",", "enable_auto_backup", "=", "None", ",", "enable_encryption", "=", "False", ",", "retention_period", "=", "None", ",", "storage_account_url", "=", "None", ",", "storage_access_key", "=", "None", ",", "backup_password", "=", "None", ",", "backup_system_dbs", "=", "False", ",", "backup_schedule_type", "=", "None", ",", "full_backup_frequency", "=", "None", ",", "full_backup_start_time", "=", "None", ",", "full_backup_window_hours", "=", "None", ",", "log_backup_frequency", "=", "None", ",", "enable_key_vault_credential", "=", "None", ",", "credential_name", "=", "None", ",", "azure_key_vault_url", "=", "None", ",", "service_principal_name", "=", "None", ",", "service_principal_secret", "=", "None", ",", "connectivity_type", "=", "None", ",", "port", "=", "None", ",", "sql_workload_type", "=", "None", ",", "enable_r_services", "=", "None", ",", "tags", "=", "None", ")", ":", "if", "tags", "is", "not", "None", ":", "instance", ".", "tags", "=", "tags", "if", "sql_server_license_type", "is", "not", "None", ":", "instance", ".", "sql_server_license_type", "=", "sql_server_license_type", "if", "(", "enable_auto_patching", "is", "not", "None", "or", "day_of_week", "is", "not", "None", "or", "maintenance_window_starting_hour", "is", "not", "None", "or", "maintenance_window_duration", "is", "not", "None", ")", ":", "enable_auto_patching", "=", "enable_auto_patching", "if", "enable_auto_patching", "is", "False", "else", "True", "instance", ".", "auto_patching_settings", "=", "AutoPatchingSettings", "(", "enable", "=", "enable_auto_patching", ",", "day_of_week", "=", "day_of_week", ",", "maintenance_window_starting_hour", "=", "maintenance_window_starting_hour", ",", "maintenance_window_duration", "=", "maintenance_window_duration", ")", "if", "(", "enable_auto_backup", "is", "not", "None", "or", "enable_encryption", "or", "retention_period", "is", "not", "None", "or", "storage_account_url", "is", "not", "None", "or", "storage_access_key", "is", "not", "None", "or", "backup_password", "is", "not", "None", "or", "backup_system_dbs", "or", "backup_schedule_type", "is", "not", "None", "or", "full_backup_frequency", "is", "not", "None", "or", "full_backup_start_time", "is", "not", "None", "or", "full_backup_window_hours", "is", "not", "None", "or", "log_backup_frequency", "is", "not", "None", ")", ":", "enable_auto_backup", "=", "enable_auto_backup", "if", "enable_auto_backup", "is", "False", "else", "True", "instance", ".", "auto_backup_settings", "=", "AutoBackupSettings", "(", "enable", "=", "enable_auto_backup", ",", "enable_encryption", "=", "enable_encryption", "if", "enable_auto_backup", "else", "None", ",", "retention_period", "=", "retention_period", ",", "storage_account_url", "=", "storage_account_url", ",", "storage_access_key", "=", "storage_access_key", ",", "password", "=", "backup_password", ",", "backup_system_dbs", "=", "backup_system_dbs", "if", "enable_auto_backup", "else", "None", ",", "backup_schedule_type", "=", "backup_schedule_type", ",", "full_backup_frequency", "=", "full_backup_frequency", ",", "full_backup_start_time", "=", "full_backup_start_time", ",", "full_backup_window_hours", "=", "full_backup_window_hours", ",", "log_backup_frequency", "=", "log_backup_frequency", ")", "if", "(", 
"enable_key_vault_credential", "is", "not", "None", "or", "credential_name", "is", "not", "None", "or", "azure_key_vault_url", "is", "not", "None", "or", "service_principal_name", "is", "not", "None", "or", "service_principal_secret", "is", "not", "None", ")", ":", "enable_key_vault_credential", "=", "enable_key_vault_credential", "if", "enable_key_vault_credential", "is", "False", "else", "True", "instance", ".", "key_vault_credential_settings", "=", "KeyVaultCredentialSettings", "(", "enable", "=", "enable_key_vault_credential", ",", "credential_name", "=", "credential_name", ",", "service_principal_name", "=", "service_principal_name", ",", "service_principal_secret", "=", "service_principal_secret", ",", "azure_key_vault_url", "=", "azure_key_vault_url", ")", "instance", ".", "server_configurations_management_settings", "=", "ServerConfigurationsManagementSettings", "(", ")", "if", "(", "connectivity_type", "is", "not", "None", "or", "port", "is", "not", "None", ")", ":", "instance", ".", "server_configurations_management_settings", ".", "sql_connectivity_update_settings", "=", "SqlConnectivityUpdateSettings", "(", "connectivity_type", "=", "connectivity_type", ",", "port", "=", "port", ")", "if", "sql_workload_type", "is", "not", "None", ":", "instance", ".", "server_configurations_management_settings", ".", "sql_workload_type_update_settings", "=", "SqlWorkloadTypeUpdateSettings", "(", "sql_workload_type", "=", "sql_workload_type", ")", "if", "enable_r_services", "is", "not", "None", ":", "instance", ".", "server_configurations_management_settings", ".", "additional_features_server_configurations", "=", "AdditionalFeaturesServerConfigurations", "(", "is_rservices_enabled", "=", "enable_r_services", ")", "# If none of the settings was modified, reset server_configurations_management_settings to be null", "if", "(", "instance", ".", "server_configurations_management_settings", ".", "sql_connectivity_update_settings", "is", "None", "and", "instance", ".", "server_configurations_management_settings", ".", "sql_workload_type_update_settings", "is", "None", "and", "instance", ".", "server_configurations_management_settings", ".", "sql_storage_update_settings", "is", "None", "and", "instance", ".", "server_configurations_management_settings", ".", "additional_features_server_configurations", "is", "None", ")", ":", "instance", ".", "server_configurations_management_settings", "=", "None", "return", "instance" ]
Updates a SQL virtual machine.
[ "Updates", "a", "SQL", "virtual", "machine", "." ]
python
train
greenbone/ospd
ospd/ospd.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/ospd.py#L249-L258
def add_scanner_param(self, name, scanner_param): """ Add a scanner parameter. """ assert name assert scanner_param self.scanner_params[name] = scanner_param command = self.commands.get('start_scan') command['elements'] = { 'scanner_params': {k: v['name'] for k, v in self.scanner_params.items()}}
[ "def", "add_scanner_param", "(", "self", ",", "name", ",", "scanner_param", ")", ":", "assert", "name", "assert", "scanner_param", "self", ".", "scanner_params", "[", "name", "]", "=", "scanner_param", "command", "=", "self", ".", "commands", ".", "get", "(", "'start_scan'", ")", "command", "[", "'elements'", "]", "=", "{", "'scanner_params'", ":", "{", "k", ":", "v", "[", "'name'", "]", "for", "k", ",", "v", "in", "self", ".", "scanner_params", ".", "items", "(", ")", "}", "}" ]
Add a scanner parameter.
[ "Add", "a", "scanner", "parameter", "." ]
python
train
Stranger6667/postmarker
postmarker/utils.py
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/utils.py#L12-L26
def sizes(count, offset=0, max_chunk=500): """ Helper to iterate over remote data via count & offset pagination. """ if count is None: chunk = max_chunk while True: yield chunk, offset offset += chunk else: while count: chunk = min(count, max_chunk) count = max(0, count - max_chunk) yield chunk, offset offset += chunk
[ "def", "sizes", "(", "count", ",", "offset", "=", "0", ",", "max_chunk", "=", "500", ")", ":", "if", "count", "is", "None", ":", "chunk", "=", "max_chunk", "while", "True", ":", "yield", "chunk", ",", "offset", "offset", "+=", "chunk", "else", ":", "while", "count", ":", "chunk", "=", "min", "(", "count", ",", "max_chunk", ")", "count", "=", "max", "(", "0", ",", "count", "-", "max_chunk", ")", "yield", "chunk", ",", "offset", "offset", "+=", "chunk" ]
Helper to iterate over remote data via count & offset pagination.
[ "Helper", "to", "iterate", "over", "remote", "data", "via", "count", "&", "offset", "pagination", "." ]
python
train
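Usage sketch for the generator above: with a finite count it yields (chunk, offset) pairs until the count is exhausted, and with count=None it pages forever.

from postmarker.utils import sizes

print(list(sizes(1200)))
# [(500, 0), (500, 500), (200, 1000)]
print(list(sizes(120, offset=30, max_chunk=50)))
# [(50, 30), (50, 80), (20, 130)]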
ofek/bit
bit/network/services.py
https://github.com/ofek/bit/blob/20fc0e7047946c1f28f868008d99d659905c1af6/bit/network/services.py#L489-L511
def broadcast_tx(cls, tx_hex): # pragma: no cover """Broadcasts a transaction to the blockchain. :param tx_hex: A signed transaction in hex form. :type tx_hex: ``str`` :raises ConnectionError: If all API services fail. """ success = None for api_call in cls.BROADCAST_TX_MAIN: try: success = api_call(tx_hex) if not success: continue return except cls.IGNORED_ERRORS: pass if success is False: raise ConnectionError('Transaction broadcast failed, or ' 'Unspents were already used.') raise ConnectionError('All APIs are unreachable.')
[ "def", "broadcast_tx", "(", "cls", ",", "tx_hex", ")", ":", "# pragma: no cover", "success", "=", "None", "for", "api_call", "in", "cls", ".", "BROADCAST_TX_MAIN", ":", "try", ":", "success", "=", "api_call", "(", "tx_hex", ")", "if", "not", "success", ":", "continue", "return", "except", "cls", ".", "IGNORED_ERRORS", ":", "pass", "if", "success", "is", "False", ":", "raise", "ConnectionError", "(", "'Transaction broadcast failed, or '", "'Unspents were already used.'", ")", "raise", "ConnectionError", "(", "'All APIs are unreachable.'", ")" ]
Broadcasts a transaction to the blockchain. :param tx_hex: A signed transaction in hex form. :type tx_hex: ``str`` :raises ConnectionError: If all API services fail.
[ "Broadcasts", "a", "transaction", "to", "the", "blockchain", "." ]
python
train
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2395-L2408
def averageOutsidePercentile(requestContext, seriesList, n): """ Removes functions lying inside an average percentile interval """ averages = [safeAvg(s) for s in seriesList] if n < 50: n = 100 - n lowPercentile = _getPercentile(averages, 100 - n) highPercentile = _getPercentile(averages, n) return [s for s in seriesList if not lowPercentile < safeAvg(s) < highPercentile]
[ "def", "averageOutsidePercentile", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "averages", "=", "[", "safeAvg", "(", "s", ")", "for", "s", "in", "seriesList", "]", "if", "n", "<", "50", ":", "n", "=", "100", "-", "n", "lowPercentile", "=", "_getPercentile", "(", "averages", ",", "100", "-", "n", ")", "highPercentile", "=", "_getPercentile", "(", "averages", ",", "n", ")", "return", "[", "s", "for", "s", "in", "seriesList", "if", "not", "lowPercentile", "<", "safeAvg", "(", "s", ")", "<", "highPercentile", "]" ]
Removes functions lying inside an average percentile interval
[ "Removes", "functions", "lying", "inside", "an", "average", "percentile", "interval" ]
python
train
xgfs/NetLSD
netlsd/util.py
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/util.py#L177-L209
def updown_linear_approx(eigvals_lower, eigvals_upper, nv): """ Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum. Parameters ---------- eigvals_lower : numpy.ndarray Lower part of the spectrum, sorted eigvals_upper : numpy.ndarray Upper part of the spectrum, sorted nv : int Total number of nodes (eigenvalues) in the graph. Returns ------- numpy.ndarray Vector of approximated eigenvalues Examples -------- >>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9) array([1, 2, 3, 4, 5, 6, 7, 8, 9]) """ nal = len(eigvals_lower) nau = len(eigvals_upper) if nv < nal + nau: raise ValueError('Number of supplied eigenvalues ({0} lower and {1} upper) is higher than number of nodes ({2})!'.format(nal, nau, nv)) ret = np.zeros(nv) ret[:nal] = eigvals_lower ret[-nau:] = eigvals_upper ret[nal-1:-nau+1] = np.linspace(eigvals_lower[-1], eigvals_upper[0], nv-nal-nau+2) return ret
[ "def", "updown_linear_approx", "(", "eigvals_lower", ",", "eigvals_upper", ",", "nv", ")", ":", "nal", "=", "len", "(", "eigvals_lower", ")", "nau", "=", "len", "(", "eigvals_upper", ")", "if", "nv", "<", "nal", "+", "nau", ":", "raise", "ValueError", "(", "'Number of supplied eigenvalues ({0} lower and {1} upper) is higher than number of nodes ({2})!'", ".", "format", "(", "nal", ",", "nau", ",", "nv", ")", ")", "ret", "=", "np", ".", "zeros", "(", "nv", ")", "ret", "[", ":", "nal", "]", "=", "eigvals_lower", "ret", "[", "-", "nau", ":", "]", "=", "eigvals_upper", "ret", "[", "nal", "-", "1", ":", "-", "nau", "+", "1", "]", "=", "np", ".", "linspace", "(", "eigvals_lower", "[", "-", "1", "]", ",", "eigvals_upper", "[", "0", "]", ",", "nv", "-", "nal", "-", "nau", "+", "2", ")", "return", "ret" ]
Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum. Parameters ---------- eigvals_lower : numpy.ndarray Lower part of the spectrum, sorted eigvals_upper : numpy.ndarray Upper part of the spectrum, sorted nv : int Total number of nodes (eigenvalues) in the graph. Returns ------- numpy.ndarray Vector of approximated eigenvalues Examples -------- >>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9) array([1, 2, 3, 4, 5, 6, 7, 8, 9])
[ "Approximates", "Laplacian", "spectrum", "using", "upper", "and", "lower", "parts", "of", "the", "eigenspectrum", ".", "Parameters", "----------", "eigvals_lower", ":", "numpy", ".", "ndarray", "Lower", "part", "of", "the", "spectrum", "sorted", "eigvals_upper", ":", "numpy", ".", "ndarray", "Upper", "part", "of", "the", "spectrum", "sorted", "nv", ":", "int", "Total", "number", "of", "nodes", "(", "eigenvalues", ")", "in", "the", "graph", "." ]
python
train
loli/medpy
medpy/graphcut/energy_voxel.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/graphcut/energy_voxel.py#L501-L539
def __skeleton_maximum(graph, image, boundary_term, spacing): """ A skeleton for the calculation of maximum intensity based boundary terms. This function is equivalent to energy_voxel.__skeleton_difference(), but uses the maximum intensity rather than the intensity difference of neighbouring voxels. It is therefore suitable to be used with the gradient image, rather than the original image. The computation of the edge weights follows .. math:: w(p,q) = g(max(I_p, I_q)) ,where :math:`g(\cdot)` is the supplied boundary term function. @param graph An initialized graph.GCGraph object @type graph.GCGraph @param image The image to compute on @type image numpy.ndarray @param boundary_term A function to compute the boundary term over an array of maximum intensities @type boundary_term function @param spacing A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If False, no distance based weighting of the graph edges is performed. @param spacing sequence | False @see energy_voxel.__skeleton_difference() for more details. """ def intensity_maximum(neighbour_one, neighbour_two): """ Takes two voxel arrays constituting neighbours and computes the maximum between their intensities. """ return scipy.maximum(neighbour_one, neighbour_two) __skeleton_base(graph, numpy.abs(image), boundary_term, intensity_maximum, spacing)
[ "def", "__skeleton_maximum", "(", "graph", ",", "image", ",", "boundary_term", ",", "spacing", ")", ":", "def", "intensity_maximum", "(", "neighbour_one", ",", "neighbour_two", ")", ":", "\"\"\"\n Takes two voxel arrays constituting neighbours and computes the maximum between\n their intensities.\n \"\"\"", "return", "scipy", ".", "maximum", "(", "neighbour_one", ",", "neighbour_two", ")", "__skeleton_base", "(", "graph", ",", "numpy", ".", "abs", "(", "image", ")", ",", "boundary_term", ",", "intensity_maximum", ",", "spacing", ")" ]
A skeleton for the calculation of maximum intensity based boundary terms. This function is equivalent to energy_voxel.__skeleton_difference(), but uses the maximum intensity rather than the intensity difference of neighbouring voxels. It is therefore suitable to be used with the gradient image, rather than the original image. The computation of the edge weights follows .. math:: w(p,q) = g(max(I_p, I_q)) ,where :math:`g(\cdot)` is the supplied boundary term function. @param graph An initialized graph.GCGraph object @type graph.GCGraph @param image The image to compute on @type image numpy.ndarray @param boundary_term A function to compute the boundary term over an array of maximum intensities @type boundary_term function @param spacing A sequence containing the slice spacing used for weighting the computed neighbourhood weight value for different dimensions. If False, no distance based weighting of the graph edges is performed. @param spacing sequence | False @see energy_voxel.__skeleton_difference() for more details.
[ "A", "skeleton", "for", "the", "calculation", "of", "maximum", "intensity", "based", "boundary", "terms", ".", "This", "function", "is", "equivalent", "to", "energy_voxel", ".", "__skeleton_difference", "()", "but", "uses", "the", "maximum", "intensity", "rather", "than", "the", "intensity", "difference", "of", "neighbouring", "voxels", ".", "It", "is", "therefore", "suitable", "to", "be", "used", "with", "the", "gradient", "image", "rather", "than", "the", "original", "image", ".", "The", "computation", "of", "the", "edge", "weights", "follows", "..", "math", "::", "w", "(", "p", "q", ")", "=", "g", "(", "max", "(", "I_p", "I_q", "))", "where", ":", "math", ":", "g", "(", "\\", "cdot", ")", "is", "the", "supplied", "boundary", "term", "function", "." ]
python
train
GaretJax/lancet
lancet/contrib/dploi.py
https://github.com/GaretJax/lancet/blob/cf438c5c6166b18ee0dc5ffce55220793019bb95/lancet/contrib/dploi.py#L11-L28
def ssh(lancet, print_cmd, environment): """ SSH into the given environment, based on the dploi configuration. """ namespace = {} with open(lancet.config.get('dploi', 'deployment_spec')) as fh: code = compile(fh.read(), 'deployment.py', 'exec') exec(code, {}, namespace) config = namespace['settings'][environment] host = '{}@{}'.format(config['user'], config['hosts'][0]) cmd = ['ssh', '-p', str(config.get('port', 22)), host] if print_cmd: click.echo(' '.join(quote(s) for s in cmd)) else: lancet.defer_to_shell(*cmd)
[ "def", "ssh", "(", "lancet", ",", "print_cmd", ",", "environment", ")", ":", "namespace", "=", "{", "}", "with", "open", "(", "lancet", ".", "config", ".", "get", "(", "'dploi'", ",", "'deployment_spec'", ")", ")", "as", "fh", ":", "code", "=", "compile", "(", "fh", ".", "read", "(", ")", ",", "'deployment.py'", ",", "'exec'", ")", "exec", "(", "code", ",", "{", "}", ",", "namespace", ")", "config", "=", "namespace", "[", "'settings'", "]", "[", "environment", "]", "host", "=", "'{}@{}'", ".", "format", "(", "config", "[", "'user'", "]", ",", "config", "[", "'hosts'", "]", "[", "0", "]", ")", "cmd", "=", "[", "'ssh'", ",", "'-p'", ",", "str", "(", "config", ".", "get", "(", "'port'", ",", "22", ")", ")", ",", "host", "]", "if", "print_cmd", ":", "click", ".", "echo", "(", "' '", ".", "join", "(", "quote", "(", "s", ")", "for", "s", "in", "cmd", ")", ")", "else", ":", "lancet", ".", "defer_to_shell", "(", "*", "cmd", ")" ]
SSH into the given environment, based on the dploi configuration.
[ "SSH", "into", "the", "given", "environment", "based", "on", "the", "dploi", "configuration", "." ]
python
train
HazyResearch/metal
metal/label_model/graph_utils.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/label_model/graph_utils.py#L4-L39
def get_clique_tree(nodes, edges): """Given a set of int nodes i and edges (i,j), returns an nx.Graph object G which is a clique tree, where: - G.node[i]['members'] contains the set of original nodes in the ith maximal clique - G[i][j]['members'] contains the set of original nodes in the seperator set between maximal cliques i and j Note: This method is currently only implemented for chordal graphs; TODO: add a step to triangulate non-chordal graphs. """ # Form the original graph G1 G1 = nx.Graph() G1.add_nodes_from(nodes) G1.add_edges_from(edges) # Check if graph is chordal # TODO: Add step to triangulate graph if not if not nx.is_chordal(G1): raise NotImplementedError("Graph triangulation not implemented.") # Create maximal clique graph G2 # Each node is a maximal clique C_i # Let w = |C_i \cap C_j|; C_i, C_j have an edge with weight w if w > 0 G2 = nx.Graph() for i, c in enumerate(nx.chordal_graph_cliques(G1)): G2.add_node(i, members=c) for i in G2.nodes: for j in G2.nodes: S = G2.node[i]["members"].intersection(G2.node[j]["members"]) w = len(S) if w > 0: G2.add_edge(i, j, weight=w, members=S) # Return a minimum spanning tree of G2 return nx.minimum_spanning_tree(G2)
[ "def", "get_clique_tree", "(", "nodes", ",", "edges", ")", ":", "# Form the original graph G1", "G1", "=", "nx", ".", "Graph", "(", ")", "G1", ".", "add_nodes_from", "(", "nodes", ")", "G1", ".", "add_edges_from", "(", "edges", ")", "# Check if graph is chordal", "# TODO: Add step to triangulate graph if not", "if", "not", "nx", ".", "is_chordal", "(", "G1", ")", ":", "raise", "NotImplementedError", "(", "\"Graph triangulation not implemented.\"", ")", "# Create maximal clique graph G2", "# Each node is a maximal clique C_i", "# Let w = |C_i \\cap C_j|; C_i, C_j have an edge with weight w if w > 0", "G2", "=", "nx", ".", "Graph", "(", ")", "for", "i", ",", "c", "in", "enumerate", "(", "nx", ".", "chordal_graph_cliques", "(", "G1", ")", ")", ":", "G2", ".", "add_node", "(", "i", ",", "members", "=", "c", ")", "for", "i", "in", "G2", ".", "nodes", ":", "for", "j", "in", "G2", ".", "nodes", ":", "S", "=", "G2", ".", "node", "[", "i", "]", "[", "\"members\"", "]", ".", "intersection", "(", "G2", ".", "node", "[", "j", "]", "[", "\"members\"", "]", ")", "w", "=", "len", "(", "S", ")", "if", "w", ">", "0", ":", "G2", ".", "add_edge", "(", "i", ",", "j", ",", "weight", "=", "w", ",", "members", "=", "S", ")", "# Return a minimum spanning tree of G2", "return", "nx", ".", "minimum_spanning_tree", "(", "G2", ")" ]
Given a set of int nodes i and edges (i,j), returns an nx.Graph object G which is a clique tree, where: - G.node[i]['members'] contains the set of original nodes in the ith maximal clique - G[i][j]['members'] contains the set of original nodes in the separator set between maximal cliques i and j Note: This method is currently only implemented for chordal graphs; TODO: add a step to triangulate non-chordal graphs.
[ "Given", "a", "set", "of", "int", "nodes", "i", "and", "edges", "(", "i", "j", ")", "returns", "an", "nx", ".", "Graph", "object", "G", "which", "is", "a", "clique", "tree", "where", ":", "-", "G", ".", "node", "[", "i", "]", "[", "members", "]", "contains", "the", "set", "of", "original", "nodes", "in", "the", "ith", "maximal", "clique", "-", "G", "[", "i", "]", "[", "j", "]", "[", "members", "]", "contains", "the", "set", "of", "original", "nodes", "in", "the", "seperator", "set", "between", "maximal", "cliques", "i", "and", "j" ]
python
train
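A small usage sketch for the clique-tree helper above, assuming a networkx release that still supports the `G.node[...]` indexing used in the snippet (removed in newer versions). The path graph 0-1-2-3 is chordal, so the result is a tree whose nodes are the maximal cliques {0,1}, {1,2}, {2,3} and whose edges carry singleton separator sets.

# Hypothetical usage; requires an older networkx with G.node[...] support.
T = get_clique_tree([0, 1, 2, 3], [(0, 1), (1, 2), (2, 3)])
for i, data in T.nodes(data=True):
    print(i, sorted(data["members"]))      # the maximal cliques [0, 1], [1, 2], [2, 3]
for i, j, data in T.edges(data=True):
    print(i, j, sorted(data["members"]))   # separator sets [1] and [2]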
raiden-network/raiden
raiden/messages.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/messages.py#L272-L280
def _data_to_sign(self) -> bytes: """ Return the binary data to be/which was signed """ packed = self.packed() field = type(packed).fields_spec[-1] assert field.name == 'signature', 'signature is not the last field' # this slice must be from the end of the buffer return packed.data[:-field.size_bytes]
[ "def", "_data_to_sign", "(", "self", ")", "->", "bytes", ":", "packed", "=", "self", ".", "packed", "(", ")", "field", "=", "type", "(", "packed", ")", ".", "fields_spec", "[", "-", "1", "]", "assert", "field", ".", "name", "==", "'signature'", ",", "'signature is not the last field'", "# this slice must be from the end of the buffer", "return", "packed", ".", "data", "[", ":", "-", "field", ".", "size_bytes", "]" ]
Return the binary data to be/which was signed
[ "Return", "the", "binary", "data", "to", "be", "/", "which", "was", "signed" ]
python
train
Neurosim-lab/netpyne
netpyne/support/csd.py
https://github.com/Neurosim-lab/netpyne/blob/edb67b5098b2e7923d55010ded59ad1bf75c0f18/netpyne/support/csd.py#L35-L54
def getCSD (lfps,sampr,minf=0.05,maxf=300,norm=True,vaknin=False,spacing=1.0): """ get current source density approximation using set of local field potentials with equidistant spacing first performs a lowpass filter lfps is a list or numpy array of LFPs arranged spatially by column spacing is in microns """ datband = getbandpass(lfps,sampr,minf,maxf) if datband.shape[0] > datband.shape[1]: # take CSD along smaller dimension ax = 1 else: ax = 0 # can change default to run Vaknin on bandpass filtered LFPs before calculating CSD, that # way would have same number of channels in CSD and LFP (but not critical, and would take more RAM); if vaknin: datband = Vaknin(datband) if norm: removemean(datband,ax=ax) # NB: when drawing CSD make sure that negative values (depolarizing intracellular current) drawn in red, # and positive values (hyperpolarizing intracellular current) drawn in blue CSD = -numpy.diff(datband,n=2,axis=ax) / spacing**2 # now each column (or row) is an electrode -- CSD along electrodes return CSD
[ "def", "getCSD", "(", "lfps", ",", "sampr", ",", "minf", "=", "0.05", ",", "maxf", "=", "300", ",", "norm", "=", "True", ",", "vaknin", "=", "False", ",", "spacing", "=", "1.0", ")", ":", "datband", "=", "getbandpass", "(", "lfps", ",", "sampr", ",", "minf", ",", "maxf", ")", "if", "datband", ".", "shape", "[", "0", "]", ">", "datband", ".", "shape", "[", "1", "]", ":", "# take CSD along smaller dimension", "ax", "=", "1", "else", ":", "ax", "=", "0", "# can change default to run Vaknin on bandpass filtered LFPs before calculating CSD, that", "# way would have same number of channels in CSD and LFP (but not critical, and would take more RAM);", "if", "vaknin", ":", "datband", "=", "Vaknin", "(", "datband", ")", "if", "norm", ":", "removemean", "(", "datband", ",", "ax", "=", "ax", ")", "# NB: when drawing CSD make sure that negative values (depolarizing intracellular current) drawn in red,", "# and positive values (hyperpolarizing intracellular current) drawn in blue", "CSD", "=", "-", "numpy", ".", "diff", "(", "datband", ",", "n", "=", "2", ",", "axis", "=", "ax", ")", "/", "spacing", "**", "2", "# now each column (or row) is an electrode -- CSD along electrodes", "return", "CSD" ]
get current source density approximation using set of local field potentials with equidistant spacing first performs a lowpass filter lfps is a list or numpy array of LFPs arranged spatially by column spacing is in microns
[ "get", "current", "source", "density", "approximation", "using", "set", "of", "local", "field", "potentials", "with", "equidistant", "spacing", "first", "performs", "a", "lowpass", "filter", "lfps", "is", "a", "list", "or", "numpy", "array", "of", "LFPs", "arranged", "spatially", "by", "column", "spacing", "is", "in", "microns" ]
python
train
cni/MRS
MRS/utils.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/utils.py#L577-L603
def detect_outliers(in_arr, thresh=3.0): """ Detects outliers more than X standard deviations from mean. Parameters ---------- in_list: ndarray An array of measures for which outliers need to be detected. thresh: float (optional) Threshold number of standard deviations before a measure is considered an outlier. Default = 3.0 Returns ------- outlier_idx: ndarray of boolean An array indicating if a measure is an outlier (True) or not (False). """ mean = np.nanmean(in_arr) std = np.nanstd(in_arr) # calculate cutoffs uppthresh = mean + thresh * std lowthresh = mean - thresh * std return np.logical_or(in_arr < lowthresh, in_arr > uppthresh)
[ "def", "detect_outliers", "(", "in_arr", ",", "thresh", "=", "3.0", ")", ":", "mean", "=", "np", ".", "nanmean", "(", "in_arr", ")", "std", "=", "np", ".", "nanstd", "(", "in_arr", ")", "# calculate cutoffs ", "uppthresh", "=", "mean", "+", "thresh", "*", "std", "lowthresh", "=", "mean", "-", "thresh", "*", "std", "return", "np", ".", "logical_or", "(", "in_arr", "<", "lowthresh", ",", "in_arr", ">", "uppthresh", ")" ]
Detects outliers more than X standard deviations from mean. Parameters ---------- in_arr: ndarray An array of measures for which outliers need to be detected. thresh: float (optional) Threshold number of standard deviations before a measure is considered an outlier. Default = 3.0 Returns ------- outlier_idx: ndarray of boolean An array indicating if a measure is an outlier (True) or not (False).
[ "Detects", "outliers", "more", "than", "X", "standard", "deviations", "from", "mean", ".", "Parameters", "----------", "in_list", ":", "ndarray", "An", "array", "of", "measures", "for", "which", "outliers", "need", "to", "be", "detected", ".", "thresh", ":", "float", "(", "optional", ")", "Threshold", "number", "of", "standard", "deviations", "before", "a", "measure", "is", "considered", "an", "outlier", ".", "Default", "=", "3", ".", "0", "Returns", "-------", "outlier_idx", ":", "ndarray", "of", "boolean", "An", "array", "indicating", "if", "a", "measure", "is", "an", "outlier", "(", "True", ")", "or", "not", "(", "False", ")", "." ]
python
train
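A quick usage sketch for the outlier helper above (only numpy is required). With nine identical inliers and one extreme value, the extreme point lies about three population standard deviations from the mean, so a threshold of 2.0 flags it.

import numpy as np

data = np.concatenate([np.ones(9), [100.0]])
mask = detect_outliers(data, thresh=2.0)
print(data[mask])    # -> [100.]
print(mask.sum())    # -> 1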
crs4/hl7apy
hl7apy/core.py
https://github.com/crs4/hl7apy/blob/91be488e9274f6ec975519a1d9c17045bc91bf74/hl7apy/core.py#L403-L433
def create_element(self, name, traversal_parent=False, reference=None): """ Create an element having the given name :type name: ``str`` :param name: the name of the element to be created (e.g. PID) :type traversal_parent: ``bool`` :param traversal_parent: if ``True``, the parent will be set as temporary for traversal purposes :param reference: the new element structure (see :func:`load_reference <hl7apy.load_reference>`) :return: an instance of an :class:`hl7apy.core.Element` subclass :raises: :exc:`ChildNotFound <hl7apy.exceptions.ChildNotFound>` if the element does not exist """ if reference is None: reference = self.element.find_child_reference(name) if reference is not None: cls = reference['cls'] element_name = reference['name'] kwargs = {'reference': reference['ref'], 'validation_level': self.element.validation_level, 'version': self.element.version} if not traversal_parent: kwargs['parent'] = self.element else: kwargs['traversal_parent'] = self.element return cls(element_name, **kwargs) else: raise ChildNotFound(name)
[ "def", "create_element", "(", "self", ",", "name", ",", "traversal_parent", "=", "False", ",", "reference", "=", "None", ")", ":", "if", "reference", "is", "None", ":", "reference", "=", "self", ".", "element", ".", "find_child_reference", "(", "name", ")", "if", "reference", "is", "not", "None", ":", "cls", "=", "reference", "[", "'cls'", "]", "element_name", "=", "reference", "[", "'name'", "]", "kwargs", "=", "{", "'reference'", ":", "reference", "[", "'ref'", "]", ",", "'validation_level'", ":", "self", ".", "element", ".", "validation_level", ",", "'version'", ":", "self", ".", "element", ".", "version", "}", "if", "not", "traversal_parent", ":", "kwargs", "[", "'parent'", "]", "=", "self", ".", "element", "else", ":", "kwargs", "[", "'traversal_parent'", "]", "=", "self", ".", "element", "return", "cls", "(", "element_name", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "ChildNotFound", "(", "name", ")" ]
Create an element having the given name :type name: ``str`` :param name: the name of the element to be created (e.g. PID) :type traversal_parent: ``bool`` :param traversal_parent: if ``True``, the parent will be set as temporary for traversal purposes :param reference: the new element structure (see :func:`load_reference <hl7apy.load_reference>`) :return: an instance of an :class:`hl7apy.core.Element` subclass :raises: :exc:`ChildNotFound <hl7apy.exceptions.ChildNotFound>` if the element does not exist
[ "Create", "an", "element", "having", "the", "given", "name" ]
python
train
joke2k/faker
faker/utils/datetime_safe.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/utils/datetime_safe.py#L44-L51
def new_datetime(d): """ Generate a safe datetime from a datetime.date or datetime.datetime object. """ kw = [d.year, d.month, d.day] if isinstance(d, real_datetime): kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo]) return datetime(*kw)
[ "def", "new_datetime", "(", "d", ")", ":", "kw", "=", "[", "d", ".", "year", ",", "d", ".", "month", ",", "d", ".", "day", "]", "if", "isinstance", "(", "d", ",", "real_datetime", ")", ":", "kw", ".", "extend", "(", "[", "d", ".", "hour", ",", "d", ".", "minute", ",", "d", ".", "second", ",", "d", ".", "microsecond", ",", "d", ".", "tzinfo", "]", ")", "return", "datetime", "(", "*", "kw", ")" ]
Generate a safe datetime from a datetime.date or datetime.datetime object.
[ "Generate", "a", "safe", "datetime", "from", "a", "datetime", ".", "date", "or", "datetime", ".", "datetime", "object", "." ]
python
train
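A short usage sketch, assuming the surrounding faker module binds `datetime` to its "safe" wrapper class and `real_datetime` to the standard-library class (as the snippet's names suggest): a plain date gains a midnight time component, while a datetime keeps its time fields and tzinfo.

import datetime as stdlib

print(new_datetime(stdlib.date(1850, 7, 4)))
# -> 1850-07-04 00:00:00  (a "safe" datetime at midnight)
print(new_datetime(stdlib.datetime(1850, 7, 4, 12, 30)))
# -> 1850-07-04 12:30:00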
google/prettytensor
prettytensor/pretty_tensor_class.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_class.py#L233-L276
def defaults_scope(**kwargs): """Creates a scope for the defaults that are used in a `with` block. Note: `defaults_scope` supports nesting where later defaults can be overridden. Also, an explicitly given keyword argument on a method always takes precedence. In addition to setting defaults for some methods, this also can control: * `summary_collections`: Choose which collection to place summaries in or disable with `None`. * `trainable_variables`: Boolean indicating if variables are trainable. * `variable_collections`: Default collections in which to place variables; `tf.GraphKeys.GLOBAL_VARIABLES` is always included. Args: **kwargs: The defaults. Yields: Doesn't really yield, instead this creates a Context Manager for use in a `with` statement. Raises: ValueError: if a collection type is accidently supplied a string. """ _assert_value_not_string('summary_collections', kwargs) _assert_value_not_string('variable_collections', kwargs) _check_defaults(kwargs) global _defaults old_defaults = _defaults _defaults = chain_dict.ChainDict(_defaults) _defaults.update(kwargs) # Special logic to support summary_collections. # This is added here because introducing more scopes would add more confusion # than overloading this one a bit. books = bookkeeper.for_default_graph() if 'summary_collections' in _defaults: books.summary_collections = _defaults['summary_collections'] else: books.reset_summary_collections() try: yield _defaults finally: _defaults = old_defaults
[ "def", "defaults_scope", "(", "*", "*", "kwargs", ")", ":", "_assert_value_not_string", "(", "'summary_collections'", ",", "kwargs", ")", "_assert_value_not_string", "(", "'variable_collections'", ",", "kwargs", ")", "_check_defaults", "(", "kwargs", ")", "global", "_defaults", "old_defaults", "=", "_defaults", "_defaults", "=", "chain_dict", ".", "ChainDict", "(", "_defaults", ")", "_defaults", ".", "update", "(", "kwargs", ")", "# Special logic to support summary_collections.", "# This is added here because introducing more scopes would add more confusion", "# than overloading this one a bit.", "books", "=", "bookkeeper", ".", "for_default_graph", "(", ")", "if", "'summary_collections'", "in", "_defaults", ":", "books", ".", "summary_collections", "=", "_defaults", "[", "'summary_collections'", "]", "else", ":", "books", ".", "reset_summary_collections", "(", ")", "try", ":", "yield", "_defaults", "finally", ":", "_defaults", "=", "old_defaults" ]
Creates a scope for the defaults that are used in a `with` block. Note: `defaults_scope` supports nesting where later defaults can be overridden. Also, an explicitly given keyword argument on a method always takes precedence. In addition to setting defaults for some methods, this also can control: * `summary_collections`: Choose which collection to place summaries in or disable with `None`. * `trainable_variables`: Boolean indicating if variables are trainable. * `variable_collections`: Default collections in which to place variables; `tf.GraphKeys.GLOBAL_VARIABLES` is always included. Args: **kwargs: The defaults. Yields: Doesn't really yield, instead this creates a Context Manager for use in a `with` statement. Raises: ValueError: if a collection type is accidentally supplied a string.
[ "Creates", "a", "scope", "for", "the", "defaults", "that", "are", "used", "in", "a", "with", "block", "." ]
python
train
numenta/nupic
src/nupic/swarming/permutations_runner.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L101-L112
def _escape(s): """Escape commas, tabs, newlines and dashes in a string Commas are encoded as tabs """ assert isinstance(s, str), \ "expected %s but got %s; value=%s" % (type(str), type(s), s) s = s.replace("\\", "\\\\") s = s.replace("\n", "\\n") s = s.replace("\t", "\\t") s = s.replace(",", "\t") return s
[ "def", "_escape", "(", "s", ")", ":", "assert", "isinstance", "(", "s", ",", "str", ")", ",", "\"expected %s but got %s; value=%s\"", "%", "(", "type", "(", "str", ")", ",", "type", "(", "s", ")", ",", "s", ")", "s", "=", "s", ".", "replace", "(", "\"\\\\\"", ",", "\"\\\\\\\\\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\n\"", ",", "\"\\\\n\"", ")", "s", "=", "s", ".", "replace", "(", "\"\\t\"", ",", "\"\\\\t\"", ")", "s", "=", "s", ".", "replace", "(", "\",\"", ",", "\"\\t\"", ")", "return", "s" ]
Escape commas, tabs, newlines and dashes in a string Commas are encoded as tabs
[ "Escape", "commas", "tabs", "newlines", "and", "dashes", "in", "a", "string" ]
python
valid
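A tiny illustration of the escaping rules in the helper above: backslashes, newlines and tabs are turned into two-character escape sequences first, and literal commas are then re-encoded as real tab characters.

s = _escape("metric,with\ttab")
print(repr(s))
# -> 'metric\twith\\ttab'  (the comma became a real tab; the original tab became backslash + t)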
markovmodel/PyEMMA
pyemma/coordinates/pipelines.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/pipelines.py#L92-L135
def set_element(self, index, e): r""" Replaces a pipeline stage. Replace an element in chain and return replaced element. """ if index > len(self._chain): raise IndexError("tried to access element %i, but chain has only %i" " elements" % (index, len(self._chain))) if type(index) is not int: raise ValueError( "index is not a integer but '%s'" % str(type(index))) # if e is already in chain, we're finished if self._chain[index] is e: return # remove current index and its data producer replaced = self._chain.pop(index) if not replaced.is_reader: replaced.data_producer = None self._chain.insert(index, e) if index == 0: e.data_producer = e else: # rewire data_producers e.data_producer = self._chain[index - 1] # if e has a successive element, need to set data_producer try: successor = self._chain[index + 1] successor.data_producer = e except IndexError: pass # set data_producer for predecessor of e # self._chain[max(0, index - 1)].data_producer = self._chain[index] # since data producer of element after insertion changed, reset its status # TODO: make parameterized a property? self._chain[index]._estimated = False return replaced
[ "def", "set_element", "(", "self", ",", "index", ",", "e", ")", ":", "if", "index", ">", "len", "(", "self", ".", "_chain", ")", ":", "raise", "IndexError", "(", "\"tried to access element %i, but chain has only %i\"", "\" elements\"", "%", "(", "index", ",", "len", "(", "self", ".", "_chain", ")", ")", ")", "if", "type", "(", "index", ")", "is", "not", "int", ":", "raise", "ValueError", "(", "\"index is not a integer but '%s'\"", "%", "str", "(", "type", "(", "index", ")", ")", ")", "# if e is already in chain, we're finished", "if", "self", ".", "_chain", "[", "index", "]", "is", "e", ":", "return", "# remove current index and its data producer", "replaced", "=", "self", ".", "_chain", ".", "pop", "(", "index", ")", "if", "not", "replaced", ".", "is_reader", ":", "replaced", ".", "data_producer", "=", "None", "self", ".", "_chain", ".", "insert", "(", "index", ",", "e", ")", "if", "index", "==", "0", ":", "e", ".", "data_producer", "=", "e", "else", ":", "# rewire data_producers", "e", ".", "data_producer", "=", "self", ".", "_chain", "[", "index", "-", "1", "]", "# if e has a successive element, need to set data_producer", "try", ":", "successor", "=", "self", ".", "_chain", "[", "index", "+", "1", "]", "successor", ".", "data_producer", "=", "e", "except", "IndexError", ":", "pass", "# set data_producer for predecessor of e", "# self._chain[max(0, index - 1)].data_producer = self._chain[index]", "# since data producer of element after insertion changed, reset its status", "# TODO: make parameterized a property?", "self", ".", "_chain", "[", "index", "]", ".", "_estimated", "=", "False", "return", "replaced" ]
r""" Replaces a pipeline stage. Replace an element in chain and return replaced element.
[ "r", "Replaces", "a", "pipeline", "stage", "." ]
python
train
genialis/resolwe
resolwe/flow/serializers/data.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/data.py#L110-L119
def get_collections(self, data): """Return serialized list of collection objects on data that user has `view` permission on.""" collections = self._filter_queryset('view_collection', data.collection_set.all()) from .collection import CollectionSerializer class CollectionWithoutDataSerializer(WithoutDataSerializerMixin, CollectionSerializer): """Collection without data field serializer.""" return self._serialize_items(CollectionWithoutDataSerializer, 'collections', collections)
[ "def", "get_collections", "(", "self", ",", "data", ")", ":", "collections", "=", "self", ".", "_filter_queryset", "(", "'view_collection'", ",", "data", ".", "collection_set", ".", "all", "(", ")", ")", "from", ".", "collection", "import", "CollectionSerializer", "class", "CollectionWithoutDataSerializer", "(", "WithoutDataSerializerMixin", ",", "CollectionSerializer", ")", ":", "\"\"\"Collection without data field serializer.\"\"\"", "return", "self", ".", "_serialize_items", "(", "CollectionWithoutDataSerializer", ",", "'collections'", ",", "collections", ")" ]
Return serialized list of collection objects on data that user has `view` permission on.
[ "Return", "serialized", "list", "of", "collection", "objects", "on", "data", "that", "user", "has", "view", "permission", "on", "." ]
python
train
hitchtest/hitchserve
hitchserve/service_bundle.py
https://github.com/hitchtest/hitchserve/blob/a2def19979264186d283e76f7f0c88f3ed97f2e0/hitchserve/service_bundle.py#L294-L299
def wait_for_ipykernel(self, service_name, timeout=10): """Wait for an IPython kernel-nnnn.json filename message to appear in log.""" kernel_line = self._services[service_name].logs.tail.until( lambda line: "--existing" in line[1], timeout=10, lines_back=5 ) return kernel_line.replace("--existing", "").strip()
[ "def", "wait_for_ipykernel", "(", "self", ",", "service_name", ",", "timeout", "=", "10", ")", ":", "kernel_line", "=", "self", ".", "_services", "[", "service_name", "]", ".", "logs", ".", "tail", ".", "until", "(", "lambda", "line", ":", "\"--existing\"", "in", "line", "[", "1", "]", ",", "timeout", "=", "10", ",", "lines_back", "=", "5", ")", "return", "kernel_line", ".", "replace", "(", "\"--existing\"", ",", "\"\"", ")", ".", "strip", "(", ")" ]
Wait for an IPython kernel-nnnn.json filename message to appear in log.
[ "Wait", "for", "an", "IPython", "kernel", "-", "nnnn", ".", "json", "filename", "message", "to", "appear", "in", "log", "." ]
python
train
openstack/python-monascaclient
monascaclient/v2_0/shell.py
https://github.com/openstack/python-monascaclient/blob/03b07534145928eb2debad938da033c232dda105/monascaclient/v2_0/shell.py#L593-L613
def do_notification_show(mc, args): '''Describe the notification.''' fields = {} fields['notification_id'] = args.id try: notification = mc.notifications.get(**fields) except (osc_exc.ClientException, k_exc.HttpError) as he: raise osc_exc.CommandError('%s\n%s' % (he.message, he.details)) else: if args.json: print(utils.json_formatter(notification)) return formatters = { 'name': utils.json_formatter, 'id': utils.json_formatter, 'type': utils.json_formatter, 'address': utils.json_formatter, 'period': utils.json_formatter, 'links': utils.format_dictlist, } utils.print_dict(notification, formatters=formatters)
[ "def", "do_notification_show", "(", "mc", ",", "args", ")", ":", "fields", "=", "{", "}", "fields", "[", "'notification_id'", "]", "=", "args", ".", "id", "try", ":", "notification", "=", "mc", ".", "notifications", ".", "get", "(", "*", "*", "fields", ")", "except", "(", "osc_exc", ".", "ClientException", ",", "k_exc", ".", "HttpError", ")", "as", "he", ":", "raise", "osc_exc", ".", "CommandError", "(", "'%s\\n%s'", "%", "(", "he", ".", "message", ",", "he", ".", "details", ")", ")", "else", ":", "if", "args", ".", "json", ":", "print", "(", "utils", ".", "json_formatter", "(", "notification", ")", ")", "return", "formatters", "=", "{", "'name'", ":", "utils", ".", "json_formatter", ",", "'id'", ":", "utils", ".", "json_formatter", ",", "'type'", ":", "utils", ".", "json_formatter", ",", "'address'", ":", "utils", ".", "json_formatter", ",", "'period'", ":", "utils", ".", "json_formatter", ",", "'links'", ":", "utils", ".", "format_dictlist", ",", "}", "utils", ".", "print_dict", "(", "notification", ",", "formatters", "=", "formatters", ")" ]
Describe the notification.
[ "Describe", "the", "notification", "." ]
python
train
DataONEorg/d1_python
lib_common/src/d1_common/cert/x509.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/cert/x509.py#L854-L885
def _get_val_list(obj, path_list, reverse=False): """Extract values from nested objects by attribute names. Objects contain attributes which are named references to objects. This will descend down a tree of nested objects, starting at the given object, following the given path. Args: obj: object Any type of object path_list: list Attribute names reverse: bool Reverse the list of values before concatenation. Returns: list of objects """ try: y = getattr(obj, path_list[0]) except AttributeError: return [] if len(path_list) == 1: return [y] else: val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)] if reverse: val_list.reverse() return val_list
[ "def", "_get_val_list", "(", "obj", ",", "path_list", ",", "reverse", "=", "False", ")", ":", "try", ":", "y", "=", "getattr", "(", "obj", ",", "path_list", "[", "0", "]", ")", "except", "AttributeError", ":", "return", "[", "]", "if", "len", "(", "path_list", ")", "==", "1", ":", "return", "[", "y", "]", "else", ":", "val_list", "=", "[", "x", "for", "a", "in", "y", "for", "x", "in", "_get_val_list", "(", "a", ",", "path_list", "[", "1", ":", "]", ",", "reverse", ")", "]", "if", "reverse", ":", "val_list", ".", "reverse", "(", ")", "return", "val_list" ]
Extract values from nested objects by attribute names. Objects contain attributes which are named references to objects. This will descend down a tree of nested objects, starting at the given object, following the given path. Args: obj: object Any type of object path_list: list Attribute names reverse: bool Reverse the list of values before concatenation. Returns: list of objects
[ "Extract", "values", "from", "nested", "objects", "by", "attribute", "names", "." ]
python
train
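A minimal sketch of the attribute-path traversal above using stand-in objects (the real call sites presumably pass certificate objects from the cryptography package). At every non-terminal step the attribute value is expected to be iterable; leaf values are collected in order, and missing attributes simply yield an empty list.

from types import SimpleNamespace as NS

cert_like = NS(attributes=[NS(value="CN=node1"), NS(value="O=DataONE")])
print(_get_val_list(cert_like, ["attributes", "value"]))
# -> ['CN=node1', 'O=DataONE']
print(_get_val_list(cert_like, ["attributes", "value"], reverse=True))
# -> ['O=DataONE', 'CN=node1']
print(_get_val_list(cert_like, ["missing"]))
# -> []  (the AttributeError branch swallows missing attributes)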
mixmastamyk/console
console/proximity.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/proximity.py#L110-L135
def find_nearest_color_hexstr(hexdigits, color_table=None, method='euclid'): ''' Given a three or six-character hex digit string, return the nearest color index. Arguments: hexdigits: a three/6 digit hex string, e.g. 'b0b', '123456' Returns: int, None: index, or None on error. ''' triplet = [] try: if len(hexdigits) == 3: for digit in hexdigits: digit = int(digit, 16) triplet.append((digit * 16) + digit) elif len(hexdigits) == 6: triplet.extend(int(hexdigits[i:i+2], 16) for i in (0, 2, 4)) else: raise ValueError('wrong length: %r' % hexdigits) except ValueError: return None return find_nearest_color_index(*triplet, color_table=color_table, method=method)
[ "def", "find_nearest_color_hexstr", "(", "hexdigits", ",", "color_table", "=", "None", ",", "method", "=", "'euclid'", ")", ":", "triplet", "=", "[", "]", "try", ":", "if", "len", "(", "hexdigits", ")", "==", "3", ":", "for", "digit", "in", "hexdigits", ":", "digit", "=", "int", "(", "digit", ",", "16", ")", "triplet", ".", "append", "(", "(", "digit", "*", "16", ")", "+", "digit", ")", "elif", "len", "(", "hexdigits", ")", "==", "6", ":", "triplet", ".", "extend", "(", "int", "(", "hexdigits", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "(", "0", ",", "2", ",", "4", ")", ")", "else", ":", "raise", "ValueError", "(", "'wrong length: %r'", "%", "hexdigits", ")", "except", "ValueError", ":", "return", "None", "return", "find_nearest_color_index", "(", "*", "triplet", ",", "color_table", "=", "color_table", ",", "method", "=", "method", ")" ]
Given a three or six-character hex digit string, return the nearest color index. Arguments: hexdigits: a three/6 digit hex string, e.g. 'b0b', '123456' Returns: int, None: index, or None on error.
[ "Given", "a", "three", "or", "six", "-", "character", "hex", "digit", "string", "return", "the", "nearest", "color", "index", "." ]
python
train
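For reference, the three-digit form doubles each hex digit before the nearest-index lookup, so 'b0b' expands to the RGB triplet (187, 0, 187); malformed input returns None instead of raising. The calls below are hypothetical, and the returned index depends on find_nearest_color_index (not shown) and the palette in color_table.

idx_short = find_nearest_color_hexstr('b0b')      # same triplet as 'bb00bb'
idx_long = find_nearest_color_hexstr('bb00bb')
assert idx_short == idx_long
assert find_nearest_color_hexstr('xyz') is None   # not valid hex -> None
assert find_nearest_color_hexstr('1234') is None  # wrong length -> None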
matthieugouel/gibica
gibica/parser.py
https://github.com/matthieugouel/gibica/blob/65f937f7a6255078cc22eb7691a2897466032909/gibica/parser.py#L311-L330
def term(self): """ term: atom (('*' | '/' | '//') atom)* """ node = self.atom() while self.token.nature in (Nature.MUL, Nature.DIV, Nature.INT_DIV): token = self.token if token.nature == Nature.MUL: self._process(Nature.MUL) elif token.nature == Nature.DIV: self._process(Nature.DIV) elif token.nature == Nature.INT_DIV: self._process(Nature.INT_DIV) else: self._error() node = BinaryOperation(left=node, op=token, right=self.atom()) return node
[ "def", "term", "(", "self", ")", ":", "node", "=", "self", ".", "atom", "(", ")", "while", "self", ".", "token", ".", "nature", "in", "(", "Nature", ".", "MUL", ",", "Nature", ".", "DIV", ",", "Nature", ".", "INT_DIV", ")", ":", "token", "=", "self", ".", "token", "if", "token", ".", "nature", "==", "Nature", ".", "MUL", ":", "self", ".", "_process", "(", "Nature", ".", "MUL", ")", "elif", "token", ".", "nature", "==", "Nature", ".", "DIV", ":", "self", ".", "_process", "(", "Nature", ".", "DIV", ")", "elif", "token", ".", "nature", "==", "Nature", ".", "INT_DIV", ":", "self", ".", "_process", "(", "Nature", ".", "INT_DIV", ")", "else", ":", "self", ".", "_error", "(", ")", "node", "=", "BinaryOperation", "(", "left", "=", "node", ",", "op", "=", "token", ",", "right", "=", "self", ".", "atom", "(", ")", ")", "return", "node" ]
term: atom (('*' | '/' | '//') atom)*
[ "term", ":", "atom", "((", "*", "|", "/", "|", "//", ")", "atom", ")", "*" ]
python
train
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2945-L2955
def get_current_peers( self, peer_table=None ): """ Get the current set of peers """ # get current peers current_peers = None with AtlasPeerTableLocked(peer_table) as ptbl: current_peers = ptbl.keys()[:] return current_peers
[ "def", "get_current_peers", "(", "self", ",", "peer_table", "=", "None", ")", ":", "# get current peers", "current_peers", "=", "None", "with", "AtlasPeerTableLocked", "(", "peer_table", ")", "as", "ptbl", ":", "current_peers", "=", "ptbl", ".", "keys", "(", ")", "[", ":", "]", "return", "current_peers" ]
Get the current set of peers
[ "Get", "the", "current", "set", "of", "peers" ]
python
train
symphonyoss/python-symphony
symphony/Pod/groups.py
https://github.com/symphonyoss/python-symphony/blob/b939f35fbda461183ec0c01790c754f89a295be0/symphony/Pod/groups.py#L29-L35
def ib_group_member_list(self, group_id): ''' ib group member list ''' req_hook = 'pod/v1/admin/group/' + group_id + '/membership/list' req_args = None status_code, response = self.__rest__.GET_query(req_hook, req_args) self.logger.debug('%s: %s' % (status_code, response)) return status_code, response
[ "def", "ib_group_member_list", "(", "self", ",", "group_id", ")", ":", "req_hook", "=", "'pod/v1/admin/group/'", "+", "group_id", "+", "'/membership/list'", "req_args", "=", "None", "status_code", ",", "response", "=", "self", ".", "__rest__", ".", "GET_query", "(", "req_hook", ",", "req_args", ")", "self", ".", "logger", ".", "debug", "(", "'%s: %s'", "%", "(", "status_code", ",", "response", ")", ")", "return", "status_code", ",", "response" ]
ib group member list
[ "ib", "group", "member", "list" ]
python
train
xeroc/python-graphenelib
graphenecommon/price.py
https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/price.py#L267-L277
def as_quote(self, quote): """ Returns the price instance so that the quote asset is ``quote``. Note: This makes a copy of the object! """ if quote == self["quote"]["symbol"]: return self.copy() elif quote == self["base"]["symbol"]: return self.copy().invert() else: raise InvalidAssetException
[ "def", "as_quote", "(", "self", ",", "quote", ")", ":", "if", "quote", "==", "self", "[", "\"quote\"", "]", "[", "\"symbol\"", "]", ":", "return", "self", ".", "copy", "(", ")", "elif", "quote", "==", "self", "[", "\"base\"", "]", "[", "\"symbol\"", "]", ":", "return", "self", ".", "copy", "(", ")", ".", "invert", "(", ")", "else", ":", "raise", "InvalidAssetException" ]
Returns the price instance so that the quote asset is ``quote``. Note: This makes a copy of the object!
[ "Returns", "the", "price", "instance", "so", "that", "the", "quote", "asset", "is", "quote", "." ]
python
valid
saltstack/salt
salt/modules/dockercompose.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dockercompose.py#L349-L379
def __load_compose_definitions(path, definition): ''' Will load the compose file located at path Then determines the format/contents of the sent definition err or results are only set if there were any :param path: :param definition: :return tuple(compose_result, loaded_definition, err): ''' compose_result, err = __load_docker_compose(path) if err: return None, None, err if isinstance(definition, dict): return compose_result, definition, None elif definition.strip().startswith('{'): try: loaded_definition = json.deserialize(definition) except json.DeserializationError as jerr: msg = 'Could not parse {0} {1}'.format(definition, jerr) return None, None, __standardize_result(False, msg, None, None) else: try: loaded_definition = yaml.load(definition) except yaml.YAMLError as yerr: msg = 'Could not parse {0} {1}'.format(definition, yerr) return None, None, __standardize_result(False, msg, None, None) return compose_result, loaded_definition, None
[ "def", "__load_compose_definitions", "(", "path", ",", "definition", ")", ":", "compose_result", ",", "err", "=", "__load_docker_compose", "(", "path", ")", "if", "err", ":", "return", "None", ",", "None", ",", "err", "if", "isinstance", "(", "definition", ",", "dict", ")", ":", "return", "compose_result", ",", "definition", ",", "None", "elif", "definition", ".", "strip", "(", ")", ".", "startswith", "(", "'{'", ")", ":", "try", ":", "loaded_definition", "=", "json", ".", "deserialize", "(", "definition", ")", "except", "json", ".", "DeserializationError", "as", "jerr", ":", "msg", "=", "'Could not parse {0} {1}'", ".", "format", "(", "definition", ",", "jerr", ")", "return", "None", ",", "None", ",", "__standardize_result", "(", "False", ",", "msg", ",", "None", ",", "None", ")", "else", ":", "try", ":", "loaded_definition", "=", "yaml", ".", "load", "(", "definition", ")", "except", "yaml", ".", "YAMLError", "as", "yerr", ":", "msg", "=", "'Could not parse {0} {1}'", ".", "format", "(", "definition", ",", "yerr", ")", "return", "None", ",", "None", ",", "__standardize_result", "(", "False", ",", "msg", ",", "None", ",", "None", ")", "return", "compose_result", ",", "loaded_definition", ",", "None" ]
Will load the compose file located at path Then determines the format/contents of the sent definition err or results are only set if there were any :param path: :param definition: :return tuple(compose_result, loaded_definition, err):
[ "Will", "load", "the", "compose", "file", "located", "at", "path", "Then", "determines", "the", "format", "/", "contents", "of", "the", "sent", "definition" ]
python
train
SUNCAT-Center/CatHub
cathub/ase_tools/gas_phase_references.py
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/ase_tools/gas_phase_references.py#L8-L25
def molecules2symbols(molecules, add_hydrogen=True): """Take a list of molecules and return just a list of atomic symbols, possibly adding hydrogen """ symbols = sorted( list(set( ase.symbols.string2symbols(''.join( map( lambda _x: ''.join(ase.symbols.string2symbols(_x)), molecules) )) )), key=lambda _y: ase.data.atomic_numbers[_y]) if add_hydrogen and 'H' not in symbols: symbols.insert(0, 'H') return symbols
[ "def", "molecules2symbols", "(", "molecules", ",", "add_hydrogen", "=", "True", ")", ":", "symbols", "=", "sorted", "(", "list", "(", "set", "(", "ase", ".", "symbols", ".", "string2symbols", "(", "''", ".", "join", "(", "map", "(", "lambda", "_x", ":", "''", ".", "join", "(", "ase", ".", "symbols", ".", "string2symbols", "(", "_x", ")", ")", ",", "molecules", ")", ")", ")", ")", ")", ",", "key", "=", "lambda", "_y", ":", "ase", ".", "data", ".", "atomic_numbers", "[", "_y", "]", ")", "if", "add_hydrogen", "and", "'H'", "not", "in", "symbols", ":", "symbols", ".", "insert", "(", "0", ",", "'H'", ")", "return", "symbols" ]
Take a list of molecules and return just a list of atomic symbols, possibly adding hydrogen
[ "Take", "a", "list", "of", "molecules", "and", "return", "just", "a", "list", "of", "atomic", "symbols", "possibly", "adding", "hydrogen" ]
python
train
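A small usage sketch (requires ase, which the snippet already depends on): the formulas are split into atomic symbols, de-duplicated, sorted by atomic number, and hydrogen is prepended when it is missing.

print(molecules2symbols(['CO2', 'H2O']))
# -> ['H', 'C', 'O']   (hydrogen already present, nothing added)
print(molecules2symbols(['CO2'], add_hydrogen=False))
# -> ['C', 'O']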
mjirik/io3d
io3d/dili.py
https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/dili.py#L104-L121
def recursive_update(d, u): """ Dict recursive update. Based on Alex Martelli code on stackoverflow http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth?answertab=votes#tab-top :param d: dict to update :param u: dict with new data :return: """ for k, v in u.iteritems(): if isinstance(v, collections.Mapping): r = recursive_update(d.get(k, {}), v) d[k] = r else: d[k] = u[k] return d
[ "def", "recursive_update", "(", "d", ",", "u", ")", ":", "for", "k", ",", "v", "in", "u", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "v", ",", "collections", ".", "Mapping", ")", ":", "r", "=", "recursive_update", "(", "d", ".", "get", "(", "k", ",", "{", "}", ")", ",", "v", ")", "d", "[", "k", "]", "=", "r", "else", ":", "d", "[", "k", "]", "=", "u", "[", "k", "]", "return", "d" ]
Dict recursive update. Based on Alex Martelli code on stackoverflow http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth?answertab=votes#tab-top :param d: dict to update :param u: dict with new data :return:
[ "Dict", "recursive", "update", "." ]
python
train
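An illustrative call for the recursive merge above. Note the snippet is Python 2 flavoured (`iteritems`, `collections.Mapping`); under Python 3 those would be `items` and `collections.abc.Mapping`. Nested mappings are merged key by key, scalar values are overwritten, and the first argument is modified in place.

base = {"reader": {"path": "a.dcm", "zoom": 1}, "debug": False}
new = {"reader": {"zoom": 2}, "debug": True}
recursive_update(base, new)
print(base)
# -> {'reader': {'path': 'a.dcm', 'zoom': 2}, 'debug': True}  (key order may vary)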
seanbrant/pykss
pykss/parser.py
https://github.com/seanbrant/pykss/blob/c01b0f97c3ebfff32b4bdb3e6e2e1616c5c4ea3e/pykss/parser.py#L29-L36
def find_files(self): '''Find files in `paths` which match valid extensions''' for path in self.paths: for subpath, dirs, files in os.walk(path): for filename in files: (name, ext) = os.path.splitext(filename) if ext in self.extensions: yield os.path.join(subpath, filename)
[ "def", "find_files", "(", "self", ")", ":", "for", "path", "in", "self", ".", "paths", ":", "for", "subpath", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "filename", "in", "files", ":", "(", "name", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "ext", "in", "self", ".", "extensions", ":", "yield", "os", ".", "path", ".", "join", "(", "subpath", ",", "filename", ")" ]
Find files in `paths` which match valid extensions
[ "Find", "files", "in", "paths", "which", "match", "valid", "extensions" ]
python
train
jut-io/jut-python-tools
jut/api/deployments.py
https://github.com/jut-io/jut-python-tools/blob/65574d23f51a7bbced9bb25010d02da5ca5d906f/jut/api/deployments.py#L60-L82
def get_deployment_id(deployment_name, token_manager=None, app_url=defaults.APP_URL): """ return the deployment id for the deployment with the specified name """ headers = token_manager.get_access_token_headers() deployment_url = environment.get_deployment_url(app_url=app_url) response = requests.get('%s/api/v1/deployments' % deployment_url, headers=headers) if response.status_code == 200: deployments = response.json() for deployment in deployments: if deployment['name'] == deployment_name: return deployment['deployment_id'] raise JutException('Unable to find deployment with name %s' % deployment_name) else: raise JutException('Error %s: %s' % (response.status_code, response.text))
[ "def", "get_deployment_id", "(", "deployment_name", ",", "token_manager", "=", "None", ",", "app_url", "=", "defaults", ".", "APP_URL", ")", ":", "headers", "=", "token_manager", ".", "get_access_token_headers", "(", ")", "deployment_url", "=", "environment", ".", "get_deployment_url", "(", "app_url", "=", "app_url", ")", "response", "=", "requests", ".", "get", "(", "'%s/api/v1/deployments'", "%", "deployment_url", ",", "headers", "=", "headers", ")", "if", "response", ".", "status_code", "==", "200", ":", "deployments", "=", "response", ".", "json", "(", ")", "for", "deployment", "in", "deployments", ":", "if", "deployment", "[", "'name'", "]", "==", "deployment_name", ":", "return", "deployment", "[", "'deployment_id'", "]", "raise", "JutException", "(", "'Unable to find deployment with name %s'", "%", "deployment_name", ")", "else", ":", "raise", "JutException", "(", "'Error %s: %s'", "%", "(", "response", ".", "status_code", ",", "response", ".", "text", ")", ")" ]
return the deployment id for the deployment with the specified name
[ "return", "the", "deployment", "id", "for", "the", "deployment", "with", "the", "specified", "name" ]
python
train
Azure/azure-sdk-for-python
azure-mgmt-resource/azure/mgmt/resource/policy/policy_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-resource/azure/mgmt/resource/policy/policy_client.py#L127-L152
def policy_assignments(self): """Instance depends on the API version: * 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>` * 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>` * 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>` * 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>` * 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>` * 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>` """ api_version = self._get_api_version('policy_assignments') if api_version == '2015-10-01-preview': from .v2015_10_01_preview.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2016-04-01': from .v2016_04_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2016-12-01': from .v2016_12_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2017-06-01-preview': from .v2017_06_01_preview.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2018-03-01': from .v2018_03_01.operations import PolicyAssignmentsOperations as OperationClass elif api_version == '2018-05-01': from .v2018_05_01.operations import PolicyAssignmentsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "policy_assignments", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'policy_assignments'", ")", "if", "api_version", "==", "'2015-10-01-preview'", ":", "from", ".", "v2015_10_01_preview", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2016-04-01'", ":", "from", ".", "v2016_04_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2016-12-01'", ":", "from", ".", "v2016_12_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2017-06-01-preview'", ":", "from", ".", "v2017_06_01_preview", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-03-01'", ":", "from", ".", "v2018_03_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-05-01'", ":", "from", ".", "v2018_05_01", ".", "operations", "import", "PolicyAssignmentsOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
Instance depends on the API version: * 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>` * 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>` * 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>` * 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>` * 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>` * 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
test
TeleSign/python_telesign
telesign/rest.py
https://github.com/TeleSign/python_telesign/blob/f0c2e4373dc8d685e1a7d65444b5e55955c340cb/telesign/rest.py#L164-L172
def get(self, resource, **params): """ Generic TeleSign REST API GET handler. :param resource: The partial resource URI to perform the request against, as a string. :param params: Body params to perform the GET request with, as a dictionary. :return: The RestClient Response object. """ return self._execute(self.session.get, 'GET', resource, **params)
[ "def", "get", "(", "self", ",", "resource", ",", "*", "*", "params", ")", ":", "return", "self", ".", "_execute", "(", "self", ".", "session", ".", "get", ",", "'GET'", ",", "resource", ",", "*", "*", "params", ")" ]
Generic TeleSign REST API GET handler. :param resource: The partial resource URI to perform the request against, as a string. :param params: Body params to perform the GET request with, as a dictionary. :return: The RestClient Response object.
[ "Generic", "TeleSign", "REST", "API", "GET", "handler", "." ]
python
train