Column summary (name / dtype / observed range):

    repo               string   length 7 to 55
    path               string   length 4 to 223
    url                string   length 87 to 315
    code               string   length 75 to 104k
    code_tokens        list
    docstring          string   length 1 to 46.9k
    docstring_tokens   list
    language           string   1 distinct value
    partition          string   3 distinct values
    avg_line_len       float64  7.91 to 980
mardix/Mocha
mocha/core.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/core.py#L224-L264
def page_attr(title=None, **kwargs): """ Page Attr allows you to add page meta data in the request `g` context :params **kwargs: meta keys we're expecting: title (str) description (str) url (str) (Will pick it up by itself if not set) image (str) site_name (str) (but can pick it up from config file) object_type (str) keywords (list) locale (str) card (str) **Boolean By default these keys are True use_opengraph use_twitter use_googleplus python """ default = dict( title="", description="", url="", image="", site_name="", object_type="article", locale="", keywords=[], use_opengraph=True, use_googleplus=True, use_twitter=True, properties={} ) meta = getattr(g, "__META__", default) if title: kwargs["title"] = title meta.update(**kwargs) setattr(g, "__META__", meta)
[ "def", "page_attr", "(", "title", "=", "None", ",", "*", "*", "kwargs", ")", ":", "default", "=", "dict", "(", "title", "=", "\"\"", ",", "description", "=", "\"\"", ",", "url", "=", "\"\"", ",", "image", "=", "\"\"", ",", "site_name", "=", "\"\"", ",", "object_type", "=", "\"article\"", ",", "locale", "=", "\"\"", ",", "keywords", "=", "[", "]", ",", "use_opengraph", "=", "True", ",", "use_googleplus", "=", "True", ",", "use_twitter", "=", "True", ",", "properties", "=", "{", "}", ")", "meta", "=", "getattr", "(", "g", ",", "\"__META__\"", ",", "default", ")", "if", "title", ":", "kwargs", "[", "\"title\"", "]", "=", "title", "meta", ".", "update", "(", "*", "*", "kwargs", ")", "setattr", "(", "g", ",", "\"__META__\"", ",", "meta", ")" ]
Page Attr allows you to add page meta data in the request `g` context :params **kwargs: meta keys we're expecting: title (str) description (str) url (str) (Will pick it up by itself if not set) image (str) site_name (str) (but can pick it up from config file) object_type (str) keywords (list) locale (str) card (str) **Boolean By default these keys are True use_opengraph use_twitter use_googleplus python
[ "Page", "Attr", "allows", "you", "to", "add", "page", "meta", "data", "in", "the", "request", "g", "context", ":", "params", "**", "kwargs", ":" ]
python
train
24.02439
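The page_attr record above boils down to one pattern: accumulate per-request page metadata on a single attribute of the request context and merge each call into it. A minimal sketch of that getattr/update/setattr flow, with a plain SimpleNamespace standing in for Flask's g and the default dict trimmed to a few keys:

from types import SimpleNamespace

g = SimpleNamespace()  # stand-in for flask.g in this sketch

def page_attr(title=None, **kwargs):
    default = {"title": "", "description": "", "keywords": []}
    meta = getattr(g, "__META__", default)
    if title:
        kwargs["title"] = title
    meta.update(**kwargs)
    setattr(g, "__META__", meta)

page_attr("Home", description="Landing page")
page_attr(keywords=["python", "flask"])
print(g.__META__)
# {'title': 'Home', 'description': 'Landing page', 'keywords': ['python', 'flask']}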
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vcs.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vcs.py#L764-L776
def vcs_virtual_ipv6_address_ipv6address(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") vcs = ET.SubElement(config, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs") virtual = ET.SubElement(vcs, "virtual") ipv6 = ET.SubElement(virtual, "ipv6") address = ET.SubElement(ipv6, "address") ipv6address = ET.SubElement(address, "ipv6address") ipv6address.text = kwargs.pop('ipv6address') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "vcs_virtual_ipv6_address_ipv6address", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "vcs", "=", "ET", ".", "SubElement", "(", "config", ",", "\"vcs\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-vcs\"", ")", "virtual", "=", "ET", ".", "SubElement", "(", "vcs", ",", "\"virtual\"", ")", "ipv6", "=", "ET", ".", "SubElement", "(", "virtual", ",", "\"ipv6\"", ")", "address", "=", "ET", ".", "SubElement", "(", "ipv6", ",", "\"address\"", ")", "ipv6address", "=", "ET", ".", "SubElement", "(", "address", ",", "\"ipv6address\"", ")", "ipv6address", ".", "text", "=", "kwargs", ".", "pop", "(", "'ipv6address'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
42.923077
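The auto-generated Brocade method above does nothing but nest ElementTree SubElements and hand the finished document to a callback. The same element-building steps run stand-alone below; the namespace string is copied from the record, the address value is invented, and a print replaces the device callback:

import xml.etree.ElementTree as ET

config = ET.Element("config")
vcs = ET.SubElement(config, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs")
virtual = ET.SubElement(vcs, "virtual")
ipv6 = ET.SubElement(virtual, "ipv6")
address = ET.SubElement(ipv6, "address")
ipv6address = ET.SubElement(address, "ipv6address")
ipv6address.text = "2001:db8::1"  # hypothetical value for the example

# the original returns callback(config); serializing shows what would be sent
print(ET.tostring(config, encoding="unicode"))
# <config><vcs xmlns="urn:brocade.com:mgmt:brocade-vcs"><virtual><ipv6>...</config>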
dslackw/slpkg
slpkg/pkg/manager.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/pkg/manager.py#L270-L280
def _removepkg(self, package): """removepkg Slackware command """ try: subprocess.call("removepkg {0} {1}".format(self.flag, package), shell=True) if os.path.isfile(self.dep_path + package): os.remove(self.dep_path + package) # remove log except subprocess.CalledProcessError as er: print(er) raise SystemExit()
[ "def", "_removepkg", "(", "self", ",", "package", ")", ":", "try", ":", "subprocess", ".", "call", "(", "\"removepkg {0} {1}\"", ".", "format", "(", "self", ".", "flag", ",", "package", ")", ",", "shell", "=", "True", ")", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "dep_path", "+", "package", ")", ":", "os", ".", "remove", "(", "self", ".", "dep_path", "+", "package", ")", "# remove log", "except", "subprocess", ".", "CalledProcessError", "as", "er", ":", "print", "(", "er", ")", "raise", "SystemExit", "(", ")" ]
removepkg Slackware command
[ "removepkg", "Slackware", "command" ]
python
train
38.727273
kstaniek/condoor
condoor/actions.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/actions.py#L210-L214
def a_message_callback(ctx): """Message the captured pattern.""" message = ctx.ctrl.after.strip().splitlines()[-1] ctx.device.chain.connection.emit_message(message, log_level=logging.INFO) return True
[ "def", "a_message_callback", "(", "ctx", ")", ":", "message", "=", "ctx", ".", "ctrl", ".", "after", ".", "strip", "(", ")", ".", "splitlines", "(", ")", "[", "-", "1", "]", "ctx", ".", "device", ".", "chain", ".", "connection", ".", "emit_message", "(", "message", ",", "log_level", "=", "logging", ".", "INFO", ")", "return", "True" ]
Message the captured pattern.
[ "Message", "the", "captured", "pattern", "." ]
python
train
42.4
dagster-io/dagster
bin/publish.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/bin/publish.py#L50-L63
def construct_publish_comands(additional_steps=None, nightly=False): '''Get the shell commands we'll use to actually build and publish a package to PyPI.''' publish_commands = ( ['rm -rf dist'] + (additional_steps if additional_steps else []) + [ 'python setup.py sdist bdist_wheel{nightly}'.format( nightly=' --nightly' if nightly else '' ), 'twine upload dist/*', ] ) return publish_commands
[ "def", "construct_publish_comands", "(", "additional_steps", "=", "None", ",", "nightly", "=", "False", ")", ":", "publish_commands", "=", "(", "[", "'rm -rf dist'", "]", "+", "(", "additional_steps", "if", "additional_steps", "else", "[", "]", ")", "+", "[", "'python setup.py sdist bdist_wheel{nightly}'", ".", "format", "(", "nightly", "=", "' --nightly'", "if", "nightly", "else", "''", ")", ",", "'twine upload dist/*'", ",", "]", ")", "return", "publish_commands" ]
Get the shell commands we'll use to actually build and publish a package to PyPI.
[ "Get", "the", "shell", "commands", "we", "ll", "use", "to", "actually", "build", "and", "publish", "a", "package", "to", "PyPI", "." ]
python
test
34.357143
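For the dagster record above, the easiest way to see how additional_steps and nightly compose is to call the helper; it is re-declared here (keeping the repository's own spelling of the name) so the snippet runs on its own, and the extra step is made up:

def construct_publish_comands(additional_steps=None, nightly=False):
    return (
        ['rm -rf dist']
        + (additional_steps if additional_steps else [])
        + [
            'python setup.py sdist bdist_wheel{nightly}'.format(
                nightly=' --nightly' if nightly else ''),
            'twine upload dist/*',
        ]
    )

print(construct_publish_comands(additional_steps=['make clean'], nightly=True))
# ['rm -rf dist', 'make clean',
#  'python setup.py sdist bdist_wheel --nightly', 'twine upload dist/*']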
log2timeline/plaso
plaso/cli/psteal_tool.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/psteal_tool.py#L340-L423
def ParseArguments(self): """Parses the command line arguments. Returns: bool: True if the arguments were successfully parsed. """ loggers.ConfigureLogging() argument_parser = argparse.ArgumentParser( description=self.DESCRIPTION, epilog=self.EPILOG, add_help=False, formatter_class=argparse.RawDescriptionHelpFormatter) self.AddBasicOptions(argument_parser) extraction_group = argument_parser.add_argument_group( 'extraction arguments') argument_helper_names = ['extraction'] helpers_manager.ArgumentHelperManager.AddCommandLineArguments( extraction_group, names=argument_helper_names) extraction_group.add_argument( '--storage_file', '--storage-file', metavar='PATH', type=str, default=None, help=( 'The path of the storage file. If not specified, one will be made ' 'in the form <timestamp>-<source>.plaso')) self.AddStorageMediaImageOptions(extraction_group) self.AddCredentialOptions(extraction_group) info_group = argument_parser.add_argument_group('informational arguments') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( info_group, names=['status_view']) input_group = argument_parser.add_argument_group('input arguments') input_group.add_argument( '--source', dest='source', action='store', type=str, help='The source to process') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( input_group, names=['data_location']) output_group = argument_parser.add_argument_group('output arguments') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( output_group, names=['language']) self.AddTimeZoneOption(output_group) output_format_group = argument_parser.add_argument_group( 'output format arguments') helpers_manager.ArgumentHelperManager.AddCommandLineArguments( output_format_group, names=['output_modules']) processing_group = argument_parser.add_argument_group( 'processing arguments') self.AddPerformanceOptions(processing_group) self.AddProcessingOptions(processing_group) try: options = argument_parser.parse_args() except UnicodeEncodeError: # If we get here we are attempting to print help in a non-Unicode # terminal. self._output_writer.Write('\n') self._output_writer.Write(argument_parser.format_help()) return False try: self.ParseOptions(options) except errors.BadConfigOption as exception: self._output_writer.Write('ERROR: {0!s}\n'.format(exception)) self._output_writer.Write('\n') self._output_writer.Write(argument_parser.format_usage()) return False loggers.ConfigureLogging( debug_output=self._debug_mode, filename=self._log_file, quiet_mode=self._quiet_mode) return True
[ "def", "ParseArguments", "(", "self", ")", ":", "loggers", ".", "ConfigureLogging", "(", ")", "argument_parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "self", ".", "DESCRIPTION", ",", "epilog", "=", "self", ".", "EPILOG", ",", "add_help", "=", "False", ",", "formatter_class", "=", "argparse", ".", "RawDescriptionHelpFormatter", ")", "self", ".", "AddBasicOptions", "(", "argument_parser", ")", "extraction_group", "=", "argument_parser", ".", "add_argument_group", "(", "'extraction arguments'", ")", "argument_helper_names", "=", "[", "'extraction'", "]", "helpers_manager", ".", "ArgumentHelperManager", ".", "AddCommandLineArguments", "(", "extraction_group", ",", "names", "=", "argument_helper_names", ")", "extraction_group", ".", "add_argument", "(", "'--storage_file'", ",", "'--storage-file'", ",", "metavar", "=", "'PATH'", ",", "type", "=", "str", ",", "default", "=", "None", ",", "help", "=", "(", "'The path of the storage file. If not specified, one will be made '", "'in the form <timestamp>-<source>.plaso'", ")", ")", "self", ".", "AddStorageMediaImageOptions", "(", "extraction_group", ")", "self", ".", "AddCredentialOptions", "(", "extraction_group", ")", "info_group", "=", "argument_parser", ".", "add_argument_group", "(", "'informational arguments'", ")", "helpers_manager", ".", "ArgumentHelperManager", ".", "AddCommandLineArguments", "(", "info_group", ",", "names", "=", "[", "'status_view'", "]", ")", "input_group", "=", "argument_parser", ".", "add_argument_group", "(", "'input arguments'", ")", "input_group", ".", "add_argument", "(", "'--source'", ",", "dest", "=", "'source'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "help", "=", "'The source to process'", ")", "helpers_manager", ".", "ArgumentHelperManager", ".", "AddCommandLineArguments", "(", "input_group", ",", "names", "=", "[", "'data_location'", "]", ")", "output_group", "=", "argument_parser", ".", "add_argument_group", "(", "'output arguments'", ")", "helpers_manager", ".", "ArgumentHelperManager", ".", "AddCommandLineArguments", "(", "output_group", ",", "names", "=", "[", "'language'", "]", ")", "self", ".", "AddTimeZoneOption", "(", "output_group", ")", "output_format_group", "=", "argument_parser", ".", "add_argument_group", "(", "'output format arguments'", ")", "helpers_manager", ".", "ArgumentHelperManager", ".", "AddCommandLineArguments", "(", "output_format_group", ",", "names", "=", "[", "'output_modules'", "]", ")", "processing_group", "=", "argument_parser", ".", "add_argument_group", "(", "'processing arguments'", ")", "self", ".", "AddPerformanceOptions", "(", "processing_group", ")", "self", ".", "AddProcessingOptions", "(", "processing_group", ")", "try", ":", "options", "=", "argument_parser", ".", "parse_args", "(", ")", "except", "UnicodeEncodeError", ":", "# If we get here we are attempting to print help in a non-Unicode", "# terminal.", "self", ".", "_output_writer", ".", "Write", "(", "'\\n'", ")", "self", ".", "_output_writer", ".", "Write", "(", "argument_parser", ".", "format_help", "(", ")", ")", "return", "False", "try", ":", "self", ".", "ParseOptions", "(", "options", ")", "except", "errors", ".", "BadConfigOption", "as", "exception", ":", "self", ".", "_output_writer", ".", "Write", "(", "'ERROR: {0!s}\\n'", ".", "format", "(", "exception", ")", ")", "self", ".", "_output_writer", ".", "Write", "(", "'\\n'", ")", "self", ".", "_output_writer", ".", "Write", "(", "argument_parser", ".", "format_usage", "(", ")", ")", "return", "False", 
"loggers", ".", "ConfigureLogging", "(", "debug_output", "=", "self", ".", "_debug_mode", ",", "filename", "=", "self", ".", "_log_file", ",", "quiet_mode", "=", "self", ".", "_quiet_mode", ")", "return", "True" ]
Parses the command line arguments. Returns: bool: True if the arguments were successfully parsed.
[ "Parses", "the", "command", "line", "arguments", "." ]
python
train
33.642857
radjkarl/fancyTools
fancytools/math/blockshaped.py
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/blockshaped.py#L12-L23
def blockshaped(arr, nrows, ncols): """ Return an new array of shape (n, nrows, ncols) where n * nrows * ncols = arr.size If arr is a 2D array, the returned array looks like n subblocks with each subblock preserving the "physical" layout of arr. """ h, w = arr.shape return (arr.reshape(h // nrows, nrows, -1, ncols) .swapaxes(1, 2) .reshape(-1, nrows, ncols))
[ "def", "blockshaped", "(", "arr", ",", "nrows", ",", "ncols", ")", ":", "h", ",", "w", "=", "arr", ".", "shape", "return", "(", "arr", ".", "reshape", "(", "h", "//", "nrows", ",", "nrows", ",", "-", "1", ",", "ncols", ")", ".", "swapaxes", "(", "1", ",", "2", ")", ".", "reshape", "(", "-", "1", ",", "nrows", ",", "ncols", ")", ")" ]
Return an new array of shape (n, nrows, ncols) where n * nrows * ncols = arr.size If arr is a 2D array, the returned array looks like n subblocks with each subblock preserving the "physical" layout of arr.
[ "Return", "an", "new", "array", "of", "shape", "(", "n", "nrows", "ncols", ")", "where", "n", "*", "nrows", "*", "ncols", "=", "arr", ".", "size" ]
python
train
34.333333
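The blockshaped docstring above is easiest to verify on a tiny array. This sketch repeats the same reshape/swapaxes/reshape steps and splits a 4x6 array into four 2x3 blocks:

import numpy as np

def blockshaped(arr, nrows, ncols):
    h, w = arr.shape
    return (arr.reshape(h // nrows, nrows, -1, ncols)
               .swapaxes(1, 2)
               .reshape(-1, nrows, ncols))

a = np.arange(24).reshape(4, 6)
blocks = blockshaped(a, 2, 3)
print(blocks.shape)  # (4, 2, 3)
print(blocks[0])     # [[0 1 2]
                     #  [6 7 8]]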
androguard/androguard
androguard/core/bytecodes/axml/__init__.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/axml/__init__.py#L1826-L1840
def get_resolved_res_configs(self, rid, config=None): """ Return a list of resolved resource IDs with their corresponding configuration. It has a similar return type as :meth:`get_res_configs` but also handles complex entries and references. Also instead of returning :class:`ARSCResTableEntry` in the tuple, the actual values are resolved. This is the preferred way of resolving resource IDs to their resources. :param int rid: the numerical ID of the resource :param ARSCTableResConfig config: the desired configuration or None to retrieve all :return: A list of tuples of (ARSCResTableConfig, str) """ resolver = ARSCParser.ResourceResolver(self, config) return resolver.resolve(rid)
[ "def", "get_resolved_res_configs", "(", "self", ",", "rid", ",", "config", "=", "None", ")", ":", "resolver", "=", "ARSCParser", ".", "ResourceResolver", "(", "self", ",", "config", ")", "return", "resolver", ".", "resolve", "(", "rid", ")" ]
Return a list of resolved resource IDs with their corresponding configuration. It has a similar return type as :meth:`get_res_configs` but also handles complex entries and references. Also instead of returning :class:`ARSCResTableEntry` in the tuple, the actual values are resolved. This is the preferred way of resolving resource IDs to their resources. :param int rid: the numerical ID of the resource :param ARSCTableResConfig config: the desired configuration or None to retrieve all :return: A list of tuples of (ARSCResTableConfig, str)
[ "Return", "a", "list", "of", "resolved", "resource", "IDs", "with", "their", "corresponding", "configuration", ".", "It", "has", "a", "similar", "return", "type", "as", ":", "meth", ":", "get_res_configs", "but", "also", "handles", "complex", "entries", "and", "references", ".", "Also", "instead", "of", "returning", ":", "class", ":", "ARSCResTableEntry", "in", "the", "tuple", "the", "actual", "values", "are", "resolved", "." ]
python
train
51.333333
dossier/dossier.store
dossier/store/elastic.py
https://github.com/dossier/dossier.store/blob/b22ffe2470bba9fcc98a30cb55b437bfa1521e7f/dossier/store/elastic.py#L241-L253
def delete_all(self): '''Deletes all feature collections. This does not destroy the ES index, but instead only deletes all FCs with the configured document type (defaults to ``fc``). ''' try: self.conn.indices.delete_mapping( index=self.index, doc_type=self.type) except TransportError: logger.warn('type %r in index %r already deleted', self.index, self.type, exc_info=True)
[ "def", "delete_all", "(", "self", ")", ":", "try", ":", "self", ".", "conn", ".", "indices", ".", "delete_mapping", "(", "index", "=", "self", ".", "index", ",", "doc_type", "=", "self", ".", "type", ")", "except", "TransportError", ":", "logger", ".", "warn", "(", "'type %r in index %r already deleted'", ",", "self", ".", "index", ",", "self", ".", "type", ",", "exc_info", "=", "True", ")" ]
Deletes all feature collections. This does not destroy the ES index, but instead only deletes all FCs with the configured document type (defaults to ``fc``).
[ "Deletes", "all", "feature", "collections", "." ]
python
test
37.230769
BoGoEngine/bogo-python
bogo/core.py
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L365-L434
def _transform(comps, trans): """ Transform the given string with transform type trans """ logging.debug("== In _transform(%s, %s) ==", comps, trans) components = list(comps) action, parameter = _get_action(trans) if action == _Action.ADD_MARK and \ components[2] == "" and \ mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^": action, parameter = _Action.ADD_CHAR, trans[0] if action == _Action.ADD_ACCENT: logging.debug("add_accent(%s, %s)", components, parameter) components = accent.add_accent(components, parameter) elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans): logging.debug("add_mark(%s, %s)", components, parameter) components = mark.add_mark(components, parameter) # Handle uơ in "huơ", "thuở", "quở" # If the current word has no last consonant and the first consonant # is one of "h", "th" and the vowel is "ươ" then change the vowel into # "uơ", keeping case and accent. If an alphabet character is then added # into the word then change back to "ươ". # # NOTE: In the dictionary, these are the only words having this strange # vowel so we don't need to worry about other cases. if accent.remove_accent_string(components[1]).lower() == "ươ" and \ not components[2] and components[0].lower() in ["", "h", "th", "kh"]: # Backup accents ac = accent.get_accent_string(components[1]) components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1] components = accent.add_accent(components, ac) elif action == _Action.ADD_CHAR: if trans[0] == "<": if not components[2]: # Only allow ư, ơ or ươ sitting alone in the middle part # and ['g', 'i', '']. If we want to type giowf = 'giờ', separate() # will create ['g', 'i', '']. Therefore we have to allow # components[1] == 'i'. if (components[0].lower(), components[1].lower()) == ('g', 'i'): components[0] += components[1] components[1] = '' if not components[1] or \ (components[1].lower(), trans[1].lower()) == ('ư', 'ơ'): components[1] += trans[1] else: components = utils.append_comps(components, parameter) if parameter.isalpha() and \ accent.remove_accent_string(components[1]).lower().startswith("uơ"): ac = accent.get_accent_string(components[1]) components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \ ('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:] components = accent.add_accent(components, ac) elif action == _Action.UNDO: components = _reverse(components, trans[1:]) if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()): # If there is any accent, remove and reapply it # because it is likely to be misplaced in previous transformations ac = accent.get_accent_string(components[1]) if ac != accent.Accent.NONE: components = accent.add_accent(components, Accent.NONE) components = accent.add_accent(components, ac) logging.debug("After transform: %s", components) return components
[ "def", "_transform", "(", "comps", ",", "trans", ")", ":", "logging", ".", "debug", "(", "\"== In _transform(%s, %s) ==\"", ",", "comps", ",", "trans", ")", "components", "=", "list", "(", "comps", ")", "action", ",", "parameter", "=", "_get_action", "(", "trans", ")", "if", "action", "==", "_Action", ".", "ADD_MARK", "and", "components", "[", "2", "]", "==", "\"\"", "and", "mark", ".", "strip", "(", "components", "[", "1", "]", ")", ".", "lower", "(", ")", "in", "[", "'oe'", ",", "'oa'", "]", "and", "trans", "==", "\"o^\"", ":", "action", ",", "parameter", "=", "_Action", ".", "ADD_CHAR", ",", "trans", "[", "0", "]", "if", "action", "==", "_Action", ".", "ADD_ACCENT", ":", "logging", ".", "debug", "(", "\"add_accent(%s, %s)\"", ",", "components", ",", "parameter", ")", "components", "=", "accent", ".", "add_accent", "(", "components", ",", "parameter", ")", "elif", "action", "==", "_Action", ".", "ADD_MARK", "and", "mark", ".", "is_valid_mark", "(", "components", ",", "trans", ")", ":", "logging", ".", "debug", "(", "\"add_mark(%s, %s)\"", ",", "components", ",", "parameter", ")", "components", "=", "mark", ".", "add_mark", "(", "components", ",", "parameter", ")", "# Handle uơ in \"huơ\", \"thuở\", \"quở\"", "# If the current word has no last consonant and the first consonant", "# is one of \"h\", \"th\" and the vowel is \"ươ\" then change the vowel into", "# \"uơ\", keeping case and accent. If an alphabet character is then added", "# into the word then change back to \"ươ\".", "#", "# NOTE: In the dictionary, these are the only words having this strange", "# vowel so we don't need to worry about other cases.", "if", "accent", ".", "remove_accent_string", "(", "components", "[", "1", "]", ")", ".", "lower", "(", ")", "==", "\"ươ\" a", "d \\", "not", "components", "[", "2", "]", "and", "components", "[", "0", "]", ".", "lower", "(", ")", "in", "[", "\"\"", ",", "\"h\"", ",", "\"th\"", ",", "\"kh\"", "]", ":", "# Backup accents", "ac", "=", "accent", ".", "get_accent_string", "(", "components", "[", "1", "]", ")", "components", "[", "1", "]", "=", "(", "\"u\"", ",", "\"U\"", ")", "[", "components", "[", "1", "]", "[", "0", "]", ".", "isupper", "(", ")", "]", "+", "components", "[", "1", "]", "[", "1", "]", "components", "=", "accent", ".", "add_accent", "(", "components", ",", "ac", ")", "elif", "action", "==", "_Action", ".", "ADD_CHAR", ":", "if", "trans", "[", "0", "]", "==", "\"<\"", ":", "if", "not", "components", "[", "2", "]", ":", "# Only allow ư, ơ or ươ sitting alone in the middle part", "# and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()", "# will create ['g', 'i', '']. 
Therefore we have to allow", "# components[1] == 'i'.", "if", "(", "components", "[", "0", "]", ".", "lower", "(", ")", ",", "components", "[", "1", "]", ".", "lower", "(", ")", ")", "==", "(", "'g'", ",", "'i'", ")", ":", "components", "[", "0", "]", "+=", "components", "[", "1", "]", "components", "[", "1", "]", "=", "''", "if", "not", "components", "[", "1", "]", "or", "(", "components", "[", "1", "]", ".", "lower", "(", ")", ",", "trans", "[", "1", "]", ".", "lower", "(", ")", ")", "==", "(", "'ư',", " ", "ơ'):", "", "", "components", "[", "1", "]", "+=", "trans", "[", "1", "]", "else", ":", "components", "=", "utils", ".", "append_comps", "(", "components", ",", "parameter", ")", "if", "parameter", ".", "isalpha", "(", ")", "and", "accent", ".", "remove_accent_string", "(", "components", "[", "1", "]", ")", ".", "lower", "(", ")", ".", "startswith", "(", "\"uơ\")", ":", "", "ac", "=", "accent", ".", "get_accent_string", "(", "components", "[", "1", "]", ")", "components", "[", "1", "]", "=", "(", "'ư',", " ", "Ư')[", "c", "o", "mponents[1", "]", "[", "0", "]", ".", "i", "s", "upper()", "]", " ", "+", "\\", "(", "'ơ',", " ", "Ơ')[", "c", "o", "mponents[1", "]", "[", "1", "]", ".", "i", "s", "upper()", "]", " ", "+", "c", "mponents[1", "]", "[", "2", ":", "]", "", "", "components", "=", "accent", ".", "add_accent", "(", "components", ",", "ac", ")", "elif", "action", "==", "_Action", ".", "UNDO", ":", "components", "=", "_reverse", "(", "components", ",", "trans", "[", "1", ":", "]", ")", "if", "action", "==", "_Action", ".", "ADD_MARK", "or", "(", "action", "==", "_Action", ".", "ADD_CHAR", "and", "parameter", ".", "isalpha", "(", ")", ")", ":", "# If there is any accent, remove and reapply it", "# because it is likely to be misplaced in previous transformations", "ac", "=", "accent", ".", "get_accent_string", "(", "components", "[", "1", "]", ")", "if", "ac", "!=", "accent", ".", "Accent", ".", "NONE", ":", "components", "=", "accent", ".", "add_accent", "(", "components", ",", "Accent", ".", "NONE", ")", "components", "=", "accent", ".", "add_accent", "(", "components", ",", "ac", ")", "logging", ".", "debug", "(", "\"After transform: %s\"", ",", "components", ")", "return", "components" ]
Transform the given string with transform type trans
[ "Transform", "the", "given", "string", "with", "transform", "type", "trans" ]
python
train
48.971429
saltstack/salt
salt/states/git.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/git.py#L47-L63
def _revs_equal(rev1, rev2, rev_type): ''' Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev. ''' if (rev1 is None and rev2 is not None) \ or (rev2 is None and rev1 is not None): return False elif rev1 is rev2 is None: return True elif rev_type == 'sha1': return rev1.startswith(rev2) else: return rev1 == rev2
[ "def", "_revs_equal", "(", "rev1", ",", "rev2", ",", "rev_type", ")", ":", "if", "(", "rev1", "is", "None", "and", "rev2", "is", "not", "None", ")", "or", "(", "rev2", "is", "None", "and", "rev1", "is", "not", "None", ")", ":", "return", "False", "elif", "rev1", "is", "rev2", "is", "None", ":", "return", "True", "elif", "rev_type", "==", "'sha1'", ":", "return", "rev1", ".", "startswith", "(", "rev2", ")", "else", ":", "return", "rev1", "==", "rev2" ]
Shorthand helper function for comparing SHA1s. If rev_type == 'sha1' then the comparison will be done using str.startwith() to allow short SHA1s to compare successfully. NOTE: This means that rev2 must be the short rev.
[ "Shorthand", "helper", "function", "for", "comparing", "SHA1s", ".", "If", "rev_type", "==", "sha1", "then", "the", "comparison", "will", "be", "done", "using", "str", ".", "startwith", "()", "to", "allow", "short", "SHA1s", "to", "compare", "successfully", "." ]
python
train
32.235294
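The salt helper above leans on str.startswith to let a short SHA1 match a full one (the docstring's "startwith" is the repository's own typo), with the caveat that the short rev must be the second argument. A quick stand-alone check; the full SHA is taken from the record's URL:

def revs_equal(rev1, rev2, rev_type):
    if (rev1 is None and rev2 is not None) \
            or (rev2 is None and rev1 is not None):
        return False
    elif rev1 is rev2 is None:
        return True
    elif rev_type == 'sha1':
        return rev1.startswith(rev2)  # rev2 must be the short rev
    return rev1 == rev2

full = 'e8541fd6e744ab0df786c0f76102e41631f45d46'
print(revs_equal(full, 'e8541fd', 'sha1'))   # True
print(revs_equal('e8541fd', full, 'sha1'))   # False: arguments reversed
print(revs_equal(None, None, 'sha1'))        # True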
rosenbrockc/ci
pyci/config.py
https://github.com/rosenbrockc/ci/blob/4d5a60291424a83124d1d962d17fb4c7718cde2b/pyci/config.py#L454-L460
def var_replace(self, text): """Replaces all instances of @VAR with their values in the specified text. """ result = text for var in self._vardict: result = result.replace("@{}".format(var), self._vardict[var]) return result
[ "def", "var_replace", "(", "self", ",", "text", ")", ":", "result", "=", "text", "for", "var", "in", "self", ".", "_vardict", ":", "result", "=", "result", ".", "replace", "(", "\"@{}\"", ".", "format", "(", "var", ")", ",", "self", ".", "_vardict", "[", "var", "]", ")", "return", "result" ]
Replaces all instances of @VAR with their values in the specified text.
[ "Replaces", "all", "instances", "of" ]
python
train
38.571429
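The pyci var_replace above is a plain substring substitution over a variable dictionary; the same loop with a hypothetical vardict makes the @VAR convention concrete:

def var_replace(text, vardict):
    result = text
    for var in vardict:
        result = result.replace("@{}".format(var), vardict[var])
    return result

vardict = {"HOME": "/home/ci", "BRANCH": "master"}  # invented values
print(var_replace("cd @HOME && git checkout @BRANCH", vardict))
# cd /home/ci && git checkout master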
drdoctr/doctr
doctr/travis.py
https://github.com/drdoctr/doctr/blob/0f19ff78c8239efcc98d417f36b0a31d9be01ba5/doctr/travis.py#L383-L400
def copy_to_tmp(source): """ Copies ``source`` to a temporary directory, and returns the copied location. If source is a file, the copied location is also a file. """ tmp_dir = tempfile.mkdtemp() # Use pathlib because os.path.basename is different depending on whether # the path ends in a / p = pathlib.Path(source) dirname = p.name or 'temp' new_dir = os.path.join(tmp_dir, dirname) if os.path.isdir(source): shutil.copytree(source, new_dir) else: shutil.copy2(source, new_dir) return new_dir
[ "def", "copy_to_tmp", "(", "source", ")", ":", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "# Use pathlib because os.path.basename is different depending on whether", "# the path ends in a /", "p", "=", "pathlib", ".", "Path", "(", "source", ")", "dirname", "=", "p", ".", "name", "or", "'temp'", "new_dir", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "dirname", ")", "if", "os", ".", "path", ".", "isdir", "(", "source", ")", ":", "shutil", ".", "copytree", "(", "source", ",", "new_dir", ")", "else", ":", "shutil", ".", "copy2", "(", "source", ",", "new_dir", ")", "return", "new_dir" ]
Copies ``source`` to a temporary directory, and returns the copied location. If source is a file, the copied location is also a file.
[ "Copies", "source", "to", "a", "temporary", "directory", "and", "returns", "the", "copied", "location", "." ]
python
train
30.555556
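The doctr copy_to_tmp above copies a file or directory into a fresh temporary directory, using pathlib so a trailing slash does not wipe out the basename. A runnable sketch; the throwaway input directory and file exist only for this demonstration:

import os
import pathlib
import shutil
import tempfile

def copy_to_tmp(source):
    tmp_dir = tempfile.mkdtemp()
    # pathlib keeps the basename stable whether or not source ends in '/'
    dirname = pathlib.Path(source).name or 'temp'
    new_dir = os.path.join(tmp_dir, dirname)
    if os.path.isdir(source):
        shutil.copytree(source, new_dir)
    else:
        shutil.copy2(source, new_dir)
    return new_dir

src = tempfile.mkdtemp(suffix='-docs')          # throwaway input directory
open(os.path.join(src, 'index.html'), 'w').close()
copied = copy_to_tmp(src + '/')                 # trailing slash is handled
print(os.listdir(copied))                       # ['index.html']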
f3at/feat
src/feat/models/model.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/model.py#L231-L250
def delete(name, *effects, **kwargs): """ Annotate a delete action to the model being defined. Should be delete(name, *effects, label=None, desc=None) but it is not supported by python < 3. @param name: item name unique for the model being defined. @type name: str or unicode @param effects: @type effects: str or unicode @param label: the action label or None. @type label: str or unicode or None @param desc: the action description or None if not documented. @type desc: str or unicode or None """ label = kwargs.pop("label", None) desc = kwargs.pop("desc", None) if kwargs: raise TypeError("delete() got an unexpected keyword '%s'" % kwargs.keys()[0]) _annotate("delete", name, effects=effects, label=label, desc=desc)
[ "def", "delete", "(", "name", ",", "*", "effects", ",", "*", "*", "kwargs", ")", ":", "label", "=", "kwargs", ".", "pop", "(", "\"label\"", ",", "None", ")", "desc", "=", "kwargs", ".", "pop", "(", "\"desc\"", ",", "None", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "\"delete() got an unexpected keyword '%s'\"", "%", "kwargs", ".", "keys", "(", ")", "[", "0", "]", ")", "_annotate", "(", "\"delete\"", ",", "name", ",", "effects", "=", "effects", ",", "label", "=", "label", ",", "desc", "=", "desc", ")" ]
Annotate a delete action to the model being defined. Should be delete(name, *effects, label=None, desc=None) but it is not supported by python < 3. @param name: item name unique for the model being defined. @type name: str or unicode @param effects: @type effects: str or unicode @param label: the action label or None. @type label: str or unicode or None @param desc: the action description or None if not documented. @type desc: str or unicode or None
[ "Annotate", "a", "delete", "action", "to", "the", "model", "being", "defined", ".", "Should", "be", "delete", "(", "name", "*", "effects", "label", "=", "None", "desc", "=", "None", ")", "but", "it", "is", "not", "supported", "by", "python", "<", "3", "." ]
python
train
40.15
edoburu/django-template-analyzer
template_analyzer/djangoanalyzer.py
https://github.com/edoburu/django-template-analyzer/blob/912916dadf68e5fb6bd3dbaa8e5dcad69d3086d0/template_analyzer/djangoanalyzer.py#L100-L122
def _extend_nodelist(extends_node, context, instance_types): """ Returns a list of results found in the parent template(s) :type extends_node: ExtendsNode """ results = [] # Find all blocks in the complete inheritance chain blocks = extends_node.blocks.copy() # dict with all blocks in the current template _extend_blocks(extends_node, blocks, context) # Dive into all blocks of the page one by one all_block_names = list(blocks.keys()) for block in list(blocks.values()): results += _scan_nodes(block.nodelist, context, instance_types, block, ignore_blocks=all_block_names) # Scan topmost template for nodes that exist outside of blocks parent_template = _find_topmost_template(extends_node, context) if not parent_template: return [] else: results += _scan_nodes(parent_template.nodelist, context, instance_types, ignore_blocks=all_block_names) return results
[ "def", "_extend_nodelist", "(", "extends_node", ",", "context", ",", "instance_types", ")", ":", "results", "=", "[", "]", "# Find all blocks in the complete inheritance chain", "blocks", "=", "extends_node", ".", "blocks", ".", "copy", "(", ")", "# dict with all blocks in the current template", "_extend_blocks", "(", "extends_node", ",", "blocks", ",", "context", ")", "# Dive into all blocks of the page one by one", "all_block_names", "=", "list", "(", "blocks", ".", "keys", "(", ")", ")", "for", "block", "in", "list", "(", "blocks", ".", "values", "(", ")", ")", ":", "results", "+=", "_scan_nodes", "(", "block", ".", "nodelist", ",", "context", ",", "instance_types", ",", "block", ",", "ignore_blocks", "=", "all_block_names", ")", "# Scan topmost template for nodes that exist outside of blocks", "parent_template", "=", "_find_topmost_template", "(", "extends_node", ",", "context", ")", "if", "not", "parent_template", ":", "return", "[", "]", "else", ":", "results", "+=", "_scan_nodes", "(", "parent_template", ".", "nodelist", ",", "context", ",", "instance_types", ",", "ignore_blocks", "=", "all_block_names", ")", "return", "results" ]
Returns a list of results found in the parent template(s) :type extends_node: ExtendsNode
[ "Returns", "a", "list", "of", "results", "found", "in", "the", "parent", "template", "(", "s", ")", ":", "type", "extends_node", ":", "ExtendsNode" ]
python
train
40.652174
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1701-L1720
def validate_col(self, itemsize=None): """ validate this column: return the compared against itemsize """ # validate this column for string truncation (or reset to the max size) if _ensure_decoded(self.kind) == 'string': c = self.col if c is not None: if itemsize is None: itemsize = self.itemsize if c.itemsize < itemsize: raise ValueError( "Trying to store a string with len [{itemsize}] in " "[{cname}] column but\nthis column has a limit of " "[{c_itemsize}]!\nConsider using min_itemsize to " "preset the sizes on these columns".format( itemsize=itemsize, cname=self.cname, c_itemsize=c.itemsize)) return c.itemsize return None
[ "def", "validate_col", "(", "self", ",", "itemsize", "=", "None", ")", ":", "# validate this column for string truncation (or reset to the max size)", "if", "_ensure_decoded", "(", "self", ".", "kind", ")", "==", "'string'", ":", "c", "=", "self", ".", "col", "if", "c", "is", "not", "None", ":", "if", "itemsize", "is", "None", ":", "itemsize", "=", "self", ".", "itemsize", "if", "c", ".", "itemsize", "<", "itemsize", ":", "raise", "ValueError", "(", "\"Trying to store a string with len [{itemsize}] in \"", "\"[{cname}] column but\\nthis column has a limit of \"", "\"[{c_itemsize}]!\\nConsider using min_itemsize to \"", "\"preset the sizes on these columns\"", ".", "format", "(", "itemsize", "=", "itemsize", ",", "cname", "=", "self", ".", "cname", ",", "c_itemsize", "=", "c", ".", "itemsize", ")", ")", "return", "c", ".", "itemsize", "return", "None" ]
validate this column: return the compared against itemsize
[ "validate", "this", "column", ":", "return", "the", "compared", "against", "itemsize" ]
python
train
45.55
havardgulldahl/jottalib
src/jottalib/JFS.py
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/JFS.py#L334-L342
def rename(self, newpath): "Move folder to a new name, possibly a whole new path" # POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder #url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath) params = {'mvDir':'/%s%s' % (self.jfs.username, newpath)} r = self.jfs.post(self.path, extra_headers={'Content-Type':'application/octet-stream'}, params=params) return r
[ "def", "rename", "(", "self", ",", "newpath", ")", ":", "# POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder", "#url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)", "params", "=", "{", "'mvDir'", ":", "'/%s%s'", "%", "(", "self", ".", "jfs", ".", "username", ",", "newpath", ")", "}", "r", "=", "self", ".", "jfs", ".", "post", "(", "self", ".", "path", ",", "extra_headers", "=", "{", "'Content-Type'", ":", "'application/octet-stream'", "}", ",", "params", "=", "params", ")", "return", "r" ]
Move folder to a new name, possibly a whole new path
[ "Move", "folder", "to", "a", "new", "name", "possibly", "a", "whole", "new", "path" ]
python
train
58.222222
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/__init__.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/__init__.py#L76-L82
def _all_dirs(base_path): """ Return all dirs in base_path, relative to base_path """ for root, dirs, files in os.walk(base_path, followlinks=True): for dir in dirs: yield os.path.relpath(os.path.join(root, dir), base_path)
[ "def", "_all_dirs", "(", "base_path", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "base_path", ",", "followlinks", "=", "True", ")", ":", "for", "dir", "in", "dirs", ":", "yield", "os", ".", "path", ".", "relpath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "dir", ")", ",", "base_path", ")" ]
Return all dirs in base_path, relative to base_path
[ "Return", "all", "dirs", "in", "base_path", "relative", "to", "base_path" ]
python
test
39.571429
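The setuptools _all_dirs above is a two-line os.walk generator; running the same idea against a small throwaway tree shows the relative paths it yields (the directory names are invented):

import os
import tempfile

def all_dirs(base_path):
    for root, dirs, files in os.walk(base_path, followlinks=True):
        for d in dirs:
            yield os.path.relpath(os.path.join(root, d), base_path)

base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, 'pkg', 'sub'))
print(sorted(all_dirs(base)))   # ['pkg', 'pkg/sub'] on POSIX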
urinieto/msaf
examples/compute_features.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/examples/compute_features.py#L37-L51
def process(in_path, out_file, n_jobs, framesync): """Computes the features for the selected dataset or file.""" if os.path.isfile(in_path): # Single file mode # Get (if they exitst) or compute features file_struct = msaf.io.FileStruct(in_path) file_struct.features_file = out_file compute_all_features(file_struct, framesync) else: # Collection mode file_structs = msaf.io.get_dataset_files(in_path) # Call in parallel return Parallel(n_jobs=n_jobs)(delayed(compute_all_features)( file_struct, framesync) for file_struct in file_structs)
[ "def", "process", "(", "in_path", ",", "out_file", ",", "n_jobs", ",", "framesync", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "in_path", ")", ":", "# Single file mode", "# Get (if they exitst) or compute features", "file_struct", "=", "msaf", ".", "io", ".", "FileStruct", "(", "in_path", ")", "file_struct", ".", "features_file", "=", "out_file", "compute_all_features", "(", "file_struct", ",", "framesync", ")", "else", ":", "# Collection mode", "file_structs", "=", "msaf", ".", "io", ".", "get_dataset_files", "(", "in_path", ")", "# Call in parallel", "return", "Parallel", "(", "n_jobs", "=", "n_jobs", ")", "(", "delayed", "(", "compute_all_features", ")", "(", "file_struct", ",", "framesync", ")", "for", "file_struct", "in", "file_structs", ")" ]
Computes the features for the selected dataset or file.
[ "Computes", "the", "features", "for", "the", "selected", "dataset", "or", "file", "." ]
python
test
41.4
streamlink/streamlink
src/streamlink/stream/segmented.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/stream/segmented.py#L111-L122
def put(self, segment): """Adds a segment to the download pool and write queue.""" if self.closed: return if segment is not None: future = self.executor.submit(self.fetch, segment, retries=self.retries) else: future = None self.queue(self.futures, (segment, future))
[ "def", "put", "(", "self", ",", "segment", ")", ":", "if", "self", ".", "closed", ":", "return", "if", "segment", "is", "not", "None", ":", "future", "=", "self", ".", "executor", ".", "submit", "(", "self", ".", "fetch", ",", "segment", ",", "retries", "=", "self", ".", "retries", ")", "else", ":", "future", "=", "None", "self", ".", "queue", "(", "self", ".", "futures", ",", "(", "segment", ",", "future", ")", ")" ]
Adds a segment to the download pool and write queue.
[ "Adds", "a", "segment", "to", "the", "download", "pool", "and", "write", "queue", "." ]
python
test
31.25
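The streamlink put above follows a common fan-out pattern: submit each segment to an executor and queue the (segment, future) pair so a writer can consume results in arrival order, with None marking the end of the stream. A stripped-down sketch using a plain ThreadPoolExecutor and queue.Queue in place of the stream's own machinery:

import queue
from concurrent.futures import ThreadPoolExecutor

def fetch(segment):                      # stand-in for the real downloader
    return "data-for-{}".format(segment)

executor = ThreadPoolExecutor(max_workers=2)
futures = queue.Queue()

def put(segment):
    future = executor.submit(fetch, segment) if segment is not None else None
    futures.put((segment, future))

for seg in [1, 2, None]:                 # None marks end of stream
    put(seg)

while True:
    segment, future = futures.get()
    if future is None:
        break
    print(segment, future.result())      # 1 data-for-1, then 2 data-for-2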
boriel/zxbasic
ast_/tree.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/ast_/tree.py#L154-L166
def makenode(clss, symbol, *nexts): """ Stores the symbol in an AST instance, and left and right to the given ones """ result = clss(symbol) for i in nexts: if i is None: continue if not isinstance(i, clss): raise NotAnAstError(i) result.appendChild(i) return result
[ "def", "makenode", "(", "clss", ",", "symbol", ",", "*", "nexts", ")", ":", "result", "=", "clss", "(", "symbol", ")", "for", "i", "in", "nexts", ":", "if", "i", "is", "None", ":", "continue", "if", "not", "isinstance", "(", "i", ",", "clss", ")", ":", "raise", "NotAnAstError", "(", "i", ")", "result", ".", "appendChild", "(", "i", ")", "return", "result" ]
Stores the symbol in an AST instance, and left and right to the given ones
[ "Stores", "the", "symbol", "in", "an", "AST", "instance", "and", "left", "and", "right", "to", "the", "given", "ones" ]
python
train
28.538462
pgmpy/pgmpy
pgmpy/estimators/ExhaustiveSearch.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/estimators/ExhaustiveSearch.py#L49-L99
def all_dags(self, nodes=None): """ Computes all possible directed acyclic graphs with a given set of nodes, sparse ones first. `2**(n*(n-1))` graphs need to be searched, given `n` nodes, so this is likely not feasible for n>6. This is a generator. Parameters ---------- nodes: list of nodes for the DAGs (optional) A list of the node names that the generated DAGs should have. If not provided, nodes are taken from data. Returns ------- dags: Generator object for nx.DiGraphs Generator that yields all acyclic nx.DiGraphs, ordered by number of edges. Empty DAG first. Examples -------- >>> import pandas as pd >>> from pgmpy.estimators import ExhaustiveSearch >>> s = ExhaustiveSearch(pd.DataFrame(data={'Temperature': [23, 19], 'Weather': ['sunny', 'cloudy'], 'Humidity': [65, 75]})) >>> list(s.all_dags()) [<networkx.classes.digraph.DiGraph object at 0x7f6955216438>, <networkx.classes.digraph.DiGraph object at 0x7f6955216518>, .... >>> [dag.edges() for dag in s.all_dags()] [[], [('Humidity', 'Temperature')], [('Humidity', 'Weather')], [('Temperature', 'Weather')], [('Temperature', 'Humidity')], .... [('Weather', 'Humidity'), ('Weather', 'Temperature'), ('Temperature', 'Humidity')]] """ if nodes is None: nodes = sorted(self.state_names.keys()) if len(nodes) > 6: warn("Generating all DAGs of n nodes likely not feasible for n>6!") warn("Attempting to search through {0} graphs".format(2**(len(nodes)*(len(nodes)-1)))) edges = list(combinations(nodes, 2)) # n*(n-1) possible directed edges edges.extend([(y, x) for x, y in edges]) all_graphs = powerset(edges) # 2^(n*(n-1)) graphs for graph_edges in all_graphs: graph = nx.DiGraph() graph.add_nodes_from(nodes) graph.add_edges_from(graph_edges) if nx.is_directed_acyclic_graph(graph): yield graph
[ "def", "all_dags", "(", "self", ",", "nodes", "=", "None", ")", ":", "if", "nodes", "is", "None", ":", "nodes", "=", "sorted", "(", "self", ".", "state_names", ".", "keys", "(", ")", ")", "if", "len", "(", "nodes", ")", ">", "6", ":", "warn", "(", "\"Generating all DAGs of n nodes likely not feasible for n>6!\"", ")", "warn", "(", "\"Attempting to search through {0} graphs\"", ".", "format", "(", "2", "**", "(", "len", "(", "nodes", ")", "*", "(", "len", "(", "nodes", ")", "-", "1", ")", ")", ")", ")", "edges", "=", "list", "(", "combinations", "(", "nodes", ",", "2", ")", ")", "# n*(n-1) possible directed edges", "edges", ".", "extend", "(", "[", "(", "y", ",", "x", ")", "for", "x", ",", "y", "in", "edges", "]", ")", "all_graphs", "=", "powerset", "(", "edges", ")", "# 2^(n*(n-1)) graphs", "for", "graph_edges", "in", "all_graphs", ":", "graph", "=", "nx", ".", "DiGraph", "(", ")", "graph", ".", "add_nodes_from", "(", "nodes", ")", "graph", ".", "add_edges_from", "(", "graph_edges", ")", "if", "nx", ".", "is_directed_acyclic_graph", "(", "graph", ")", ":", "yield", "graph" ]
Computes all possible directed acyclic graphs with a given set of nodes, sparse ones first. `2**(n*(n-1))` graphs need to be searched, given `n` nodes, so this is likely not feasible for n>6. This is a generator. Parameters ---------- nodes: list of nodes for the DAGs (optional) A list of the node names that the generated DAGs should have. If not provided, nodes are taken from data. Returns ------- dags: Generator object for nx.DiGraphs Generator that yields all acyclic nx.DiGraphs, ordered by number of edges. Empty DAG first. Examples -------- >>> import pandas as pd >>> from pgmpy.estimators import ExhaustiveSearch >>> s = ExhaustiveSearch(pd.DataFrame(data={'Temperature': [23, 19], 'Weather': ['sunny', 'cloudy'], 'Humidity': [65, 75]})) >>> list(s.all_dags()) [<networkx.classes.digraph.DiGraph object at 0x7f6955216438>, <networkx.classes.digraph.DiGraph object at 0x7f6955216518>, .... >>> [dag.edges() for dag in s.all_dags()] [[], [('Humidity', 'Temperature')], [('Humidity', 'Weather')], [('Temperature', 'Weather')], [('Temperature', 'Humidity')], .... [('Weather', 'Humidity'), ('Weather', 'Temperature'), ('Temperature', 'Humidity')]]
[ "Computes", "all", "possible", "directed", "acyclic", "graphs", "with", "a", "given", "set", "of", "nodes", "sparse", "ones", "first", ".", "2", "**", "(", "n", "*", "(", "n", "-", "1", "))", "graphs", "need", "to", "be", "searched", "given", "n", "nodes", "so", "this", "is", "likely", "not", "feasible", "for", "n", ">", "6", ".", "This", "is", "a", "generator", "." ]
python
train
43.176471
ray-project/ray
examples/resnet/resnet_model.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/examples/resnet/resnet_model.py#L49-L59
def build_graph(self): """Build a whole graph for the model.""" self.global_step = tf.Variable(0, trainable=False) self._build_model() if self.mode == "train": self._build_train_op() else: # Additional initialization for the test network. self.variables = ray.experimental.tf_utils.TensorFlowVariables( self.cost) self.summaries = tf.summary.merge_all()
[ "def", "build_graph", "(", "self", ")", ":", "self", ".", "global_step", "=", "tf", ".", "Variable", "(", "0", ",", "trainable", "=", "False", ")", "self", ".", "_build_model", "(", ")", "if", "self", ".", "mode", "==", "\"train\"", ":", "self", ".", "_build_train_op", "(", ")", "else", ":", "# Additional initialization for the test network.", "self", ".", "variables", "=", "ray", ".", "experimental", ".", "tf_utils", ".", "TensorFlowVariables", "(", "self", ".", "cost", ")", "self", ".", "summaries", "=", "tf", ".", "summary", ".", "merge_all", "(", ")" ]
Build a whole graph for the model.
[ "Build", "a", "whole", "graph", "for", "the", "model", "." ]
python
train
40.636364
Fantomas42/django-blog-zinnia
zinnia/templating.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templating.py#L7-L13
def append_position(path, position, separator=''): """ Concatenate a path and a position, between the filename and the extension. """ filename, extension = os.path.splitext(path) return ''.join([filename, separator, str(position), extension])
[ "def", "append_position", "(", "path", ",", "position", ",", "separator", "=", "''", ")", ":", "filename", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "return", "''", ".", "join", "(", "[", "filename", ",", "separator", ",", "str", "(", "position", ")", ",", "extension", "]", ")" ]
Concatenate a path and a position, between the filename and the extension.
[ "Concatenate", "a", "path", "and", "a", "position", "between", "the", "filename", "and", "the", "extension", "." ]
python
train
37.142857
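The zinnia append_position above splices a position between the file stem and its extension; two quick calls show the effect:

import os

def append_position(path, position, separator=''):
    filename, extension = os.path.splitext(path)
    return ''.join([filename, separator, str(position), extension])

print(append_position('gallery/photo.jpg', 2))        # gallery/photo2.jpg
print(append_position('gallery/photo.jpg', 2, '-'))   # gallery/photo-2.jpg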
pyGrowler/Growler
growler/http/response.py
https://github.com/pyGrowler/Growler/blob/90c923ff204f28b86a01d741224987a22f69540f/growler/http/response.py#L328-L345
def update(self, *args, **kwargs): """ Equivalent to the python dict update method. Update the dictionary with the key/value pairs from other, overwriting existing keys. Args: other (dict): The source of key value pairs to add to headers Keyword Args: All keyword arguments are stored in header directly Returns: None """ for next_dict in chain(args, (kwargs, )): for k, v in next_dict.items(): self[k] = v
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "next_dict", "in", "chain", "(", "args", ",", "(", "kwargs", ",", ")", ")", ":", "for", "k", ",", "v", "in", "next_dict", ".", "items", "(", ")", ":", "self", "[", "k", "]", "=", "v" ]
Equivalent to the python dict update method. Update the dictionary with the key/value pairs from other, overwriting existing keys. Args: other (dict): The source of key value pairs to add to headers Keyword Args: All keyword arguments are stored in header directly Returns: None
[ "Equivalent", "to", "the", "python", "dict", "update", "method", "." ]
python
train
29.333333
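The Growler update above mirrors dict.update but folds any number of positional dicts and the keyword arguments into one pass with itertools.chain. A minimal sketch against a plain dict subclass, with made-up header names:

from itertools import chain

class Headers(dict):
    def update(self, *args, **kwargs):
        for next_dict in chain(args, (kwargs,)):
            for k, v in next_dict.items():
                self[k] = v

h = Headers()
h.update({'Content-Type': 'text/html'}, {'Server': 'demo'}, x_request_id='42')
print(h)
# {'Content-Type': 'text/html', 'Server': 'demo', 'x_request_id': '42'}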
googledatalab/pydatalab
google/datalab/contrib/mlworkbench/_prediction_explainer.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_prediction_explainer.py#L334-L414
def probe_image(self, labels, instance, column_name=None, num_scaled_images=50, top_percent=10): """ Get pixel importance of the image. It performs pixel sensitivity analysis by showing only the most important pixels to a certain label in the image. It uses integrated gradients to measure the importance of each pixel. Args: labels: labels to compute gradients from. instance: the prediction instance. It needs to conform to model's input. Can be a csv line string, or a dict. img_column_name: the name of the image column to probe. If there is only one image column it can be None. num_scaled_images: Number of scaled images to get grads from. For example, if 10, the image will be scaled by 0.1, 0.2, ..., 0,9, 1.0 and it will produce 10 images for grads computation. top_percent: The percentile of pixels to show only. for example, if 10, only top 10% impactful pixels will be shown and rest of the pixels will be black. Returns: A tuple. First is the resized original image (299x299x3). Second is a list of the visualization with same size that highlights the most important pixels, one per each label. """ if len(self._image_columns) > 1 and not column_name: raise ValueError('There are multiple image columns in the input of the model. ' + 'Please specify "column_name".') elif column_name and column_name not in self._image_columns: raise ValueError('Specified column_name "%s" not found in the model input.' % column_name) image_column_name = column_name if column_name else self._image_columns[0] if isinstance(instance, six.string_types): instance = next(csv.DictReader([instance], fieldnames=self._headers)) image_path = instance[image_column_name] with file_io.FileIO(image_path, 'rb') as fi: im = Image.open(fi) resized_image = im.resize((299, 299)) # Produce a list of scaled images, create instances (csv lines) from these images. step = 1. / num_scaled_images scales = np.arange(0.0, 1.0, step) + step csv_lines = [] for s in scales: pixels = (np.asarray(resized_image) * s).astype('uint8') scaled_image = Image.fromarray(pixels) buf = io.BytesIO() scaled_image.save(buf, "JPEG") encoded_image = base64.urlsafe_b64encode(buf.getvalue()).decode('ascii') instance_copy = dict(instance) instance_copy[image_column_name] = encoded_image buf = six.StringIO() writer = csv.DictWriter(buf, fieldnames=self._headers, lineterminator='') writer.writerow(instance_copy) csv_lines.append(buf.getvalue()) integrated_gradients_images = [] for label in labels: # Send to tf model to get gradients. grads = self._image_gradients(csv_lines, label, image_column_name) integrated_grads = resized_image * np.average(grads, axis=0) # Gray scale the grads by removing color dimension. # abs() is for getting the most impactful pixels regardless positive or negative. grayed = np.average(abs(integrated_grads), axis=2) grayed = np.transpose([grayed, grayed, grayed], axes=[1, 2, 0]) # Only show the most impactful pixels. p = np.percentile(grayed, 100 - top_percent) viz_window = np.where(grayed > p, 1, 0) vis = resized_image * viz_window im_vis = Image.fromarray(np.uint8(vis)) integrated_gradients_images.append(im_vis) return resized_image, integrated_gradients_images
[ "def", "probe_image", "(", "self", ",", "labels", ",", "instance", ",", "column_name", "=", "None", ",", "num_scaled_images", "=", "50", ",", "top_percent", "=", "10", ")", ":", "if", "len", "(", "self", ".", "_image_columns", ")", ">", "1", "and", "not", "column_name", ":", "raise", "ValueError", "(", "'There are multiple image columns in the input of the model. '", "+", "'Please specify \"column_name\".'", ")", "elif", "column_name", "and", "column_name", "not", "in", "self", ".", "_image_columns", ":", "raise", "ValueError", "(", "'Specified column_name \"%s\" not found in the model input.'", "%", "column_name", ")", "image_column_name", "=", "column_name", "if", "column_name", "else", "self", ".", "_image_columns", "[", "0", "]", "if", "isinstance", "(", "instance", ",", "six", ".", "string_types", ")", ":", "instance", "=", "next", "(", "csv", ".", "DictReader", "(", "[", "instance", "]", ",", "fieldnames", "=", "self", ".", "_headers", ")", ")", "image_path", "=", "instance", "[", "image_column_name", "]", "with", "file_io", ".", "FileIO", "(", "image_path", ",", "'rb'", ")", "as", "fi", ":", "im", "=", "Image", ".", "open", "(", "fi", ")", "resized_image", "=", "im", ".", "resize", "(", "(", "299", ",", "299", ")", ")", "# Produce a list of scaled images, create instances (csv lines) from these images.", "step", "=", "1.", "/", "num_scaled_images", "scales", "=", "np", ".", "arange", "(", "0.0", ",", "1.0", ",", "step", ")", "+", "step", "csv_lines", "=", "[", "]", "for", "s", "in", "scales", ":", "pixels", "=", "(", "np", ".", "asarray", "(", "resized_image", ")", "*", "s", ")", ".", "astype", "(", "'uint8'", ")", "scaled_image", "=", "Image", ".", "fromarray", "(", "pixels", ")", "buf", "=", "io", ".", "BytesIO", "(", ")", "scaled_image", ".", "save", "(", "buf", ",", "\"JPEG\"", ")", "encoded_image", "=", "base64", ".", "urlsafe_b64encode", "(", "buf", ".", "getvalue", "(", ")", ")", ".", "decode", "(", "'ascii'", ")", "instance_copy", "=", "dict", "(", "instance", ")", "instance_copy", "[", "image_column_name", "]", "=", "encoded_image", "buf", "=", "six", ".", "StringIO", "(", ")", "writer", "=", "csv", ".", "DictWriter", "(", "buf", ",", "fieldnames", "=", "self", ".", "_headers", ",", "lineterminator", "=", "''", ")", "writer", ".", "writerow", "(", "instance_copy", ")", "csv_lines", ".", "append", "(", "buf", ".", "getvalue", "(", ")", ")", "integrated_gradients_images", "=", "[", "]", "for", "label", "in", "labels", ":", "# Send to tf model to get gradients.", "grads", "=", "self", ".", "_image_gradients", "(", "csv_lines", ",", "label", ",", "image_column_name", ")", "integrated_grads", "=", "resized_image", "*", "np", ".", "average", "(", "grads", ",", "axis", "=", "0", ")", "# Gray scale the grads by removing color dimension.", "# abs() is for getting the most impactful pixels regardless positive or negative.", "grayed", "=", "np", ".", "average", "(", "abs", "(", "integrated_grads", ")", ",", "axis", "=", "2", ")", "grayed", "=", "np", ".", "transpose", "(", "[", "grayed", ",", "grayed", ",", "grayed", "]", ",", "axes", "=", "[", "1", ",", "2", ",", "0", "]", ")", "# Only show the most impactful pixels.", "p", "=", "np", ".", "percentile", "(", "grayed", ",", "100", "-", "top_percent", ")", "viz_window", "=", "np", ".", "where", "(", "grayed", ">", "p", ",", "1", ",", "0", ")", "vis", "=", "resized_image", "*", "viz_window", "im_vis", "=", "Image", ".", "fromarray", "(", "np", ".", "uint8", "(", "vis", ")", ")", "integrated_gradients_images", ".", "append", "(", 
"im_vis", ")", "return", "resized_image", ",", "integrated_gradients_images" ]
Get pixel importance of the image. It performs pixel sensitivity analysis by showing only the most important pixels to a certain label in the image. It uses integrated gradients to measure the importance of each pixel. Args: labels: labels to compute gradients from. instance: the prediction instance. It needs to conform to the model's input. Can be a csv line string, or a dict. column_name: the name of the image column to probe. If there is only one image column it can be None. num_scaled_images: Number of scaled images to get grads from. For example, if 10, the image will be scaled by 0.1, 0.2, ..., 0.9, 1.0 and it will produce 10 images for grads computation. top_percent: The percentile of pixels to show only. For example, if 10, only the top 10% most impactful pixels will be shown and the rest of the pixels will be black. Returns: A tuple. First is the resized original image (299x299x3). Second is a list of the visualizations with the same size that highlight the most important pixels, one per label.
[ "Get", "pixel", "importance", "of", "the", "image", "." ]
python
train
47.481481
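A minimal usage sketch for probe_image above: `model` stands for an already-loaded instance of the class that defines the method, and the labels, CSV instance and output file names are placeholder assumptions::

    # `model` is assumed to be an object exposing probe_image (e.g. a loaded
    # prediction explainer) with its headers and image columns configured.
    labels = ['daisy', 'tulip']                       # hypothetical labels
    instance = 'gs://my-bucket/img1.jpg,some_value'   # hypothetical csv line

    resized, overlays = model.probe_image(labels, instance,
                                          num_scaled_images=20, top_percent=10)

    resized.save('original.jpg')
    for label, overlay in zip(labels, overlays):
        overlay.save('top_pixels_{}.jpg'.format(label))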
The-Politico/politico-civic-election-night
electionnight/serializers/election_day.py
https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/election_day.py#L36-L42
def get_special_elections(self, obj): """States holding a special election on election day.""" return reverse( 'electionnight_api_special-election-list', request=self.context['request'], kwargs={'date': obj.date} )
[ "def", "get_special_elections", "(", "self", ",", "obj", ")", ":", "return", "reverse", "(", "'electionnight_api_special-election-list'", ",", "request", "=", "self", ".", "context", "[", "'request'", "]", ",", "kwargs", "=", "{", "'date'", ":", "obj", ".", "date", "}", ")" ]
States holding a special election on election day.
[ "States", "holding", "a", "special", "election", "on", "election", "day", "." ]
python
train
38.285714
CityOfZion/neo-python
neo/Core/TX/Transaction.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/Transaction.py#L148-L162
def ToJson(self, index): """ Convert object members to a dictionary that can be parsed as JSON. Args: index (int): The index of the output in a transaction Returns: dict: """ return { 'n': index, 'asset': self.AssetId.To0xString(), 'value': self.Value.ToNeoJsonString(), 'address': self.Address }
[ "def", "ToJson", "(", "self", ",", "index", ")", ":", "return", "{", "'n'", ":", "index", ",", "'asset'", ":", "self", ".", "AssetId", ".", "To0xString", "(", ")", ",", "'value'", ":", "self", ".", "Value", ".", "ToNeoJsonString", "(", ")", ",", "'address'", ":", "self", ".", "Address", "}" ]
Convert object members to a dictionary that can be parsed as JSON. Args: index (int): The index of the output in a transaction Returns: dict:
[ "Convert", "object", "members", "to", "a", "dictionary", "that", "can", "be", "parsed", "as", "JSON", ".", "Args", ":", "index", "(", "int", ")", ":", "The", "index", "of", "the", "output", "in", "a", "transaction" ]
python
train
27.466667
tornadoweb/tornado
tornado/netutil.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/netutil.py#L591-L614
def ssl_wrap_socket( socket: socket.socket, ssl_options: Union[Dict[str, Any], ssl.SSLContext], server_hostname: str = None, **kwargs: Any ) -> ssl.SSLSocket: """Returns an ``ssl.SSLSocket`` wrapping the given socket. ``ssl_options`` may be either an `ssl.SSLContext` object or a dictionary (as accepted by `ssl_options_to_context`). Additional keyword arguments are passed to ``wrap_socket`` (either the `~ssl.SSLContext` method or the `ssl` module function as appropriate). """ context = ssl_options_to_context(ssl_options) if ssl.HAS_SNI: # In python 3.4, wrap_socket only accepts the server_hostname # argument if HAS_SNI is true. # TODO: add a unittest (python added server-side SNI support in 3.4) # In the meantime it can be manually tested with # python3 -m tornado.httpclient https://sni.velox.ch return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs) else: return context.wrap_socket(socket, **kwargs)
[ "def", "ssl_wrap_socket", "(", "socket", ":", "socket", ".", "socket", ",", "ssl_options", ":", "Union", "[", "Dict", "[", "str", ",", "Any", "]", ",", "ssl", ".", "SSLContext", "]", ",", "server_hostname", ":", "str", "=", "None", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "ssl", ".", "SSLSocket", ":", "context", "=", "ssl_options_to_context", "(", "ssl_options", ")", "if", "ssl", ".", "HAS_SNI", ":", "# In python 3.4, wrap_socket only accepts the server_hostname", "# argument if HAS_SNI is true.", "# TODO: add a unittest (python added server-side SNI support in 3.4)", "# In the meantime it can be manually tested with", "# python3 -m tornado.httpclient https://sni.velox.ch", "return", "context", ".", "wrap_socket", "(", "socket", ",", "server_hostname", "=", "server_hostname", ",", "*", "*", "kwargs", ")", "else", ":", "return", "context", ".", "wrap_socket", "(", "socket", ",", "*", "*", "kwargs", ")" ]
Returns an ``ssl.SSLSocket`` wrapping the given socket. ``ssl_options`` may be either an `ssl.SSLContext` object or a dictionary (as accepted by `ssl_options_to_context`). Additional keyword arguments are passed to ``wrap_socket`` (either the `~ssl.SSLContext` method or the `ssl` module function as appropriate).
[ "Returns", "an", "ssl", ".", "SSLSocket", "wrapping", "the", "given", "socket", "." ]
python
train
42.708333
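As a rough illustration of calling ssl_wrap_socket directly (outside Tornado's IOStream machinery), the sketch below wraps a plain client socket with a default SSLContext; the host name is an arbitrary example::

    import socket
    import ssl

    from tornado.netutil import ssl_wrap_socket

    ctx = ssl.create_default_context()                    # ordinary stdlib context
    raw = socket.create_connection(('example.com', 443))
    tls = ssl_wrap_socket(raw, ctx, server_hostname='example.com')
    try:
        print(tls.version())                              # e.g. 'TLSv1.3'
    finally:
        tls.close()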
pyupio/changelogs
changelogs/finder.py
https://github.com/pyupio/changelogs/blob/0cdb929ac4546c766cd7eef9ae4eb4baaa08f452/changelogs/finder.py#L23-L43
def validate_repo_url(url): """ Validates and formats `url` to be valid URL pointing to a repo on bitbucket.org or github.com :param url: str, URL :return: str, valid URL if valid repo, emptry string otherwise """ try: if "github.com" in url: return re.findall(r"https?://w?w?w?.?github.com/[\w\-]+/[\w.-]+", url)[0] elif "bitbucket.org" in url: return re.findall(r"https?://bitbucket.org/[\w.-]+/[\w.-]+", url)[0] + "/src/" elif "launchpad.net" in url: return re.findall(r"https?://launchpad.net/[\w.-]+", url)[0] elif "sourceforge.net" in url: mo = re.match(r"https?://sourceforge.net/projects/" r"([\w.-]+)/", url, re.I) template = "https://sourceforge.net/p/{}/code/HEAD/tree/trunk/src/" return template.format(mo.groups()[0]) except (IndexError, AttributeError): pass return ""
[ "def", "validate_repo_url", "(", "url", ")", ":", "try", ":", "if", "\"github.com\"", "in", "url", ":", "return", "re", ".", "findall", "(", "r\"https?://w?w?w?.?github.com/[\\w\\-]+/[\\w.-]+\"", ",", "url", ")", "[", "0", "]", "elif", "\"bitbucket.org\"", "in", "url", ":", "return", "re", ".", "findall", "(", "r\"https?://bitbucket.org/[\\w.-]+/[\\w.-]+\"", ",", "url", ")", "[", "0", "]", "+", "\"/src/\"", "elif", "\"launchpad.net\"", "in", "url", ":", "return", "re", ".", "findall", "(", "r\"https?://launchpad.net/[\\w.-]+\"", ",", "url", ")", "[", "0", "]", "elif", "\"sourceforge.net\"", "in", "url", ":", "mo", "=", "re", ".", "match", "(", "r\"https?://sourceforge.net/projects/\"", "r\"([\\w.-]+)/\"", ",", "url", ",", "re", ".", "I", ")", "template", "=", "\"https://sourceforge.net/p/{}/code/HEAD/tree/trunk/src/\"", "return", "template", ".", "format", "(", "mo", ".", "groups", "(", ")", "[", "0", "]", ")", "except", "(", "IndexError", ",", "AttributeError", ")", ":", "pass", "return", "\"\"" ]
Validates and formats `url` to be a valid URL pointing to a repo on bitbucket.org or github.com :param url: str, URL :return: str, valid URL if valid repo, empty string otherwise
[ "Validates", "and", "formats", "url", "to", "be", "valid", "URL", "pointing", "to", "a", "repo", "on", "bitbucket", ".", "org", "or", "github", ".", "com", ":", "param", "url", ":", "str", "URL", ":", "return", ":", "str", "valid", "URL", "if", "valid", "repo", "emptry", "string", "otherwise" ]
python
train
44.380952
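A few illustrative calls to validate_repo_url, assuming the function is importable from the module path shown in this record; the URLs are arbitrary::

    from changelogs.finder import validate_repo_url

    print(validate_repo_url('https://github.com/pyupio/changelogs/issues'))
    # -> 'https://github.com/pyupio/changelogs'
    print(validate_repo_url('https://bitbucket.org/someuser/someproject/overview'))
    # -> 'https://bitbucket.org/someuser/someproject/src/'
    print(validate_repo_url('https://example.com/not-a-repo'))
    # -> '' (unrecognised hosts fall through to the empty string)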
jazzband/django-pipeline
pipeline/views.py
https://github.com/jazzband/django-pipeline/blob/3cd2f93bb47bf8d34447e13ff691f7027e7b07a2/pipeline/views.py#L11-L38
def serve_static(request, path, insecure=False, **kwargs): """Collect and serve static files. This view serves up static files, much like Django's :py:func:`~django.views.static.serve` view, with the addition that it collects static files first (if enabled). This allows images, fonts, and other assets to be served up without first loading a page using the ``{% javascript %}`` or ``{% stylesheet %}`` template tags. You can use this view by adding the following to any :file:`urls.py`:: urlpatterns += static('static/', view='pipeline.views.serve_static') """ # Follow the same logic Django uses for determining access to the # static-serving view. if not django_settings.DEBUG and not insecure: raise ImproperlyConfigured("The staticfiles view can only be used in " "debug mode or if the --insecure " "option of 'runserver' is used") if not settings.PIPELINE_ENABLED and settings.PIPELINE_COLLECTOR_ENABLED: # Collect only the requested file, in order to serve the result as # fast as possible. This won't interfere with the template tags in any # way, as those will still cause Django to collect all media. default_collector.collect(request, files=[path]) return serve(request, path, document_root=django_settings.STATIC_ROOT, **kwargs)
[ "def", "serve_static", "(", "request", ",", "path", ",", "insecure", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# Follow the same logic Django uses for determining access to the", "# static-serving view.", "if", "not", "django_settings", ".", "DEBUG", "and", "not", "insecure", ":", "raise", "ImproperlyConfigured", "(", "\"The staticfiles view can only be used in \"", "\"debug mode or if the --insecure \"", "\"option of 'runserver' is used\"", ")", "if", "not", "settings", ".", "PIPELINE_ENABLED", "and", "settings", ".", "PIPELINE_COLLECTOR_ENABLED", ":", "# Collect only the requested file, in order to serve the result as", "# fast as possible. This won't interfere with the template tags in any", "# way, as those will still cause Django to collect all media.", "default_collector", ".", "collect", "(", "request", ",", "files", "=", "[", "path", "]", ")", "return", "serve", "(", "request", ",", "path", ",", "document_root", "=", "django_settings", ".", "STATIC_ROOT", ",", "*", "*", "kwargs", ")" ]
Collect and serve static files. This view serves up static files, much like Django's :py:func:`~django.views.static.serve` view, with the addition that it collects static files first (if enabled). This allows images, fonts, and other assets to be served up without first loading a page using the ``{% javascript %}`` or ``{% stylesheet %}`` template tags. You can use this view by adding the following to any :file:`urls.py`:: urlpatterns += static('static/', view='pipeline.views.serve_static')
[ "Collect", "and", "serve", "static", "files", "." ]
python
train
50.178571
alfredodeza/notario
notario/engine.py
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/engine.py#L121-L129
def key_leaf(self, data, schema, tree): """ The deepest validation we can make in any given circumstance for a key. Does not recurse, it will just receive both values and the tree, passing them on to the :fun:`enforce` function. """ key, value = data schema_key, schema_value = schema enforce(key, schema_key, tree, 'key')
[ "def", "key_leaf", "(", "self", ",", "data", ",", "schema", ",", "tree", ")", ":", "key", ",", "value", "=", "data", "schema_key", ",", "schema_value", "=", "schema", "enforce", "(", "key", ",", "schema_key", ",", "tree", ",", "'key'", ")" ]
The deepest validation we can make in any given circumstance for a key. Does not recurse, it will just receive both values and the tree, passing them on to the :func:`enforce` function.
[ "The", "deepest", "validation", "we", "can", "make", "in", "any", "given", "circumstance", "for", "a", "key", ".", "Does", "not", "recurse", "it", "will", "just", "receive", "both", "values", "and", "the", "tree", "passing", "them", "on", "to", "the", ":", "fun", ":", "enforce", "function", "." ]
python
train
42
esafak/mca
src/mca.py
https://github.com/esafak/mca/blob/f2b79ecbf37629902ccdbad2e1a556977c53d370/src/mca.py#L95-L119
def fs_r(self, percent=0.9, N=None): """Get the row factor scores (dimensionality-reduced representation), choosing how many factors to retain, directly or based on the explained variance. 'percent': The minimum variance that the retained factors are required to explain (default: 90% = 0.9) 'N': The number of factors to retain. Overrides 'percent'. If the rank is less than N, N is ignored. """ if not 0 <= percent <= 1: raise ValueError("Percent should be a real number between 0 and 1.") if N: if not isinstance(N, (int, int64)) or N <= 0: raise ValueError("N should be a positive integer.") N = min(N, self.rank) self.k = 1 + flatnonzero(cumsum(self.L) >= sum(self.L)*percent)[0] # S = zeros((self._numitems, self.k)) # the sign of the square root can be either way; singular value vs. eigenvalue # fill_diagonal(S, -sqrt(self.E) if self.cor else self.s) num2ret = N if N else self.k s = -sqrt(self.L) if self.cor else self.s S = diagsvd(s[:num2ret], self._numitems, num2ret) self.F = self.D_r.dot(self.P).dot(S) return self.F
[ "def", "fs_r", "(", "self", ",", "percent", "=", "0.9", ",", "N", "=", "None", ")", ":", "if", "not", "0", "<=", "percent", "<=", "1", ":", "raise", "ValueError", "(", "\"Percent should be a real number between 0 and 1.\"", ")", "if", "N", ":", "if", "not", "isinstance", "(", "N", ",", "(", "int", ",", "int64", ")", ")", "or", "N", "<=", "0", ":", "raise", "ValueError", "(", "\"N should be a positive integer.\"", ")", "N", "=", "min", "(", "N", ",", "self", ".", "rank", ")", "self", ".", "k", "=", "1", "+", "flatnonzero", "(", "cumsum", "(", "self", ".", "L", ")", ">=", "sum", "(", "self", ".", "L", ")", "*", "percent", ")", "[", "0", "]", "# S = zeros((self._numitems, self.k))", "# the sign of the square root can be either way; singular value vs. eigenvalue", "# fill_diagonal(S, -sqrt(self.E) if self.cor else self.s)", "num2ret", "=", "N", "if", "N", "else", "self", ".", "k", "s", "=", "-", "sqrt", "(", "self", ".", "L", ")", "if", "self", ".", "cor", "else", "self", ".", "s", "S", "=", "diagsvd", "(", "s", "[", ":", "num2ret", "]", ",", "self", ".", "_numitems", ",", "num2ret", ")", "self", ".", "F", "=", "self", ".", "D_r", ".", "dot", "(", "self", ".", "P", ")", ".", "dot", "(", "S", ")", "return", "self", ".", "F" ]
Get the row factor scores (dimensionality-reduced representation), choosing how many factors to retain, directly or based on the explained variance. 'percent': The minimum variance that the retained factors are required to explain (default: 90% = 0.9) 'N': The number of factors to retain. Overrides 'percent'. If the rank is less than N, N is ignored.
[ "Get", "the", "row", "factor", "scores", "(", "dimensionality", "-", "reduced", "representation", ")", "choosing", "how", "many", "factors", "to", "retain", "directly", "or", "based", "on", "the", "explained", "variance", "." ]
python
train
42.72
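A sketch of pulling row factor scores out of a fitted MCA object with fs_r; the toy indicator matrix and the `mca.MCA(df)` constructor call are assumptions made for illustration::

    import pandas as pd
    import mca

    # Toy indicator matrix: rows are observations, columns are dummy-coded categories.
    df = pd.DataFrame(
        [[1, 0, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 1, 0, 1]],
        columns=['A_yes', 'A_no', 'B_yes', 'B_no'])

    m = mca.MCA(df)                   # constructor signature is an assumption
    scores_90 = m.fs_r(percent=0.9)   # keep factors explaining 90% of inertia
    scores_2 = m.fs_r(N=2)            # or ask for exactly two factors
    print(scores_2.shape)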
edx/edx-enterprise
enterprise/management/commands/assign_enterprise_user_roles.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/management/commands/assign_enterprise_user_roles.py#L131-L171
def _assign_enterprise_role_to_users(self, _get_batch_method, options, is_feature_role=False): """ Assigns enterprise role to users. """ role_name = options['role'] batch_limit = options['batch_limit'] batch_sleep = options['batch_sleep'] batch_offset = options['batch_offset'] current_batch_index = batch_offset users_batch = _get_batch_method( batch_offset, batch_offset + batch_limit ) role_class = SystemWideEnterpriseRole role_assignment_class = SystemWideEnterpriseUserRoleAssignment if is_feature_role: role_class = EnterpriseFeatureRole role_assignment_class = EnterpriseFeatureUserRoleAssignment enterprise_role = role_class.objects.get(name=role_name) while users_batch.count() > 0: for index, user in enumerate(users_batch): LOGGER.info( 'Processing user with index %s and id %s', current_batch_index + index, user.id ) role_assignment_class.objects.get_or_create( user=user, role=enterprise_role ) sleep(batch_sleep) current_batch_index += len(users_batch) users_batch = _get_batch_method( current_batch_index, current_batch_index + batch_limit )
[ "def", "_assign_enterprise_role_to_users", "(", "self", ",", "_get_batch_method", ",", "options", ",", "is_feature_role", "=", "False", ")", ":", "role_name", "=", "options", "[", "'role'", "]", "batch_limit", "=", "options", "[", "'batch_limit'", "]", "batch_sleep", "=", "options", "[", "'batch_sleep'", "]", "batch_offset", "=", "options", "[", "'batch_offset'", "]", "current_batch_index", "=", "batch_offset", "users_batch", "=", "_get_batch_method", "(", "batch_offset", ",", "batch_offset", "+", "batch_limit", ")", "role_class", "=", "SystemWideEnterpriseRole", "role_assignment_class", "=", "SystemWideEnterpriseUserRoleAssignment", "if", "is_feature_role", ":", "role_class", "=", "EnterpriseFeatureRole", "role_assignment_class", "=", "EnterpriseFeatureUserRoleAssignment", "enterprise_role", "=", "role_class", ".", "objects", ".", "get", "(", "name", "=", "role_name", ")", "while", "users_batch", ".", "count", "(", ")", ">", "0", ":", "for", "index", ",", "user", "in", "enumerate", "(", "users_batch", ")", ":", "LOGGER", ".", "info", "(", "'Processing user with index %s and id %s'", ",", "current_batch_index", "+", "index", ",", "user", ".", "id", ")", "role_assignment_class", ".", "objects", ".", "get_or_create", "(", "user", "=", "user", ",", "role", "=", "enterprise_role", ")", "sleep", "(", "batch_sleep", ")", "current_batch_index", "+=", "len", "(", "users_batch", ")", "users_batch", "=", "_get_batch_method", "(", "current_batch_index", ",", "current_batch_index", "+", "batch_limit", ")" ]
Assigns enterprise role to users.
[ "Assigns", "enterprise", "role", "to", "users", "." ]
python
valid
34.829268
orb-framework/orb
orb/core/model.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/model.py#L1011-L1024
def callbacks(cls, eventType=None): """ Returns a list of callback methods that can be invoked whenever an event is processed. :return: {subclass of <Event>: <list>, ..} """ key = '_{0}__callbacks'.format(cls.__name__) try: callbacks = getattr(cls, key) except AttributeError: callbacks = {} setattr(cls, key, callbacks) return callbacks.get(eventType, []) if eventType is not None else callbacks
[ "def", "callbacks", "(", "cls", ",", "eventType", "=", "None", ")", ":", "key", "=", "'_{0}__callbacks'", ".", "format", "(", "cls", ".", "__name__", ")", "try", ":", "callbacks", "=", "getattr", "(", "cls", ",", "key", ")", "except", "AttributeError", ":", "callbacks", "=", "{", "}", "setattr", "(", "cls", ",", "key", ",", "callbacks", ")", "return", "callbacks", ".", "get", "(", "eventType", ",", "[", "]", ")", "if", "eventType", "is", "not", "None", "else", "callbacks" ]
Returns a list of callback methods that can be invoked whenever an event is processed. :return: {subclass of <Event>: <list>, ..}
[ "Returns", "a", "list", "of", "callback", "methods", "that", "can", "be", "invoked", "whenever", "an", "event", "is", "processed", "." ]
python
train
34.642857
chaoss/grimoirelab-sortinghat
sortinghat/db/api.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/db/api.py#L116-L133
def find_country(session, code): """Find a country. Find a country by its ISO-3166 `code` (i.e ES for Spain, US for United States of America) using the given `session. When the country does not exist the function will return `None`. :param session: database session :param code: ISO-3166 code of the country to find :return: a country object; `None` when the country does not exist """ country = session.query(Country).\ filter(Country.code == code).first() return country
[ "def", "find_country", "(", "session", ",", "code", ")", ":", "country", "=", "session", ".", "query", "(", "Country", ")", ".", "filter", "(", "Country", ".", "code", "==", "code", ")", ".", "first", "(", ")", "return", "country" ]
Find a country. Find a country by its ISO-3166 `code` (e.g. ES for Spain, US for United States of America) using the given `session`. When the country does not exist the function will return `None`. :param session: database session :param code: ISO-3166 code of the country to find :return: a country object; `None` when the country does not exist
[ "Find", "a", "country", "." ]
python
train
28.833333
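A hedged sketch of calling find_country with a SQLAlchemy session; the connection string is a placeholder and an existing SortingHat database schema is assumed::

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    from sortinghat.db.api import find_country

    engine = create_engine('mysql+pymysql://user:pass@localhost/shdb')  # placeholder URL
    session = sessionmaker(bind=engine)()

    country = find_country(session, 'ES')
    print(country.code if country else 'unknown code')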
ryanjdillon/pyotelem
pyotelem/plots/plotdives.py
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotdives.py#L160-L203
def plot_triaxial_depths_speed(tag): '''Plot triaxial accelerometer data for whole deployment, descents, and ascents Only x and z axes are ploted since these are associated with stroking Args ---- tag: pandas.DataFrame Tag dataframe with acceleromter, depth, and propeller columns ''' import numpy from . import plotutils # TODO return to multiple inputs rather than dataframe fig, axes = plt.subplots(3, 3, sharex='col', sharey='row') ((ax1, ax4, ax7), (ax2, ax5, ax8), (ax3, ax6, ax9)) = axes # Create mask of all True for length of depths all_ind = numpy.arange(0, len(tag), dtype=int) cols = [('x', tag['Ax_g'], [ax1, ax2, ax3]), ('y', tag['Ay_g'], [ax4, ax5, ax6]), ('z', tag['Az_g'], [ax7, ax8, ax9])] for label, y, axes in cols: axes[0].title.set_text('Accelerometer {}-axis'.format(label)) axes[0].plot(range(len(y)), y, color=_colors[0], linewidth=_linewidth, label='x') axes[1].title.set_text('Depths') axes[1] = plotutils.plot_noncontiguous(axes[1], tag['depth'], all_ind, color=_colors[1]) axes[1].invert_yaxis() axes[2] = plotutils.plot_noncontiguous(axes[2], tag['propeller'], all_ind, color=_colors[2], label='propeller') plt.show() return None
[ "def", "plot_triaxial_depths_speed", "(", "tag", ")", ":", "import", "numpy", "from", ".", "import", "plotutils", "# TODO return to multiple inputs rather than dataframe", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "3", ",", "3", ",", "sharex", "=", "'col'", ",", "sharey", "=", "'row'", ")", "(", "(", "ax1", ",", "ax4", ",", "ax7", ")", ",", "(", "ax2", ",", "ax5", ",", "ax8", ")", ",", "(", "ax3", ",", "ax6", ",", "ax9", ")", ")", "=", "axes", "# Create mask of all True for length of depths", "all_ind", "=", "numpy", ".", "arange", "(", "0", ",", "len", "(", "tag", ")", ",", "dtype", "=", "int", ")", "cols", "=", "[", "(", "'x'", ",", "tag", "[", "'Ax_g'", "]", ",", "[", "ax1", ",", "ax2", ",", "ax3", "]", ")", ",", "(", "'y'", ",", "tag", "[", "'Ay_g'", "]", ",", "[", "ax4", ",", "ax5", ",", "ax6", "]", ")", ",", "(", "'z'", ",", "tag", "[", "'Az_g'", "]", ",", "[", "ax7", ",", "ax8", ",", "ax9", "]", ")", "]", "for", "label", ",", "y", ",", "axes", "in", "cols", ":", "axes", "[", "0", "]", ".", "title", ".", "set_text", "(", "'Accelerometer {}-axis'", ".", "format", "(", "label", ")", ")", "axes", "[", "0", "]", ".", "plot", "(", "range", "(", "len", "(", "y", ")", ")", ",", "y", ",", "color", "=", "_colors", "[", "0", "]", ",", "linewidth", "=", "_linewidth", ",", "label", "=", "'x'", ")", "axes", "[", "1", "]", ".", "title", ".", "set_text", "(", "'Depths'", ")", "axes", "[", "1", "]", "=", "plotutils", ".", "plot_noncontiguous", "(", "axes", "[", "1", "]", ",", "tag", "[", "'depth'", "]", ",", "all_ind", ",", "color", "=", "_colors", "[", "1", "]", ")", "axes", "[", "1", "]", ".", "invert_yaxis", "(", ")", "axes", "[", "2", "]", "=", "plotutils", ".", "plot_noncontiguous", "(", "axes", "[", "2", "]", ",", "tag", "[", "'propeller'", "]", ",", "all_ind", ",", "color", "=", "_colors", "[", "2", "]", ",", "label", "=", "'propeller'", ")", "plt", ".", "show", "(", ")", "return", "None" ]
Plot triaxial accelerometer data for whole deployment, descents, and ascents Only x and z axes are plotted since these are associated with stroking Args ---- tag: pandas.DataFrame Tag dataframe with accelerometer, depth, and propeller columns
[ "Plot", "triaxial", "accelerometer", "data", "for", "whole", "deployment", "descents", "and", "ascents" ]
python
train
32.772727
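Because plot_triaxial_depths_speed only needs a DataFrame with accelerometer, depth and propeller columns (names taken from the code above), a synthetic tag is enough to exercise it::

    import numpy as np
    import pandas as pd

    from pyotelem.plots.plotdives import plot_triaxial_depths_speed

    n = 1000
    tag = pd.DataFrame({
        'Ax_g': np.random.normal(0, 0.1, n),
        'Ay_g': np.random.normal(0, 0.1, n),
        'Az_g': np.random.normal(1, 0.1, n),               # gravity on the z axis
        'depth': np.abs(np.sin(np.linspace(0, 10, n))) * 50,
        'propeller': np.random.uniform(0, 3, n),
    })

    plot_triaxial_depths_speed(tag)   # opens a matplotlib figure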
jepegit/cellpy
cellpy/utils/batch_tools/dumpers.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/utils/batch_tools/dumpers.py#L54-L75
def ram_dumper(**kwargs): """Dump data to 'memory' for later usage.""" logging.debug("trying to save stuff in memory") farms = kwargs["farms"] experiments = kwargs["experiments"] engine = kwargs["engine"] try: engine_name = engine.__name__ except AttributeError: engine_name = engine.__dict__.__name__ accepted_engines = ["summary_engine",] if engine_name in accepted_engines: logging.debug("found the engine that I will try to dump from: " f"{engine_name}") for experiment, farm in zip(experiments, farms): name = experiment.journal.name project = experiment.journal.project experiment.memory_dumped[engine_name] = farm logging.debug(f"farm put into memory_dumped ({project}::{name})")
[ "def", "ram_dumper", "(", "*", "*", "kwargs", ")", ":", "logging", ".", "debug", "(", "\"trying to save stuff in memory\"", ")", "farms", "=", "kwargs", "[", "\"farms\"", "]", "experiments", "=", "kwargs", "[", "\"experiments\"", "]", "engine", "=", "kwargs", "[", "\"engine\"", "]", "try", ":", "engine_name", "=", "engine", ".", "__name__", "except", "AttributeError", ":", "engine_name", "=", "engine", ".", "__dict__", ".", "__name__", "accepted_engines", "=", "[", "\"summary_engine\"", ",", "]", "if", "engine_name", "in", "accepted_engines", ":", "logging", ".", "debug", "(", "\"found the engine that I will try to dump from: \"", "f\"{engine_name}\"", ")", "for", "experiment", ",", "farm", "in", "zip", "(", "experiments", ",", "farms", ")", ":", "name", "=", "experiment", ".", "journal", ".", "name", "project", "=", "experiment", ".", "journal", ".", "project", "experiment", ".", "memory_dumped", "[", "engine_name", "]", "=", "farm", "logging", ".", "debug", "(", "f\"farm put into memory_dumped ({project}::{name})\"", ")" ]
Dump data to 'memory' for later usage.
[ "Dump", "data", "to", "memory", "for", "later", "usage", "." ]
python
train
36.636364
kensho-technologies/graphql-compiler
graphql_compiler/compiler/helpers.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/helpers.py#L45-L53
def get_ast_field_name(ast): """Return the normalized field name for the given AST node.""" replacements = { # We always rewrite the following field names into their proper underlying counterparts. TYPENAME_META_FIELD_NAME: '@class' } base_field_name = ast.name.value normalized_name = replacements.get(base_field_name, base_field_name) return normalized_name
[ "def", "get_ast_field_name", "(", "ast", ")", ":", "replacements", "=", "{", "# We always rewrite the following field names into their proper underlying counterparts.", "TYPENAME_META_FIELD_NAME", ":", "'@class'", "}", "base_field_name", "=", "ast", ".", "name", ".", "value", "normalized_name", "=", "replacements", ".", "get", "(", "base_field_name", ",", "base_field_name", ")", "return", "normalized_name" ]
Return the normalized field name for the given AST node.
[ "Return", "the", "normalized", "field", "name", "for", "the", "given", "AST", "node", "." ]
python
train
43.444444
wbond/oscrypto
oscrypto/_osx/symmetric.py
https://github.com/wbond/oscrypto/blob/af778bf1c88bf6c4a7342f5353b130686a5bbe1c/oscrypto/_osx/symmetric.py#L493-L531
def des_cbc_pkcs5_decrypt(key, data, iv): """ Decrypts DES ciphertext using a 56 bit key :param key: The encryption key - a byte string 8 bytes long (includes error correction bits) :param data: The ciphertext - a byte string :param iv: The initialization vector used for encryption - a byte string :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the plaintext """ if len(key) != 8: raise ValueError(pretty_message( ''' key must be 8 bytes (56 bits + 8 parity bits) long - is %s ''', len(key) )) if len(iv) != 8: raise ValueError(pretty_message( ''' iv must be 8 bytes long - is %s ''', len(iv) )) return _decrypt(Security.kSecAttrKeyTypeDES, key, data, iv, Security.kSecPaddingPKCS5Key)
[ "def", "des_cbc_pkcs5_decrypt", "(", "key", ",", "data", ",", "iv", ")", ":", "if", "len", "(", "key", ")", "!=", "8", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n key must be 8 bytes (56 bits + 8 parity bits) long - is %s\n '''", ",", "len", "(", "key", ")", ")", ")", "if", "len", "(", "iv", ")", "!=", "8", ":", "raise", "ValueError", "(", "pretty_message", "(", "'''\n iv must be 8 bytes long - is %s\n '''", ",", "len", "(", "iv", ")", ")", ")", "return", "_decrypt", "(", "Security", ".", "kSecAttrKeyTypeDES", ",", "key", ",", "data", ",", "iv", ",", "Security", ".", "kSecPaddingPKCS5Key", ")" ]
Decrypts DES ciphertext using a 56 bit key :param key: The encryption key - a byte string 8 bytes long (includes error correction bits) :param data: The ciphertext - a byte string :param iv: The initialization vector used for encryption - a byte string :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the plaintext
[ "Decrypts", "DES", "ciphertext", "using", "a", "56", "bit", "key" ]
python
valid
27.153846
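A round-trip sketch for des_cbc_pkcs5_decrypt; it assumes the matching `des_cbc_pkcs5_encrypt` helper exists in oscrypto's public symmetric module and returns the IV alongside the ciphertext, so treat the exact return shape as an assumption::

    import os

    from oscrypto.symmetric import des_cbc_pkcs5_decrypt, des_cbc_pkcs5_encrypt

    key = os.urandom(8)            # 56-bit DES key plus parity bits
    iv = os.urandom(8)
    plaintext = b'attack at dawn'

    iv, ciphertext = des_cbc_pkcs5_encrypt(key, plaintext, iv)   # assumed API
    assert des_cbc_pkcs5_decrypt(key, ciphertext, iv) == plaintext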
etingof/pysnmp
pysnmp/smi/instrum.py
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/instrum.py#L375-L381
def _defaultErrorHandler(varBinds, **context): """Raise exception on any error if user callback is missing""" errors = context.get('errors') if errors: err = errors[-1] raise err['error']
[ "def", "_defaultErrorHandler", "(", "varBinds", ",", "*", "*", "context", ")", ":", "errors", "=", "context", ".", "get", "(", "'errors'", ")", "if", "errors", ":", "err", "=", "errors", "[", "-", "1", "]", "raise", "err", "[", "'error'", "]" ]
Raise exception on any error if user callback is missing
[ "Raise", "exception", "on", "any", "error", "if", "user", "callback", "is", "missing" ]
python
train
32.857143
guma44/GEOparse
GEOparse/GEOTypes.py
https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L331-L367
def annotate(self, gpl, annotation_column, gpl_on="ID", gsm_on="ID_REF", in_place=False): """Annotate GSM with provided GPL Args: gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with annotation_column (str`): Column in a table for annotation gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID". gsm_on (:obj:`str`): Use this column in GPL to merge. Defaults to "ID_REF". in_place (:obj:`bool`): Substitute table in GSM by new annotated table. Defaults to False. Returns: :obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None Raises: TypeError: GPL should be GPL or pandas.DataFrame """ if isinstance(gpl, GPL): annotation_table = gpl.table elif isinstance(gpl, DataFrame): annotation_table = gpl else: raise TypeError("gpl should be a GPL object or a pandas.DataFrame") # annotate by merging annotated = self.table.merge( annotation_table[[gpl_on, annotation_column]], left_on=gsm_on, right_on=gpl_on) del annotated[gpl_on] if in_place: self.table = annotated return None else: return annotated
[ "def", "annotate", "(", "self", ",", "gpl", ",", "annotation_column", ",", "gpl_on", "=", "\"ID\"", ",", "gsm_on", "=", "\"ID_REF\"", ",", "in_place", "=", "False", ")", ":", "if", "isinstance", "(", "gpl", ",", "GPL", ")", ":", "annotation_table", "=", "gpl", ".", "table", "elif", "isinstance", "(", "gpl", ",", "DataFrame", ")", ":", "annotation_table", "=", "gpl", "else", ":", "raise", "TypeError", "(", "\"gpl should be a GPL object or a pandas.DataFrame\"", ")", "# annotate by merging", "annotated", "=", "self", ".", "table", ".", "merge", "(", "annotation_table", "[", "[", "gpl_on", ",", "annotation_column", "]", "]", ",", "left_on", "=", "gsm_on", ",", "right_on", "=", "gpl_on", ")", "del", "annotated", "[", "gpl_on", "]", "if", "in_place", ":", "self", ".", "table", "=", "annotated", "return", "None", "else", ":", "return", "annotated" ]
Annotate GSM with provided GPL Args: gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with annotation_column (:obj:`str`): Column in a table for annotation gpl_on (:obj:`str`): Use this column in GPL to merge. Defaults to "ID". gsm_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID_REF". in_place (:obj:`bool`): Substitute table in GSM by new annotated table. Defaults to False. Returns: :obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None Raises: TypeError: GPL should be GPL or pandas.DataFrame
[ "Annotate", "GSM", "with", "provided", "GPL" ]
python
train
36.324324
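In practice the tables passed to annotate come from a downloaded GEO series; the accession number and the `GENE_SYMBOL` annotation column below are placeholders that depend on the platform being used::

    import GEOparse

    gse = GEOparse.get_GEO('GSE1563')           # hypothetical accession
    gsm = list(gse.gsms.values())[0]            # first sample
    gpl = list(gse.gpls.values())[0]            # its platform

    annotated = gsm.annotate(gpl, annotation_column='GENE_SYMBOL')
    print(annotated.head())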
wonambi-python/wonambi
wonambi/widgets/notes.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/notes.py#L1577-L1596
def export(self, xformat='csv'): """action: export annotations to CSV.""" if self.annot is None: # remove if buttons are disabled self.parent.statusBar().showMessage('No score file loaded') return if xformat == 'csv': filename = splitext(self.annot.xml_file)[0] + '.csv' filename, _ = QFileDialog.getSaveFileName(self, 'Export stages', filename, 'Sleep stages (*.csv)') if 'remlogic' in xformat: filename = splitext(self.annot.xml_file)[0] + '.txt' filename, _ = QFileDialog.getSaveFileName(self, 'Export stages', filename, 'Sleep stages (*.txt)') if filename == '': return self.annot.export(filename, xformat=xformat)
[ "def", "export", "(", "self", ",", "xformat", "=", "'csv'", ")", ":", "if", "self", ".", "annot", "is", "None", ":", "# remove if buttons are disabled", "self", ".", "parent", ".", "statusBar", "(", ")", ".", "showMessage", "(", "'No score file loaded'", ")", "return", "if", "xformat", "==", "'csv'", ":", "filename", "=", "splitext", "(", "self", ".", "annot", ".", "xml_file", ")", "[", "0", "]", "+", "'.csv'", "filename", ",", "_", "=", "QFileDialog", ".", "getSaveFileName", "(", "self", ",", "'Export stages'", ",", "filename", ",", "'Sleep stages (*.csv)'", ")", "if", "'remlogic'", "in", "xformat", ":", "filename", "=", "splitext", "(", "self", ".", "annot", ".", "xml_file", ")", "[", "0", "]", "+", "'.txt'", "filename", ",", "_", "=", "QFileDialog", ".", "getSaveFileName", "(", "self", ",", "'Export stages'", ",", "filename", ",", "'Sleep stages (*.txt)'", ")", "if", "filename", "==", "''", ":", "return", "self", ".", "annot", ".", "export", "(", "filename", ",", "xformat", "=", "xformat", ")" ]
action: export annotations to CSV.
[ "action", ":", "export", "annotations", "to", "CSV", "." ]
python
train
47.5
RPi-Distro/python-gpiozero
gpiozero/tools.py
https://github.com/RPi-Distro/python-gpiozero/blob/7b67374fd0c8c4fde5586d9bad9531f076db9c0c/gpiozero/tools.py#L652-L676
def cos_values(period=360): """ Provides an infinite source of values representing a cosine wave (from -1 to +1) which repeats every *period* values. For example, to produce a "siren" effect with a couple of LEDs that repeats once a second:: from gpiozero import PWMLED from gpiozero.tools import cos_values, scaled, inverted from signal import pause red = PWMLED(2) blue = PWMLED(3) red.source_delay = 0.01 blue.source_delay = red.source_delay red.source = scaled(cos_values(100), 0, 1, -1, 1) blue.source = inverted(red) pause() If you require a different range than -1 to +1, see :func:`scaled`. """ angles = (2 * pi * i / period for i in range(period)) for a in cycle(angles): yield cos(a)
[ "def", "cos_values", "(", "period", "=", "360", ")", ":", "angles", "=", "(", "2", "*", "pi", "*", "i", "/", "period", "for", "i", "in", "range", "(", "period", ")", ")", "for", "a", "in", "cycle", "(", "angles", ")", ":", "yield", "cos", "(", "a", ")" ]
Provides an infinite source of values representing a cosine wave (from -1 to +1) which repeats every *period* values. For example, to produce a "siren" effect with a couple of LEDs that repeats once a second:: from gpiozero import PWMLED from gpiozero.tools import cos_values, scaled, inverted from signal import pause red = PWMLED(2) blue = PWMLED(3) red.source_delay = 0.01 blue.source_delay = red.source_delay red.source = scaled(cos_values(100), 0, 1, -1, 1) blue.source = inverted(red) pause() If you require a different range than -1 to +1, see :func:`scaled`.
[ "Provides", "an", "infinite", "source", "of", "values", "representing", "a", "cosine", "wave", "(", "from", "-", "1", "to", "+", "1", ")", "which", "repeats", "every", "*", "period", "*", "values", ".", "For", "example", "to", "produce", "a", "siren", "effect", "with", "a", "couple", "of", "LEDs", "that", "repeats", "once", "a", "second", "::" ]
python
train
31.76
fastavro/fastavro
fastavro/_write_py.py
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L543-L633
def writer(fo, schema, records, codec='null', sync_interval=1000 * SYNC_SIZE, metadata=None, validator=None, sync_marker=None): """Write records to fo (stream) according to schema Parameters ---------- fo: file-like Output stream records: iterable Records to write. This is commonly a list of the dictionary representation of the records, but it can be any iterable codec: string, optional Compression codec, can be 'null', 'deflate' or 'snappy' (if installed) sync_interval: int, optional Size of sync interval metadata: dict, optional Header metadata validator: None, True or a function Validator function. If None (the default) - no validation. If True then then fastavro.validation.validate will be used. If it's a function, it should have the same signature as fastavro.writer.validate and raise an exeption on error. sync_marker: bytes, optional A byte string used as the avro sync marker. If not provided, a random byte string will be used. Example:: from fastavro import writer, parse_schema schema = { 'doc': 'A weather reading.', 'name': 'Weather', 'namespace': 'test', 'type': 'record', 'fields': [ {'name': 'station', 'type': 'string'}, {'name': 'time', 'type': 'long'}, {'name': 'temp', 'type': 'int'}, ], } parsed_schema = parse_schema(schema) records = [ {u'station': u'011990-99999', u'temp': 0, u'time': 1433269388}, {u'station': u'011990-99999', u'temp': 22, u'time': 1433270389}, {u'station': u'011990-99999', u'temp': -11, u'time': 1433273379}, {u'station': u'012650-99999', u'temp': 111, u'time': 1433275478}, ] with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records) Given an existing avro file, it's possible to append to it by re-opening the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't able to read some of the existing header information and an error will be raised. For example:: # Write initial records with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records) # Write some more records with open('weather.avro', 'a+b') as out: writer(out, parsed_schema, more_records) """ # Sanity check that records is not a single dictionary (as that is a common # mistake and the exception that gets raised is not helpful) if isinstance(records, dict): raise ValueError('"records" argument should be an iterable, not dict') output = Writer( fo, schema, codec, sync_interval, metadata, validator, sync_marker, ) for record in records: output.write(record) output.flush()
[ "def", "writer", "(", "fo", ",", "schema", ",", "records", ",", "codec", "=", "'null'", ",", "sync_interval", "=", "1000", "*", "SYNC_SIZE", ",", "metadata", "=", "None", ",", "validator", "=", "None", ",", "sync_marker", "=", "None", ")", ":", "# Sanity check that records is not a single dictionary (as that is a common", "# mistake and the exception that gets raised is not helpful)", "if", "isinstance", "(", "records", ",", "dict", ")", ":", "raise", "ValueError", "(", "'\"records\" argument should be an iterable, not dict'", ")", "output", "=", "Writer", "(", "fo", ",", "schema", ",", "codec", ",", "sync_interval", ",", "metadata", ",", "validator", ",", "sync_marker", ",", ")", "for", "record", "in", "records", ":", "output", ".", "write", "(", "record", ")", "output", ".", "flush", "(", ")" ]
Write records to fo (stream) according to schema Parameters ---------- fo: file-like Output stream records: iterable Records to write. This is commonly a list of the dictionary representation of the records, but it can be any iterable codec: string, optional Compression codec, can be 'null', 'deflate' or 'snappy' (if installed) sync_interval: int, optional Size of sync interval metadata: dict, optional Header metadata validator: None, True or a function Validator function. If None (the default) - no validation. If True then fastavro.validation.validate will be used. If it's a function, it should have the same signature as fastavro.writer.validate and raise an exception on error. sync_marker: bytes, optional A byte string used as the avro sync marker. If not provided, a random byte string will be used. Example:: from fastavro import writer, parse_schema schema = { 'doc': 'A weather reading.', 'name': 'Weather', 'namespace': 'test', 'type': 'record', 'fields': [ {'name': 'station', 'type': 'string'}, {'name': 'time', 'type': 'long'}, {'name': 'temp', 'type': 'int'}, ], } parsed_schema = parse_schema(schema) records = [ {u'station': u'011990-99999', u'temp': 0, u'time': 1433269388}, {u'station': u'011990-99999', u'temp': 22, u'time': 1433270389}, {u'station': u'011990-99999', u'temp': -11, u'time': 1433273379}, {u'station': u'012650-99999', u'temp': 111, u'time': 1433275478}, ] with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records) Given an existing avro file, it's possible to append to it by re-opening the file in `a+b` mode. If the file is only opened in `ab` mode, we aren't able to read some of the existing header information and an error will be raised. For example:: # Write initial records with open('weather.avro', 'wb') as out: writer(out, parsed_schema, records) # Write some more records with open('weather.avro', 'a+b') as out: writer(out, parsed_schema, more_records)
[ "Write", "records", "to", "fo", "(", "stream", ")", "according", "to", "schema" ]
python
train
32.956044
chaoss/grimoirelab-manuscripts
manuscripts2/elasticsearch.py
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts2/elasticsearch.py#L617-L645
def calculate_bmi(closed, submitted): """ BMI is the ratio of the number of closed items to the number of total items submitted in a particular period of analysis. The items can be issues, pull requests and such :param closed: dataframe returned from get_timeseries() containing closed items :param submitted: dataframe returned from get_timeseries() containing total items :returns: a dataframe with "date" and "bmi" columns where the date column is also the index. bmi is the ratio of the number of items closed by the total number of items submitted in a "period" of analysis """ if sorted(closed.keys()) != sorted(submitted.keys()): raise AttributeError("The buckets supplied are not congruent!") dates = closed.index.values closed_values = closed['value'] submitted_values = submitted['value'] ratios = [] for x, y in zip(closed_values, submitted_values): if y == 0: ratios.append(0.0) else: ratios.append(float("%.2f" % (x / y))) df = pd.DataFrame.from_records({"date": dates, "bmi": ratios}, index="date") return df.fillna(0)
[ "def", "calculate_bmi", "(", "closed", ",", "submitted", ")", ":", "if", "sorted", "(", "closed", ".", "keys", "(", ")", ")", "!=", "sorted", "(", "submitted", ".", "keys", "(", ")", ")", ":", "raise", "AttributeError", "(", "\"The buckets supplied are not congruent!\"", ")", "dates", "=", "closed", ".", "index", ".", "values", "closed_values", "=", "closed", "[", "'value'", "]", "submitted_values", "=", "submitted", "[", "'value'", "]", "ratios", "=", "[", "]", "for", "x", ",", "y", "in", "zip", "(", "closed_values", ",", "submitted_values", ")", ":", "if", "y", "==", "0", ":", "ratios", ".", "append", "(", "0.0", ")", "else", ":", "ratios", ".", "append", "(", "float", "(", "\"%.2f\"", "%", "(", "x", "/", "y", ")", ")", ")", "df", "=", "pd", ".", "DataFrame", ".", "from_records", "(", "{", "\"date\"", ":", "dates", ",", "\"bmi\"", ":", "ratios", "}", ",", "index", "=", "\"date\"", ")", "return", "df", ".", "fillna", "(", "0", ")" ]
BMI is the ratio of the number of closed items to the number of total items submitted in a particular period of analysis. The items can be issues, pull requests and such :param closed: dataframe returned from get_timeseries() containing closed items :param submitted: dataframe returned from get_timeseries() containing total items :returns: a dataframe with "date" and "bmi" columns where the date column is also the index. bmi is the ratio of the number of items closed by the total number of items submitted in a "period" of analysis
[ "BMI", "is", "the", "ratio", "of", "the", "number", "of", "closed", "items", "to", "the", "number", "of", "total", "items", "submitted", "in", "a", "particular", "period", "of", "analysis", ".", "The", "items", "can", "be", "issues", "pull", "requests", "and", "such" ]
python
train
40.068966
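Since calculate_bmi only needs two aligned frames with a 'value' column indexed by date, it can be exercised with hand-made data; in the real pipeline both frames come from get_timeseries()::

    import pandas as pd

    from manuscripts2.elasticsearch import calculate_bmi

    dates = pd.date_range('2018-01-01', periods=3, freq='7D')
    closed = pd.DataFrame({'value': [5, 8, 0]}, index=dates)
    submitted = pd.DataFrame({'value': [10, 8, 4]}, index=dates)

    print(calculate_bmi(closed, submitted))   # bmi column: 0.5, 1.0, 0.0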
openstack/networking-cisco
networking_cisco/plugins/cisco/device_manager/rpc/devices_cfgagent_rpc_cb.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/rpc/devices_cfgagent_rpc_cb.py#L87-L104
def get_hosting_devices_for_agent(self, context, host): """Fetches routers that a Cisco cfg agent is managing. This function is supposed to be called when the agent has started, is ready to take on assignments and before any callbacks to fetch logical resources are issued. :param context: contains user information :param host: originator of callback :returns: dict of hosting devices managed by the cfg agent """ agent_ids = self._dmplugin.get_cfg_agents(context, active=None, filters={'host': [host]}) if agent_ids: return [self._dmplugin.get_device_info_for_agent(context, hd_db) for hd_db in self._dmplugin.get_hosting_devices_db( context, filters={'cfg_agent_id': [agent_ids[0].id]})] return []
[ "def", "get_hosting_devices_for_agent", "(", "self", ",", "context", ",", "host", ")", ":", "agent_ids", "=", "self", ".", "_dmplugin", ".", "get_cfg_agents", "(", "context", ",", "active", "=", "None", ",", "filters", "=", "{", "'host'", ":", "[", "host", "]", "}", ")", "if", "agent_ids", ":", "return", "[", "self", ".", "_dmplugin", ".", "get_device_info_for_agent", "(", "context", ",", "hd_db", ")", "for", "hd_db", "in", "self", ".", "_dmplugin", ".", "get_hosting_devices_db", "(", "context", ",", "filters", "=", "{", "'cfg_agent_id'", ":", "[", "agent_ids", "[", "0", "]", ".", "id", "]", "}", ")", "]", "return", "[", "]" ]
Fetches routers that a Cisco cfg agent is managing. This function is supposed to be called when the agent has started, is ready to take on assignments and before any callbacks to fetch logical resources are issued. :param context: contains user information :param host: originator of callback :returns: list of hosting devices managed by the cfg agent
[ "Fetches", "routers", "that", "a", "Cisco", "cfg", "agent", "is", "managing", "." ]
python
train
48.611111
neighbordog/deviantart
deviantart/api.py
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L520-L574
def get_collections(self, username="", calculate_size=False, ext_preload=False, offset=0, limit=10): """Fetch collection folders :param username: The user to list folders for, if omitted the authenticated user is used :param calculate_size: The option to include the content count per each collection folder :param ext_preload: Include first 5 deviations from the folder :param offset: the pagination offset :param limit: the pagination limit """ if not username and self.standard_grant_type == "authorization_code": response = self._req('/collections/folders', { "calculate_size":calculate_size, "ext_preload":ext_preload, "offset":offset, "limit":limit }) else: if not username: raise DeviantartError("No username defined.") else: response = self._req('/collections/folders', { "username":username, "calculate_size":calculate_size, "ext_preload":ext_preload, "offset":offset, "limit":limit }) folders = [] for item in response['results']: f = {} f['folderid'] = item['folderid'] f['name'] = item['name'] if "size" in item: f['size'] = item['size'] if "deviations" in item: f['deviations'] = [] for deviation_item in item['deviations']: d = Deviation() d.from_dict(deviation_item) f['deviations'].append(d) folders.append(f) return { "results" : folders, "has_more" : response['has_more'], "next_offset" : response['next_offset'] }
[ "def", "get_collections", "(", "self", ",", "username", "=", "\"\"", ",", "calculate_size", "=", "False", ",", "ext_preload", "=", "False", ",", "offset", "=", "0", ",", "limit", "=", "10", ")", ":", "if", "not", "username", "and", "self", ".", "standard_grant_type", "==", "\"authorization_code\"", ":", "response", "=", "self", ".", "_req", "(", "'/collections/folders'", ",", "{", "\"calculate_size\"", ":", "calculate_size", ",", "\"ext_preload\"", ":", "ext_preload", ",", "\"offset\"", ":", "offset", ",", "\"limit\"", ":", "limit", "}", ")", "else", ":", "if", "not", "username", ":", "raise", "DeviantartError", "(", "\"No username defined.\"", ")", "else", ":", "response", "=", "self", ".", "_req", "(", "'/collections/folders'", ",", "{", "\"username\"", ":", "username", ",", "\"calculate_size\"", ":", "calculate_size", ",", "\"ext_preload\"", ":", "ext_preload", ",", "\"offset\"", ":", "offset", ",", "\"limit\"", ":", "limit", "}", ")", "folders", "=", "[", "]", "for", "item", "in", "response", "[", "'results'", "]", ":", "f", "=", "{", "}", "f", "[", "'folderid'", "]", "=", "item", "[", "'folderid'", "]", "f", "[", "'name'", "]", "=", "item", "[", "'name'", "]", "if", "\"size\"", "in", "item", ":", "f", "[", "'size'", "]", "=", "item", "[", "'size'", "]", "if", "\"deviations\"", "in", "item", ":", "f", "[", "'deviations'", "]", "=", "[", "]", "for", "deviation_item", "in", "item", "[", "'deviations'", "]", ":", "d", "=", "Deviation", "(", ")", "d", ".", "from_dict", "(", "deviation_item", ")", "f", "[", "'deviations'", "]", ".", "append", "(", "d", ")", "folders", ".", "append", "(", "f", ")", "return", "{", "\"results\"", ":", "folders", ",", "\"has_more\"", ":", "response", "[", "'has_more'", "]", ",", "\"next_offset\"", ":", "response", "[", "'next_offset'", "]", "}" ]
Fetch collection folders :param username: The user to list folders for, if omitted the authenticated user is used :param calculate_size: The option to include the content count per each collection folder :param ext_preload: Include first 5 deviations from the folder :param offset: the pagination offset :param limit: the pagination limit
[ "Fetch", "collection", "folders" ]
python
train
33.963636
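A hedged sketch of listing a user's collection folders with get_collections; the client construction follows the package's `deviantart.Api(client_id, client_secret)` pattern, and the credentials and username are placeholders::

    import deviantart

    da = deviantart.Api('client_id', 'client_secret')    # placeholder credentials

    page = da.get_collections(username='someartist', calculate_size=True, limit=5)
    for folder in page['results']:
        print(folder['folderid'], folder['name'], folder.get('size'))

    if page['has_more']:
        page = da.get_collections(username='someartist', offset=page['next_offset'])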
bitesofcode/projexui
projexui/widgets/xtextedit.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtextedit.py#L320-L379
def getText(cls, parent=None, windowTitle='Get Text', label='', text='', plain=True, wrapped=True): """ Prompts the user for a text entry using the text edit class. :param parent | <QWidget> windowTitle | <str> label | <str> text | <str> plain | <bool> | return plain text or not :return (<str> text, <bool> accepted) """ # create the dialog dlg = QDialog(parent) dlg.setWindowTitle(windowTitle) # create the layout layout = QVBoxLayout() # create the label if label: lbl = QLabel(dlg) lbl.setText(label) layout.addWidget(lbl) # create the widget widget = cls(dlg) widget.setText(text) if not wrapped: widget.setLineWrapMode(XTextEdit.NoWrap) layout.addWidget(widget) # create the buttons btns = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, dlg) layout.addWidget(btns) dlg.setLayout(layout) dlg.adjustSize() # create connections btns.accepted.connect(dlg.accept) btns.rejected.connect(dlg.reject) if dlg.exec_(): if plain: return (widget.toPlainText(), True) else: return (widget.toHtml(), True) else: return ('', False)
[ "def", "getText", "(", "cls", ",", "parent", "=", "None", ",", "windowTitle", "=", "'Get Text'", ",", "label", "=", "''", ",", "text", "=", "''", ",", "plain", "=", "True", ",", "wrapped", "=", "True", ")", ":", "# create the dialog\r", "dlg", "=", "QDialog", "(", "parent", ")", "dlg", ".", "setWindowTitle", "(", "windowTitle", ")", "# create the layout\r", "layout", "=", "QVBoxLayout", "(", ")", "# create the label\r", "if", "label", ":", "lbl", "=", "QLabel", "(", "dlg", ")", "lbl", ".", "setText", "(", "label", ")", "layout", ".", "addWidget", "(", "lbl", ")", "# create the widget\r", "widget", "=", "cls", "(", "dlg", ")", "widget", ".", "setText", "(", "text", ")", "if", "not", "wrapped", ":", "widget", ".", "setLineWrapMode", "(", "XTextEdit", ".", "NoWrap", ")", "layout", ".", "addWidget", "(", "widget", ")", "# create the buttons\r", "btns", "=", "QDialogButtonBox", "(", "QDialogButtonBox", ".", "Ok", "|", "QDialogButtonBox", ".", "Cancel", ",", "Qt", ".", "Horizontal", ",", "dlg", ")", "layout", ".", "addWidget", "(", "btns", ")", "dlg", ".", "setLayout", "(", "layout", ")", "dlg", ".", "adjustSize", "(", ")", "# create connections\r", "btns", ".", "accepted", ".", "connect", "(", "dlg", ".", "accept", ")", "btns", ".", "rejected", ".", "connect", "(", "dlg", ".", "reject", ")", "if", "dlg", ".", "exec_", "(", ")", ":", "if", "plain", ":", "return", "(", "widget", ".", "toPlainText", "(", ")", ",", "True", ")", "else", ":", "return", "(", "widget", ".", "toHtml", "(", ")", ",", "True", ")", "else", ":", "return", "(", "''", ",", "False", ")" ]
Prompts the user for a text entry using the text edit class. :param parent | <QWidget> windowTitle | <str> label | <str> text | <str> plain | <bool> | return plain text or not :return (<str> text, <bool> accepted)
[ "Prompts", "the", "user", "for", "a", "text", "entry", "using", "the", "text", "edit", "class", ".", ":", "param", "parent", "|", "<QWidget", ">", "windowTitle", "|", "<str", ">", "label", "|", "<str", ">", "text", "|", "<str", ">", "plain", "|", "<bool", ">", "|", "return", "plain", "text", "or", "not", ":", "return", "(", "<str", ">", "text", "<bool", ">", "accepted", ")" ]
python
train
29.216667
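A sketch of prompting for multi-line text with getText; it assumes the method is exposed as a classmethod, that projexui's Qt4-era bindings are available, and that a QApplication can be created::

    from PyQt4.QtGui import QApplication          # binding choice is an assumption

    from projexui.widgets.xtextedit import XTextEdit

    app = QApplication([])
    text, accepted = XTextEdit.getText(windowTitle='Release notes',
                                       label='Describe the change:')
    if accepted:
        print(text)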
Opentrons/opentrons
api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py#L761-L778
def dwell_axes(self, axes): ''' Sets motors to low current, for when they are not moving. Dwell for XYZA axes is only called after HOMING Dwell for BC axes is called after both HOMING and MOVING axes: String containing the axes to set to low current (eg: 'XYZABC') ''' axes = ''.join(set(axes) & set(AXES) - set(DISABLE_AXES)) dwelling_currents = { ax: self._dwelling_current_settings['now'][ax] for ax in axes if self._active_axes[ax] is True } if dwelling_currents: self._save_current(dwelling_currents, axes_active=False)
[ "def", "dwell_axes", "(", "self", ",", "axes", ")", ":", "axes", "=", "''", ".", "join", "(", "set", "(", "axes", ")", "&", "set", "(", "AXES", ")", "-", "set", "(", "DISABLE_AXES", ")", ")", "dwelling_currents", "=", "{", "ax", ":", "self", ".", "_dwelling_current_settings", "[", "'now'", "]", "[", "ax", "]", "for", "ax", "in", "axes", "if", "self", ".", "_active_axes", "[", "ax", "]", "is", "True", "}", "if", "dwelling_currents", ":", "self", ".", "_save_current", "(", "dwelling_currents", ",", "axes_active", "=", "False", ")" ]
Sets motors to low current, for when they are not moving. Dwell for XYZA axes is only called after HOMING Dwell for BC axes is called after both HOMING and MOVING axes: String containing the axes to set to low current (eg: 'XYZABC')
[ "Sets", "motors", "to", "low", "current", "for", "when", "they", "are", "not", "moving", "." ]
python
train
36.055556
Unidata/MetPy
metpy/calc/kinematics.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/kinematics.py#L337-L399
def frontogenesis(thta, u, v, dx, dy, dim_order='yx'): r"""Calculate the 2D kinematic frontogenesis of a temperature field. The implementation is a form of the Petterssen Frontogenesis and uses the formula outlined in [Bluestein1993]_ pg.248-253. .. math:: F=\frac{1}{2}\left|\nabla \theta\right|[D cos(2\beta)-\delta] * :math:`F` is 2D kinematic frontogenesis * :math:`\theta` is potential temperature * :math:`D` is the total deformation * :math:`\beta` is the angle between the axis of dilitation and the isentropes * :math:`\delta` is the divergence Parameters ---------- thta : (M, N) ndarray Potential temperature u : (M, N) ndarray x component of the wind v : (M, N) ndarray y component of the wind dx : float or ndarray The grid spacing(s) in the x-direction. If an array, there should be one item less than the size of `u` along the applicable axis. dy : float or ndarray The grid spacing(s) in the y-direction. If an array, there should be one item less than the size of `u` along the applicable axis. Returns ------- (M, N) ndarray 2D Frontogenesis in [temperature units]/m/s Notes ----- If inputs have more than two dimensions, they are assumed to have either leading dimensions of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``. Conversion factor to go from [temperature units]/m/s to [temperature units/100km/3h] :math:`1.08e4*1.e5` """ # Get gradients of potential temperature in both x and y ddy_thta = first_derivative(thta, delta=dy, axis=-2) ddx_thta = first_derivative(thta, delta=dx, axis=-1) # Compute the magnitude of the potential temperature gradient mag_thta = np.sqrt(ddx_thta**2 + ddy_thta**2) # Get the shearing, stretching, and total deformation of the wind field shrd = shearing_deformation(u, v, dx, dy, dim_order=dim_order) strd = stretching_deformation(u, v, dx, dy, dim_order=dim_order) tdef = total_deformation(u, v, dx, dy, dim_order=dim_order) # Get the divergence of the wind field div = divergence(u, v, dx, dy, dim_order=dim_order) # Compute the angle (beta) between the wind field and the gradient of potential temperature psi = 0.5 * np.arctan2(shrd, strd) beta = np.arcsin((-ddx_thta * np.cos(psi) - ddy_thta * np.sin(psi)) / mag_thta) return 0.5 * mag_thta * (tdef * np.cos(2 * beta) - div)
[ "def", "frontogenesis", "(", "thta", ",", "u", ",", "v", ",", "dx", ",", "dy", ",", "dim_order", "=", "'yx'", ")", ":", "# Get gradients of potential temperature in both x and y", "ddy_thta", "=", "first_derivative", "(", "thta", ",", "delta", "=", "dy", ",", "axis", "=", "-", "2", ")", "ddx_thta", "=", "first_derivative", "(", "thta", ",", "delta", "=", "dx", ",", "axis", "=", "-", "1", ")", "# Compute the magnitude of the potential temperature gradient", "mag_thta", "=", "np", ".", "sqrt", "(", "ddx_thta", "**", "2", "+", "ddy_thta", "**", "2", ")", "# Get the shearing, stretching, and total deformation of the wind field", "shrd", "=", "shearing_deformation", "(", "u", ",", "v", ",", "dx", ",", "dy", ",", "dim_order", "=", "dim_order", ")", "strd", "=", "stretching_deformation", "(", "u", ",", "v", ",", "dx", ",", "dy", ",", "dim_order", "=", "dim_order", ")", "tdef", "=", "total_deformation", "(", "u", ",", "v", ",", "dx", ",", "dy", ",", "dim_order", "=", "dim_order", ")", "# Get the divergence of the wind field", "div", "=", "divergence", "(", "u", ",", "v", ",", "dx", ",", "dy", ",", "dim_order", "=", "dim_order", ")", "# Compute the angle (beta) between the wind field and the gradient of potential temperature", "psi", "=", "0.5", "*", "np", ".", "arctan2", "(", "shrd", ",", "strd", ")", "beta", "=", "np", ".", "arcsin", "(", "(", "-", "ddx_thta", "*", "np", ".", "cos", "(", "psi", ")", "-", "ddy_thta", "*", "np", ".", "sin", "(", "psi", ")", ")", "/", "mag_thta", ")", "return", "0.5", "*", "mag_thta", "*", "(", "tdef", "*", "np", ".", "cos", "(", "2", "*", "beta", ")", "-", "div", ")" ]
r"""Calculate the 2D kinematic frontogenesis of a temperature field. The implementation is a form of the Petterssen Frontogenesis and uses the formula outlined in [Bluestein1993]_ pg.248-253. .. math:: F=\frac{1}{2}\left|\nabla \theta\right|[D cos(2\beta)-\delta] * :math:`F` is 2D kinematic frontogenesis * :math:`\theta` is potential temperature * :math:`D` is the total deformation * :math:`\beta` is the angle between the axis of dilitation and the isentropes * :math:`\delta` is the divergence Parameters ---------- thta : (M, N) ndarray Potential temperature u : (M, N) ndarray x component of the wind v : (M, N) ndarray y component of the wind dx : float or ndarray The grid spacing(s) in the x-direction. If an array, there should be one item less than the size of `u` along the applicable axis. dy : float or ndarray The grid spacing(s) in the y-direction. If an array, there should be one item less than the size of `u` along the applicable axis. Returns ------- (M, N) ndarray 2D Frontogenesis in [temperature units]/m/s Notes ----- If inputs have more than two dimensions, they are assumed to have either leading dimensions of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``. Conversion factor to go from [temperature units]/m/s to [temperature units/100km/3h] :math:`1.08e4*1.e5`
[ "r", "Calculate", "the", "2D", "kinematic", "frontogenesis", "of", "a", "temperature", "field", "." ]
python
train
38.857143
ejeschke/ginga
ginga/rv/plugins/Contents.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Contents.py#L305-L320
def add_image_info_cb(self, viewer, channel, image_info): """Almost the same as add_image_cb(), except that the image may not be loaded in memory. """ chname = channel.name name = image_info.name self.logger.debug("name=%s" % (name)) # Updates of any extant information try: image = channel.get_loaded_image(name) except KeyError: # images that are not yet loaded will show "N/A" for keywords image = None self.add_image_cb(viewer, chname, image, image_info)
[ "def", "add_image_info_cb", "(", "self", ",", "viewer", ",", "channel", ",", "image_info", ")", ":", "chname", "=", "channel", ".", "name", "name", "=", "image_info", ".", "name", "self", ".", "logger", ".", "debug", "(", "\"name=%s\"", "%", "(", "name", ")", ")", "# Updates of any extant information", "try", ":", "image", "=", "channel", ".", "get_loaded_image", "(", "name", ")", "except", "KeyError", ":", "# images that are not yet loaded will show \"N/A\" for keywords", "image", "=", "None", "self", ".", "add_image_cb", "(", "viewer", ",", "chname", ",", "image", ",", "image_info", ")" ]
Almost the same as add_image_cb(), except that the image may not be loaded in memory.
[ "Almost", "the", "same", "as", "add_image_cb", "()", "except", "that", "the", "image", "may", "not", "be", "loaded", "in", "memory", "." ]
python
train
35.0625
adafruit/Adafruit_Python_PureIO
Adafruit_PureIO/smbus.py
https://github.com/adafruit/Adafruit_Python_PureIO/blob/6f4976d91c52d70b67b28bba75a429b5328a52c1/Adafruit_PureIO/smbus.py#L201-L216
def read_i2c_block_data(self, addr, cmd, length=32): """Perform a read from the specified cmd register of device. Length number of bytes (default of 32) will be read and returned as a bytearray. """ assert self._device is not None, 'Bus must be opened before operations are made against it!' # Build ctypes values to marshall between ioctl and Python. reg = c_uint8(cmd) result = create_string_buffer(length) # Build ioctl request. request = make_i2c_rdwr_data([ (addr, 0, 1, pointer(reg)), # Write cmd register. (addr, I2C_M_RD, length, cast(result, POINTER(c_uint8))) # Read data. ]) # Make ioctl call and return result data. ioctl(self._device.fileno(), I2C_RDWR, request) return bytearray(result.raw)
[ "def", "read_i2c_block_data", "(", "self", ",", "addr", ",", "cmd", ",", "length", "=", "32", ")", ":", "assert", "self", ".", "_device", "is", "not", "None", ",", "'Bus must be opened before operations are made against it!'", "# Build ctypes values to marshall between ioctl and Python.", "reg", "=", "c_uint8", "(", "cmd", ")", "result", "=", "create_string_buffer", "(", "length", ")", "# Build ioctl request.", "request", "=", "make_i2c_rdwr_data", "(", "[", "(", "addr", ",", "0", ",", "1", ",", "pointer", "(", "reg", ")", ")", ",", "# Write cmd register.", "(", "addr", ",", "I2C_M_RD", ",", "length", ",", "cast", "(", "result", ",", "POINTER", "(", "c_uint8", ")", ")", ")", "# Read data.", "]", ")", "# Make ioctl call and return result data.", "ioctl", "(", "self", ".", "_device", ".", "fileno", "(", ")", ",", "I2C_RDWR", ",", "request", ")", "return", "bytearray", "(", "result", ".", "raw", ")" ]
Perform a read from the specified cmd register of device. Length number of bytes (default of 32) will be read and returned as a bytearray.
[ "Perform", "a", "read", "from", "the", "specified", "cmd", "register", "of", "device", ".", "Length", "number", "of", "bytes", "(", "default", "of", "32", ")", "will", "be", "read", "and", "returned", "as", "a", "bytearray", "." ]
python
test
52
vilmibm/done
parsedatetime/parsedatetime_consts.py
https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime_consts.py#L601-L707
def _initSymbols(ptc): """ Helper function to initialize the single character constants and other symbols needed. """ ptc.timeSep = [ u':' ] ptc.dateSep = [ u'/' ] ptc.meridian = [ u'AM', u'PM' ] ptc.usesMeridian = True ptc.uses24 = False if pyicu and ptc.usePyICU: am = u'' pm = u'' ts = '' # ICU doesn't seem to provide directly the # date or time seperator - so we have to # figure it out o = ptc.icu_tf['short'] s = ptc.timeFormats['short'] ptc.usesMeridian = u'a' in s ptc.uses24 = u'H' in s # '11:45 AM' or '11:45' s = o.format(datetime.datetime(2003, 10, 30, 11, 45)) # ': AM' or ':' s = s.replace('11', '').replace('45', '') if len(s) > 0: ts = s[0] if ptc.usesMeridian: # '23:45 AM' or '23:45' am = s[1:].strip() s = o.format(datetime.datetime(2003, 10, 30, 23, 45)) if ptc.uses24: s = s.replace('23', '') else: s = s.replace('11', '') # 'PM' or '' pm = s.replace('45', '').replace(ts, '').strip() ptc.timeSep = [ ts ] ptc.meridian = [ am, pm ] o = ptc.icu_df['short'] s = o.format(datetime.datetime(2003, 10, 30, 11, 45)) s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '') if len(s) > 0: ds = s[0] else: ds = '/' ptc.dateSep = [ ds ] s = ptc.dateFormats['short'] l = s.lower().split(ds) dp_order = [] for s in l: if len(s) > 0: dp_order.append(s[:1]) ptc.dp_order = dp_order else: ptc.timeSep = ptc.locale.timeSep ptc.dateSep = ptc.locale.dateSep ptc.meridian = ptc.locale.meridian ptc.usesMeridian = ptc.locale.usesMeridian ptc.uses24 = ptc.locale.uses24 ptc.dp_order = ptc.locale.dp_order # build am and pm lists to contain # original case, lowercase and first-char # versions of the meridian text if len(ptc.meridian) > 0: am = ptc.meridian[0] ptc.am = [ am ] if len(am) > 0: ptc.am.append(am[0]) am = am.lower() ptc.am.append(am) ptc.am.append(am[0]) else: am = '' ptc.am = [ '', '' ] if len(ptc.meridian) > 1: pm = ptc.meridian[1] ptc.pm = [ pm ] if len(pm) > 0: ptc.pm.append(pm[0]) pm = pm.lower() ptc.pm.append(pm) ptc.pm.append(pm[0]) else: pm = '' ptc.pm = [ '', '' ]
[ "def", "_initSymbols", "(", "ptc", ")", ":", "ptc", ".", "timeSep", "=", "[", "u':'", "]", "ptc", ".", "dateSep", "=", "[", "u'/'", "]", "ptc", ".", "meridian", "=", "[", "u'AM'", ",", "u'PM'", "]", "ptc", ".", "usesMeridian", "=", "True", "ptc", ".", "uses24", "=", "False", "if", "pyicu", "and", "ptc", ".", "usePyICU", ":", "am", "=", "u''", "pm", "=", "u''", "ts", "=", "''", "# ICU doesn't seem to provide directly the", "# date or time seperator - so we have to", "# figure it out", "o", "=", "ptc", ".", "icu_tf", "[", "'short'", "]", "s", "=", "ptc", ".", "timeFormats", "[", "'short'", "]", "ptc", ".", "usesMeridian", "=", "u'a'", "in", "s", "ptc", ".", "uses24", "=", "u'H'", "in", "s", "# '11:45 AM' or '11:45'", "s", "=", "o", ".", "format", "(", "datetime", ".", "datetime", "(", "2003", ",", "10", ",", "30", ",", "11", ",", "45", ")", ")", "# ': AM' or ':'", "s", "=", "s", ".", "replace", "(", "'11'", ",", "''", ")", ".", "replace", "(", "'45'", ",", "''", ")", "if", "len", "(", "s", ")", ">", "0", ":", "ts", "=", "s", "[", "0", "]", "if", "ptc", ".", "usesMeridian", ":", "# '23:45 AM' or '23:45'", "am", "=", "s", "[", "1", ":", "]", ".", "strip", "(", ")", "s", "=", "o", ".", "format", "(", "datetime", ".", "datetime", "(", "2003", ",", "10", ",", "30", ",", "23", ",", "45", ")", ")", "if", "ptc", ".", "uses24", ":", "s", "=", "s", ".", "replace", "(", "'23'", ",", "''", ")", "else", ":", "s", "=", "s", ".", "replace", "(", "'11'", ",", "''", ")", "# 'PM' or ''", "pm", "=", "s", ".", "replace", "(", "'45'", ",", "''", ")", ".", "replace", "(", "ts", ",", "''", ")", ".", "strip", "(", ")", "ptc", ".", "timeSep", "=", "[", "ts", "]", "ptc", ".", "meridian", "=", "[", "am", ",", "pm", "]", "o", "=", "ptc", ".", "icu_df", "[", "'short'", "]", "s", "=", "o", ".", "format", "(", "datetime", ".", "datetime", "(", "2003", ",", "10", ",", "30", ",", "11", ",", "45", ")", ")", "s", "=", "s", ".", "replace", "(", "'10'", ",", "''", ")", ".", "replace", "(", "'30'", ",", "''", ")", ".", "replace", "(", "'03'", ",", "''", ")", ".", "replace", "(", "'2003'", ",", "''", ")", "if", "len", "(", "s", ")", ">", "0", ":", "ds", "=", "s", "[", "0", "]", "else", ":", "ds", "=", "'/'", "ptc", ".", "dateSep", "=", "[", "ds", "]", "s", "=", "ptc", ".", "dateFormats", "[", "'short'", "]", "l", "=", "s", ".", "lower", "(", ")", ".", "split", "(", "ds", ")", "dp_order", "=", "[", "]", "for", "s", "in", "l", ":", "if", "len", "(", "s", ")", ">", "0", ":", "dp_order", ".", "append", "(", "s", "[", ":", "1", "]", ")", "ptc", ".", "dp_order", "=", "dp_order", "else", ":", "ptc", ".", "timeSep", "=", "ptc", ".", "locale", ".", "timeSep", "ptc", ".", "dateSep", "=", "ptc", ".", "locale", ".", "dateSep", "ptc", ".", "meridian", "=", "ptc", ".", "locale", ".", "meridian", "ptc", ".", "usesMeridian", "=", "ptc", ".", "locale", ".", "usesMeridian", "ptc", ".", "uses24", "=", "ptc", ".", "locale", ".", "uses24", "ptc", ".", "dp_order", "=", "ptc", ".", "locale", ".", "dp_order", "# build am and pm lists to contain", "# original case, lowercase and first-char", "# versions of the meridian text", "if", "len", "(", "ptc", ".", "meridian", ")", ">", "0", ":", "am", "=", "ptc", ".", "meridian", "[", "0", "]", "ptc", ".", "am", "=", "[", "am", "]", "if", "len", "(", "am", ")", ">", "0", ":", "ptc", ".", "am", ".", "append", "(", "am", "[", "0", "]", ")", "am", "=", "am", ".", "lower", "(", ")", "ptc", ".", "am", ".", "append", "(", "am", ")", "ptc", ".", "am", ".", "append", "(", "am", "[", "0", "]", ")", "else", ":", "am", 
"=", "''", "ptc", ".", "am", "=", "[", "''", ",", "''", "]", "if", "len", "(", "ptc", ".", "meridian", ")", ">", "1", ":", "pm", "=", "ptc", ".", "meridian", "[", "1", "]", "ptc", ".", "pm", "=", "[", "pm", "]", "if", "len", "(", "pm", ")", ">", "0", ":", "ptc", ".", "pm", ".", "append", "(", "pm", "[", "0", "]", ")", "pm", "=", "pm", ".", "lower", "(", ")", "ptc", ".", "pm", ".", "append", "(", "pm", ")", "ptc", ".", "pm", ".", "append", "(", "pm", "[", "0", "]", ")", "else", ":", "pm", "=", "''", "ptc", ".", "pm", "=", "[", "''", ",", "''", "]" ]
Helper function to initialize the single character constants and other symbols needed.
[ "Helper", "function", "to", "initialize", "the", "single", "character", "constants", "and", "other", "symbols", "needed", "." ]
python
train
25.457944
gabstopper/smc-python
smc/routing/bgp.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/routing/bgp.py#L393-L420
def create(cls, name, port=179, external_distance=20, internal_distance=200, local_distance=200, subnet_distance=None): """ Create a custom BGP Profile :param str name: name of profile :param int port: port for BGP process :param int external_distance: external administrative distance; (1-255) :param int internal_distance: internal administrative distance (1-255) :param int local_distance: local administrative distance (aggregation) (1-255) :param list subnet_distance: configure specific subnet's with respective distances :type tuple subnet_distance: (subnet element(Network), distance(int)) :raises CreateElementFailed: reason for failure :return: instance with meta :rtype: BGPProfile """ json = {'name': name, 'external': external_distance, 'internal': internal_distance, 'local': local_distance, 'port': port} if subnet_distance: d = [{'distance': distance, 'subnet': subnet.href} for subnet, distance in subnet_distance] json.update(distance_entry=d) return ElementCreator(cls, json)
[ "def", "create", "(", "cls", ",", "name", ",", "port", "=", "179", ",", "external_distance", "=", "20", ",", "internal_distance", "=", "200", ",", "local_distance", "=", "200", ",", "subnet_distance", "=", "None", ")", ":", "json", "=", "{", "'name'", ":", "name", ",", "'external'", ":", "external_distance", ",", "'internal'", ":", "internal_distance", ",", "'local'", ":", "local_distance", ",", "'port'", ":", "port", "}", "if", "subnet_distance", ":", "d", "=", "[", "{", "'distance'", ":", "distance", ",", "'subnet'", ":", "subnet", ".", "href", "}", "for", "subnet", ",", "distance", "in", "subnet_distance", "]", "json", ".", "update", "(", "distance_entry", "=", "d", ")", "return", "ElementCreator", "(", "cls", ",", "json", ")" ]
Create a custom BGP Profile :param str name: name of profile :param int port: port for BGP process :param int external_distance: external administrative distance; (1-255) :param int internal_distance: internal administrative distance (1-255) :param int local_distance: local administrative distance (aggregation) (1-255) :param list subnet_distance: configure specific subnet's with respective distances :type tuple subnet_distance: (subnet element(Network), distance(int)) :raises CreateElementFailed: reason for failure :return: instance with meta :rtype: BGPProfile
[ "Create", "a", "custom", "BGP", "Profile" ]
python
train
43.5
twisted/txacme
src/txacme/client.py
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L807-L829
def _add_nonce(self, response): """ Store a nonce from a response we received. :param twisted.web.iweb.IResponse response: The HTTP response. :return: The response, unmodified. """ nonce = response.headers.getRawHeaders( REPLAY_NONCE_HEADER, [None])[0] with LOG_JWS_ADD_NONCE(raw_nonce=nonce) as action: if nonce is None: raise errors.MissingNonce(response) else: try: decoded_nonce = Header._fields['nonce'].decode( nonce.decode('ascii') ) action.add_success_fields(nonce=decoded_nonce) except DeserializationError as error: raise errors.BadNonce(nonce, error) self._nonces.add(decoded_nonce) return response
[ "def", "_add_nonce", "(", "self", ",", "response", ")", ":", "nonce", "=", "response", ".", "headers", ".", "getRawHeaders", "(", "REPLAY_NONCE_HEADER", ",", "[", "None", "]", ")", "[", "0", "]", "with", "LOG_JWS_ADD_NONCE", "(", "raw_nonce", "=", "nonce", ")", "as", "action", ":", "if", "nonce", "is", "None", ":", "raise", "errors", ".", "MissingNonce", "(", "response", ")", "else", ":", "try", ":", "decoded_nonce", "=", "Header", ".", "_fields", "[", "'nonce'", "]", ".", "decode", "(", "nonce", ".", "decode", "(", "'ascii'", ")", ")", "action", ".", "add_success_fields", "(", "nonce", "=", "decoded_nonce", ")", "except", "DeserializationError", "as", "error", ":", "raise", "errors", ".", "BadNonce", "(", "nonce", ",", "error", ")", "self", ".", "_nonces", ".", "add", "(", "decoded_nonce", ")", "return", "response" ]
Store a nonce from a response we received. :param twisted.web.iweb.IResponse response: The HTTP response. :return: The response, unmodified.
[ "Store", "a", "nonce", "from", "a", "response", "we", "received", "." ]
python
train
37.608696
explosion/spaCy
examples/training/train_textcat.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/examples/training/train_textcat.py#L120-L129
def load_data(limit=0, split=0.8): """Load data from the IMDB dataset.""" # Partition off part of the train data for evaluation train_data, _ = thinc.extra.datasets.imdb() random.shuffle(train_data) train_data = train_data[-limit:] texts, labels = zip(*train_data) cats = [{"POSITIVE": bool(y), "NEGATIVE": not bool(y)} for y in labels] split = int(len(train_data) * split) return (texts[:split], cats[:split]), (texts[split:], cats[split:])
[ "def", "load_data", "(", "limit", "=", "0", ",", "split", "=", "0.8", ")", ":", "# Partition off part of the train data for evaluation", "train_data", ",", "_", "=", "thinc", ".", "extra", ".", "datasets", ".", "imdb", "(", ")", "random", ".", "shuffle", "(", "train_data", ")", "train_data", "=", "train_data", "[", "-", "limit", ":", "]", "texts", ",", "labels", "=", "zip", "(", "*", "train_data", ")", "cats", "=", "[", "{", "\"POSITIVE\"", ":", "bool", "(", "y", ")", ",", "\"NEGATIVE\"", ":", "not", "bool", "(", "y", ")", "}", "for", "y", "in", "labels", "]", "split", "=", "int", "(", "len", "(", "train_data", ")", "*", "split", ")", "return", "(", "texts", "[", ":", "split", "]", ",", "cats", "[", ":", "split", "]", ")", ",", "(", "texts", "[", "split", ":", "]", ",", "cats", "[", "split", ":", "]", ")" ]
Load data from the IMDB dataset.
[ "Load", "data", "from", "the", "IMDB", "dataset", "." ]
python
train
46.8
log2timeline/dfvfs
dfvfs/vfs/file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/file_entry.py#L418-L426
def GetStat(self): """Retrieves information about the file entry. Returns: VFSStat: a stat object or None if not available. """ if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object
[ "def", "GetStat", "(", "self", ")", ":", "if", "self", ".", "_stat_object", "is", "None", ":", "self", ".", "_stat_object", "=", "self", ".", "_GetStat", "(", ")", "return", "self", ".", "_stat_object" ]
Retrieves information about the file entry. Returns: VFSStat: a stat object or None if not available.
[ "Retrieves", "information", "about", "the", "file", "entry", "." ]
python
train
27
fastai/fastai
docs_src/nbval/cover.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/docs_src/nbval/cover.py#L98-L106
def get_cov(config): """Returns the coverage object of pytest-cov.""" # Check with hasplugin to avoid getplugin exception in older pytest. if config.pluginmanager.hasplugin('_cov'): plugin = config.pluginmanager.getplugin('_cov') if plugin.cov_controller: return plugin.cov_controller.cov return None
[ "def", "get_cov", "(", "config", ")", ":", "# Check with hasplugin to avoid getplugin exception in older pytest.", "if", "config", ".", "pluginmanager", ".", "hasplugin", "(", "'_cov'", ")", ":", "plugin", "=", "config", ".", "pluginmanager", ".", "getplugin", "(", "'_cov'", ")", "if", "plugin", ".", "cov_controller", ":", "return", "plugin", ".", "cov_controller", ".", "cov", "return", "None" ]
Returns the coverage object of pytest-cov.
[ "Returns", "the", "coverage", "object", "of", "pytest", "-", "cov", "." ]
python
train
37.444444
trailofbits/manticore
manticore/ethereum/detectors.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/ethereum/detectors.py#L103-L108
def _get_location(self, state, hash_id): """ Get previously saved location A location is composed of: address, pc, finding, at_init, condition """ return state.context.setdefault('{:s}.locations'.format(self.name), {})[hash_id]
[ "def", "_get_location", "(", "self", ",", "state", ",", "hash_id", ")", ":", "return", "state", ".", "context", ".", "setdefault", "(", "'{:s}.locations'", ".", "format", "(", "self", ".", "name", ")", ",", "{", "}", ")", "[", "hash_id", "]" ]
Get previously saved location A location is composed of: address, pc, finding, at_init, condition
[ "Get", "previously", "saved", "location", "A", "location", "is", "composed", "of", ":", "address", "pc", "finding", "at_init", "condition" ]
python
valid
43.666667
kennedyshead/aioasuswrt
aioasuswrt/connection.py
https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/connection.py#L120-L142
async def async_connect(self): """Connect to the ASUS-WRT Telnet server.""" self._reader, self._writer = await asyncio.open_connection( self._host, self._port) with (await self._io_lock): try: await asyncio.wait_for(self._reader.readuntil(b'login: '), 9) except asyncio.streams.IncompleteReadError: _LOGGER.error( "Unable to read from router on %s:%s" % ( self._host, self._port)) return except TimeoutError: _LOGGER.error("Host timeout.") self._writer.write((self._username + '\n').encode('ascii')) await self._reader.readuntil(b'Password: ') self._writer.write((self._password + '\n').encode('ascii')) self._prompt_string = (await self._reader.readuntil( b'#')).split(b'\n')[-1] self._connected = True
[ "async", "def", "async_connect", "(", "self", ")", ":", "self", ".", "_reader", ",", "self", ".", "_writer", "=", "await", "asyncio", ".", "open_connection", "(", "self", ".", "_host", ",", "self", ".", "_port", ")", "with", "(", "await", "self", ".", "_io_lock", ")", ":", "try", ":", "await", "asyncio", ".", "wait_for", "(", "self", ".", "_reader", ".", "readuntil", "(", "b'login: '", ")", ",", "9", ")", "except", "asyncio", ".", "streams", ".", "IncompleteReadError", ":", "_LOGGER", ".", "error", "(", "\"Unable to read from router on %s:%s\"", "%", "(", "self", ".", "_host", ",", "self", ".", "_port", ")", ")", "return", "except", "TimeoutError", ":", "_LOGGER", ".", "error", "(", "\"Host timeout.\"", ")", "self", ".", "_writer", ".", "write", "(", "(", "self", ".", "_username", "+", "'\\n'", ")", ".", "encode", "(", "'ascii'", ")", ")", "await", "self", ".", "_reader", ".", "readuntil", "(", "b'Password: '", ")", "self", ".", "_writer", ".", "write", "(", "(", "self", ".", "_password", "+", "'\\n'", ")", ".", "encode", "(", "'ascii'", ")", ")", "self", ".", "_prompt_string", "=", "(", "await", "self", ".", "_reader", ".", "readuntil", "(", "b'#'", ")", ")", ".", "split", "(", "b'\\n'", ")", "[", "-", "1", "]", "self", ".", "_connected", "=", "True" ]
Connect to the ASUS-WRT Telnet server.
[ "Connect", "to", "the", "ASUS", "-", "WRT", "Telnet", "server", "." ]
python
train
40.695652
psd-tools/psd-tools
src/psd_tools/api/composer.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/composer.py#L307-L332
def _generate_symbol(path, width, height, command='C'): """Sequence generator for SVG path.""" if len(path) == 0: return # Initial point. yield 'M' yield path[0].anchor[1] * width yield path[0].anchor[0] * height yield command # Closed path or open path points = (zip(path, path[1:] + path[0:1]) if path.is_closed() else zip(path, path[1:])) # Rest of the points. for p1, p2 in points: yield p1.leaving[1] * width yield p1.leaving[0] * height yield p2.preceding[1] * width yield p2.preceding[0] * height yield p2.anchor[1] * width yield p2.anchor[0] * height if path.is_closed(): yield 'Z'
[ "def", "_generate_symbol", "(", "path", ",", "width", ",", "height", ",", "command", "=", "'C'", ")", ":", "if", "len", "(", "path", ")", "==", "0", ":", "return", "# Initial point.", "yield", "'M'", "yield", "path", "[", "0", "]", ".", "anchor", "[", "1", "]", "*", "width", "yield", "path", "[", "0", "]", ".", "anchor", "[", "0", "]", "*", "height", "yield", "command", "# Closed path or open path", "points", "=", "(", "zip", "(", "path", ",", "path", "[", "1", ":", "]", "+", "path", "[", "0", ":", "1", "]", ")", "if", "path", ".", "is_closed", "(", ")", "else", "zip", "(", "path", ",", "path", "[", "1", ":", "]", ")", ")", "# Rest of the points.", "for", "p1", ",", "p2", "in", "points", ":", "yield", "p1", ".", "leaving", "[", "1", "]", "*", "width", "yield", "p1", ".", "leaving", "[", "0", "]", "*", "height", "yield", "p2", ".", "preceding", "[", "1", "]", "*", "width", "yield", "p2", ".", "preceding", "[", "0", "]", "*", "height", "yield", "p2", ".", "anchor", "[", "1", "]", "*", "width", "yield", "p2", ".", "anchor", "[", "0", "]", "*", "height", "if", "path", ".", "is_closed", "(", ")", ":", "yield", "'Z'" ]
Sequence generator for SVG path.
[ "Sequence", "generator", "for", "SVG", "path", "." ]
python
train
26.692308
googleapis/google-cloud-python
storage/google/cloud/storage/batch.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/batch.py#L191-L218
def _prepare_batch_request(self): """Prepares headers and body for a batch request. :rtype: tuple (dict, str) :returns: The pair of headers and body of the batch request to be sent. :raises: :class:`ValueError` if no requests have been deferred. """ if len(self._requests) == 0: raise ValueError("No deferred requests") multi = MIMEMultipart() for method, uri, headers, body in self._requests: subrequest = MIMEApplicationHTTP(method, uri, headers, body) multi.attach(subrequest) # The `email` package expects to deal with "native" strings if six.PY3: # pragma: NO COVER Python3 buf = io.StringIO() else: buf = io.BytesIO() generator = Generator(buf, False, 0) generator.flatten(multi) payload = buf.getvalue() # Strip off redundant header text _, body = payload.split("\n\n", 1) return dict(multi._headers), body
[ "def", "_prepare_batch_request", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_requests", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No deferred requests\"", ")", "multi", "=", "MIMEMultipart", "(", ")", "for", "method", ",", "uri", ",", "headers", ",", "body", "in", "self", ".", "_requests", ":", "subrequest", "=", "MIMEApplicationHTTP", "(", "method", ",", "uri", ",", "headers", ",", "body", ")", "multi", ".", "attach", "(", "subrequest", ")", "# The `email` package expects to deal with \"native\" strings", "if", "six", ".", "PY3", ":", "# pragma: NO COVER Python3", "buf", "=", "io", ".", "StringIO", "(", ")", "else", ":", "buf", "=", "io", ".", "BytesIO", "(", ")", "generator", "=", "Generator", "(", "buf", ",", "False", ",", "0", ")", "generator", ".", "flatten", "(", "multi", ")", "payload", "=", "buf", ".", "getvalue", "(", ")", "# Strip off redundant header text", "_", ",", "body", "=", "payload", ".", "split", "(", "\"\\n\\n\"", ",", "1", ")", "return", "dict", "(", "multi", ".", "_headers", ")", ",", "body" ]
Prepares headers and body for a batch request. :rtype: tuple (dict, str) :returns: The pair of headers and body of the batch request to be sent. :raises: :class:`ValueError` if no requests have been deferred.
[ "Prepares", "headers", "and", "body", "for", "a", "batch", "request", "." ]
python
train
35.321429
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/dump.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/dump.py#L42-L83
def dump_commands(commands, directory=None, sub_dir=None): """ Dump SQL commands to .sql files. :param commands: List of SQL commands :param directory: Directory to dump commands to :param sub_dir: Sub directory :return: Directory failed commands were dumped to """ print('\t' + str(len(commands)), 'failed commands') # Create dump_dir directory if directory and os.path.isfile(directory): dump_dir = set_dump_directory(os.path.dirname(directory), sub_dir) return_dir = dump_dir elif directory: dump_dir = set_dump_directory(directory, sub_dir) return_dir = dump_dir else: dump_dir = TemporaryDirectory().name return_dir = TemporaryDirectory() # Create list of (path, content) tuples command_filepath = [(fail, os.path.join(dump_dir, str(count) + '.sql')) for count, fail in enumerate(commands)] # Dump failed commands to text file in the same directory as the commands # Utilize's multiprocessing module if it is available timer = Timer() if MULTIPROCESS: pool = Pool(cpu_count()) pool.map(write_text_tup, command_filepath) pool.close() print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end), '\n\t\tMethod : (multiprocessing)\n\t\tDirectory : {0}'.format(dump_dir)) else: for tup in command_filepath: write_text_tup(tup) print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end), '\n\t\tMethod : (sequential)\n\t\tDirectory : {0}'.format(dump_dir)) # Return base directory of dumped commands return return_dir
[ "def", "dump_commands", "(", "commands", ",", "directory", "=", "None", ",", "sub_dir", "=", "None", ")", ":", "print", "(", "'\\t'", "+", "str", "(", "len", "(", "commands", ")", ")", ",", "'failed commands'", ")", "# Create dump_dir directory", "if", "directory", "and", "os", ".", "path", ".", "isfile", "(", "directory", ")", ":", "dump_dir", "=", "set_dump_directory", "(", "os", ".", "path", ".", "dirname", "(", "directory", ")", ",", "sub_dir", ")", "return_dir", "=", "dump_dir", "elif", "directory", ":", "dump_dir", "=", "set_dump_directory", "(", "directory", ",", "sub_dir", ")", "return_dir", "=", "dump_dir", "else", ":", "dump_dir", "=", "TemporaryDirectory", "(", ")", ".", "name", "return_dir", "=", "TemporaryDirectory", "(", ")", "# Create list of (path, content) tuples", "command_filepath", "=", "[", "(", "fail", ",", "os", ".", "path", ".", "join", "(", "dump_dir", ",", "str", "(", "count", ")", "+", "'.sql'", ")", ")", "for", "count", ",", "fail", "in", "enumerate", "(", "commands", ")", "]", "# Dump failed commands to text file in the same directory as the commands", "# Utilize's multiprocessing module if it is available", "timer", "=", "Timer", "(", ")", "if", "MULTIPROCESS", ":", "pool", "=", "Pool", "(", "cpu_count", "(", ")", ")", "pool", ".", "map", "(", "write_text_tup", ",", "command_filepath", ")", "pool", ".", "close", "(", ")", "print", "(", "'\\tDumped '", ",", "len", "(", "command_filepath", ")", ",", "'commands\\n\\t\\tTime : {0}'", ".", "format", "(", "timer", ".", "end", ")", ",", "'\\n\\t\\tMethod : (multiprocessing)\\n\\t\\tDirectory : {0}'", ".", "format", "(", "dump_dir", ")", ")", "else", ":", "for", "tup", "in", "command_filepath", ":", "write_text_tup", "(", "tup", ")", "print", "(", "'\\tDumped '", ",", "len", "(", "command_filepath", ")", ",", "'commands\\n\\t\\tTime : {0}'", ".", "format", "(", "timer", ".", "end", ")", ",", "'\\n\\t\\tMethod : (sequential)\\n\\t\\tDirectory : {0}'", ".", "format", "(", "dump_dir", ")", ")", "# Return base directory of dumped commands", "return", "return_dir" ]
Dump SQL commands to .sql files. :param commands: List of SQL commands :param directory: Directory to dump commands to :param sub_dir: Sub directory :return: Directory failed commands were dumped to
[ "Dump", "SQL", "commands", "to", ".", "sql", "files", "." ]
python
train
39.809524
trailofbits/manticore
manticore/core/smtlib/visitors.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/core/smtlib/visitors.py#L580-L601
def to_constant(expression): """ Iff the expression can be simplified to a Constant get the actual concrete value. This discards/ignore any taint """ value = simplify(expression) if isinstance(value, Expression) and value.taint: raise ValueError("Can not simplify tainted values to constant") if isinstance(value, Constant): return value.value elif isinstance(value, Array): if expression.index_max: ba = bytearray() for i in range(expression.index_max): value_i = simplify(value[i]) if not isinstance(value_i, Constant): break ba.append(value_i.value) else: return bytes(ba) return expression return value
[ "def", "to_constant", "(", "expression", ")", ":", "value", "=", "simplify", "(", "expression", ")", "if", "isinstance", "(", "value", ",", "Expression", ")", "and", "value", ".", "taint", ":", "raise", "ValueError", "(", "\"Can not simplify tainted values to constant\"", ")", "if", "isinstance", "(", "value", ",", "Constant", ")", ":", "return", "value", ".", "value", "elif", "isinstance", "(", "value", ",", "Array", ")", ":", "if", "expression", ".", "index_max", ":", "ba", "=", "bytearray", "(", ")", "for", "i", "in", "range", "(", "expression", ".", "index_max", ")", ":", "value_i", "=", "simplify", "(", "value", "[", "i", "]", ")", "if", "not", "isinstance", "(", "value_i", ",", "Constant", ")", ":", "break", "ba", ".", "append", "(", "value_i", ".", "value", ")", "else", ":", "return", "bytes", "(", "ba", ")", "return", "expression", "return", "value" ]
Iff the expression can be simplified to a Constant get the actual concrete value. This discards/ignore any taint
[ "Iff", "the", "expression", "can", "be", "simplified", "to", "a", "Constant", "get", "the", "actual", "concrete", "value", ".", "This", "discards", "/", "ignore", "any", "taint" ]
python
valid
35.681818
robotpy/pyfrc
lib/pyfrc/sim/field/user_renderer.py
https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/sim/field/user_renderer.py#L41-L91
def draw_pathfinder_trajectory( self, trajectory, color="#ff0000", offset=None, scale=(1, 1), show_dt=False, dt_offset=0.0, **kwargs ): """ Special helper function for drawing trajectories generated by robotpy-pathfinder :param trajectory: A list of pathfinder segment objects :param offset: If specified, should be x/y tuple to add to the path relative to the robot coordinates :param scale: Multiply all points by this (x,y) tuple :param show_dt: draw text every N seconds along path, or False :param dt_offset: add this to each dt shown :param kwargs: Keyword options to pass to tkinter.create_line """ # pathfinder x/y coordinates are switched pts = [(pt.x, -pt.y) for pt in trajectory] robot_coordinates = offset if offset else True self.draw_line( pts, color=color, robot_coordinates=robot_coordinates, relative_to_first=True, arrow=True, scale=scale, ) if show_dt: dt = trajectory[0].dt def _defer_text(): # defer this execution to save effort when drawing px_per_ft = UserRenderer._global_ui.field.px_per_ft line = self._elements[-1] for i in range(0, len(pts), int(show_dt / dt)): text = "t=%.2f" % (dt_offset + i * dt,) el = TextElement( text, line.pts[i], 0, "#000000", int(px_per_ft * 0.5) ) UserRenderer._global_ui.field.add_moving_element(el) self._elements.append(el) self._run(_defer_text)
[ "def", "draw_pathfinder_trajectory", "(", "self", ",", "trajectory", ",", "color", "=", "\"#ff0000\"", ",", "offset", "=", "None", ",", "scale", "=", "(", "1", ",", "1", ")", ",", "show_dt", "=", "False", ",", "dt_offset", "=", "0.0", ",", "*", "*", "kwargs", ")", ":", "# pathfinder x/y coordinates are switched", "pts", "=", "[", "(", "pt", ".", "x", ",", "-", "pt", ".", "y", ")", "for", "pt", "in", "trajectory", "]", "robot_coordinates", "=", "offset", "if", "offset", "else", "True", "self", ".", "draw_line", "(", "pts", ",", "color", "=", "color", ",", "robot_coordinates", "=", "robot_coordinates", ",", "relative_to_first", "=", "True", ",", "arrow", "=", "True", ",", "scale", "=", "scale", ",", ")", "if", "show_dt", ":", "dt", "=", "trajectory", "[", "0", "]", ".", "dt", "def", "_defer_text", "(", ")", ":", "# defer this execution to save effort when drawing", "px_per_ft", "=", "UserRenderer", ".", "_global_ui", ".", "field", ".", "px_per_ft", "line", "=", "self", ".", "_elements", "[", "-", "1", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "pts", ")", ",", "int", "(", "show_dt", "/", "dt", ")", ")", ":", "text", "=", "\"t=%.2f\"", "%", "(", "dt_offset", "+", "i", "*", "dt", ",", ")", "el", "=", "TextElement", "(", "text", ",", "line", ".", "pts", "[", "i", "]", ",", "0", ",", "\"#000000\"", ",", "int", "(", "px_per_ft", "*", "0.5", ")", ")", "UserRenderer", ".", "_global_ui", ".", "field", ".", "add_moving_element", "(", "el", ")", "self", ".", "_elements", ".", "append", "(", "el", ")", "self", ".", "_run", "(", "_defer_text", ")" ]
Special helper function for drawing trajectories generated by robotpy-pathfinder :param trajectory: A list of pathfinder segment objects :param offset: If specified, should be x/y tuple to add to the path relative to the robot coordinates :param scale: Multiply all points by this (x,y) tuple :param show_dt: draw text every N seconds along path, or False :param dt_offset: add this to each dt shown :param kwargs: Keyword options to pass to tkinter.create_line
[ "Special", "helper", "function", "for", "drawing", "trajectories", "generated", "by", "robotpy", "-", "pathfinder", ":", "param", "trajectory", ":", "A", "list", "of", "pathfinder", "segment", "objects", ":", "param", "offset", ":", "If", "specified", "should", "be", "x", "/", "y", "tuple", "to", "add", "to", "the", "path", "relative", "to", "the", "robot", "coordinates", ":", "param", "scale", ":", "Multiply", "all", "points", "by", "this", "(", "x", "y", ")", "tuple", ":", "param", "show_dt", ":", "draw", "text", "every", "N", "seconds", "along", "path", "or", "False", ":", "param", "dt_offset", ":", "add", "this", "to", "each", "dt", "shown", ":", "param", "kwargs", ":", "Keyword", "options", "to", "pass", "to", "tkinter", ".", "create_line" ]
python
train
35.745098
saltstack/salt
salt/modules/nspawn.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nspawn.py#L190-L208
def _bootstrap_fedora(name, **kwargs): ''' Bootstrap a Fedora container ''' dst = _make_container_root(name) if not kwargs.get('version', False): if __grains__['os'].lower() == 'fedora': version = __grains__['osrelease'] else: version = '21' else: version = '21' cmd = ('yum -y --releasever={0} --nogpg --installroot={1} ' '--disablerepo="*" --enablerepo=fedora install systemd passwd yum ' 'fedora-release vim-minimal'.format(version, dst)) ret = __salt__['cmd.run_all'](cmd, python_shell=False) if ret['retcode'] != 0: _build_failed(dst, name) return ret
[ "def", "_bootstrap_fedora", "(", "name", ",", "*", "*", "kwargs", ")", ":", "dst", "=", "_make_container_root", "(", "name", ")", "if", "not", "kwargs", ".", "get", "(", "'version'", ",", "False", ")", ":", "if", "__grains__", "[", "'os'", "]", ".", "lower", "(", ")", "==", "'fedora'", ":", "version", "=", "__grains__", "[", "'osrelease'", "]", "else", ":", "version", "=", "'21'", "else", ":", "version", "=", "'21'", "cmd", "=", "(", "'yum -y --releasever={0} --nogpg --installroot={1} '", "'--disablerepo=\"*\" --enablerepo=fedora install systemd passwd yum '", "'fedora-release vim-minimal'", ".", "format", "(", "version", ",", "dst", ")", ")", "ret", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "ret", "[", "'retcode'", "]", "!=", "0", ":", "_build_failed", "(", "dst", ",", "name", ")", "return", "ret" ]
Bootstrap a Fedora container
[ "Bootstrap", "a", "Fedora", "container" ]
python
train
34.526316
gwastro/pycbc
pycbc/workflow/core.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/core.py#L1817-L1826
def parse_segdict_key(self, key): """ Return ifo and name from the segdict key. """ splt = key.split(':') if len(splt) == 2: return splt[0], splt[1] else: err_msg = "Key should be of the format 'ifo:name', got %s." %(key,) raise ValueError(err_msg)
[ "def", "parse_segdict_key", "(", "self", ",", "key", ")", ":", "splt", "=", "key", ".", "split", "(", "':'", ")", "if", "len", "(", "splt", ")", "==", "2", ":", "return", "splt", "[", "0", "]", ",", "splt", "[", "1", "]", "else", ":", "err_msg", "=", "\"Key should be of the format 'ifo:name', got %s.\"", "%", "(", "key", ",", ")", "raise", "ValueError", "(", "err_msg", ")" ]
Return ifo and name from the segdict key.
[ "Return", "ifo", "and", "name", "from", "the", "segdict", "key", "." ]
python
train
32.3
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_firmware_ext.py#L25-L37
def show_firmware_version_output_show_firmware_version_switchid(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_firmware_version = ET.Element("show_firmware_version") config = show_firmware_version output = ET.SubElement(show_firmware_version, "output") show_firmware_version = ET.SubElement(output, "show-firmware-version") switchid = ET.SubElement(show_firmware_version, "switchid") switchid.text = kwargs.pop('switchid') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_firmware_version_output_show_firmware_version_switchid", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_firmware_version", "=", "ET", ".", "Element", "(", "\"show_firmware_version\"", ")", "config", "=", "show_firmware_version", "output", "=", "ET", ".", "SubElement", "(", "show_firmware_version", ",", "\"output\"", ")", "show_firmware_version", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-firmware-version\"", ")", "switchid", "=", "ET", ".", "SubElement", "(", "show_firmware_version", ",", "\"switchid\"", ")", "switchid", ".", "text", "=", "kwargs", ".", "pop", "(", "'switchid'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
46.538462
inveniosoftware/invenio-access
invenio_access/factory.py
https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/factory.py#L20-L30
def action_factory(name, parameter=False): """Factory method for creating new actions (w/wo parameters). :param name: Name of the action (prefix with your module name). :param parameter: Determines if action should take parameters or not. Default is ``False``. """ if parameter: return partial(ParameterizedActionNeed, name) else: return ActionNeed(name)
[ "def", "action_factory", "(", "name", ",", "parameter", "=", "False", ")", ":", "if", "parameter", ":", "return", "partial", "(", "ParameterizedActionNeed", ",", "name", ")", "else", ":", "return", "ActionNeed", "(", "name", ")" ]
Factory method for creating new actions (w/wo parameters). :param name: Name of the action (prefix with your module name). :param parameter: Determines if action should take parameters or not. Default is ``False``.
[ "Factory", "method", "for", "creating", "new", "actions", "(", "w", "/", "wo", "parameters", ")", "." ]
python
train
35.727273
rbit/pydtls
dtls/__init__.py
https://github.com/rbit/pydtls/blob/41a71fccd990347d0de5f42418fea1e4e733359c/dtls/__init__.py#L37-L59
def _prep_bins(): """ Support for running straight out of a cloned source directory instead of an installed distribution """ from os import path from sys import platform, maxsize from shutil import copy bit_suffix = "-x86_64" if maxsize > 2**32 else "-x86" package_root = path.abspath(path.dirname(__file__)) prebuilt_path = path.join(package_root, "prebuilt", platform + bit_suffix) config = {"MANIFEST_DIR": prebuilt_path} try: execfile(path.join(prebuilt_path, "manifest.pycfg"), config) except IOError: return # there are no prebuilts for this platform - nothing to do files = map(lambda x: path.join(prebuilt_path, x), config["FILES"]) for prebuilt_file in files: try: copy(path.join(prebuilt_path, prebuilt_file), package_root) except IOError: pass
[ "def", "_prep_bins", "(", ")", ":", "from", "os", "import", "path", "from", "sys", "import", "platform", ",", "maxsize", "from", "shutil", "import", "copy", "bit_suffix", "=", "\"-x86_64\"", "if", "maxsize", ">", "2", "**", "32", "else", "\"-x86\"", "package_root", "=", "path", ".", "abspath", "(", "path", ".", "dirname", "(", "__file__", ")", ")", "prebuilt_path", "=", "path", ".", "join", "(", "package_root", ",", "\"prebuilt\"", ",", "platform", "+", "bit_suffix", ")", "config", "=", "{", "\"MANIFEST_DIR\"", ":", "prebuilt_path", "}", "try", ":", "execfile", "(", "path", ".", "join", "(", "prebuilt_path", ",", "\"manifest.pycfg\"", ")", ",", "config", ")", "except", "IOError", ":", "return", "# there are no prebuilts for this platform - nothing to do", "files", "=", "map", "(", "lambda", "x", ":", "path", ".", "join", "(", "prebuilt_path", ",", "x", ")", ",", "config", "[", "\"FILES\"", "]", ")", "for", "prebuilt_file", "in", "files", ":", "try", ":", "copy", "(", "path", ".", "join", "(", "prebuilt_path", ",", "prebuilt_file", ")", ",", "package_root", ")", "except", "IOError", ":", "pass" ]
Support for running straight out of a cloned source directory instead of an installed distribution
[ "Support", "for", "running", "straight", "out", "of", "a", "cloned", "source", "directory", "instead", "of", "an", "installed", "distribution" ]
python
train
36.956522
peepall/FancyLogger
FancyLogger/__init__.py
https://github.com/peepall/FancyLogger/blob/7f13f1397e76ed768fb6b6358194118831fafc6d/FancyLogger/__init__.py#L277-L286
def set_level(self, level, console_only=False): """ Defines the logging level (from standard logging module) for log messages. :param level: Level of logging for the file logger. :param console_only: [Optional] If True then the file logger will not be affected. """ self.queue.put(dill.dumps(SetLevelCommand(level=level, console_only=console_only)))
[ "def", "set_level", "(", "self", ",", "level", ",", "console_only", "=", "False", ")", ":", "self", ".", "queue", ".", "put", "(", "dill", ".", "dumps", "(", "SetLevelCommand", "(", "level", "=", "level", ",", "console_only", "=", "console_only", ")", ")", ")" ]
Defines the logging level (from standard logging module) for log messages. :param level: Level of logging for the file logger. :param console_only: [Optional] If True then the file logger will not be affected.
[ "Defines", "the", "logging", "level", "(", "from", "standard", "logging", "module", ")", "for", "log", "messages", ".", ":", "param", "level", ":", "Level", "of", "logging", "for", "the", "file", "logger", ".", ":", "param", "console_only", ":", "[", "Optional", "]", "If", "True", "then", "the", "file", "logger", "will", "not", "be", "affected", "." ]
python
train
48.7
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L3160-L3166
def copyNodeList(self): """Do a recursive copy of the node list. Use xmlDocCopyNodeList() if possible to ensure string interning. """ ret = libxml2mod.xmlCopyNodeList(self._o) if ret is None:raise treeError('xmlCopyNodeList() failed') __tmp = xmlNode(_obj=ret) return __tmp
[ "def", "copyNodeList", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlCopyNodeList", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlCopyNodeList() failed'", ")", "__tmp", "=", "xmlNode", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
Do a recursive copy of the node list. Use xmlDocCopyNodeList() if possible to ensure string interning.
[ "Do", "a", "recursive", "copy", "of", "the", "node", "list", ".", "Use", "xmlDocCopyNodeList", "()", "if", "possible", "to", "ensure", "string", "interning", "." ]
python
train
45.285714
pmacosta/pcsv
pcsv/csv_file.py
https://github.com/pmacosta/pcsv/blob/cd1588c19b0cd58c38bc672e396db940f88ffbd7/pcsv/csv_file.py#L596-L643
def dsort(self, order): r""" Sort rows. :param order: Sort order :type order: :ref:`CsvColFilter` .. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. pcsv.csv_file.CsvFile.dsort :raises: * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]] """ # Make order conforming to a list of dictionaries order = order if isinstance(order, list) else [order] norder = [{item: "A"} if not isinstance(item, dict) else item for item in order] # Verify that all columns exist in file self._in_header([list(item.keys())[0] for item in norder]) # Get column indexes clist = [] for nitem in norder: for key, value in nitem.items(): clist.append( ( key if isinstance(key, int) else self._header_upper.index(key.upper()), value.upper() == "D", ) ) # From the Python documentation: # "Starting with Python 2.3, the sort() method is guaranteed to be # stable. A sort is stable if it guarantees not to change the # relative order of elements that compare equal - this is helpful # for sorting in multiple passes (for example, sort by department, # then by salary grade)." # This means that the sorts have to be done from "minor" column to # "major" column for (cindex, rvalue) in reversed(clist): fpointer = operator.itemgetter(cindex) self._data.sort(key=fpointer, reverse=rvalue)
[ "def", "dsort", "(", "self", ",", "order", ")", ":", "# Make order conforming to a list of dictionaries", "order", "=", "order", "if", "isinstance", "(", "order", ",", "list", ")", "else", "[", "order", "]", "norder", "=", "[", "{", "item", ":", "\"A\"", "}", "if", "not", "isinstance", "(", "item", ",", "dict", ")", "else", "item", "for", "item", "in", "order", "]", "# Verify that all columns exist in file", "self", ".", "_in_header", "(", "[", "list", "(", "item", ".", "keys", "(", ")", ")", "[", "0", "]", "for", "item", "in", "norder", "]", ")", "# Get column indexes", "clist", "=", "[", "]", "for", "nitem", "in", "norder", ":", "for", "key", ",", "value", "in", "nitem", ".", "items", "(", ")", ":", "clist", ".", "append", "(", "(", "key", "if", "isinstance", "(", "key", ",", "int", ")", "else", "self", ".", "_header_upper", ".", "index", "(", "key", ".", "upper", "(", ")", ")", ",", "value", ".", "upper", "(", ")", "==", "\"D\"", ",", ")", ")", "# From the Python documentation:", "# \"Starting with Python 2.3, the sort() method is guaranteed to be", "# stable. A sort is stable if it guarantees not to change the", "# relative order of elements that compare equal - this is helpful", "# for sorting in multiple passes (for example, sort by department,", "# then by salary grade).\"", "# This means that the sorts have to be done from \"minor\" column to", "# \"major\" column", "for", "(", "cindex", ",", "rvalue", ")", "in", "reversed", "(", "clist", ")", ":", "fpointer", "=", "operator", ".", "itemgetter", "(", "cindex", ")", "self", ".", "_data", ".", "sort", "(", "key", "=", "fpointer", ",", "reverse", "=", "rvalue", ")" ]
r""" Sort rows. :param order: Sort order :type order: :ref:`CsvColFilter` .. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. pcsv.csv_file.CsvFile.dsort :raises: * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]]
[ "r", "Sort", "rows", "." ]
python
train
37.958333
wummel/linkchecker
third_party/dnspython/dns/name.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/name.py#L90-L102
def _escapify(label): """Escape the characters in label which need it. @returns: the escaped string @rtype: string""" text = '' for c in label: if c in _escaped: text += '\\' + c elif ord(c) > 0x20 and ord(c) < 0x7F: text += c else: text += '\\%03d' % ord(c) return text
[ "def", "_escapify", "(", "label", ")", ":", "text", "=", "''", "for", "c", "in", "label", ":", "if", "c", "in", "_escaped", ":", "text", "+=", "'\\\\'", "+", "c", "elif", "ord", "(", "c", ")", ">", "0x20", "and", "ord", "(", "c", ")", "<", "0x7F", ":", "text", "+=", "c", "else", ":", "text", "+=", "'\\\\%03d'", "%", "ord", "(", "c", ")", "return", "text" ]
Escape the characters in label which need it. @returns: the escaped string @rtype: string
[ "Escape", "the", "characters", "in", "label", "which", "need", "it", "." ]
python
train
26.307692
RudolfCardinal/pythonlib
cardinal_pythonlib/tools/remove_duplicate_files.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/tools/remove_duplicate_files.py#L53-L186
def deduplicate(directories: List[str], recursive: bool, dummy_run: bool) -> None: """ De-duplicate files within one or more directories. Remove files that are identical to ones already considered. Args: directories: list of directories to process recursive: process subdirectories (recursively)? dummy_run: say what it'll do, but don't do it """ # ------------------------------------------------------------------------- # Catalogue files by their size # ------------------------------------------------------------------------- files_by_size = {} # type: Dict[int, List[str]] # maps size to list of filenames # noqa num_considered = 0 for filename in gen_filenames(directories, recursive=recursive): if not os.path.isfile(filename): continue size = os.stat(filename)[stat.ST_SIZE] a = files_by_size.setdefault(size, []) a.append(filename) num_considered += 1 log.debug("files_by_size =\n{}", pformat(files_by_size)) # ------------------------------------------------------------------------- # By size, look for duplicates using a hash of the first part only # ------------------------------------------------------------------------- log.info("Finding potential duplicates...") potential_duplicate_sets = [] potential_count = 0 sizes = list(files_by_size.keys()) sizes.sort() for k in sizes: files_of_this_size = files_by_size[k] out_files = [] # type: List[str] # ... list of all files having >1 file per hash, for this size hashes = {} # type: Dict[str, Union[bool, str]] # ... key is a hash; value is either True or a filename if len(files_of_this_size) == 1: continue log.info("Testing {} files of size {}...", len(files_of_this_size), k) for filename in files_of_this_size: if not os.path.isfile(filename): continue log.debug("Quick-scanning file: {}", filename) with open(filename, 'rb') as fd: hasher = md5() hasher.update(fd.read(INITIAL_HASH_SIZE)) hash_value = hasher.digest() if hash_value in hashes: # We have discovered the SECOND OR SUBSEQUENT hash match. first_file_or_true = hashes[hash_value] if first_file_or_true is not True: # We have discovered the SECOND file; # first_file_or_true contains the name of the FIRST. out_files.append(first_file_or_true) hashes[hash_value] = True out_files.append(filename) else: # We have discovered the FIRST file with this hash. 
hashes[hash_value] = filename if out_files: potential_duplicate_sets.append(out_files) potential_count = potential_count + len(out_files) del files_by_size log.info("Found {} sets of potential duplicates, based on hashing the " "first {} bytes of each...", potential_count, INITIAL_HASH_SIZE) log.debug("potential_duplicate_sets =\n{}", pformat(potential_duplicate_sets)) # ------------------------------------------------------------------------- # Within each set, check for duplicates using a hash of the entire file # ------------------------------------------------------------------------- log.info("Scanning for real duplicates...") num_scanned = 0 num_to_scan = sum(len(one_set) for one_set in potential_duplicate_sets) duplicate_sets = [] # type: List[List[str]] for one_set in potential_duplicate_sets: out_files = [] # type: List[str] hashes = {} for filename in one_set: num_scanned += 1 log.info("Scanning file [{}/{}]: {}", num_scanned, num_to_scan, filename) with open(filename, 'rb') as fd: hasher = md5() while True: r = fd.read(MAIN_READ_CHUNK_SIZE) if len(r) == 0: break hasher.update(r) hash_value = hasher.digest() if hash_value in hashes: if not out_files: out_files.append(hashes[hash_value]) out_files.append(filename) else: hashes[hash_value] = filename if len(out_files): duplicate_sets.append(out_files) log.debug("duplicate_sets = \n{}", pformat(duplicate_sets)) num_originals = 0 num_deleted = 0 for d in duplicate_sets: print("Original is: {}".format(d[0])) num_originals += 1 for f in d[1:]: if dummy_run: print("Would delete: {}".format(f)) else: print("Deleting: {}".format(f)) os.remove(f) num_deleted += 1 print() num_unique = num_considered - (num_originals + num_deleted) print( "{action} {d} duplicates, leaving {o} originals (and {u} unique files " "not touched; {c} files considered in total)".format( action="Would delete" if dummy_run else "Deleted", d=num_deleted, o=num_originals, u=num_unique, c=num_considered ) )
[ "def", "deduplicate", "(", "directories", ":", "List", "[", "str", "]", ",", "recursive", ":", "bool", ",", "dummy_run", ":", "bool", ")", "->", "None", ":", "# -------------------------------------------------------------------------", "# Catalogue files by their size", "# -------------------------------------------------------------------------", "files_by_size", "=", "{", "}", "# type: Dict[int, List[str]] # maps size to list of filenames # noqa", "num_considered", "=", "0", "for", "filename", "in", "gen_filenames", "(", "directories", ",", "recursive", "=", "recursive", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "continue", "size", "=", "os", ".", "stat", "(", "filename", ")", "[", "stat", ".", "ST_SIZE", "]", "a", "=", "files_by_size", ".", "setdefault", "(", "size", ",", "[", "]", ")", "a", ".", "append", "(", "filename", ")", "num_considered", "+=", "1", "log", ".", "debug", "(", "\"files_by_size =\\n{}\"", ",", "pformat", "(", "files_by_size", ")", ")", "# -------------------------------------------------------------------------", "# By size, look for duplicates using a hash of the first part only", "# -------------------------------------------------------------------------", "log", ".", "info", "(", "\"Finding potential duplicates...\"", ")", "potential_duplicate_sets", "=", "[", "]", "potential_count", "=", "0", "sizes", "=", "list", "(", "files_by_size", ".", "keys", "(", ")", ")", "sizes", ".", "sort", "(", ")", "for", "k", "in", "sizes", ":", "files_of_this_size", "=", "files_by_size", "[", "k", "]", "out_files", "=", "[", "]", "# type: List[str]", "# ... list of all files having >1 file per hash, for this size", "hashes", "=", "{", "}", "# type: Dict[str, Union[bool, str]]", "# ... key is a hash; value is either True or a filename", "if", "len", "(", "files_of_this_size", ")", "==", "1", ":", "continue", "log", ".", "info", "(", "\"Testing {} files of size {}...\"", ",", "len", "(", "files_of_this_size", ")", ",", "k", ")", "for", "filename", "in", "files_of_this_size", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "continue", "log", ".", "debug", "(", "\"Quick-scanning file: {}\"", ",", "filename", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "fd", ":", "hasher", "=", "md5", "(", ")", "hasher", ".", "update", "(", "fd", ".", "read", "(", "INITIAL_HASH_SIZE", ")", ")", "hash_value", "=", "hasher", ".", "digest", "(", ")", "if", "hash_value", "in", "hashes", ":", "# We have discovered the SECOND OR SUBSEQUENT hash match.", "first_file_or_true", "=", "hashes", "[", "hash_value", "]", "if", "first_file_or_true", "is", "not", "True", ":", "# We have discovered the SECOND file;", "# first_file_or_true contains the name of the FIRST.", "out_files", ".", "append", "(", "first_file_or_true", ")", "hashes", "[", "hash_value", "]", "=", "True", "out_files", ".", "append", "(", "filename", ")", "else", ":", "# We have discovered the FIRST file with this hash.", "hashes", "[", "hash_value", "]", "=", "filename", "if", "out_files", ":", "potential_duplicate_sets", ".", "append", "(", "out_files", ")", "potential_count", "=", "potential_count", "+", "len", "(", "out_files", ")", "del", "files_by_size", "log", ".", "info", "(", "\"Found {} sets of potential duplicates, based on hashing the \"", "\"first {} bytes of each...\"", ",", "potential_count", ",", "INITIAL_HASH_SIZE", ")", "log", ".", "debug", "(", "\"potential_duplicate_sets =\\n{}\"", ",", "pformat", "(", "potential_duplicate_sets", ")", 
")", "# -------------------------------------------------------------------------", "# Within each set, check for duplicates using a hash of the entire file", "# -------------------------------------------------------------------------", "log", ".", "info", "(", "\"Scanning for real duplicates...\"", ")", "num_scanned", "=", "0", "num_to_scan", "=", "sum", "(", "len", "(", "one_set", ")", "for", "one_set", "in", "potential_duplicate_sets", ")", "duplicate_sets", "=", "[", "]", "# type: List[List[str]]", "for", "one_set", "in", "potential_duplicate_sets", ":", "out_files", "=", "[", "]", "# type: List[str]", "hashes", "=", "{", "}", "for", "filename", "in", "one_set", ":", "num_scanned", "+=", "1", "log", ".", "info", "(", "\"Scanning file [{}/{}]: {}\"", ",", "num_scanned", ",", "num_to_scan", ",", "filename", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "fd", ":", "hasher", "=", "md5", "(", ")", "while", "True", ":", "r", "=", "fd", ".", "read", "(", "MAIN_READ_CHUNK_SIZE", ")", "if", "len", "(", "r", ")", "==", "0", ":", "break", "hasher", ".", "update", "(", "r", ")", "hash_value", "=", "hasher", ".", "digest", "(", ")", "if", "hash_value", "in", "hashes", ":", "if", "not", "out_files", ":", "out_files", ".", "append", "(", "hashes", "[", "hash_value", "]", ")", "out_files", ".", "append", "(", "filename", ")", "else", ":", "hashes", "[", "hash_value", "]", "=", "filename", "if", "len", "(", "out_files", ")", ":", "duplicate_sets", ".", "append", "(", "out_files", ")", "log", ".", "debug", "(", "\"duplicate_sets = \\n{}\"", ",", "pformat", "(", "duplicate_sets", ")", ")", "num_originals", "=", "0", "num_deleted", "=", "0", "for", "d", "in", "duplicate_sets", ":", "print", "(", "\"Original is: {}\"", ".", "format", "(", "d", "[", "0", "]", ")", ")", "num_originals", "+=", "1", "for", "f", "in", "d", "[", "1", ":", "]", ":", "if", "dummy_run", ":", "print", "(", "\"Would delete: {}\"", ".", "format", "(", "f", ")", ")", "else", ":", "print", "(", "\"Deleting: {}\"", ".", "format", "(", "f", ")", ")", "os", ".", "remove", "(", "f", ")", "num_deleted", "+=", "1", "print", "(", ")", "num_unique", "=", "num_considered", "-", "(", "num_originals", "+", "num_deleted", ")", "print", "(", "\"{action} {d} duplicates, leaving {o} originals (and {u} unique files \"", "\"not touched; {c} files considered in total)\"", ".", "format", "(", "action", "=", "\"Would delete\"", "if", "dummy_run", "else", "\"Deleted\"", ",", "d", "=", "num_deleted", ",", "o", "=", "num_originals", ",", "u", "=", "num_unique", ",", "c", "=", "num_considered", ")", ")" ]
De-duplicate files within one or more directories. Remove files that are identical to ones already considered. Args: directories: list of directories to process recursive: process subdirectories (recursively)? dummy_run: say what it'll do, but don't do it
[ "De", "-", "duplicate", "files", "within", "one", "or", "more", "directories", ".", "Remove", "files", "that", "are", "identical", "to", "ones", "already", "considered", "." ]
python
train
40.179104
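The `deduplicate` record above relies on a two-stage check: bucket files by size, prune the buckets with a hash of only the first INITIAL_HASH_SIZE bytes, then confirm real duplicates with a full-file hash. A minimal standalone sketch of that idea follows, using only the standard library; the prefix and chunk sizes are assumed stand-ins for INITIAL_HASH_SIZE and MAIN_READ_CHUNK_SIZE, not values taken from the record.

import hashlib
import os
from collections import defaultdict
from typing import Dict, List, Optional

PARTIAL_BYTES = 1024        # assumed stand-in for INITIAL_HASH_SIZE
CHUNK_BYTES = 1024 * 1024   # assumed stand-in for MAIN_READ_CHUNK_SIZE


def _digest(path: str, limit: Optional[int] = None) -> bytes:
    """MD5 of the first `limit` bytes, or of the whole file when limit is None."""
    hasher = hashlib.md5()
    with open(path, 'rb') as f:
        if limit is not None:
            hasher.update(f.read(limit))
        else:
            while True:
                block = f.read(CHUNK_BYTES)
                if not block:
                    break
                hasher.update(block)
    return hasher.digest()


def find_duplicates(paths: List[str]) -> List[List[str]]:
    """Return groups of byte-identical files among `paths`."""
    by_size: Dict[int, List[str]] = defaultdict(list)
    for p in paths:
        if os.path.isfile(p):
            by_size[os.path.getsize(p)].append(p)

    groups: List[List[str]] = []
    for same_size in by_size.values():
        if len(same_size) < 2:
            continue
        # Stage 1: cheap prefix hash narrows the candidates.
        by_prefix: Dict[bytes, List[str]] = defaultdict(list)
        for p in same_size:
            by_prefix[_digest(p, PARTIAL_BYTES)].append(p)
        # Stage 2: full hash confirms byte-identical files.
        for candidates in by_prefix.values():
            if len(candidates) < 2:
                continue
            by_full: Dict[bytes, List[str]] = defaultdict(list)
            for p in candidates:
                by_full[_digest(p)].append(p)
            groups.extend(g for g in by_full.values() if len(g) > 1)
    return groups

Only files that survive both the size bucket and the prefix-hash bucket are ever read in full, so I/O stays roughly proportional to the number of genuine duplicates.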
pinterest/pymemcache
pymemcache/client/base.py
https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L361-L380
def replace(self, key, value, expire=0, noreply=None): """ The memcached "replace" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If noreply is True, always returns True. Otherwise returns True if the value was stored and False if it wasn't (because the key didn't already exist). """ if noreply is None: noreply = self.default_noreply return self._store_cmd(b'replace', {key: value}, expire, noreply)[key]
[ "def", "replace", "(", "self", ",", "key", ",", "value", ",", "expire", "=", "0", ",", "noreply", "=", "None", ")", ":", "if", "noreply", "is", "None", ":", "noreply", "=", "self", ".", "default_noreply", "return", "self", ".", "_store_cmd", "(", "b'replace'", ",", "{", "key", ":", "value", "}", ",", "expire", ",", "noreply", ")", "[", "key", "]" ]
The memcached "replace" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If noreply is True, always returns True. Otherwise returns True if the value was stored and False if it wasn't (because the key didn't already exist).
[ "The", "memcached", "replace", "command", "." ]
python
train
41.45
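The pymemcache `replace` record above stores a value only when the key already exists, and it can report that outcome only when the call waits for a reply. A minimal usage sketch; the server address and key names are assumptions, and a reachable memcached instance is required.

from pymemcache.client.base import Client

# Assumes a memcached server listening on localhost:11211.
client = Client(('localhost', 11211))

client.set('greeting', 'hello', noreply=False)              # key now exists
stored = client.replace('greeting', 'hi', noreply=False)    # True: value was replaced
missed = client.replace('missing', 'hi', noreply=False)     # False: replace never creates keys
print(stored, missed, client.get('greeting'))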
Unidata/MetPy
metpy/plots/ctables.py
https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/plots/ctables.py#L167-L179
def add_colortable(self, fobj, name): r"""Add a color table from a file to the registry. Parameters ---------- fobj : file-like object The file to read the color table from name : str The name under which the color table will be stored """ self[name] = read_colortable(fobj) self[name + '_r'] = self[name][::-1]
[ "def", "add_colortable", "(", "self", ",", "fobj", ",", "name", ")", ":", "self", "[", "name", "]", "=", "read_colortable", "(", "fobj", ")", "self", "[", "name", "+", "'_r'", "]", "=", "self", "[", "name", "]", "[", ":", ":", "-", "1", "]" ]
r"""Add a color table from a file to the registry. Parameters ---------- fobj : file-like object The file to read the color table from name : str The name under which the color table will be stored
[ "r", "Add", "a", "color", "table", "from", "a", "file", "to", "the", "registry", "." ]
python
train
29.923077
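The MetPy `add_colortable` record above registers a table under the given name and a reversed copy under the same name with an `_r` suffix. A hedged usage sketch; the shared registry at `metpy.plots.ctables.registry`, its `get_colortable` accessor, and the file path are assumptions rather than details confirmed by the record.

from metpy.plots import ctables

# 'my_rainbow.tbl' is a placeholder path to a file in MetPy's colortable text format.
with open('my_rainbow.tbl') as fobj:
    ctables.registry.add_colortable(fobj, 'my_rainbow')

cmap = ctables.registry.get_colortable('my_rainbow')      # the table as registered
cmap_r = ctables.registry.get_colortable('my_rainbow_r')  # reversed copy added automatically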
jsvine/spectra
spectra/core.py
https://github.com/jsvine/spectra/blob/2269a0ae9b5923154b15bd661fb81179608f7ec2/spectra/core.py#L223-L238
def range(self, count): """ Create a list of colors evenly spaced along this scale's domain. :param int count: The number of colors to return. :rtype: list :returns: A list of spectra.Color objects. """ if count <= 1: raise ValueError("Range size must be greater than 1.") dom = self._domain distance = dom[-1] - dom[0] props = [ self(dom[0] + distance * float(x)/(count-1)) for x in range(count) ] return props
[ "def", "range", "(", "self", ",", "count", ")", ":", "if", "count", "<=", "1", ":", "raise", "ValueError", "(", "\"Range size must be greater than 1.\"", ")", "dom", "=", "self", ".", "_domain", "distance", "=", "dom", "[", "-", "1", "]", "-", "dom", "[", "0", "]", "props", "=", "[", "self", "(", "dom", "[", "0", "]", "+", "distance", "*", "float", "(", "x", ")", "/", "(", "count", "-", "1", ")", ")", "for", "x", "in", "range", "(", "count", ")", "]", "return", "props" ]
Create a list of colors evenly spaced along this scale's domain. :param int count: The number of colors to return. :rtype: list :returns: A list of spectra.Color objects.
[ "Create", "a", "list", "of", "colors", "evenly", "spaced", "along", "this", "scale", "s", "domain", "." ]
python
train
31.875
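The spectra `range` record above returns evenly spaced colors across a scale's domain and rejects counts of 1 or less. A usage sketch, assuming the package's `scale()` constructor and the `hexcode` attribute on `spectra.Color`; the colour names are arbitrary.

import spectra

scale = spectra.scale(['red', 'blue'])   # two-colour scale over the default [0, 1] domain
colors = scale.range(5)                  # five evenly spaced spectra.Color objects
print([c.hexcode for c in colors])       # endpoints should come out as red and blue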
hazelcast/hazelcast-python-client
hazelcast/config.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/config.py#L354-L363
def set_custom_serializer(self, _type, serializer): """ Assign a serializer for the type. :param _type: (Type), the target type of the serializer :param serializer: (Serializer), Custom Serializer constructor function """ validate_type(_type) validate_serializer(serializer, StreamSerializer) self._custom_serializers[_type] = serializer
[ "def", "set_custom_serializer", "(", "self", ",", "_type", ",", "serializer", ")", ":", "validate_type", "(", "_type", ")", "validate_serializer", "(", "serializer", ",", "StreamSerializer", ")", "self", ".", "_custom_serializers", "[", "_type", "]", "=", "serializer" ]
Assign a serializer for the type. :param _type: (Type), the target type of the serializer :param serializer: (Serializer), Custom Serializer constructor function
[ "Assign", "a", "serializer", "for", "the", "type", "." ]
python
train
39.3
stevearc/dynamo3
dynamo3/connection.py
https://github.com/stevearc/dynamo3/blob/f897c40ece28586272dbcab8f0d99a14a1831dda/dynamo3/connection.py#L751-L779
def batch_write(self, tablename, return_capacity=None, return_item_collection_metrics=NONE): """ Perform a batch write on a table Parameters ---------- tablename : str Name of the table to write to return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) return_item_collection_metrics : (NONE, SIZE), optional SIZE will return statistics about item collections that were modified. Examples -------- .. code-block:: python with connection.batch_write('mytable') as batch: batch.put({'id': 'id1', 'foo': 'bar'}) batch.delete({'id': 'oldid'}) """ return_capacity = self._default_capacity(return_capacity) return BatchWriter(self, tablename, return_capacity=return_capacity, return_item_collection_metrics=return_item_collection_metrics)
[ "def", "batch_write", "(", "self", ",", "tablename", ",", "return_capacity", "=", "None", ",", "return_item_collection_metrics", "=", "NONE", ")", ":", "return_capacity", "=", "self", ".", "_default_capacity", "(", "return_capacity", ")", "return", "BatchWriter", "(", "self", ",", "tablename", ",", "return_capacity", "=", "return_capacity", ",", "return_item_collection_metrics", "=", "return_item_collection_metrics", ")" ]
Perform a batch write on a table Parameters ---------- tablename : str Name of the table to write to return_capacity : {NONE, INDEXES, TOTAL}, optional INDEXES will return the consumed capacity for indexes, TOTAL will return the consumed capacity for the table and the indexes. (default NONE) return_item_collection_metrics : (NONE, SIZE), optional SIZE will return statistics about item collections that were modified. Examples -------- .. code-block:: python with connection.batch_write('mytable') as batch: batch.put({'id': 'id1', 'foo': 'bar'}) batch.delete({'id': 'oldid'})
[ "Perform", "a", "batch", "write", "on", "a", "table" ]
python
train
38.344828
HazyResearch/fonduer
src/fonduer/utils/data_model_utils/tabular.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/utils/data_model_utils/tabular.py#L152-L174
def get_sentence_ngrams(mention, attrib="words", n_min=1, n_max=1, lower=True): """Get the ngrams that are in the Sentence of the given Mention, not including itself. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose Sentence is being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams """ spans = _to_spans(mention) for span in spans: for ngram in get_left_ngrams( span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram for ngram in get_right_ngrams( span, window=100, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower ): yield ngram
[ "def", "get_sentence_ngrams", "(", "mention", ",", "attrib", "=", "\"words\"", ",", "n_min", "=", "1", ",", "n_max", "=", "1", ",", "lower", "=", "True", ")", ":", "spans", "=", "_to_spans", "(", "mention", ")", "for", "span", "in", "spans", ":", "for", "ngram", "in", "get_left_ngrams", "(", "span", ",", "window", "=", "100", ",", "attrib", "=", "attrib", ",", "n_min", "=", "n_min", ",", "n_max", "=", "n_max", ",", "lower", "=", "lower", ")", ":", "yield", "ngram", "for", "ngram", "in", "get_right_ngrams", "(", "span", ",", "window", "=", "100", ",", "attrib", "=", "attrib", ",", "n_min", "=", "n_min", ",", "n_max", "=", "n_max", ",", "lower", "=", "lower", ")", ":", "yield", "ngram" ]
Get the ngrams that are in the Sentence of the given Mention, not including itself. Note that if a candidate is passed in, all of its Mentions will be searched. :param mention: The Mention whose Sentence is being searched :param attrib: The token attribute type (e.g. words, lemmas, poses) :param n_min: The minimum n of the ngrams that should be returned :param n_max: The maximum n of the ngrams that should be returned :param lower: If True, all ngrams will be returned in lower case :rtype: a *generator* of ngrams
[ "Get", "the", "ngrams", "that", "are", "in", "the", "Sentence", "of", "the", "given", "Mention", "not", "including", "itself", "." ]
python
train
43.130435
proycon/clam
clam/common/data.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L1637-L1660
def fromxml(node): """Static method returning an MetaField instance (any subclass of AbstractMetaField) from the given XML description. Node can be a string or an etree._Element.""" if not isinstance(node,ElementTree._Element): #pylint: disable=protected-access node = parsexmlstring(node) if node.tag.lower() != 'meta': raise Exception("Expected meta tag but got '" + node.tag + "' instead") key = node.attrib['id'] if node.text: value = node.text else: value = None operator = 'set' if 'operator' in node.attrib: operator= node.attrib['operator'] if operator == 'set': cls = SetMetaField elif operator == 'unset': cls = UnsetMetaField elif operator == 'copy': cls = CopyMetaField elif operator == 'parameter': cls = ParameterMetaField return cls(key, value)
[ "def", "fromxml", "(", "node", ")", ":", "if", "not", "isinstance", "(", "node", ",", "ElementTree", ".", "_Element", ")", ":", "#pylint: disable=protected-access", "node", "=", "parsexmlstring", "(", "node", ")", "if", "node", ".", "tag", ".", "lower", "(", ")", "!=", "'meta'", ":", "raise", "Exception", "(", "\"Expected meta tag but got '\"", "+", "node", ".", "tag", "+", "\"' instead\"", ")", "key", "=", "node", ".", "attrib", "[", "'id'", "]", "if", "node", ".", "text", ":", "value", "=", "node", ".", "text", "else", ":", "value", "=", "None", "operator", "=", "'set'", "if", "'operator'", "in", "node", ".", "attrib", ":", "operator", "=", "node", ".", "attrib", "[", "'operator'", "]", "if", "operator", "==", "'set'", ":", "cls", "=", "SetMetaField", "elif", "operator", "==", "'unset'", ":", "cls", "=", "UnsetMetaField", "elif", "operator", "==", "'copy'", ":", "cls", "=", "CopyMetaField", "elif", "operator", "==", "'parameter'", ":", "cls", "=", "ParameterMetaField", "return", "cls", "(", "key", ",", "value", ")" ]
Static method returning a MetaField instance (any subclass of AbstractMetaField) from the given XML description. Node can be a string or an etree._Element.
[ "Static", "method", "returning", "an", "MetaField", "instance", "(", "any", "subclass", "of", "AbstractMetaField", ")", "from", "the", "given", "XML", "description", ".", "Node", "can", "be", "a", "string", "or", "an", "etree", ".", "_Element", "." ]
python
train
39.541667
kensho-technologies/graphql-compiler
graphql_compiler/compiler/expressions.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/expressions.py#L185-L204
def to_match(self): """Return a unicode object with the MATCH representation of this Variable.""" self.validate() # We don't want the dollar sign as part of the variable name. variable_with_no_dollar_sign = self.variable_name[1:] match_variable_name = '{%s}' % (six.text_type(variable_with_no_dollar_sign),) # We can't directly pass a Date or DateTime object, so we have to pass it as a string # and then parse it inline. For date format parameter meanings, see: # http://docs.oracle.com/javase/7/docs/api/java/text/SimpleDateFormat.html # For the semantics of the date() OrientDB SQL function, see: # http://orientdb.com/docs/last/SQL-Functions.html#date if GraphQLDate.is_same_type(self.inferred_type): return u'date(%s, "%s")' % (match_variable_name, STANDARD_DATE_FORMAT) elif GraphQLDateTime.is_same_type(self.inferred_type): return u'date(%s, "%s")' % (match_variable_name, STANDARD_DATETIME_FORMAT) else: return match_variable_name
[ "def", "to_match", "(", "self", ")", ":", "self", ".", "validate", "(", ")", "# We don't want the dollar sign as part of the variable name.", "variable_with_no_dollar_sign", "=", "self", ".", "variable_name", "[", "1", ":", "]", "match_variable_name", "=", "'{%s}'", "%", "(", "six", ".", "text_type", "(", "variable_with_no_dollar_sign", ")", ",", ")", "# We can't directly pass a Date or DateTime object, so we have to pass it as a string", "# and then parse it inline. For date format parameter meanings, see:", "# http://docs.oracle.com/javase/7/docs/api/java/text/SimpleDateFormat.html", "# For the semantics of the date() OrientDB SQL function, see:", "# http://orientdb.com/docs/last/SQL-Functions.html#date", "if", "GraphQLDate", ".", "is_same_type", "(", "self", ".", "inferred_type", ")", ":", "return", "u'date(%s, \"%s\")'", "%", "(", "match_variable_name", ",", "STANDARD_DATE_FORMAT", ")", "elif", "GraphQLDateTime", ".", "is_same_type", "(", "self", ".", "inferred_type", ")", ":", "return", "u'date(%s, \"%s\")'", "%", "(", "match_variable_name", ",", "STANDARD_DATETIME_FORMAT", ")", "else", ":", "return", "match_variable_name" ]
Return a unicode object with the MATCH representation of this Variable.
[ "Return", "a", "unicode", "object", "with", "the", "MATCH", "representation", "of", "this", "Variable", "." ]
python
train
53.1
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1207-L1229
def rpc_get_historic_names_by_address(self, address, offset, count, **con_info): """ Get the list of names owned by an address throughout history Return {'status': True, 'names': [{'name': ..., 'block_id': ..., 'vtxindex': ...}]} on success Return {'error': ...} on error """ if not check_address(address): return {'error': 'Invalid address', 'http_status': 400} if not check_offset(offset): return {'error': 'invalid offset', 'http_status': 400} if not check_count(count, 10): return {'error': 'invalid count', 'http_status': 400} db = get_db_state(self.working_dir) names = db.get_historic_names_by_address(address, offset, count) db.close() if names is None: names = [] return self.success_response( {'names': names} )
[ "def", "rpc_get_historic_names_by_address", "(", "self", ",", "address", ",", "offset", ",", "count", ",", "*", "*", "con_info", ")", ":", "if", "not", "check_address", "(", "address", ")", ":", "return", "{", "'error'", ":", "'Invalid address'", ",", "'http_status'", ":", "400", "}", "if", "not", "check_offset", "(", "offset", ")", ":", "return", "{", "'error'", ":", "'invalid offset'", ",", "'http_status'", ":", "400", "}", "if", "not", "check_count", "(", "count", ",", "10", ")", ":", "return", "{", "'error'", ":", "'invalid count'", ",", "'http_status'", ":", "400", "}", "db", "=", "get_db_state", "(", "self", ".", "working_dir", ")", "names", "=", "db", ".", "get_historic_names_by_address", "(", "address", ",", "offset", ",", "count", ")", "db", ".", "close", "(", ")", "if", "names", "is", "None", ":", "names", "=", "[", "]", "return", "self", ".", "success_response", "(", "{", "'names'", ":", "names", "}", ")" ]
Get the list of names owned by an address throughout history Return {'status': True, 'names': [{'name': ..., 'block_id': ..., 'vtxindex': ...}]} on success Return {'error': ...} on error
[ "Get", "the", "list", "of", "names", "owned", "by", "an", "address", "throughout", "history", "Return", "{", "status", ":", "True", "names", ":", "[", "{", "name", ":", "...", "block_id", ":", "...", "vtxindex", ":", "...", "}", "]", "}", "on", "success", "Return", "{", "error", ":", "...", "}", "on", "error" ]
python
train
37.217391
Opentrons/opentrons
api/src/opentrons/system/udev.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/system/udev.py#L7-L35
def setup_rules_file(): """ Copy the udev rules file for Opentrons Modules to opentrons_data directory and trigger the new rules. This rules file in opentrons_data is symlinked into udev rules directory TODO: Move this file to resources and move the symlink to point to /data/system/ """ import shutil import subprocess rules_file = os.path.join( os.path.abspath(os.path.dirname(__file__)), '..', 'config', 'modules', '95-opentrons-modules.rules') shutil.copy2( rules_file, '/data/user_storage/opentrons_data/95-opentrons-modules.rules') res0 = subprocess.run('udevadm control --reload-rules', shell=True, stdout=subprocess.PIPE).stdout.decode() if res0: log.warning(res0.strip()) res1 = subprocess.run('udevadm trigger', shell=True, stdout=subprocess.PIPE).stdout.decode() if res1: log.warning(res1.strip())
[ "def", "setup_rules_file", "(", ")", ":", "import", "shutil", "import", "subprocess", "rules_file", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", ",", "'..'", ",", "'config'", ",", "'modules'", ",", "'95-opentrons-modules.rules'", ")", "shutil", ".", "copy2", "(", "rules_file", ",", "'/data/user_storage/opentrons_data/95-opentrons-modules.rules'", ")", "res0", "=", "subprocess", ".", "run", "(", "'udevadm control --reload-rules'", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "stdout", ".", "decode", "(", ")", "if", "res0", ":", "log", ".", "warning", "(", "res0", ".", "strip", "(", ")", ")", "res1", "=", "subprocess", ".", "run", "(", "'udevadm trigger'", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "stdout", ".", "decode", "(", ")", "if", "res1", ":", "log", ".", "warning", "(", "res1", ".", "strip", "(", ")", ")" ]
Copy the udev rules file for Opentrons Modules to opentrons_data directory and trigger the new rules. This rules file in opentrons_data is symlinked into udev rules directory TODO: Move this file to resources and move the symlink to point to /data/system/
[ "Copy", "the", "udev", "rules", "file", "for", "Opentrons", "Modules", "to", "opentrons_data", "directory", "and", "trigger", "the", "new", "rules", ".", "This", "rules", "file", "in", "opentrons_data", "is", "symlinked", "into", "udev", "rules", "directory" ]
python
train
32.586207
cogniteev/docido-python-sdk
docido_sdk/core.py
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/core.py#L206-L210
def is_enabled(self, cls): """Return whether the given component class is enabled.""" if cls not in self.enabled: self.enabled[cls] = self.is_component_enabled(cls) return self.enabled[cls]
[ "def", "is_enabled", "(", "self", ",", "cls", ")", ":", "if", "cls", "not", "in", "self", ".", "enabled", ":", "self", ".", "enabled", "[", "cls", "]", "=", "self", ".", "is_component_enabled", "(", "cls", ")", "return", "self", ".", "enabled", "[", "cls", "]" ]
Return whether the given component class is enabled.
[ "Return", "whether", "the", "given", "component", "class", "is", "enabled", "." ]
python
train
44.2
astropy/photutils
photutils/aperture/core.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/core.py#L674-L761
def _prepare_photometry_input(data, error, mask, wcs, unit): """ Parse the inputs to `aperture_photometry`. `aperture_photometry` accepts a wide range of inputs, e.g. ``data`` could be a numpy array, a Quantity array, or a fits HDU. This requires some parsing and validation to ensure that all inputs are complete and consistent. For example, the data could carry a unit and the wcs itself, so we need to check that it is consistent with the unit and wcs given as input parameters. """ if isinstance(data, fits.HDUList): for i in range(len(data)): if data[i].data is not None: warnings.warn("Input data is a HDUList object, photometry is " "run only for the {0} HDU." .format(i), AstropyUserWarning) data = data[i] break if isinstance(data, (fits.PrimaryHDU, fits.ImageHDU)): header = data.header data = data.data if 'BUNIT' in header: bunit = u.Unit(header['BUNIT'], parse_strict='warn') if isinstance(bunit, u.UnrecognizedUnit): warnings.warn('The BUNIT in the header of the input data is ' 'not parseable as a valid unit.', AstropyUserWarning) else: data = u.Quantity(data, unit=bunit) if wcs is None: try: wcs = WCS(header) except Exception: # A valid WCS was not found in the header. Let the calling # application raise an exception if it needs a WCS. pass data = np.asanyarray(data) if data.ndim != 2: raise ValueError('data must be a 2D array.') if unit is not None: unit = u.Unit(unit, parse_strict='warn') if isinstance(unit, u.UnrecognizedUnit): warnings.warn('The input unit is not parseable as a valid ' 'unit.', AstropyUserWarning) unit = None if isinstance(data, u.Quantity): if unit is not None and data.unit != unit: warnings.warn('The input unit does not agree with the data ' 'unit.', AstropyUserWarning) else: if unit is not None: data = u.Quantity(data, unit=unit) if error is not None: if isinstance(error, u.Quantity): if unit is not None and error.unit != unit: warnings.warn('The input unit does not agree with the error ' 'unit.', AstropyUserWarning) if np.isscalar(error.value): error = u.Quantity(np.broadcast_arrays(error, data), unit=error.unit)[0] else: if np.isscalar(error): error = np.broadcast_arrays(error, data)[0] if unit is not None: error = u.Quantity(error, unit=unit) error = np.asanyarray(error) if error.shape != data.shape: raise ValueError('error and data must have the same shape.') if mask is not None: mask = np.asanyarray(mask) if mask.shape != data.shape: raise ValueError('mask and data must have the same shape.') return data, error, mask, wcs
[ "def", "_prepare_photometry_input", "(", "data", ",", "error", ",", "mask", ",", "wcs", ",", "unit", ")", ":", "if", "isinstance", "(", "data", ",", "fits", ".", "HDUList", ")", ":", "for", "i", "in", "range", "(", "len", "(", "data", ")", ")", ":", "if", "data", "[", "i", "]", ".", "data", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"Input data is a HDUList object, photometry is \"", "\"run only for the {0} HDU.\"", ".", "format", "(", "i", ")", ",", "AstropyUserWarning", ")", "data", "=", "data", "[", "i", "]", "break", "if", "isinstance", "(", "data", ",", "(", "fits", ".", "PrimaryHDU", ",", "fits", ".", "ImageHDU", ")", ")", ":", "header", "=", "data", ".", "header", "data", "=", "data", ".", "data", "if", "'BUNIT'", "in", "header", ":", "bunit", "=", "u", ".", "Unit", "(", "header", "[", "'BUNIT'", "]", ",", "parse_strict", "=", "'warn'", ")", "if", "isinstance", "(", "bunit", ",", "u", ".", "UnrecognizedUnit", ")", ":", "warnings", ".", "warn", "(", "'The BUNIT in the header of the input data is '", "'not parseable as a valid unit.'", ",", "AstropyUserWarning", ")", "else", ":", "data", "=", "u", ".", "Quantity", "(", "data", ",", "unit", "=", "bunit", ")", "if", "wcs", "is", "None", ":", "try", ":", "wcs", "=", "WCS", "(", "header", ")", "except", "Exception", ":", "# A valid WCS was not found in the header. Let the calling", "# application raise an exception if it needs a WCS.", "pass", "data", "=", "np", ".", "asanyarray", "(", "data", ")", "if", "data", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "'data must be a 2D array.'", ")", "if", "unit", "is", "not", "None", ":", "unit", "=", "u", ".", "Unit", "(", "unit", ",", "parse_strict", "=", "'warn'", ")", "if", "isinstance", "(", "unit", ",", "u", ".", "UnrecognizedUnit", ")", ":", "warnings", ".", "warn", "(", "'The input unit is not parseable as a valid '", "'unit.'", ",", "AstropyUserWarning", ")", "unit", "=", "None", "if", "isinstance", "(", "data", ",", "u", ".", "Quantity", ")", ":", "if", "unit", "is", "not", "None", "and", "data", ".", "unit", "!=", "unit", ":", "warnings", ".", "warn", "(", "'The input unit does not agree with the data '", "'unit.'", ",", "AstropyUserWarning", ")", "else", ":", "if", "unit", "is", "not", "None", ":", "data", "=", "u", ".", "Quantity", "(", "data", ",", "unit", "=", "unit", ")", "if", "error", "is", "not", "None", ":", "if", "isinstance", "(", "error", ",", "u", ".", "Quantity", ")", ":", "if", "unit", "is", "not", "None", "and", "error", ".", "unit", "!=", "unit", ":", "warnings", ".", "warn", "(", "'The input unit does not agree with the error '", "'unit.'", ",", "AstropyUserWarning", ")", "if", "np", ".", "isscalar", "(", "error", ".", "value", ")", ":", "error", "=", "u", ".", "Quantity", "(", "np", ".", "broadcast_arrays", "(", "error", ",", "data", ")", ",", "unit", "=", "error", ".", "unit", ")", "[", "0", "]", "else", ":", "if", "np", ".", "isscalar", "(", "error", ")", ":", "error", "=", "np", ".", "broadcast_arrays", "(", "error", ",", "data", ")", "[", "0", "]", "if", "unit", "is", "not", "None", ":", "error", "=", "u", ".", "Quantity", "(", "error", ",", "unit", "=", "unit", ")", "error", "=", "np", ".", "asanyarray", "(", "error", ")", "if", "error", ".", "shape", "!=", "data", ".", "shape", ":", "raise", "ValueError", "(", "'error and data must have the same shape.'", ")", "if", "mask", "is", "not", "None", ":", "mask", "=", "np", ".", "asanyarray", "(", "mask", ")", "if", "mask", ".", "shape", "!=", "data", ".", "shape", ":", "raise", 
"ValueError", "(", "'mask and data must have the same shape.'", ")", "return", "data", ",", "error", ",", "mask", ",", "wcs" ]
Parse the inputs to `aperture_photometry`. `aperture_photometry` accepts a wide range of inputs, e.g. ``data`` could be a numpy array, a Quantity array, or a fits HDU. This requires some parsing and validation to ensure that all inputs are complete and consistent. For example, the data could carry a unit and the wcs itself, so we need to check that it is consistent with the unit and wcs given as input parameters.
[ "Parse", "the", "inputs", "to", "aperture_photometry", "." ]
python
train
36.852273
nyrkovalex/httpsrv
httpsrv/httpsrv.py
https://github.com/nyrkovalex/httpsrv/blob/0acc3298be56856f73bda1ed10c9ab5153894b01/httpsrv/httpsrv.py#L188-L212
def always(self, method, path=None, headers=None, text=None, json=None): ''' Sends response every time matching parameters are found util :func:`Server.reset` is called :type method: str :param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string :type path: str :param path: request path including query parameters :type headers: dict :param headers: dictionary of headers to expect. If omitted any headers will do :type text: str :param text: request text to expect. If ommited any text will match :type json: dict :param json: request json to expect. If ommited any json will match, if present text param will be ignored :rtype: Rule :returns: newly created expectation rule ''' rule = Rule(method, path, headers, text, json) return self._add_rule_to(rule, self._always_rules)
[ "def", "always", "(", "self", ",", "method", ",", "path", "=", "None", ",", "headers", "=", "None", ",", "text", "=", "None", ",", "json", "=", "None", ")", ":", "rule", "=", "Rule", "(", "method", ",", "path", ",", "headers", ",", "text", ",", "json", ")", "return", "self", ".", "_add_rule_to", "(", "rule", ",", "self", ".", "_always_rules", ")" ]
Sends response every time matching parameters are found until
:func:`Server.reset` is called

:type method: str
:param method: request method: ``'GET'``, ``'POST'``, etc. can be some custom string

:type path: str
:param path: request path including query parameters

:type headers: dict
:param headers: dictionary of headers to expect. If omitted any headers will do

:type text: str
:param text: request text to expect. If omitted any text will match

:type json: dict
:param json: request json to expect. If omitted any json will match, if present text param will be ignored

:rtype: Rule
:returns: newly created expectation rule
[ "Sends", "response", "every", "time", "matching", "parameters", "are", "found", "util", ":", "func", ":", "Server", ".", "reset", "is", "called" ]
python
train
37.36
fitnr/convertdate
convertdate/persian.py
https://github.com/fitnr/convertdate/blob/e920f168a87f99183b0aa7290d6c3af222582d43/convertdate/persian.py#L33-L48
def to_jd(year, month, day): '''Determine Julian day from Persian date''' if year >= 0: y = 474 else: y = 473 epbase = year - y epyear = 474 + (epbase % 2820) if month <= 7: m = (month - 1) * 31 else: m = (month - 1) * 30 + 6 return day + m + trunc(((epyear * 682) - 110) / 2816) + (epyear - 1) * 365 + trunc(epbase / 2820) * 1029983 + (EPOCH - 1)
[ "def", "to_jd", "(", "year", ",", "month", ",", "day", ")", ":", "if", "year", ">=", "0", ":", "y", "=", "474", "else", ":", "y", "=", "473", "epbase", "=", "year", "-", "y", "epyear", "=", "474", "+", "(", "epbase", "%", "2820", ")", "if", "month", "<=", "7", ":", "m", "=", "(", "month", "-", "1", ")", "*", "31", "else", ":", "m", "=", "(", "month", "-", "1", ")", "*", "30", "+", "6", "return", "day", "+", "m", "+", "trunc", "(", "(", "(", "epyear", "*", "682", ")", "-", "110", ")", "/", "2816", ")", "+", "(", "epyear", "-", "1", ")", "*", "365", "+", "trunc", "(", "epbase", "/", "2820", ")", "*", "1029983", "+", "(", "EPOCH", "-", "1", ")" ]
Determine Julian day from Persian date
[ "Determine", "Julian", "day", "from", "Persian", "date" ]
python
train
24.9375
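The convertdate `to_jd` record above maps a Persian (Jalali) date to a Julian day number, switching the month offset at month 7 because the first six Persian months have 31 days. A usage sketch; the companion `from_jd` used for the round trip is an assumption, though most convertdate calendar modules pair the two.

from convertdate import persian

jd = persian.to_jd(1398, 1, 1)     # 1 Farvardin 1398 -> Julian day number
print(jd)

# Round trip, assuming from_jd exists alongside to_jd:
print(persian.from_jd(jd))         # expected to recover (1398, 1, 1)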
saltstack/salt
salt/netapi/rest_cherrypy/app.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_cherrypy/app.py#L921-L933
def hypermedia_out(): ''' Determine the best handler for the requested content type Wrap the normal handler and transform the output from that handler into the requested content type ''' request = cherrypy.serving.request request._hypermedia_inner_handler = request.handler # If handler has been explicitly set to None, don't override. if request.handler is not None: request.handler = hypermedia_handler
[ "def", "hypermedia_out", "(", ")", ":", "request", "=", "cherrypy", ".", "serving", ".", "request", "request", ".", "_hypermedia_inner_handler", "=", "request", ".", "handler", "# If handler has been explicitly set to None, don't override.", "if", "request", ".", "handler", "is", "not", "None", ":", "request", ".", "handler", "=", "hypermedia_handler" ]
Determine the best handler for the requested content type Wrap the normal handler and transform the output from that handler into the requested content type
[ "Determine", "the", "best", "handler", "for", "the", "requested", "content", "type" ]
python
train
33.692308
carta/ldap_tools
src/ldap_tools/audit.py
https://github.com/carta/ldap_tools/blob/7c039304a5abaf836c7afc35cf068b4471306264/src/ldap_tools/audit.py#L100-L105
def raw(config): # pragma: no cover """Dump the contents of LDAP to console in raw format.""" client = Client() client.prepare_connection() audit_api = API(client) print(audit_api.raw())
[ "def", "raw", "(", "config", ")", ":", "# pragma: no cover", "client", "=", "Client", "(", ")", "client", ".", "prepare_connection", "(", ")", "audit_api", "=", "API", "(", "client", ")", "print", "(", "audit_api", ".", "raw", "(", ")", ")" ]
Dump the contents of LDAP to console in raw format.
[ "Dump", "the", "contents", "of", "LDAP", "to", "console", "in", "raw", "format", "." ]
python
train
37
MainRo/cyclotron-py
cyclotron/router.py
https://github.com/MainRo/cyclotron-py/blob/4530f65173aa4b9e27c3d4a2f5d33900fc19f754/cyclotron/router.py#L5-L59
def make_crossroad_router(source, drain=False): ''' legacy crossroad implementation. deprecated ''' sink_observer = None def on_sink_subscribe(observer): nonlocal sink_observer sink_observer = observer def dispose(): nonlocal sink_observer sink_observer = None return dispose def route_crossroad(request): def on_response_subscribe(observer): def on_next_source(i): if type(i) is cyclotron.Drain: observer.on_completed() else: observer.on_next(i) source_disposable = source.subscribe( on_next=on_next_source, on_error=lambda e: observer.on_error(e), on_completed=lambda: observer.on_completed() ) def on_next_request(i): if sink_observer is not None: sink_observer.on_next(i) def on_request_completed(): if sink_observer is not None: if drain is True: sink_observer.on_next(cyclotron.Drain()) else: sink_observer.on_completed() request_disposable = request.subscribe( on_next=on_next_request, on_error=observer.on_error, on_completed=on_request_completed ) def dispose(): source_disposable.dispose() request_disposable.dispose() return dispose return Observable.create(on_response_subscribe) return Observable.create(on_sink_subscribe), route_crossroad
[ "def", "make_crossroad_router", "(", "source", ",", "drain", "=", "False", ")", ":", "sink_observer", "=", "None", "def", "on_sink_subscribe", "(", "observer", ")", ":", "nonlocal", "sink_observer", "sink_observer", "=", "observer", "def", "dispose", "(", ")", ":", "nonlocal", "sink_observer", "sink_observer", "=", "None", "return", "dispose", "def", "route_crossroad", "(", "request", ")", ":", "def", "on_response_subscribe", "(", "observer", ")", ":", "def", "on_next_source", "(", "i", ")", ":", "if", "type", "(", "i", ")", "is", "cyclotron", ".", "Drain", ":", "observer", ".", "on_completed", "(", ")", "else", ":", "observer", ".", "on_next", "(", "i", ")", "source_disposable", "=", "source", ".", "subscribe", "(", "on_next", "=", "on_next_source", ",", "on_error", "=", "lambda", "e", ":", "observer", ".", "on_error", "(", "e", ")", ",", "on_completed", "=", "lambda", ":", "observer", ".", "on_completed", "(", ")", ")", "def", "on_next_request", "(", "i", ")", ":", "if", "sink_observer", "is", "not", "None", ":", "sink_observer", ".", "on_next", "(", "i", ")", "def", "on_request_completed", "(", ")", ":", "if", "sink_observer", "is", "not", "None", ":", "if", "drain", "is", "True", ":", "sink_observer", ".", "on_next", "(", "cyclotron", ".", "Drain", "(", ")", ")", "else", ":", "sink_observer", ".", "on_completed", "(", ")", "request_disposable", "=", "request", ".", "subscribe", "(", "on_next", "=", "on_next_request", ",", "on_error", "=", "observer", ".", "on_error", ",", "on_completed", "=", "on_request_completed", ")", "def", "dispose", "(", ")", ":", "source_disposable", ".", "dispose", "(", ")", "request_disposable", ".", "dispose", "(", ")", "return", "dispose", "return", "Observable", ".", "create", "(", "on_response_subscribe", ")", "return", "Observable", ".", "create", "(", "on_sink_subscribe", ")", ",", "route_crossroad" ]
legacy crossroad implementation. deprecated
[ "legacy", "crossroad", "implementation", ".", "deprecated" ]
python
train
30.072727
click-contrib/click-repl
click_repl/__init__.py
https://github.com/click-contrib/click-repl/blob/2d78dc520eb0bb5b813bad3b72344edbd22a7f4e/click_repl/__init__.py#L146-L165
def bootstrap_prompt(prompt_kwargs, group): """ Bootstrap prompt_toolkit kwargs or use user defined values. :param prompt_kwargs: The user specified prompt kwargs. """ prompt_kwargs = prompt_kwargs or {} defaults = { "history": InMemoryHistory(), "completer": ClickCompleter(group), "message": u"> ", } for key in defaults: default_value = defaults[key] if key not in prompt_kwargs: prompt_kwargs[key] = default_value return prompt_kwargs
[ "def", "bootstrap_prompt", "(", "prompt_kwargs", ",", "group", ")", ":", "prompt_kwargs", "=", "prompt_kwargs", "or", "{", "}", "defaults", "=", "{", "\"history\"", ":", "InMemoryHistory", "(", ")", ",", "\"completer\"", ":", "ClickCompleter", "(", "group", ")", ",", "\"message\"", ":", "u\"> \"", ",", "}", "for", "key", "in", "defaults", ":", "default_value", "=", "defaults", "[", "key", "]", "if", "key", "not", "in", "prompt_kwargs", ":", "prompt_kwargs", "[", "key", "]", "=", "default_value", "return", "prompt_kwargs" ]
Bootstrap prompt_toolkit kwargs or use user defined values. :param prompt_kwargs: The user specified prompt kwargs.
[ "Bootstrap", "prompt_toolkit", "kwargs", "or", "use", "user", "defined", "values", "." ]
python
train
25.55
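The click-repl `bootstrap_prompt` record above merges caller-supplied prompt kwargs over a dict of defaults, keeping whatever the caller provided. The same merge in isolation, written with plain dictionaries so the sketch runs without click or prompt_toolkit installed; the stand-in default values are assumptions for illustration only.

def merge_defaults(user_kwargs, defaults):
    """Return a dict where every key missing from user_kwargs falls back to defaults."""
    merged = dict(user_kwargs or {})
    for key, value in defaults.items():
        merged.setdefault(key, value)
    return merged


defaults = {'message': u'> ', 'history': '<InMemoryHistory>', 'completer': '<ClickCompleter>'}
print(merge_defaults({'message': u'repl> '}, defaults))
# The caller's 'message' wins; 'history' and 'completer' fall back to the defaults.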
user-cont/conu
conu/utils/__init__.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/utils/__init__.py#L389-L399
def get_oc_api_token(): """ Get token of user logged in OpenShift cluster :return: str, API token """ oc_command_exists() try: return run_cmd(["oc", "whoami", "-t"], return_output=True).rstrip() # remove '\n' except subprocess.CalledProcessError as ex: raise ConuException("oc whoami -t failed: %s" % ex)
[ "def", "get_oc_api_token", "(", ")", ":", "oc_command_exists", "(", ")", "try", ":", "return", "run_cmd", "(", "[", "\"oc\"", ",", "\"whoami\"", ",", "\"-t\"", "]", ",", "return_output", "=", "True", ")", ".", "rstrip", "(", ")", "# remove '\\n'", "except", "subprocess", ".", "CalledProcessError", "as", "ex", ":", "raise", "ConuException", "(", "\"oc whoami -t failed: %s\"", "%", "ex", ")" ]
Get token of user logged in OpenShift cluster :return: str, API token
[ "Get", "token", "of", "user", "logged", "in", "OpenShift", "cluster", ":", "return", ":", "str", "API", "token" ]
python
train
30.909091
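The conu `get_oc_api_token` record above wraps a single `oc whoami -t` invocation. An equivalent sketch using only the standard library; it requires the `oc` client on PATH plus an active login, and the RuntimeError stands in for the record's ConuException.

import subprocess


def get_oc_api_token():
    """Return the API token of the user currently logged in via `oc`."""
    try:
        proc = subprocess.run(
            ['oc', 'whoami', '-t'],
            check=True,
            stdout=subprocess.PIPE,
        )
    except (OSError, subprocess.CalledProcessError) as exc:
        raise RuntimeError('oc whoami -t failed: %s' % exc)
    return proc.stdout.decode().rstrip()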
openstack/horizon
openstack_dashboard/api/neutron.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L615-L640
def allocate(self, pool, tenant_id=None, **params): """Allocates a floating IP to the tenant. You must provide a pool name or id for which you would like to allocate a floating IP. :returns: FloatingIp object corresponding to an allocated floating IP """ if not tenant_id: tenant_id = self.request.user.project_id create_dict = {'floating_network_id': pool, 'tenant_id': tenant_id} if 'subnet_id' in params: create_dict['subnet_id'] = params['subnet_id'] if 'floating_ip_address' in params: create_dict['floating_ip_address'] = params['floating_ip_address'] if 'description' in params: create_dict['description'] = params['description'] if 'dns_domain' in params: create_dict['dns_domain'] = params['dns_domain'] if 'dns_name' in params: create_dict['dns_name'] = params['dns_name'] fip = self.client.create_floatingip( {'floatingip': create_dict}).get('floatingip') self._set_instance_info(fip) return FloatingIp(fip)
[ "def", "allocate", "(", "self", ",", "pool", ",", "tenant_id", "=", "None", ",", "*", "*", "params", ")", ":", "if", "not", "tenant_id", ":", "tenant_id", "=", "self", ".", "request", ".", "user", ".", "project_id", "create_dict", "=", "{", "'floating_network_id'", ":", "pool", ",", "'tenant_id'", ":", "tenant_id", "}", "if", "'subnet_id'", "in", "params", ":", "create_dict", "[", "'subnet_id'", "]", "=", "params", "[", "'subnet_id'", "]", "if", "'floating_ip_address'", "in", "params", ":", "create_dict", "[", "'floating_ip_address'", "]", "=", "params", "[", "'floating_ip_address'", "]", "if", "'description'", "in", "params", ":", "create_dict", "[", "'description'", "]", "=", "params", "[", "'description'", "]", "if", "'dns_domain'", "in", "params", ":", "create_dict", "[", "'dns_domain'", "]", "=", "params", "[", "'dns_domain'", "]", "if", "'dns_name'", "in", "params", ":", "create_dict", "[", "'dns_name'", "]", "=", "params", "[", "'dns_name'", "]", "fip", "=", "self", ".", "client", ".", "create_floatingip", "(", "{", "'floatingip'", ":", "create_dict", "}", ")", ".", "get", "(", "'floatingip'", ")", "self", ".", "_set_instance_info", "(", "fip", ")", "return", "FloatingIp", "(", "fip", ")" ]
Allocates a floating IP to the tenant. You must provide a pool name or id for which you would like to allocate a floating IP. :returns: FloatingIp object corresponding to an allocated floating IP
[ "Allocates", "a", "floating", "IP", "to", "the", "tenant", "." ]
python
train
43.153846
ayust/kitnirc
kitnirc/client.py
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/client.py#L332-L341
def send(self, *args): """Sends a single raw message to the IRC server. Arguments are automatically joined by spaces. No newlines are allowed. """ msg = " ".join(a.nick if isinstance(a, User) else str(a) for a in args) if "\n" in msg: raise ValueError("Cannot send() a newline. Args: %s" % repr(args)) _log.debug("%s <-- %s", self.server.host, msg) self.socket.send(msg + "\r\n")
[ "def", "send", "(", "self", ",", "*", "args", ")", ":", "msg", "=", "\" \"", ".", "join", "(", "a", ".", "nick", "if", "isinstance", "(", "a", ",", "User", ")", "else", "str", "(", "a", ")", "for", "a", "in", "args", ")", "if", "\"\\n\"", "in", "msg", ":", "raise", "ValueError", "(", "\"Cannot send() a newline. Args: %s\"", "%", "repr", "(", "args", ")", ")", "_log", ".", "debug", "(", "\"%s <-- %s\"", ",", "self", ".", "server", ".", "host", ",", "msg", ")", "self", ".", "socket", ".", "send", "(", "msg", "+", "\"\\r\\n\"", ")" ]
Sends a single raw message to the IRC server. Arguments are automatically joined by spaces. No newlines are allowed.
[ "Sends", "a", "single", "raw", "message", "to", "the", "IRC", "server", "." ]
python
train
43.9
pycontribs/python-crowd
crowd.py
https://github.com/pycontribs/python-crowd/blob/a075e45774dd5baecf0217843cda747084268e32/crowd.py#L483-L505
def remove_user_from_group(self, username, groupname, raise_on_error=False): """Remove a user from a group Attempts to remove a user from a group Args username: The username to remove from the group. groupname: The group name to be removed from the user. Returns: True: Succeeded False: If unsuccessful """ response = self._delete(self.rest_url + "/group/user/direct",params={"username": username, "groupname": groupname}) if response.status_code == 204: return True if raise_on_error: raise RuntimeError(response.json()['message']) return False
[ "def", "remove_user_from_group", "(", "self", ",", "username", ",", "groupname", ",", "raise_on_error", "=", "False", ")", ":", "response", "=", "self", ".", "_delete", "(", "self", ".", "rest_url", "+", "\"/group/user/direct\"", ",", "params", "=", "{", "\"username\"", ":", "username", ",", "\"groupname\"", ":", "groupname", "}", ")", "if", "response", ".", "status_code", "==", "204", ":", "return", "True", "if", "raise_on_error", ":", "raise", "RuntimeError", "(", "response", ".", "json", "(", ")", "[", "'message'", "]", ")", "return", "False" ]
Remove a user from a group Attempts to remove a user from a group Args username: The username to remove from the group. groupname: The group name to be removed from the user. Returns: True: Succeeded False: If unsuccessful
[ "Remove", "a", "user", "from", "a", "group" ]
python
train
28.782609
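The python-crowd `remove_user_from_group` record above reduces to one authenticated DELETE against Crowd's user-management REST API, treating HTTP 204 as success. A hedged sketch of the same call made directly with `requests`; the base URL, application credentials, and user/group names are placeholders, not values taken from the record.

import requests

REST_URL = 'https://crowd.example.com/rest/usermanagement/1'   # placeholder deployment URL
AUTH = ('app-name', 'app-password')                            # placeholder app credentials

resp = requests.delete(
    REST_URL + '/group/user/direct',
    params={'username': 'jdoe', 'groupname': 'developers'},
    auth=AUTH,
)
removed = resp.status_code == 204   # 204 No Content signals success, as in the record
print(removed)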
nickjj/ansigenome
ansigenome/utils.py
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L303-L309
def exit_if_no_roles(roles_count, roles_path): """ Exit if there were no roles found. """ if roles_count == 0: ui.warn(c.MESSAGES["empty_roles_path"], roles_path) sys.exit()
[ "def", "exit_if_no_roles", "(", "roles_count", ",", "roles_path", ")", ":", "if", "roles_count", "==", "0", ":", "ui", ".", "warn", "(", "c", ".", "MESSAGES", "[", "\"empty_roles_path\"", "]", ",", "roles_path", ")", "sys", ".", "exit", "(", ")" ]
Exit if there were no roles found.
[ "Exit", "if", "there", "were", "no", "roles", "found", "." ]
python
train
28.428571
molmod/molmod
molmod/graphs.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1248-L1257
def get_new_edges(self, level): """Get new edges from the pattern graph for the graph search algorithm The level argument denotes the distance of the new edges from the starting vertex in the pattern graph. """ return ( self.level_edges.get(level, []), self.level_constraints.get(level, []) )
[ "def", "get_new_edges", "(", "self", ",", "level", ")", ":", "return", "(", "self", ".", "level_edges", ".", "get", "(", "level", ",", "[", "]", ")", ",", "self", ".", "level_constraints", ".", "get", "(", "level", ",", "[", "]", ")", ")" ]
Get new edges from the pattern graph for the graph search algorithm The level argument denotes the distance of the new edges from the starting vertex in the pattern graph.
[ "Get", "new", "edges", "from", "the", "pattern", "graph", "for", "the", "graph", "search", "algorithm" ]
python
train
36.2