Dataset schema (column: type, observed range):

repo              stringlengths  (7 to 55)
path              stringlengths  (4 to 223)
url               stringlengths  (87 to 315)
code              stringlengths  (75 to 104k)
code_tokens       list
docstring         stringlengths  (1 to 46.9k)
docstring_tokens  list
language          stringclasses  (1 value)
partition         stringclasses  (3 values)
avg_line_len      float64        (7.91 to 980)
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L16309-L16343
def add_disk_encryption_passwords(self, ids, passwords, clear_on_suspend):
    """Adds a password used for hard disk encryption/decryption.

    in ids of type str
        List of identifiers for the passwords. Must match the identifier
        used when the encrypted medium was created.

    in passwords of type str
        List of passwords.

    in clear_on_suspend of type bool
        Flag whether to clear the given passwords on VM suspend (due to a
        suspending host for example). The passwords must be supplied again
        before the VM can resume.

    raises :class:`VBoxErrorPasswordIncorrect`
        The password provided wasn't correct for at least one disk using
        the provided ID.

    """
    if not isinstance(ids, list):
        raise TypeError("ids can only be an instance of type list")
    for a in ids[:10]:
        if not isinstance(a, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    if not isinstance(passwords, list):
        raise TypeError("passwords can only be an instance of type list")
    for a in passwords[:10]:
        if not isinstance(a, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    if not isinstance(clear_on_suspend, bool):
        raise TypeError("clear_on_suspend can only be an instance of type bool")
    self._call("addDiskEncryptionPasswords",
               in_p=[ids, passwords, clear_on_suspend])
[ "def", "add_disk_encryption_passwords", "(", "self", ",", "ids", ",", "passwords", ",", "clear_on_suspend", ")", ":", "if", "not", "isinstance", "(", "ids", ",", "list", ")", ":", "raise", "TypeError", "(", "\"ids can only be an instance of type list\"", ")", "for", "a", "in", "ids", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type basestring\"", ")", "if", "not", "isinstance", "(", "passwords", ",", "list", ")", ":", "raise", "TypeError", "(", "\"passwords can only be an instance of type list\"", ")", "for", "a", "in", "passwords", "[", ":", "10", "]", ":", "if", "not", "isinstance", "(", "a", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"array can only contain objects of type basestring\"", ")", "if", "not", "isinstance", "(", "clear_on_suspend", ",", "bool", ")", ":", "raise", "TypeError", "(", "\"clear_on_suspend can only be an instance of type bool\"", ")", "self", ".", "_call", "(", "\"addDiskEncryptionPasswords\"", ",", "in_p", "=", "[", "ids", ",", "passwords", ",", "clear_on_suspend", "]", ")" ]
Adds a password used for hard disk encryption/decryption. in ids of type str List of identifiers for the passwords. Must match the identifier used when the encrypted medium was created. in passwords of type str List of passwords. in clear_on_suspend of type bool Flag whether to clear the given passwords on VM suspend (due to a suspending host for example). The passwords must be supplied again before the VM can resume. raises :class:`VBoxErrorPasswordIncorrect` The password provided wasn't correct for at least one disk using the provided ID.
[ "Adds", "a", "password", "used", "for", "hard", "disk", "encryption", "/", "decryption", "." ]
python
train
45.085714
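A usage sketch for the record above; the `console` handle and the identifiers are hypothetical stand-ins for whatever the calling code actually holds:

# Hypothetical caller; `console` is assumed to be the object exposing
# add_disk_encryption_passwords.
console.add_disk_encryption_passwords(
    ids=["disk1", "disk2"],                  # identifiers set when the media were encrypted
    passwords=["s3cret-one", "s3cret-two"],  # one password per identifier
    clear_on_suspend=True)                   # require re-entry after a host suspend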
softlayer/softlayer-python
SoftLayer/shell/cmd_help.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/shell/cmd_help.py#L15-L40
def cli(ctx, env):
    """Print shell help text."""
    env.out("Welcome to the SoftLayer shell.")
    env.out("")

    formatter = formatting.HelpFormatter()
    commands = []
    shell_commands = []
    for name in cli_core.cli.list_commands(ctx):
        command = cli_core.cli.get_command(ctx, name)
        if command.short_help is None:
            command.short_help = command.help
        details = (name, command.short_help)
        if name in dict(routes.ALL_ROUTES):
            shell_commands.append(details)
        else:
            commands.append(details)

    with formatter.section('Shell Commands'):
        formatter.write_dl(shell_commands)

    with formatter.section('Commands'):
        formatter.write_dl(commands)

    for line in formatter.buffer:
        env.out(line, newline=False)
[ "def", "cli", "(", "ctx", ",", "env", ")", ":", "env", ".", "out", "(", "\"Welcome to the SoftLayer shell.\"", ")", "env", ".", "out", "(", "\"\"", ")", "formatter", "=", "formatting", ".", "HelpFormatter", "(", ")", "commands", "=", "[", "]", "shell_commands", "=", "[", "]", "for", "name", "in", "cli_core", ".", "cli", ".", "list_commands", "(", "ctx", ")", ":", "command", "=", "cli_core", ".", "cli", ".", "get_command", "(", "ctx", ",", "name", ")", "if", "command", ".", "short_help", "is", "None", ":", "command", ".", "short_help", "=", "command", ".", "help", "details", "=", "(", "name", ",", "command", ".", "short_help", ")", "if", "name", "in", "dict", "(", "routes", ".", "ALL_ROUTES", ")", ":", "shell_commands", ".", "append", "(", "details", ")", "else", ":", "commands", ".", "append", "(", "details", ")", "with", "formatter", ".", "section", "(", "'Shell Commands'", ")", ":", "formatter", ".", "write_dl", "(", "shell_commands", ")", "with", "formatter", ".", "section", "(", "'Commands'", ")", ":", "formatter", ".", "write_dl", "(", "commands", ")", "for", "line", "in", "formatter", ".", "buffer", ":", "env", ".", "out", "(", "line", ",", "newline", "=", "False", ")" ]
Print shell help text.
[ "Print", "shell", "help", "text", "." ]
python
train
30.230769
brandon-rhodes/uncommitted
uncommitted/command.py
https://github.com/brandon-rhodes/uncommitted/blob/80ebd95a3735e26bd8b9b7b62ff25e1e749a7472/uncommitted/command.py#L50-L65
def find_repositories_with_locate(path):
    """Use locate to return a sequence of (directory, dotdir) pairs."""
    command = [b'locate', b'-0']
    for dotdir in DOTDIRS:
        # Escaping the slash (using '\/' rather than '/') is an
        # important signal to locate(1) that these glob patterns are
        # supposed to match the full path, so that things like
        # '.hgignore' files do not show up in the result.
        command.append(br'%s\/%s' % (escape(path), escape(dotdir)))
        command.append(br'%s\/*/%s' % (escape(path), escape(dotdir)))
    try:
        paths = check_output(command).strip(b'\0').split(b'\0')
    except CalledProcessError:
        return []
    return [os.path.split(p) for p in paths
            if not os.path.islink(p) and os.path.isdir(p)]
[ "def", "find_repositories_with_locate", "(", "path", ")", ":", "command", "=", "[", "b'locate'", ",", "b'-0'", "]", "for", "dotdir", "in", "DOTDIRS", ":", "# Escaping the slash (using '\\/' rather than '/') is an", "# important signal to locate(1) that these glob patterns are", "# supposed to match the full path, so that things like", "# '.hgignore' files do not show up in the result.", "command", ".", "append", "(", "br'%s\\/%s'", "%", "(", "escape", "(", "path", ")", ",", "escape", "(", "dotdir", ")", ")", ")", "command", ".", "append", "(", "br'%s\\/*/%s'", "%", "(", "escape", "(", "path", ")", ",", "escape", "(", "dotdir", ")", ")", ")", "try", ":", "paths", "=", "check_output", "(", "command", ")", ".", "strip", "(", "b'\\0'", ")", ".", "split", "(", "b'\\0'", ")", "except", "CalledProcessError", ":", "return", "[", "]", "return", "[", "os", ".", "path", ".", "split", "(", "p", ")", "for", "p", "in", "paths", "if", "not", "os", ".", "path", ".", "islink", "(", "p", ")", "and", "os", ".", "path", ".", "isdir", "(", "p", ")", "]" ]
Use locate to return a sequence of (directory, dotdir) pairs.
[ "Use", "locate", "to", "return", "a", "sequence", "of", "(", "directory", "dotdir", ")", "pairs", "." ]
python
train
48.375
idlesign/uwsgiconf
uwsgiconf/runtime/caching.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/runtime/caching.py#L138-L147
def div(self, key, value=2):
    """Divides the specified key value by the specified value.

    :param str|unicode key:

    :param int value:

    :rtype: bool
    """
    # Note: the docstring says "divides", yet the call below delegates to
    # uwsgi.cache_mul; this looks like an upstream copy-paste bug
    # (uwsgi.cache_div would be expected here).
    return uwsgi.cache_mul(key, value, self.timeout, self.name)
[ "def", "div", "(", "self", ",", "key", ",", "value", "=", "2", ")", ":", "return", "uwsgi", ".", "cache_mul", "(", "key", ",", "value", ",", "self", ".", "timeout", ",", "self", ".", "name", ")" ]
Divides the specified key value by the specified value. :param str|unicode key: :param int value: :rtype: bool
[ "Divides", "the", "specified", "key", "value", "by", "the", "specified", "value", "." ]
python
train
24.8
ARMmbed/icetea
icetea_lib/tools/HTTP/Api.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/HTTP/Api.py#L136-L170
def post(self, path, data=None, json=None, headers=None, **kwargs):
    """
    Sends a POST request to host/path.

    :param path: String, resource path on server
    :param data: Dictionary, bytes or file-like object to send in the body of the request
    :param json: JSON formatted data to send in the body of the request
    :param headers: Dictionary of HTTP headers to be sent with the request,
        overwrites default headers if there is overlap
    :param kwargs: Other arguments used in the requests.request call
        valid parameters in kwargs are the optional parameters of Requests.Request
        http://docs.python-requests.org/en/master/api/
    :return: requests.Response
    :raises: RequestException
    """
    if headers is not None:
        merger = jsonmerge.Merger(SCHEMA)
        kwargs["headers"] = merger.merge(self.defaultHeaders, headers)
    else:
        kwargs["headers"] = self.defaultHeaders
    url = combine_urls(self.host, path)
    if self.cert is not None:
        kwargs["cert"] = self.cert
    self.logger.debug("Trying to send HTTP POST to {}".format(url))
    try:
        resp = requests.post(url, data, json, **kwargs)
        self._log_response(resp)
    except requests.RequestException as es:
        self._log_exception(es)
        raise
    return resp
[ "def", "post", "(", "self", ",", "path", ",", "data", "=", "None", ",", "json", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "headers", "is", "not", "None", ":", "merger", "=", "jsonmerge", ".", "Merger", "(", "SCHEMA", ")", "kwargs", "[", "\"headers\"", "]", "=", "merger", ".", "merge", "(", "self", ".", "defaultHeaders", ",", "headers", ")", "else", ":", "kwargs", "[", "\"headers\"", "]", "=", "self", ".", "defaultHeaders", "url", "=", "combine_urls", "(", "self", ".", "host", ",", "path", ")", "if", "self", ".", "cert", "is", "not", "None", ":", "kwargs", "[", "\"cert\"", "]", "=", "self", ".", "cert", "self", ".", "logger", ".", "debug", "(", "\"Trying to send HTTP POST to {}\"", ".", "format", "(", "url", ")", ")", "try", ":", "resp", "=", "requests", ".", "post", "(", "url", ",", "data", ",", "json", ",", "*", "*", "kwargs", ")", "self", ".", "_log_response", "(", "resp", ")", "except", "requests", ".", "RequestException", "as", "es", ":", "self", ".", "_log_exception", "(", "es", ")", "raise", "return", "resp" ]
Sends a POST request to host/path. :param path: String, resource path on server :param data: Dictionary, bytes or file-like object to send in the body of the request :param json: JSON formatted data to send in the body of the request :param headers: Dictionary of HTTP headers to be sent with the request, overwrites default headers if there is overlap :param kwargs: Other arguments used in the requests.request call valid parameters in kwargs are the optional parameters of Requests.Request http://docs.python-requests.org/en/master/api/ :return: requests.Response :raises: RequestException
[ "Sends", "a", "POST", "request", "to", "host", "/", "path", "." ]
python
train
39.514286
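A minimal sketch of calling the wrapper above; the constructor signature, host, and payload are assumptions for illustration:

# Hypothetical usage of Api.post; the Api constructor arguments are assumed.
api = Api("http://localhost:3000")
resp = api.post("/devices", json={"name": "dut-1"},
                headers={"X-Trace": "abc"})  # merged over the default headers
print(resp.status_code)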
crate/crate-python
src/crate/client/http.py
https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/http.py#L453-L484
def _get_server(self):
    """
    Get server to use for request.
    Also process inactive server list, re-add them after given interval.
    """
    with self._lock:
        inactive_server_count = len(self._inactive_servers)
        for i in range(inactive_server_count):
            try:
                ts, server, message = heapq.heappop(self._inactive_servers)
            except IndexError:
                pass
            else:
                if (ts + self.retry_interval) > time():
                    # Not yet, put it back
                    heapq.heappush(self._inactive_servers,
                                   (ts, server, message))
                else:
                    self._active_servers.append(server)
                    logger.warn("Restored server %s into active pool",
                                server)

        # if none is old enough, use oldest
        if not self._active_servers:
            ts, server, message = heapq.heappop(self._inactive_servers)
            self._active_servers.append(server)
            logger.info("Restored server %s into active pool", server)

        server = self._active_servers[0]
        self._roundrobin()

        return server
[ "def", "_get_server", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "inactive_server_count", "=", "len", "(", "self", ".", "_inactive_servers", ")", "for", "i", "in", "range", "(", "inactive_server_count", ")", ":", "try", ":", "ts", ",", "server", ",", "message", "=", "heapq", ".", "heappop", "(", "self", ".", "_inactive_servers", ")", "except", "IndexError", ":", "pass", "else", ":", "if", "(", "ts", "+", "self", ".", "retry_interval", ")", ">", "time", "(", ")", ":", "# Not yet, put it back", "heapq", ".", "heappush", "(", "self", ".", "_inactive_servers", ",", "(", "ts", ",", "server", ",", "message", ")", ")", "else", ":", "self", ".", "_active_servers", ".", "append", "(", "server", ")", "logger", ".", "warn", "(", "\"Restored server %s into active pool\"", ",", "server", ")", "# if none is old enough, use oldest", "if", "not", "self", ".", "_active_servers", ":", "ts", ",", "server", ",", "message", "=", "heapq", ".", "heappop", "(", "self", ".", "_inactive_servers", ")", "self", ".", "_active_servers", ".", "append", "(", "server", ")", "logger", ".", "info", "(", "\"Restored server %s into active pool\"", ",", "server", ")", "server", "=", "self", ".", "_active_servers", "[", "0", "]", "self", ".", "_roundrobin", "(", ")", "return", "server" ]
Get server to use for request. Also process inactive server list, re-add them after given interval.
[ "Get", "server", "to", "use", "for", "request", ".", "Also", "process", "inactive", "server", "list", "re", "-", "add", "them", "after", "given", "interval", "." ]
python
train
40.25
sixty-north/added-value
source/added_value/pyobj_role.py
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/pyobj_role.py#L10-L40
def pyobj_role(make_node, name, rawtext, text, lineno, inliner, options=None, content=None):
    """Include Python object value, rendering it to text using str.

    Returns 2 part tuple containing list of nodes to insert into the
    document and a list of system messages. Both are allowed to be empty.

    :param make_node: A callable which accepts (rawtext, app, prefixed_name,
        obj, parent, modname, options) and which returns a node
    :param name: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.
    """
    if options is None:
        options = {}
    if content is None:
        content = []
    try:
        prefixed_name, obj, parent, modname = import_by_name(text)
    except ImportError:
        msg = inliner.reporter.error("Could not locate Python object {}".format(text), line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    app = inliner.document.settings.env.app
    node = make_node(rawtext, app, prefixed_name, obj, parent, modname, options)
    return [node], []
[ "def", "pyobj_role", "(", "make_node", ",", "name", ",", "rawtext", ",", "text", ",", "lineno", ",", "inliner", ",", "options", "=", "None", ",", "content", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "if", "content", "is", "None", ":", "content", "=", "[", "]", "try", ":", "prefixed_name", ",", "obj", ",", "parent", ",", "modname", "=", "import_by_name", "(", "text", ")", "except", "ImportError", ":", "msg", "=", "inliner", ".", "reporter", ".", "error", "(", "\"Could not locate Python object {}\"", ".", "format", "(", "text", ")", ",", "line", "=", "lineno", ")", "prb", "=", "inliner", ".", "problematic", "(", "rawtext", ",", "rawtext", ",", "msg", ")", "return", "[", "prb", "]", ",", "[", "msg", "]", "app", "=", "inliner", ".", "document", ".", "settings", ".", "env", ".", "app", "node", "=", "make_node", "(", "rawtext", ",", "app", ",", "prefixed_name", ",", "obj", ",", "parent", ",", "modname", ",", "options", ")", "return", "[", "node", "]", ",", "[", "]" ]
Include Python object value, rendering it to text using str. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. :param make_node: A callable which accepts (rawtext, app, prefixed_name, obj, parent, modname, options) and which returns a node :param name: The role name used in the document. :param rawtext: The entire markup snippet, with role. :param text: The text marked with the role. :param lineno: The line number where rawtext appears in the input. :param inliner: The inliner instance that called us. :param options: Directive options for customization. :param content: The directive content for customization.
[ "Include", "Python", "object", "value", "rendering", "it", "to", "text", "using", "str", "." ]
python
train
43.548387
sassoftware/saspy
saspy/sasbase.py
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasbase.py#L733-L753
def read_csv(self, file: str, table: str = '_csv', libref: str = '',
             results: str = '', opts: dict = None) -> 'SASdata':
    """
    :param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file
    :param table: the name of the SAS Data Set to create
    :param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
    :param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
    :param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
    :return: SASdata object
    """
    opts = opts if opts is not None else {}

    if results == '':
        results = self.results
    self._io.read_csv(file, table, libref, self.nosub, opts)

    if self.exist(table, libref):
        return SASdata(self, libref, table, results)
    else:
        return None
[ "def", "read_csv", "(", "self", ",", "file", ":", "str", ",", "table", ":", "str", "=", "'_csv'", ",", "libref", ":", "str", "=", "''", ",", "results", ":", "str", "=", "''", ",", "opts", ":", "dict", "=", "None", ")", "->", "'SASdata'", ":", "opts", "=", "opts", "if", "opts", "is", "not", "None", "else", "{", "}", "if", "results", "==", "''", ":", "results", "=", "self", ".", "results", "self", ".", "_io", ".", "read_csv", "(", "file", ",", "table", ",", "libref", ",", "self", ".", "nosub", ",", "opts", ")", "if", "self", ".", "exist", "(", "table", ",", "libref", ")", ":", "return", "SASdata", "(", "self", ",", "libref", ",", "table", ",", "results", ")", "else", ":", "return", "None" ]
:param file: either the OS filesystem path of the file, or HTTP://... for a url accessible file :param table: the name of the SAS Data Set to create :param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned :param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives :param opts: a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows) :return: SASdata object
[ ":", "param", "file", ":", "either", "the", "OS", "filesystem", "path", "of", "the", "file", "or", "HTTP", ":", "//", "...", "for", "a", "url", "accessible", "file", ":", "param", "table", ":", "the", "name", "of", "the", "SAS", "Data", "Set", "to", "create", ":", "param", "libref", ":", "the", "libref", "for", "the", "SAS", "Data", "Set", "being", "created", ".", "Defaults", "to", "WORK", "or", "USER", "if", "assigned", ":", "param", "results", ":", "format", "of", "results", "SASsession", ".", "results", "is", "default", "PANDAS", "HTML", "or", "TEXT", "are", "the", "alternatives", ":", "param", "opts", ":", "a", "dictionary", "containing", "any", "of", "the", "following", "Proc", "Import", "options", "(", "datarow", "delimiter", "getnames", "guessingrows", ")", ":", "return", ":", "SASdata", "object" ]
python
train
47.714286
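A short usage sketch for read_csv above; the session depends on a locally configured SAS install, and the file path is invented:

# Hypothetical call; requires a configured SAS deployment for saspy.
import saspy

sas = saspy.SASsession()
cars = sas.read_csv("/tmp/cars.csv",          # local path or HTTP URL
                    table="cars",
                    opts={"getnames": True})  # Proc Import option
if cars is not None:
    print(type(cars))                         # a SASdata object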
rodricios/eatiht
eatiht/eatiht.py
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht.py#L189-L216
def get_sentence_xpath_tuples(filename_url_or_filelike,
                              xpath_to_text=TEXT_FINDER_XPATH):
    """
    Given a url and xpath, this function will download, parse, then
    iterate though queried text-nodes. From the resulting text-nodes,
    extract a list of (text, exact-xpath) tuples.
    """
    parsed_html = get_html_tree(filename_url_or_filelike)
    try:
        xpath_finder = parsed_html.getroot().getroottree().getpath
    except(AttributeError):
        xpath_finder = parsed_html.getroottree().getpath

    nodes_with_text = parsed_html.xpath(xpath_to_text)

    sent_xpath_pairs = [
        # hard-code paragraph breaks (there has to be a better way)
        ('\n\n' + s, xpath_finder(n)) if e == 0
        else (s, xpath_finder(n))
        for n in nodes_with_text
        for e, s in enumerate(SENTENCE_TOKEN_PATTERN.split(
            BRACKET_PATTERN.sub('', ''.join(n.xpath('.//text()')))))
        if s.endswith(tuple(SENTENCE_ENDING))
    ]

    return sent_xpath_pairs
[ "def", "get_sentence_xpath_tuples", "(", "filename_url_or_filelike", ",", "xpath_to_text", "=", "TEXT_FINDER_XPATH", ")", ":", "parsed_html", "=", "get_html_tree", "(", "filename_url_or_filelike", ")", "try", ":", "xpath_finder", "=", "parsed_html", ".", "getroot", "(", ")", ".", "getroottree", "(", ")", ".", "getpath", "except", "(", "AttributeError", ")", ":", "xpath_finder", "=", "parsed_html", ".", "getroottree", "(", ")", ".", "getpath", "nodes_with_text", "=", "parsed_html", ".", "xpath", "(", "xpath_to_text", ")", "sent_xpath_pairs", "=", "[", "# hard-code paragraph breaks (there has to be a better way)\r", "(", "'\\n\\n'", "+", "s", ",", "xpath_finder", "(", "n", ")", ")", "if", "e", "==", "0", "else", "(", "s", ",", "xpath_finder", "(", "n", ")", ")", "for", "n", "in", "nodes_with_text", "for", "e", ",", "s", "in", "enumerate", "(", "SENTENCE_TOKEN_PATTERN", ".", "split", "(", "BRACKET_PATTERN", ".", "sub", "(", "''", ",", "''", ".", "join", "(", "n", ".", "xpath", "(", "'.//text()'", ")", ")", ")", ")", ")", "if", "s", ".", "endswith", "(", "tuple", "(", "SENTENCE_ENDING", ")", ")", "]", "return", "sent_xpath_pairs" ]
Given a url and xpath, this function will download, parse, then iterate though queried text-nodes. From the resulting text-nodes, extract a list of (text, exact-xpath) tuples.
[ "Given", "a", "url", "and", "xpath", "this", "function", "will", "download", "parse", "then", "iterate", "though", "queried", "text", "-", "nodes", ".", "From", "the", "resulting", "text", "-", "nodes", "extract", "a", "list", "of", "(", "text", "exact", "-", "xpath", ")", "tuples", "." ]
python
train
36.535714
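A sketch of calling the extractor above; the URL is invented for illustration:

# Hypothetical call to get_sentence_xpath_tuples on a live page.
pairs = get_sentence_xpath_tuples("http://example.com/article.html")
for sentence, xpath in pairs[:3]:
    print(xpath, sentence.strip()[:60])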
kensho-technologies/graphql-compiler
graphql_compiler/compiler/blocks.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L446-L450
def validate(self):
    """Ensure the Fold block is valid."""
    if not isinstance(self.fold_scope_location, FoldScopeLocation):
        raise TypeError(u'Expected a FoldScopeLocation for fold_scope_location, got: {} '
                        u'{}'.format(type(self.fold_scope_location), self.fold_scope_location))
[ "def", "validate", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "fold_scope_location", ",", "FoldScopeLocation", ")", ":", "raise", "TypeError", "(", "u'Expected a FoldScopeLocation for fold_scope_location, got: {} '", "u'{}'", ".", "format", "(", "type", "(", "self", ".", "fold_scope_location", ")", ",", "self", ".", "fold_scope_location", ")", ")" ]
Ensure the Fold block is valid.
[ "Ensure", "the", "Fold", "block", "is", "valid", "." ]
python
train
65.4
titilambert/pyebox
pyebox/client.py
https://github.com/titilambert/pyebox/blob/f35fb75ab5f0df38e1d16a0420e4c13b4908c63d/pyebox/client.py#L192-L197
def close_session(self):
    """Close current session."""
    if not self._session.closed:
        if self._session._connector_owner:
            self._session._connector.close()
        self._session._connector = None
[ "def", "close_session", "(", "self", ")", ":", "if", "not", "self", ".", "_session", ".", "closed", ":", "if", "self", ".", "_session", ".", "_connector_owner", ":", "self", ".", "_session", ".", "_connector", ".", "close", "(", ")", "self", ".", "_session", ".", "_connector", "=", "None" ]
Close current session.
[ "Close", "current", "session", "." ]
python
train
38.833333
marrabld/planarradpy
libplanarradpy/planrad.py
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L324-L330
def update_filenames(self):
    """Does nothing currently. May not need this method"""
    # NOTE: the docstring above is stale; this method does build
    # self.sky_file from the sky parameters.
    self.sky_file = os.path.abspath(
        os.path.join(os.path.join(self.input_path, 'sky_files'),
                     'sky_' + self.sky_state +
                     '_z' + str(self.sky_zenith) +
                     '_a' + str(self.sky_azimuth) +
                     '_' + str(self.num_bands) +
                     '_' + self.ds_code))
[ "def", "update_filenames", "(", "self", ")", ":", "self", ".", "sky_file", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "join", "(", "self", ".", "input_path", ",", "'sky_files'", ")", ",", "'sky_'", "+", "self", ".", "sky_state", "+", "'_z'", "+", "str", "(", "self", ".", "sky_zenith", ")", "+", "'_a'", "+", "str", "(", "self", ".", "sky_azimuth", ")", "+", "'_'", "+", "str", "(", "self", ".", "num_bands", ")", "+", "'_'", "+", "self", ".", "ds_code", ")", ")" ]
Does nothing currently. May not need this method
[ "Does", "nothing", "currently", ".", "May", "not", "need", "this", "method" ]
python
test
77.857143
last-partizan/pytils
pytils/templatetags/pytils_translit.py
https://github.com/last-partizan/pytils/blob/1c570a32b15e564bc68587b8207e32d464e61d08/pytils/templatetags/pytils_translit.py#L36-L43
def detranslify(text):
    """Detranslify russian text"""
    try:
        res = translit.detranslify(text)
    except Exception as err:
        # because filter must die silently
        res = default_value % {'error': err, 'value': text}
    return res
[ "def", "detranslify", "(", "text", ")", ":", "try", ":", "res", "=", "translit", ".", "detranslify", "(", "text", ")", "except", "Exception", "as", "err", ":", "# because filter must die silently", "res", "=", "default_value", "%", "{", "'error'", ":", "err", ",", "'value'", ":", "text", "}", "return", "res" ]
Detranslify russian text
[ "Detranslify", "russian", "text" ]
python
train
30.875
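A direct-call sketch of the function the filter above wraps, assuming pytils is installed; the filter only adds silent error handling around it:

# Hypothetical direct call; output shown is the expected reverse
# transliteration, not a verified value.
from pytils import translit
print(translit.detranslify("Privet"))  # expected: "Привет"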
hobson/aima
aima/search.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/search.py#L739-L741
def score(self):
    "The total score for the words found, according to the rules."
    return sum([self.scores[len(w)] for w in self.words()])
[ "def", "score", "(", "self", ")", ":", "return", "sum", "(", "[", "self", ".", "scores", "[", "len", "(", "w", ")", "]", "for", "w", "in", "self", ".", "words", "(", ")", "]", ")" ]
The total score for the words found, according to the rules.
[ "The", "total", "score", "for", "the", "words", "found", "according", "to", "the", "rules", "." ]
python
valid
49.666667
francois-vincent/clingon
clingon/clingon.py
https://github.com/francois-vincent/clingon/blob/afc9db073dbc72b2562ce3e444152986a555dcbf/clingon/clingon.py#L147-L163
def eval_option_value(self, option):
    """
    Evaluates an option
    :param option: a string
    :return: an object of type str, bool, int, float or list
    """
    try:
        value = eval(option, {}, {})
    except (SyntaxError, NameError, TypeError):
        return option
    if type(value) in (str, bool, int, float):
        return value
    elif type(value) in (list, tuple):
        for v in value:
            if type(v) not in (str, bool, int, float):
                self._write_error("Value of element of list object has wrong type %s" % v)
        return value
    return option
[ "def", "eval_option_value", "(", "self", ",", "option", ")", ":", "try", ":", "value", "=", "eval", "(", "option", ",", "{", "}", ",", "{", "}", ")", "except", "(", "SyntaxError", ",", "NameError", ",", "TypeError", ")", ":", "return", "option", "if", "type", "(", "value", ")", "in", "(", "str", ",", "bool", ",", "int", ",", "float", ")", ":", "return", "value", "elif", "type", "(", "value", ")", "in", "(", "list", ",", "tuple", ")", ":", "for", "v", "in", "value", ":", "if", "type", "(", "v", ")", "not", "in", "(", "str", ",", "bool", ",", "int", ",", "float", ")", ":", "self", ".", "_write_error", "(", "\"Value of element of list object has wrong type %s\"", "%", "v", ")", "return", "value", "return", "option" ]
Evaluates an option :param option: a string :return: an object of type str, bool, int, float or list
[ "Evaluates", "an", "option", ":", "param", "option", ":", "a", "string", ":", "return", ":", "an", "object", "of", "type", "str", "bool", "int", "float", "or", "list" ]
python
train
37.705882
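A behavior sketch for eval_option_value above; `clizer` is a hypothetical stand-in for whatever object exposes the method:

clizer.eval_option_value("42")             # -> 42 (int)
clizer.eval_option_value("[1, 2.5, 'a']")  # -> [1, 2.5, 'a'] (list)
clizer.eval_option_value("whatever")       # NameError inside eval -> "whatever" (str)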
mozillazg/python-shanbay
shanbay/__init__.py
https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/__init__.py#L68-L82
def login(self, **kwargs):
    """登录"""  # i.e. "Log in"
    payload = {
        'username': self.username,
        'password': self.password,
    }
    headers = kwargs.setdefault('headers', {})
    headers.setdefault(
        'Referer',
        'https://www.shanbay.com/web/account/login'
    )

    url = 'https://www.shanbay.com/api/v1/account/login/web/'
    response = self.request(url, 'put', json=payload, **kwargs)

    r_json = response.json()
    return r_json['status_code'] == 0
[ "def", "login", "(", "self", ",", "*", "*", "kwargs", ")", ":", "payload", "=", "{", "'username'", ":", "self", ".", "username", ",", "'password'", ":", "self", ".", "password", ",", "}", "headers", "=", "kwargs", ".", "setdefault", "(", "'headers'", ",", "{", "}", ")", "headers", ".", "setdefault", "(", "'Referer'", ",", "'https://www.shanbay.com/web/account/login'", ")", "url", "=", "'https://www.shanbay.com/api/v1/account/login/web/'", "response", "=", "self", ".", "request", "(", "url", ",", "'put'", ",", "json", "=", "payload", ",", "*", "*", "kwargs", ")", "r_json", "=", "response", ".", "json", "(", ")", "return", "r_json", "[", "'status_code'", "]", "==", "0" ]
登录
[ "登录" ]
python
train
34.266667
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/billing/models/service_package_quota_history_response.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/billing/models/service_package_quota_history_response.py#L182-L199
def object(self, object):
    """
    Sets the object of this ServicePackageQuotaHistoryResponse.
    Always set to 'service-package-quota-history'.

    :param object: The object of this ServicePackageQuotaHistoryResponse.
    :type: str
    """
    if object is None:
        raise ValueError("Invalid value for `object`, must not be `None`")
    allowed_values = ["service-package-quota-history"]
    if object not in allowed_values:
        raise ValueError(
            "Invalid value for `object` ({0}), must be one of {1}"
            .format(object, allowed_values)
        )

    self._object = object
[ "def", "object", "(", "self", ",", "object", ")", ":", "if", "object", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `object`, must not be `None`\"", ")", "allowed_values", "=", "[", "\"service-package-quota-history\"", "]", "if", "object", "not", "in", "allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `object` ({0}), must be one of {1}\"", ".", "format", "(", "object", ",", "allowed_values", ")", ")", "self", ".", "_object", "=", "object" ]
Sets the object of this ServicePackageQuotaHistoryResponse. Always set to 'service-package-quota-history'. :param object: The object of this ServicePackageQuotaHistoryResponse. :type: str
[ "Sets", "the", "object", "of", "this", "ServicePackageQuotaHistoryResponse", ".", "Always", "set", "to", "service", "-", "package", "-", "quota", "-", "history", "." ]
python
train
36.277778
HazyResearch/metal
metal/classifier.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/classifier.py#L172-L289
def _train_model(
    self, train_data, loss_fn, valid_data=None, log_writer=None, restore_state={}
):
    """The internal training routine called by train_model() after setup

    Args:
        train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
            X (data) and Y (labels) for the train split
        loss_fn: the loss function to minimize (maps *data -> loss)
        valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
            X (data) and Y (labels) for the dev split
        restore_state: a dictionary containing model weights (optimizer, main
            network) and training information

    If valid_data is not provided, then no checkpointing or evaluation on the
    dev set will occur.
    """
    # Set model to train mode
    self.train()
    train_config = self.config["train_config"]

    # Convert data to DataLoaders
    train_loader = self._create_data_loader(train_data)
    valid_loader = self._create_data_loader(valid_data)
    epoch_size = len(train_loader.dataset)

    # Move model to GPU
    if self.config["verbose"] and self.config["device"] != "cpu":
        print("Using GPU...")
    self.to(self.config["device"])

    # Set training components
    self._set_writer(train_config)
    self._set_logger(train_config, epoch_size)
    self._set_checkpointer(train_config)
    self._set_optimizer(train_config)
    self._set_scheduler(train_config)

    # Restore model if necessary
    if restore_state:
        start_iteration = self._restore_training_state(restore_state)
    else:
        start_iteration = 0

    # Train the model
    metrics_hist = {}  # The most recently seen value for all metrics
    for epoch in range(start_iteration, train_config["n_epochs"]):
        progress_bar = (
            train_config["progress_bar"]
            and self.config["verbose"]
            and self.logger.log_unit == "epochs"
        )
        t = tqdm(
            enumerate(train_loader),
            total=len(train_loader),
            disable=(not progress_bar),
        )
        self.running_loss = 0.0
        self.running_examples = 0
        for batch_num, data in t:
            # NOTE: actual batch_size may not equal config's target batch_size
            batch_size = len(data[0])

            # Moving data to device
            if self.config["device"] != "cpu":
                data = place_on_gpu(data)

            # Zero the parameter gradients
            self.optimizer.zero_grad()

            # Forward pass to calculate the average loss per example
            loss = loss_fn(*data)
            if torch.isnan(loss):
                msg = "Loss is NaN. Consider reducing learning rate."
                raise Exception(msg)

            # Backward pass to calculate gradients
            # Loss is an average loss per example
            loss.backward()

            # Perform optimizer step
            self.optimizer.step()

            # Calculate metrics, log, and checkpoint as necessary
            metrics_dict = self._execute_logging(
                train_loader, valid_loader, loss, batch_size
            )
            metrics_hist.update(metrics_dict)

            # tqdm output
            t.set_postfix(loss=metrics_dict["train/loss"])

        # Apply learning rate scheduler
        self._update_scheduler(epoch, metrics_hist)

    self.eval()

    # Restore best model if applicable
    if self.checkpointer:
        self.checkpointer.load_best_model(model=self)

    # Write log if applicable
    if self.writer:
        if self.writer.include_config:
            self.writer.add_config(self.config)
        self.writer.close()

    # Print confusion matrix if applicable
    if self.config["verbose"]:
        print("Finished Training")
        if valid_loader is not None:
            self.score(
                valid_loader,
                metric=train_config["validation_metric"],
                verbose=True,
                print_confusion_matrix=True,
            )
[ "def", "_train_model", "(", "self", ",", "train_data", ",", "loss_fn", ",", "valid_data", "=", "None", ",", "log_writer", "=", "None", ",", "restore_state", "=", "{", "}", ")", ":", "# Set model to train mode", "self", ".", "train", "(", ")", "train_config", "=", "self", ".", "config", "[", "\"train_config\"", "]", "# Convert data to DataLoaders", "train_loader", "=", "self", ".", "_create_data_loader", "(", "train_data", ")", "valid_loader", "=", "self", ".", "_create_data_loader", "(", "valid_data", ")", "epoch_size", "=", "len", "(", "train_loader", ".", "dataset", ")", "# Move model to GPU", "if", "self", ".", "config", "[", "\"verbose\"", "]", "and", "self", ".", "config", "[", "\"device\"", "]", "!=", "\"cpu\"", ":", "print", "(", "\"Using GPU...\"", ")", "self", ".", "to", "(", "self", ".", "config", "[", "\"device\"", "]", ")", "# Set training components", "self", ".", "_set_writer", "(", "train_config", ")", "self", ".", "_set_logger", "(", "train_config", ",", "epoch_size", ")", "self", ".", "_set_checkpointer", "(", "train_config", ")", "self", ".", "_set_optimizer", "(", "train_config", ")", "self", ".", "_set_scheduler", "(", "train_config", ")", "# Restore model if necessary", "if", "restore_state", ":", "start_iteration", "=", "self", ".", "_restore_training_state", "(", "restore_state", ")", "else", ":", "start_iteration", "=", "0", "# Train the model", "metrics_hist", "=", "{", "}", "# The most recently seen value for all metrics", "for", "epoch", "in", "range", "(", "start_iteration", ",", "train_config", "[", "\"n_epochs\"", "]", ")", ":", "progress_bar", "=", "(", "train_config", "[", "\"progress_bar\"", "]", "and", "self", ".", "config", "[", "\"verbose\"", "]", "and", "self", ".", "logger", ".", "log_unit", "==", "\"epochs\"", ")", "t", "=", "tqdm", "(", "enumerate", "(", "train_loader", ")", ",", "total", "=", "len", "(", "train_loader", ")", ",", "disable", "=", "(", "not", "progress_bar", ")", ",", ")", "self", ".", "running_loss", "=", "0.0", "self", ".", "running_examples", "=", "0", "for", "batch_num", ",", "data", "in", "t", ":", "# NOTE: actual batch_size may not equal config's target batch_size", "batch_size", "=", "len", "(", "data", "[", "0", "]", ")", "# Moving data to device", "if", "self", ".", "config", "[", "\"device\"", "]", "!=", "\"cpu\"", ":", "data", "=", "place_on_gpu", "(", "data", ")", "# Zero the parameter gradients", "self", ".", "optimizer", ".", "zero_grad", "(", ")", "# Forward pass to calculate the average loss per example", "loss", "=", "loss_fn", "(", "*", "data", ")", "if", "torch", ".", "isnan", "(", "loss", ")", ":", "msg", "=", "\"Loss is NaN. 
Consider reducing learning rate.\"", "raise", "Exception", "(", "msg", ")", "# Backward pass to calculate gradients", "# Loss is an average loss per example", "loss", ".", "backward", "(", ")", "# Perform optimizer step", "self", ".", "optimizer", ".", "step", "(", ")", "# Calculate metrics, log, and checkpoint as necessary", "metrics_dict", "=", "self", ".", "_execute_logging", "(", "train_loader", ",", "valid_loader", ",", "loss", ",", "batch_size", ")", "metrics_hist", ".", "update", "(", "metrics_dict", ")", "# tqdm output", "t", ".", "set_postfix", "(", "loss", "=", "metrics_dict", "[", "\"train/loss\"", "]", ")", "# Apply learning rate scheduler", "self", ".", "_update_scheduler", "(", "epoch", ",", "metrics_hist", ")", "self", ".", "eval", "(", ")", "# Restore best model if applicable", "if", "self", ".", "checkpointer", ":", "self", ".", "checkpointer", ".", "load_best_model", "(", "model", "=", "self", ")", "# Write log if applicable", "if", "self", ".", "writer", ":", "if", "self", ".", "writer", ".", "include_config", ":", "self", ".", "writer", ".", "add_config", "(", "self", ".", "config", ")", "self", ".", "writer", ".", "close", "(", ")", "# Print confusion matrix if applicable", "if", "self", ".", "config", "[", "\"verbose\"", "]", ":", "print", "(", "\"Finished Training\"", ")", "if", "valid_loader", "is", "not", "None", ":", "self", ".", "score", "(", "valid_loader", ",", "metric", "=", "train_config", "[", "\"validation_metric\"", "]", ",", "verbose", "=", "True", ",", "print_confusion_matrix", "=", "True", ",", ")" ]
The internal training routine called by train_model() after setup Args: train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the train split loss_fn: the loss function to minimize (maps *data -> loss) valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of X (data) and Y (labels) for the dev split restore_state: a dictionary containing model weights (optimizer, main network) and training information If valid_data is not provided, then no checkpointing or evaluation on the dev set will occur.
[ "The", "internal", "training", "routine", "called", "by", "train_model", "()", "after", "setup" ]
python
train
36.008475
upsight/doctor
doctor/docs/base.py
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/docs/base.py#L124-L139
def get_object_reference(obj: Object) -> str:
    """Gets an object reference string from the obj instance.

    This adds the object type to ALL_RESOURCES so that it gets documented
    and returns a str which contains a sphinx reference to the documented
    object.

    :param obj: The Object instance.
    :returns: A sphinx docs reference str.
    """
    resource_name = obj.title
    if resource_name is None:
        class_name = obj.__name__
        resource_name = class_name_to_resource_name(class_name)
    ALL_RESOURCES[resource_name] = obj
    return ' See :ref:`resource-{}`.'.format(
        '-'.join(resource_name.split(' ')).lower().strip())
[ "def", "get_object_reference", "(", "obj", ":", "Object", ")", "->", "str", ":", "resource_name", "=", "obj", ".", "title", "if", "resource_name", "is", "None", ":", "class_name", "=", "obj", ".", "__name__", "resource_name", "=", "class_name_to_resource_name", "(", "class_name", ")", "ALL_RESOURCES", "[", "resource_name", "]", "=", "obj", "return", "' See :ref:`resource-{}`.'", ".", "format", "(", "'-'", ".", "join", "(", "resource_name", ".", "split", "(", "' '", ")", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", ")" ]
Gets an object reference string from the obj instance. This adds the object type to ALL_RESOURCES so that it gets documented and returns a str which contains a sphinx reference to the documented object. :param obj: The Object instance. :returns: A sphinx docs reference str.
[ "Gets", "an", "object", "reference", "string", "from", "the", "obj", "instance", "." ]
python
train
40.0625
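A behavior sketch for get_object_reference above; the Object subclass is invented for illustration and a title of None would instead fall back to the class name:

# Hypothetical resource type; registers itself in ALL_RESOURCES when passed in.
class FooBarResource(Object):
    title = 'Foo Bar'

print(get_object_reference(FooBarResource))  # " See :ref:`resource-foo-bar`."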
deepmind/pysc2
pysc2/lib/renderer_human.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L669-L679
def get_mouse_pos(self, window_pos=None):
    """Return a MousePos filled with the world position and surf it hit."""
    window_pos = window_pos or pygame.mouse.get_pos()
    # +0.5 to center the point on the middle of the pixel.
    window_pt = point.Point(*window_pos) + 0.5
    for surf in reversed(self._surfaces):
        if (surf.surf_type != SurfType.CHROME and
                surf.surf_rect.contains_point(window_pt)):
            surf_rel_pt = window_pt - surf.surf_rect.tl
            world_pt = surf.world_to_surf.back_pt(surf_rel_pt)
            return MousePos(world_pt, surf)
[ "def", "get_mouse_pos", "(", "self", ",", "window_pos", "=", "None", ")", ":", "window_pos", "=", "window_pos", "or", "pygame", ".", "mouse", ".", "get_pos", "(", ")", "# +0.5 to center the point on the middle of the pixel.", "window_pt", "=", "point", ".", "Point", "(", "*", "window_pos", ")", "+", "0.5", "for", "surf", "in", "reversed", "(", "self", ".", "_surfaces", ")", ":", "if", "(", "surf", ".", "surf_type", "!=", "SurfType", ".", "CHROME", "and", "surf", ".", "surf_rect", ".", "contains_point", "(", "window_pt", ")", ")", ":", "surf_rel_pt", "=", "window_pt", "-", "surf", ".", "surf_rect", ".", "tl", "world_pt", "=", "surf", ".", "world_to_surf", ".", "back_pt", "(", "surf_rel_pt", ")", "return", "MousePos", "(", "world_pt", ",", "surf", ")" ]
Return a MousePos filled with the world position and surf it hit.
[ "Return", "a", "MousePos", "filled", "with", "the", "world", "position", "and", "surf", "it", "hit", "." ]
python
train
51
nephics/mat4py
mat4py/savemat.py
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/savemat.py#L207-L225
def write_numeric_array(fd, header, array):
    """Write the numeric array"""
    # make a memory file for writing array data
    bd = BytesIO()
    # write matrix header to memory file
    write_var_header(bd, header)
    if not isinstance(array, basestring) and header['dims'][0] > 1:
        # list array data in column major order
        array = list(chain.from_iterable(izip(*array)))
    # write matrix data to memory file
    write_elements(bd, header['mtp'], array)
    # write the variable to disk file
    data = bd.getvalue()
    bd.close()
    write_var_data(fd, data)
[ "def", "write_numeric_array", "(", "fd", ",", "header", ",", "array", ")", ":", "# make a memory file for writing array data", "bd", "=", "BytesIO", "(", ")", "# write matrix header to memory file", "write_var_header", "(", "bd", ",", "header", ")", "if", "not", "isinstance", "(", "array", ",", "basestring", ")", "and", "header", "[", "'dims'", "]", "[", "0", "]", ">", "1", ":", "# list array data in column major order", "array", "=", "list", "(", "chain", ".", "from_iterable", "(", "izip", "(", "*", "array", ")", ")", ")", "# write matrix data to memory file", "write_elements", "(", "bd", ",", "header", "[", "'mtp'", "]", ",", "array", ")", "# write the variable to disk file", "data", "=", "bd", ".", "getvalue", "(", ")", "bd", ".", "close", "(", ")", "write_var_data", "(", "fd", ",", "data", ")" ]
Write the numeric array
[ "Write", "the", "numeric", "array" ]
python
valid
29.842105
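The column-major reordering step above, shown in isolation as a pure-Python sketch (Python 3 zip in place of izip):

from itertools import chain

rows = [[1, 2, 3],
        [4, 5, 6]]
flat = list(chain.from_iterable(zip(*rows)))
print(flat)  # [1, 4, 2, 5, 3, 6] -- columns first, as MAT-files expect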
edx/edx-enterprise
enterprise/api/v1/decorators.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api/v1/decorators.py#L55-L91
def require_at_least_one_query_parameter(*query_parameter_names):
    """
    Ensure at least one of the specified query parameters are included in the request.

    This decorator checks for the existence of at least one of the specified query
    parameters and passes the values as function parameters to the decorated view.
    If none of the specified query parameters are included in the request, a
    ValidationError is raised.

    Usage::
        @require_at_least_one_query_parameter('program_uuids', 'course_run_ids')
        def my_view(request, program_uuids, course_run_ids):
            # Some functionality ...
    """
    def outer_wrapper(view):
        """ Allow the passing of parameters to require_at_least_one_query_parameter. """
        @wraps(view)
        def wrapper(request, *args, **kwargs):
            """
            Checks for the existence of the specified query parameters, raises a
            ValidationError if none of them were included in the request.
            """
            requirement_satisfied = False
            for query_parameter_name in query_parameter_names:
                query_parameter_values = request.query_params.getlist(query_parameter_name)
                kwargs[query_parameter_name] = query_parameter_values
                if query_parameter_values:
                    requirement_satisfied = True
            if not requirement_satisfied:
                raise ValidationError(
                    detail='You must provide at least one of the following query parameters: {params}.'.format(
                        params=', '.join(query_parameter_names)
                    )
                )
            return view(request, *args, **kwargs)
        return wrapper
    return outer_wrapper
[ "def", "require_at_least_one_query_parameter", "(", "*", "query_parameter_names", ")", ":", "def", "outer_wrapper", "(", "view", ")", ":", "\"\"\" Allow the passing of parameters to require_at_least_one_query_parameter. \"\"\"", "@", "wraps", "(", "view", ")", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Checks for the existence of the specified query parameters, raises a\n ValidationError if none of them were included in the request.\n \"\"\"", "requirement_satisfied", "=", "False", "for", "query_parameter_name", "in", "query_parameter_names", ":", "query_parameter_values", "=", "request", ".", "query_params", ".", "getlist", "(", "query_parameter_name", ")", "kwargs", "[", "query_parameter_name", "]", "=", "query_parameter_values", "if", "query_parameter_values", ":", "requirement_satisfied", "=", "True", "if", "not", "requirement_satisfied", ":", "raise", "ValidationError", "(", "detail", "=", "'You must provide at least one of the following query parameters: {params}.'", ".", "format", "(", "params", "=", "', '", ".", "join", "(", "query_parameter_names", ")", ")", ")", "return", "view", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "outer_wrapper" ]
Ensure at least one of the specified query parameters are included in the request. This decorator checks for the existence of at least one of the specified query parameters and passes the values as function parameters to the decorated view. If none of the specified query parameters are included in the request, a ValidationError is raised. Usage:: @require_at_least_one_query_parameter('program_uuids', 'course_run_ids') def my_view(request, program_uuids, course_run_ids): # Some functionality ...
[ "Ensure", "at", "least", "one", "of", "the", "specified", "query", "parameters", "are", "included", "in", "the", "request", "." ]
python
valid
46.648649
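A request-level sketch of the decorator above; the view name and route are invented:

@require_at_least_one_query_parameter('program_uuids', 'course_run_ids')
def enrollments(request, program_uuids, course_run_ids):
    # GET /enrollments?program_uuids=a&program_uuids=b
    #   -> program_uuids == ['a', 'b'], course_run_ids == []
    # GET /enrollments with neither parameter -> ValidationError
    ...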
synw/dataswim
dataswim/data/count.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/count.py#L35-L44
def count_(self):
    """
    Returns the number of rows of the main dataframe
    """
    try:
        num = len(self.df.index)
    except Exception as e:
        self.err(e, "Can not count data")
        return
    return num
[ "def", "count_", "(", "self", ")", ":", "try", ":", "num", "=", "len", "(", "self", ".", "df", ".", "index", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Can not count data\"", ")", "return", "return", "num" ]
Returns the number of rows of the main dataframe
[ "Returns", "the", "number", "of", "rows", "of", "the", "main", "dataframe" ]
python
train
25.4
MozillaSecurity/laniakea
laniakea/core/providers/packet/manager.py
https://github.com/MozillaSecurity/laniakea/blob/7e80adc6ae92c6c1332d4c08473bb271fb3b6833/laniakea/core/providers/packet/manager.py#L216-L224
def attach_volume_to_device(self, volume_id, device_id):
    """Attaches the created Volume to a Device."""
    try:
        volume = self.manager.get_volume(volume_id)
        volume.attach(device_id)
    except packet.baseapi.Error as msg:
        raise PacketManagerException(msg)
    return volume
[ "def", "attach_volume_to_device", "(", "self", ",", "volume_id", ",", "device_id", ")", ":", "try", ":", "volume", "=", "self", ".", "manager", ".", "get_volume", "(", "volume_id", ")", "volume", ".", "attach", "(", "device_id", ")", "except", "packet", ".", "baseapi", ".", "Error", "as", "msg", ":", "raise", "PacketManagerException", "(", "msg", ")", "return", "volume" ]
Attaches the created Volume to a Device.
[ "Attaches", "the", "created", "Volume", "to", "a", "Device", "." ]
python
train
36.666667
leetrout/python-nutritionix
nutritionix.py
https://github.com/leetrout/python-nutritionix/blob/2f900bf78dce5928b2a1c9d568f87e0c1d81455f/nutritionix.py#L38-L47
def mock_attr(self, *args, **kwargs):
    """
    Empty method to call to slurp up args and kwargs.

    `args` get pushed onto the url path.
    `kwargs` are converted to a query string and appended
    to the URL.
    """
    self.path.extend(args)
    self.qs.update(kwargs)
    return self
[ "def", "mock_attr", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "path", ".", "extend", "(", "args", ")", "self", ".", "qs", ".", "update", "(", "kwargs", ")", "return", "self" ]
Empty method to call to slurp up args and kwargs. `args` get pushed onto the url path. `kwargs` are converted to a query string and appended to the URL.
[ "Empty", "method", "to", "call", "to", "slurp", "up", "args", "and", "kwargs", "." ]
python
train
31.2
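A sketch of what a call routed to mock_attr does to the request being built; `client` and its prior state are hypothetical:

client.mock_attr("foods", "search", limit=20)
# client.path -> [..., "foods", "search"]
# client.qs   -> {..., "limit": 20}
# returns client, so calls can be chained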
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L269-L295
def open_handle(self, dwDesiredAccess = win32.THREAD_ALL_ACCESS):
    """
    Opens a new handle to the thread, closing the previous one.

    The new handle is stored in the L{hThread} property.

    @warn: Normally you should call L{get_handle} instead, since it's much
        "smarter" and tries to reuse handles and merge access rights.

    @type  dwDesiredAccess: int
    @param dwDesiredAccess: Desired access rights.
        Defaults to L{win32.THREAD_ALL_ACCESS}.
        See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms686769(v=vs.85).aspx}

    @raise WindowsError:
        It's not possible to open a handle to the thread with the requested
        access rights. This tipically happens because the target thread
        belongs to system process and the debugger is not runnning with
        administrative rights.
    """
    hThread = win32.OpenThread(dwDesiredAccess, win32.FALSE, self.dwThreadId)

    # In case hThread was set to an actual handle value instead of a Handle
    # object. This shouldn't happen unless the user tinkered with it.
    if not hasattr(self.hThread, '__del__'):
        self.close_handle()

    self.hThread = hThread
[ "def", "open_handle", "(", "self", ",", "dwDesiredAccess", "=", "win32", ".", "THREAD_ALL_ACCESS", ")", ":", "hThread", "=", "win32", ".", "OpenThread", "(", "dwDesiredAccess", ",", "win32", ".", "FALSE", ",", "self", ".", "dwThreadId", ")", "# In case hThread was set to an actual handle value instead of a Handle", "# object. This shouldn't happen unless the user tinkered with it.", "if", "not", "hasattr", "(", "self", ".", "hThread", ",", "'__del__'", ")", ":", "self", ".", "close_handle", "(", ")", "self", ".", "hThread", "=", "hThread" ]
Opens a new handle to the thread, closing the previous one. The new handle is stored in the L{hThread} property. @warn: Normally you should call L{get_handle} instead, since it's much "smarter" and tries to reuse handles and merge access rights. @type dwDesiredAccess: int @param dwDesiredAccess: Desired access rights. Defaults to L{win32.THREAD_ALL_ACCESS}. See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms686769(v=vs.85).aspx} @raise WindowsError: It's not possible to open a handle to the thread with the requested access rights. This tipically happens because the target thread belongs to system process and the debugger is not runnning with administrative rights.
[ "Opens", "a", "new", "handle", "to", "the", "thread", "closing", "the", "previous", "one", "." ]
python
train
45.37037
aio-libs/aiomysql
aiomysql/cursors.py
https://github.com/aio-libs/aiomysql/blob/131fb9f914739ff01a24b402d29bfd719f2d1a8b/aiomysql/cursors.py#L181-L193
async def nextset(self):
    """Get the next query set"""
    conn = self._get_db()
    current_result = self._result
    if current_result is None or current_result is not conn._result:
        return
    if not current_result.has_next:
        return
    self._result = None
    self._clear_result()
    await conn.next_result()
    await self._do_get_result()
    return True
[ "async", "def", "nextset", "(", "self", ")", ":", "conn", "=", "self", ".", "_get_db", "(", ")", "current_result", "=", "self", ".", "_result", "if", "current_result", "is", "None", "or", "current_result", "is", "not", "conn", ".", "_result", ":", "return", "if", "not", "current_result", ".", "has_next", ":", "return", "self", ".", "_result", "=", "None", "self", ".", "_clear_result", "(", ")", "await", "conn", ".", "next_result", "(", ")", "await", "self", ".", "_do_get_result", "(", ")", "return", "True" ]
Get the next query set
[ "Get", "the", "next", "query", "set" ]
python
train
31.846154
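A hypothetical use of nextset() above to walk multiple result sets from one executed batch; assumes the connection was created with a client flag permitting multiple statements per execute:

await cur.execute("SELECT 1; SELECT 2")
while True:
    print(await cur.fetchall())
    if not await cur.nextset():  # returns None once the last set is consumed
        break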
chaoss/grimoirelab-perceval
perceval/backend.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backend.py#L404-L412
def _set_output_arguments(self):
    """Activate output arguments parsing"""
    group = self.parser.add_argument_group('output arguments')
    group.add_argument('-o', '--output', type=argparse.FileType('w'),
                       dest='outfile', default=sys.stdout,
                       help="output file")
    group.add_argument('--json-line', dest='json_line', action='store_true',
                       help="produce a JSON line for each output item")
[ "def", "_set_output_arguments", "(", "self", ")", ":", "group", "=", "self", ".", "parser", ".", "add_argument_group", "(", "'output arguments'", ")", "group", ".", "add_argument", "(", "'-o'", ",", "'--output'", ",", "type", "=", "argparse", ".", "FileType", "(", "'w'", ")", ",", "dest", "=", "'outfile'", ",", "default", "=", "sys", ".", "stdout", ",", "help", "=", "\"output file\"", ")", "group", ".", "add_argument", "(", "'--json-line'", ",", "dest", "=", "'json_line'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"produce a JSON line for each output item\"", ")" ]
Activate output arguments parsing
[ "Activate", "output", "arguments", "parsing" ]
python
test
53.444444
twisted/mantissa
xmantissa/websession.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/websession.py#L300-L311
def savorSessionCookie(self, request):
    """
    Make the session cookie last as long as the persistent session.

    @type request: L{nevow.inevow.IRequest}
    @param request: The HTTP request object for the guard login URL.
    """
    cookieValue = request.getSession().uid
    request.addCookie(
        self.cookieKey, cookieValue, path='/',
        max_age=PERSISTENT_SESSION_LIFETIME,
        domain=self.cookieDomainForRequest(request))
[ "def", "savorSessionCookie", "(", "self", ",", "request", ")", ":", "cookieValue", "=", "request", ".", "getSession", "(", ")", ".", "uid", "request", ".", "addCookie", "(", "self", ".", "cookieKey", ",", "cookieValue", ",", "path", "=", "'/'", ",", "max_age", "=", "PERSISTENT_SESSION_LIFETIME", ",", "domain", "=", "self", ".", "cookieDomainForRequest", "(", "request", ")", ")" ]
Make the session cookie last as long as the persistent session. @type request: L{nevow.inevow.IRequest} @param request: The HTTP request object for the guard login URL.
[ "Make", "the", "session", "cookie", "last", "as", "long", "as", "the", "persistent", "session", "." ]
python
train
39.666667
google/transitfeed
transitfeed/problems.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/problems.py#L431-L440
def ContextTupleToDict(context): """Convert a tuple representing a context into a dict of (key, value) pairs """ d = {} if not context: return d for k, v in zip(ExceptionWithContext.CONTEXT_PARTS, context): if v != '' and v != None: # Don't ignore int(0), a valid row_num d[k] = v return d
[ "def", "ContextTupleToDict", "(", "context", ")", ":", "d", "=", "{", "}", "if", "not", "context", ":", "return", "d", "for", "k", ",", "v", "in", "zip", "(", "ExceptionWithContext", ".", "CONTEXT_PARTS", ",", "context", ")", ":", "if", "v", "!=", "''", "and", "v", "!=", "None", ":", "# Don't ignore int(0), a valid row_num", "d", "[", "k", "]", "=", "v", "return", "d" ]
Convert a tuple representing a context into a dict of (key, value) pairs
[ "Convert", "a", "tuple", "representing", "a", "context", "into", "a", "dict", "of", "(", "key", "value", ")", "pairs" ]
python
train
32.5
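The zip-and-filter idiom in ContextTupleToDict, shown standalone; the field names here are hypothetical, not transitfeed's actual CONTEXT_PARTS:

CONTEXT_PARTS = ('file_name', 'row_num', 'column_name', 'value')  # hypothetical

def context_tuple_to_dict(context):
    d = {}
    if not context:
        return d
    for k, v in zip(CONTEXT_PARTS, context):
        if v != '' and v is not None:  # keep int(0), a valid row_num
            d[k] = v
    return d

print(context_tuple_to_dict(('stops.txt', 0, '', None)))
# {'file_name': 'stops.txt', 'row_num': 0}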
yvesalexandre/bandicoot
bandicoot/spatial.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/spatial.py#L133-L173
def churn_rate(user, summary='default', **kwargs): """ Computes the frequency spent at every tower each week, and returns the distribution of the cosine similarity between two consecutive weeks. .. note:: The churn rate is always computed between pairs of weeks. """ if len(user.records) == 0: return statistics([], summary=summary) query = { 'groupby': 'week', 'divide_by': OrderedDict([ ('part_of_week', ['allweek']), ('part_of_day', ['allday']) ]), 'using': 'records', 'filter_empty': True, 'binning': True } rv = grouping_query(user, query) weekly_positions = rv[0][1] all_positions = list(set(p for l in weekly_positions for p in l)) frequencies = {} cos_dist = [] for week, week_positions in enumerate(weekly_positions): count = Counter(week_positions) total = sum(count.values()) frequencies[week] = [count.get(p, 0) / total for p in all_positions] all_indexes = range(len(all_positions)) for f_1, f_2 in pairwise(list(frequencies.values())): num = sum(f_1[a] * f_2[a] for a in all_indexes) denom_1 = sum(f ** 2 for f in f_1) denom_2 = sum(f ** 2 for f in f_2) cos_dist.append(1 - num / (denom_1 ** .5 * denom_2 ** .5)) return statistics(cos_dist, summary=summary)
[ "def", "churn_rate", "(", "user", ",", "summary", "=", "'default'", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "user", ".", "records", ")", "==", "0", ":", "return", "statistics", "(", "[", "]", ",", "summary", "=", "summary", ")", "query", "=", "{", "'groupby'", ":", "'week'", ",", "'divide_by'", ":", "OrderedDict", "(", "[", "(", "'part_of_week'", ",", "[", "'allweek'", "]", ")", ",", "(", "'part_of_day'", ",", "[", "'allday'", "]", ")", "]", ")", ",", "'using'", ":", "'records'", ",", "'filter_empty'", ":", "True", ",", "'binning'", ":", "True", "}", "rv", "=", "grouping_query", "(", "user", ",", "query", ")", "weekly_positions", "=", "rv", "[", "0", "]", "[", "1", "]", "all_positions", "=", "list", "(", "set", "(", "p", "for", "l", "in", "weekly_positions", "for", "p", "in", "l", ")", ")", "frequencies", "=", "{", "}", "cos_dist", "=", "[", "]", "for", "week", ",", "week_positions", "in", "enumerate", "(", "weekly_positions", ")", ":", "count", "=", "Counter", "(", "week_positions", ")", "total", "=", "sum", "(", "count", ".", "values", "(", ")", ")", "frequencies", "[", "week", "]", "=", "[", "count", ".", "get", "(", "p", ",", "0", ")", "/", "total", "for", "p", "in", "all_positions", "]", "all_indexes", "=", "range", "(", "len", "(", "all_positions", ")", ")", "for", "f_1", ",", "f_2", "in", "pairwise", "(", "list", "(", "frequencies", ".", "values", "(", ")", ")", ")", ":", "num", "=", "sum", "(", "f_1", "[", "a", "]", "*", "f_2", "[", "a", "]", "for", "a", "in", "all_indexes", ")", "denom_1", "=", "sum", "(", "f", "**", "2", "for", "f", "in", "f_1", ")", "denom_2", "=", "sum", "(", "f", "**", "2", "for", "f", "in", "f_2", ")", "cos_dist", ".", "append", "(", "1", "-", "num", "/", "(", "denom_1", "**", ".5", "*", "denom_2", "**", ".5", ")", ")", "return", "statistics", "(", "cos_dist", ",", "summary", "=", "summary", ")" ]
Computes the frequency spent at every tower each week, and returns the distribution of the cosine similarity between two consecutive weeks. .. note:: The churn rate is always computed between pairs of weeks.
[ "Computes", "the", "frequency", "spent", "at", "every", "towers", "each", "week", "and", "returns", "the", "distribution", "of", "the", "cosine", "similarity", "between", "two", "consecutives", "week", "." ]
python
train
32.756098
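The core of churn_rate is a cosine distance between two weekly tower-frequency vectors; a self-contained sketch of just that step:

from collections import Counter

def cosine_distance(week_a, week_b):
    # frequency vectors over the union of towers seen in either week
    towers = sorted(set(week_a) | set(week_b))
    ca, cb = Counter(week_a), Counter(week_b)
    fa = [ca[t] / len(week_a) for t in towers]
    fb = [cb[t] / len(week_b) for t in towers]
    num = sum(a * b for a, b in zip(fa, fb))
    return 1 - num / (sum(a * a for a in fa) ** .5 * sum(b * b for b in fb) ** .5)

print(round(cosine_distance(['t1', 't1', 't2'], ['t1', 't2', 't2']), 3))  # 0.2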
RedHatInsights/insights-core
insights/client/connection.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/connection.py#L434-L478
def get_branch_info(self): """ Retrieve branch_info from Satellite Server """ branch_info = None if os.path.exists(constants.cached_branch_info): # use cached branch info file if less than 5 minutes old # (failsafe, should be deleted at end of client run normally) logger.debug(u'Reading branch info from cached file.') ctime = datetime.utcfromtimestamp( os.path.getctime(constants.cached_branch_info)) if datetime.utcnow() < (ctime + timedelta(minutes=5)): with io.open(constants.cached_branch_info, encoding='utf8', mode='r') as f: branch_info = json.load(f) return branch_info else: logger.debug(u'Cached branch info is older than 5 minutes.') logger.debug(u'Obtaining branch information from %s', self.branch_info_url) net_logger.info(u'GET %s', self.branch_info_url) response = self.session.get(self.branch_info_url, timeout=self.config.http_timeout) logger.debug(u'GET branch_info status: %s', response.status_code) if response.status_code != 200: logger.debug("There was an error obtaining branch information.") logger.debug(u'Bad status from server: %s', response.status_code) logger.debug("Assuming default branch information %s" % self.branch_info) return False branch_info = response.json() logger.debug(u'Branch information: %s', json.dumps(branch_info)) # Determine if we are connected to Satellite 5 if ((branch_info[u'remote_branch'] != -1 and branch_info[u'remote_leaf'] == -1)): self.get_satellite5_info(branch_info) logger.debug(u'Saving branch info to file.') with io.open(constants.cached_branch_info, encoding='utf8', mode='w') as f: # json.dump is broken in py2 so use dumps bi_str = json.dumps(branch_info, ensure_ascii=False) f.write(bi_str) self.branch_info = branch_info return branch_info
[ "def", "get_branch_info", "(", "self", ")", ":", "branch_info", "=", "None", "if", "os", ".", "path", ".", "exists", "(", "constants", ".", "cached_branch_info", ")", ":", "# use cached branch info file if less than 10 minutes old", "# (failsafe, should be deleted at end of client run normally)", "logger", ".", "debug", "(", "u'Reading branch info from cached file.'", ")", "ctime", "=", "datetime", ".", "utcfromtimestamp", "(", "os", ".", "path", ".", "getctime", "(", "constants", ".", "cached_branch_info", ")", ")", "if", "datetime", ".", "utcnow", "(", ")", "<", "(", "ctime", "+", "timedelta", "(", "minutes", "=", "5", ")", ")", ":", "with", "io", ".", "open", "(", "constants", ".", "cached_branch_info", ",", "encoding", "=", "'utf8'", ",", "mode", "=", "'r'", ")", "as", "f", ":", "branch_info", "=", "json", ".", "load", "(", "f", ")", "return", "branch_info", "else", ":", "logger", ".", "debug", "(", "u'Cached branch info is older than 5 minutes.'", ")", "logger", ".", "debug", "(", "u'Obtaining branch information from %s'", ",", "self", ".", "branch_info_url", ")", "net_logger", ".", "info", "(", "u'GET %s'", ",", "self", ".", "branch_info_url", ")", "response", "=", "self", ".", "session", ".", "get", "(", "self", ".", "branch_info_url", ",", "timeout", "=", "self", ".", "config", ".", "http_timeout", ")", "logger", ".", "debug", "(", "u'GET branch_info status: %s'", ",", "response", ".", "status_code", ")", "if", "response", ".", "status_code", "!=", "200", ":", "logger", ".", "debug", "(", "\"There was an error obtaining branch information.\"", ")", "logger", ".", "debug", "(", "u'Bad status from server: %s'", ",", "response", ".", "status_code", ")", "logger", ".", "debug", "(", "\"Assuming default branch information %s\"", "%", "self", ".", "branch_info", ")", "return", "False", "branch_info", "=", "response", ".", "json", "(", ")", "logger", ".", "debug", "(", "u'Branch information: %s'", ",", "json", ".", "dumps", "(", "branch_info", ")", ")", "# Determine if we are connected to Satellite 5", "if", "(", "(", "branch_info", "[", "u'remote_branch'", "]", "is", "not", "-", "1", "and", "branch_info", "[", "u'remote_leaf'", "]", "is", "-", "1", ")", ")", ":", "self", ".", "get_satellite5_info", "(", "branch_info", ")", "logger", ".", "debug", "(", "u'Saving branch info to file.'", ")", "with", "io", ".", "open", "(", "constants", ".", "cached_branch_info", ",", "encoding", "=", "'utf8'", ",", "mode", "=", "'w'", ")", "as", "f", ":", "# json.dump is broke in py2 so use dumps", "bi_str", "=", "json", ".", "dumps", "(", "branch_info", ",", "ensure_ascii", "=", "False", ")", "f", ".", "write", "(", "bi_str", ")", "self", ".", "branch_info", "=", "branch_info", "return", "branch_info" ]
Retrieve branch_info from Satellite Server
[ "Retrieve", "branch_info", "from", "Satellite", "Server" ]
python
train
47.688889
tensorflow/cleverhans
cleverhans/attacks_tf.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks_tf.py#L55-L79
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max): """ TensorFlow implementation for applying perturbations to input features based on saliency maps :param i: index of first selected feature :param j: index of second selected feature :param X: a matrix containing our input features for our sample :param increase: boolean; true if we are increasing pixels, false otherwise :param theta: delta for each feature adjustment :param clip_min: minimum value for a feature in our sample :param clip_max: maximum value for a feature in our sample :return: a perturbed input feature matrix for a target class """ warnings.warn( "This function is dead code and will be removed on or after 2019-07-18") # perturb our input sample if increase: X[0, i] = np.minimum(clip_max, X[0, i] + theta) X[0, j] = np.minimum(clip_max, X[0, j] + theta) else: X[0, i] = np.maximum(clip_min, X[0, i] - theta) X[0, j] = np.maximum(clip_min, X[0, j] - theta) return X
[ "def", "apply_perturbations", "(", "i", ",", "j", ",", "X", ",", "increase", ",", "theta", ",", "clip_min", ",", "clip_max", ")", ":", "warnings", ".", "warn", "(", "\"This function is dead code and will be removed on or after 2019-07-18\"", ")", "# perturb our input sample", "if", "increase", ":", "X", "[", "0", ",", "i", "]", "=", "np", ".", "minimum", "(", "clip_max", ",", "X", "[", "0", ",", "i", "]", "+", "theta", ")", "X", "[", "0", ",", "j", "]", "=", "np", ".", "minimum", "(", "clip_max", ",", "X", "[", "0", ",", "j", "]", "+", "theta", ")", "else", ":", "X", "[", "0", ",", "i", "]", "=", "np", ".", "maximum", "(", "clip_min", ",", "X", "[", "0", ",", "i", "]", "-", "theta", ")", "X", "[", "0", ",", "j", "]", "=", "np", ".", "maximum", "(", "clip_min", ",", "X", "[", "0", ",", "j", "]", "-", "theta", ")", "return", "X" ]
TensorFlow implementation for applying perturbations to input features based on saliency maps :param i: index of first selected feature :param j: index of second selected feature :param X: a matrix containing our input features for our sample :param increase: boolean; true if we are increasing pixels, false otherwise :param theta: delta for each feature adjustment :param clip_min: minimum value for a feature in our sample :param clip_max: maximum value for a feature in our sample :return: a perturbed input feature matrix for a target class
[ "TensorFlow", "implementation", "for", "apply", "perturbations", "to", "input", "features", "based", "on", "salency", "maps", ":", "param", "i", ":", "index", "of", "first", "selected", "feature", ":", "param", "j", ":", "index", "of", "second", "selected", "feature", ":", "param", "X", ":", "a", "matrix", "containing", "our", "input", "features", "for", "our", "sample", ":", "param", "increase", ":", "boolean", ";", "true", "if", "we", "are", "increasing", "pixels", "false", "otherwise", ":", "param", "theta", ":", "delta", "for", "each", "feature", "adjustment", ":", "param", "clip_min", ":", "mininum", "value", "for", "a", "feature", "in", "our", "sample", ":", "param", "clip_max", ":", "maximum", "value", "for", "a", "feature", "in", "our", "sample", ":", "return", ":", "a", "perturbed", "input", "feature", "matrix", "for", "a", "target", "class" ]
python
train
39.6
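Despite the "TensorFlow implementation" wording, the record above is plain NumPy clipping; a tiny demonstration of the increase branch:

import numpy as np

X = np.array([[0.2, 0.9, 0.5]])
theta, clip_max = 0.3, 1.0
for idx in (0, 1):  # bump two selected features, saturating at clip_max
    X[0, idx] = np.minimum(clip_max, X[0, idx] + theta)
print(X)  # [[0.5 1.  0.5]]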
openstack/proliantutils
proliantutils/redfish/resources/manager/virtual_media.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/manager/virtual_media.py#L30-L34
def _get_media(media_types): """Helper method to map the media types.""" get_mapped_media = (lambda x: maps.VIRTUAL_MEDIA_TYPES_MAP[x] if x in maps.VIRTUAL_MEDIA_TYPES_MAP else None) return list(map(get_mapped_media, media_types))
[ "def", "_get_media", "(", "media_types", ")", ":", "get_mapped_media", "=", "(", "lambda", "x", ":", "maps", ".", "VIRTUAL_MEDIA_TYPES_MAP", "[", "x", "]", "if", "x", "in", "maps", ".", "VIRTUAL_MEDIA_TYPES_MAP", "else", "None", ")", "return", "list", "(", "map", "(", "get_mapped_media", ",", "media_types", ")", ")" ]
Helper method to map the media types.
[ "Helper", "method", "to", "map", "the", "media", "types", "." ]
python
train
52.4
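The lambda-plus-map in _get_media is equivalent to dict.get with its default of None; a sketch with hypothetical mapping values:

VIRTUAL_MEDIA_TYPES_MAP = {'cd': 'CD', 'floppy': 'Floppy'}  # hypothetical entries

def get_media(media_types):
    return [VIRTUAL_MEDIA_TYPES_MAP.get(m) for m in media_types]

print(get_media(['cd', 'usb']))  # ['CD', None] -- unknown types map to None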
jopohl/urh
src/urh/plugins/MessageBreak/MessageBreakAction.py
https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/plugins/MessageBreak/MessageBreakAction.py#L33-L54
def __get_zero_seq_indexes(self, message: str, following_zeros: int): """ :rtype: list[tuple of int] """ result = [] if following_zeros > len(message): return result zero_counter = 0 for i in range(0, len(message)): if message[i] == "0": zero_counter += 1 else: if zero_counter >= following_zeros: result.append((i - zero_counter, i)) zero_counter = 0 if zero_counter >= following_zeros: result.append((len(message) - 1 - following_zeros, len(message) - 1)) return result
[ "def", "__get_zero_seq_indexes", "(", "self", ",", "message", ":", "str", ",", "following_zeros", ":", "int", ")", ":", "result", "=", "[", "]", "if", "following_zeros", ">", "len", "(", "message", ")", ":", "return", "result", "zero_counter", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "message", ")", ")", ":", "if", "message", "[", "i", "]", "==", "\"0\"", ":", "zero_counter", "+=", "1", "else", ":", "if", "zero_counter", ">=", "following_zeros", ":", "result", ".", "append", "(", "(", "i", "-", "zero_counter", ",", "i", ")", ")", "zero_counter", "=", "0", "if", "zero_counter", ">=", "following_zeros", ":", "result", ".", "append", "(", "(", "len", "(", "message", ")", "-", "1", "-", "following_zeros", ",", "len", "(", "message", ")", "-", "1", ")", ")", "return", "result" ]
:rtype: list[tuple of int]
[ ":", "rtype", ":", "list", "[", "tuple", "of", "int", "]" ]
python
train
29.181818
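A quick trace of the zero-run scanner above, re-implemented standalone with the same logic:

def zero_seq_indexes(message, following_zeros):
    result, zero_counter = [], 0
    for i, ch in enumerate(message):
        if ch == "0":
            zero_counter += 1
        else:
            if zero_counter >= following_zeros:
                result.append((i - zero_counter, i))
            zero_counter = 0
    if zero_counter >= following_zeros:
        result.append((len(message) - 1 - following_zeros, len(message) - 1))
    return result

print(zero_seq_indexes("1100011", 3))  # [(2, 5)]: three zeros end before index 5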
rigetti/quantumflow
quantumflow/backend/numpybk.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/backend/numpybk.py#L125-L128
def inner(tensor0: BKTensor, tensor1: BKTensor) -> BKTensor: """Return the inner product between two tensors""" # Note: Relying on fact that vdot flattens arrays return np.vdot(tensor0, tensor1)
[ "def", "inner", "(", "tensor0", ":", "BKTensor", ",", "tensor1", ":", "BKTensor", ")", "->", "BKTensor", ":", "# Note: Relying on fact that vdot flattens arrays", "return", "np", ".", "vdot", "(", "tensor0", ",", "tensor1", ")" ]
Return the inner product between two tensors
[ "Return", "the", "inner", "product", "between", "two", "tensors" ]
python
train
50.75
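Worth spelling out what "vdot flattens arrays" means in practice; note it also conjugates its first argument:

import numpy as np

a = np.array([[1 + 1j, 0], [0, 1]])
b = np.eye(2)
# vdot ravels both tensors and conjugates `a`: conj(1+1j)*1 + 1*1
print(np.vdot(a, b))  # (2-1j)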
user-cont/conu
conu/backend/podman/image.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/podman/image.py#L367-L380
def get_volume_options(volumes): """ Generates volume options to run methods. :param volumes: a tuple, or a list of tuples, of the form (target), (source, target) or (source, target, mode). :return: list of the form ["-v", "/source:/target", "-v", "/other/source:/destination:z", ...] """ if not isinstance(volumes, list): volumes = [volumes] volumes = [Volume.create_from_tuple(v) for v in volumes] result = [] for v in volumes: result += ["-v", str(v)] return result
[ "def", "get_volume_options", "(", "volumes", ")", ":", "if", "not", "isinstance", "(", "volumes", ",", "list", ")", ":", "volumes", "=", "[", "volumes", "]", "volumes", "=", "[", "Volume", ".", "create_from_tuple", "(", "v", ")", "for", "v", "in", "volumes", "]", "result", "=", "[", "]", "for", "v", "in", "volumes", ":", "result", "+=", "[", "\"-v\"", ",", "str", "(", "v", ")", "]", "return", "result" ]
Generates volume options to run methods. :param volumes: a tuple, or a list of tuples, of the form (target), (source, target) or (source, target, mode). :return: list of the form ["-v", "/source:/target", "-v", "/other/source:/destination:z", ...]
[ "Generates", "volume", "options", "to", "run", "methods", "." ]
python
train
38.642857
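The flattening loop above interleaves "-v" flags with volume specs; the same shape with plain strings standing in for Volume objects:

volumes = ["/src:/data:z", "/tmp/cache:/cache"]
result = []
for v in volumes:
    result += ["-v", v]
print(result)  # ['-v', '/src:/data:z', '-v', '/tmp/cache:/cache']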
SALib/SALib
src/SALib/analyze/morris.py
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/analyze/morris.py#L224-L258
def compute_elementary_effects(model_inputs, model_outputs, trajectory_size, delta): ''' Arguments --------- model_inputs : matrix of inputs to the model under analysis. r-by-x where x is the number of variables and r is the number of rows (a function of x and num_trajectories) model_outputs an r-length vector of model outputs trajectory_size a scalar indicating the number of rows in a trajectory delta : float scaling factor computed from `num_levels` ''' num_vars = model_inputs.shape[1] num_rows = model_inputs.shape[0] num_trajectories = int(num_rows / trajectory_size) ee = np.zeros((num_trajectories, num_vars), dtype=np.float) ip_vec = model_inputs.reshape(num_trajectories, trajectory_size, num_vars) ip_cha = np.subtract(ip_vec[:, 1:, :], ip_vec[:, 0:-1, :]) up = (ip_cha > 0) lo = (ip_cha < 0) op_vec = model_outputs.reshape(num_trajectories, trajectory_size) result_up = get_increased_values(op_vec, up, lo) result_lo = get_decreased_values(op_vec, up, lo) ee = np.subtract(result_up, result_lo) np.divide(ee, delta, out=ee) return ee
[ "def", "compute_elementary_effects", "(", "model_inputs", ",", "model_outputs", ",", "trajectory_size", ",", "delta", ")", ":", "num_vars", "=", "model_inputs", ".", "shape", "[", "1", "]", "num_rows", "=", "model_inputs", ".", "shape", "[", "0", "]", "num_trajectories", "=", "int", "(", "num_rows", "/", "trajectory_size", ")", "ee", "=", "np", ".", "zeros", "(", "(", "num_trajectories", ",", "num_vars", ")", ",", "dtype", "=", "np", ".", "float", ")", "ip_vec", "=", "model_inputs", ".", "reshape", "(", "num_trajectories", ",", "trajectory_size", ",", "num_vars", ")", "ip_cha", "=", "np", ".", "subtract", "(", "ip_vec", "[", ":", ",", "1", ":", ",", ":", "]", ",", "ip_vec", "[", ":", ",", "0", ":", "-", "1", ",", ":", "]", ")", "up", "=", "(", "ip_cha", ">", "0", ")", "lo", "=", "(", "ip_cha", "<", "0", ")", "op_vec", "=", "model_outputs", ".", "reshape", "(", "num_trajectories", ",", "trajectory_size", ")", "result_up", "=", "get_increased_values", "(", "op_vec", ",", "up", ",", "lo", ")", "result_lo", "=", "get_decreased_values", "(", "op_vec", ",", "up", ",", "lo", ")", "ee", "=", "np", ".", "subtract", "(", "result_up", ",", "result_lo", ")", "np", ".", "divide", "(", "ee", ",", "delta", ",", "out", "=", "ee", ")", "return", "ee" ]
Arguments --------- model_inputs : matrix of inputs to the model under analysis. r-by-x where x is the number of variables and r is the number of rows (a function of x and num_trajectories) model_outputs an r-length vector of model outputs trajectory_size a scalar indicating the number of rows in a trajectory delta : float scaling factor computed from `num_levels`
[ "Arguments", "---------", "model_inputs", ":", "matrix", "of", "inputs", "to", "the", "model", "under", "analysis", ".", "x", "-", "by", "-", "r", "where", "x", "is", "the", "number", "of", "variables", "and", "r", "is", "the", "number", "of", "rows", "(", "a", "function", "of", "x", "and", "num_trajectories", ")", "model_outputs", "an", "r", "-", "length", "vector", "of", "model", "outputs", "trajectory_size", "a", "scalar", "indicating", "the", "number", "of", "rows", "in", "a", "trajectory", "delta", ":", "float", "scaling", "factor", "computed", "from", "num_levels" ]
python
train
33.685714
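The heart of compute_elementary_effects is reshaping the flat sample matrix into trajectories and differencing consecutive rows; a toy illustration of that step:

import numpy as np

# 2 trajectories of 3 rows over 2 variables, stacked as Morris samples are
model_inputs = np.array([[0.0, 0.0], [0.5, 0.0], [0.5, 0.5],
                         [1.0, 1.0], [1.0, 0.5], [0.5, 0.5]])
ip_vec = model_inputs.reshape(2, 3, 2)          # (trajectories, rows, vars)
ip_cha = ip_vec[:, 1:, :] - ip_vec[:, 0:-1, :]  # per-step input changes
print(ip_cha[0])  # first trajectory: exactly one variable moves per step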
networks-lab/metaknowledge
metaknowledge/graphHelpers.py
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/graphHelpers.py#L691-L732
def mergeGraphs(targetGraph, addedGraph, incrementedNodeVal = 'count', incrementedEdgeVal = 'weight'): """A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method. **mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten. # Parameters _targetGraph_ : `networkx Graph` > the graph to be modified, it has precedence. _addedGraph_ : `networkx Graph` > the graph that is unmodified, it is added and does **not** have precedence. _incrementedNodeVal_ : `optional [str]` > default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. _incrementedEdgeVal_ : `optional [str]` > default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. """ for addedNode, attribs in addedGraph.nodes(data = True): if incrementedNodeVal: try: targetGraph.node[addedNode][incrementedNodeVal] += attribs[incrementedNodeVal] except KeyError: targetGraph.add_node(addedNode, **attribs) else: if not targetGraph.has_node(addedNode): targetGraph.add_node(addedNode, **attribs) for edgeNode1, edgeNode2, attribs in addedGraph.edges(data = True): if incrementedEdgeVal: try: targetGraph.edges[edgeNode1, edgeNode2][incrementedEdgeVal] += attribs[incrementedEdgeVal] except KeyError: targetGraph.add_edge(edgeNode1, edgeNode2, **attribs) else: if not targetGraph.has_edge(edgeNode1, edgeNode2): targetGraph.add_edge(edgeNode1, edgeNode2, **attribs)
[ "def", "mergeGraphs", "(", "targetGraph", ",", "addedGraph", ",", "incrementedNodeVal", "=", "'count'", ",", "incrementedEdgeVal", "=", "'weight'", ")", ":", "for", "addedNode", ",", "attribs", "in", "addedGraph", ".", "nodes", "(", "data", "=", "True", ")", ":", "if", "incrementedNodeVal", ":", "try", ":", "targetGraph", ".", "node", "[", "addedNode", "]", "[", "incrementedNodeVal", "]", "+=", "attribs", "[", "incrementedNodeVal", "]", "except", "KeyError", ":", "targetGraph", ".", "add_node", "(", "addedNode", ",", "*", "*", "attribs", ")", "else", ":", "if", "not", "targetGraph", ".", "has_node", "(", "addedNode", ")", ":", "targetGraph", ".", "add_node", "(", "addedNode", ",", "*", "*", "attribs", ")", "for", "edgeNode1", ",", "edgeNode2", ",", "attribs", "in", "addedGraph", ".", "edges", "(", "data", "=", "True", ")", ":", "if", "incrementedEdgeVal", ":", "try", ":", "targetGraph", ".", "edges", "[", "edgeNode1", ",", "edgeNode2", "]", "[", "incrementedEdgeVal", "]", "+=", "attribs", "[", "incrementedEdgeVal", "]", "except", "KeyError", ":", "targetGraph", ".", "add_edge", "(", "edgeNode1", ",", "edgeNode2", ",", "*", "*", "attribs", ")", "else", ":", "if", "not", "targetGraph", ".", "Graph", ".", "has_edge", "(", "edgeNode1", ",", "edgeNode2", ")", ":", "targetGraph", ".", "add_edge", "(", "edgeNode1", ",", "edgeNode2", ",", "*", "*", "attribs", ")" ]
A quick way of merging graphs, this is meant to be quick and is only intended for graphs generated by metaknowledge. This does not check anything and as such may cause unexpected results if the source and target were not generated by the same method. **mergeGraphs**() will **modify** _targetGraph_ in place by adding the nodes and edges found in the second, _addedGraph_. If a node or edge exists _targetGraph_ is given precedence, but the edge and node attributes given by _incrementedNodeVal_ and incrementedEdgeVal are added instead of being overwritten. # Parameters _targetGraph_ : `networkx Graph` > the graph to be modified, it has precedence. _addedGraph_ : `networkx Graph` > the graph that is unmodified, it is added and does **not** have precedence. _incrementedNodeVal_ : `optional [str]` > default `'count'`, the name of the count attribute for the graph's nodes. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value. _incrementedEdgeVal_ : `optional [str]` > default `'weight'`, the name of the weight attribute for the graph's edges. When merging this attribute will be the sum of the values in the input graphs, instead of _targetGraph_'s value.
[ "A", "quick", "way", "of", "merging", "graphs", "this", "is", "meant", "to", "be", "quick", "and", "is", "only", "intended", "for", "graphs", "generated", "by", "metaknowledge", ".", "This", "does", "not", "check", "anything", "and", "as", "such", "may", "cause", "unexpected", "results", "if", "the", "source", "and", "target", "were", "not", "generated", "by", "the", "same", "method", "." ]
python
train
53.452381
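A usage sketch for mergeGraphs; note the record targets the older networkx API where Graph.node exists (networkx < 2.4):

import networkx as nx  # assumes networkx < 2.4 for the .node attribute

g1 = nx.Graph()
g1.add_node('a', count=2)
g2 = nx.Graph()
g2.add_node('a', count=3)
g2.add_node('b', count=1)
mergeGraphs(g1, g2)
# g1 now holds 'a' with count == 5 (summed) and 'b' with count == 1 (copied)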
bigchaindb/bigchaindb
bigchaindb/core.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/core.py#L173-L193
def deliver_tx(self, raw_transaction): """Validate the transaction before mutating the state. Args: raw_transaction: a raw string (in bytes) transaction. """ self.abort_if_abci_chain_is_not_synced() logger.debug('deliver_tx: %s', raw_transaction) transaction = self.bigchaindb.is_valid_transaction( decode_transaction(raw_transaction), self.block_transactions) if not transaction: logger.debug('deliver_tx: INVALID') return ResponseDeliverTx(code=CodeTypeError) else: logger.debug('storing tx') self.block_txn_ids.append(transaction.id) self.block_transactions.append(transaction) return ResponseDeliverTx(code=CodeTypeOk)
[ "def", "deliver_tx", "(", "self", ",", "raw_transaction", ")", ":", "self", ".", "abort_if_abci_chain_is_not_synced", "(", ")", "logger", ".", "debug", "(", "'deliver_tx: %s'", ",", "raw_transaction", ")", "transaction", "=", "self", ".", "bigchaindb", ".", "is_valid_transaction", "(", "decode_transaction", "(", "raw_transaction", ")", ",", "self", ".", "block_transactions", ")", "if", "not", "transaction", ":", "logger", ".", "debug", "(", "'deliver_tx: INVALID'", ")", "return", "ResponseDeliverTx", "(", "code", "=", "CodeTypeError", ")", "else", ":", "logger", ".", "debug", "(", "'storing tx'", ")", "self", ".", "block_txn_ids", ".", "append", "(", "transaction", ".", "id", ")", "self", ".", "block_transactions", ".", "append", "(", "transaction", ")", "return", "ResponseDeliverTx", "(", "code", "=", "CodeTypeOk", ")" ]
Validate the transaction before mutating the state. Args: raw_transaction: a raw string (in bytes) transaction.
[ "Validate", "the", "transaction", "before", "mutating", "the", "state", "." ]
python
train
36.047619
deep-compute/deeputil
deeputil/misc.py
https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L223-L259
def deepgetattr(obj, attr, default=AttributeError): """ Recurses through an attribute chain to get the ultimate value (obj/data/member/value) from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html >>> class Universe(object): ... def __init__(self, galaxy): ... self.galaxy = galaxy ... >>> class Galaxy(object): ... def __init__(self, solarsystem): ... self.solarsystem = solarsystem ... >>> class SolarSystem(object): ... def __init__(self, planet): ... self.planet = planet ... >>> class Planet(object): ... def __init__(self, name): ... self.name = name ... >>> universe = Universe(Galaxy(SolarSystem(Planet('Earth')))) >>> deepgetattr(universe, 'galaxy.solarsystem.planet.name') 'Earth' >>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError) <class 'TypeError'> """ try: return reduce(getattr, attr.split('.'), obj) except AttributeError: if default is not AttributeError: return default raise
[ "def", "deepgetattr", "(", "obj", ",", "attr", ",", "default", "=", "AttributeError", ")", ":", "try", ":", "return", "reduce", "(", "getattr", ",", "attr", ".", "split", "(", "'.'", ")", ",", "obj", ")", "except", "AttributeError", ":", "if", "default", "is", "not", "AttributeError", ":", "return", "default", "raise" ]
Recurses through an attribute chain to get the ultimate value (obj/data/member/value) from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html >>> class Universe(object): ... def __init__(self, galaxy): ... self.galaxy = galaxy ... >>> class Galaxy(object): ... def __init__(self, solarsystem): ... self.solarsystem = solarsystem ... >>> class SolarSystem(object): ... def __init__(self, planet): ... self.planet = planet ... >>> class Planet(object): ... def __init__(self, name): ... self.name = name ... >>> universe = Universe(Galaxy(SolarSystem(Planet('Earth')))) >>> deepgetattr(universe, 'galaxy.solarsystem.planet.name') 'Earth' >>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError) <class 'TypeError'>
[ "Recurses", "through", "an", "attribute", "chain", "to", "get", "the", "ultimate", "value", "(", "obj", "/", "data", "/", "member", "/", "value", ")", "from", ":", "http", ":", "//", "pingfive", ".", "typepad", ".", "com", "/", "blog", "/", "2010", "/", "04", "/", "deep", "-", "getattr", "-", "python", "-", "function", ".", "html" ]
python
train
30.189189
biolink/ontobio
ontobio/golr/golr_query.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_query.py#L1669-L1679
def run_solr_text_on(solrInstance, category, q, qf, fields, optionals): """ Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id """ if optionals == None: optionals = "" query = solrInstance.value + "select?q=" + q + "&qf=" + qf + "&fq=document_category:\"" + category.value + "\"&fl=" + fields + "&wt=json&indent=on" + optionals # print("QUERY: ", query) response = requests.get(query) return response.json()['response']['docs']
[ "def", "run_solr_text_on", "(", "solrInstance", ",", "category", ",", "q", ",", "qf", ",", "fields", ",", "optionals", ")", ":", "if", "optionals", "==", "None", ":", "optionals", "=", "\"\"", "query", "=", "solrInstance", ".", "value", "+", "\"select?q=\"", "+", "q", "+", "\"&qf=\"", "+", "qf", "+", "\"&fq=document_category:\\\"\"", "+", "category", ".", "value", "+", "\"\\\"&fl=\"", "+", "fields", "+", "\"&wt=json&indent=on\"", "+", "optionals", "# print(\"QUERY: \", query)", "response", "=", "requests", ".", "get", "(", "query", ")", "return", "response", ".", "json", "(", ")", "[", "'response'", "]", "[", "'docs'", "]" ]
Return the result of a solr query on the given solrInstance (Enum ESOLR), for a certain document_category (ESOLRDoc) and id
[ "Return", "the", "result", "of", "a", "solr", "query", "on", "the", "given", "solrInstance", "(", "Enum", "ESOLR", ")", "for", "a", "certain", "document_category", "(", "ESOLRDoc", ")", "and", "id" ]
python
train
48.272727
scivision/pymap3d
pymap3d/azelradec.py
https://github.com/scivision/pymap3d/blob/c9cf676594611cdb52ff7e0eca6388c80ed4f63f/pymap3d/azelradec.py#L59-L105
def radec2azel(ra_deg: float, dec_deg: float, lat_deg: float, lon_deg: float, time: datetime, usevallado: bool = False) -> Tuple[float, float]: """ sky coordinates (ra, dec) to viewing angle (az, el) Parameters ---------- ra_deg : float or numpy.ndarray of float ecliptic right ascension (degrees) dec_deg : float or numpy.ndarray of float ecliptic declination (degrees) lat_deg : float observer latitude [-90, 90] lon_deg : float observer longitude [-180, 180] (degrees) time : datetime.datetime time of observation usevallado : bool, optional default use astropy. If true, use Vallado algorithm Returns ------- az_deg : float or numpy.ndarray of float azimuth [degrees clockwise from North] el_deg : float or numpy.ndarray of float elevation [degrees above horizon (neglecting aberration)] """ if usevallado or Time is None: return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time) # %% input trapping lat = np.atleast_1d(lat_deg) lon = np.atleast_1d(lon_deg) ra = np.atleast_1d(ra_deg) dec = np.atleast_1d(dec_deg) obs = EarthLocation(lat=lat * u.deg, lon=lon * u.deg) points = SkyCoord(Angle(ra, unit=u.deg), Angle(dec, unit=u.deg), equinox='J2000.0') altaz = points.transform_to(AltAz(location=obs, obstime=Time(str2dt(time)))) return altaz.az.degree, altaz.alt.degree
[ "def", "radec2azel", "(", "ra_deg", ":", "float", ",", "dec_deg", ":", "float", ",", "lat_deg", ":", "float", ",", "lon_deg", ":", "float", ",", "time", ":", "datetime", ",", "usevallado", ":", "bool", "=", "False", ")", "->", "Tuple", "[", "float", ",", "float", "]", ":", "if", "usevallado", "or", "Time", "is", "None", ":", "return", "vradec2azel", "(", "ra_deg", ",", "dec_deg", ",", "lat_deg", ",", "lon_deg", ",", "time", ")", "# %% input trapping", "lat", "=", "np", ".", "atleast_1d", "(", "lat_deg", ")", "lon", "=", "np", ".", "atleast_1d", "(", "lon_deg", ")", "ra", "=", "np", ".", "atleast_1d", "(", "ra_deg", ")", "dec", "=", "np", ".", "atleast_1d", "(", "dec_deg", ")", "obs", "=", "EarthLocation", "(", "lat", "=", "lat", "*", "u", ".", "deg", ",", "lon", "=", "lon", "*", "u", ".", "deg", ")", "points", "=", "SkyCoord", "(", "Angle", "(", "ra", ",", "unit", "=", "u", ".", "deg", ")", ",", "Angle", "(", "dec", ",", "unit", "=", "u", ".", "deg", ")", ",", "equinox", "=", "'J2000.0'", ")", "altaz", "=", "points", ".", "transform_to", "(", "AltAz", "(", "location", "=", "obs", ",", "obstime", "=", "Time", "(", "str2dt", "(", "time", ")", ")", ")", ")", "return", "altaz", ".", "az", ".", "degree", ",", "altaz", ".", "alt", ".", "degree" ]
sky coordinates (ra, dec) to viewing angle (az, el) Parameters ---------- ra_deg : float or numpy.ndarray of float ecliptic right ascension (degrees) dec_deg : float or numpy.ndarray of float ecliptic declination (degrees) lat_deg : float observer latitude [-90, 90] lon_deg : float observer longitude [-180, 180] (degrees) time : datetime.datetime time of observation usevallado : bool, optional default use astropy. If true, use Vallado algorithm Returns ------- az_deg : float or numpy.ndarray of float azimuth [degrees clockwise from North] el_deg : float or numpy.ndarray of float elevation [degrees above horizon (neglecting aberration)]
[ "sky", "coordinates", "(", "ra", "dec", ")", "to", "viewing", "angle", "(", "az", "el", ")" ]
python
train
32.744681
dswah/pyGAM
pygam/pygam.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2164-L2191
def _simulate_coef_from_bootstraps( self, n_draws, coef_bootstraps, cov_bootstraps): """Simulate coefficients using bootstrap samples.""" # Sample indices uniformly from {0, ..., n_bootstraps - 1} # (Wood pg. 199 step 6) random_bootstrap_indices = np.random.choice( np.arange(len(coef_bootstraps)), size=n_draws, replace=True) # Simulate `n_draws` many random coefficient vectors from a # multivariate normal distribution with mean and covariance given by # the bootstrap samples (indexed by `random_bootstrap_indices`) of # `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw # many samples from a certain distribution all at once, we make a dict # mapping bootstrap indices to draw indices and use the `size` # parameter of `np.random.multivariate_normal` to sample the draws # needed from that bootstrap sample all at once. bootstrap_index_to_draw_indices = defaultdict(list) for draw_index, bootstrap_index in enumerate(random_bootstrap_indices): bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index) coef_draws = np.empty((n_draws, len(self.coef_))) for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items(): coef_draws[draw_indices] = np.random.multivariate_normal( coef_bootstraps[bootstrap], cov_bootstraps[bootstrap], size=len(draw_indices)) return coef_draws
[ "def", "_simulate_coef_from_bootstraps", "(", "self", ",", "n_draws", ",", "coef_bootstraps", ",", "cov_bootstraps", ")", ":", "# Sample indices uniformly from {0, ..., n_bootstraps - 1}", "# (Wood pg. 199 step 6)", "random_bootstrap_indices", "=", "np", ".", "random", ".", "choice", "(", "np", ".", "arange", "(", "len", "(", "coef_bootstraps", ")", ")", ",", "size", "=", "n_draws", ",", "replace", "=", "True", ")", "# Simulate `n_draws` many random coefficient vectors from a", "# multivariate normal distribution with mean and covariance given by", "# the bootstrap samples (indexed by `random_bootstrap_indices`) of", "# `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw", "# many samples from a certain distribution all at once, we make a dict", "# mapping bootstrap indices to draw indices and use the `size`", "# parameter of `np.random.multivariate_normal` to sample the draws", "# needed from that bootstrap sample all at once.", "bootstrap_index_to_draw_indices", "=", "defaultdict", "(", "list", ")", "for", "draw_index", ",", "bootstrap_index", "in", "enumerate", "(", "random_bootstrap_indices", ")", ":", "bootstrap_index_to_draw_indices", "[", "bootstrap_index", "]", ".", "append", "(", "draw_index", ")", "coef_draws", "=", "np", ".", "empty", "(", "(", "n_draws", ",", "len", "(", "self", ".", "coef_", ")", ")", ")", "for", "bootstrap", ",", "draw_indices", "in", "bootstrap_index_to_draw_indices", ".", "items", "(", ")", ":", "coef_draws", "[", "draw_indices", "]", "=", "np", ".", "random", ".", "multivariate_normal", "(", "coef_bootstraps", "[", "bootstrap", "]", ",", "cov_bootstraps", "[", "bootstrap", "]", ",", "size", "=", "len", "(", "draw_indices", ")", ")", "return", "coef_draws" ]
Simulate coefficients using bootstrap samples.
[ "Simulate", "coefficients", "using", "bootstrap", "samples", "." ]
python
train
53.714286
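The batching trick in the record above -- grouping draw indices by bootstrap index so each multivariate normal is sampled only once -- in isolation:

from collections import defaultdict

random_bootstrap_indices = [0, 1, 0, 2, 1]  # one bootstrap index per draw
groups = defaultdict(list)
for draw_index, bootstrap_index in enumerate(random_bootstrap_indices):
    groups[bootstrap_index].append(draw_index)
print(dict(groups))  # {0: [0, 2], 1: [1, 4], 2: [3]}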
inspirehep/harvesting-kit
harvestingkit/utils.py
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/utils.py#L262-L269
def run_shell_command(commands, **kwargs): """Run a shell command.""" p = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) output, error = p.communicate() return p.returncode, output, error
[ "def", "run_shell_command", "(", "commands", ",", "*", "*", "kwargs", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "commands", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "*", "*", "kwargs", ")", "output", ",", "error", "=", "p", ".", "communicate", "(", ")", "return", "p", ".", "returncode", ",", "output", ",", "error" ]
Run a shell command.
[ "Run", "a", "shell", "command", "." ]
python
valid
38.625
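Typical call shape for run_shell_command; output and error arrive as bytes unless text-mode kwargs are passed through:

returncode, output, error = run_shell_command(['echo', 'hello'])
print(returncode, output)  # 0 b'hello\n'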
saltstack/salt
salt/modules/postgres.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L553-L607
def db_create(name, user=None, host=None, port=None, maintenance_db=None, password=None, tablespace=None, encoding=None, lc_collate=None, lc_ctype=None, owner=None, template=None, runas=None): ''' Adds a database to the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.db_create 'dbname' salt '*' postgres.db_create 'dbname' template=template_postgis ''' # Base query to create a database query = 'CREATE DATABASE "{0}"'.format(name) # "With"-options to create a database with_args = salt.utils.odict.OrderedDict([ ('TABLESPACE', _quote_ddl_value(tablespace, '"')), # owner needs to be enclosed in double quotes so postgres # doesn't get thrown by dashes in the name ('OWNER', _quote_ddl_value(owner, '"')), ('TEMPLATE', template), ('ENCODING', _quote_ddl_value(encoding)), ('LC_COLLATE', _quote_ddl_value(lc_collate)), ('LC_CTYPE', _quote_ddl_value(lc_ctype)), ]) with_chunks = [] for key, value in with_args.items(): if value is not None: with_chunks += [key, '=', value] # Build a final query if with_chunks: with_chunks.insert(0, ' WITH') query += ' '.join(with_chunks) # Execute the command ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0
[ "def", "db_create", "(", "name", ",", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ",", "tablespace", "=", "None", ",", "encoding", "=", "None", ",", "lc_collate", "=", "None", ",", "lc_ctype", "=", "None", ",", "owner", "=", "None", ",", "template", "=", "None", ",", "runas", "=", "None", ")", ":", "# Base query to create a database", "query", "=", "'CREATE DATABASE \"{0}\"'", ".", "format", "(", "name", ")", "# \"With\"-options to create a database", "with_args", "=", "salt", ".", "utils", ".", "odict", ".", "OrderedDict", "(", "[", "(", "'TABLESPACE'", ",", "_quote_ddl_value", "(", "tablespace", ",", "'\"'", ")", ")", ",", "# owner needs to be enclosed in double quotes so postgres", "# doesn't get thrown by dashes in the name", "(", "'OWNER'", ",", "_quote_ddl_value", "(", "owner", ",", "'\"'", ")", ")", ",", "(", "'TEMPLATE'", ",", "template", ")", ",", "(", "'ENCODING'", ",", "_quote_ddl_value", "(", "encoding", ")", ")", ",", "(", "'LC_COLLATE'", ",", "_quote_ddl_value", "(", "lc_collate", ")", ")", ",", "(", "'LC_CTYPE'", ",", "_quote_ddl_value", "(", "lc_ctype", ")", ")", ",", "]", ")", "with_chunks", "=", "[", "]", "for", "key", ",", "value", "in", "with_args", ".", "items", "(", ")", ":", "if", "value", "is", "not", "None", ":", "with_chunks", "+=", "[", "key", ",", "'='", ",", "value", "]", "# Build a final query", "if", "with_chunks", ":", "with_chunks", ".", "insert", "(", "0", ",", "' WITH'", ")", "query", "+=", "' '", ".", "join", "(", "with_chunks", ")", "# Execute the command", "ret", "=", "_psql_prepare_and_run", "(", "[", "'-c'", ",", "query", "]", ",", "user", "=", "user", ",", "host", "=", "host", ",", "port", "=", "port", ",", "maintenance_db", "=", "maintenance_db", ",", "password", "=", "password", ",", "runas", "=", "runas", ")", "return", "ret", "[", "'retcode'", "]", "==", "0" ]
Adds a database to the Postgres server. CLI Example: .. code-block:: bash salt '*' postgres.db_create 'dbname' salt '*' postgres.db_create 'dbname' template=template_postgis
[ "Adds", "a", "databases", "to", "the", "Postgres", "server", "." ]
python
train
30.618182
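The WITH-clause assembly in db_create, demonstrated with pre-quoted stand-in values (the real quoting comes from _quote_ddl_value, which the record does not show):

from collections import OrderedDict

query = 'CREATE DATABASE "mydb"'
with_args = OrderedDict([('OWNER', '"bob"'), ('TEMPLATE', None),
                         ('ENCODING', "'UTF8'")])
with_chunks = []
for key, value in with_args.items():
    if value is not None:
        with_chunks += [key, '=', value]
if with_chunks:
    with_chunks.insert(0, ' WITH')
    query += ' '.join(with_chunks)
print(query)  # CREATE DATABASE "mydb" WITH OWNER = "bob" ENCODING = 'UTF8'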
python-bugzilla/python-bugzilla
bugzilla/bug.py
https://github.com/python-bugzilla/python-bugzilla/blob/7de8b225104f24a1eee3e837bf1e02d60aefe69f/bugzilla/bug.py#L282-L292
def addcomment(self, comment, private=False): """ Add the given comment to this bug. Set private to True to mark this comment as private. """ # Note: fedora bodhi uses this function vals = self.bugzilla.build_update(comment=comment, comment_private=private) log.debug("addcomment: update=%s", vals) return self.bugzilla.update_bugs(self.bug_id, vals)
[ "def", "addcomment", "(", "self", ",", "comment", ",", "private", "=", "False", ")", ":", "# Note: fedora bodhi uses this function", "vals", "=", "self", ".", "bugzilla", ".", "build_update", "(", "comment", "=", "comment", ",", "comment_private", "=", "private", ")", "log", ".", "debug", "(", "\"addcomment: update=%s\"", ",", "vals", ")", "return", "self", ".", "bugzilla", ".", "update_bugs", "(", "self", ".", "bug_id", ",", "vals", ")" ]
Add the given comment to this bug. Set private to True to mark this comment as private.
[ "Add", "the", "given", "comment", "to", "this", "bug", ".", "Set", "private", "to", "True", "to", "mark", "this", "comment", "as", "private", "." ]
python
train
40.636364
saltstack/salt
salt/modules/boto_cloudwatch.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_cloudwatch.py#L103-L124
def _safe_dump(data): ''' this presenter magic makes yaml.safe_dump work with the objects returned from boto.describe_alarms() ''' custom_dumper = __utils__['yaml.get_dumper']('SafeOrderedDumper') def boto_listelement_presenter(dumper, data): return dumper.represent_list(list(data)) yaml.add_representer(boto.ec2.cloudwatch.listelement.ListElement, boto_listelement_presenter, Dumper=custom_dumper) def dimension_presenter(dumper, data): return dumper.represent_dict(dict(data)) yaml.add_representer(boto.ec2.cloudwatch.dimension.Dimension, dimension_presenter, Dumper=custom_dumper) return __utils__['yaml.dump'](data, Dumper=custom_dumper)
[ "def", "_safe_dump", "(", "data", ")", ":", "custom_dumper", "=", "__utils__", "[", "'yaml.get_dumper'", "]", "(", "'SafeOrderedDumper'", ")", "def", "boto_listelement_presenter", "(", "dumper", ",", "data", ")", ":", "return", "dumper", ".", "represent_list", "(", "list", "(", "data", ")", ")", "yaml", ".", "add_representer", "(", "boto", ".", "ec2", ".", "cloudwatch", ".", "listelement", ".", "ListElement", ",", "boto_listelement_presenter", ",", "Dumper", "=", "custom_dumper", ")", "def", "dimension_presenter", "(", "dumper", ",", "data", ")", ":", "return", "dumper", ".", "represent_dict", "(", "dict", "(", "data", ")", ")", "yaml", ".", "add_representer", "(", "boto", ".", "ec2", ".", "cloudwatch", ".", "dimension", ".", "Dimension", ",", "dimension_presenter", ",", "Dumper", "=", "custom_dumper", ")", "return", "__utils__", "[", "'yaml.dump'", "]", "(", "data", ",", "Dumper", "=", "custom_dumper", ")" ]
this presenter magic makes yaml.safe_dump work with the objects returned from boto.describe_alarms()
[ "this", "presenter", "magic", "makes", "yaml", ".", "safe_dump", "work", "with", "the", "objects", "returned", "from", "boto", ".", "describe_alarms", "()" ]
python
train
34.590909
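The representer-registration pattern used by _safe_dump, shown with a plain custom type instead of the boto classes:

import yaml

class Point(tuple):
    """A custom type PyYAML cannot serialize out of the box."""

def point_presenter(dumper, data):
    return dumper.represent_list(list(data))

yaml.add_representer(Point, point_presenter)
print(yaml.dump(Point((1, 2))))  # "- 1\n- 2\n"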
liminspace/dju-image
dju_image/tools.py
https://github.com/liminspace/dju-image/blob/b06eb3be2069cd6cb52cf1e26c2c761883142d4e/dju_image/tools.py#L251-L257
def remove_tmp_prefix_from_filename(filename): """ Remove tmp prefix from filename. """ if not filename.startswith(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX): raise RuntimeError(ERROR_MESSAGES['filename_hasnt_tmp_prefix'] % {'filename': filename}) return filename[len(dju_settings.DJU_IMG_UPLOAD_TMP_PREFIX):]
[ "def", "remove_tmp_prefix_from_filename", "(", "filename", ")", ":", "if", "not", "filename", ".", "startswith", "(", "dju_settings", ".", "DJU_IMG_UPLOAD_TMP_PREFIX", ")", ":", "raise", "RuntimeError", "(", "ERROR_MESSAGES", "[", "'filename_hasnt_tmp_prefix'", "]", "%", "{", "'filename'", ":", "filename", "}", ")", "return", "filename", "[", "len", "(", "dju_settings", ".", "DJU_IMG_UPLOAD_TMP_PREFIX", ")", ":", "]" ]
Remove tmp prefix from filename.
[ "Remove", "tmp", "prefix", "from", "filename", "." ]
python
train
46.857143
elyase/masstable
masstable/masstable.py
https://github.com/elyase/masstable/blob/3eb72b22cd3337bc5c6bb95bb7bb73fdbe6ae9e2/masstable/masstable.py#L70-L89
def from_ZNM(cls, Z, N, M, name=''): """ Creates a table from arrays Z, N and M Example: ________ >>> Z = [82, 82, 83] >>> N = [126, 127, 130] >>> M = [-21.34, -18.0, -14.45] >>> Table.from_ZNM(Z, N, M, name='Custom Table') Z N 82 126 -21.34 127 -18.00 83 130 -14.45 Name: Custom Table, dtype: float64 """ df = pd.DataFrame.from_dict({'Z': Z, 'N': N, 'M': M}).set_index(['Z', 'N'])['M'] df.name = name return cls(df=df, name=name)
[ "def", "from_ZNM", "(", "cls", ",", "Z", ",", "N", ",", "M", ",", "name", "=", "''", ")", ":", "df", "=", "pd", ".", "DataFrame", ".", "from_dict", "(", "{", "'Z'", ":", "Z", ",", "'N'", ":", "N", ",", "'M'", ":", "M", "}", ")", ".", "set_index", "(", "[", "'Z'", ",", "'N'", "]", ")", "[", "'M'", "]", "df", ".", "name", "=", "name", "return", "cls", "(", "df", "=", "df", ",", "name", "=", "name", ")" ]
Creates a table from arrays Z, N and M Example: ________ >>> Z = [82, 82, 83] >>> N = [126, 127, 130] >>> M = [-21.34, -18.0, -14.45] >>> Table.from_ZNM(Z, N, M, name='Custom Table') Z N 82 126 -21.34 127 -18.00 83 130 -14.45 Name: Custom Table, dtype: float64
[ "Creates", "a", "table", "from", "arrays", "Z", "N", "and", "M" ]
python
test
28.15
bioasp/iggy
src/profile_parser.py
https://github.com/bioasp/iggy/blob/451dee74f277d822d64cf8f3859c94b2f2b6d4db/src/profile_parser.py#L108-L110
def p_plus_assignment(self, t): '''plus_assignment : IDENT EQ PLUS''' self.accu.add(Term('obs_vlabel', [self.name,"gen(\""+t[1]+"\")","1"]))
[ "def", "p_plus_assignment", "(", "self", ",", "t", ")", ":", "self", ".", "accu", ".", "add", "(", "Term", "(", "'obs_vlabel'", ",", "[", "self", ".", "name", ",", "\"gen(\\\"\"", "+", "t", "[", "1", "]", "+", "\"\\\")\"", ",", "\"1\"", "]", ")", ")" ]
plus_assignment : IDENT EQ PLUS
[ "plus_assignment", ":", "IDENT", "EQ", "PLUS" ]
python
train
48.666667
linuxwhatelse/mapper
mapper.py
https://github.com/linuxwhatelse/mapper/blob/3481715b2a36d2da8bf5e9c6da80ceaed0d7ca59/mapper.py#L64-L84
def url(self, pattern, method=None, type_cast=None): """Decorator for registering a path pattern. Args: pattern (str): Regex pattern to match a certain path method (str, optional): Usually used to define one of GET, POST, PUT, DELETE. You may use whatever fits your situation though. Defaults to None. type_cast (dict, optional): Mapping between the param name and one of `int`, `float` or `bool`. The value reflected by the provided param name will then be cast to the given type. Defaults to None. """ if not type_cast: type_cast = {} def decorator(function): self.add(pattern, function, method, type_cast) return function return decorator
[ "def", "url", "(", "self", ",", "pattern", ",", "method", "=", "None", ",", "type_cast", "=", "None", ")", ":", "if", "not", "type_cast", ":", "type_cast", "=", "{", "}", "def", "decorator", "(", "function", ")", ":", "self", ".", "add", "(", "pattern", ",", "function", ",", "method", ",", "type_cast", ")", "return", "function", "return", "decorator" ]
Decorator for registering a path pattern. Args: pattern (str): Regex pattern to match a certain path method (str, optional): Usually used to define one of GET, POST, PUT, DELETE. You may use whatever fits your situation though. Defaults to None. type_cast (dict, optional): Mapping between the param name and one of `int`, `float` or `bool`. The value reflected by the provided param name will then be cast to the given type. Defaults to None.
[ "Decorator", "for", "registering", "a", "path", "pattern", "." ]
python
test
39.333333
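How such a decorator is typically used; the instance name, pattern, and handler below are made up for illustration:

mpr = Mapper()  # hypothetical instance of the class defining url()

@mpr.url(r'^/user/(?P<user_id>[0-9]+)$', method='GET',
         type_cast={'user_id': int})
def show_user(user_id):
    # user_id arrives as int thanks to type_cast
    return {'id': user_id}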
HDI-Project/ballet
ballet/util/io.py
https://github.com/HDI-Project/ballet/blob/6f4d4b87b8234cb6bb38b9e9484a58ef8fe8fdb2/ballet/util/io.py#L118-L130
def load_table_from_config(input_dir, config): """Load table from table config dict Args: input_dir (path-like): directory containing input files config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'. Returns: pd.DataFrame """ path = pathlib.Path(input_dir).joinpath(config['path']) kwargs = config['pd_read_kwargs'] return pd.read_csv(path, **kwargs)
[ "def", "load_table_from_config", "(", "input_dir", ",", "config", ")", ":", "path", "=", "pathlib", ".", "Path", "(", "input_dir", ")", ".", "joinpath", "(", "config", "[", "'path'", "]", ")", "kwargs", "=", "config", "[", "'pd_read_kwargs'", "]", "return", "pd", ".", "read_csv", "(", "path", ",", "*", "*", "kwargs", ")" ]
Load table from table config dict Args: input_dir (path-like): directory containing input files config (dict): mapping with keys 'name', 'path', and 'pd_read_kwargs'. Returns: pd.DataFrame
[ "Load", "table", "from", "table", "config", "dict" ]
python
train
31.461538
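A config dict matching the keys load_table_from_config expects; the values are illustrative:

config = {
    'name': 'train',  # present in the documented mapping, unused by the read itself
    'path': 'train.csv',
    'pd_read_kwargs': {'sep': ',', 'usecols': ['id', 'target']},
}
df = load_table_from_config('/data/input', config)  # reads /data/input/train.csv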
todddeluca/dones
dones.py
https://github.com/todddeluca/dones/blob/6ef56565556987e701fed797a405f0825fe2e15a/dones.py#L64-L73
def _get_k(self): ''' Accessing self.k indirectly allows for creating the kvstore table if necessary. ''' if not self.ready: self.k.create() # create table if it does not exist. self.ready = True return self.k
[ "def", "_get_k", "(", "self", ")", ":", "if", "not", "self", ".", "ready", ":", "self", ".", "k", ".", "create", "(", ")", "# create table if it does not exist.", "self", ".", "ready", "=", "True", "return", "self", ".", "k" ]
Accessing self.k indirectly allows for creating the kvstore table if necessary.
[ "Accessing", "self", ".", "k", "indirectly", "allows", "for", "creating", "the", "kvstore", "table", "if", "necessary", "." ]
python
train
27.3
polyaxon/polyaxon
polyaxon/scopes/permissions/projects.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/scopes/permissions/projects.py#L19-L26
def has_project_permissions(user: 'User', project: 'Project', request_method: str) -> bool: """This logic is extracted here to be used also with Sanic api.""" # Superusers and the creator is allowed to do everything if user.is_staff or user.is_superuser or project.user == user: return True # Other user return request_method in permissions.SAFE_METHODS and project.is_public
[ "def", "has_project_permissions", "(", "user", ":", "'User'", ",", "project", ":", "'Project'", ",", "request_method", ":", "str", ")", "->", "bool", ":", "# Superusers and the creator is allowed to do everything", "if", "user", ".", "is_staff", "or", "user", ".", "is_superuser", "or", "project", ".", "user", "==", "user", ":", "return", "True", "# Other user", "return", "request_method", "in", "permissions", ".", "SAFE_METHODS", "and", "project", ".", "is_public" ]
This logic is extracted here to be used also with Sanic api.
[ "This", "logic", "is", "extracted", "here", "to", "be", "used", "also", "with", "Sanic", "api", "." ]
python
train
49.625
pgmpy/pgmpy
pgmpy/factors/discrete/DiscreteFactor.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/discrete/DiscreteFactor.py#L615-L680
def divide(self, phi1, inplace=True): """ DiscreteFactor division by `phi1`. Parameters ---------- phi1 : `DiscreteFactor` instance The denominator for division. inplace: boolean If inplace=True it will modify the factor itself, else would return a new factor. Returns ------- DiscreteFactor or None: if inplace=True (default) returns None if inplace=False returns a new `DiscreteFactor` instance. Examples -------- >>> from pgmpy.factors.discrete import DiscreteFactor >>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12)) >>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5)) >>> phi1.divide(phi2) >>> phi1.variables ['x1', 'x2', 'x3'] >>> phi1.cardinality array([2, 3, 2]) >>> phi1.values array([[[ 0. , 0.33333333], [ 2. , 1. ], [ 4. , 1.66666667]], [[ 3. , 1.75 ], [ 4. , 2.25 ], [ 5. , 2.75 ]]]) """ phi = self if inplace else self.copy() phi1 = phi1.copy() if set(phi1.variables) - set(phi.variables): raise ValueError("Scope of divisor should be a subset of dividend") # Adding extra variables in phi1. extra_vars = set(phi.variables) - set(phi1.variables) if extra_vars: slice_ = [slice(None)] * len(phi1.variables) slice_.extend([np.newaxis] * len(extra_vars)) phi1.values = phi1.values[tuple(slice_)] phi1.variables.extend(extra_vars) # Rearranging the axes of phi1 to match phi for axis in range(phi.values.ndim): exchange_index = phi1.variables.index(phi.variables[axis]) phi1.variables[axis], phi1.variables[exchange_index] = phi1.variables[exchange_index], phi1.variables[axis] phi1.values = phi1.values.swapaxes(axis, exchange_index) phi.values = phi.values / phi1.values # If factor division 0/0 = 0 but is undefined for x/0. In pgmpy we are using # np.inf to represent x/0 cases. phi.values[np.isnan(phi.values)] = 0 if not inplace: return phi
[ "def", "divide", "(", "self", ",", "phi1", ",", "inplace", "=", "True", ")", ":", "phi", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "phi1", "=", "phi1", ".", "copy", "(", ")", "if", "set", "(", "phi1", ".", "variables", ")", "-", "set", "(", "phi", ".", "variables", ")", ":", "raise", "ValueError", "(", "\"Scope of divisor should be a subset of dividend\"", ")", "# Adding extra variables in phi1.", "extra_vars", "=", "set", "(", "phi", ".", "variables", ")", "-", "set", "(", "phi1", ".", "variables", ")", "if", "extra_vars", ":", "slice_", "=", "[", "slice", "(", "None", ")", "]", "*", "len", "(", "phi1", ".", "variables", ")", "slice_", ".", "extend", "(", "[", "np", ".", "newaxis", "]", "*", "len", "(", "extra_vars", ")", ")", "phi1", ".", "values", "=", "phi1", ".", "values", "[", "tuple", "(", "slice_", ")", "]", "phi1", ".", "variables", ".", "extend", "(", "extra_vars", ")", "# Rearranging the axes of phi1 to match phi", "for", "axis", "in", "range", "(", "phi", ".", "values", ".", "ndim", ")", ":", "exchange_index", "=", "phi1", ".", "variables", ".", "index", "(", "phi", ".", "variables", "[", "axis", "]", ")", "phi1", ".", "variables", "[", "axis", "]", ",", "phi1", ".", "variables", "[", "exchange_index", "]", "=", "phi1", ".", "variables", "[", "exchange_index", "]", ",", "phi1", ".", "variables", "[", "axis", "]", "phi1", ".", "values", "=", "phi1", ".", "values", ".", "swapaxes", "(", "axis", ",", "exchange_index", ")", "phi", ".", "values", "=", "phi", ".", "values", "/", "phi1", ".", "values", "# If factor division 0/0 = 0 but is undefined for x/0. In pgmpy we are using", "# np.inf to represent x/0 cases.", "phi", ".", "values", "[", "np", ".", "isnan", "(", "phi", ".", "values", ")", "]", "=", "0", "if", "not", "inplace", ":", "return", "phi" ]
DiscreteFactor division by `phi1`.

Parameters
----------
phi1 : `DiscreteFactor` instance
    The denominator for division.

inplace: boolean
    If inplace=True it will modify the factor itself, else would return
    a new factor.

Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
                        if inplace=False returns a new `DiscreteFactor` instance.

Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi1.divide(phi2)
>>> phi1.variables
['x1', 'x2', 'x3']
>>> phi1.cardinality
array([2, 3, 2])
>>> phi1.values
array([[[ 0.        ,  0.33333333],
        [ 2.        ,  1.        ],
        [ 4.        ,  1.66666667]],

       [[ 3.        ,  1.75      ],
        [ 4.        ,  2.25      ],
        [ 5.        ,  2.75      ]]])
[ "DiscreteFactor", "division", "by", "phi1", "." ]
python
train
35.257576
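A minimal, self-contained numpy sketch of the broadcasting trick the record above relies on: the divisor's axes are reordered and padded with np.newaxis for the dividend's extra variable, then divided elementwise. Names and shapes here are illustrative, not pgmpy's API.

import numpy as np

dividend = np.arange(12, dtype=float).reshape(2, 3, 2)  # axes: x1, x2, x3
divisor = np.arange(1, 5, dtype=float).reshape(2, 2)    # axes: x3, x1

# Reorder divisor axes to (x1, x3), then broadcast over the missing x2 axis.
aligned = divisor.swapaxes(0, 1)[:, np.newaxis, :]      # shape (2, 1, 2)
quotient = dividend / aligned
quotient[np.isnan(quotient)] = 0                        # 0/0 -> 0, as in the record
print(quotient)                                         # matches the docstring's values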
ClimateImpactLab/DataFS
datafs/core/data_api.py
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_api.py#L190-L233
def get_archive(self, archive_name, default_version=None): ''' Retrieve a data archive Parameters ---------- archive_name: str Name of the archive to retrieve default_version: version str or :py:class:`~distutils.StrictVersion` giving the default version number to be used on read operations Returns ------- archive: object New :py:class:`~datafs.core.data_archive.DataArchive` object Raises ------ KeyError: A KeyError is raised when the ``archive_name`` is not found ''' auth, archive_name = self._normalize_archive_name(archive_name) res = self.manager.get_archive(archive_name) if default_version is None: default_version = self._default_versions.get(archive_name, None) if (auth is not None) and (auth != res['authority_name']): raise ValueError( 'Archive "{}" not found on {}.'.format(archive_name, auth) + ' Did you mean "{}://{}"?'.format( res['authority_name'], archive_name)) return self._ArchiveConstructor( api=self, default_version=default_version, **res)
[ "def", "get_archive", "(", "self", ",", "archive_name", ",", "default_version", "=", "None", ")", ":", "auth", ",", "archive_name", "=", "self", ".", "_normalize_archive_name", "(", "archive_name", ")", "res", "=", "self", ".", "manager", ".", "get_archive", "(", "archive_name", ")", "if", "default_version", "is", "None", ":", "default_version", "=", "self", ".", "_default_versions", ".", "get", "(", "archive_name", ",", "None", ")", "if", "(", "auth", "is", "not", "None", ")", "and", "(", "auth", "!=", "res", "[", "'authority_name'", "]", ")", ":", "raise", "ValueError", "(", "'Archive \"{}\" not found on {}.'", ".", "format", "(", "archive_name", ",", "auth", ")", "+", "' Did you mean \"{}://{}\"?'", ".", "format", "(", "res", "[", "'authority_name'", "]", ",", "archive_name", ")", ")", "return", "self", ".", "_ArchiveConstructor", "(", "api", "=", "self", ",", "default_version", "=", "default_version", ",", "*", "*", "res", ")" ]
Retrieve a data archive Parameters ---------- archive_name: str Name of the archive to retrieve default_version: version str or :py:class:`~distutils.StrictVersion` giving the default version number to be used on read operations Returns ------- archive: object New :py:class:`~datafs.core.data_archive.DataArchive` object Raises ------ KeyError: A KeyError is raised when the ``archive_name`` is not found
[ "Retrieve", "a", "data", "archive" ]
python
train
28.477273
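A minimal sketch of the authority-prefix handling implied above: a name such as 'my_authority://my_archive' splits into an authority and a bare name, and a mismatch produces the same style of error message. The helper below is reconstructed from the call site, not DataFS's own code.

def normalize_archive_name(archive_name):
    # 'auth://name' -> ('auth', 'name'); bare names carry no authority
    if '://' in archive_name:
        auth, _, name = archive_name.partition('://')
        return auth, name
    return None, archive_name

auth, name = normalize_archive_name('my_authority://my_archive')
found_authority = 'other_authority'  # pretend the manager located it elsewhere
if auth is not None and auth != found_authority:
    print('Archive "{}" not found on {}.'.format(name, auth) +
          ' Did you mean "{}://{}"?'.format(found_authority, name))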
pymc-devs/pymc
pymc/StepMethods.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1631-L1661
def walk(self): """Walk proposal kernel""" if self.verbose > 1: print_('\t' + self._id + ' Running Walk proposal kernel') # Mask for values to move phi = self.phi theta = self.walk_theta u = random(len(phi)) z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1) if self._prime: xp, x = self.values else: x, xp = self.values if self.verbose > 1: print_('\t' + 'Current value = ' + str(x)) x = x + phi * (x - xp) * z if self.verbose > 1: print_('\t' + 'Proposed value = ' + str(x)) self.stochastic.value = x # Set proposal adjustment factor self.hastings_factor = 0.0
[ "def", "walk", "(", "self", ")", ":", "if", "self", ".", "verbose", ">", "1", ":", "print_", "(", "'\\t'", "+", "self", ".", "_id", "+", "' Running Walk proposal kernel'", ")", "# Mask for values to move", "phi", "=", "self", ".", "phi", "theta", "=", "self", ".", "walk_theta", "u", "=", "random", "(", "len", "(", "phi", ")", ")", "z", "=", "(", "theta", "/", "(", "1", "+", "theta", ")", ")", "*", "(", "theta", "*", "u", "**", "2", "+", "2", "*", "u", "-", "1", ")", "if", "self", ".", "_prime", ":", "xp", ",", "x", "=", "self", ".", "values", "else", ":", "x", ",", "xp", "=", "self", ".", "values", "if", "self", ".", "verbose", ">", "1", ":", "print_", "(", "'\\t'", "+", "'Current value = '", "+", "str", "(", "x", ")", ")", "x", "=", "x", "+", "phi", "*", "(", "x", "-", "xp", ")", "*", "z", "if", "self", ".", "verbose", ">", "1", ":", "print_", "(", "'\\t'", "+", "'Proposed value = '", "+", "str", "(", "x", ")", ")", "self", ".", "stochastic", ".", "value", "=", "x", "# Set proposal adjustment factor", "self", ".", "hastings_factor", "=", "0.0" ]
Walk proposal kernel
[ "Walk", "proposal", "kernel" ]
python
train
23.483871
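A minimal numpy sketch of the proposal math above, assuming the standard walk-move form: each coordinate is scaled by z = (theta / (1 + theta)) * (theta * u**2 + 2*u - 1) with u ~ Uniform(0, 1), so z lies in [-theta/(1+theta), theta] for theta = 1. The point values are illustrative.

import numpy as np

theta = 1.0
x = np.array([0.5, -1.2, 3.0])   # current point
xp = np.array([0.4, -1.0, 2.9])  # complementary point
phi = np.ones_like(x)            # mask: move every coordinate

u = np.random.random(len(phi))
z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)
proposal = x + phi * (x - xp) * z
print(proposal)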
Kronuz/pyScss
scss/source.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/source.py#L224-L256
def from_string(cls, string, relpath=None, encoding=None, is_sass=None): """Read Sass source from the contents of a string. The origin is always None. `relpath` defaults to "string:...". """ if isinstance(string, six.text_type): # Already decoded; we don't know what encoding to use for output, # though, so still check for a @charset. # TODO what if the given encoding conflicts with the one in the # file? do we care? if encoding is None: encoding = determine_encoding(string) byte_contents = string.encode(encoding) text_contents = string elif isinstance(string, six.binary_type): encoding = determine_encoding(string) byte_contents = string text_contents = string.decode(encoding) else: raise TypeError("Expected text or bytes, got {0!r}".format(string)) origin = None if relpath is None: m = hashlib.sha256() m.update(byte_contents) relpath = repr("string:{0}:{1}".format( m.hexdigest()[:16], text_contents[:100])) return cls( origin, relpath, text_contents, encoding=encoding, is_sass=is_sass, )
[ "def", "from_string", "(", "cls", ",", "string", ",", "relpath", "=", "None", ",", "encoding", "=", "None", ",", "is_sass", "=", "None", ")", ":", "if", "isinstance", "(", "string", ",", "six", ".", "text_type", ")", ":", "# Already decoded; we don't know what encoding to use for output,", "# though, so still check for a @charset.", "# TODO what if the given encoding conflicts with the one in the", "# file? do we care?", "if", "encoding", "is", "None", ":", "encoding", "=", "determine_encoding", "(", "string", ")", "byte_contents", "=", "string", ".", "encode", "(", "encoding", ")", "text_contents", "=", "string", "elif", "isinstance", "(", "string", ",", "six", ".", "binary_type", ")", ":", "encoding", "=", "determine_encoding", "(", "string", ")", "byte_contents", "=", "string", "text_contents", "=", "string", ".", "decode", "(", "encoding", ")", "else", ":", "raise", "TypeError", "(", "\"Expected text or bytes, got {0!r}\"", ".", "format", "(", "string", ")", ")", "origin", "=", "None", "if", "relpath", "is", "None", ":", "m", "=", "hashlib", ".", "sha256", "(", ")", "m", ".", "update", "(", "byte_contents", ")", "relpath", "=", "repr", "(", "\"string:{0}:{1}\"", ".", "format", "(", "m", ".", "hexdigest", "(", ")", "[", ":", "16", "]", ",", "text_contents", "[", ":", "100", "]", ")", ")", "return", "cls", "(", "origin", ",", "relpath", ",", "text_contents", ",", "encoding", "=", "encoding", ",", "is_sass", "=", "is_sass", ",", ")" ]
Read Sass source from the contents of a string. The origin is always None. `relpath` defaults to "string:...".
[ "Read", "Sass", "source", "from", "the", "contents", "of", "a", "string", "." ]
python
train
38.757576
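A minimal standard-library sketch of the fallback relpath construction above: a short SHA-256 prefix of the bytes plus the first 100 characters of the text. This is illustrative, not the full SourceFile constructor.

import hashlib

text_contents = '.foo { color: red; }'
byte_contents = text_contents.encode('utf-8')

m = hashlib.sha256()
m.update(byte_contents)
relpath = repr('string:{0}:{1}'.format(m.hexdigest()[:16], text_contents[:100]))
print(relpath)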
pndurette/gTTS
gtts/tokenizer/pre_processors.py
https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/pre_processors.py#L31-L48
def abbreviations(text):
    """Remove periods after an abbreviation from a list of known
    abbreviations that can be spoken the same without that period. This
    prevents having to handle tokenization of that period.

    Note:
        Could potentially remove the ending period of a sentence.

    Note:
        Abbreviations that Google Translate can't pronounce without
        (or even with) a period should be added as a word substitution with a
        :class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.

    """
    return PreProcessorRegex(
        search_args=symbols.ABBREVIATIONS,
        search_func=lambda x: r"(?<={})(?=\.).".format(x),
        repl='', flags=re.IGNORECASE).run(text)
[ "def", "abbreviations", "(", "text", ")", ":", "return", "PreProcessorRegex", "(", "search_args", "=", "symbols", ".", "ABBREVIATIONS", ",", "search_func", "=", "lambda", "x", ":", "r\"(?<={})(?=\\.).\"", ".", "format", "(", "x", ")", ",", "repl", "=", "''", ",", "flags", "=", "re", ".", "IGNORECASE", ")", ".", "run", "(", "text", ")" ]
Remove periods after an abbreviation from a list of known
abbreviations that can be spoken the same without that period. This
prevents having to handle tokenization of that period.

Note:
    Could potentially remove the ending period of a sentence.

Note:
    Abbreviations that Google Translate can't pronounce without
    (or even with) a period should be added as a word substitution with a
    :class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
[ "Remove", "periods", "after", "an", "abbreviation", "from", "a", "list", "of", "known", "abbrevations", "that", "can", "be", "spoken", "the", "same", "without", "that", "period", ".", "This", "prevents", "having", "to", "handle", "tokenization", "of", "that", "period", "." ]
python
train
38.722222
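A minimal regex sketch of the lookaround pattern above: for an abbreviation such as "Mr", the pattern (?<=Mr)(?=\.). matches exactly the period that follows it, so substituting '' drops that period. The abbreviation list and the per-abbreviation loop are illustrative simplifications of the PreProcessorRegex run.

import re

ABBREVIATIONS = ['mr', 'mrs', 'dr']

def drop_abbrev_periods(text):
    for abbrev in ABBREVIATIONS:
        pattern = r'(?<={})(?=\.).'.format(abbrev)
        text = re.sub(pattern, '', text, flags=re.IGNORECASE)
    return text

print(drop_abbrev_periods('Dr. Smith met Mr. Jones.'))  # 'Dr Smith met Mr Jones.'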
flatangle/flatlib
flatlib/chart.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/chart.py#L130-L140
def isDiurnal(self): """ Returns true if this chart is diurnal. """ sun = self.getObject(const.SUN) mc = self.getAngle(const.MC) # Get ecliptical positions and check if the # sun is above the horizon. lat = self.pos.lat sunRA, sunDecl = utils.eqCoords(sun.lon, sun.lat) mcRA, mcDecl = utils.eqCoords(mc.lon, 0) return utils.isAboveHorizon(sunRA, sunDecl, mcRA, lat)
[ "def", "isDiurnal", "(", "self", ")", ":", "sun", "=", "self", ".", "getObject", "(", "const", ".", "SUN", ")", "mc", "=", "self", ".", "getAngle", "(", "const", ".", "MC", ")", "# Get ecliptical positions and check if the", "# sun is above the horizon.", "lat", "=", "self", ".", "pos", ".", "lat", "sunRA", ",", "sunDecl", "=", "utils", ".", "eqCoords", "(", "sun", ".", "lon", ",", "sun", ".", "lat", ")", "mcRA", ",", "mcDecl", "=", "utils", ".", "eqCoords", "(", "mc", ".", "lon", ",", "0", ")", "return", "utils", ".", "isAboveHorizon", "(", "sunRA", ",", "sunDecl", ",", "mcRA", ",", "lat", ")" ]
Returns true if this chart is diurnal.
[ "Returns", "true", "if", "this", "chart", "is", "diurnal", "." ]
python
train
39.636364
saltstack/salt
salt/modules/introspect.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/introspect.py#L107-L146
def service_highstate(requires=True): ''' Return running and enabled services in a highstate structure. By default also returns package dependencies for those services, which means that package definitions must be created outside this function. To drop the package dependencies, set ``requires`` to False. CLI Example: salt myminion introspect.service_highstate salt myminion introspect.service_highstate requires=False ''' ret = {} running = running_service_owners() for service in running: ret[service] = {'service': ['running']} if requires: ret[service]['service'].append( {'require': {'pkg': running[service]}} ) enabled = enabled_service_owners() for service in enabled: if service in ret: ret[service]['service'].append({'enabled': True}) else: ret[service] = {'service': [{'enabled': True}]} if requires: exists = False for item in ret[service]['service']: if isinstance(item, dict) and next(six.iterkeys(item)) == 'require': exists = True if not exists: ret[service]['service'].append( {'require': {'pkg': enabled[service]}} ) return ret
[ "def", "service_highstate", "(", "requires", "=", "True", ")", ":", "ret", "=", "{", "}", "running", "=", "running_service_owners", "(", ")", "for", "service", "in", "running", ":", "ret", "[", "service", "]", "=", "{", "'service'", ":", "[", "'running'", "]", "}", "if", "requires", ":", "ret", "[", "service", "]", "[", "'service'", "]", ".", "append", "(", "{", "'require'", ":", "{", "'pkg'", ":", "running", "[", "service", "]", "}", "}", ")", "enabled", "=", "enabled_service_owners", "(", ")", "for", "service", "in", "enabled", ":", "if", "service", "in", "ret", ":", "ret", "[", "service", "]", "[", "'service'", "]", ".", "append", "(", "{", "'enabled'", ":", "True", "}", ")", "else", ":", "ret", "[", "service", "]", "=", "{", "'service'", ":", "[", "{", "'enabled'", ":", "True", "}", "]", "}", "if", "requires", ":", "exists", "=", "False", "for", "item", "in", "ret", "[", "service", "]", "[", "'service'", "]", ":", "if", "isinstance", "(", "item", ",", "dict", ")", "and", "next", "(", "six", ".", "iterkeys", "(", "item", ")", ")", "==", "'require'", ":", "exists", "=", "True", "if", "not", "exists", ":", "ret", "[", "service", "]", "[", "'service'", "]", ".", "append", "(", "{", "'require'", ":", "{", "'pkg'", ":", "enabled", "[", "service", "]", "}", "}", ")", "return", "ret" ]
Return running and enabled services in a highstate structure. By default also returns package dependencies for those services, which means that package definitions must be created outside this function. To drop the package dependencies, set ``requires`` to False. CLI Example: salt myminion introspect.service_highstate salt myminion introspect.service_highstate requires=False
[ "Return", "running", "and", "enabled", "services", "in", "a", "highstate", "structure", ".", "By", "default", "also", "returns", "package", "dependencies", "for", "those", "services", "which", "means", "that", "package", "definitions", "must", "be", "created", "outside", "this", "function", ".", "To", "drop", "the", "package", "dependencies", "set", "requires", "to", "False", "." ]
python
train
32.725
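A minimal sketch of the highstate structure this builds, with stubbed ownership maps standing in for the running_service_owners()/enabled_service_owners() calls. Service and package names are invented for illustration.

running = {'nginx': 'nginx', 'sshd': 'openssh-server'}
enabled = {'nginx': 'nginx', 'cron': 'cronie'}

ret = {}
for service, pkg in running.items():
    ret[service] = {'service': ['running', {'require': {'pkg': pkg}}]}
for service, pkg in enabled.items():
    entry = ret.setdefault(service, {'service': []})
    entry['service'].append({'enabled': True})
    # mirror the record's check: add a pkg requisite only if none exists yet
    if not any(isinstance(i, dict) and 'require' in i for i in entry['service']):
        entry['service'].append({'require': {'pkg': pkg}})

# nginx: running + enabled with one pkg requisite; cron: enabled only
print(ret)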
apache/incubator-superset
superset/connectors/druid/models.py
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L1361-L1484
def get_filters(cls, raw_filters, num_cols, columns_dict): # noqa """Given Superset filter data structure, returns pydruid Filter(s)""" filters = None for flt in raw_filters: col = flt.get('col') op = flt.get('op') eq = flt.get('val') if ( not col or not op or (eq is None and op not in ('IS NULL', 'IS NOT NULL'))): continue # Check if this dimension uses an extraction function # If so, create the appropriate pydruid extraction object column_def = columns_dict.get(col) dim_spec = column_def.dimension_spec if column_def else None extraction_fn = None if dim_spec and 'extractionFn' in dim_spec: (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec) cond = None is_numeric_col = col in num_cols is_list_target = op in ('in', 'not in') eq = cls.filter_values_handler( eq, is_list_target=is_list_target, target_column_is_numeric=is_numeric_col) # For these two ops, could have used Dimension, # but it doesn't support extraction functions if op == '==': cond = Filter(dimension=col, value=eq, extraction_function=extraction_fn) elif op == '!=': cond = ~Filter(dimension=col, value=eq, extraction_function=extraction_fn) elif op in ('in', 'not in'): fields = [] # ignore the filter if it has no value if not len(eq): continue # if it uses an extraction fn, use the "in" operator # as Dimension isn't supported elif extraction_fn is not None: cond = Filter( dimension=col, values=eq, type='in', extraction_function=extraction_fn, ) elif len(eq) == 1: cond = Dimension(col) == eq[0] else: for s in eq: fields.append(Dimension(col) == s) cond = Filter(type='or', fields=fields) if op == 'not in': cond = ~cond elif op == 'regex': cond = Filter( extraction_function=extraction_fn, type='regex', pattern=eq, dimension=col, ) # For the ops below, could have used pydruid's Bound, # but it doesn't support extraction functions elif op == '>=': cond = Filter( type='bound', extraction_function=extraction_fn, dimension=col, lowerStrict=False, upperStrict=False, lower=eq, upper=None, alphaNumeric=is_numeric_col, ) elif op == '<=': cond = Filter( type='bound', extraction_function=extraction_fn, dimension=col, lowerStrict=False, upperStrict=False, lower=None, upper=eq, alphaNumeric=is_numeric_col, ) elif op == '>': cond = Filter( type='bound', extraction_function=extraction_fn, lowerStrict=True, upperStrict=False, dimension=col, lower=eq, upper=None, alphaNumeric=is_numeric_col, ) elif op == '<': cond = Filter( type='bound', extraction_function=extraction_fn, upperStrict=True, lowerStrict=False, dimension=col, lower=None, upper=eq, alphaNumeric=is_numeric_col, ) elif op == 'IS NULL': cond = Dimension(col) == None # NOQA elif op == 'IS NOT NULL': cond = Dimension(col) != None # NOQA if filters: filters = Filter(type='and', fields=[ cond, filters, ]) else: filters = cond return filters
[ "def", "get_filters", "(", "cls", ",", "raw_filters", ",", "num_cols", ",", "columns_dict", ")", ":", "# noqa", "filters", "=", "None", "for", "flt", "in", "raw_filters", ":", "col", "=", "flt", ".", "get", "(", "'col'", ")", "op", "=", "flt", ".", "get", "(", "'op'", ")", "eq", "=", "flt", ".", "get", "(", "'val'", ")", "if", "(", "not", "col", "or", "not", "op", "or", "(", "eq", "is", "None", "and", "op", "not", "in", "(", "'IS NULL'", ",", "'IS NOT NULL'", ")", ")", ")", ":", "continue", "# Check if this dimension uses an extraction function", "# If so, create the appropriate pydruid extraction object", "column_def", "=", "columns_dict", ".", "get", "(", "col", ")", "dim_spec", "=", "column_def", ".", "dimension_spec", "if", "column_def", "else", "None", "extraction_fn", "=", "None", "if", "dim_spec", "and", "'extractionFn'", "in", "dim_spec", ":", "(", "col", ",", "extraction_fn", ")", "=", "DruidDatasource", ".", "_create_extraction_fn", "(", "dim_spec", ")", "cond", "=", "None", "is_numeric_col", "=", "col", "in", "num_cols", "is_list_target", "=", "op", "in", "(", "'in'", ",", "'not in'", ")", "eq", "=", "cls", ".", "filter_values_handler", "(", "eq", ",", "is_list_target", "=", "is_list_target", ",", "target_column_is_numeric", "=", "is_numeric_col", ")", "# For these two ops, could have used Dimension,", "# but it doesn't support extraction functions", "if", "op", "==", "'=='", ":", "cond", "=", "Filter", "(", "dimension", "=", "col", ",", "value", "=", "eq", ",", "extraction_function", "=", "extraction_fn", ")", "elif", "op", "==", "'!='", ":", "cond", "=", "~", "Filter", "(", "dimension", "=", "col", ",", "value", "=", "eq", ",", "extraction_function", "=", "extraction_fn", ")", "elif", "op", "in", "(", "'in'", ",", "'not in'", ")", ":", "fields", "=", "[", "]", "# ignore the filter if it has no value", "if", "not", "len", "(", "eq", ")", ":", "continue", "# if it uses an extraction fn, use the \"in\" operator", "# as Dimension isn't supported", "elif", "extraction_fn", "is", "not", "None", ":", "cond", "=", "Filter", "(", "dimension", "=", "col", ",", "values", "=", "eq", ",", "type", "=", "'in'", ",", "extraction_function", "=", "extraction_fn", ",", ")", "elif", "len", "(", "eq", ")", "==", "1", ":", "cond", "=", "Dimension", "(", "col", ")", "==", "eq", "[", "0", "]", "else", ":", "for", "s", "in", "eq", ":", "fields", ".", "append", "(", "Dimension", "(", "col", ")", "==", "s", ")", "cond", "=", "Filter", "(", "type", "=", "'or'", ",", "fields", "=", "fields", ")", "if", "op", "==", "'not in'", ":", "cond", "=", "~", "cond", "elif", "op", "==", "'regex'", ":", "cond", "=", "Filter", "(", "extraction_function", "=", "extraction_fn", ",", "type", "=", "'regex'", ",", "pattern", "=", "eq", ",", "dimension", "=", "col", ",", ")", "# For the ops below, could have used pydruid's Bound,", "# but it doesn't support extraction functions", "elif", "op", "==", "'>='", ":", "cond", "=", "Filter", "(", "type", "=", "'bound'", ",", "extraction_function", "=", "extraction_fn", ",", "dimension", "=", "col", ",", "lowerStrict", "=", "False", ",", "upperStrict", "=", "False", ",", "lower", "=", "eq", ",", "upper", "=", "None", ",", "alphaNumeric", "=", "is_numeric_col", ",", ")", "elif", "op", "==", "'<='", ":", "cond", "=", "Filter", "(", "type", "=", "'bound'", ",", "extraction_function", "=", "extraction_fn", ",", "dimension", "=", "col", ",", "lowerStrict", "=", "False", ",", "upperStrict", "=", "False", ",", "lower", "=", "None", ",", "upper", "=", "eq", ",", "alphaNumeric", "=", 
"is_numeric_col", ",", ")", "elif", "op", "==", "'>'", ":", "cond", "=", "Filter", "(", "type", "=", "'bound'", ",", "extraction_function", "=", "extraction_fn", ",", "lowerStrict", "=", "True", ",", "upperStrict", "=", "False", ",", "dimension", "=", "col", ",", "lower", "=", "eq", ",", "upper", "=", "None", ",", "alphaNumeric", "=", "is_numeric_col", ",", ")", "elif", "op", "==", "'<'", ":", "cond", "=", "Filter", "(", "type", "=", "'bound'", ",", "extraction_function", "=", "extraction_fn", ",", "upperStrict", "=", "True", ",", "lowerStrict", "=", "False", ",", "dimension", "=", "col", ",", "lower", "=", "None", ",", "upper", "=", "eq", ",", "alphaNumeric", "=", "is_numeric_col", ",", ")", "elif", "op", "==", "'IS NULL'", ":", "cond", "=", "Dimension", "(", "col", ")", "==", "None", "# NOQA", "elif", "op", "==", "'IS NOT NULL'", ":", "cond", "=", "Dimension", "(", "col", ")", "!=", "None", "# NOQA", "if", "filters", ":", "filters", "=", "Filter", "(", "type", "=", "'and'", ",", "fields", "=", "[", "cond", ",", "filters", ",", "]", ")", "else", ":", "filters", "=", "cond", "return", "filters" ]
Given Superset filter data structure, returns pydruid Filter(s)
[ "Given", "Superset", "filter", "data", "structure", "returns", "pydruid", "Filter", "(", "s", ")" ]
python
train
37.451613
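The record above dispatches on a filter's 'op' string to build pydruid objects and folds them together with AND. A minimal, library-free analog of that dispatch, using plain predicates instead of pydruid Filters, might look like this; the predicate representation is invented for illustration.

def build_filters(raw_filters):
    combined = None
    for flt in raw_filters:
        col, op, val = flt.get('col'), flt.get('op'), flt.get('val')
        if not col or not op or (val is None and op not in ('IS NULL', 'IS NOT NULL')):
            continue
        if op == '==':
            cond = lambda row, c=col, v=val: row.get(c) == v
        elif op == 'in':
            cond = lambda row, c=col, v=val: row.get(c) in v
        elif op == '>=':
            cond = lambda row, c=col, v=val: row.get(c) is not None and row.get(c) >= v
        elif op == 'IS NULL':
            cond = lambda row, c=col: row.get(c) is None
        else:
            continue
        prev = combined  # AND the new condition onto what we have so far
        combined = cond if prev is None else (lambda row, a=prev, b=cond: a(row) and b(row))
    return combined

f = build_filters([{'col': 'age', 'op': '>=', 'val': 18}, {'col': 'name', 'op': '==', 'val': 'a'}])
print(f({'age': 20, 'name': 'a'}))  # True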
fgmacedo/django-export-action
export_action/introspection.py
https://github.com/fgmacedo/django-export-action/blob/215fecb9044d22e3ae19d86c3b220041a11fad07/export_action/introspection.py#L77-L99
def get_model_from_path_string(root_model, path): """ Return a model class for a related model root_model is the class of the initial model path is like foo__bar where bar is related to foo """ for path_section in path.split('__'): if path_section: try: field, model, direct, m2m = _get_field_by_name(root_model, path_section) except FieldDoesNotExist: return root_model if direct: if _get_remote_field(field): try: root_model = _get_remote_field(field).parent_model() except AttributeError: root_model = _get_remote_field(field).model else: if hasattr(field, 'related_model'): root_model = field.related_model else: root_model = field.model return root_model
[ "def", "get_model_from_path_string", "(", "root_model", ",", "path", ")", ":", "for", "path_section", "in", "path", ".", "split", "(", "'__'", ")", ":", "if", "path_section", ":", "try", ":", "field", ",", "model", ",", "direct", ",", "m2m", "=", "_get_field_by_name", "(", "root_model", ",", "path_section", ")", "except", "FieldDoesNotExist", ":", "return", "root_model", "if", "direct", ":", "if", "_get_remote_field", "(", "field", ")", ":", "try", ":", "root_model", "=", "_get_remote_field", "(", "field", ")", ".", "parent_model", "(", ")", "except", "AttributeError", ":", "root_model", "=", "_get_remote_field", "(", "field", ")", ".", "model", "else", ":", "if", "hasattr", "(", "field", ",", "'related_model'", ")", ":", "root_model", "=", "field", ".", "related_model", "else", ":", "root_model", "=", "field", ".", "model", "return", "root_model" ]
Return a model class for a related model root_model is the class of the initial model path is like foo__bar where bar is related to foo
[ "Return", "a", "model", "class", "for", "a", "related", "model", "root_model", "is", "the", "class", "of", "the", "initial", "model", "path", "is", "like", "foo__bar", "where", "bar", "is", "related", "to", "foo" ]
python
train
40.217391
zsethna/OLGA
olga/utils.py
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/utils.py#L340-L361
def calc_steady_state_dist(R):
    """Calculate the steady state dist of a 4 state Markov transition matrix.

    Parameters
    ----------
    R : ndarray
        Markov transition matrix

    Returns
    -------
    p_ss : ndarray
        Steady state probability distribution
    """
    #Calc steady state distribution for a dinucleotide bias matrix

    w, v = np.linalg.eig(R)

    for i in range(4):
        if np.abs(w[i] - 1) < 1e-8:
            return np.real(v[:, i] / np.sum(v[:, i]))

    return -1
[ "def", "calc_steady_state_dist", "(", "R", ")", ":", "#Calc steady state distribution for a dinucleotide bias matrix", "w", ",", "v", "=", "np", ".", "linalg", ".", "eig", "(", "R", ")", "for", "i", "in", "range", "(", "4", ")", ":", "if", "np", ".", "abs", "(", "w", "[", "i", "]", "-", "1", ")", "<", "1e-8", ":", "return", "np", ".", "real", "(", "v", "[", ":", ",", "i", "]", "/", "np", ".", "sum", "(", "v", "[", ":", ",", "i", "]", ")", ")", "return", "-", "1" ]
Calculate the steady state dist of a 4 state Markov transition matrix.

Parameters
----------
R : ndarray
    Markov transition matrix

Returns
-------
p_ss : ndarray
    Steady state probability distribution
[ "Calculate", "the", "steady", "state", "dist", "of", "a", "4", "state", "markov", "transition", "matrix", "." ]
python
train
23
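A minimal numpy check of the eigenvector approach above, assuming the transition matrix is column-stochastic (each column sums to 1) so the stationary distribution p satisfies R @ p = p. The matrix values are invented for illustration.

import numpy as np

R = np.array([[0.7, 0.1, 0.2, 0.1],
              [0.1, 0.6, 0.1, 0.2],
              [0.1, 0.2, 0.5, 0.1],
              [0.1, 0.1, 0.2, 0.6]])  # columns sum to 1

w, v = np.linalg.eig(R)
i = np.argmin(np.abs(w - 1))          # pick the eigenvalue closest to 1
p_ss = np.real(v[:, i] / np.sum(v[:, i]))
print(p_ss, np.allclose(R @ p_ss, p_ss))  # stationary: True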
futapi/fut
fut/log.py
https://github.com/futapi/fut/blob/3792c9eee8f5884f38a02210e649c46c6c7a756d/fut/log.py#L20-L36
def logger(name=None, save=False): """Init and configure logger.""" logger = logging.getLogger(name) if save: logformat = '%(asctime)s [%(levelname)s] [%(name)s] %(funcName)s: %(message)s (line %(lineno)d)' log_file_path = 'fut.log' # TODO: define logpath open(log_file_path, 'w').write('') # remove old logs logger.setLevel(logging.DEBUG) logger_handler = logging.FileHandler(log_file_path) logger_handler.setFormatter(logging.Formatter(logformat)) else: logger_handler = NullHandler() logger.addHandler(logger_handler) return logger
[ "def", "logger", "(", "name", "=", "None", ",", "save", "=", "False", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "if", "save", ":", "logformat", "=", "'%(asctime)s [%(levelname)s] [%(name)s] %(funcName)s: %(message)s (line %(lineno)d)'", "log_file_path", "=", "'fut.log'", "# TODO: define logpath", "open", "(", "log_file_path", ",", "'w'", ")", ".", "write", "(", "''", ")", "# remove old logs", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "logger_handler", "=", "logging", ".", "FileHandler", "(", "log_file_path", ")", "logger_handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "logformat", ")", ")", "else", ":", "logger_handler", "=", "NullHandler", "(", ")", "logger", ".", "addHandler", "(", "logger_handler", ")", "return", "logger" ]
Init and configure logger.
[ "Init", "and", "configure", "logger", "." ]
python
valid
35.470588
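A minimal standard-library sketch of the same pattern: a file handler with the record's format string when saving, a NullHandler otherwise. The log path is illustrative; mode='w' truncates old logs in one step instead of the separate open(...).write('') call.

import logging

def make_logger(name=None, save=False):
    log = logging.getLogger(name)
    if save:
        fmt = '%(asctime)s [%(levelname)s] [%(name)s] %(funcName)s: %(message)s (line %(lineno)d)'
        handler = logging.FileHandler('fut.log', mode='w')  # truncate old logs
        handler.setFormatter(logging.Formatter(fmt))
        log.setLevel(logging.DEBUG)
    else:
        handler = logging.NullHandler()
    log.addHandler(handler)
    return log

make_logger('demo', save=True).debug('hello')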
jantman/awslimitchecker
awslimitchecker/services/elasticache.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/elasticache.py#L55-L70
def find_usage(self): """ Determine the current usage for each limit of this service, and update corresponding Limit via :py:meth:`~.AwsLimit._add_current_usage`. """ logger.debug("Checking usage for service %s", self.service_name) self.connect() for lim in self.limits.values(): lim._reset_usage() self._find_usage_nodes() self._find_usage_subnet_groups() self._find_usage_parameter_groups() self._find_usage_security_groups() self._have_usage = True logger.debug("Done checking usage.")
[ "def", "find_usage", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Checking usage for service %s\"", ",", "self", ".", "service_name", ")", "self", ".", "connect", "(", ")", "for", "lim", "in", "self", ".", "limits", ".", "values", "(", ")", ":", "lim", ".", "_reset_usage", "(", ")", "self", ".", "_find_usage_nodes", "(", ")", "self", ".", "_find_usage_subnet_groups", "(", ")", "self", ".", "_find_usage_parameter_groups", "(", ")", "self", ".", "_find_usage_security_groups", "(", ")", "self", ".", "_have_usage", "=", "True", "logger", ".", "debug", "(", "\"Done checking usage.\"", ")" ]
Determine the current usage for each limit of this service, and update corresponding Limit via :py:meth:`~.AwsLimit._add_current_usage`.
[ "Determine", "the", "current", "usage", "for", "each", "limit", "of", "this", "service", "and", "update", "corresponding", "Limit", "via", ":", "py", ":", "meth", ":", "~", ".", "AwsLimit", ".", "_add_current_usage", "." ]
python
train
37.3125
fhcrc/taxtastic
taxtastic/utils.py
https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/utils.py#L76-L109
def parse_raxml(handle): """Parse RAxML's summary output. *handle* should be an open file handle containing the RAxML output. It is parsed and a dictionary returned. """ s = ''.join(handle.readlines()) result = {} try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s) try_set_fields(result, r'(?P<datatype>DNA|RNA|AA)', s) result['empirical_frequencies'] = ( result['datatype'] != 'AA' or re.search('empirical base frequencies', s, re.IGNORECASE) is not None) try_set_fields(result, r'Substitution Matrix: (?P<subs_model>\w+)', s) rates = {} if result['datatype'] != 'AA': try_set_fields(rates, (r"rates\[0\] ac ag at cg ct gt: " r"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) " r"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)"), s, hook=float) try_set_fields(rates, r'rate A <-> C: (?P<ac>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate A <-> G: (?P<ag>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate A <-> T: (?P<at>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate C <-> G: (?P<cg>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate C <-> T: (?P<ct>[0-9.]+)', s, hook=float) try_set_fields(rates, r'rate G <-> T: (?P<gt>[0-9.]+)', s, hook=float) if len(rates) > 0: result['subs_rates'] = rates result['gamma'] = {'n_cats': 4} try_set_fields(result['gamma'], r"alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)", s, hook=float) result['ras_model'] = 'gamma' return result
[ "def", "parse_raxml", "(", "handle", ")", ":", "s", "=", "''", ".", "join", "(", "handle", ".", "readlines", "(", ")", ")", "result", "=", "{", "}", "try_set_fields", "(", "result", ",", "r'(?P<program>RAxML version [0-9.]+)'", ",", "s", ")", "try_set_fields", "(", "result", ",", "r'(?P<datatype>DNA|RNA|AA)'", ",", "s", ")", "result", "[", "'empirical_frequencies'", "]", "=", "(", "result", "[", "'datatype'", "]", "!=", "'AA'", "or", "re", ".", "search", "(", "'empirical base frequencies'", ",", "s", ",", "re", ".", "IGNORECASE", ")", "is", "not", "None", ")", "try_set_fields", "(", "result", ",", "r'Substitution Matrix: (?P<subs_model>\\w+)'", ",", "s", ")", "rates", "=", "{", "}", "if", "result", "[", "'datatype'", "]", "!=", "'AA'", ":", "try_set_fields", "(", "rates", ",", "(", "r\"rates\\[0\\] ac ag at cg ct gt: \"", "r\"(?P<ac>[0-9.]+) (?P<ag>[0-9.]+) (?P<at>[0-9.]+) \"", "r\"(?P<cg>[0-9.]+) (?P<ct>[0-9.]+) (?P<gt>[0-9.]+)\"", ")", ",", "s", ",", "hook", "=", "float", ")", "try_set_fields", "(", "rates", ",", "r'rate A <-> C: (?P<ac>[0-9.]+)'", ",", "s", ",", "hook", "=", "float", ")", "try_set_fields", "(", "rates", ",", "r'rate A <-> G: (?P<ag>[0-9.]+)'", ",", "s", ",", "hook", "=", "float", ")", "try_set_fields", "(", "rates", ",", "r'rate A <-> T: (?P<at>[0-9.]+)'", ",", "s", ",", "hook", "=", "float", ")", "try_set_fields", "(", "rates", ",", "r'rate C <-> G: (?P<cg>[0-9.]+)'", ",", "s", ",", "hook", "=", "float", ")", "try_set_fields", "(", "rates", ",", "r'rate C <-> T: (?P<ct>[0-9.]+)'", ",", "s", ",", "hook", "=", "float", ")", "try_set_fields", "(", "rates", ",", "r'rate G <-> T: (?P<gt>[0-9.]+)'", ",", "s", ",", "hook", "=", "float", ")", "if", "len", "(", "rates", ")", ">", "0", ":", "result", "[", "'subs_rates'", "]", "=", "rates", "result", "[", "'gamma'", "]", "=", "{", "'n_cats'", ":", "4", "}", "try_set_fields", "(", "result", "[", "'gamma'", "]", ",", "r\"alpha[\\[\\]0-9]*: (?P<alpha>[0-9.]+)\"", ",", "s", ",", "hook", "=", "float", ")", "result", "[", "'ras_model'", "]", "=", "'gamma'", "return", "result" ]
Parse RAxML's summary output. *handle* should be an open file handle containing the RAxML output. It is parsed and a dictionary returned.
[ "Parse", "RAxML", "s", "summary", "output", "." ]
python
train
48.088235
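A minimal sketch of the try_set_fields pattern the record above leans on: search a text blob with a named-group regex and, on a match, copy the groups into a dict, optionally through a conversion hook. The helper is reconstructed from its call sites and the RAxML snippet is illustrative.

import re

def try_set_fields(d, regex, s, hook=str):
    m = re.search(regex, s)
    if m:
        d.update((k, hook(v)) for k, v in m.groupdict().items())

s = 'This is RAxML version 8.2.12\nDataType: DNA\nalpha[0]: 0.52'
result = {}
try_set_fields(result, r'(?P<program>RAxML version [0-9.]+)', s)
try_set_fields(result, r'alpha[\[\]0-9]*: (?P<alpha>[0-9.]+)', s, hook=float)
print(result)  # {'program': 'RAxML version 8.2.12', 'alpha': 0.52}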
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/browser.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L345-L358
def current_changed(self, i):
    """Slot for when the current index changes.

    Emits the :data:`AbstractLevel.new_root` signal.

    :param i: the new current index
    :type i: int
    :returns: None
    :rtype: None
    :raises: None
    """
    m = self.model()
    ri = self.rootModelIndex()
    index = m.index(i, 0, ri)
    self.new_root.emit(index)
[ "def", "current_changed", "(", "self", ",", "i", ")", ":", "m", "=", "self", ".", "model", "(", ")", "ri", "=", "self", ".", "rootModelIndex", "(", ")", "index", "=", "m", ".", "index", "(", "i", ",", "0", ",", "ri", ")", "self", ".", "new_root", ".", "emit", "(", "index", ")" ]
Slot for when the current index changes.

Emits the :data:`AbstractLevel.new_root` signal.

:param i: the new current index
:type i: int
:returns: None
:rtype: None
:raises: None
[ "Slot", "for", "when", "the", "current", "index", "changes", ".", "Emits", "the", ":", "data", ":", "AbstractLevel", ".", "new_root", "signal", "." ]
python
train
28.642857
Kozea/pygal
pygal/graph/line.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/line.py#L86-L187
def line(self, serie, rescale=False): """Draw the line serie""" serie_node = self.svg.serie(serie) if rescale and self.secondary_series: points = self._rescale(serie.points) else: points = serie.points view_values = list(map(self.view, points)) if serie.show_dots: for i, (x, y) in enumerate(view_values): if None in (x, y): continue if self.logarithmic: if points[i][1] is None or points[i][1] <= 0: continue if (serie.show_only_major_dots and self.x_labels and i < len(self.x_labels) and self.x_labels[i] not in self._x_labels_major): continue metadata = serie.metadata.get(i) classes = [] if x > self.view.width / 2: classes.append('left') if y > self.view.height / 2: classes.append('top') classes = ' '.join(classes) self._confidence_interval( serie_node['overlay'], x, y, serie.values[i], metadata ) dots = decorate( self.svg, self.svg.node(serie_node['overlay'], class_="dots"), metadata ) val = self._format(serie, i) alter( self.svg.transposable_node( dots, 'circle', cx=x, cy=y, r=serie.dots_size, class_='dot reactive tooltip-trigger' ), metadata ) self._tooltip_data( dots, val, x, y, xlabel=self._get_x_label(i) ) self._static_value( serie_node, val, x + self.style.value_font_size, y + self.style.value_font_size, metadata ) if serie.stroke: if self.interpolate: points = serie.interpolated if rescale and self.secondary_series: points = self._rescale(points) view_values = list(map(self.view, points)) if serie.fill: view_values = self._fill(view_values) if serie.allow_interruptions: # view_values are in form [(x1, y1), (x2, y2)]. We # need to split that into multiple sequences if a # None is present here sequences = [] cur_sequence = [] for x, y in view_values: if y is None and len(cur_sequence) > 0: # emit current subsequence sequences.append(cur_sequence) cur_sequence = [] elif y is None: # just discard continue else: cur_sequence.append((x, y)) # append the element if len(cur_sequence) > 0: # emit last possible sequence sequences.append(cur_sequence) else: # plain vanilla rendering sequences = [view_values] if self.logarithmic: for seq in sequences: for ele in seq[::-1]: y = points[seq.index(ele)][1] if y is None or y <= 0: del seq[seq.index(ele)] for seq in sequences: self.svg.line( serie_node['plot'], seq, close=self._self_close, class_='line reactive' + (' nofill' if not serie.fill else '') )
[ "def", "line", "(", "self", ",", "serie", ",", "rescale", "=", "False", ")", ":", "serie_node", "=", "self", ".", "svg", ".", "serie", "(", "serie", ")", "if", "rescale", "and", "self", ".", "secondary_series", ":", "points", "=", "self", ".", "_rescale", "(", "serie", ".", "points", ")", "else", ":", "points", "=", "serie", ".", "points", "view_values", "=", "list", "(", "map", "(", "self", ".", "view", ",", "points", ")", ")", "if", "serie", ".", "show_dots", ":", "for", "i", ",", "(", "x", ",", "y", ")", "in", "enumerate", "(", "view_values", ")", ":", "if", "None", "in", "(", "x", ",", "y", ")", ":", "continue", "if", "self", ".", "logarithmic", ":", "if", "points", "[", "i", "]", "[", "1", "]", "is", "None", "or", "points", "[", "i", "]", "[", "1", "]", "<=", "0", ":", "continue", "if", "(", "serie", ".", "show_only_major_dots", "and", "self", ".", "x_labels", "and", "i", "<", "len", "(", "self", ".", "x_labels", ")", "and", "self", ".", "x_labels", "[", "i", "]", "not", "in", "self", ".", "_x_labels_major", ")", ":", "continue", "metadata", "=", "serie", ".", "metadata", ".", "get", "(", "i", ")", "classes", "=", "[", "]", "if", "x", ">", "self", ".", "view", ".", "width", "/", "2", ":", "classes", ".", "append", "(", "'left'", ")", "if", "y", ">", "self", ".", "view", ".", "height", "/", "2", ":", "classes", ".", "append", "(", "'top'", ")", "classes", "=", "' '", ".", "join", "(", "classes", ")", "self", ".", "_confidence_interval", "(", "serie_node", "[", "'overlay'", "]", ",", "x", ",", "y", ",", "serie", ".", "values", "[", "i", "]", ",", "metadata", ")", "dots", "=", "decorate", "(", "self", ".", "svg", ",", "self", ".", "svg", ".", "node", "(", "serie_node", "[", "'overlay'", "]", ",", "class_", "=", "\"dots\"", ")", ",", "metadata", ")", "val", "=", "self", ".", "_format", "(", "serie", ",", "i", ")", "alter", "(", "self", ".", "svg", ".", "transposable_node", "(", "dots", ",", "'circle'", ",", "cx", "=", "x", ",", "cy", "=", "y", ",", "r", "=", "serie", ".", "dots_size", ",", "class_", "=", "'dot reactive tooltip-trigger'", ")", ",", "metadata", ")", "self", ".", "_tooltip_data", "(", "dots", ",", "val", ",", "x", ",", "y", ",", "xlabel", "=", "self", ".", "_get_x_label", "(", "i", ")", ")", "self", ".", "_static_value", "(", "serie_node", ",", "val", ",", "x", "+", "self", ".", "style", ".", "value_font_size", ",", "y", "+", "self", ".", "style", ".", "value_font_size", ",", "metadata", ")", "if", "serie", ".", "stroke", ":", "if", "self", ".", "interpolate", ":", "points", "=", "serie", ".", "interpolated", "if", "rescale", "and", "self", ".", "secondary_series", ":", "points", "=", "self", ".", "_rescale", "(", "points", ")", "view_values", "=", "list", "(", "map", "(", "self", ".", "view", ",", "points", ")", ")", "if", "serie", ".", "fill", ":", "view_values", "=", "self", ".", "_fill", "(", "view_values", ")", "if", "serie", ".", "allow_interruptions", ":", "# view_values are in form [(x1, y1), (x2, y2)]. 
We", "# need to split that into multiple sequences if a", "# None is present here", "sequences", "=", "[", "]", "cur_sequence", "=", "[", "]", "for", "x", ",", "y", "in", "view_values", ":", "if", "y", "is", "None", "and", "len", "(", "cur_sequence", ")", ">", "0", ":", "# emit current subsequence", "sequences", ".", "append", "(", "cur_sequence", ")", "cur_sequence", "=", "[", "]", "elif", "y", "is", "None", ":", "# just discard", "continue", "else", ":", "cur_sequence", ".", "append", "(", "(", "x", ",", "y", ")", ")", "# append the element", "if", "len", "(", "cur_sequence", ")", ">", "0", ":", "# emit last possible sequence", "sequences", ".", "append", "(", "cur_sequence", ")", "else", ":", "# plain vanilla rendering", "sequences", "=", "[", "view_values", "]", "if", "self", ".", "logarithmic", ":", "for", "seq", "in", "sequences", ":", "for", "ele", "in", "seq", "[", ":", ":", "-", "1", "]", ":", "y", "=", "points", "[", "seq", ".", "index", "(", "ele", ")", "]", "[", "1", "]", "if", "y", "is", "None", "or", "y", "<=", "0", ":", "del", "seq", "[", "seq", ".", "index", "(", "ele", ")", "]", "for", "seq", "in", "sequences", ":", "self", ".", "svg", ".", "line", "(", "serie_node", "[", "'plot'", "]", ",", "seq", ",", "close", "=", "self", ".", "_self_close", ",", "class_", "=", "'line reactive'", "+", "(", "' nofill'", "if", "not", "serie", ".", "fill", "else", "''", ")", ")" ]
Draw the line serie
[ "Draw", "the", "line", "serie" ]
python
train
38.009804
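A minimal sketch of the interruption handling in the record above: split a list of view coordinates into contiguous runs, dropping points whose y is None, so each run can be drawn as its own line. The data is illustrative.

view_values = [(0, 1.0), (1, 2.0), (2, None), (3, 4.0), (4, 5.0)]

sequences = []
cur_sequence = []
for x, y in view_values:
    if y is None:
        if cur_sequence:          # emit the finished run
            sequences.append(cur_sequence)
            cur_sequence = []
        continue                  # discard the gap point
    cur_sequence.append((x, y))
if cur_sequence:                  # emit the trailing run
    sequences.append(cur_sequence)

print(sequences)  # [[(0, 1.0), (1, 2.0)], [(3, 4.0), (4, 5.0)]]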
Pringley/spyglass
spyglass/torrent.py
https://github.com/Pringley/spyglass/blob/091d74f34837673af936daa9f462ad8216be9916/spyglass/torrent.py#L114-L128
def as_dict(self, cache=None, fetch=True): """Return torrent properties as a dictionary. Set the cache flag to False to disable the cache. On the other hand, set the fetch flag to False to avoid fetching data if it's not cached. """ if not self._fetched and fetch: info = self.fetch(cache) elif self._use_cache(cache): info = self._attrs.copy() else: info = {} info.update(url=self.url) return info
[ "def", "as_dict", "(", "self", ",", "cache", "=", "None", ",", "fetch", "=", "True", ")", ":", "if", "not", "self", ".", "_fetched", "and", "fetch", ":", "info", "=", "self", ".", "fetch", "(", "cache", ")", "elif", "self", ".", "_use_cache", "(", "cache", ")", ":", "info", "=", "self", ".", "_attrs", ".", "copy", "(", ")", "else", ":", "info", "=", "{", "}", "info", ".", "update", "(", "url", "=", "self", ".", "url", ")", "return", "info" ]
Return torrent properties as a dictionary. Set the cache flag to False to disable the cache. On the other hand, set the fetch flag to False to avoid fetching data if it's not cached.
[ "Return", "torrent", "properties", "as", "a", "dictionary", "." ]
python
train
32.933333
sci-bots/pygtkhelpers
pygtkhelpers/ui/form_view_dialog.py
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/form_view_dialog.py#L70-L108
def create_ui(self):
    '''
    .. versionchanged:: 0.21.2
        Load the builder configuration file using :func:`pkgutil.get_data`,
        which supports loading from `.zip` archives (e.g., in an app
        packaged with Py2Exe).
    '''
    builder = gtk.Builder()
    # Read glade file using `pkgutil` to also support loading from `.zip`
    # files (e.g., in app packaged with Py2Exe).
    glade_str = pkgutil.get_data(__name__, 'glade/form_view_dialog.glade')
    builder.add_from_string(glade_str)
    self.window = builder.get_object('form_view_dialog')
    self.vbox_form = builder.get_object('vbox_form')
    if self.title:
        self.window.set_title(self.title)
    if self.short_desc:
        self.short_label = gtk.Label()
        self.short_label.set_text(self.short_desc)
        self.short_label.set_alignment(0, .5)
        self.vbox_form.pack_start(self.short_label, expand=True, fill=True)
    if self.long_desc:
        self.long_label = gtk.Label()
        self.long_label.set_text(self.long_desc)
        self.long_label.set_alignment(.1, .5)
        self.long_expander = gtk.Expander(label='Details')
        self.long_expander.set_spacing(5)
        self.long_expander.add(self.long_label)
        self.vbox_form.pack_start(self.long_expander, expand=True, fill=True)
    if self.parent is None:
        self.parent = self.default_parent
    self.window.set_default_response(gtk.RESPONSE_OK)
    self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
    if self.parent:
        self.window.set_transient_for(self.parent)
    self.window.show_all()
[ "def", "create_ui", "(", "self", ")", ":", "builder", "=", "gtk", ".", "Builder", "(", ")", "# Read glade file using `pkgutil` to also support loading from `.zip`", "# files (e.g., in app packaged with Py2Exe).", "glade_str", "=", "pkgutil", ".", "get_data", "(", "__name__", ",", "'glade/form_view_dialog.glade'", ")", "builder", ".", "add_from_string", "(", "glade_str", ")", "self", ".", "window", "=", "builder", ".", "get_object", "(", "'form_view_dialog'", ")", "self", ".", "vbox_form", "=", "builder", ".", "get_object", "(", "'vbox_form'", ")", "if", "self", ".", "title", ":", "self", ".", "window", ".", "set_title", "(", "self", ".", "title", ")", "if", "self", ".", "short_desc", ":", "self", ".", "short_label", "=", "gtk", ".", "Label", "(", ")", "self", ".", "short_label", ".", "set_text", "(", "self", ".", "short_desc", ")", "self", ".", "short_label", ".", "set_alignment", "(", "0", ",", ".5", ")", "self", ".", "vbox_form", ".", "pack_start", "(", "self", ".", "short_label", ",", "expand", "=", "True", ",", "fill", "=", "True", ")", "if", "self", ".", "long_desc", ":", "self", ".", "long_label", "=", "gtk", ".", "Label", "(", ")", "self", ".", "long_label", ".", "set_text", "(", "self", ".", "long_desc", ")", "self", ".", "long_label", ".", "set_alignment", "(", ".1", ",", ".5", ")", "self", ".", "long_expander", "=", "gtk", ".", "Expander", "(", "label", "=", "'Details'", ")", "self", ".", "long_expander", ".", "set_spacing", "(", "5", ")", "self", ".", "long_expander", ".", "add", "(", "self", ".", "long_label", ")", "self", ".", "vbox_form", ".", "pack_start", "(", "self", ".", "long_expander", ",", "expand", "=", "True", ",", "fill", "=", "True", ")", "if", "self", ".", "parent", "is", "None", ":", "self", ".", "parent", "=", "self", ".", "default_parent", "self", ".", "window", ".", "set_default_response", "(", "gtk", ".", "RESPONSE_OK", ")", "self", ".", "window", ".", "set_position", "(", "gtk", ".", "WIN_POS_CENTER_ON_PARENT", ")", "if", "self", ".", "parent", ":", "self", ".", "window", ".", "set_transient_for", "(", "self", ".", "parent", ")", "self", ".", "window", ".", "show_all", "(", ")" ]
.. versionchanged:: 0.21.2
    Load the builder configuration file using :func:`pkgutil.get_data`,
    which supports loading from `.zip` archives (e.g., in an app
    packaged with Py2Exe).
[ "..", "versionchanged", "::", "0", ".", "21", ".", "2", "Load", "the", "builder", "configuration", "file", "using", ":", "func", ":", "pkgutil", ".", "getdata", "which", "supports", "loading", "from", ".", "zip", "archives", "(", "e", ".", "g", ".", "in", "an", "app", "packaged", "with", "Py2Exe", ")", "." ]
python
train
45.025641
materialsvirtuallab/monty
monty/os/path.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/os/path.py#L15-L41
def which(cmd):
    """
    Returns full path to an executable.

    Args:
        cmd (str): Executable command to search for.

    Returns:
        (str) Full path to command. None if it is not found.

    Example::

        full_path_to_python = which("python")
    """
    def is_exe(fp):
        return os.path.isfile(fp) and os.access(fp, os.X_OK)

    fpath, fname = os.path.split(cmd)
    if fpath:
        if is_exe(cmd):
            return cmd
    else:
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, cmd)
            if is_exe(exe_file):
                return exe_file
    return None
[ "def", "which", "(", "cmd", ")", ":", "def", "is_exe", "(", "fp", ")", ":", "return", "os", ".", "path", ".", "isfile", "(", "fp", ")", "and", "os", ".", "access", "(", "fp", ",", "os", ".", "X_OK", ")", "fpath", ",", "fname", "=", "os", ".", "path", ".", "split", "(", "cmd", ")", "if", "fpath", ":", "if", "is_exe", "(", "cmd", ")", ":", "return", "cmd", "else", ":", "for", "path", "in", "os", ".", "environ", "[", "\"PATH\"", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "exe_file", "=", "os", ".", "path", ".", "join", "(", "path", ",", "cmd", ")", "if", "is_exe", "(", "exe_file", ")", ":", "return", "exe_file", "return", "None" ]
Returns full path to an executable.

Args:
    cmd (str): Executable command to search for.

Returns:
    (str) Full path to command. None if it is not found.

Example::

    full_path_to_python = which("python")
[ "Returns", "full", "path", "to", "a", "executable", "." ]
python
train
23.037037
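A minimal usage sketch; on most systems this resolves an interpreter from PATH, and the standard library's shutil.which performs the same lookup as the helper above.

import os
import shutil

# Hypothetical usage of the record's helper: full_path = which('python')
# Standard-library equivalent:
print(shutil.which('python') or shutil.which('python3'))
print(os.environ['PATH'].split(os.pathsep)[:3])  # first few search directories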
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L245-L266
def find_n50(contig_lengths_dict, genome_length_dict): """ Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n50_dict: dictionary of strain name: N50 """ # Initialise the dictionary n50_dict = dict() for file_name, contig_lengths in contig_lengths_dict.items(): # Initialise a variable to store a running total of contig lengths currentlength = 0 for contig_length in contig_lengths: # Increment the current length with the length of the current contig currentlength += contig_length # If the current length is now greater than the total genome / 2, the current contig length is the N50 if currentlength >= genome_length_dict[file_name] * 0.5: # Populate the dictionary, and break the loop n50_dict[file_name] = contig_length break return n50_dict
[ "def", "find_n50", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "n50_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "# Initialise a variable to store a running total of contig lengths", "currentlength", "=", "0", "for", "contig_length", "in", "contig_lengths", ":", "# Increment the current length with the length of the current contig", "currentlength", "+=", "contig_length", "# If the current length is now greater than the total genome / 2, the current contig length is the N50", "if", "currentlength", ">=", "genome_length_dict", "[", "file_name", "]", "*", "0.5", ":", "# Populate the dictionary, and break the loop", "n50_dict", "[", "file_name", "]", "=", "contig_length", "break", "return", "n50_dict" ]
Calculate the N50 for each strain. N50 is defined as the largest contig such that at least half of the total genome size is contained in contigs equal to or larger than this contig :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: n50_dict: dictionary of strain name: N50
[ "Calculate", "the", "N50", "for", "each", "strain", ".", "N50", "is", "defined", "as", "the", "largest", "contig", "such", "that", "at", "least", "half", "of", "the", "total", "genome", "size", "is", "contained", "in", "contigs", "equal", "to", "or", "larger", "than", "this", "contig", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse", "-", "sorted", "list", "of", "all", "contig", "lengths", ":", "param", "genome_length_dict", ":", "dictionary", "of", "strain", "name", ":", "total", "genome", "length", ":", "return", ":", "n50_dict", ":", "dictionary", "of", "strain", "name", ":", "N50" ]
python
train
54.590909
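A minimal worked check of the N50 definition above, with invented contig lengths: the cumulative sum first reaches half of the 39-base genome at the 9-base contig, so N50 is 9.

contig_lengths_dict = {'strain_a': [15, 9, 7, 5, 3]}  # reverse-sorted
genome_length_dict = {'strain_a': sum(contig_lengths_dict['strain_a'])}  # 39

n50_dict = {}
for name, lengths in contig_lengths_dict.items():
    running = 0
    for length in lengths:
        running += length
        if running >= genome_length_dict[name] * 0.5:  # crossed half the genome
            n50_dict[name] = length
            break

print(n50_dict)  # {'strain_a': 9}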
gwastro/pycbc
pycbc/transforms.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/transforms.py#L422-L454
def transform(self, maps): """This function transforms from chirp mass and symmetric mass ratio to component masses. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: >>> import numpy >>> from pycbc import transforms >>> t = transforms.MchirpEtaToMass1Mass2() >>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])}) {'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]), 'mchirp': array([ 10.]), 'eta': array([ 0.25])} Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values. """ out = {} out[parameters.mass1] = conversions.mass1_from_mchirp_eta( maps[parameters.mchirp], maps[parameters.eta]) out[parameters.mass2] = conversions.mass2_from_mchirp_eta( maps[parameters.mchirp], maps[parameters.eta]) return self.format_output(maps, out)
[ "def", "transform", "(", "self", ",", "maps", ")", ":", "out", "=", "{", "}", "out", "[", "parameters", ".", "mass1", "]", "=", "conversions", ".", "mass1_from_mchirp_eta", "(", "maps", "[", "parameters", ".", "mchirp", "]", ",", "maps", "[", "parameters", ".", "eta", "]", ")", "out", "[", "parameters", ".", "mass2", "]", "=", "conversions", ".", "mass2_from_mchirp_eta", "(", "maps", "[", "parameters", ".", "mchirp", "]", ",", "maps", "[", "parameters", ".", "eta", "]", ")", "return", "self", ".", "format_output", "(", "maps", ",", "out", ")" ]
This function transforms from chirp mass and symmetric mass ratio to component masses. Parameters ---------- maps : a mapping object Examples -------- Convert a dict of numpy.array: >>> import numpy >>> from pycbc import transforms >>> t = transforms.MchirpEtaToMass1Mass2() >>> t.transform({'mchirp': numpy.array([10.]), 'eta': numpy.array([0.25])}) {'mass1': array([ 16.4375183]), 'mass2': array([ 8.21875915]), 'mchirp': array([ 10.]), 'eta': array([ 0.25])} Returns ------- out : dict A dict with key as parameter name and value as numpy.array or float of transformed values.
[ "This", "function", "transforms", "from", "chirp", "mass", "and", "symmetric", "mass", "ratio", "to", "component", "masses", "." ]
python
train
37.454545
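A minimal numpy sketch of the underlying relations, assuming the standard convention M = mchirp * eta**(-3/5) and m1,2 = (M/2) * (1 ± sqrt(1 - 4*eta)); at eta = 0.25 the components come out equal. This reimplements the conversion for illustration and is not pycbc's own code.

import numpy as np

mchirp, eta = np.array([10.0]), np.array([0.25])
total = mchirp * eta ** (-3.0 / 5.0)      # total mass from chirp mass and eta
delta = np.sqrt(1.0 - 4.0 * eta)          # zero at equal masses
mass1 = total * (1.0 + delta) / 2.0
mass2 = total * (1.0 - delta) / 2.0
print(mass1, mass2)  # ~[11.487] [11.487]: equal masses at eta = 0.25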
santoshphilip/eppy
eppy/loops.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/loops.py#L116-L120
def splitterfields(data, commdct): """get splitter fields to diagram it""" objkey = "Connector:Splitter".upper() fieldlists = splittermixerfieldlists(data, commdct, objkey) return extractfields(data, commdct, objkey, fieldlists)
[ "def", "splitterfields", "(", "data", ",", "commdct", ")", ":", "objkey", "=", "\"Connector:Splitter\"", ".", "upper", "(", ")", "fieldlists", "=", "splittermixerfieldlists", "(", "data", ",", "commdct", ",", "objkey", ")", "return", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")" ]
get splitter fields to diagram it
[ "get", "splitter", "fields", "to", "diagram", "it" ]
python
train
48
davenquinn/Attitude
attitude/__dustbin/__report/__init__.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/__dustbin/__report/__init__.py#L34-L78
def report(*arrays, **kwargs): """ Outputs a standalone HTML 'report card' for a measurement (or several grouped measurements), including relevant statistical information. """ name = kwargs.pop("name",None) grouped = len(arrays) > 1 if grouped: arr = N.concatenate(arrays) components = [PCAOrientation(a) for a in arrays] else: arr = arrays[0] components = [] #r = LinearOrientation(arr) pca = PCAOrientation(arr) distances = list(distance_from_group(components,pca)) kwargs = dict( levels=[1,2,3], alpha=[0.8,0.5,0.2], linewidth=2) #ellipse=error_ellipse(pca) kwargs = dict(n=500,levels=[1,2], ellipse=True) stereonet_data = dict( main=pca.error_coords(**kwargs), components=[i.error_coords(**kwargs) for i in components]) t = env.get_template("report.html") return t.render( name=name, pca=pca, stereonet_data=stereonet_data, angular_errors=tuple(N.degrees(i) for i in pca.angular_errors()[::-1]), aligned=plot_aligned(pca), distances=distances)
[ "def", "report", "(", "*", "arrays", ",", "*", "*", "kwargs", ")", ":", "name", "=", "kwargs", ".", "pop", "(", "\"name\"", ",", "None", ")", "grouped", "=", "len", "(", "arrays", ")", ">", "1", "if", "grouped", ":", "arr", "=", "N", ".", "concatenate", "(", "arrays", ")", "components", "=", "[", "PCAOrientation", "(", "a", ")", "for", "a", "in", "arrays", "]", "else", ":", "arr", "=", "arrays", "[", "0", "]", "components", "=", "[", "]", "#r = LinearOrientation(arr)", "pca", "=", "PCAOrientation", "(", "arr", ")", "distances", "=", "list", "(", "distance_from_group", "(", "components", ",", "pca", ")", ")", "kwargs", "=", "dict", "(", "levels", "=", "[", "1", ",", "2", ",", "3", "]", ",", "alpha", "=", "[", "0.8", ",", "0.5", ",", "0.2", "]", ",", "linewidth", "=", "2", ")", "#ellipse=error_ellipse(pca)", "kwargs", "=", "dict", "(", "n", "=", "500", ",", "levels", "=", "[", "1", ",", "2", "]", ",", "ellipse", "=", "True", ")", "stereonet_data", "=", "dict", "(", "main", "=", "pca", ".", "error_coords", "(", "*", "*", "kwargs", ")", ",", "components", "=", "[", "i", ".", "error_coords", "(", "*", "*", "kwargs", ")", "for", "i", "in", "components", "]", ")", "t", "=", "env", ".", "get_template", "(", "\"report.html\"", ")", "return", "t", ".", "render", "(", "name", "=", "name", ",", "pca", "=", "pca", ",", "stereonet_data", "=", "stereonet_data", ",", "angular_errors", "=", "tuple", "(", "N", ".", "degrees", "(", "i", ")", "for", "i", "in", "pca", ".", "angular_errors", "(", ")", "[", ":", ":", "-", "1", "]", ")", ",", "aligned", "=", "plot_aligned", "(", "pca", ")", ",", "distances", "=", "distances", ")" ]
Outputs a standalone HTML 'report card' for a measurement (or several grouped measurements), including relevant statistical information.
[ "Outputs", "a", "standalone", "HTML", "report", "card", "for", "a", "measurement", "(", "or", "several", "grouped", "measurements", ")", "including", "relevant", "statistical", "information", "." ]
python
train
25.577778
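A hedged usage sketch for the `report` helper above — it returns rendered HTML, so a caller might write it to disk. The measurement arrays (rows of x, y, z values) and the output file name are invented for illustration, and the function lives in an internal module, so this is illustrative only:

import numpy as N

group_a = N.random.randn(50, 3)   # placeholder orientation measurements
group_b = N.random.randn(40, 3)

html = report(group_a, group_b, name="Test outcrop")
with open("report.html", "w") as fh:
    fh.write(html)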
WoLpH/python-statsd
statsd/client.py
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/client.py#L104-L110
def get_timer(self, name=None):
    '''Shortcut for getting a :class:`~statsd.timer.Timer` instance

    :keyword name: See :func:`~statsd.client.Client.get_client`
    :type name: str
    '''
    return self.get_client(name=name, class_=statsd.Timer)
[ "def", "get_timer", "(", "self", ",", "name", "=", "None", ")", ":", "return", "self", ".", "get_client", "(", "name", "=", "name", ",", "class_", "=", "statsd", ".", "Timer", ")" ]
Shortcut for getting a :class:`~statsd.timer.Timer` instance

:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
[ "Shortcut", "for", "getting", "a", ":", "class", ":", "~statsd", ".", "timer", ".", "Timer", "instance" ]
python
train
37.857143
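A hedged usage sketch — the client name is invented, and the default subname used by `Timer.stop()` is an assumption:

import statsd

client = statsd.Client('myapp')
timer = client.get_timer('db_query')

timer.start()
# ... timed work goes here ...
timer.stop()   # submits the elapsed time, e.g. under myapp.db_query.total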
allenai/allennlp
allennlp/models/semantic_role_labeler.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/semantic_role_labeler.py#L226-L268
def write_to_conll_eval_file(prediction_file: TextIO,
                             gold_file: TextIO,
                             verb_index: Optional[int],
                             sentence: List[str],
                             prediction: List[str],
                             gold_labels: List[str]):
    """
    Prints predicate argument predictions and gold labels for a single verbal
    predicate in a sentence to two provided file references.

    Parameters
    ----------
    prediction_file : TextIO, required.
        A file reference to print predictions to.
    gold_file : TextIO, required.
        A file reference to print gold labels to.
    verb_index : Optional[int], required.
        The index of the verbal predicate in the sentence which
        the gold labels are the arguments for, or None if the sentence
        contains no verbal predicate.
    sentence : List[str], required.
        The word tokens.
    prediction : List[str], required.
        The predicted BIO labels.
    gold_labels : List[str], required.
        The gold BIO labels.
    """
    verb_only_sentence = ["-"] * len(sentence)
    if verb_index:
        verb_only_sentence[verb_index] = sentence[verb_index]

    conll_format_predictions = convert_bio_tags_to_conll_format(prediction)
    conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels)

    for word, predicted, gold in zip(verb_only_sentence,
                                     conll_format_predictions,
                                     conll_format_gold_labels):
        prediction_file.write(word.ljust(15))
        prediction_file.write(predicted.rjust(15) + "\n")
        gold_file.write(word.ljust(15))
        gold_file.write(gold.rjust(15) + "\n")
    prediction_file.write("\n")
    gold_file.write("\n")
[ "def", "write_to_conll_eval_file", "(", "prediction_file", ":", "TextIO", ",", "gold_file", ":", "TextIO", ",", "verb_index", ":", "Optional", "[", "int", "]", ",", "sentence", ":", "List", "[", "str", "]", ",", "prediction", ":", "List", "[", "str", "]", ",", "gold_labels", ":", "List", "[", "str", "]", ")", ":", "verb_only_sentence", "=", "[", "\"-\"", "]", "*", "len", "(", "sentence", ")", "if", "verb_index", ":", "verb_only_sentence", "[", "verb_index", "]", "=", "sentence", "[", "verb_index", "]", "conll_format_predictions", "=", "convert_bio_tags_to_conll_format", "(", "prediction", ")", "conll_format_gold_labels", "=", "convert_bio_tags_to_conll_format", "(", "gold_labels", ")", "for", "word", ",", "predicted", ",", "gold", "in", "zip", "(", "verb_only_sentence", ",", "conll_format_predictions", ",", "conll_format_gold_labels", ")", ":", "prediction_file", ".", "write", "(", "word", ".", "ljust", "(", "15", ")", ")", "prediction_file", ".", "write", "(", "predicted", ".", "rjust", "(", "15", ")", "+", "\"\\n\"", ")", "gold_file", ".", "write", "(", "word", ".", "ljust", "(", "15", ")", ")", "gold_file", ".", "write", "(", "gold", ".", "rjust", "(", "15", ")", "+", "\"\\n\"", ")", "prediction_file", ".", "write", "(", "\"\\n\"", ")", "gold_file", ".", "write", "(", "\"\\n\"", ")" ]
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.

Parameters
----------
prediction_file : TextIO, required.
    A file reference to print predictions to.
gold_file : TextIO, required.
    A file reference to print gold labels to.
verb_index : Optional[int], required.
    The index of the verbal predicate in the sentence which
    the gold labels are the arguments for, or None if the sentence
    contains no verbal predicate.
sentence : List[str], required.
    The word tokens.
prediction : List[str], required.
    The predicted BIO labels.
gold_labels : List[str], required.
    The gold BIO labels.
[ "Prints", "predicate", "argument", "predictions", "and", "gold", "labels", "for", "a", "single", "verbal", "predicate", "in", "a", "sentence", "to", "two", "provided", "file", "references", "." ]
python
train
41
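A hedged usage sketch for `write_to_conll_eval_file` above — the sentence, BIO tags, and file names are invented for illustration:

sentence = ["The", "cat", "sat"]
predicted_tags = ["B-ARG0", "I-ARG0", "B-V"]
gold_tags = ["B-ARG0", "I-ARG0", "B-V"]

with open("predictions.txt", "w") as pred_file, open("gold.txt", "w") as gold_file:
    # verb index 2 marks "sat" as the predicate for this frame
    write_to_conll_eval_file(pred_file, gold_file, 2, sentence,
                             predicted_tags, gold_tags)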
MartijnBraam/python-isc-dhcp-leases
isc_dhcp_leases/iscdhcpleases.py
https://github.com/MartijnBraam/python-isc-dhcp-leases/blob/e96c00e31f3a52c01ef98193577d614d08a93285/isc_dhcp_leases/iscdhcpleases.py#L65-L98
def _extract_properties(config):
    """
    Parse a line within a lease block
    The line should basically match the expression:
    >>> r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
    For easier seperation of the cases and faster parsing this is done
    using substrings etc..

    :param config:
    :return: tuple of properties dict, options dict and sets dict
    """
    general, options, sets = {}, {}, {}
    for line in config.splitlines():
        # skip empty & malformed lines
        if not line or not line[-1:] == ';' and '; #' not in line:
            continue
        # strip the trailing ';' and remove any whitespaces on the left side
        line = line[:-1].lstrip()
        # seperate the three cases
        if line[:6] == 'option':
            key, value = _extract_prop_option(line)
            options[key] = value
        elif line[:3] == 'set':
            key, value = _extract_prop_set(line)
            sets[key] = value
        else:
            # fall through to generic case
            key, value = _extract_prop_general(line)
            general[key] = value
    return general, options, sets
[ "def", "_extract_properties", "(", "config", ")", ":", "general", ",", "options", ",", "sets", "=", "{", "}", ",", "{", "}", ",", "{", "}", "for", "line", "in", "config", ".", "splitlines", "(", ")", ":", "# skip empty & malformed lines", "if", "not", "line", "or", "not", "line", "[", "-", "1", ":", "]", "==", "';'", "and", "'; #'", "not", "in", "line", ":", "continue", "# strip the trailing ';' and remove any whitespaces on the left side", "line", "=", "line", "[", ":", "-", "1", "]", ".", "lstrip", "(", ")", "# seperate the three cases", "if", "line", "[", ":", "6", "]", "==", "'option'", ":", "key", ",", "value", "=", "_extract_prop_option", "(", "line", ")", "options", "[", "key", "]", "=", "value", "elif", "line", "[", ":", "3", "]", "==", "'set'", ":", "key", ",", "value", "=", "_extract_prop_set", "(", "line", ")", "sets", "[", "key", "]", "=", "value", "else", ":", "# fall through to generic case", "key", ",", "value", "=", "_extract_prop_general", "(", "line", ")", "general", "[", "key", "]", "=", "value", "return", "general", ",", "options", ",", "sets" ]
Parse a line within a lease block
The line should basically match the expression:
>>> r"\s+(?P<key>(?:option|set)\s+\S+|\S+) (?P<value>[\s\S]+?);"
For easier seperation of the cases and faster parsing this is done
using substrings etc..

:param config:
:return: tuple of properties dict, options dict and sets dict
[ "Parse", "a", "line", "within", "a", "lease", "block", "The", "line", "should", "basically", "match", "the", "expression", ":", ">>>", "r", "\\", "s", "+", "(", "?P<key", ">", "(", "?", ":", "option|set", ")", "\\", "s", "+", "\\", "S", "+", "|", "\\", "S", "+", ")", "(", "?P<value", ">", "[", "\\", "s", "\\", "S", "]", "+", "?", ")", ";", "For", "easier", "seperation", "of", "the", "cases", "and", "faster", "parsing", "this", "is", "done", "using", "substrings", "etc", "..", ":", "param", "config", ":", ":", "return", ":", "tuple", "of", "properties", "dict", "options", "dict", "and", "sets", "dict" ]
python
train
32.794118
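A hedged usage sketch — the lease-block text below is invented, and the exact key/value splitting is performed by the `_extract_prop_*` helpers:

lease_block = (
    '  starts 4 2022/01/01 10:00:00;\n'
    '  option domain-name "example.org";\n'
    '  set vendor-class-identifier = "MSFT 5.0";\n'
)
general, options, sets = _extract_properties(lease_block)
# general -> {'starts': '4 2022/01/01 10:00:00'}
# options -> one 'domain-name' entry; sets -> one 'vendor-class-identifier' entry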
fracpete/python-weka-wrapper3
python/weka/core/classes.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L1471-L1485
def find(self, name):
    """
    Returns the Tag that matches the name.

    :param name: the string representation of the tag
    :type name: str
    :return: the tag, None if not found
    :rtype: Tag
    """
    result = None
    for t in self.array:
        if str(t) == name:
            result = Tag(t.jobject)
            break
    return result
[ "def", "find", "(", "self", ",", "name", ")", ":", "result", "=", "None", "for", "t", "in", "self", ".", "array", ":", "if", "str", "(", "t", ")", "==", "name", ":", "result", "=", "Tag", "(", "t", ".", "jobject", ")", "break", "return", "result" ]
Returns the Tag that matches the name.

:param name: the string representation of the tag
:type name: str
:return: the tag, None if not found
:rtype: Tag
[ "Returns", "the", "Tag", "that", "matches", "the", "name", "." ]
python
train
26.066667
Cadair/jupyter_environment_kernels
environment_kernels/core.py
https://github.com/Cadair/jupyter_environment_kernels/blob/3da304550b511bda7d5d39280379b5ca39bb31bc/environment_kernels/core.py#L187-L195
def find_kernel_specs(self):
    """Returns a dict mapping kernel names to resource directories."""
    # let real installed kernels overwrite envs with the same name:
    # this is the same order as the get_kernel_spec way, which also prefers
    # kernels from the jupyter dir over env kernels.
    specs = self.find_kernel_specs_for_envs()
    specs.update(super(EnvironmentKernelSpecManager,
                       self).find_kernel_specs())
    return specs
[ "def", "find_kernel_specs", "(", "self", ")", ":", "# let real installed kernels overwrite envs with the same name:", "# this is the same order as the get_kernel_spec way, which also prefers", "# kernels from the jupyter dir over env kernels.", "specs", "=", "self", ".", "find_kernel_specs_for_envs", "(", ")", "specs", ".", "update", "(", "super", "(", "EnvironmentKernelSpecManager", ",", "self", ")", ".", "find_kernel_specs", "(", ")", ")", "return", "specs" ]
Returns a dict mapping kernel names to resource directories.
[ "Returns", "a", "dict", "mapping", "kernel", "names", "to", "resource", "directories", "." ]
python
train
54
RedHatInsights/insights-core
insights/contrib/importlib.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/importlib.py#L20-L38
def import_module(name, package=None):
    """Import a module.

    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.

    """
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        level = 0
        for character in name:
            if character != '.':
                break
            level += 1
        name = _resolve_name(name[level:], package, level)
    __import__(name)
    return sys.modules[name]
[ "def", "import_module", "(", "name", ",", "package", "=", "None", ")", ":", "if", "name", ".", "startswith", "(", "'.'", ")", ":", "if", "not", "package", ":", "raise", "TypeError", "(", "\"relative imports require the 'package' argument\"", ")", "level", "=", "0", "for", "character", "in", "name", ":", "if", "character", "!=", "'.'", ":", "break", "level", "+=", "1", "name", "=", "_resolve_name", "(", "name", "[", "level", ":", "]", ",", "package", ",", "level", ")", "__import__", "(", "name", ")", "return", "sys", ".", "modules", "[", "name", "]" ]
Import a module. The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import.
[ "Import", "a", "module", "." ]
python
train
32.684211
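A hedged usage sketch for `import_module` above — the absolute import uses the standard library, while the relative form assumes a package named `mypackage` exists:

import sys

json_mod = import_module('json')                 # absolute import
assert json_mod is sys.modules['json']

# Relative import, anchored on a (hypothetical) package:
sibling = import_module('.sibling', package='mypackage')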
Jaymon/endpoints
endpoints/http.py
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L943-L968
def url(self):
    """return the full request url as an Url() instance"""
    scheme = self.scheme
    host = self.host
    path = self.path
    query = self.query
    port = self.port

    # normalize the port
    host_domain, host_port = Url.split_hostname_from_port(host)
    if host_port:
        port = host_port

    controller_path = ""
    if self.controller_info:
        controller_path = self.controller_info.get("path", "")

    u = Url(
        scheme=scheme,
        hostname=host,
        path=path,
        query=query,
        port=port,
        controller_path=controller_path,
    )
    return u
[ "def", "url", "(", "self", ")", ":", "scheme", "=", "self", ".", "scheme", "host", "=", "self", ".", "host", "path", "=", "self", ".", "path", "query", "=", "self", ".", "query", "port", "=", "self", ".", "port", "# normalize the port", "host_domain", ",", "host_port", "=", "Url", ".", "split_hostname_from_port", "(", "host", ")", "if", "host_port", ":", "port", "=", "host_port", "controller_path", "=", "\"\"", "if", "self", ".", "controller_info", ":", "controller_path", "=", "self", ".", "controller_info", ".", "get", "(", "\"path\"", ",", "\"\"", ")", "u", "=", "Url", "(", "scheme", "=", "scheme", ",", "hostname", "=", "host", ",", "path", "=", "path", ",", "query", "=", "query", ",", "port", "=", "port", ",", "controller_path", "=", "controller_path", ",", ")", "return", "u" ]
return the full request url as an Url() instance
[ "return", "the", "full", "request", "url", "as", "an", "Url", "()", "instance" ]
python
train
26.038462
geophysics-ubonn/crtomo_tools
lib/crtomo/plotManager.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/plotManager.py#L501-L526
def converter_pm_log10(data):
    """Convert the given data to:

    log10(subdata) for subdata > 0
    log10(-subdata') for subdata' < 0
    0 for subdata'' == 0

    Parameters
    ----------
    data: array
        input data

    Returns
    -------
    array_converted: array
        converted data
    """
    # indices_zero = np.where(data == 0)
    indices_gt_zero = np.where(data > 0)
    indices_lt_zero = np.where(data < 0)

    data_converted = np.zeros(data.shape)
    data_converted[indices_gt_zero] = np.log10(data[indices_gt_zero])
    data_converted[indices_lt_zero] = -np.log10(-data[indices_lt_zero])

    return indices_gt_zero, indices_lt_zero, data_converted
[ "def", "converter_pm_log10", "(", "data", ")", ":", "# indices_zero = np.where(data == 0)", "indices_gt_zero", "=", "np", ".", "where", "(", "data", ">", "0", ")", "indices_lt_zero", "=", "np", ".", "where", "(", "data", "<", "0", ")", "data_converted", "=", "np", ".", "zeros", "(", "data", ".", "shape", ")", "data_converted", "[", "indices_gt_zero", "]", "=", "np", ".", "log10", "(", "data", "[", "indices_gt_zero", "]", ")", "data_converted", "[", "indices_lt_zero", "]", "=", "-", "np", ".", "log10", "(", "-", "data", "[", "indices_lt_zero", "]", ")", "return", "indices_gt_zero", ",", "indices_lt_zero", ",", "data_converted" ]
Convert the given data to:

log10(subdata) for subdata > 0
log10(-subdata') for subdata' < 0
0 for subdata'' == 0

Parameters
----------
data: array
    input data

Returns
-------
array_converted: array
    converted data
[ "Convert", "the", "given", "data", "to", ":" ]
python
train
25.653846
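A quick check of the sign-preserving log scaling above, assuming `numpy` is imported as in the module:

import numpy as np

data = np.array([-100.0, -1.0, 0.0, 1.0, 100.0])
idx_gt, idx_lt, converted = converter_pm_log10(data)
print(converted)   # [-2. -0.  0.  0.  2.]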
pandas-dev/pandas
pandas/core/internals/concat.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/concat.py#L366-L384
def is_uniform_join_units(join_units):
    """
    Check if the join units consist of blocks of uniform type that can
    be concatenated using Block.concat_same_type instead of the generic
    concatenate_join_units (which uses `_concat._concat_compat`).

    """
    return (
        # all blocks need to have the same type
        all(type(ju.block) is type(join_units[0].block) for ju in join_units) and  # noqa
        # no blocks that would get missing values (can lead to type upcasts)
        # unless we're an extension dtype.
        all(not ju.is_na or ju.block.is_extension for ju in join_units) and
        # no blocks with indexers (as then the dimensions do not fit)
        all(not ju.indexers for ju in join_units) and
        # disregard Panels
        all(ju.block.ndim <= 2 for ju in join_units) and
        # only use this path when there is something to concatenate
        len(join_units) > 1)
[ "def", "is_uniform_join_units", "(", "join_units", ")", ":", "return", "(", "# all blocks need to have the same type", "all", "(", "type", "(", "ju", ".", "block", ")", "is", "type", "(", "join_units", "[", "0", "]", ".", "block", ")", "for", "ju", "in", "join_units", ")", "and", "# noqa", "# no blocks that would get missing values (can lead to type upcasts)", "# unless we're an extension dtype.", "all", "(", "not", "ju", ".", "is_na", "or", "ju", ".", "block", ".", "is_extension", "for", "ju", "in", "join_units", ")", "and", "# no blocks with indexers (as then the dimensions do not fit)", "all", "(", "not", "ju", ".", "indexers", "for", "ju", "in", "join_units", ")", "and", "# disregard Panels", "all", "(", "ju", ".", "block", ".", "ndim", "<=", "2", "for", "ju", "in", "join_units", ")", "and", "# only use this path when there is something to concatenate", "len", "(", "join_units", ")", ">", "1", ")" ]
Check if the join units consist of blocks of uniform type that can be concatenated using Block.concat_same_type instead of the generic concatenate_join_units (which uses `_concat._concat_compat`).
[ "Check", "if", "the", "join", "units", "consist", "of", "blocks", "of", "uniform", "type", "that", "can", "be", "concatenated", "using", "Block", ".", "concat_same_type", "instead", "of", "the", "generic", "concatenate_join_units", "(", "which", "uses", "_concat", ".", "_concat_compat", ")", "." ]
python
train
47.263158
jeremymcrae/denovonear
denovonear/__main__.py
https://github.com/jeremymcrae/denovonear/blob/feaab0fc77e89d70b31e8092899e4f0e68bac9fe/denovonear/__main__.py#L93-L133
def get_mutation_rates(transcripts, mut_dict, ensembl):
    """ determines mutation rates per functional category for transcripts

    Args:
        transcripts: list of transcript IDs for a gene
        mut_dict: dictionary of local sequence context mutation rates
        ensembl: EnsemblRequest object, to retrieve information from Ensembl.

    Returns:
        tuple of (rates, merged transcript, and transcript CDS length)
    """
    rates = {'missense': 0, 'nonsense': 0, 'splice_lof': 0,
        'splice_region': 0, 'synonymous': 0}
    combined = None

    for tx_id in transcripts:
        try:
            tx = construct_gene_object(ensembl, tx_id)
        except ValueError:
            continue

        if len(tx.get_cds_sequence()) % 3 != 0:
            raise ValueError("anomalous_coding_sequence")

        # ignore mitochondrial genes
        if tx.get_chrom() == "MT":
            continue

        sites = SiteRates(tx, mut_dict, masked_sites=combined)
        combined = tx + combined

        for cq in ['missense', 'nonsense', 'splice_lof', 'splice_region',
                'synonymous']:
            rates[cq] += sites[cq].get_summed_rate()

    if combined is None:
        raise ValueError('no tx found')

    length = combined.get_coding_distance(combined.get_cds_end())['pos']

    return rates, combined, length
[ "def", "get_mutation_rates", "(", "transcripts", ",", "mut_dict", ",", "ensembl", ")", ":", "rates", "=", "{", "'missense'", ":", "0", ",", "'nonsense'", ":", "0", ",", "'splice_lof'", ":", "0", ",", "'splice_region'", ":", "0", ",", "'synonymous'", ":", "0", "}", "combined", "=", "None", "for", "tx_id", "in", "transcripts", ":", "try", ":", "tx", "=", "construct_gene_object", "(", "ensembl", ",", "tx_id", ")", "except", "ValueError", ":", "continue", "if", "len", "(", "tx", ".", "get_cds_sequence", "(", ")", ")", "%", "3", "!=", "0", ":", "raise", "ValueError", "(", "\"anomalous_coding_sequence\"", ")", "# ignore mitochondrial genes", "if", "tx", ".", "get_chrom", "(", ")", "==", "\"MT\"", ":", "continue", "sites", "=", "SiteRates", "(", "tx", ",", "mut_dict", ",", "masked_sites", "=", "combined", ")", "combined", "=", "tx", "+", "combined", "for", "cq", "in", "[", "'missense'", ",", "'nonsense'", ",", "'splice_lof'", ",", "'splice_region'", ",", "'synonymous'", "]", ":", "rates", "[", "cq", "]", "+=", "sites", "[", "cq", "]", ".", "get_summed_rate", "(", ")", "if", "combined", "is", "None", ":", "raise", "ValueError", "(", "'no tx found'", ")", "length", "=", "combined", ".", "get_coding_distance", "(", "combined", ".", "get_cds_end", "(", ")", ")", "[", "'pos'", "]", "return", "rates", ",", "combined", ",", "length" ]
determines mutation rates per functional category for transcripts

Args:
    transcripts: list of transcript IDs for a gene
    mut_dict: dictionary of local sequence context mutation rates
    ensembl: EnsemblRequest object, to retrieve information from Ensembl.

Returns:
    tuple of (rates, merged transcript, and transcript CDS length)
[ "determines", "mutation", "rates", "per", "functional", "category", "for", "transcripts", "Args", ":", "transcripts", ":", "list", "of", "transcript", "IDs", "for", "a", "gene", "mut_dict", ":", "dictionary", "of", "local", "sequence", "context", "mutation", "rates", "ensembl", ":", "EnsemblRequest", "object", "to", "retrieve", "information", "from", "Ensembl", ".", "Returns", ":", "tuple", "of", "(", "rates", "merged", "transcript", "and", "transcript", "CDS", "length", ")" ]
python
train
32.829268
spulec/moto
moto/batch/models.py
https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/batch/models.py#L310-L405
def run(self):
    """
    Run the container.

    Logic is as follows:
    Generate container info (eventually from task definition)
    Start container
    Loop whilst not asked to stop and the container is running.
      Get all logs from container between the last time I checked and now.
      Convert logs into cloudwatch format
      Put logs into cloudwatch

    :return:
    """
    try:
        self.job_state = 'PENDING'
        time.sleep(1)

        image = 'alpine:latest'
        cmd = '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'
        name = '{0}-{1}'.format(self.job_name, self.job_id)

        self.job_state = 'RUNNABLE'
        # TODO setup ecs container instance
        time.sleep(1)

        self.job_state = 'STARTING'
        container = self.docker_client.containers.run(
            image, cmd,
            detach=True,
            name=name
        )
        self.job_state = 'RUNNING'
        self.job_started_at = datetime.datetime.now()
        try:
            # Log collection
            logs_stdout = []
            logs_stderr = []
            container.reload()

            # Dodgy hack, we can only check docker logs once a second, but we want to loop more
            # so we can stop if asked to in a quick manner, should all go away if we go async
            # There also be some dodgyness when sending an integer to docker logs and some
            # events seem to be duplicated.
            now = datetime.datetime.now()
            i = 1
            while container.status == 'running' and not self.stop:
                time.sleep(0.15)
                if i % 10 == 0:
                    logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
                    logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))
                    now = datetime.datetime.now()
                    container.reload()
                i += 1

            # Container should be stopped by this point... unless asked to stop
            if container.status == 'running':
                container.kill()

            self.job_stopped_at = datetime.datetime.now()
            # Get final logs
            logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n'))
            logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n'))

            self.job_state = 'SUCCEEDED' if not self.stop else 'FAILED'

            # Process logs
            logs_stdout = [x for x in logs_stdout if len(x) > 0]
            logs_stderr = [x for x in logs_stderr if len(x) > 0]
            logs = []
            for line in logs_stdout + logs_stderr:
                date, line = line.split(' ', 1)
                date = dateutil.parser.parse(date)
                date = int(date.timestamp())
                logs.append({'timestamp': date, 'message': line.strip()})

            # Send to cloudwatch
            log_group = '/aws/batch/job'
            stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id)
            self.log_stream_name = stream_name
            self._log_backend.ensure_log_group(log_group, None)
            self._log_backend.create_log_stream(log_group, stream_name)
            self._log_backend.put_log_events(log_group, stream_name, logs, None)
        except Exception as err:
            logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err))
            self.job_state = 'FAILED'
            container.kill()
        finally:
            container.remove()
    except Exception as err:
        logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err))
        self.job_state = 'FAILED'

    self.job_stopped = True
    self.job_stopped_at = datetime.datetime.now()
[ "def", "run", "(", "self", ")", ":", "try", ":", "self", ".", "job_state", "=", "'PENDING'", "time", ".", "sleep", "(", "1", ")", "image", "=", "'alpine:latest'", "cmd", "=", "'/bin/sh -c \"for a in `seq 1 10`; do echo Hello World; sleep 1; done\"'", "name", "=", "'{0}-{1}'", ".", "format", "(", "self", ".", "job_name", ",", "self", ".", "job_id", ")", "self", ".", "job_state", "=", "'RUNNABLE'", "# TODO setup ecs container instance", "time", ".", "sleep", "(", "1", ")", "self", ".", "job_state", "=", "'STARTING'", "container", "=", "self", ".", "docker_client", ".", "containers", ".", "run", "(", "image", ",", "cmd", ",", "detach", "=", "True", ",", "name", "=", "name", ")", "self", ".", "job_state", "=", "'RUNNING'", "self", ".", "job_started_at", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "try", ":", "# Log collection", "logs_stdout", "=", "[", "]", "logs_stderr", "=", "[", "]", "container", ".", "reload", "(", ")", "# Dodgy hack, we can only check docker logs once a second, but we want to loop more", "# so we can stop if asked to in a quick manner, should all go away if we go async", "# There also be some dodgyness when sending an integer to docker logs and some", "# events seem to be duplicated.", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "i", "=", "1", "while", "container", ".", "status", "==", "'running'", "and", "not", "self", ".", "stop", ":", "time", ".", "sleep", "(", "0.15", ")", "if", "i", "%", "10", "==", "0", ":", "logs_stderr", ".", "extend", "(", "container", ".", "logs", "(", "stdout", "=", "False", ",", "stderr", "=", "True", ",", "timestamps", "=", "True", ",", "since", "=", "datetime2int", "(", "now", ")", ")", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ")", "logs_stdout", ".", "extend", "(", "container", ".", "logs", "(", "stdout", "=", "True", ",", "stderr", "=", "False", ",", "timestamps", "=", "True", ",", "since", "=", "datetime2int", "(", "now", ")", ")", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "container", ".", "reload", "(", ")", "i", "+=", "1", "# Container should be stopped by this point... 
unless asked to stop", "if", "container", ".", "status", "==", "'running'", ":", "container", ".", "kill", "(", ")", "self", ".", "job_stopped_at", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# Get final logs", "logs_stderr", ".", "extend", "(", "container", ".", "logs", "(", "stdout", "=", "False", ",", "stderr", "=", "True", ",", "timestamps", "=", "True", ",", "since", "=", "datetime2int", "(", "now", ")", ")", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ")", "logs_stdout", ".", "extend", "(", "container", ".", "logs", "(", "stdout", "=", "True", ",", "stderr", "=", "False", ",", "timestamps", "=", "True", ",", "since", "=", "datetime2int", "(", "now", ")", ")", ".", "decode", "(", ")", ".", "split", "(", "'\\n'", ")", ")", "self", ".", "job_state", "=", "'SUCCEEDED'", "if", "not", "self", ".", "stop", "else", "'FAILED'", "# Process logs", "logs_stdout", "=", "[", "x", "for", "x", "in", "logs_stdout", "if", "len", "(", "x", ")", ">", "0", "]", "logs_stderr", "=", "[", "x", "for", "x", "in", "logs_stderr", "if", "len", "(", "x", ")", ">", "0", "]", "logs", "=", "[", "]", "for", "line", "in", "logs_stdout", "+", "logs_stderr", ":", "date", ",", "line", "=", "line", ".", "split", "(", "' '", ",", "1", ")", "date", "=", "dateutil", ".", "parser", ".", "parse", "(", "date", ")", "date", "=", "int", "(", "date", ".", "timestamp", "(", ")", ")", "logs", ".", "append", "(", "{", "'timestamp'", ":", "date", ",", "'message'", ":", "line", ".", "strip", "(", ")", "}", ")", "# Send to cloudwatch", "log_group", "=", "'/aws/batch/job'", "stream_name", "=", "'{0}/default/{1}'", ".", "format", "(", "self", ".", "job_definition", ".", "name", ",", "self", ".", "job_id", ")", "self", ".", "log_stream_name", "=", "stream_name", "self", ".", "_log_backend", ".", "ensure_log_group", "(", "log_group", ",", "None", ")", "self", ".", "_log_backend", ".", "create_log_stream", "(", "log_group", ",", "stream_name", ")", "self", ".", "_log_backend", ".", "put_log_events", "(", "log_group", ",", "stream_name", ",", "logs", ",", "None", ")", "except", "Exception", "as", "err", ":", "logger", ".", "error", "(", "'Failed to run AWS Batch container {0}. Error {1}'", ".", "format", "(", "self", ".", "name", ",", "err", ")", ")", "self", ".", "job_state", "=", "'FAILED'", "container", ".", "kill", "(", ")", "finally", ":", "container", ".", "remove", "(", ")", "except", "Exception", "as", "err", ":", "logger", ".", "error", "(", "'Failed to run AWS Batch container {0}. Error {1}'", ".", "format", "(", "self", ".", "name", ",", "err", ")", ")", "self", ".", "job_state", "=", "'FAILED'", "self", ".", "job_stopped", "=", "True", "self", ".", "job_stopped_at", "=", "datetime", ".", "datetime", ".", "now", "(", ")" ]
Run the container.

Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
  Get all logs from container between the last time I checked and now.
  Convert logs into cloudwatch format
  Put logs into cloudwatch

:return:
[ "Run", "the", "container", "." ]
python
train
44.125
diffeo/rejester
rejester/_registry.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_registry.py#L104-L129
def _acquire_lock(self, identifier, atime=30, ltime=5):
    '''Acquire a lock for a given identifier.

    If the lock cannot be obtained immediately, keep trying at random
    intervals, up to 3 seconds, until `atime` has passed.  Once the
    lock has been obtained, continue to hold it for `ltime`.

    :param str identifier: lock token to write
    :param int atime: maximum time (in seconds) to acquire lock
    :param int ltime: maximum time (in seconds) to own lock
    :return: `identifier` if the lock was obtained, :const:`False`
      otherwise

    '''
    conn = redis.Redis(connection_pool=self.pool)
    end = time.time() + atime
    while end > time.time():
        if conn.set(self._lock_name, identifier, ex=ltime, nx=True):
            # logger.debug("won lock %s" % self._lock_name)
            return identifier
        sleep_time = random.uniform(0, 3)
        time.sleep(sleep_time)
    logger.warn('failed to acquire lock %s for %f seconds',
                self._lock_name, atime)
    return False
[ "def", "_acquire_lock", "(", "self", ",", "identifier", ",", "atime", "=", "30", ",", "ltime", "=", "5", ")", ":", "conn", "=", "redis", ".", "Redis", "(", "connection_pool", "=", "self", ".", "pool", ")", "end", "=", "time", ".", "time", "(", ")", "+", "atime", "while", "end", ">", "time", ".", "time", "(", ")", ":", "if", "conn", ".", "set", "(", "self", ".", "_lock_name", ",", "identifier", ",", "ex", "=", "ltime", ",", "nx", "=", "True", ")", ":", "# logger.debug(\"won lock %s\" % self._lock_name)", "return", "identifier", "sleep_time", "=", "random", ".", "uniform", "(", "0", ",", "3", ")", "time", ".", "sleep", "(", "sleep_time", ")", "logger", ".", "warn", "(", "'failed to acquire lock %s for %f seconds'", ",", "self", ".", "_lock_name", ",", "atime", ")", "return", "False" ]
Acquire a lock for a given identifier.

If the lock cannot be obtained immediately, keep trying at random
intervals, up to 3 seconds, until `atime` has passed.  Once the
lock has been obtained, continue to hold it for `ltime`.

:param str identifier: lock token to write
:param int atime: maximum time (in seconds) to acquire lock
:param int ltime: maximum time (in seconds) to own lock
:return: `identifier` if the lock was obtained, :const:`False`
  otherwise
[ "Acquire", "a", "lock", "for", "a", "given", "identifier", "." ]
python
train
41.653846
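The method builds on Redis's atomic `SET key value EX <ltime> NX`; a standalone sketch of that primitive (the connection details and key name are assumptions):

import uuid

import redis

conn = redis.Redis()          # localhost:6379 assumed
token = uuid.uuid4().hex

# Only one client can set the key while it does not exist; the key then
# expires after 5 seconds even if the holder crashes.
if conn.set('my-lock', token, ex=5, nx=True):
    try:
        pass  # ... critical section ...
    finally:
        conn.delete('my-lock')  # naive release; a safe release would compare the token first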
vtkiorg/vtki
vtki/common.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L250-L259
def set_active_scalar(self, name, preference='cell'):
    """Finds the scalar by name and appropriately sets it as active"""
    _, field = get_scalar(self, name, preference=preference, info=True)
    if field == POINT_DATA_FIELD:
        self.GetPointData().SetActiveScalars(name)
    elif field == CELL_DATA_FIELD:
        self.GetCellData().SetActiveScalars(name)
    else:
        raise RuntimeError('Data field ({}) not useable'.format(field))
    self._active_scalar_info = [field, name]
[ "def", "set_active_scalar", "(", "self", ",", "name", ",", "preference", "=", "'cell'", ")", ":", "_", ",", "field", "=", "get_scalar", "(", "self", ",", "name", ",", "preference", "=", "preference", ",", "info", "=", "True", ")", "if", "field", "==", "POINT_DATA_FIELD", ":", "self", ".", "GetPointData", "(", ")", ".", "SetActiveScalars", "(", "name", ")", "elif", "field", "==", "CELL_DATA_FIELD", ":", "self", ".", "GetCellData", "(", ")", ".", "SetActiveScalars", "(", "name", ")", "else", ":", "raise", "RuntimeError", "(", "'Data field ({}) not useable'", ".", "format", "(", "field", ")", ")", "self", ".", "_active_scalar_info", "=", "[", "field", ",", "name", "]" ]
Finds the scalar by name and appropriately sets it as active
[ "Finds", "the", "scalar", "by", "name", "and", "appropriately", "sets", "it", "as", "active" ]
python
train
52
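A hedged usage sketch — `Sphere()` and the `point_arrays` mapping are assumed from vtki's public API of that era, so treat the calls as illustrative:

import vtki

mesh = vtki.Sphere()                                # any vtki dataset (assumed constructor)
mesh.point_arrays['elevation'] = mesh.points[:, 2]  # attach a point scalar
mesh.set_active_scalar('elevation', preference='point')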
quora/qcore
qcore/enum.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/enum.py#L175-L214
def create(cls, name, members):
    """Creates a new enum type based on this one (cls) and adds newly
    passed members to the newly created subclass of cls.

    This method helps to create enums having the same member values as
    values of other enum(s).

    :param name: name of the newly created type
    :param members: 1) a dict or 2) a list of (name, value) tuples
        and/or EnumBase instances describing new members
    :return: newly created enum type.

    """
    NewEnum = type(name, (cls,), {})
    if isinstance(members, dict):
        members = members.items()
    for member in members:
        if isinstance(member, tuple):
            name, value = member
            setattr(NewEnum, name, value)
        elif isinstance(member, EnumBase):
            setattr(NewEnum, member.short_name, member.value)
        else:
            assert False, (
                "members must be either a dict, "
                + "a list of (name, value) tuples, "
                + "or a list of EnumBase instances."
            )
    NewEnum.process()

    # needed for pickling to work (hopefully); taken from the namedtuple
    # implementation in the standard library
    try:
        NewEnum.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
    except (AttributeError, ValueError):
        pass
    return NewEnum
[ "def", "create", "(", "cls", ",", "name", ",", "members", ")", ":", "NewEnum", "=", "type", "(", "name", ",", "(", "cls", ",", ")", ",", "{", "}", ")", "if", "isinstance", "(", "members", ",", "dict", ")", ":", "members", "=", "members", ".", "items", "(", ")", "for", "member", "in", "members", ":", "if", "isinstance", "(", "member", ",", "tuple", ")", ":", "name", ",", "value", "=", "member", "setattr", "(", "NewEnum", ",", "name", ",", "value", ")", "elif", "isinstance", "(", "member", ",", "EnumBase", ")", ":", "setattr", "(", "NewEnum", ",", "member", ".", "short_name", ",", "member", ".", "value", ")", "else", ":", "assert", "False", ",", "(", "\"members must be either a dict, \"", "+", "\"a list of (name, value) tuples, \"", "+", "\"or a list of EnumBase instances.\"", ")", "NewEnum", ".", "process", "(", ")", "# needed for pickling to work (hopefully); taken from the namedtuple implementation in the", "# standard library", "try", ":", "NewEnum", ".", "__module__", "=", "sys", ".", "_getframe", "(", "1", ")", ".", "f_globals", ".", "get", "(", "\"__name__\"", ",", "\"__main__\"", ")", "except", "(", "AttributeError", ",", "ValueError", ")", ":", "pass", "return", "NewEnum" ]
Creates a new enum type based on this one (cls) and adds newly passed
members to the newly created subclass of cls.

This method helps to create enums having the same member values as
values of other enum(s).

:param name: name of the newly created type
:param members: 1) a dict or 2) a list of (name, value) tuples and/or
    EnumBase instances describing new members
:return: newly created enum type.
[ "Creates", "a", "new", "enum", "type", "based", "on", "this", "one", "(", "cls", ")", "and", "adds", "newly", "passed", "members", "to", "the", "newly", "created", "subclass", "of", "cls", "." ]
python
train
36.3
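A hedged usage sketch — the base enum below is hypothetical, and the `.value` member access is assumed from the snippet's own use of `member.value`:

from qcore.enum import Enum

class Color(Enum):   # hypothetical base enum
    red = 1
    green = 2

Extended = Color.create('Extended', [('blue', 3), ('yellow', 4)])
assert Extended.blue.value == 3    # newly added member
assert Extended.red.value == 1     # inherited from Color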
kdeldycke/chessboard
setup.py
https://github.com/kdeldycke/chessboard/blob/ac7a14dc7b6905701e3f6d4e01e8fe1869241bed/setup.py#L117-L126
def long_description():
    """ Collates project README and latest changes. """
    changes = latest_changes()
    changes[0] = "`Changes for v{}".format(changes[0][1:])
    changes[1] = '-' * len(changes[0])
    return "\n\n\n".join([
        read_file('README.rst'),
        '\n'.join(changes),
        "`Full changelog <{}/en/develop/changelog.html#changelog>`_.".format(
            DOCUMENTATION_URL)])
[ "def", "long_description", "(", ")", ":", "changes", "=", "latest_changes", "(", ")", "changes", "[", "0", "]", "=", "\"`Changes for v{}\"", ".", "format", "(", "changes", "[", "0", "]", "[", "1", ":", "]", ")", "changes", "[", "1", "]", "=", "'-'", "*", "len", "(", "changes", "[", "0", "]", ")", "return", "\"\\n\\n\\n\"", ".", "join", "(", "[", "read_file", "(", "'README.rst'", ")", ",", "'\\n'", ".", "join", "(", "changes", ")", ",", "\"`Full changelog <{}/en/develop/changelog.html#changelog>`_.\"", ".", "format", "(", "DOCUMENTATION_URL", ")", "]", ")" ]
Collates project README and latest changes.
[ "Collates", "project", "README", "and", "latest", "changes", "." ]
python
train
39.8
EUDAT-B2SAFE/B2HANDLE
b2handle/handlesystemconnector.py
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handlesystemconnector.py#L236-L247
def __set_basic_auth_string(self, username, password):
    '''
    Creates and sets the authentication string for (write-)accessing the
    Handle Server. No return, the string is set as an attribute to
    the client instance.

    :param username: Username handle with index: index:prefix/suffix.
    :param password: The password contained in the index of the username
        handle.
    '''
    auth = b2handle.utilhandle.create_authentication_string(username, password)
    self.__basic_authentication_string = auth
[ "def", "__set_basic_auth_string", "(", "self", ",", "username", ",", "password", ")", ":", "auth", "=", "b2handle", ".", "utilhandle", ".", "create_authentication_string", "(", "username", ",", "password", ")", "self", ".", "__basic_authentication_string", "=", "auth" ]
Creates and sets the authentication string for (write-)accessing the
Handle Server. No return, the string is set as an attribute to
the client instance.

:param username: Username handle with index: index:prefix/suffix.
:param password: The password contained in the index of the username
    handle.
[ "Creates", "and", "sets", "the", "authentication", "string", "for", "(", "write", "-", ")", "accessing", "the", "Handle", "Server", ".", "No", "return", "the", "string", "is", "set", "as", "an", "attribute", "to", "the", "client", "instance", "." ]
python
train
46.5
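The actual encoding is delegated to `b2handle.utilhandle.create_authentication_string`; a standalone sketch of the conventional HTTP Basic form it corresponds to (illustrative only, not the library's exact output):

import base64

def basic_auth_string(username, password):
    # "index:prefix/suffix" user handles contain ':' and '/', so the pair is
    # joined and base64-encoded as a whole, per the usual Basic scheme.
    pair = '{0}:{1}'.format(username, password).encode('utf-8')
    return base64.b64encode(pair).decode('ascii')

headers = {'Authorization': 'Basic ' + basic_auth_string('300:11022/user', 'secret')}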
getpelican/pelican-plugins
feed_summary/magic_set.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/feed_summary/magic_set.py#L9-L86
def magic_set(obj):
    """
    Adds a function/method to an object.  Uses the name of the first
    argument as a hint about whether it is a method (``self``), class
    method (``cls`` or ``klass``), or static method (anything else).
    Works on both instances and classes.

        >>> class color:
        ...     def __init__(self, r, g, b):
        ...         self.r, self.g, self.b = r, g, b
        >>> c = color(0, 1, 0)
        >>> c      # doctest: +ELLIPSIS
        <__main__.color instance at ...>
        >>> @magic_set(color)
        ... def __repr__(self):
        ...     return '<color %s %s %s>' % (self.r, self.g, self.b)
        >>> c
        <color 0 1 0>
        >>> @magic_set(color)
        ... def red(cls):
        ...     return cls(1, 0, 0)
        >>> color.red()
        <color 1 0 0>
        >>> c.red()
        <color 1 0 0>
        >>> @magic_set(color)
        ... def name():
        ...     return 'color'
        >>> color.name()
        'color'
        >>> @magic_set(c)
        ... def name(self):
        ...     return 'red'
        >>> c.name()
        'red'
        >>> @magic_set(c)
        ... def name(cls):
        ...     return cls.__name__
        >>> c.name()
        'color'
        >>> @magic_set(c)
        ... def pr(obj):
        ...     print obj
        >>> c.pr(1)
        1
    """
    def decorator(func):
        is_class = isinstance(obj, six.class_types)
        args, varargs, varkw, defaults = inspect.getargspec(func)
        if not args or args[0] not in ('self', 'cls', 'klass'):
            # Static function/method
            if is_class:
                replacement = staticmethod(func)
            else:
                replacement = func
        elif args[0] == 'self':
            if is_class:
                replacement = func
            else:
                def replacement(*args, **kw):
                    return func(obj, *args, **kw)
                try:
                    replacement.__name__ = func.__name__
                except:
                    pass
        else:
            if is_class:
                replacement = classmethod(func)
            else:
                def replacement(*args, **kw):
                    return func(obj.__class__, *args, **kw)
                try:
                    replacement.__name__ = func.__name__
                except:
                    pass
        setattr(obj, func.__name__, replacement)
        return replacement
    return decorator
[ "def", "magic_set", "(", "obj", ")", ":", "def", "decorator", "(", "func", ")", ":", "is_class", "=", "isinstance", "(", "obj", ",", "six", ".", "class_types", ")", "args", ",", "varargs", ",", "varkw", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "func", ")", "if", "not", "args", "or", "args", "[", "0", "]", "not", "in", "(", "'self'", ",", "'cls'", ",", "'klass'", ")", ":", "# Static function/method", "if", "is_class", ":", "replacement", "=", "staticmethod", "(", "func", ")", "else", ":", "replacement", "=", "func", "elif", "args", "[", "0", "]", "==", "'self'", ":", "if", "is_class", ":", "replacement", "=", "func", "else", ":", "def", "replacement", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "func", "(", "obj", ",", "*", "args", ",", "*", "*", "kw", ")", "try", ":", "replacement", ".", "__name__", "=", "func", ".", "__name__", "except", ":", "pass", "else", ":", "if", "is_class", ":", "replacement", "=", "classmethod", "(", "func", ")", "else", ":", "def", "replacement", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "func", "(", "obj", ".", "__class__", ",", "*", "args", ",", "*", "*", "kw", ")", "try", ":", "replacement", ".", "__name__", "=", "func", ".", "__name__", "except", ":", "pass", "setattr", "(", "obj", ",", "func", ".", "__name__", ",", "replacement", ")", "return", "replacement", "return", "decorator" ]
Adds a function/method to an object.  Uses the name of the first
argument as a hint about whether it is a method (``self``), class
method (``cls`` or ``klass``), or static method (anything else).
Works on both instances and classes.

>>> class color:
...     def __init__(self, r, g, b):
...         self.r, self.g, self.b = r, g, b
>>> c = color(0, 1, 0)
>>> c      # doctest: +ELLIPSIS
<__main__.color instance at ...>
>>> @magic_set(color)
... def __repr__(self):
...     return '<color %s %s %s>' % (self.r, self.g, self.b)
>>> c
<color 0 1 0>
>>> @magic_set(color)
... def red(cls):
...     return cls(1, 0, 0)
>>> color.red()
<color 1 0 0>
>>> c.red()
<color 1 0 0>
>>> @magic_set(color)
... def name():
...     return 'color'
>>> color.name()
'color'
>>> @magic_set(c)
... def name(self):
...     return 'red'
>>> c.name()
'red'
>>> @magic_set(c)
... def name(cls):
...     return cls.__name__
>>> c.name()
'color'
>>> @magic_set(c)
... def pr(obj):
...     print obj
>>> c.pr(1)
1
[ "Adds", "a", "function", "/", "method", "to", "an", "object", ".", "Uses", "the", "name", "of", "the", "first", "argument", "as", "a", "hint", "about", "whether", "it", "is", "a", "method", "(", "self", ")", "class", "method", "(", "cls", "or", "klass", ")", "or", "static", "method", "(", "anything", "else", ")", ".", "Works", "on", "both", "instances", "and", "classes", "." ]
python
train
26.153846
napalm-automation/napalm
napalm/ios/ios.py
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/ios/ios.py#L2246-L2254
def get_ntp_peers(self):
    """Implementation of get_ntp_peers for IOS."""
    ntp_stats = self.get_ntp_stats()

    return {
        ntp_peer.get("remote"): {}
        for ntp_peer in ntp_stats
        if ntp_peer.get("remote")
    }
[ "def", "get_ntp_peers", "(", "self", ")", ":", "ntp_stats", "=", "self", ".", "get_ntp_stats", "(", ")", "return", "{", "ntp_peer", ".", "get", "(", "\"remote\"", ")", ":", "{", "}", "for", "ntp_peer", "in", "ntp_stats", "if", "ntp_peer", ".", "get", "(", "\"remote\"", ")", "}" ]
Implementation of get_ntp_peers for IOS.
[ "Implementation", "of", "get_ntp_peers", "for", "IOS", "." ]
python
train
28.333333
chrisjrn/registrasion
registrasion/reporting/views.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/reporting/views.py#L457-L566
def attendee(request, form, user_id=None):
    ''' Returns a list of all manifested attendees if no attendee is
    specified, else displays the attendee manifest. '''

    if user_id is None and form.cleaned_data["user"] is not None:
        user_id = form.cleaned_data["user"]

    if user_id is None:
        return attendee_list(request)

    attendee = people.Attendee.objects.get(user__id=user_id)
    name = attendee.attendeeprofilebase.attendee_name()

    reports = []

    profile_data = []
    try:
        profile = people.AttendeeProfileBase.objects.get_subclass(
            attendee=attendee
        )
        fields = profile._meta.get_fields()
    except people.AttendeeProfileBase.DoesNotExist:
        fields = []

    exclude = set(["attendeeprofilebase_ptr", "id"])
    for field in fields:
        if field.name in exclude:
            # Not actually important
            continue
        if not hasattr(field, "verbose_name"):
            continue  # Not a publicly visible field
        value = getattr(profile, field.name)
        if isinstance(field, models.ManyToManyField):
            value = ", ".join(str(i) for i in value.all())
        profile_data.append((field.verbose_name, value))

    cart = CartController.for_user(attendee.user)
    reservation = cart.cart.reservation_duration + cart.cart.time_last_updated
    profile_data.append(("Current cart reserved until", reservation))

    reports.append(ListReport("Profile", ["", ""], profile_data))

    links = []
    links.append((
        reverse(views.badge, args=[user_id]),
        "View badge",
    ))
    links.append((
        reverse(views.amend_registration, args=[user_id]),
        "Amend current cart",
    ))
    links.append((
        reverse(views.extend_reservation, args=[user_id]),
        "Extend reservation",
    ))

    reports.append(Links("Actions for " + name, links))

    # Paid and pending products
    ic = ItemController(attendee.user)
    reports.append(ListReport(
        "Paid Products",
        ["Product", "Quantity"],
        [(pq.product, pq.quantity) for pq in ic.items_purchased()],
    ))
    reports.append(ListReport(
        "Unpaid Products",
        ["Product", "Quantity"],
        [(pq.product, pq.quantity) for pq in ic.items_pending()],
    ))

    # Invoices
    invoices = commerce.Invoice.objects.filter(
        user=attendee.user,
    )
    reports.append(QuerysetReport(
        "Invoices",
        ["id", "get_status_display", "value"],
        invoices,
        headings=["Invoice ID", "Status", "Value"],
        link_view=views.invoice,
    ))

    # Credit Notes
    credit_notes = commerce.CreditNote.objects.filter(
        invoice__user=attendee.user,
    ).select_related("invoice", "creditnoteapplication", "creditnoterefund")

    reports.append(QuerysetReport(
        "Credit Notes",
        ["id", "status", "value"],
        credit_notes,
        link_view=views.credit_note,
    ))

    # All payments
    payments = commerce.PaymentBase.objects.filter(
        invoice__user=attendee.user,
    ).select_related("invoice")

    reports.append(QuerysetReport(
        "Payments",
        ["invoice__id", "id", "reference", "amount"],
        payments,
        link_view=views.invoice,
    ))

    return reports
[ "def", "attendee", "(", "request", ",", "form", ",", "user_id", "=", "None", ")", ":", "if", "user_id", "is", "None", "and", "form", ".", "cleaned_data", "[", "\"user\"", "]", "is", "not", "None", ":", "user_id", "=", "form", ".", "cleaned_data", "[", "\"user\"", "]", "if", "user_id", "is", "None", ":", "return", "attendee_list", "(", "request", ")", "attendee", "=", "people", ".", "Attendee", ".", "objects", ".", "get", "(", "user__id", "=", "user_id", ")", "name", "=", "attendee", ".", "attendeeprofilebase", ".", "attendee_name", "(", ")", "reports", "=", "[", "]", "profile_data", "=", "[", "]", "try", ":", "profile", "=", "people", ".", "AttendeeProfileBase", ".", "objects", ".", "get_subclass", "(", "attendee", "=", "attendee", ")", "fields", "=", "profile", ".", "_meta", ".", "get_fields", "(", ")", "except", "people", ".", "AttendeeProfileBase", ".", "DoesNotExist", ":", "fields", "=", "[", "]", "exclude", "=", "set", "(", "[", "\"attendeeprofilebase_ptr\"", ",", "\"id\"", "]", ")", "for", "field", "in", "fields", ":", "if", "field", ".", "name", "in", "exclude", ":", "# Not actually important", "continue", "if", "not", "hasattr", "(", "field", ",", "\"verbose_name\"", ")", ":", "continue", "# Not a publicly visible field", "value", "=", "getattr", "(", "profile", ",", "field", ".", "name", ")", "if", "isinstance", "(", "field", ",", "models", ".", "ManyToManyField", ")", ":", "value", "=", "\", \"", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "value", ".", "all", "(", ")", ")", "profile_data", ".", "append", "(", "(", "field", ".", "verbose_name", ",", "value", ")", ")", "cart", "=", "CartController", ".", "for_user", "(", "attendee", ".", "user", ")", "reservation", "=", "cart", ".", "cart", ".", "reservation_duration", "+", "cart", ".", "cart", ".", "time_last_updated", "profile_data", ".", "append", "(", "(", "\"Current cart reserved until\"", ",", "reservation", ")", ")", "reports", ".", "append", "(", "ListReport", "(", "\"Profile\"", ",", "[", "\"\"", ",", "\"\"", "]", ",", "profile_data", ")", ")", "links", "=", "[", "]", "links", ".", "append", "(", "(", "reverse", "(", "views", ".", "badge", ",", "args", "=", "[", "user_id", "]", ")", ",", "\"View badge\"", ",", ")", ")", "links", ".", "append", "(", "(", "reverse", "(", "views", ".", "amend_registration", ",", "args", "=", "[", "user_id", "]", ")", ",", "\"Amend current cart\"", ",", ")", ")", "links", ".", "append", "(", "(", "reverse", "(", "views", ".", "extend_reservation", ",", "args", "=", "[", "user_id", "]", ")", ",", "\"Extend reservation\"", ",", ")", ")", "reports", ".", "append", "(", "Links", "(", "\"Actions for \"", "+", "name", ",", "links", ")", ")", "# Paid and pending products", "ic", "=", "ItemController", "(", "attendee", ".", "user", ")", "reports", ".", "append", "(", "ListReport", "(", "\"Paid Products\"", ",", "[", "\"Product\"", ",", "\"Quantity\"", "]", ",", "[", "(", "pq", ".", "product", ",", "pq", ".", "quantity", ")", "for", "pq", "in", "ic", ".", "items_purchased", "(", ")", "]", ",", ")", ")", "reports", ".", "append", "(", "ListReport", "(", "\"Unpaid Products\"", ",", "[", "\"Product\"", ",", "\"Quantity\"", "]", ",", "[", "(", "pq", ".", "product", ",", "pq", ".", "quantity", ")", "for", "pq", "in", "ic", ".", "items_pending", "(", ")", "]", ",", ")", ")", "# Invoices", "invoices", "=", "commerce", ".", "Invoice", ".", "objects", ".", "filter", "(", "user", "=", "attendee", ".", "user", ",", ")", "reports", ".", "append", "(", "QuerysetReport", "(", "\"Invoices\"", ",", 
"[", "\"id\"", ",", "\"get_status_display\"", ",", "\"value\"", "]", ",", "invoices", ",", "headings", "=", "[", "\"Invoice ID\"", ",", "\"Status\"", ",", "\"Value\"", "]", ",", "link_view", "=", "views", ".", "invoice", ",", ")", ")", "# Credit Notes", "credit_notes", "=", "commerce", ".", "CreditNote", ".", "objects", ".", "filter", "(", "invoice__user", "=", "attendee", ".", "user", ",", ")", ".", "select_related", "(", "\"invoice\"", ",", "\"creditnoteapplication\"", ",", "\"creditnoterefund\"", ")", "reports", ".", "append", "(", "QuerysetReport", "(", "\"Credit Notes\"", ",", "[", "\"id\"", ",", "\"status\"", ",", "\"value\"", "]", ",", "credit_notes", ",", "link_view", "=", "views", ".", "credit_note", ",", ")", ")", "# All payments", "payments", "=", "commerce", ".", "PaymentBase", ".", "objects", ".", "filter", "(", "invoice__user", "=", "attendee", ".", "user", ",", ")", ".", "select_related", "(", "\"invoice\"", ")", "reports", ".", "append", "(", "QuerysetReport", "(", "\"Payments\"", ",", "[", "\"invoice__id\"", ",", "\"id\"", ",", "\"reference\"", ",", "\"amount\"", "]", ",", "payments", ",", "link_view", "=", "views", ".", "invoice", ",", ")", ")", "return", "reports" ]
Returns a list of all manifested attendees if no attendee is specified, else displays the attendee manifest.
[ "Returns", "a", "list", "of", "all", "manifested", "attendees", "if", "no", "attendee", "is", "specified", "else", "displays", "the", "attendee", "manifest", "." ]
python
test
28.772727
log2timeline/plaso
plaso/analysis/viper.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/viper.py#L39-L63
def _QueryHash(self, digest):
    """Queries the Viper Server for a specfic hash.

    Args:
      digest (str): hash to look up.

    Returns:
      dict[str, object]: JSON response or None on error.
    """
    if not self._url:
        self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
            self._protocol, self._host, self._port)

    request_data = {self.lookup_hash: digest}

    try:
        json_response = self.MakeRequestAndDecodeJSON(
            self._url, 'POST', data=request_data)
    except errors.ConnectionError as exception:
        json_response = None
        logger.error('Unable to query Viper with error: {0!s}.'.format(
            exception))

    return json_response
[ "def", "_QueryHash", "(", "self", ",", "digest", ")", ":", "if", "not", "self", ".", "_url", ":", "self", ".", "_url", "=", "'{0:s}://{1:s}:{2:d}/file/find'", ".", "format", "(", "self", ".", "_protocol", ",", "self", ".", "_host", ",", "self", ".", "_port", ")", "request_data", "=", "{", "self", ".", "lookup_hash", ":", "digest", "}", "try", ":", "json_response", "=", "self", ".", "MakeRequestAndDecodeJSON", "(", "self", ".", "_url", ",", "'POST'", ",", "data", "=", "request_data", ")", "except", "errors", ".", "ConnectionError", "as", "exception", ":", "json_response", "=", "None", "logger", ".", "error", "(", "'Unable to query Viper with error: {0!s}.'", ".", "format", "(", "exception", ")", ")", "return", "json_response" ]
Queries the Viper Server for a specfic hash.

Args:
  digest (str): hash to look up.

Returns:
  dict[str, object]: JSON response or None on error.
[ "Queries", "the", "Viper", "Server", "for", "a", "specfic", "hash", "." ]
python
train
26.64
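A standalone sketch of the same lookup using `requests` (the host, port, and hash type are assumptions; plaso itself goes through `MakeRequestAndDecodeJSON`):

import requests

url = 'https://viper.example.com:8080/file/find'
try:
    response = requests.post(url, data={'sha256': 'aab1...'}, timeout=10)
    json_response = response.json()
except requests.RequestException as exception:
    json_response = None
    print('Unable to query Viper with error: {0!s}.'.format(exception))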
JnyJny/Geometry
Geometry/ellipse.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/ellipse.py#L111-L115
def xAxisIsMajor(self):
    '''
    Returns True if the major axis is parallel to the X axis, boolean.
    '''
    return max(self.radius.x, self.radius.y) == self.radius.x
[ "def", "xAxisIsMajor", "(", "self", ")", ":", "return", "max", "(", "self", ".", "radius", ".", "x", ",", "self", ".", "radius", ".", "y", ")", "==", "self", ".", "radius", ".", "x" ]
Returns True if the major axis is parallel to the X axis, boolean.
[ "Returns", "True", "if", "the", "major", "axis", "is", "parallel", "to", "the", "X", "axis", "boolean", "." ]
python
train
36.8
helgi/python-command
command/core.py
https://github.com/helgi/python-command/blob/c41fb8cdd9074b847c7bc5b5ee7f027508f52d7f/command/core.py#L212-L248
def which(program, environ=None):
    """
    Find out if an executable exists in the supplied PATH.
    If so, the absolute path to the executable is returned.
    If not, an exception is raised.

    :type string
    :param program: Executable to be checked for

    :param dict
    :param environ: Any additional ENV variables required, specifically PATH

    :return string|:class:`command.CommandException`
        Returns the location if found, otherwise raises exception
    """
    def is_exe(path):
        """
        Helper method to check if a file exists and is executable
        """
        return isfile(path) and os.access(path, os.X_OK)

    if program is None:
        raise CommandException("Invalid program name passed")

    fpath, fname = split(program)
    if fpath:
        if is_exe(program):
            return program
    else:
        if environ is None:
            environ = os.environ
        for path in environ['PATH'].split(os.pathsep):
            exe_file = join(path, program)
            if is_exe(exe_file):
                return exe_file

    raise CommandException("Could not find %s" % program)
[ "def", "which", "(", "program", ",", "environ", "=", "None", ")", ":", "def", "is_exe", "(", "path", ")", ":", "\"\"\"\n Helper method to check if a file exists and is executable\n \"\"\"", "return", "isfile", "(", "path", ")", "and", "os", ".", "access", "(", "path", ",", "os", ".", "X_OK", ")", "if", "program", "is", "None", ":", "raise", "CommandException", "(", "\"Invalid program name passed\"", ")", "fpath", ",", "fname", "=", "split", "(", "program", ")", "if", "fpath", ":", "if", "is_exe", "(", "program", ")", ":", "return", "program", "else", ":", "if", "environ", "is", "None", ":", "environ", "=", "os", ".", "environ", "for", "path", "in", "environ", "[", "'PATH'", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "exe_file", "=", "join", "(", "path", ",", "program", ")", "if", "is_exe", "(", "exe_file", ")", ":", "return", "exe_file", "raise", "CommandException", "(", "\"Could not find %s\"", "%", "program", ")" ]
Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.

:type string
:param program: Executable to be checked for

:param dict
:param environ: Any additional ENV variables required, specifically PATH

:return string|:class:`command.CommandException`
    Returns the location if found, otherwise raises exception
[ "Find", "out", "if", "an", "executable", "exists", "in", "the", "supplied", "PATH", ".", "If", "so", "the", "absolute", "path", "to", "the", "executable", "is", "returned", ".", "If", "not", "an", "exception", "is", "raised", "." ]
python
train
29.594595
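A hedged usage sketch for `which` above:

print(which('sh'))                                 # e.g. /bin/sh on POSIX systems

# Restrict the search to a custom PATH:
print(which('sh', environ={'PATH': '/bin:/usr/bin'}))

try:
    which('no-such-tool')
except CommandException as exc:
    print(exc)                                     # "Could not find no-such-tool"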
IceflowRE/unidown
unidown/core/updater.py
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/core/updater.py#L13-L28
def get_newest_app_version() -> Version:
    """
    Download the version tag from remote.

    :return: version from remote
    :rtype: ~packaging.version.Version
    """
    with urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) as p_man:
        pypi_json = p_man.urlopen('GET', static_data.PYPI_JSON_URL).data.decode('utf-8')
    releases = json.loads(pypi_json).get('releases', [])
    online_version = Version('0.0.0')
    for release in releases:
        cur_version = Version(release)
        if not cur_version.is_prerelease:
            online_version = max(online_version, cur_version)
    return online_version
[ "def", "get_newest_app_version", "(", ")", "->", "Version", ":", "with", "urllib3", ".", "PoolManager", "(", "cert_reqs", "=", "'CERT_REQUIRED'", ",", "ca_certs", "=", "certifi", ".", "where", "(", ")", ")", "as", "p_man", ":", "pypi_json", "=", "p_man", ".", "urlopen", "(", "'GET'", ",", "static_data", ".", "PYPI_JSON_URL", ")", ".", "data", ".", "decode", "(", "'utf-8'", ")", "releases", "=", "json", ".", "loads", "(", "pypi_json", ")", ".", "get", "(", "'releases'", ",", "[", "]", ")", "online_version", "=", "Version", "(", "'0.0.0'", ")", "for", "release", "in", "releases", ":", "cur_version", "=", "Version", "(", "release", ")", "if", "not", "cur_version", ".", "is_prerelease", ":", "online_version", "=", "max", "(", "online_version", ",", "cur_version", ")", "return", "online_version" ]
Download the version tag from remote.

:return: version from remote
:rtype: ~packaging.version.Version
[ "Download", "the", "version", "tag", "from", "remote", "." ]
python
train
39.375
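A hedged usage sketch comparing the remote version against an installed one (the current version string is an assumption):

from packaging.version import Version

current = Version('2.0.0')
newest = get_newest_app_version()
if newest > current:
    print('Update available: {0} -> {1}'.format(current, newest))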