Dataset columns:

Column            Type     Length / values
repo              string   7 - 55 chars
path              string   4 - 223 chars
url               string   87 - 315 chars
code              string   75 - 104k chars
code_tokens       list     -
docstring         string   1 - 46.9k chars
docstring_tokens  list     -
language          string   1 distinct class
partition         string   3 distinct classes
avg_line_len      float64  7.91 - 980
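Each record in the dump below describes one Python function: its repository, file path, source URL, the raw code, the code tokens, the extracted docstring and docstring tokens, the language tag, the train/valid/test partition, and the average line length of the code. As a minimal sketch of how such records could be filtered once exported locally (the file name records.parquet and the thresholds are assumptions, not part of this dump):

import pandas as pd

# Hypothetical local export of the records below; the file name is assumed.
df = pd.read_parquet("records.parquet")

# Keep training-partition functions with reasonably short docstrings
# and lines that are not excessively long.
subset = df[
    (df["partition"] == "train")
    & (df["docstring"].str.len() < 1000)
    & (df["avg_line_len"] < 80)
]

for row in subset.itertuples():
    print(row.repo, row.path, row.url)

The records themselves follow, one field per line, in the column order listed above.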
ns1/ns1-python
ns1/records.py
https://github.com/ns1/ns1-python/blob/f3e1d90a3b76a1bd18f855f2c622a8a49d4b585e/ns1/records.py#L55-L70
def load(self, callback=None, errback=None, reload=False): """ Load record data from the API. """ if not reload and self.data: raise RecordException('record already loaded') def success(result, *args): self._parseModel(result) if callback: return callback(self) else: return self return self._rest.retrieve(self.parentZone.zone, self.domain, self.type, callback=success, errback=errback)
[ "def", "load", "(", "self", ",", "callback", "=", "None", ",", "errback", "=", "None", ",", "reload", "=", "False", ")", ":", "if", "not", "reload", "and", "self", ".", "data", ":", "raise", "RecordException", "(", "'record already loaded'", ")", "def", "success", "(", "result", ",", "*", "args", ")", ":", "self", ".", "_parseModel", "(", "result", ")", "if", "callback", ":", "return", "callback", "(", "self", ")", "else", ":", "return", "self", "return", "self", ".", "_rest", ".", "retrieve", "(", "self", ".", "parentZone", ".", "zone", ",", "self", ".", "domain", ",", "self", ".", "type", ",", "callback", "=", "success", ",", "errback", "=", "errback", ")" ]
Load record data from the API.
[ "Load", "record", "data", "from", "the", "API", "." ]
python
train
35.6875
lemieuxl/pyGenClean
pyGenClean/Ethnicity/plot_eigenvalues.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/Ethnicity/plot_eigenvalues.py#L178-L205
def parse_args(argString=None): """Parses the command line options and arguments. :returns: A :py:class:`argparse.Namespace` object created by the :py:mod:`argparse` module. It contains the values of the different options. ====================== ====== ================================ Options Type Description ====================== ====== ================================ ``--evec`` string The EVEC file from EIGENSOFT ``--scree-plot-title`` string The main title of the scree plot ``--out`` string The name of the output file ====================== ====== ================================ .. note:: No option check is done here (except for the one automatically done by :py:mod:`argparse`). Those need to be done elsewhere (see :py:func:`checkArgs`). """ args = None if argString is None: args = parser.parse_args() else: args = parser.parse_args(argString) return args
[ "def", "parse_args", "(", "argString", "=", "None", ")", ":", "args", "=", "None", "if", "argString", "is", "None", ":", "args", "=", "parser", ".", "parse_args", "(", ")", "else", ":", "args", "=", "parser", ".", "parse_args", "(", "argString", ")", "return", "args" ]
Parses the command line options and arguments. :returns: A :py:class:`argparse.Namespace` object created by the :py:mod:`argparse` module. It contains the values of the different options. ====================== ====== ================================ Options Type Description ====================== ====== ================================ ``--evec`` string The EVEC file from EIGENSOFT ``--scree-plot-title`` string The main title of the scree plot ``--out`` string The name of the output file ====================== ====== ================================ .. note:: No option check is done here (except for the one automatically done by :py:mod:`argparse`). Those need to be done elsewhere (see :py:func:`checkArgs`).
[ "Parses", "the", "command", "line", "options", "and", "arguments", "." ]
python
train
36.785714
tradenity/python-sdk
tradenity/resources/product.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/product.py#L1496-L1517
def replace_product_by_id(cls, product_id, product, **kwargs): """Replace Product Replace all attributes of Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_product_by_id(product_id, product, async=True) >>> result = thread.get() :param async bool :param str product_id: ID of product to replace (required) :param Product product: Attributes of product to replace (required) :return: Product If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_product_by_id_with_http_info(product_id, product, **kwargs) else: (data) = cls._replace_product_by_id_with_http_info(product_id, product, **kwargs) return data
[ "def", "replace_product_by_id", "(", "cls", ",", "product_id", ",", "product", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_replace_product_by_id_with_http_info", "(", "product_id", ",", "product", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_replace_product_by_id_with_http_info", "(", "product_id", ",", "product", ",", "*", "*", "kwargs", ")", "return", "data" ]
Replace Product Replace all attributes of Product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_product_by_id(product_id, product, async=True) >>> result = thread.get() :param async bool :param str product_id: ID of product to replace (required) :param Product product: Attributes of product to replace (required) :return: Product If the method is called asynchronously, returns the request thread.
[ "Replace", "Product" ]
python
train
44.181818
MolSSI-BSE/basis_set_exchange
basis_set_exchange/misc.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/misc.py#L58-L107
def compact_elements(elements): """ Create a string (with ranges) given a list of element numbers For example, [1, 2, 3, 6, 7, 8, 10] will return "H-Li,C-O,Ne" """ if len(elements) == 0: return # We have to convert to integers for this function elements = [int(el) for el in elements] # Just to be safe, sort the list el = sorted(set(elements)) ranges = [] i = 0 while i < len(el): start_el = el[i] end_el = start_el i += 1 while i < len(el): if el[i] != end_el + 1: break end_el += 1 i += 1 if start_el == end_el: ranges.append([start_el]) else: ranges.append([start_el, end_el]) # Convert to elemental symbols range_strs = [] for r in ranges: sym = lut.element_sym_from_Z(r[0], True) if len(r) == 1: range_strs.append(sym) elif len(r) == 2 and r[1] == r[0] + 1: sym2 = lut.element_sym_from_Z(r[1], True) range_strs.append(sym + "," + sym2) else: sym2 = lut.element_sym_from_Z(r[1], True) range_strs.append(sym + "-" + sym2) return ",".join(range_strs)
[ "def", "compact_elements", "(", "elements", ")", ":", "if", "len", "(", "elements", ")", "==", "0", ":", "return", "# We have to convert to integers for this function", "elements", "=", "[", "int", "(", "el", ")", "for", "el", "in", "elements", "]", "# Just to be safe, sort the list", "el", "=", "sorted", "(", "set", "(", "elements", ")", ")", "ranges", "=", "[", "]", "i", "=", "0", "while", "i", "<", "len", "(", "el", ")", ":", "start_el", "=", "el", "[", "i", "]", "end_el", "=", "start_el", "i", "+=", "1", "while", "i", "<", "len", "(", "el", ")", ":", "if", "el", "[", "i", "]", "!=", "end_el", "+", "1", ":", "break", "end_el", "+=", "1", "i", "+=", "1", "if", "start_el", "==", "end_el", ":", "ranges", ".", "append", "(", "[", "start_el", "]", ")", "else", ":", "ranges", ".", "append", "(", "[", "start_el", ",", "end_el", "]", ")", "# Convert to elemental symbols", "range_strs", "=", "[", "]", "for", "r", "in", "ranges", ":", "sym", "=", "lut", ".", "element_sym_from_Z", "(", "r", "[", "0", "]", ",", "True", ")", "if", "len", "(", "r", ")", "==", "1", ":", "range_strs", ".", "append", "(", "sym", ")", "elif", "len", "(", "r", ")", "==", "2", "and", "r", "[", "1", "]", "==", "r", "[", "0", "]", "+", "1", ":", "sym2", "=", "lut", ".", "element_sym_from_Z", "(", "r", "[", "1", "]", ",", "True", ")", "range_strs", ".", "append", "(", "sym", "+", "\",\"", "+", "sym2", ")", "else", ":", "sym2", "=", "lut", ".", "element_sym_from_Z", "(", "r", "[", "1", "]", ",", "True", ")", "range_strs", ".", "append", "(", "sym", "+", "\"-\"", "+", "sym2", ")", "return", "\",\"", ".", "join", "(", "range_strs", ")" ]
Create a string (with ranges) given a list of element numbers For example, [1, 2, 3, 6, 7, 8, 10] will return "H-Li,C-O,Ne"
[ "Create", "a", "string", "(", "with", "ranges", ")", "given", "a", "list", "of", "element", "numbers" ]
python
train
24.04
palantir/python-language-server
pyls/plugins/pylint_lint.py
https://github.com/palantir/python-language-server/blob/96e08d85635382d17024c352306c4759f124195d/pyls/plugins/pylint_lint.py#L15-L131
def lint(cls, document, is_saved, flags=''): """Plugin interface to pyls linter. Args: document: The document to be linted. is_saved: Whether or not the file has been saved to disk. flags: Additional flags to pass to pylint. Not exposed to pyls_lint, but used for testing. Returns: A list of dicts with the following format: { 'source': 'pylint', 'range': { 'start': { 'line': start_line, 'character': start_column, }, 'end': { 'line': end_line, 'character': end_column, }, } 'message': msg, 'severity': lsp.DiagnosticSeverity.*, } """ if not is_saved: # Pylint can only be run on files that have been saved to disk. # Rather than return nothing, return the previous list of # diagnostics. If we return an empty list, any diagnostics we'd # previously shown will be cleared until the next save. Instead, # continue showing (possibly stale) diagnostics until the next # save. return cls.last_diags[document.path] # py_run will call shlex.split on its arguments, and shlex.split does # not handle Windows paths (it will try to perform escaping). Turn # backslashes into forward slashes first to avoid this issue. path = document.path if sys.platform.startswith('win'): path = path.replace('\\', '/') out, _err = py_run( '{} -f json {}'.format(path, flags), return_std=True ) # pylint prints nothing rather than [] when there are no diagnostics. # json.loads will not parse an empty string, so just return. json_str = out.getvalue() if not json_str.strip(): cls.last_diags[document.path] = [] return [] # Pylint's JSON output is a list of objects with the following format. # # { # "obj": "main", # "path": "foo.py", # "message": "Missing function docstring", # "message-id": "C0111", # "symbol": "missing-docstring", # "column": 0, # "type": "convention", # "line": 5, # "module": "foo" # } # # The type can be any of: # # * convention # * error # * fatal # * refactor # * warning diagnostics = [] for diag in json.loads(json_str): # pylint lines index from 1, pyls lines index from 0 line = diag['line'] - 1 # But both index columns from 0 col = diag['column'] # It's possible that we're linting an empty file. Even an empty # file might fail linting if it isn't named properly. end_col = len(document.lines[line]) if document.lines else 0 err_range = { 'start': { 'line': line, 'character': col, }, 'end': { 'line': line, 'character': end_col, }, } if diag['type'] == 'convention': severity = lsp.DiagnosticSeverity.Information elif diag['type'] == 'error': severity = lsp.DiagnosticSeverity.Error elif diag['type'] == 'fatal': severity = lsp.DiagnosticSeverity.Error elif diag['type'] == 'refactor': severity = lsp.DiagnosticSeverity.Hint elif diag['type'] == 'warning': severity = lsp.DiagnosticSeverity.Warning diagnostics.append({ 'source': 'pylint', 'range': err_range, 'message': '[{}] {}'.format(diag['symbol'], diag['message']), 'severity': severity, 'code': diag['message-id'] }) cls.last_diags[document.path] = diagnostics return diagnostics
[ "def", "lint", "(", "cls", ",", "document", ",", "is_saved", ",", "flags", "=", "''", ")", ":", "if", "not", "is_saved", ":", "# Pylint can only be run on files that have been saved to disk.", "# Rather than return nothing, return the previous list of", "# diagnostics. If we return an empty list, any diagnostics we'd", "# previously shown will be cleared until the next save. Instead,", "# continue showing (possibly stale) diagnostics until the next", "# save.", "return", "cls", ".", "last_diags", "[", "document", ".", "path", "]", "# py_run will call shlex.split on its arguments, and shlex.split does", "# not handle Windows paths (it will try to perform escaping). Turn", "# backslashes into forward slashes first to avoid this issue.", "path", "=", "document", ".", "path", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "path", "=", "path", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "out", ",", "_err", "=", "py_run", "(", "'{} -f json {}'", ".", "format", "(", "path", ",", "flags", ")", ",", "return_std", "=", "True", ")", "# pylint prints nothing rather than [] when there are no diagnostics.", "# json.loads will not parse an empty string, so just return.", "json_str", "=", "out", ".", "getvalue", "(", ")", "if", "not", "json_str", ".", "strip", "(", ")", ":", "cls", ".", "last_diags", "[", "document", ".", "path", "]", "=", "[", "]", "return", "[", "]", "# Pylint's JSON output is a list of objects with the following format.", "#", "# {", "# \"obj\": \"main\",", "# \"path\": \"foo.py\",", "# \"message\": \"Missing function docstring\",", "# \"message-id\": \"C0111\",", "# \"symbol\": \"missing-docstring\",", "# \"column\": 0,", "# \"type\": \"convention\",", "# \"line\": 5,", "# \"module\": \"foo\"", "# }", "#", "# The type can be any of:", "#", "# * convention", "# * error", "# * fatal", "# * refactor", "# * warning", "diagnostics", "=", "[", "]", "for", "diag", "in", "json", ".", "loads", "(", "json_str", ")", ":", "# pylint lines index from 1, pyls lines index from 0", "line", "=", "diag", "[", "'line'", "]", "-", "1", "# But both index columns from 0", "col", "=", "diag", "[", "'column'", "]", "# It's possible that we're linting an empty file. 
Even an empty", "# file might fail linting if it isn't named properly.", "end_col", "=", "len", "(", "document", ".", "lines", "[", "line", "]", ")", "if", "document", ".", "lines", "else", "0", "err_range", "=", "{", "'start'", ":", "{", "'line'", ":", "line", ",", "'character'", ":", "col", ",", "}", ",", "'end'", ":", "{", "'line'", ":", "line", ",", "'character'", ":", "end_col", ",", "}", ",", "}", "if", "diag", "[", "'type'", "]", "==", "'convention'", ":", "severity", "=", "lsp", ".", "DiagnosticSeverity", ".", "Information", "elif", "diag", "[", "'type'", "]", "==", "'error'", ":", "severity", "=", "lsp", ".", "DiagnosticSeverity", ".", "Error", "elif", "diag", "[", "'type'", "]", "==", "'fatal'", ":", "severity", "=", "lsp", ".", "DiagnosticSeverity", ".", "Error", "elif", "diag", "[", "'type'", "]", "==", "'refactor'", ":", "severity", "=", "lsp", ".", "DiagnosticSeverity", ".", "Hint", "elif", "diag", "[", "'type'", "]", "==", "'warning'", ":", "severity", "=", "lsp", ".", "DiagnosticSeverity", ".", "Warning", "diagnostics", ".", "append", "(", "{", "'source'", ":", "'pylint'", ",", "'range'", ":", "err_range", ",", "'message'", ":", "'[{}] {}'", ".", "format", "(", "diag", "[", "'symbol'", "]", ",", "diag", "[", "'message'", "]", ")", ",", "'severity'", ":", "severity", ",", "'code'", ":", "diag", "[", "'message-id'", "]", "}", ")", "cls", ".", "last_diags", "[", "document", ".", "path", "]", "=", "diagnostics", "return", "diagnostics" ]
Plugin interface to pyls linter. Args: document: The document to be linted. is_saved: Whether or not the file has been saved to disk. flags: Additional flags to pass to pylint. Not exposed to pyls_lint, but used for testing. Returns: A list of dicts with the following format: { 'source': 'pylint', 'range': { 'start': { 'line': start_line, 'character': start_column, }, 'end': { 'line': end_line, 'character': end_column, }, } 'message': msg, 'severity': lsp.DiagnosticSeverity.*, }
[ "Plugin", "interface", "to", "pyls", "linter", "." ]
python
train
36.666667
midasplatform/pydas
pydas/api.py
https://github.com/midasplatform/pydas/blob/e5f9e96e754fb2dc5da187b05e4abc77a9b2affd/pydas/api.py#L551-L592
def _find_resource_id_from_path(path): """ Get a folder id from a path on the server. Warning: This is NOT efficient at all. The schema for this path is: path := "/users/<name>/" | "/communities/<name>" , {<subfolder>/} name := <firstname> , "_" , <lastname> :param path: The virtual path on the server. :type path: string :returns: a tuple indicating True or False about whether the resource is an item and id of the resource i.e. (True, item_id) or (False, folder_id) :rtype: (bool, int | long) """ session.token = verify_credentials() parsed_path = path.split('/') if parsed_path[-1] == '': parsed_path.pop() if path.startswith('/users/'): parsed_path.pop(0) # remove '' before / parsed_path.pop(0) # remove 'users' name = parsed_path.pop(0) # remove '<firstname>_<lastname>' firstname, lastname = name.split('_') end = parsed_path.pop() user = session.communicator.get_user_by_name(firstname, lastname) leaf_folder_id = _descend_folder_for_id(parsed_path, user['folder_id']) return _search_folder_for_item_or_folder(end, leaf_folder_id) elif path.startswith('/communities/'): print(parsed_path) parsed_path.pop(0) # remove '' before / parsed_path.pop(0) # remove 'communities' community_name = parsed_path.pop(0) # remove '<community>' end = parsed_path.pop() community = session.communicator.get_community_by_name(community_name) leaf_folder_id = _descend_folder_for_id(parsed_path, community['folder_id']) return _search_folder_for_item_or_folder(end, leaf_folder_id) else: return False, -1
[ "def", "_find_resource_id_from_path", "(", "path", ")", ":", "session", ".", "token", "=", "verify_credentials", "(", ")", "parsed_path", "=", "path", ".", "split", "(", "'/'", ")", "if", "parsed_path", "[", "-", "1", "]", "==", "''", ":", "parsed_path", ".", "pop", "(", ")", "if", "path", ".", "startswith", "(", "'/users/'", ")", ":", "parsed_path", ".", "pop", "(", "0", ")", "# remove '' before /", "parsed_path", ".", "pop", "(", "0", ")", "# remove 'users'", "name", "=", "parsed_path", ".", "pop", "(", "0", ")", "# remove '<firstname>_<lastname>'", "firstname", ",", "lastname", "=", "name", ".", "split", "(", "'_'", ")", "end", "=", "parsed_path", ".", "pop", "(", ")", "user", "=", "session", ".", "communicator", ".", "get_user_by_name", "(", "firstname", ",", "lastname", ")", "leaf_folder_id", "=", "_descend_folder_for_id", "(", "parsed_path", ",", "user", "[", "'folder_id'", "]", ")", "return", "_search_folder_for_item_or_folder", "(", "end", ",", "leaf_folder_id", ")", "elif", "path", ".", "startswith", "(", "'/communities/'", ")", ":", "print", "(", "parsed_path", ")", "parsed_path", ".", "pop", "(", "0", ")", "# remove '' before /", "parsed_path", ".", "pop", "(", "0", ")", "# remove 'communities'", "community_name", "=", "parsed_path", ".", "pop", "(", "0", ")", "# remove '<community>'", "end", "=", "parsed_path", ".", "pop", "(", ")", "community", "=", "session", ".", "communicator", ".", "get_community_by_name", "(", "community_name", ")", "leaf_folder_id", "=", "_descend_folder_for_id", "(", "parsed_path", ",", "community", "[", "'folder_id'", "]", ")", "return", "_search_folder_for_item_or_folder", "(", "end", ",", "leaf_folder_id", ")", "else", ":", "return", "False", ",", "-", "1" ]
Get a folder id from a path on the server. Warning: This is NOT efficient at all. The schema for this path is: path := "/users/<name>/" | "/communities/<name>" , {<subfolder>/} name := <firstname> , "_" , <lastname> :param path: The virtual path on the server. :type path: string :returns: a tuple indicating True or False about whether the resource is an item and id of the resource i.e. (True, item_id) or (False, folder_id) :rtype: (bool, int | long)
[ "Get", "a", "folder", "id", "from", "a", "path", "on", "the", "server", "." ]
python
valid
41.261905
postmanlabs/httpbin
httpbin/core.py
https://github.com/postmanlabs/httpbin/blob/f8ec666b4d1b654e4ff6aedd356f510dcac09f83/httpbin/core.py#L1498-L1584
def range_request(numbytes): """Streams n random bytes generated with given seed, at given chunk size per packet. --- tags: - Dynamic data parameters: - in: path name: numbytes type: int produces: - application/octet-stream responses: 200: description: Bytes. """ if numbytes <= 0 or numbytes > (100 * 1024): response = Response( headers={"ETag": "range%d" % numbytes, "Accept-Ranges": "bytes"} ) response.status_code = 404 response.data = "number of bytes must be in the range (0, 102400]" return response params = CaseInsensitiveDict(request.args.items()) if "chunk_size" in params: chunk_size = max(1, int(params["chunk_size"])) else: chunk_size = 10 * 1024 duration = float(params.get("duration", 0)) pause_per_byte = duration / numbytes request_headers = get_headers() first_byte_pos, last_byte_pos = get_request_range(request_headers, numbytes) range_length = (last_byte_pos + 1) - first_byte_pos if ( first_byte_pos > last_byte_pos or first_byte_pos not in xrange(0, numbytes) or last_byte_pos not in xrange(0, numbytes) ): response = Response( headers={ "ETag": "range%d" % numbytes, "Accept-Ranges": "bytes", "Content-Range": "bytes */%d" % numbytes, "Content-Length": "0", } ) response.status_code = 416 return response def generate_bytes(): chunks = bytearray() for i in xrange(first_byte_pos, last_byte_pos + 1): # We don't want the resource to change across requests, so we need # to use a predictable data generation function chunks.append(ord("a") + (i % 26)) if len(chunks) == chunk_size: yield (bytes(chunks)) time.sleep(pause_per_byte * chunk_size) chunks = bytearray() if chunks: time.sleep(pause_per_byte * len(chunks)) yield (bytes(chunks)) content_range = "bytes %d-%d/%d" % (first_byte_pos, last_byte_pos, numbytes) response_headers = { "Content-Type": "application/octet-stream", "ETag": "range%d" % numbytes, "Accept-Ranges": "bytes", "Content-Length": str(range_length), "Content-Range": content_range, } response = Response(generate_bytes(), headers=response_headers) if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)): response.status_code = 200 else: response.status_code = 206 return response
[ "def", "range_request", "(", "numbytes", ")", ":", "if", "numbytes", "<=", "0", "or", "numbytes", ">", "(", "100", "*", "1024", ")", ":", "response", "=", "Response", "(", "headers", "=", "{", "\"ETag\"", ":", "\"range%d\"", "%", "numbytes", ",", "\"Accept-Ranges\"", ":", "\"bytes\"", "}", ")", "response", ".", "status_code", "=", "404", "response", ".", "data", "=", "\"number of bytes must be in the range (0, 102400]\"", "return", "response", "params", "=", "CaseInsensitiveDict", "(", "request", ".", "args", ".", "items", "(", ")", ")", "if", "\"chunk_size\"", "in", "params", ":", "chunk_size", "=", "max", "(", "1", ",", "int", "(", "params", "[", "\"chunk_size\"", "]", ")", ")", "else", ":", "chunk_size", "=", "10", "*", "1024", "duration", "=", "float", "(", "params", ".", "get", "(", "\"duration\"", ",", "0", ")", ")", "pause_per_byte", "=", "duration", "/", "numbytes", "request_headers", "=", "get_headers", "(", ")", "first_byte_pos", ",", "last_byte_pos", "=", "get_request_range", "(", "request_headers", ",", "numbytes", ")", "range_length", "=", "(", "last_byte_pos", "+", "1", ")", "-", "first_byte_pos", "if", "(", "first_byte_pos", ">", "last_byte_pos", "or", "first_byte_pos", "not", "in", "xrange", "(", "0", ",", "numbytes", ")", "or", "last_byte_pos", "not", "in", "xrange", "(", "0", ",", "numbytes", ")", ")", ":", "response", "=", "Response", "(", "headers", "=", "{", "\"ETag\"", ":", "\"range%d\"", "%", "numbytes", ",", "\"Accept-Ranges\"", ":", "\"bytes\"", ",", "\"Content-Range\"", ":", "\"bytes */%d\"", "%", "numbytes", ",", "\"Content-Length\"", ":", "\"0\"", ",", "}", ")", "response", ".", "status_code", "=", "416", "return", "response", "def", "generate_bytes", "(", ")", ":", "chunks", "=", "bytearray", "(", ")", "for", "i", "in", "xrange", "(", "first_byte_pos", ",", "last_byte_pos", "+", "1", ")", ":", "# We don't want the resource to change across requests, so we need", "# to use a predictable data generation function", "chunks", ".", "append", "(", "ord", "(", "\"a\"", ")", "+", "(", "i", "%", "26", ")", ")", "if", "len", "(", "chunks", ")", "==", "chunk_size", ":", "yield", "(", "bytes", "(", "chunks", ")", ")", "time", ".", "sleep", "(", "pause_per_byte", "*", "chunk_size", ")", "chunks", "=", "bytearray", "(", ")", "if", "chunks", ":", "time", ".", "sleep", "(", "pause_per_byte", "*", "len", "(", "chunks", ")", ")", "yield", "(", "bytes", "(", "chunks", ")", ")", "content_range", "=", "\"bytes %d-%d/%d\"", "%", "(", "first_byte_pos", ",", "last_byte_pos", ",", "numbytes", ")", "response_headers", "=", "{", "\"Content-Type\"", ":", "\"application/octet-stream\"", ",", "\"ETag\"", ":", "\"range%d\"", "%", "numbytes", ",", "\"Accept-Ranges\"", ":", "\"bytes\"", ",", "\"Content-Length\"", ":", "str", "(", "range_length", ")", ",", "\"Content-Range\"", ":", "content_range", ",", "}", "response", "=", "Response", "(", "generate_bytes", "(", ")", ",", "headers", "=", "response_headers", ")", "if", "(", "first_byte_pos", "==", "0", ")", "and", "(", "last_byte_pos", "==", "(", "numbytes", "-", "1", ")", ")", ":", "response", ".", "status_code", "=", "200", "else", ":", "response", ".", "status_code", "=", "206", "return", "response" ]
Streams n random bytes generated with given seed, at given chunk size per packet. --- tags: - Dynamic data parameters: - in: path name: numbytes type: int produces: - application/octet-stream responses: 200: description: Bytes.
[ "Streams", "n", "random", "bytes", "generated", "with", "given", "seed", "at", "given", "chunk", "size", "per", "packet", ".", "---", "tags", ":", "-", "Dynamic", "data", "parameters", ":", "-", "in", ":", "path", "name", ":", "numbytes", "type", ":", "int", "produces", ":", "-", "application", "/", "octet", "-", "stream", "responses", ":", "200", ":", "description", ":", "Bytes", "." ]
python
train
30.172414
uw-it-aca/uw-restclients-canvas
uw_canvas/reports.py
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/reports.py#L53-L68
def create_report(self, report_type, account_id, term_id=None, params={}): """ Generates a report instance for the canvas account id. https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create """ if term_id is not None: params["enrollment_term_id"] = term_id url = ACCOUNTS_API.format(account_id) + "/reports/{}".format( report_type) body = {"parameters": params} data = self._post_resource(url, body) data["account_id"] = account_id return Report(data=data)
[ "def", "create_report", "(", "self", ",", "report_type", ",", "account_id", ",", "term_id", "=", "None", ",", "params", "=", "{", "}", ")", ":", "if", "term_id", "is", "not", "None", ":", "params", "[", "\"enrollment_term_id\"", "]", "=", "term_id", "url", "=", "ACCOUNTS_API", ".", "format", "(", "account_id", ")", "+", "\"/reports/{}\"", ".", "format", "(", "report_type", ")", "body", "=", "{", "\"parameters\"", ":", "params", "}", "data", "=", "self", ".", "_post_resource", "(", "url", ",", "body", ")", "data", "[", "\"account_id\"", "]", "=", "account_id", "return", "Report", "(", "data", "=", "data", ")" ]
Generates a report instance for the canvas account id. https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.create
[ "Generates", "a", "report", "instance", "for", "the", "canvas", "account", "id", "." ]
python
test
36.375
peterldowns/djoauth2
djoauth2/access_token.py
https://github.com/peterldowns/djoauth2/blob/151c7619d1d7a91d720397cfecf3a29fcc9747a9/djoauth2/access_token.py#L126-L174
def make_error_response(self, validation_error, expose_errors): """ Return an appropriate ``HttpResponse`` on authentication failure. In case of an error, the specification only details the inclusion of the ``WWW-Authenticate`` header. Additionally, when allowed by the specification, we respond with error details formatted in JSON in the body of the response. For more information, read the specification: http://tools.ietf.org/html/rfc6750#section-3.1 . :param validation_error: A :py:class:`djoauth2.access_token.AuthenticationError` raised by the :py:meth:`validate` method. :param expose_errors: A boolean describing whether or not to expose error information in the error response, as described by the section of the specification linked to above. :rtype: a Django ``HttpResponse``. """ authenticate_header = ['Bearer realm="{}"'.format(settings.DJOAUTH2_REALM)] if not expose_errors: response = HttpResponse(status=400) response['WWW-Authenticate'] = ', '.join(authenticate_header) return response status_code = 401 error_details = get_error_details(validation_error) if isinstance(validation_error, InvalidRequest): status_code = 400 elif isinstance(validation_error, InvalidToken): status_code = 401 elif isinstance(validation_error, InsufficientScope): error_details['scope'] = ' '.join(self.required_scope_names) status_code = 403 # TODO(peter): should we return response details as JSON? This is not # touched upon by the spec and may limit use of this library. Many # programmers use other transport languaes such as YAML or XML. All of the # error information is already included in the headers. response = HttpResponse(content=json.dumps(error_details), content_type='application/json', status=status_code) for key, value in error_details.iteritems(): authenticate_header.append('{}="{}"'.format(key, value)) response['WWW-Authenticate'] = ', '.join(authenticate_header) return response
[ "def", "make_error_response", "(", "self", ",", "validation_error", ",", "expose_errors", ")", ":", "authenticate_header", "=", "[", "'Bearer realm=\"{}\"'", ".", "format", "(", "settings", ".", "DJOAUTH2_REALM", ")", "]", "if", "not", "expose_errors", ":", "response", "=", "HttpResponse", "(", "status", "=", "400", ")", "response", "[", "'WWW-Authenticate'", "]", "=", "', '", ".", "join", "(", "authenticate_header", ")", "return", "response", "status_code", "=", "401", "error_details", "=", "get_error_details", "(", "validation_error", ")", "if", "isinstance", "(", "validation_error", ",", "InvalidRequest", ")", ":", "status_code", "=", "400", "elif", "isinstance", "(", "validation_error", ",", "InvalidToken", ")", ":", "status_code", "=", "401", "elif", "isinstance", "(", "validation_error", ",", "InsufficientScope", ")", ":", "error_details", "[", "'scope'", "]", "=", "' '", ".", "join", "(", "self", ".", "required_scope_names", ")", "status_code", "=", "403", "# TODO(peter): should we return response details as JSON? This is not", "# touched upon by the spec and may limit use of this library. Many", "# programmers use other transport languaes such as YAML or XML. All of the", "# error information is already included in the headers.", "response", "=", "HttpResponse", "(", "content", "=", "json", ".", "dumps", "(", "error_details", ")", ",", "content_type", "=", "'application/json'", ",", "status", "=", "status_code", ")", "for", "key", ",", "value", "in", "error_details", ".", "iteritems", "(", ")", ":", "authenticate_header", ".", "append", "(", "'{}=\"{}\"'", ".", "format", "(", "key", ",", "value", ")", ")", "response", "[", "'WWW-Authenticate'", "]", "=", "', '", ".", "join", "(", "authenticate_header", ")", "return", "response" ]
Return an appropriate ``HttpResponse`` on authentication failure. In case of an error, the specification only details the inclusion of the ``WWW-Authenticate`` header. Additionally, when allowed by the specification, we respond with error details formatted in JSON in the body of the response. For more information, read the specification: http://tools.ietf.org/html/rfc6750#section-3.1 . :param validation_error: A :py:class:`djoauth2.access_token.AuthenticationError` raised by the :py:meth:`validate` method. :param expose_errors: A boolean describing whether or not to expose error information in the error response, as described by the section of the specification linked to above. :rtype: a Django ``HttpResponse``.
[ "Return", "an", "appropriate", "HttpResponse", "on", "authentication", "failure", "." ]
python
train
42.693878
InQuest/python-sandboxapi
sandboxapi/cuckoo.py
https://github.com/InQuest/python-sandboxapi/blob/9bad73f453e25d7d23e7b4b1ae927f44a35a5bc3/sandboxapi/cuckoo.py#L75-L99
def check(self, item_id): """Check if an analysis is complete :type item_id: int :param item_id: task_id to check. :rtype: bool :return: Boolean indicating if a report is done or not. """ response = self._request("tasks/view/{id}".format(id=item_id)) if response.status_code == 404: # probably an unknown task id return False try: content = json.loads(response.content.decode('utf-8')) status = content['task']["status"] if status == 'completed' or status == "reported": return True except ValueError as e: raise sandboxapi.SandboxError(e) return False
[ "def", "check", "(", "self", ",", "item_id", ")", ":", "response", "=", "self", ".", "_request", "(", "\"tasks/view/{id}\"", ".", "format", "(", "id", "=", "item_id", ")", ")", "if", "response", ".", "status_code", "==", "404", ":", "# probably an unknown task id", "return", "False", "try", ":", "content", "=", "json", ".", "loads", "(", "response", ".", "content", ".", "decode", "(", "'utf-8'", ")", ")", "status", "=", "content", "[", "'task'", "]", "[", "\"status\"", "]", "if", "status", "==", "'completed'", "or", "status", "==", "\"reported\"", ":", "return", "True", "except", "ValueError", "as", "e", ":", "raise", "sandboxapi", ".", "SandboxError", "(", "e", ")", "return", "False" ]
Check if an analysis is complete :type item_id: int :param item_id: task_id to check. :rtype: bool :return: Boolean indicating if a report is done or not.
[ "Check", "if", "an", "analysis", "is", "complete" ]
python
train
28.48
fracpete/python-weka-wrapper
python/weka/core/serialization.py
https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/core/serialization.py#L104-L122
def write_all(filename, jobjects): """ Serializes the list of objects to disk. JavaObject instances get automatically unwrapped. :param filename: the file to serialize the object to :type filename: str :param jobjects: the list of objects to serialize :type jobjects: list """ array = javabridge.get_env().make_object_array(len(jobjects), javabridge.get_env().find_class("java/lang/Object")) for i in xrange(len(jobjects)): obj = jobjects[i] if isinstance(obj, JavaObject): obj = obj.jobject javabridge.get_env().set_object_array_element(array, i, obj) javabridge.static_call( "Lweka/core/SerializationHelper;", "writeAll", "(Ljava/lang/String;[Ljava/lang/Object;)V", filename, array)
[ "def", "write_all", "(", "filename", ",", "jobjects", ")", ":", "array", "=", "javabridge", ".", "get_env", "(", ")", ".", "make_object_array", "(", "len", "(", "jobjects", ")", ",", "javabridge", ".", "get_env", "(", ")", ".", "find_class", "(", "\"java/lang/Object\"", ")", ")", "for", "i", "in", "xrange", "(", "len", "(", "jobjects", ")", ")", ":", "obj", "=", "jobjects", "[", "i", "]", "if", "isinstance", "(", "obj", ",", "JavaObject", ")", ":", "obj", "=", "obj", ".", "jobject", "javabridge", ".", "get_env", "(", ")", ".", "set_object_array_element", "(", "array", ",", "i", ",", "obj", ")", "javabridge", ".", "static_call", "(", "\"Lweka/core/SerializationHelper;\"", ",", "\"writeAll\"", ",", "\"(Ljava/lang/String;[Ljava/lang/Object;)V\"", ",", "filename", ",", "array", ")" ]
Serializes the list of objects to disk. JavaObject instances get automatically unwrapped. :param filename: the file to serialize the object to :type filename: str :param jobjects: the list of objects to serialize :type jobjects: list
[ "Serializes", "the", "list", "of", "objects", "to", "disk", ".", "JavaObject", "instances", "get", "automatically", "unwrapped", "." ]
python
train
40.368421
shreyaspotnis/rampage
rampage/server.py
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/server.py#L616-L626
def get_digital_channels(channel_list): """Goes through channel list and returns digital channels with ids Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30.""" dig_ids = digital_channel_ids() dig_channels = [] for ln in dig_ids: for ch in channel_list: if ch.dct['id'] == ln: dig_channels.append(ch) break return dig_channels
[ "def", "get_digital_channels", "(", "channel_list", ")", ":", "dig_ids", "=", "digital_channel_ids", "(", ")", "dig_channels", "=", "[", "]", "for", "ln", "in", "dig_ids", ":", "for", "ch", "in", "channel_list", ":", "if", "ch", ".", "dct", "[", "'id'", "]", "==", "ln", ":", "dig_channels", ".", "append", "(", "ch", ")", "break", "return", "dig_channels" ]
Goes through channel list and returns digital channels with ids Dev1/port0/line08, Dev1/port0/line09... Dev1/port0/line30.
[ "Goes", "through", "channel", "list", "and", "returns", "digital", "channels", "with", "ids", "Dev1", "/", "port0", "/", "line08", "Dev1", "/", "port0", "/", "line09", "...", "Dev1", "/", "port0", "/", "line30", "." ]
python
train
36.363636
kstaniek/condoor
condoor/patterns.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/patterns.py#L98-L108
def platform(self, with_prompt, platforms=None): """Return the platform name based on the prompt matching.""" if platforms is None: platforms = self._dict['generic']['prompt_detection'] for platform in platforms: pattern = self.pattern(platform, 'prompt') result = re.search(pattern, with_prompt) if result: return platform return None
[ "def", "platform", "(", "self", ",", "with_prompt", ",", "platforms", "=", "None", ")", ":", "if", "platforms", "is", "None", ":", "platforms", "=", "self", ".", "_dict", "[", "'generic'", "]", "[", "'prompt_detection'", "]", "for", "platform", "in", "platforms", ":", "pattern", "=", "self", ".", "pattern", "(", "platform", ",", "'prompt'", ")", "result", "=", "re", ".", "search", "(", "pattern", ",", "with_prompt", ")", "if", "result", ":", "return", "platform", "return", "None" ]
Return the platform name based on the prompt matching.
[ "Return", "the", "platform", "name", "based", "on", "the", "prompt", "matching", "." ]
python
train
38.363636
log2timeline/plaso
plaso/parsers/plist_plugins/spotlight_volume.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/plist_plugins/spotlight_volume.py#L23-L47
def GetEntries(self, parser_mediator, match=None, **unused_kwargs): """Extracts relevant Volume Configuration Spotlight entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS. """ stores = match.get('Stores', {}) for volume_name, volume in iter(stores.items()): datetime_value = volume.get('CreationDate', None) if not datetime_value: continue partial_path = volume['PartialPath'] event_data = plist_event.PlistTimeEventData() event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format( volume_name, partial_path) event_data.key = '' event_data.root = '/Stores' event = time_events.PythonDatetimeEvent( datetime_value, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
[ "def", "GetEntries", "(", "self", ",", "parser_mediator", ",", "match", "=", "None", ",", "*", "*", "unused_kwargs", ")", ":", "stores", "=", "match", ".", "get", "(", "'Stores'", ",", "{", "}", ")", "for", "volume_name", ",", "volume", "in", "iter", "(", "stores", ".", "items", "(", ")", ")", ":", "datetime_value", "=", "volume", ".", "get", "(", "'CreationDate'", ",", "None", ")", "if", "not", "datetime_value", ":", "continue", "partial_path", "=", "volume", "[", "'PartialPath'", "]", "event_data", "=", "plist_event", ".", "PlistTimeEventData", "(", ")", "event_data", ".", "desc", "=", "'Spotlight Volume {0:s} ({1:s}) activated.'", ".", "format", "(", "volume_name", ",", "partial_path", ")", "event_data", ".", "key", "=", "''", "event_data", ".", "root", "=", "'/Stores'", "event", "=", "time_events", ".", "PythonDatetimeEvent", "(", "datetime_value", ",", "definitions", ".", "TIME_DESCRIPTION_WRITTEN", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
Extracts relevant Volume Configuration Spotlight entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
[ "Extracts", "relevant", "Volume", "Configuration", "Spotlight", "entries", "." ]
python
train
39.2
sphinx-gallery/sphinx-gallery
sphinx_gallery/backreferences.py
https://github.com/sphinx-gallery/sphinx-gallery/blob/b0c1f6701bf3f4cef238757e1105cf3686b5e674/sphinx_gallery/backreferences.py#L111-L122
def extract_object_names_from_docs(filename): """Add matches from the text blocks (must be full names!)""" text = split_code_and_text_blocks(filename)[1] text = '\n'.join(t[1] for t in text if t[0] == 'text') regex = re.compile(r':(?:' r'func(?:tion)?|' r'meth(?:od)?|' r'attr(?:ibute)?|' r'obj(?:ect)?|' r'class):`(\S*)`' ) return [(x, x) for x in re.findall(regex, text)]
[ "def", "extract_object_names_from_docs", "(", "filename", ")", ":", "text", "=", "split_code_and_text_blocks", "(", "filename", ")", "[", "1", "]", "text", "=", "'\\n'", ".", "join", "(", "t", "[", "1", "]", "for", "t", "in", "text", "if", "t", "[", "0", "]", "==", "'text'", ")", "regex", "=", "re", ".", "compile", "(", "r':(?:'", "r'func(?:tion)?|'", "r'meth(?:od)?|'", "r'attr(?:ibute)?|'", "r'obj(?:ect)?|'", "r'class):`(\\S*)`'", ")", "return", "[", "(", "x", ",", "x", ")", "for", "x", "in", "re", ".", "findall", "(", "regex", ",", "text", ")", "]" ]
Add matches from the text blocks (must be full names!)
[ "Add", "matches", "from", "the", "text", "blocks", "(", "must", "be", "full", "names!", ")" ]
python
train
43.333333
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Action.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Action.py#L1266-L1271
def get_presig(self, target, source, env): """Return the signature contents of this action list. Simple concatenation of the signatures of the elements. """ return b"".join([bytes(x.get_contents(target, source, env)) for x in self.list])
[ "def", "get_presig", "(", "self", ",", "target", ",", "source", ",", "env", ")", ":", "return", "b\"\"", ".", "join", "(", "[", "bytes", "(", "x", ".", "get_contents", "(", "target", ",", "source", ",", "env", ")", ")", "for", "x", "in", "self", ".", "list", "]", ")" ]
Return the signature contents of this action list. Simple concatenation of the signatures of the elements.
[ "Return", "the", "signature", "contents", "of", "this", "action", "list", "." ]
python
train
44.166667
gem/oq-engine
openquake/hmtk/seismicity/selector.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/selector.py#L133-L161
def select_catalogue(self, valid_id): ''' Method to post-process the catalogue based on the selection options :param numpy.ndarray valid_id: Boolean vector indicating whether each event is selected (True) or not (False) :returns: Catalogue of selected events as instance of openquake.hmtk.seismicity.catalogue.Catalogue class ''' if not np.any(valid_id): # No events selected - create clean instance of class output = Catalogue() output.processes = self.catalogue.processes elif np.all(valid_id): if self.copycat: output = deepcopy(self.catalogue) else: output = self.catalogue else: if self.copycat: output = deepcopy(self.catalogue) else: output = self.catalogue output.purge_catalogue(valid_id) return output
[ "def", "select_catalogue", "(", "self", ",", "valid_id", ")", ":", "if", "not", "np", ".", "any", "(", "valid_id", ")", ":", "# No events selected - create clean instance of class", "output", "=", "Catalogue", "(", ")", "output", ".", "processes", "=", "self", ".", "catalogue", ".", "processes", "elif", "np", ".", "all", "(", "valid_id", ")", ":", "if", "self", ".", "copycat", ":", "output", "=", "deepcopy", "(", "self", ".", "catalogue", ")", "else", ":", "output", "=", "self", ".", "catalogue", "else", ":", "if", "self", ".", "copycat", ":", "output", "=", "deepcopy", "(", "self", ".", "catalogue", ")", "else", ":", "output", "=", "self", ".", "catalogue", "output", ".", "purge_catalogue", "(", "valid_id", ")", "return", "output" ]
Method to post-process the catalogue based on the selection options :param numpy.ndarray valid_id: Boolean vector indicating whether each event is selected (True) or not (False) :returns: Catalogue of selected events as instance of openquake.hmtk.seismicity.catalogue.Catalogue class
[ "Method", "to", "post", "-", "process", "the", "catalogue", "based", "on", "the", "selection", "options" ]
python
train
33.310345
ga4gh/ga4gh-server
ga4gh/server/datarepo.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datarepo.py#L599-L618
def open(self, mode=MODE_READ): """ Opens this repo in the specified mode. TODO: figure out the correct semantics of this and document the intended future behaviour as well as the current transitional behaviour. """ if mode not in [MODE_READ, MODE_WRITE]: error = "Open mode must be '{}' or '{}'".format( MODE_READ, MODE_WRITE) raise ValueError(error) self._openMode = mode if mode == MODE_READ: self.assertExists() if mode == MODE_READ: # This is part of the transitional behaviour where # we load the whole DB into memory to get access to # the data model. self.load()
[ "def", "open", "(", "self", ",", "mode", "=", "MODE_READ", ")", ":", "if", "mode", "not", "in", "[", "MODE_READ", ",", "MODE_WRITE", "]", ":", "error", "=", "\"Open mode must be '{}' or '{}'\"", ".", "format", "(", "MODE_READ", ",", "MODE_WRITE", ")", "raise", "ValueError", "(", "error", ")", "self", ".", "_openMode", "=", "mode", "if", "mode", "==", "MODE_READ", ":", "self", ".", "assertExists", "(", ")", "if", "mode", "==", "MODE_READ", ":", "# This is part of the transitional behaviour where", "# we load the whole DB into memory to get access to", "# the data model.", "self", ".", "load", "(", ")" ]
Opens this repo in the specified mode. TODO: figure out the correct semantics of this and document the intended future behaviour as well as the current transitional behaviour.
[ "Opens", "this", "repo", "in", "the", "specified", "mode", "." ]
python
train
36.6
sorgerlab/indra
indra/databases/hgnc_client.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/databases/hgnc_client.py#L126-L147
def get_current_hgnc_id(hgnc_name): """Return the HGNC ID(s) corresponding to a current or outdate HGNC symbol. Parameters ---------- hgnc_name : str The HGNC symbol to be converted, possibly an outdated symbol. Returns ------- str or list of str or None If there is a single HGNC ID corresponding to the given current or outdated HGNC symbol, that ID is returned as a string. If the symbol is outdated and maps to multiple current IDs, a list of these IDs is returned. If the given name doesn't correspond to either a current or an outdated HGNC symbol, None is returned. """ hgnc_id = get_hgnc_id(hgnc_name) if hgnc_id: return hgnc_id hgnc_id = prev_sym_map.get(hgnc_name) return hgnc_id
[ "def", "get_current_hgnc_id", "(", "hgnc_name", ")", ":", "hgnc_id", "=", "get_hgnc_id", "(", "hgnc_name", ")", "if", "hgnc_id", ":", "return", "hgnc_id", "hgnc_id", "=", "prev_sym_map", ".", "get", "(", "hgnc_name", ")", "return", "hgnc_id" ]
Return the HGNC ID(s) corresponding to a current or outdate HGNC symbol. Parameters ---------- hgnc_name : str The HGNC symbol to be converted, possibly an outdated symbol. Returns ------- str or list of str or None If there is a single HGNC ID corresponding to the given current or outdated HGNC symbol, that ID is returned as a string. If the symbol is outdated and maps to multiple current IDs, a list of these IDs is returned. If the given name doesn't correspond to either a current or an outdated HGNC symbol, None is returned.
[ "Return", "the", "HGNC", "ID", "(", "s", ")", "corresponding", "to", "a", "current", "or", "outdate", "HGNC", "symbol", "." ]
python
train
35.181818
spyder-ide/spyder
spyder/preferences/appearance.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/appearance.py#L488-L509
def get_edited_color_scheme(self): """ Get the values of the last edited color scheme to be used in an instant preview in the preview editor, without using `apply`. """ color_scheme = {} scheme_name = self.last_used_scheme for key in self.widgets[scheme_name]: items = self.widgets[scheme_name][key] if len(items) == 1: # ColorLayout value = items[0].text() else: # ColorLayout + checkboxes value = (items[0].text(), items[1].isChecked(), items[2].isChecked()) color_scheme[key] = value return color_scheme
[ "def", "get_edited_color_scheme", "(", "self", ")", ":", "color_scheme", "=", "{", "}", "scheme_name", "=", "self", ".", "last_used_scheme", "for", "key", "in", "self", ".", "widgets", "[", "scheme_name", "]", ":", "items", "=", "self", ".", "widgets", "[", "scheme_name", "]", "[", "key", "]", "if", "len", "(", "items", ")", "==", "1", ":", "# ColorLayout", "value", "=", "items", "[", "0", "]", ".", "text", "(", ")", "else", ":", "# ColorLayout + checkboxes", "value", "=", "(", "items", "[", "0", "]", ".", "text", "(", ")", ",", "items", "[", "1", "]", ".", "isChecked", "(", ")", ",", "items", "[", "2", "]", ".", "isChecked", "(", ")", ")", "color_scheme", "[", "key", "]", "=", "value", "return", "color_scheme" ]
Get the values of the last edited color scheme to be used in an instant preview in the preview editor, without using `apply`.
[ "Get", "the", "values", "of", "the", "last", "edited", "color", "scheme", "to", "be", "used", "in", "an", "instant", "preview", "in", "the", "preview", "editor", "without", "using", "apply", "." ]
python
train
31.363636
StackStorm/pybind
pybind/nos/v6_0_2f/overlay_gateway/site/bfd/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/overlay_gateway/site/bfd/__init__.py#L94-L117
def _set_params(self, v, load=False): """ Setter method for params, mapped from YANG variable /overlay_gateway/site/bfd/params (container) If this variable is read-only (config: false) in the source YANG file, then _set_params is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_params() directly. YANG Description: Configure BFD parameters for the tunnels to the remote site. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=params.params, is_container='container', presence=False, yang_name="params", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create BFD session for the tunnels to the remote site.', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """params must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=params.params, is_container='container', presence=False, yang_name="params", rest_name="bfd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create BFD session for the tunnels to the remote site.', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""", }) self.__params = t if hasattr(self, '_set'): self._set()
[ "def", "_set_params", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "params", ".", "params", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"params\"", ",", "rest_name", "=", "\"bfd\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Create BFD session for the tunnels to the remote site.'", ",", "u'alt-name'", ":", "u'bfd'", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-tunnels'", ",", "defining_module", "=", "'brocade-tunnels'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"params must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=params.params, is_container='container', presence=False, yang_name=\"params\", rest_name=\"bfd\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Create BFD session for the tunnels to the remote site.', u'alt-name': u'bfd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__params", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for params, mapped from YANG variable /overlay_gateway/site/bfd/params (container) If this variable is read-only (config: false) in the source YANG file, then _set_params is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_params() directly. YANG Description: Configure BFD parameters for the tunnels to the remote site.
[ "Setter", "method", "for", "params", "mapped", "from", "YANG", "variable", "/", "overlay_gateway", "/", "site", "/", "bfd", "/", "params", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_params", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_params", "()", "directly", "." ]
python
train
77.125
kevinconway/confpy
confpy/loaders/pyfile.py
https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/loaders/pyfile.py#L26-L35
def parsed(self): """Get the code object which represents the compiled Python file. This property is cached and only parses the content once. """ if not self._parsed: self._parsed = compile(self.content, self.path, 'exec') return self._parsed
[ "def", "parsed", "(", "self", ")", ":", "if", "not", "self", ".", "_parsed", ":", "self", ".", "_parsed", "=", "compile", "(", "self", ".", "content", ",", "self", ".", "path", ",", "'exec'", ")", "return", "self", ".", "_parsed" ]
Get the code object which represents the compiled Python file. This property is cached and only parses the content once.
[ "Get", "the", "code", "object", "which", "represents", "the", "compiled", "Python", "file", "." ]
python
train
28.8
celiao/tmdbsimple
tmdbsimple/movies.py
https://github.com/celiao/tmdbsimple/blob/ff17893110c99771d6398a62c35d36dd9735f4b9/tmdbsimple/movies.py#L152-L167
def recommendations(self, **kwargs): """ Get a list of recommended movies for a movie. Args: language: (optional) ISO 639-1 code. page: (optional) Minimum value of 1. Expected value is an integer. Returns: A dict representation of the JSON returned from the API. """ path = self._get_id_path('recommendations') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "recommendations", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_id_path", "(", "'recommendations'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ")", "return", "response" ]
Get a list of recommended movies for a movie. Args: language: (optional) ISO 639-1 code. page: (optional) Minimum value of 1. Expected value is an integer. Returns: A dict representation of the JSON returned from the API.
[ "Get", "a", "list", "of", "recommended", "movies", "for", "a", "movie", "." ]
python
test
30.875
weluse/django-nose-selenium
noseselenium/plugins.py
https://github.com/weluse/django-nose-selenium/blob/19a09b9455545f70271f884649323a38812793e6/noseselenium/plugins.py#L81-L89
def _patch_static_handler(handler): """Patch in support for static files serving if supported and enabled. """ if django.VERSION[:2] < (1, 3): return from django.contrib.staticfiles.handlers import StaticFilesHandler return StaticFilesHandler(handler)
[ "def", "_patch_static_handler", "(", "handler", ")", ":", "if", "django", ".", "VERSION", "[", ":", "2", "]", "<", "(", "1", ",", "3", ")", ":", "return", "from", "django", ".", "contrib", ".", "staticfiles", ".", "handlers", "import", "StaticFilesHandler", "return", "StaticFilesHandler", "(", "handler", ")" ]
Patch in support for static files serving if supported and enabled.
[ "Patch", "in", "support", "for", "static", "files", "serving", "if", "supported", "and", "enabled", "." ]
python
train
30.333333
soasme/rio
rio/models/utils.py
https://github.com/soasme/rio/blob/f722eb0ff4b0382bceaff77737f0b87cb78429e7/rio/models/utils.py#L153-L165
def get_instance_by_bin_uuid(model, bin_uuid): """Get an instance by binary uuid. :param model: a string, model name in rio.models. :param bin_uuid: a 16-bytes binary string. :return: None or a SQLAlchemy instance. """ try: model = get_model(model) except ImportError: return None return model.query.filter_by(**{'bin_uuid': bin_uuid}).first()
[ "def", "get_instance_by_bin_uuid", "(", "model", ",", "bin_uuid", ")", ":", "try", ":", "model", "=", "get_model", "(", "model", ")", "except", "ImportError", ":", "return", "None", "return", "model", ".", "query", ".", "filter_by", "(", "*", "*", "{", "'bin_uuid'", ":", "bin_uuid", "}", ")", ".", "first", "(", ")" ]
Get an instance by binary uuid. :param model: a string, model name in rio.models. :param bin_uuid: a 16-byte binary string. :return: None or a SQLAlchemy instance.
[ "Get", "an", "instance", "by", "binary", "uuid", "." ]
python
train
29.307692
numenta/nupic
src/nupic/frameworks/opf/metrics.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/metrics.py#L180-L195
def getInferenceTypeFromLabel(cls, label): """ Extracts the PredictionKind (temporal vs. nontemporal) from the given metric label. :param label: (string) for a metric spec generated by :meth:`getMetricLabel` :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`) """ infType, _, _= label.partition(cls._LABEL_SEPARATOR) if not InferenceType.validate(infType): return None return infType
[ "def", "getInferenceTypeFromLabel", "(", "cls", ",", "label", ")", ":", "infType", ",", "_", ",", "_", "=", "label", ".", "partition", "(", "cls", ".", "_LABEL_SEPARATOR", ")", "if", "not", "InferenceType", ".", "validate", "(", "infType", ")", ":", "return", "None", "return", "infType" ]
Extracts the PredictionKind (temporal vs. nontemporal) from the given metric label. :param label: (string) for a metric spec generated by :meth:`getMetricLabel` :returns: (:class:`~nupic.frameworks.opf.opf_utils.InferenceType`)
[ "Extracts", "the", "PredictionKind", "(", "temporal", "vs", ".", "nontemporal", ")", "from", "the", "given", "metric", "label", "." ]
python
valid
27.625
FutunnOpen/futuquant
futuquant/quote/open_quote_context.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/quote/open_quote_context.py#L1095-L1142
def unsubscribe(self, code_list, subtype_list): """ 取消订阅 :param code_list: 取消订阅的股票代码列表 :param subtype_list: 取消订阅的类型,参见SubType :return: (ret, err_message) ret == RET_OK err_message为None ret != RET_OK err_message为错误描述字符串 """ ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list) if ret != RET_OK: return ret, msg query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req, SubscriptionQuery.unpack_unsubscribe_rsp) kargs = { 'code_list': code_list, 'subtype_list': subtype_list, "conn_id": self.get_sync_conn_id() } for subtype in subtype_list: if subtype not in self._ctx_subscribe: continue code_set = self._ctx_subscribe[subtype] for code in code_list: if code not in code_set: continue code_set.remove(code) ret_code, msg, _ = query_processor(**kargs) if ret_code != RET_OK: return RET_ERROR, msg ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id()) if ret_code != RET_OK: return RET_ERROR, msg ret_code, msg = self._send_async_req(unpush_req_str) if ret_code != RET_OK: return RET_ERROR, msg return RET_OK, None
[ "def", "unsubscribe", "(", "self", ",", "code_list", ",", "subtype_list", ")", ":", "ret", ",", "msg", ",", "code_list", ",", "subtype_list", "=", "self", ".", "_check_subscribe_param", "(", "code_list", ",", "subtype_list", ")", "if", "ret", "!=", "RET_OK", ":", "return", "ret", ",", "msg", "query_processor", "=", "self", ".", "_get_sync_query_processor", "(", "SubscriptionQuery", ".", "pack_unsubscribe_req", ",", "SubscriptionQuery", ".", "unpack_unsubscribe_rsp", ")", "kargs", "=", "{", "'code_list'", ":", "code_list", ",", "'subtype_list'", ":", "subtype_list", ",", "\"conn_id\"", ":", "self", ".", "get_sync_conn_id", "(", ")", "}", "for", "subtype", "in", "subtype_list", ":", "if", "subtype", "not", "in", "self", ".", "_ctx_subscribe", ":", "continue", "code_set", "=", "self", ".", "_ctx_subscribe", "[", "subtype", "]", "for", "code", "in", "code_list", ":", "if", "code", "not", "in", "code_set", ":", "continue", "code_set", ".", "remove", "(", "code", ")", "ret_code", ",", "msg", ",", "_", "=", "query_processor", "(", "*", "*", "kargs", ")", "if", "ret_code", "!=", "RET_OK", ":", "return", "RET_ERROR", ",", "msg", "ret_code", ",", "msg", ",", "unpush_req_str", "=", "SubscriptionQuery", ".", "pack_unpush_req", "(", "code_list", ",", "subtype_list", ",", "self", ".", "get_async_conn_id", "(", ")", ")", "if", "ret_code", "!=", "RET_OK", ":", "return", "RET_ERROR", ",", "msg", "ret_code", ",", "msg", "=", "self", ".", "_send_async_req", "(", "unpush_req_str", ")", "if", "ret_code", "!=", "RET_OK", ":", "return", "RET_ERROR", ",", "msg", "return", "RET_OK", ",", "None" ]
Unsubscribe. :param code_list: list of stock codes to unsubscribe :param subtype_list: subscription types to cancel, see SubType :return: (ret, err_message) ret == RET_OK err_message is None ret != RET_OK err_message is an error description string
[ "取消订阅", ":", "param", "code_list", ":", "取消订阅的股票代码列表", ":", "param", "subtype_list", ":", "取消订阅的类型,参见SubType", ":", "return", ":", "(", "ret", "err_message", ")" ]
python
train
31.979167
cenkalti/kuyruk
kuyruk/config.py
https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/config.py#L90-L101
def from_pyfile(self, filename: str) -> None: """Load values from a Python file.""" globals_ = {} # type: Dict[str, Any] locals_ = {} # type: Dict[str, Any] with open(filename, "rb") as f: exec(compile(f.read(), filename, 'exec'), globals_, locals_) for key, value in locals_.items(): if (key.isupper() and not isinstance(value, types.ModuleType)): self._setattr(key, value) logger.info("Config is loaded from file: %s", filename)
[ "def", "from_pyfile", "(", "self", ",", "filename", ":", "str", ")", "->", "None", ":", "globals_", "=", "{", "}", "# type: Dict[str, Any]", "locals_", "=", "{", "}", "# type: Dict[str, Any]", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "exec", "(", "compile", "(", "f", ".", "read", "(", ")", ",", "filename", ",", "'exec'", ")", ",", "globals_", ",", "locals_", ")", "for", "key", ",", "value", "in", "locals_", ".", "items", "(", ")", ":", "if", "(", "key", ".", "isupper", "(", ")", "and", "not", "isinstance", "(", "value", ",", "types", ".", "ModuleType", ")", ")", ":", "self", ".", "_setattr", "(", "key", ",", "value", ")", "logger", ".", "info", "(", "\"Config is loaded from file: %s\"", ",", "filename", ")" ]
Load values from a Python file.
[ "Load", "values", "from", "a", "Python", "file", "." ]
python
train
42.583333
cloudera/cm_api
python/src/cm_api/endpoints/host_templates.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/host_templates.py#L26-L38
def create_host_template(resource_root, name, cluster_name): """ Create a host template. @param resource_root: The root Resource object. @param name: Host template name @param cluster_name: Cluster name @return: An ApiHostTemplate object for the created host template. @since: API v3 """ apitemplate = ApiHostTemplate(resource_root, name, []) return call(resource_root.post, HOST_TEMPLATES_PATH % (cluster_name,), ApiHostTemplate, True, data=[apitemplate], api_version=3)[0]
[ "def", "create_host_template", "(", "resource_root", ",", "name", ",", "cluster_name", ")", ":", "apitemplate", "=", "ApiHostTemplate", "(", "resource_root", ",", "name", ",", "[", "]", ")", "return", "call", "(", "resource_root", ".", "post", ",", "HOST_TEMPLATES_PATH", "%", "(", "cluster_name", ",", ")", ",", "ApiHostTemplate", ",", "True", ",", "data", "=", "[", "apitemplate", "]", ",", "api_version", "=", "3", ")", "[", "0", "]" ]
Create a host template. @param resource_root: The root Resource object. @param name: Host template name @param cluster_name: Cluster name @return: An ApiHostTemplate object for the created host template. @since: API v3
[ "Create", "a", "host", "template", "." ]
python
train
38
Microsoft/malmo
MalmoEnv/malmoenv/bootstrap.py
https://github.com/Microsoft/malmo/blob/4139cd6f3e52f6e893a931a1d4b70d35f8e70e5a/MalmoEnv/malmoenv/bootstrap.py#L68-L88
def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False): """Launch Minecraft listening for malmoenv connections. Args: port: the TCP port to listen on. installdir: the install dir name. Defaults to MalmoPlatform. Must be same as given (or defaulted) in download call if used. replaceable: whether or not to automatically restart Minecraft (default is false). """ launch_script = './launchClient.sh' if os.name == 'nt': launch_script = 'launchClient.bat' cwd = os.getcwd() os.chdir(installdir) os.chdir("Minecraft") try: cmd = [launch_script, '-port', str(port), '-env'] if replaceable: cmd.append('-replaceable') subprocess.check_call(cmd) finally: os.chdir(cwd)
[ "def", "launch_minecraft", "(", "port", ",", "installdir", "=", "\"MalmoPlatform\"", ",", "replaceable", "=", "False", ")", ":", "launch_script", "=", "'./launchClient.sh'", "if", "os", ".", "name", "==", "'nt'", ":", "launch_script", "=", "'launchClient.bat'", "cwd", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "installdir", ")", "os", ".", "chdir", "(", "\"Minecraft\"", ")", "try", ":", "cmd", "=", "[", "launch_script", ",", "'-port'", ",", "str", "(", "port", ")", ",", "'-env'", "]", "if", "replaceable", ":", "cmd", ".", "append", "(", "'-replaceable'", ")", "subprocess", ".", "check_call", "(", "cmd", ")", "finally", ":", "os", ".", "chdir", "(", "cwd", ")" ]
Launch Minecraft listening for malmoenv connections. Args: port: the TCP port to listen on. installdir: the install dir name. Defaults to MalmoPlatform. Must be same as given (or defaulted) in download call if used. replaceable: whether or not to automatically restart Minecraft (default is false).
[ "Launch", "Minecraft", "listening", "for", "malmoenv", "connections", ".", "Args", ":", "port", ":", "the", "TCP", "port", "to", "listen", "on", ".", "installdir", ":", "the", "install", "dir", "name", ".", "Defaults", "to", "MalmoPlatform", ".", "Must", "be", "same", "as", "given", "(", "or", "defaulted", ")", "in", "download", "call", "if", "used", ".", "replaceable", ":", "whether", "or", "not", "to", "automatically", "restart", "Minecraft", "(", "default", "is", "false", ")", "." ]
python
train
37.380952
KeplerGO/K2fov
K2fov/fov.py
https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/fov.py#L291-L300
def colRowIsOnSciencePixelList(self, col, row, padding=DEFAULT_PADDING): """similar to colRowIsOnSciencePixelList() but takes lists as input""" out = np.ones(len(col), dtype=bool) col_arr = np.array(col) row_arr = np.array(row) mask = np.bitwise_or(col_arr < 12. - padding, col_arr > 1111 + padding) out[mask] = False mask = np.bitwise_or(row_arr < 20. - padding, row_arr > 1043 + padding) out[mask] = False return out
[ "def", "colRowIsOnSciencePixelList", "(", "self", ",", "col", ",", "row", ",", "padding", "=", "DEFAULT_PADDING", ")", ":", "out", "=", "np", ".", "ones", "(", "len", "(", "col", ")", ",", "dtype", "=", "bool", ")", "col_arr", "=", "np", ".", "array", "(", "col", ")", "row_arr", "=", "np", ".", "array", "(", "row", ")", "mask", "=", "np", ".", "bitwise_or", "(", "col_arr", "<", "12.", "-", "padding", ",", "col_arr", ">", "1111", "+", "padding", ")", "out", "[", "mask", "]", "=", "False", "mask", "=", "np", ".", "bitwise_or", "(", "row_arr", "<", "20.", "-", "padding", ",", "row_arr", ">", "1043", "+", "padding", ")", "out", "[", "mask", "]", "=", "False", "return", "out" ]
similar to colRowIsOnSciencePixelList() but takes lists as input
[ "similar", "to", "colRowIsOnSciencePixelList", "()", "but", "takes", "lists", "as", "input" ]
python
train
48.1
davidmogar/cucco
cucco/batch.py
https://github.com/davidmogar/cucco/blob/e2a0ff3342e4a9f25a65c486206a5a2ae1a4dbd4/cucco/batch.py#L117-L128
def stop_watching(self): """Stop watching for files. Stop the observer started by watch function and finish thread life. """ self._watch = False if self._observer: self._logger.info('Stopping watcher') self._observer.stop() self._logger.info('Watcher stopped')
[ "def", "stop_watching", "(", "self", ")", ":", "self", ".", "_watch", "=", "False", "if", "self", ".", "_observer", ":", "self", ".", "_logger", ".", "info", "(", "'Stopping watcher'", ")", "self", ".", "_observer", ".", "stop", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Watcher stopped'", ")" ]
Stop watching for files. Stop the observer started by watch function and finish thread life.
[ "Stop", "watching", "for", "files", "." ]
python
train
27.916667
fastai/fastai
fastai/callbacks/tensorboard.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/tensorboard.py#L309-L312
def _write_max_norm(self, norms:[])->None: "Writes the maximum norm of the gradients to Tensorboard." max_norm = max(norms) self._add_gradient_scalar('max_norm', scalar_value=max_norm)
[ "def", "_write_max_norm", "(", "self", ",", "norms", ":", "[", "]", ")", "->", "None", ":", "max_norm", "=", "max", "(", "norms", ")", "self", ".", "_add_gradient_scalar", "(", "'max_norm'", ",", "scalar_value", "=", "max_norm", ")" ]
Writes the maximum norm of the gradients to Tensorboard.
[ "Writes", "the", "maximum", "norm", "of", "the", "gradients", "to", "Tensorboard", "." ]
python
train
51.25
Nic30/hwt
hwt/hdl/frameTmpl.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/frameTmpl.py#L67-L205
def framesFromTransTmpl(transaction: 'TransTmpl', wordWidth: int, maxFrameLen: Union[int, float]=inf, maxPaddingWords: Union[int, float]=inf, trimPaddingWordsOnStart: bool=False, trimPaddingWordsOnEnd: bool=False) -> Generator[ 'FrameTmpl', None, None]: """ Convert transaction template into FrameTmpls :param transaction: transaction template used which are FrameTmpls created from :param wordWidth: width of data signal in target interface where frames will be used :param maxFrameLen: maximum length of frame in bits, if exceeded another frame will be created :param maxPaddingWords: maximum of continual padding words in frame, if exceed frame is split and words are cut of :attention: if maxPaddingWords<inf trimPaddingWordsOnEnd or trimPaddingWordsOnStart has to be True to decide where padding should be trimmed :param trimPaddingWordsOnStart: trim padding from start of frame at word granularity :param trimPaddingWordsOnEnd: trim padding from end of frame at word granularity """ isFirstInFrame = True partsPending = False startOfThisFrame = 0 assert maxFrameLen > 0 assert maxPaddingWords >= 0 if maxPaddingWords < inf: assert trimPaddingWordsOnStart or trimPaddingWordsOnEnd, \ "Padding has to be cut off somewhere" it = TransTmplWordIterator(wordWidth) lastWordI = 0 endOfThisFrame = maxFrameLen parts = [] for wordI, word in it.groupByWordIndex(transaction, 0): if wordI * wordWidth >= endOfThisFrame: # now in first+ word behind the frame # cut off padding at end of frame paddingWords = wordI - lastWordI if trimPaddingWordsOnEnd and paddingWords > maxPaddingWords: # cut off padding and align end of frame to word _endOfThisFrame = (lastWordI + 1) * wordWidth else: _endOfThisFrame = wordI * wordWidth yield FrameTmpl(transaction, wordWidth, startOfThisFrame, _endOfThisFrame, parts) # prepare for start of new frame parts = [] isFirstInFrame = True partsPending = False # start on new word startOfThisFrame = _endOfThisFrame endOfThisFrame = startOfThisFrame + maxFrameLen lastWordI = wordI # check if padding at potential end of frame can be cut off if (not isFirstInFrame and trimPaddingWordsOnEnd and wordI - lastWordI > 1): # there is too much continual padding, # cut it out and start new frame _endOfThisFrame = (lastWordI + 1) * wordWidth yield FrameTmpl(transaction, wordWidth, startOfThisFrame, _endOfThisFrame, parts) # prepare for start of new frame parts = [] isFirstInFrame = True partsPending = False # start on new word startOfThisFrame = _endOfThisFrame endOfThisFrame = startOfThisFrame + maxFrameLen lastWordI = wordI - 1 if isFirstInFrame: partsPending = True isFirstInFrame = False # cut off padding at start of frame paddingWords = wordI - lastWordI if trimPaddingWordsOnStart and paddingWords > maxPaddingWords: startOfThisFrame += paddingWords * wordWidth endOfThisFrame = startOfThisFrame + maxFrameLen # resolve end of this part parts.extend(word) lastWordI = wordI # reminder in "parts" after last iteration endOfThisFrame = transaction.bitAddrEnd withPadding = not (trimPaddingWordsOnEnd or trimPaddingWordsOnStart) if partsPending or (withPadding and endOfThisFrame != startOfThisFrame): # cut off padding at end of frame endOfLastWord = (lastWordI + 1) * wordWidth if endOfThisFrame < endOfLastWord: endOfThisFrame = endOfLastWord else: paddingWords = it.fullWordCnt(endOfLastWord, endOfThisFrame) if trimPaddingWordsOnEnd and paddingWords > maxPaddingWords: endOfThisFrame -= paddingWords * wordWidth # align end of frame to word endOfThisFrame = min(startOfThisFrame + maxFrameLen, endOfThisFrame) yield 
FrameTmpl(transaction, wordWidth, startOfThisFrame, endOfThisFrame, parts) parts = [] startOfThisFrame = endOfThisFrame # final padding on the end while withPadding and startOfThisFrame < transaction.bitAddrEnd: endOfThisFrame = min(startOfThisFrame + maxFrameLen, transaction.bitAddrEnd) yield FrameTmpl(transaction, wordWidth, startOfThisFrame, endOfThisFrame, []) startOfThisFrame = endOfThisFrame
[ "def", "framesFromTransTmpl", "(", "transaction", ":", "'TransTmpl'", ",", "wordWidth", ":", "int", ",", "maxFrameLen", ":", "Union", "[", "int", ",", "float", "]", "=", "inf", ",", "maxPaddingWords", ":", "Union", "[", "int", ",", "float", "]", "=", "inf", ",", "trimPaddingWordsOnStart", ":", "bool", "=", "False", ",", "trimPaddingWordsOnEnd", ":", "bool", "=", "False", ")", "->", "Generator", "[", "'FrameTmpl'", ",", "None", ",", "None", "]", ":", "isFirstInFrame", "=", "True", "partsPending", "=", "False", "startOfThisFrame", "=", "0", "assert", "maxFrameLen", ">", "0", "assert", "maxPaddingWords", ">=", "0", "if", "maxPaddingWords", "<", "inf", ":", "assert", "trimPaddingWordsOnStart", "or", "trimPaddingWordsOnEnd", ",", "\"Padding has to be cut off somewhere\"", "it", "=", "TransTmplWordIterator", "(", "wordWidth", ")", "lastWordI", "=", "0", "endOfThisFrame", "=", "maxFrameLen", "parts", "=", "[", "]", "for", "wordI", ",", "word", "in", "it", ".", "groupByWordIndex", "(", "transaction", ",", "0", ")", ":", "if", "wordI", "*", "wordWidth", ">=", "endOfThisFrame", ":", "# now in first+ word behind the frame", "# cut off padding at end of frame", "paddingWords", "=", "wordI", "-", "lastWordI", "if", "trimPaddingWordsOnEnd", "and", "paddingWords", ">", "maxPaddingWords", ":", "# cut off padding and align end of frame to word", "_endOfThisFrame", "=", "(", "lastWordI", "+", "1", ")", "*", "wordWidth", "else", ":", "_endOfThisFrame", "=", "wordI", "*", "wordWidth", "yield", "FrameTmpl", "(", "transaction", ",", "wordWidth", ",", "startOfThisFrame", ",", "_endOfThisFrame", ",", "parts", ")", "# prepare for start of new frame", "parts", "=", "[", "]", "isFirstInFrame", "=", "True", "partsPending", "=", "False", "# start on new word", "startOfThisFrame", "=", "_endOfThisFrame", "endOfThisFrame", "=", "startOfThisFrame", "+", "maxFrameLen", "lastWordI", "=", "wordI", "# check if padding at potential end of frame can be cut off", "if", "(", "not", "isFirstInFrame", "and", "trimPaddingWordsOnEnd", "and", "wordI", "-", "lastWordI", ">", "1", ")", ":", "# there is too much continual padding,", "# cut it out and start new frame", "_endOfThisFrame", "=", "(", "lastWordI", "+", "1", ")", "*", "wordWidth", "yield", "FrameTmpl", "(", "transaction", ",", "wordWidth", ",", "startOfThisFrame", ",", "_endOfThisFrame", ",", "parts", ")", "# prepare for start of new frame", "parts", "=", "[", "]", "isFirstInFrame", "=", "True", "partsPending", "=", "False", "# start on new word", "startOfThisFrame", "=", "_endOfThisFrame", "endOfThisFrame", "=", "startOfThisFrame", "+", "maxFrameLen", "lastWordI", "=", "wordI", "-", "1", "if", "isFirstInFrame", ":", "partsPending", "=", "True", "isFirstInFrame", "=", "False", "# cut off padding at start of frame", "paddingWords", "=", "wordI", "-", "lastWordI", "if", "trimPaddingWordsOnStart", "and", "paddingWords", ">", "maxPaddingWords", ":", "startOfThisFrame", "+=", "paddingWords", "*", "wordWidth", "endOfThisFrame", "=", "startOfThisFrame", "+", "maxFrameLen", "# resolve end of this part", "parts", ".", "extend", "(", "word", ")", "lastWordI", "=", "wordI", "# reminder in \"parts\" after last iteration", "endOfThisFrame", "=", "transaction", ".", "bitAddrEnd", "withPadding", "=", "not", "(", "trimPaddingWordsOnEnd", "or", "trimPaddingWordsOnStart", ")", "if", "partsPending", "or", "(", "withPadding", "and", "endOfThisFrame", "!=", "startOfThisFrame", ")", ":", "# cut off padding at end of frame", "endOfLastWord", "=", "(", "lastWordI", "+", "1", ")", "*", "wordWidth", 
"if", "endOfThisFrame", "<", "endOfLastWord", ":", "endOfThisFrame", "=", "endOfLastWord", "else", ":", "paddingWords", "=", "it", ".", "fullWordCnt", "(", "endOfLastWord", ",", "endOfThisFrame", ")", "if", "trimPaddingWordsOnEnd", "and", "paddingWords", ">", "maxPaddingWords", ":", "endOfThisFrame", "-=", "paddingWords", "*", "wordWidth", "# align end of frame to word", "endOfThisFrame", "=", "min", "(", "startOfThisFrame", "+", "maxFrameLen", ",", "endOfThisFrame", ")", "yield", "FrameTmpl", "(", "transaction", ",", "wordWidth", ",", "startOfThisFrame", ",", "endOfThisFrame", ",", "parts", ")", "parts", "=", "[", "]", "startOfThisFrame", "=", "endOfThisFrame", "# final padding on the end", "while", "withPadding", "and", "startOfThisFrame", "<", "transaction", ".", "bitAddrEnd", ":", "endOfThisFrame", "=", "min", "(", "startOfThisFrame", "+", "maxFrameLen", ",", "transaction", ".", "bitAddrEnd", ")", "yield", "FrameTmpl", "(", "transaction", ",", "wordWidth", ",", "startOfThisFrame", ",", "endOfThisFrame", ",", "[", "]", ")", "startOfThisFrame", "=", "endOfThisFrame" ]
Convert a transaction template into FrameTmpls :param transaction: transaction template from which the FrameTmpls are created :param wordWidth: width of data signal in target interface where frames will be used :param maxFrameLen: maximum length of frame in bits; if exceeded, another frame will be created :param maxPaddingWords: maximum number of continual padding words in a frame; if exceeded, the frame is split and the padding words are cut off :attention: if maxPaddingWords < inf, trimPaddingWordsOnEnd or trimPaddingWordsOnStart has to be True to decide where padding should be trimmed :param trimPaddingWordsOnStart: trim padding from start of frame at word granularity :param trimPaddingWordsOnEnd: trim padding from end of frame at word granularity
[ "Convert", "transaction", "template", "into", "FrameTmpls" ]
python
test
42.093525
bitshares/python-bitshares
bitsharesapi/websocket.py
https://github.com/bitshares/python-bitshares/blob/8a3b5954a6abcaaff7c6a5c41d910e58eea3142f/bitsharesapi/websocket.py#L213-L229
def process_notice(self, notice): """ This method is called on notices that need processing. Here, we call ``on_object`` and ``on_account`` slots. """ id = notice["id"] _a, _b, _ = id.split(".") if id in self.subscription_objects: self.on_object(notice) elif ".".join([_a, _b, "x"]) in self.subscription_objects: self.on_object(notice) elif id[:4] == "2.6.": # Treat account updates separately self.on_account(notice)
[ "def", "process_notice", "(", "self", ",", "notice", ")", ":", "id", "=", "notice", "[", "\"id\"", "]", "_a", ",", "_b", ",", "_", "=", "id", ".", "split", "(", "\".\"", ")", "if", "id", "in", "self", ".", "subscription_objects", ":", "self", ".", "on_object", "(", "notice", ")", "elif", "\".\"", ".", "join", "(", "[", "_a", ",", "_b", ",", "\"x\"", "]", ")", "in", "self", ".", "subscription_objects", ":", "self", ".", "on_object", "(", "notice", ")", "elif", "id", "[", ":", "4", "]", "==", "\"2.6.\"", ":", "# Treat account updates separately", "self", ".", "on_account", "(", "notice", ")" ]
This method is called on notices that need processing. Here, we call ``on_object`` and ``on_account`` slots.
[ "This", "method", "is", "called", "on", "notices", "that", "need", "processing", ".", "Here", "we", "call", "on_object", "and", "on_account", "slots", "." ]
python
train
30.647059
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1843-L1860
def shot_remove_asset(self, *args, **kwargs): """Remove the, in the asset table view selected, asset. :returns: None :rtype: None :raises: None """ if not self.cur_shot: return i = self.shot_asset_treev.currentIndex() item = i.internalPointer() if item: asset = item.internal_data() if not isinstance(asset, djadapter.models.Asset): return log.debug("Removing asset %s.", asset.name) item.set_parent(None) self.cur_shot.assets.remove(asset)
[ "def", "shot_remove_asset", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "cur_shot", ":", "return", "i", "=", "self", ".", "shot_asset_treev", ".", "currentIndex", "(", ")", "item", "=", "i", ".", "internalPointer", "(", ")", "if", "item", ":", "asset", "=", "item", ".", "internal_data", "(", ")", "if", "not", "isinstance", "(", "asset", ",", "djadapter", ".", "models", ".", "Asset", ")", ":", "return", "log", ".", "debug", "(", "\"Removing asset %s.\"", ",", "asset", ".", "name", ")", "item", ".", "set_parent", "(", "None", ")", "self", ".", "cur_shot", ".", "assets", ".", "remove", "(", "asset", ")" ]
Remove the asset currently selected in the asset table view. :returns: None :rtype: None :raises: None
[ "Remove", "the", "in", "the", "asset", "table", "view", "selected", "asset", "." ]
python
train
32.444444
CxAalto/gtfspy
gtfspy/osm_transfers.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/osm_transfers.py#L77-L119
def match_stops_to_nodes(gtfs, walk_network): """ Parameters ---------- gtfs : a GTFS object walk_network : networkx.Graph Returns ------- stop_I_to_node: dict maps stop_I to closest walk_network node stop_I_to_dist: dict maps stop_I to the distance to the closest walk_network node """ network_nodes = walk_network.nodes(data="true") stop_Is = set(gtfs.get_straight_line_transfer_distances()['from_stop_I']) stops_df = gtfs.stops() geo_index = GeoGridIndex(precision=6) for net_node, data in network_nodes: geo_index.add_point(GeoPoint(data['lat'], data['lon'], ref=net_node)) stop_I_to_node = {} stop_I_to_dist = {} for stop_I in stop_Is: stop_lat = float(stops_df[stops_df.stop_I == stop_I].lat) stop_lon = float(stops_df[stops_df.stop_I == stop_I].lon) geo_point = GeoPoint(stop_lat, stop_lon) min_dist = float('inf') min_dist_node = None search_distances_m = [0.100, 0.500] for search_distance_m in search_distances_m: for point, distance in geo_index.get_nearest_points(geo_point, search_distance_m, "km"): if distance < min_dist: min_dist = distance * 1000 min_dist_node = point.ref if min_dist_node is not None: break if min_dist_node is None: warn("No OSM node found for stop: " + str(stops_df[stops_df.stop_I == stop_I])) stop_I_to_node[stop_I] = min_dist_node stop_I_to_dist[stop_I] = min_dist return stop_I_to_node, stop_I_to_dist
[ "def", "match_stops_to_nodes", "(", "gtfs", ",", "walk_network", ")", ":", "network_nodes", "=", "walk_network", ".", "nodes", "(", "data", "=", "\"true\"", ")", "stop_Is", "=", "set", "(", "gtfs", ".", "get_straight_line_transfer_distances", "(", ")", "[", "'from_stop_I'", "]", ")", "stops_df", "=", "gtfs", ".", "stops", "(", ")", "geo_index", "=", "GeoGridIndex", "(", "precision", "=", "6", ")", "for", "net_node", ",", "data", "in", "network_nodes", ":", "geo_index", ".", "add_point", "(", "GeoPoint", "(", "data", "[", "'lat'", "]", ",", "data", "[", "'lon'", "]", ",", "ref", "=", "net_node", ")", ")", "stop_I_to_node", "=", "{", "}", "stop_I_to_dist", "=", "{", "}", "for", "stop_I", "in", "stop_Is", ":", "stop_lat", "=", "float", "(", "stops_df", "[", "stops_df", ".", "stop_I", "==", "stop_I", "]", ".", "lat", ")", "stop_lon", "=", "float", "(", "stops_df", "[", "stops_df", ".", "stop_I", "==", "stop_I", "]", ".", "lon", ")", "geo_point", "=", "GeoPoint", "(", "stop_lat", ",", "stop_lon", ")", "min_dist", "=", "float", "(", "'inf'", ")", "min_dist_node", "=", "None", "search_distances_m", "=", "[", "0.100", ",", "0.500", "]", "for", "search_distance_m", "in", "search_distances_m", ":", "for", "point", ",", "distance", "in", "geo_index", ".", "get_nearest_points", "(", "geo_point", ",", "search_distance_m", ",", "\"km\"", ")", ":", "if", "distance", "<", "min_dist", ":", "min_dist", "=", "distance", "*", "1000", "min_dist_node", "=", "point", ".", "ref", "if", "min_dist_node", "is", "not", "None", ":", "break", "if", "min_dist_node", "is", "None", ":", "warn", "(", "\"No OSM node found for stop: \"", "+", "str", "(", "stops_df", "[", "stops_df", ".", "stop_I", "==", "stop_I", "]", ")", ")", "stop_I_to_node", "[", "stop_I", "]", "=", "min_dist_node", "stop_I_to_dist", "[", "stop_I", "]", "=", "min_dist", "return", "stop_I_to_node", ",", "stop_I_to_dist" ]
Parameters ---------- gtfs : a GTFS object walk_network : networkx.Graph Returns ------- stop_I_to_node: dict maps stop_I to closest walk_network node stop_I_to_dist: dict maps stop_I to the distance to the closest walk_network node
[ "Parameters", "----------", "gtfs", ":", "a", "GTFS", "object", "walk_network", ":", "networkx", ".", "Graph" ]
python
valid
37
razorpay/razorpay-python
razorpay/resources/payment.py
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/resources/payment.py#L67-L78
def transfer(self, payment_id, data={}, **kwargs): """" Create Transfer for given Payment Id Args: payment_id : Id for which payment object has to be transfered Returns: Payment dict after getting transfered """ url = "{}/{}/transfers".format(self.base_url, payment_id) return self.post_url(url, data, **kwargs)
[ "def", "transfer", "(", "self", ",", "payment_id", ",", "data", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "url", "=", "\"{}/{}/transfers\"", ".", "format", "(", "self", ".", "base_url", ",", "payment_id", ")", "return", "self", ".", "post_url", "(", "url", ",", "data", ",", "*", "*", "kwargs", ")" ]
Create Transfer for given Payment Id Args: payment_id : Id for which payment object has to be transferred Returns: Payment dict after getting transferred
[ "Create", "Transfer", "for", "given", "Payment", "Id" ]
python
train
31.833333
ray-project/ray
python/ray/worker.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/worker.py#L1213-L1455
def init(redis_address=None, num_cpus=None, num_gpus=None, resources=None, object_store_memory=None, redis_max_memory=None, log_to_driver=True, node_ip_address=None, object_id_seed=None, local_mode=False, redirect_worker_output=None, redirect_output=None, ignore_reinit_error=False, num_redis_shards=None, redis_max_clients=None, redis_password=None, plasma_directory=None, huge_pages=False, include_webui=False, driver_id=None, configure_logging=True, logging_level=logging.INFO, logging_format=ray_constants.LOGGER_FORMAT, plasma_store_socket_name=None, raylet_socket_name=None, temp_dir=None, load_code_from_local=False, _internal_config=None): """Connect to an existing Ray cluster or start one and connect to it. This method handles two cases. Either a Ray cluster already exists and we just attach this driver to it, or we start all of the processes associated with a Ray cluster and attach to the newly started cluster. To start Ray and all of the relevant processes, use this as follows: .. code-block:: python ray.init() To connect to an existing Ray cluster, use this as follows (substituting in the appropriate address): .. code-block:: python ray.init(redis_address="123.45.67.89:6379") Args: redis_address (str): The address of the Redis server to connect to. If this address is not provided, then this command will start Redis, a raylet, a plasma store, a plasma manager, and some workers. It will also kill these processes when Python exits. num_cpus (int): Number of cpus the user wishes all raylets to be configured with. num_gpus (int): Number of gpus the user wishes all raylets to be configured with. resources: A dictionary mapping the name of a resource to the quantity of that resource available. object_store_memory: The amount of memory (in bytes) to start the object store with. By default, this is capped at 20GB but can be set higher. redis_max_memory: The max amount of memory (in bytes) to allow each redis shard to use. Once the limit is exceeded, redis will start LRU eviction of entries. This only applies to the sharded redis tables (task, object, and profile tables). By default, this is capped at 10GB but can be set higher. log_to_driver (bool): If true, then output from all of the worker processes on all nodes will be directed to the driver. node_ip_address (str): The IP address of the node that we are on. object_id_seed (int): Used to seed the deterministic generation of object IDs. The same value can be used across multiple runs of the same driver in order to generate the object IDs in a consistent manner. However, the same ID should not be used for different drivers. local_mode (bool): True if the code should be executed serially without Ray. This is useful for debugging. ignore_reinit_error: True if we should suppress errors from calling ray.init() a second time. num_redis_shards: The number of Redis shards to start in addition to the primary Redis shard. redis_max_clients: If provided, attempt to configure Redis with this maxclients number. redis_password (str): Prevents external clients without the password from connecting to Redis if provided. plasma_directory: A directory where the Plasma memory mapped files will be created. huge_pages: Boolean flag indicating whether to start the Object Store with hugetlbfs support. Requires plasma_directory. include_webui: Boolean flag indicating whether to start the web UI, which displays the status of the Ray cluster. driver_id: The ID of driver. configure_logging: True if allow the logging cofiguration here. 
Otherwise, the users may want to configure it by their own. logging_level: Logging level, default will be logging.INFO. logging_format: Logging format, default contains a timestamp, filename, line number, and message. See ray_constants.py. plasma_store_socket_name (str): If provided, it will specify the socket name used by the plasma store. raylet_socket_name (str): If provided, it will specify the socket path used by the raylet process. temp_dir (str): If provided, it will specify the root temporary directory for the Ray process. load_code_from_local: Whether code should be loaded from a local module or from the GCS. _internal_config (str): JSON configuration for overriding RayConfig defaults. For testing purposes ONLY. Returns: Address information about the started processes. Raises: Exception: An exception is raised if an inappropriate combination of arguments is passed in. """ if configure_logging: setup_logger(logging_level, logging_format) if local_mode: driver_mode = LOCAL_MODE else: driver_mode = SCRIPT_MODE if setproctitle is None: logger.warning( "WARNING: Not updating worker name since `setproctitle` is not " "installed. Install this with `pip install setproctitle` " "(or ray[debug]) to enable monitoring of worker processes.") if global_worker.connected: if ignore_reinit_error: logger.error("Calling ray.init() again after it has already been " "called.") return else: raise Exception("Perhaps you called ray.init twice by accident? " "This error can be suppressed by passing in " "'ignore_reinit_error=True' or by calling " "'ray.shutdown()' prior to 'ray.init()'.") # Convert hostnames to numerical IP address. if node_ip_address is not None: node_ip_address = services.address_to_ip(node_ip_address) if redis_address is not None: redis_address = services.address_to_ip(redis_address) global _global_node if driver_mode == LOCAL_MODE: # If starting Ray in LOCAL_MODE, don't start any other processes. _global_node = ray.node.LocalNode() elif redis_address is None: # In this case, we need to start a new cluster. ray_params = ray.parameter.RayParams( redis_address=redis_address, node_ip_address=node_ip_address, object_id_seed=object_id_seed, local_mode=local_mode, driver_mode=driver_mode, redirect_worker_output=redirect_worker_output, redirect_output=redirect_output, num_cpus=num_cpus, num_gpus=num_gpus, resources=resources, num_redis_shards=num_redis_shards, redis_max_clients=redis_max_clients, redis_password=redis_password, plasma_directory=plasma_directory, huge_pages=huge_pages, include_webui=include_webui, object_store_memory=object_store_memory, redis_max_memory=redis_max_memory, plasma_store_socket_name=plasma_store_socket_name, raylet_socket_name=raylet_socket_name, temp_dir=temp_dir, load_code_from_local=load_code_from_local, _internal_config=_internal_config, ) # Start the Ray processes. We set shutdown_at_exit=False because we # shutdown the node in the ray.shutdown call that happens in the atexit # handler. _global_node = ray.node.Node( head=True, shutdown_at_exit=False, ray_params=ray_params) else: # In this case, we are connecting to an existing cluster. 
if num_cpus is not None or num_gpus is not None: raise Exception("When connecting to an existing cluster, num_cpus " "and num_gpus must not be provided.") if resources is not None: raise Exception("When connecting to an existing cluster, " "resources must not be provided.") if num_redis_shards is not None: raise Exception("When connecting to an existing cluster, " "num_redis_shards must not be provided.") if redis_max_clients is not None: raise Exception("When connecting to an existing cluster, " "redis_max_clients must not be provided.") if object_store_memory is not None: raise Exception("When connecting to an existing cluster, " "object_store_memory must not be provided.") if redis_max_memory is not None: raise Exception("When connecting to an existing cluster, " "redis_max_memory must not be provided.") if plasma_directory is not None: raise Exception("When connecting to an existing cluster, " "plasma_directory must not be provided.") if huge_pages: raise Exception("When connecting to an existing cluster, " "huge_pages must not be provided.") if temp_dir is not None: raise Exception("When connecting to an existing cluster, " "temp_dir must not be provided.") if plasma_store_socket_name is not None: raise Exception("When connecting to an existing cluster, " "plasma_store_socket_name must not be provided.") if raylet_socket_name is not None: raise Exception("When connecting to an existing cluster, " "raylet_socket_name must not be provided.") if _internal_config is not None: raise Exception("When connecting to an existing cluster, " "_internal_config must not be provided.") # In this case, we only need to connect the node. ray_params = ray.parameter.RayParams( node_ip_address=node_ip_address, redis_address=redis_address, redis_password=redis_password, object_id_seed=object_id_seed, temp_dir=temp_dir, load_code_from_local=load_code_from_local) _global_node = ray.node.Node( ray_params, head=False, shutdown_at_exit=False, connect_only=True) connect( _global_node, mode=driver_mode, log_to_driver=log_to_driver, worker=global_worker, driver_id=driver_id) for hook in _post_init_hooks: hook() return _global_node.address_info
[ "def", "init", "(", "redis_address", "=", "None", ",", "num_cpus", "=", "None", ",", "num_gpus", "=", "None", ",", "resources", "=", "None", ",", "object_store_memory", "=", "None", ",", "redis_max_memory", "=", "None", ",", "log_to_driver", "=", "True", ",", "node_ip_address", "=", "None", ",", "object_id_seed", "=", "None", ",", "local_mode", "=", "False", ",", "redirect_worker_output", "=", "None", ",", "redirect_output", "=", "None", ",", "ignore_reinit_error", "=", "False", ",", "num_redis_shards", "=", "None", ",", "redis_max_clients", "=", "None", ",", "redis_password", "=", "None", ",", "plasma_directory", "=", "None", ",", "huge_pages", "=", "False", ",", "include_webui", "=", "False", ",", "driver_id", "=", "None", ",", "configure_logging", "=", "True", ",", "logging_level", "=", "logging", ".", "INFO", ",", "logging_format", "=", "ray_constants", ".", "LOGGER_FORMAT", ",", "plasma_store_socket_name", "=", "None", ",", "raylet_socket_name", "=", "None", ",", "temp_dir", "=", "None", ",", "load_code_from_local", "=", "False", ",", "_internal_config", "=", "None", ")", ":", "if", "configure_logging", ":", "setup_logger", "(", "logging_level", ",", "logging_format", ")", "if", "local_mode", ":", "driver_mode", "=", "LOCAL_MODE", "else", ":", "driver_mode", "=", "SCRIPT_MODE", "if", "setproctitle", "is", "None", ":", "logger", ".", "warning", "(", "\"WARNING: Not updating worker name since `setproctitle` is not \"", "\"installed. Install this with `pip install setproctitle` \"", "\"(or ray[debug]) to enable monitoring of worker processes.\"", ")", "if", "global_worker", ".", "connected", ":", "if", "ignore_reinit_error", ":", "logger", ".", "error", "(", "\"Calling ray.init() again after it has already been \"", "\"called.\"", ")", "return", "else", ":", "raise", "Exception", "(", "\"Perhaps you called ray.init twice by accident? 
\"", "\"This error can be suppressed by passing in \"", "\"'ignore_reinit_error=True' or by calling \"", "\"'ray.shutdown()' prior to 'ray.init()'.\"", ")", "# Convert hostnames to numerical IP address.", "if", "node_ip_address", "is", "not", "None", ":", "node_ip_address", "=", "services", ".", "address_to_ip", "(", "node_ip_address", ")", "if", "redis_address", "is", "not", "None", ":", "redis_address", "=", "services", ".", "address_to_ip", "(", "redis_address", ")", "global", "_global_node", "if", "driver_mode", "==", "LOCAL_MODE", ":", "# If starting Ray in LOCAL_MODE, don't start any other processes.", "_global_node", "=", "ray", ".", "node", ".", "LocalNode", "(", ")", "elif", "redis_address", "is", "None", ":", "# In this case, we need to start a new cluster.", "ray_params", "=", "ray", ".", "parameter", ".", "RayParams", "(", "redis_address", "=", "redis_address", ",", "node_ip_address", "=", "node_ip_address", ",", "object_id_seed", "=", "object_id_seed", ",", "local_mode", "=", "local_mode", ",", "driver_mode", "=", "driver_mode", ",", "redirect_worker_output", "=", "redirect_worker_output", ",", "redirect_output", "=", "redirect_output", ",", "num_cpus", "=", "num_cpus", ",", "num_gpus", "=", "num_gpus", ",", "resources", "=", "resources", ",", "num_redis_shards", "=", "num_redis_shards", ",", "redis_max_clients", "=", "redis_max_clients", ",", "redis_password", "=", "redis_password", ",", "plasma_directory", "=", "plasma_directory", ",", "huge_pages", "=", "huge_pages", ",", "include_webui", "=", "include_webui", ",", "object_store_memory", "=", "object_store_memory", ",", "redis_max_memory", "=", "redis_max_memory", ",", "plasma_store_socket_name", "=", "plasma_store_socket_name", ",", "raylet_socket_name", "=", "raylet_socket_name", ",", "temp_dir", "=", "temp_dir", ",", "load_code_from_local", "=", "load_code_from_local", ",", "_internal_config", "=", "_internal_config", ",", ")", "# Start the Ray processes. 
We set shutdown_at_exit=False because we", "# shutdown the node in the ray.shutdown call that happens in the atexit", "# handler.", "_global_node", "=", "ray", ".", "node", ".", "Node", "(", "head", "=", "True", ",", "shutdown_at_exit", "=", "False", ",", "ray_params", "=", "ray_params", ")", "else", ":", "# In this case, we are connecting to an existing cluster.", "if", "num_cpus", "is", "not", "None", "or", "num_gpus", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, num_cpus \"", "\"and num_gpus must not be provided.\"", ")", "if", "resources", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"resources must not be provided.\"", ")", "if", "num_redis_shards", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"num_redis_shards must not be provided.\"", ")", "if", "redis_max_clients", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"redis_max_clients must not be provided.\"", ")", "if", "object_store_memory", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"object_store_memory must not be provided.\"", ")", "if", "redis_max_memory", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"redis_max_memory must not be provided.\"", ")", "if", "plasma_directory", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"plasma_directory must not be provided.\"", ")", "if", "huge_pages", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"huge_pages must not be provided.\"", ")", "if", "temp_dir", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"temp_dir must not be provided.\"", ")", "if", "plasma_store_socket_name", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"plasma_store_socket_name must not be provided.\"", ")", "if", "raylet_socket_name", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"raylet_socket_name must not be provided.\"", ")", "if", "_internal_config", "is", "not", "None", ":", "raise", "Exception", "(", "\"When connecting to an existing cluster, \"", "\"_internal_config must not be provided.\"", ")", "# In this case, we only need to connect the node.", "ray_params", "=", "ray", ".", "parameter", ".", "RayParams", "(", "node_ip_address", "=", "node_ip_address", ",", "redis_address", "=", "redis_address", ",", "redis_password", "=", "redis_password", ",", "object_id_seed", "=", "object_id_seed", ",", "temp_dir", "=", "temp_dir", ",", "load_code_from_local", "=", "load_code_from_local", ")", "_global_node", "=", "ray", ".", "node", ".", "Node", "(", "ray_params", ",", "head", "=", "False", ",", "shutdown_at_exit", "=", "False", ",", "connect_only", "=", "True", ")", "connect", "(", "_global_node", ",", "mode", "=", "driver_mode", ",", "log_to_driver", "=", "log_to_driver", ",", "worker", "=", "global_worker", ",", "driver_id", "=", "driver_id", ")", "for", "hook", "in", "_post_init_hooks", ":", "hook", "(", ")", "return", "_global_node", ".", "address_info" ]
Connect to an existing Ray cluster or start one and connect to it. This method handles two cases. Either a Ray cluster already exists and we just attach this driver to it, or we start all of the processes associated with a Ray cluster and attach to the newly started cluster. To start Ray and all of the relevant processes, use this as follows: .. code-block:: python ray.init() To connect to an existing Ray cluster, use this as follows (substituting in the appropriate address): .. code-block:: python ray.init(redis_address="123.45.67.89:6379") Args: redis_address (str): The address of the Redis server to connect to. If this address is not provided, then this command will start Redis, a raylet, a plasma store, a plasma manager, and some workers. It will also kill these processes when Python exits. num_cpus (int): Number of cpus the user wishes all raylets to be configured with. num_gpus (int): Number of gpus the user wishes all raylets to be configured with. resources: A dictionary mapping the name of a resource to the quantity of that resource available. object_store_memory: The amount of memory (in bytes) to start the object store with. By default, this is capped at 20GB but can be set higher. redis_max_memory: The max amount of memory (in bytes) to allow each redis shard to use. Once the limit is exceeded, redis will start LRU eviction of entries. This only applies to the sharded redis tables (task, object, and profile tables). By default, this is capped at 10GB but can be set higher. log_to_driver (bool): If true, then output from all of the worker processes on all nodes will be directed to the driver. node_ip_address (str): The IP address of the node that we are on. object_id_seed (int): Used to seed the deterministic generation of object IDs. The same value can be used across multiple runs of the same driver in order to generate the object IDs in a consistent manner. However, the same ID should not be used for different drivers. local_mode (bool): True if the code should be executed serially without Ray. This is useful for debugging. ignore_reinit_error: True if we should suppress errors from calling ray.init() a second time. num_redis_shards: The number of Redis shards to start in addition to the primary Redis shard. redis_max_clients: If provided, attempt to configure Redis with this maxclients number. redis_password (str): Prevents external clients without the password from connecting to Redis if provided. plasma_directory: A directory where the Plasma memory mapped files will be created. huge_pages: Boolean flag indicating whether to start the Object Store with hugetlbfs support. Requires plasma_directory. include_webui: Boolean flag indicating whether to start the web UI, which displays the status of the Ray cluster. driver_id: The ID of driver. configure_logging: True if allow the logging cofiguration here. Otherwise, the users may want to configure it by their own. logging_level: Logging level, default will be logging.INFO. logging_format: Logging format, default contains a timestamp, filename, line number, and message. See ray_constants.py. plasma_store_socket_name (str): If provided, it will specify the socket name used by the plasma store. raylet_socket_name (str): If provided, it will specify the socket path used by the raylet process. temp_dir (str): If provided, it will specify the root temporary directory for the Ray process. load_code_from_local: Whether code should be loaded from a local module or from the GCS. 
_internal_config (str): JSON configuration for overriding RayConfig defaults. For testing purposes ONLY. Returns: Address information about the started processes. Raises: Exception: An exception is raised if an inappropriate combination of arguments is passed in.
[ "Connect", "to", "an", "existing", "Ray", "cluster", "or", "start", "one", "and", "connect", "to", "it", "." ]
python
train
44.90535
twilio/twilio-python
twilio/rest/api/v2010/account/sip/domain/credential_list_mapping.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/sip/domain/credential_list_mapping.py#L142-L156
def get(self, sid): """ Constructs a CredentialListMappingContext :param sid: A string that identifies the resource to fetch :returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext :rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext """ return CredentialListMappingContext( self._version, account_sid=self._solution['account_sid'], domain_sid=self._solution['domain_sid'], sid=sid, )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "CredentialListMappingContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "domain_sid", "=", "self", ".", "_solution", "[", "'domain_sid'", "]", ",", "sid", "=", "sid", ",", ")" ]
Constructs a CredentialListMappingContext :param sid: A string that identifies the resource to fetch :returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext :rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingContext
[ "Constructs", "a", "CredentialListMappingContext" ]
python
train
38.733333
pip-services3-python/pip-services3-commons-python
pip_services3_commons/data/AnyValueArray.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/AnyValueArray.py#L67-L80
def set_as_object(self, index = None, value= None): """ Sets a new value to array element specified by its index. When the index is not defined, it resets the entire array value. This method has double purpose because method overrides are not supported in JavaScript. :param index: (optional) an index of the element to set :param value: a new element or array value. """ if index == None and value != None: self.set_as_array(value) else: self[index] = value
[ "def", "set_as_object", "(", "self", ",", "index", "=", "None", ",", "value", "=", "None", ")", ":", "if", "index", "==", "None", "and", "value", "!=", "None", ":", "self", ".", "set_as_array", "(", "value", ")", "else", ":", "self", "[", "index", "]", "=", "value" ]
Sets a new value to the array element specified by its index. When the index is not defined, it resets the entire array value. This method has a double purpose because method overrides are not supported in JavaScript. :param index: (optional) an index of the element to set :param value: a new element or array value.
[ "Sets", "a", "new", "value", "to", "array", "element", "specified", "by", "its", "index", ".", "When", "the", "index", "is", "not", "defined", "it", "resets", "the", "entire", "array", "value", ".", "This", "method", "has", "double", "purpose", "because", "method", "overrides", "are", "not", "supported", "in", "JavaScript", "." ]
python
train
38.785714
PmagPy/PmagPy
programs/lsq_redo.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/lsq_redo.py#L8-L141
def main(): """ NAME lsq_redo.py DESCRIPTION converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements. SYNTAX lsq_redo.py [-h] [command line options] OPTIONS -h: prints help message and quits -f FILE: specify LSQ input file -fm MFILE: specify measurements file for editting, default is magic_measurements.txt -F FILE: specify output file, default is 'zeq_redo' """ letters=string.ascii_uppercase for l in string.ascii_lowercase: letters=letters+l dir_path='.' if '-WD' in sys.argv: ind=sys.argv.index('-WD') dir_path=sys.argv[ind+1] if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind=sys.argv.index('-f') inspec=dir_path+'/'+sys.argv[ind+1] else: zfile=dir_path+'/zeq_redo' if '-fm' in sys.argv: ind=sys.argv.index('-f') meas_file=dir_path+'/'+sys.argv[ind+1] else: meas_file=dir_path+'/magic_measurements.txt' if '-F' in sys.argv: ind=sys.argv.index('-F') zfile=dir_path+'/'+sys.argv[ind+1] else: zfile=dir_path+'/zeq_redo' try: open(meas_file,"r") meas_data,file_type=pmag.magic_read(meas_file) except IOError: print(main.__doc__) print("""You must have a valid measurements file prior to converting this LSQ file""") sys.exit() zredo=open(zfile,"w") MeasRecs=[] # # read in LSQ file # specs,MeasOuts=[],[] prior_spec_data=open(inspec,'r').readlines() for line in prior_spec_data: if len(line)<2: sys.exit() # spec=line[0:14].strip().replace(" ","") # get out the specimen name = collapsing spaces # rec=line[14:].split() # split up the rest of the line rec=line.split('\t') spec=rec[0].lower() specs.append(spec) comp_name=rec[2] # assign component name calculation_type="DE-FM" if rec[1][0]=="L": calculation_type="DE-BFL" # best-fit line else: calculation_type="DE-BFP" # best-fit line lists=rec[7].split('-') # get list of data used incl=[] for l in lists[0]: incl.append(letters.index(l)) for l in letters[letters.index(lists[0][-1])+1:letters.index(lists[1][0])]: incl.append(letters.index(l)) # add in the in between parts for l in lists[1]: incl.append(letters.index(l)) if len(lists)>2: for l in letters[letters.index(lists[1][-1])+1:letters.index(lists[2][0])]: incl.append(letters.index(l)) # add in the in between parts for l in lists[2]: incl.append(letters.index(l)) # now find all the data for this specimen in measurements datablock,min,max=[],"","" demag='N' for s in meas_data: if s['er_specimen_name'].lower()==spec.lower(): meths=s['magic_method_codes'].replace(" ","").split(":") if 'LT-NO' in meths or 'LT-AF-Z' in meths or 'LT-T-Z' in meths: datablock.append(s) if len(datablock)>0: for t in datablock:print(t['magic_method_codes']) incl_int=len(incl) while incl[-1]>len(datablock)-1: del incl[-1] # don't include measurements beyond what is in file if len(incl)!=incl_int: 'converting calculation type to best-fit line' meths0= datablock[incl[0]]['magic_method_codes'].replace(" ","").split(':') meths1= datablock[incl[-1]]['magic_method_codes'].replace(" ","").split(':') H0=datablock[incl[0]]['treatment_ac_field'] T0=datablock[incl[0]]['treatment_temp'] H1=datablock[incl[-1]]['treatment_ac_field'] T1=datablock[incl[-1]]['treatment_temp'] if 'LT-T-Z' in meths1: max=T1 demag="T" elif 'LT-AF-Z' in meths1: demag="AF" max=H1 if 'LT-NO' in meths0: if demag=='T': min=273 else: min=0 elif 'LT-T-Z' in meths0: min=T0 else: min=H0 for ind in range(incl[0]): MeasRecs.append(datablock[ind]) for ind in range(incl[0],incl[-1]): if ind not in incl: # datapoint not used in calculation datablock[ind]['measurement_flag']='b' 
MeasRecs.append(datablock[ind]) for ind in range(incl[-1],len(datablock)): MeasRecs.append(datablock[ind]) outstring='%s %s %s %s %s \n'%(spec,calculation_type,min,max,comp_name) zredo.write(outstring) for s in meas_data: # collect the rest of the measurement data not already included if s['er_specimen_name'] not in specs: MeasRecs.append(s) pmag.magic_write(meas_file,MeasRecs,'magic_measurements')
[ "def", "main", "(", ")", ":", "letters", "=", "string", ".", "ascii_uppercase", "for", "l", "in", "string", ".", "ascii_lowercase", ":", "letters", "=", "letters", "+", "l", "dir_path", "=", "'.'", "if", "'-WD'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-WD'", ")", "dir_path", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-h'", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-f'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "inspec", "=", "dir_path", "+", "'/'", "+", "sys", ".", "argv", "[", "ind", "+", "1", "]", "else", ":", "zfile", "=", "dir_path", "+", "'/zeq_redo'", "if", "'-fm'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "meas_file", "=", "dir_path", "+", "'/'", "+", "sys", ".", "argv", "[", "ind", "+", "1", "]", "else", ":", "meas_file", "=", "dir_path", "+", "'/magic_measurements.txt'", "if", "'-F'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-F'", ")", "zfile", "=", "dir_path", "+", "'/'", "+", "sys", ".", "argv", "[", "ind", "+", "1", "]", "else", ":", "zfile", "=", "dir_path", "+", "'/zeq_redo'", "try", ":", "open", "(", "meas_file", ",", "\"r\"", ")", "meas_data", ",", "file_type", "=", "pmag", ".", "magic_read", "(", "meas_file", ")", "except", "IOError", ":", "print", "(", "main", ".", "__doc__", ")", "print", "(", "\"\"\"You must have a valid measurements file prior to converting\n this LSQ file\"\"\"", ")", "sys", ".", "exit", "(", ")", "zredo", "=", "open", "(", "zfile", ",", "\"w\"", ")", "MeasRecs", "=", "[", "]", "#", "# read in LSQ file", "#", "specs", ",", "MeasOuts", "=", "[", "]", ",", "[", "]", "prior_spec_data", "=", "open", "(", "inspec", ",", "'r'", ")", ".", "readlines", "(", ")", "for", "line", "in", "prior_spec_data", ":", "if", "len", "(", "line", ")", "<", "2", ":", "sys", ".", "exit", "(", ")", "# spec=line[0:14].strip().replace(\" \",\"\") # get out the specimen name = collapsing spaces", "# rec=line[14:].split() # split up the rest of the line", "rec", "=", "line", ".", "split", "(", "'\\t'", ")", "spec", "=", "rec", "[", "0", "]", ".", "lower", "(", ")", "specs", ".", "append", "(", "spec", ")", "comp_name", "=", "rec", "[", "2", "]", "# assign component name", "calculation_type", "=", "\"DE-FM\"", "if", "rec", "[", "1", "]", "[", "0", "]", "==", "\"L\"", ":", "calculation_type", "=", "\"DE-BFL\"", "# best-fit line", "else", ":", "calculation_type", "=", "\"DE-BFP\"", "# best-fit line", "lists", "=", "rec", "[", "7", "]", ".", "split", "(", "'-'", ")", "# get list of data used", "incl", "=", "[", "]", "for", "l", "in", "lists", "[", "0", "]", ":", "incl", ".", "append", "(", "letters", ".", "index", "(", "l", ")", ")", "for", "l", "in", "letters", "[", "letters", ".", "index", "(", "lists", "[", "0", "]", "[", "-", "1", "]", ")", "+", "1", ":", "letters", ".", "index", "(", "lists", "[", "1", "]", "[", "0", "]", ")", "]", ":", "incl", ".", "append", "(", "letters", ".", "index", "(", "l", ")", ")", "# add in the in between parts", "for", "l", "in", "lists", "[", "1", "]", ":", "incl", ".", "append", "(", "letters", ".", "index", "(", "l", ")", ")", "if", "len", "(", "lists", ")", ">", "2", ":", "for", "l", "in", "letters", "[", "letters", ".", "index", "(", "lists", "[", "1", "]", "[", "-", "1", "]", ")", "+", "1", ":", "letters", ".", "index", "(", "lists", "[", "2", "]", "[", "0", "]", 
")", "]", ":", "incl", ".", "append", "(", "letters", ".", "index", "(", "l", ")", ")", "# add in the in between parts", "for", "l", "in", "lists", "[", "2", "]", ":", "incl", ".", "append", "(", "letters", ".", "index", "(", "l", ")", ")", "# now find all the data for this specimen in measurements", "datablock", ",", "min", ",", "max", "=", "[", "]", ",", "\"\"", ",", "\"\"", "demag", "=", "'N'", "for", "s", "in", "meas_data", ":", "if", "s", "[", "'er_specimen_name'", "]", ".", "lower", "(", ")", "==", "spec", ".", "lower", "(", ")", ":", "meths", "=", "s", "[", "'magic_method_codes'", "]", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "split", "(", "\":\"", ")", "if", "'LT-NO'", "in", "meths", "or", "'LT-AF-Z'", "in", "meths", "or", "'LT-T-Z'", "in", "meths", ":", "datablock", ".", "append", "(", "s", ")", "if", "len", "(", "datablock", ")", ">", "0", ":", "for", "t", "in", "datablock", ":", "print", "(", "t", "[", "'magic_method_codes'", "]", ")", "incl_int", "=", "len", "(", "incl", ")", "while", "incl", "[", "-", "1", "]", ">", "len", "(", "datablock", ")", "-", "1", ":", "del", "incl", "[", "-", "1", "]", "# don't include measurements beyond what is in file", "if", "len", "(", "incl", ")", "!=", "incl_int", ":", "'converting calculation type to best-fit line'", "meths0", "=", "datablock", "[", "incl", "[", "0", "]", "]", "[", "'magic_method_codes'", "]", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "split", "(", "':'", ")", "meths1", "=", "datablock", "[", "incl", "[", "-", "1", "]", "]", "[", "'magic_method_codes'", "]", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "split", "(", "':'", ")", "H0", "=", "datablock", "[", "incl", "[", "0", "]", "]", "[", "'treatment_ac_field'", "]", "T0", "=", "datablock", "[", "incl", "[", "0", "]", "]", "[", "'treatment_temp'", "]", "H1", "=", "datablock", "[", "incl", "[", "-", "1", "]", "]", "[", "'treatment_ac_field'", "]", "T1", "=", "datablock", "[", "incl", "[", "-", "1", "]", "]", "[", "'treatment_temp'", "]", "if", "'LT-T-Z'", "in", "meths1", ":", "max", "=", "T1", "demag", "=", "\"T\"", "elif", "'LT-AF-Z'", "in", "meths1", ":", "demag", "=", "\"AF\"", "max", "=", "H1", "if", "'LT-NO'", "in", "meths0", ":", "if", "demag", "==", "'T'", ":", "min", "=", "273", "else", ":", "min", "=", "0", "elif", "'LT-T-Z'", "in", "meths0", ":", "min", "=", "T0", "else", ":", "min", "=", "H0", "for", "ind", "in", "range", "(", "incl", "[", "0", "]", ")", ":", "MeasRecs", ".", "append", "(", "datablock", "[", "ind", "]", ")", "for", "ind", "in", "range", "(", "incl", "[", "0", "]", ",", "incl", "[", "-", "1", "]", ")", ":", "if", "ind", "not", "in", "incl", ":", "# datapoint not used in calculation", "datablock", "[", "ind", "]", "[", "'measurement_flag'", "]", "=", "'b'", "MeasRecs", ".", "append", "(", "datablock", "[", "ind", "]", ")", "for", "ind", "in", "range", "(", "incl", "[", "-", "1", "]", ",", "len", "(", "datablock", ")", ")", ":", "MeasRecs", ".", "append", "(", "datablock", "[", "ind", "]", ")", "outstring", "=", "'%s %s %s %s %s \\n'", "%", "(", "spec", ",", "calculation_type", ",", "min", ",", "max", ",", "comp_name", ")", "zredo", ".", "write", "(", "outstring", ")", "for", "s", "in", "meas_data", ":", "# collect the rest of the measurement data not already included", "if", "s", "[", "'er_specimen_name'", "]", "not", "in", "specs", ":", "MeasRecs", ".", "append", "(", "s", ")", "pmag", ".", "magic_write", "(", "meas_file", ",", "MeasRecs", ",", "'magic_measurements'", ")" ]
NAME lsq_redo.py DESCRIPTION converts a tab delimited LSQ format to PmagPy redo file and edits the magic_measurements table to mark "bad" measurements. SYNTAX lsq_redo.py [-h] [command line options] OPTIONS -h: prints help message and quits -f FILE: specify LSQ input file -fm MFILE: specify measurements file for editing, default is magic_measurements.txt -F FILE: specify output file, default is 'zeq_redo'
[ "NAME", "lsq_redo", ".", "py" ]
python
train
37.522388
brocade/pynos
pynos/versions/ver_7/ver_7_0_0/bgp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_0_0/bgp.py#L733-L781
def enable_peer_bfd(self, **kwargs): """BFD enable for each specified peer. Args: rbridge_id (str): Rbridge to configure. (1, 225, etc) peer_ip (str): Peer IPv4 address for BFD setting. delete (bool): True if BFD configuration should be deleted. Default value will be False if not specified. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: XML to be passed to the switch. Raises: None Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.neighbor(ip_addr='10.10.10.20', ... remote_as='65535', rbridge_id='230') ... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20', ... rbridge_id='230') ... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20', ... rbridge_id='230',get=True) ... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20', ... rbridge_id='230', delete=True) ... output = dev.bgp.neighbor(ip_addr='10.10.10.20', ... delete=True, rbridge_id='230', remote_as='65535') """ method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \ 'neighbor_neighbor_ips_neighbor_addr_bfd_bfd_enable' bfd_enable = getattr(self._rbridge, method_name) kwargs['router_bgp_neighbor_address'] = kwargs.pop('peer_ip') callback = kwargs.pop('callback', self._callback) config = bfd_enable(**kwargs) if kwargs.pop('delete', False): tag = 'bfd-enable' config.find('.//*%s' % tag).set('operation', 'delete') if kwargs.pop('get', False): return callback(config, handler='get_config') else: return callback(config)
[ "def", "enable_peer_bfd", "(", "self", ",", "*", "*", "kwargs", ")", ":", "method_name", "=", "'rbridge_id_router_router_bgp_router_bgp_attributes_'", "'neighbor_neighbor_ips_neighbor_addr_bfd_bfd_enable'", "bfd_enable", "=", "getattr", "(", "self", ".", "_rbridge", ",", "method_name", ")", "kwargs", "[", "'router_bgp_neighbor_address'", "]", "=", "kwargs", ".", "pop", "(", "'peer_ip'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "config", "=", "bfd_enable", "(", "*", "*", "kwargs", ")", "if", "kwargs", ".", "pop", "(", "'delete'", ",", "False", ")", ":", "tag", "=", "'bfd-enable'", "config", ".", "find", "(", "'.//*%s'", "%", "tag", ")", ".", "set", "(", "'operation'", ",", "'delete'", ")", "if", "kwargs", ".", "pop", "(", "'get'", ",", "False", ")", ":", "return", "callback", "(", "config", ",", "handler", "=", "'get_config'", ")", "else", ":", "return", "callback", "(", "config", ")" ]
BFD enable for each specified peer. Args: rbridge_id (str): Rbridge to configure. (1, 225, etc) peer_ip (str): Peer IPv4 address for BFD setting. delete (bool): True if BFD configuration should be deleted. Default value will be False if not specified. get (bool): Get config instead of editing config. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: XML to be passed to the switch. Raises: None Examples: >>> import pynos.device >>> switches = ['10.24.39.230'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.bgp.neighbor(ip_addr='10.10.10.20', ... remote_as='65535', rbridge_id='230') ... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20', ... rbridge_id='230') ... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20', ... rbridge_id='230',get=True) ... output = dev.bgp.enable_peer_bfd(peer_ip='10.10.10.20', ... rbridge_id='230', delete=True) ... output = dev.bgp.neighbor(ip_addr='10.10.10.20', ... delete=True, rbridge_id='230', remote_as='65535')
[ "BFD", "enable", "for", "each", "specified", "peer", "." ]
python
train
47.142857
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L1043-L1056
def add(x, y, context=None): """ Return ``x`` + ``y``. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_add, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), ), context, )
[ "def", "add", "(", "x", ",", "y", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_add", ",", "(", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ",", "BigFloat", ".", "_implicit_convert", "(", "y", ")", ",", ")", ",", "context", ",", ")" ]
Return ``x`` + ``y``.
[ "Return", "x", "+", "y", "." ]
python
train
19.714286
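As a quick illustration of the record above, a minimal sketch of calling bigfloat's add() under an explicit precision context; the literal values are arbitrary, and it assumes precision() yields a usable context manager as in the package's documented usage.

from bigfloat import add, precision

# add() implicitly converts its arguments to BigFloat and applies the
# current (or supplied) context when rounding the sum.
with precision(24):
    total = add(1, 2 ** -30)   # rounded to 24 bits of precision
print(total)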
Esri/ArcREST
tools/src/deleteUserContent.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/tools/src/deleteUserContent.py#L44-L99
def main(*argv): """ main driver of program """ try: # Inputs # adminUsername = argv[0] adminPassword = argv[1] siteURL = argv[2] username = argv[3] subFolders = argv[4].lower() == "true" # Logic # sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword) admin = arcrest.manageorg.Administration(url=siteURL, securityHandler=sh) content = admin.content if isinstance(content, arcrest.manageorg._content.Content):pass usercontent = content.usercontent(username=username) res = usercontent.listUserContent(username=adminUsername) # Delete Root Items # eItems = "" itemsToErase = ",".join([item['id'] for item in res['items']]) usercontent.deleteItems(items=itemsToErase) # Walk Each Folder and erase items if subfolder == True # if subFolders: for folder in res['folders']: c = usercontent.listUserContent(username=username, folderId=folder['id']) itemsToErase = ",".join([item['id'] for item in c['items']]) if len(itemsToErase.split(',')) > 0: usercontent.deleteItems(items=itemsToErase) del c usercontent.deleteFolder(folderId=folder['id']) del folder arcpy.AddMessage("User %s content has been deleted." % username) arcpy.SetParameterAsText(4, True) except arcpy.ExecuteError: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror) arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2)) except FunctionError, f_e: messages = f_e.args[0] arcpy.AddError("error in function: %s" % messages["function"]) arcpy.AddError("error on line: %s" % messages["line"]) arcpy.AddError("error in file name: %s" % messages["filename"]) arcpy.AddError("with error message: %s" % messages["synerror"]) arcpy.AddError("ArcPy Error Message: %s" % messages["arc"]) except: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror)
[ "def", "main", "(", "*", "argv", ")", ":", "try", ":", "# Inputs", "#", "adminUsername", "=", "argv", "[", "0", "]", "adminPassword", "=", "argv", "[", "1", "]", "siteURL", "=", "argv", "[", "2", "]", "username", "=", "argv", "[", "3", "]", "subFolders", "=", "argv", "[", "4", "]", ".", "lower", "(", ")", "==", "\"true\"", "# Logic", "#", "sh", "=", "arcrest", ".", "AGOLTokenSecurityHandler", "(", "adminUsername", ",", "adminPassword", ")", "admin", "=", "arcrest", ".", "manageorg", ".", "Administration", "(", "url", "=", "siteURL", ",", "securityHandler", "=", "sh", ")", "content", "=", "admin", ".", "content", "if", "isinstance", "(", "content", ",", "arcrest", ".", "manageorg", ".", "_content", ".", "Content", ")", ":", "pass", "usercontent", "=", "content", ".", "usercontent", "(", "username", "=", "username", ")", "res", "=", "usercontent", ".", "listUserContent", "(", "username", "=", "adminUsername", ")", "# Delete Root Items", "#", "eItems", "=", "\"\"", "itemsToErase", "=", "\",\"", ".", "join", "(", "[", "item", "[", "'id'", "]", "for", "item", "in", "res", "[", "'items'", "]", "]", ")", "usercontent", ".", "deleteItems", "(", "items", "=", "itemsToErase", ")", "# Walk Each Folder and erase items if subfolder == True", "#", "if", "subFolders", ":", "for", "folder", "in", "res", "[", "'folders'", "]", ":", "c", "=", "usercontent", ".", "listUserContent", "(", "username", "=", "username", ",", "folderId", "=", "folder", "[", "'id'", "]", ")", "itemsToErase", "=", "\",\"", ".", "join", "(", "[", "item", "[", "'id'", "]", "for", "item", "in", "c", "[", "'items'", "]", "]", ")", "if", "len", "(", "itemsToErase", ".", "split", "(", "','", ")", ")", ">", "0", ":", "usercontent", ".", "deleteItems", "(", "items", "=", "itemsToErase", ")", "del", "c", "usercontent", ".", "deleteFolder", "(", "folderId", "=", "folder", "[", "'id'", "]", ")", "del", "folder", "arcpy", ".", "AddMessage", "(", "\"User %s content has been deleted.\"", "%", "username", ")", "arcpy", ".", "SetParameterAsText", "(", "4", ",", "True", ")", "except", "arcpy", ".", "ExecuteError", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "line", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "filename", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "synerror", ")", "arcpy", ".", "AddError", "(", "\"ArcPy Error Message: %s\"", "%", "arcpy", ".", "GetMessages", "(", "2", ")", ")", "except", "FunctionError", ",", "f_e", ":", "messages", "=", "f_e", ".", "args", "[", "0", "]", "arcpy", ".", "AddError", "(", "\"error in function: %s\"", "%", "messages", "[", "\"function\"", "]", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "messages", "[", "\"line\"", "]", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "messages", "[", "\"filename\"", "]", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "messages", "[", "\"synerror\"", "]", ")", "arcpy", ".", "AddError", "(", "\"ArcPy Error Message: %s\"", "%", "messages", "[", "\"arc\"", "]", ")", "except", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "line", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "filename", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "synerror", ")" ]
main driver of program
[ "main", "driver", "of", "program" ]
python
train
43.785714
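An illustrative invocation of the script's main() above; every value here is hypothetical (credentials, portal URL, target user), and the fifth argument is the subFolders flag, which the code parses with .lower() == "true".

# Hypothetical values; main() reads positional arguments in this order:
# adminUsername, adminPassword, siteURL, username, subFolders
main("portal_admin", "s3cret", "https://www.arcgis.com", "jsmith", "true")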
LonamiWebs/Telethon
telethon/utils.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/utils.py#L1049-L1071
def resolve_invite_link(link): """ Resolves the given invite link. Returns a tuple of ``(link creator user id, global chat id, random int)``. Note that for broadcast channels, the link creator user ID will be zero to protect their identity. Normal chats and megagroup channels will have such ID. Note that the chat ID may not be accurate for chats with a link that were upgraded to megagroup, since the link can remain the same, but the chat ID will be correct once a new link is generated. """ link_hash, is_link = parse_username(link) if not is_link: # Perhaps the user passed the link hash directly link_hash = link try: return struct.unpack('>LLQ', _decode_telegram_base64(link_hash)) except (struct.error, TypeError): return None, None, None
[ "def", "resolve_invite_link", "(", "link", ")", ":", "link_hash", ",", "is_link", "=", "parse_username", "(", "link", ")", "if", "not", "is_link", ":", "# Perhaps the user passed the link hash directly", "link_hash", "=", "link", "try", ":", "return", "struct", ".", "unpack", "(", "'>LLQ'", ",", "_decode_telegram_base64", "(", "link_hash", ")", ")", "except", "(", "struct", ".", "error", ",", "TypeError", ")", ":", "return", "None", ",", "None", ",", "None" ]
Resolves the given invite link. Returns a tuple of ``(link creator user id, global chat id, random int)``. Note that for broadcast channels, the link creator user ID will be zero to protect their identity. Normal chats and megagroup channels will have such ID. Note that the chat ID may not be accurate for chats with a link that were upgraded to megagroup, since the link can remain the same, but the chat ID will be correct once a new link is generated.
[ "Resolves", "the", "given", "invite", "link", ".", "Returns", "a", "tuple", "of", "(", "link", "creator", "user", "id", "global", "chat", "id", "random", "int", ")", "." ]
python
train
35.608696
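A small usage sketch for the record above; the invite link value is made up, and the import path follows the file path shown in the record (telethon/utils.py).

from telethon import utils

# Returns (creator_user_id, chat_id, random_int); creator id is 0 for broadcast channels.
creator_id, chat_id, random_part = utils.resolve_invite_link(
    'https://t.me/joinchat/AAAAAEHbEkejzxUjAUCfYg')   # made-up link
if creator_id is None:
    print('not a recognisable invite link')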
quantopian/zipline
zipline/utils/functional.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/functional.py#L307-L327
def set_attribute(name, value): """ Decorator factory for setting attributes on a function. Doesn't change the behavior of the wrapped function. Examples -------- >>> @set_attribute('__name__', 'foo') ... def bar(): ... return 3 ... >>> bar() 3 >>> bar.__name__ 'foo' """ def decorator(f): setattr(f, name, value) return f return decorator
[ "def", "set_attribute", "(", "name", ",", "value", ")", ":", "def", "decorator", "(", "f", ")", ":", "setattr", "(", "f", ",", "name", ",", "value", ")", "return", "f", "return", "decorator" ]
Decorator factory for setting attributes on a function. Doesn't change the behavior of the wrapped function. Examples -------- >>> @set_attribute('__name__', 'foo') ... def bar(): ... return 3 ... >>> bar() 3 >>> bar.__name__ 'foo'
[ "Decorator", "factory", "for", "setting", "attributes", "on", "a", "function", "." ]
python
train
19.285714
ssato/python-anyconfig
src/anyconfig/backend/base.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/base.py#L632-L644
def load_from_stream(self, stream, container, **options): """ Load data from given stream 'stream'. :param stream: Stream provides configuration data :param container: callable to make a container object :param options: keyword options passed to '_load_from_stream_fn' :return: container object holding the configuration data """ return load_with_fn(self._load_from_stream_fn, stream, container, allow_primitives=self.allow_primitives(), **options)
[ "def", "load_from_stream", "(", "self", ",", "stream", ",", "container", ",", "*", "*", "options", ")", ":", "return", "load_with_fn", "(", "self", ".", "_load_from_stream_fn", ",", "stream", ",", "container", ",", "allow_primitives", "=", "self", ".", "allow_primitives", "(", ")", ",", "*", "*", "options", ")" ]
Load data from given stream 'stream'. :param stream: Stream provides configuration data :param container: callable to make a container object :param options: keyword options passed to '_load_from_stream_fn' :return: container object holding the configuration data
[ "Load", "data", "from", "given", "stream", "stream", "." ]
python
train
42.846154
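A hedged sketch of calling the backend method above; the anyconfig.backend.json module path and Parser class are assumptions based on the package layout, and dict is passed as the container factory.

from anyconfig.backend.json import Parser   # assumed backend module path

parser = Parser()
with open("conf.json") as strm:
    # container=dict -> plain dicts are built from the parsed stream
    data = parser.load_from_stream(strm, dict)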
josiah-wolf-oberholtzer/uqbar
uqbar/sphinx/api.py
https://github.com/josiah-wolf-oberholtzer/uqbar/blob/eca7fefebbbee1e2ae13bf5d6baa838be66b1db6/uqbar/sphinx/api.py#L49-L113
def on_builder_inited(app): """ Hooks into Sphinx's ``builder-inited`` event. Builds out the ReST API source. """ config = app.builder.config target_directory = ( pathlib.Path(app.builder.env.srcdir) / config.uqbar_api_directory_name ) initial_source_paths: List[str] = [] source_paths = config.uqbar_api_source_paths for source_path in source_paths: if isinstance(source_path, types.ModuleType): if hasattr(source_path, "__path__"): initial_source_paths.extend(getattr(source_path, "__path__")) else: initial_source_paths.extend(source_path.__file__) continue try: module = importlib.import_module(source_path) if hasattr(module, "__path__"): initial_source_paths.extend(getattr(module, "__path__")) else: initial_source_paths.append(module.__file__) except ImportError: initial_source_paths.append(source_path) root_documenter_class = config.uqbar_api_root_documenter_class if isinstance(root_documenter_class, str): module_name, _, class_name = root_documenter_class.rpartition(".") module = importlib.import_module(module_name) root_documenter_class = getattr(module, class_name) module_documenter_class = config.uqbar_api_module_documenter_class if isinstance(module_documenter_class, str): module_name, _, class_name = module_documenter_class.rpartition(".") module = importlib.import_module(module_name) module_documenter_class = getattr(module, class_name) # Don't modify the list in Sphinx's config. Sphinx won't pickle class # references, and strips them from the saved config. That leads to Sphinx # believing that the config has changed on every run. member_documenter_classes = list(config.uqbar_api_member_documenter_classes or []) for i, member_documenter_class in enumerate(member_documenter_classes): if isinstance(member_documenter_class, str): module_name, _, class_name = member_documenter_class.rpartition(".") module = importlib.import_module(module_name) member_documenter_classes[i] = getattr(module, class_name) api_builder = uqbar.apis.APIBuilder( initial_source_paths=initial_source_paths, target_directory=target_directory, document_empty_modules=config.uqbar_api_document_empty_modules, document_private_members=config.uqbar_api_document_private_members, document_private_modules=config.uqbar_api_document_private_modules, member_documenter_classes=member_documenter_classes or None, module_documenter_class=module_documenter_class, root_documenter_class=root_documenter_class, title=config.uqbar_api_title, logger_func=logger_func, ) api_builder()
[ "def", "on_builder_inited", "(", "app", ")", ":", "config", "=", "app", ".", "builder", ".", "config", "target_directory", "=", "(", "pathlib", ".", "Path", "(", "app", ".", "builder", ".", "env", ".", "srcdir", ")", "/", "config", ".", "uqbar_api_directory_name", ")", "initial_source_paths", ":", "List", "[", "str", "]", "=", "[", "]", "source_paths", "=", "config", ".", "uqbar_api_source_paths", "for", "source_path", "in", "source_paths", ":", "if", "isinstance", "(", "source_path", ",", "types", ".", "ModuleType", ")", ":", "if", "hasattr", "(", "source_path", ",", "\"__path__\"", ")", ":", "initial_source_paths", ".", "extend", "(", "getattr", "(", "source_path", ",", "\"__path__\"", ")", ")", "else", ":", "initial_source_paths", ".", "extend", "(", "source_path", ".", "__file__", ")", "continue", "try", ":", "module", "=", "importlib", ".", "import_module", "(", "source_path", ")", "if", "hasattr", "(", "module", ",", "\"__path__\"", ")", ":", "initial_source_paths", ".", "extend", "(", "getattr", "(", "module", ",", "\"__path__\"", ")", ")", "else", ":", "initial_source_paths", ".", "append", "(", "module", ".", "__file__", ")", "except", "ImportError", ":", "initial_source_paths", ".", "append", "(", "source_path", ")", "root_documenter_class", "=", "config", ".", "uqbar_api_root_documenter_class", "if", "isinstance", "(", "root_documenter_class", ",", "str", ")", ":", "module_name", ",", "_", ",", "class_name", "=", "root_documenter_class", ".", "rpartition", "(", "\".\"", ")", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "root_documenter_class", "=", "getattr", "(", "module", ",", "class_name", ")", "module_documenter_class", "=", "config", ".", "uqbar_api_module_documenter_class", "if", "isinstance", "(", "module_documenter_class", ",", "str", ")", ":", "module_name", ",", "_", ",", "class_name", "=", "module_documenter_class", ".", "rpartition", "(", "\".\"", ")", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "module_documenter_class", "=", "getattr", "(", "module", ",", "class_name", ")", "# Don't modify the list in Sphinx's config. Sphinx won't pickle class", "# references, and strips them from the saved config. 
That leads to Sphinx", "# believing that the config has changed on every run.", "member_documenter_classes", "=", "list", "(", "config", ".", "uqbar_api_member_documenter_classes", "or", "[", "]", ")", "for", "i", ",", "member_documenter_class", "in", "enumerate", "(", "member_documenter_classes", ")", ":", "if", "isinstance", "(", "member_documenter_class", ",", "str", ")", ":", "module_name", ",", "_", ",", "class_name", "=", "member_documenter_class", ".", "rpartition", "(", "\".\"", ")", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "member_documenter_classes", "[", "i", "]", "=", "getattr", "(", "module", ",", "class_name", ")", "api_builder", "=", "uqbar", ".", "apis", ".", "APIBuilder", "(", "initial_source_paths", "=", "initial_source_paths", ",", "target_directory", "=", "target_directory", ",", "document_empty_modules", "=", "config", ".", "uqbar_api_document_empty_modules", ",", "document_private_members", "=", "config", ".", "uqbar_api_document_private_members", ",", "document_private_modules", "=", "config", ".", "uqbar_api_document_private_modules", ",", "member_documenter_classes", "=", "member_documenter_classes", "or", "None", ",", "module_documenter_class", "=", "module_documenter_class", ",", "root_documenter_class", "=", "root_documenter_class", ",", "title", "=", "config", ".", "uqbar_api_title", ",", "logger_func", "=", "logger_func", ",", ")", "api_builder", "(", ")" ]
Hooks into Sphinx's ``builder-inited`` event. Builds out the ReST API source.
[ "Hooks", "into", "Sphinx", "s", "builder", "-", "inited", "event", "." ]
python
train
44
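The hook above is driven entirely by conf.py values; below is a minimal, illustrative Sphinx configuration using option names that the code actually reads (paths and title are placeholders, and loading the module via extensions is assumed to be the intended setup).

# conf.py (illustrative)
extensions = ["uqbar.sphinx.api"]

uqbar_api_source_paths = ["my_package"]      # packages/modules to document
uqbar_api_directory_name = "api"             # ReST output dir under the Sphinx srcdir
uqbar_api_title = "My Package API"
uqbar_api_document_private_members = False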
SoCo/SoCo
soco/compat.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/compat.py#L39-L60
def with_metaclass(meta, *bases): """A Python 2/3 compatible way of declaring a metaclass. Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2 /_compat.py>`_ via `python-future <http://python-future.org>`_. License: BSD. Use it like this:: class MyClass(with_metaclass(MyMetaClass, BaseClass)): pass """ class _Metaclass(meta): """Inner class""" __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, attrs): if this_bases is None: return type.__new__(cls, name, (), attrs) return meta(name, bases, attrs) return _Metaclass(str('temporary_class'), None, {})
[ "def", "with_metaclass", "(", "meta", ",", "*", "bases", ")", ":", "class", "_Metaclass", "(", "meta", ")", ":", "\"\"\"Inner class\"\"\"", "__call__", "=", "type", ".", "__call__", "__init__", "=", "type", ".", "__init__", "def", "__new__", "(", "cls", ",", "name", ",", "this_bases", ",", "attrs", ")", ":", "if", "this_bases", "is", "None", ":", "return", "type", ".", "__new__", "(", "cls", ",", "name", ",", "(", ")", ",", "attrs", ")", "return", "meta", "(", "name", ",", "bases", ",", "attrs", ")", "return", "_Metaclass", "(", "str", "(", "'temporary_class'", ")", ",", "None", ",", "{", "}", ")" ]
A Python 2/3 compatible way of declaring a metaclass. Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2 /_compat.py>`_ via `python-future <http://python-future.org>`_. License: BSD. Use it like this:: class MyClass(with_metaclass(MyMetaClass, BaseClass)): pass
[ "A", "Python", "2", "/", "3", "compatible", "way", "of", "declaring", "a", "metaclass", "." ]
python
train
32.818182
rocky/python3-trepan
trepan/processor/cmdproc.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/cmdproc.py#L537-L551
def get_int_noerr(self, arg): """Eval arg and if it is an integer return the value. Otherwise return None""" if self.curframe: g = self.curframe.f_globals l = self.curframe.f_locals else: g = globals() l = locals() pass try: val = int(eval(arg, g, l)) except (SyntaxError, NameError, ValueError, TypeError): return None return val
[ "def", "get_int_noerr", "(", "self", ",", "arg", ")", ":", "if", "self", ".", "curframe", ":", "g", "=", "self", ".", "curframe", ".", "f_globals", "l", "=", "self", ".", "curframe", ".", "f_locals", "else", ":", "g", "=", "globals", "(", ")", "l", "=", "locals", "(", ")", "pass", "try", ":", "val", "=", "int", "(", "eval", "(", "arg", ",", "g", ",", "l", ")", ")", "except", "(", "SyntaxError", ",", "NameError", ",", "ValueError", ",", "TypeError", ")", ":", "return", "None", "return", "val" ]
Eval arg and if it is an integer return the value. Otherwise return None
[ "Eval", "arg", "and", "it", "is", "an", "integer", "return", "the", "value", ".", "Otherwise", "return", "None" ]
python
test
30.2
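A brief sketch of how the helper above behaves; proc stands in for a trepan command-processor instance and is assumed to have its current frame set.

# proc is a hypothetical cmdproc instance with self.curframe populated
val = proc.get_int_noerr("n + 1")   # evaluated in the frame's globals/locals; int or None
if val is None:
    print("expected an expression with an integer value")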
ibis-project/ibis
ibis/pandas/api.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/pandas/api.py#L53-L66
def _flatten_subclass_tree(cls): """Return the set of all child classes of `cls`. Parameters ---------- cls : Type Returns ------- frozenset[Type] """ subclasses = frozenset(cls.__subclasses__()) children = frozenset(toolz.concat(map(_flatten_subclass_tree, subclasses))) return frozenset({cls}) | subclasses | children
[ "def", "_flatten_subclass_tree", "(", "cls", ")", ":", "subclasses", "=", "frozenset", "(", "cls", ".", "__subclasses__", "(", ")", ")", "children", "=", "frozenset", "(", "toolz", ".", "concat", "(", "map", "(", "_flatten_subclass_tree", ",", "subclasses", ")", ")", ")", "return", "frozenset", "(", "{", "cls", "}", ")", "|", "subclasses", "|", "children" ]
Return the set of all child classes of `cls`. Parameters ---------- cls : Type Returns ------- frozenset[Type]
[ "Return", "the", "set", "of", "all", "child", "classes", "of", "cls", "." ]
python
train
25.142857
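A self-contained illustration of the helper above on a tiny class hierarchy; the classes exist only for the example, and the function is assumed to be in scope.

class A: pass
class B(A): pass
class C(B): pass

# Includes the class itself, its direct subclasses and all deeper descendants.
assert _flatten_subclass_tree(A) == frozenset({A, B, C})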
eqcorrscan/EQcorrscan
eqcorrscan/core/match_filter.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/core/match_filter.py#L1281-L1311
def _uniq(self): """ Get list of unique detections. Works in place. .. rubric:: Example >>> family = Family( ... template=Template(name='a'), detections=[ ... Detection(template_name='a', detect_time=UTCDateTime(0), ... no_chans=8, detect_val=4.2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0) + 10, ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0) + 10, ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0)]) >>> len(family) 3 >>> len(family._uniq()) 2 """ _detections = [] [_detections.append(d) for d in self.detections if not _detections.count(d)] self.detections = _detections return self
[ "def", "_uniq", "(", "self", ")", ":", "_detections", "=", "[", "]", "[", "_detections", ".", "append", "(", "d", ")", "for", "d", "in", "self", ".", "detections", "if", "not", "_detections", ".", "count", "(", "d", ")", "]", "self", ".", "detections", "=", "_detections", "return", "self" ]
Get list of unique detections. Works in place. .. rubric:: Example >>> family = Family( ... template=Template(name='a'), detections=[ ... Detection(template_name='a', detect_time=UTCDateTime(0), ... no_chans=8, detect_val=4.2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0) + 10, ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0) + 10, ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0)]) >>> len(family) 3 >>> len(family._uniq()) 2
[ "Get", "list", "of", "unique", "detections", ".", "Works", "in", "place", "." ]
python
train
39.419355
bigchaindb/bigchaindb
bigchaindb/core.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/core.py#L195-L229
def end_block(self, request_end_block): """Calculate block hash using transaction ids and previous block hash to be stored in the next block. Args: height (int): new height of the chain. """ self.abort_if_abci_chain_is_not_synced() chain_shift = 0 if self.chain is None else self.chain['height'] height = request_end_block.height + chain_shift self.new_height = height # store pre-commit state to recover in case there is a crash during # `end_block` or `commit` logger.debug(f'Updating pre-commit state: {self.new_height}') pre_commit_state = dict(height=self.new_height, transactions=self.block_txn_ids) self.bigchaindb.store_pre_commit_state(pre_commit_state) block_txn_hash = calculate_hash(self.block_txn_ids) block = self.bigchaindb.get_latest_block() if self.block_txn_ids: self.block_txn_hash = calculate_hash([block['app_hash'], block_txn_hash]) else: self.block_txn_hash = block['app_hash'] validator_update = Election.process_block(self.bigchaindb, self.new_height, self.block_transactions) return ResponseEndBlock(validator_updates=validator_update)
[ "def", "end_block", "(", "self", ",", "request_end_block", ")", ":", "self", ".", "abort_if_abci_chain_is_not_synced", "(", ")", "chain_shift", "=", "0", "if", "self", ".", "chain", "is", "None", "else", "self", ".", "chain", "[", "'height'", "]", "height", "=", "request_end_block", ".", "height", "+", "chain_shift", "self", ".", "new_height", "=", "height", "# store pre-commit state to recover in case there is a crash during", "# `end_block` or `commit`", "logger", ".", "debug", "(", "f'Updating pre-commit state: {self.new_height}'", ")", "pre_commit_state", "=", "dict", "(", "height", "=", "self", ".", "new_height", ",", "transactions", "=", "self", ".", "block_txn_ids", ")", "self", ".", "bigchaindb", ".", "store_pre_commit_state", "(", "pre_commit_state", ")", "block_txn_hash", "=", "calculate_hash", "(", "self", ".", "block_txn_ids", ")", "block", "=", "self", ".", "bigchaindb", ".", "get_latest_block", "(", ")", "if", "self", ".", "block_txn_ids", ":", "self", ".", "block_txn_hash", "=", "calculate_hash", "(", "[", "block", "[", "'app_hash'", "]", ",", "block_txn_hash", "]", ")", "else", ":", "self", ".", "block_txn_hash", "=", "block", "[", "'app_hash'", "]", "validator_update", "=", "Election", ".", "process_block", "(", "self", ".", "bigchaindb", ",", "self", ".", "new_height", ",", "self", ".", "block_transactions", ")", "return", "ResponseEndBlock", "(", "validator_updates", "=", "validator_update", ")" ]
Calculate block hash using transaction ids and previous block hash to be stored in the next block. Args: height (int): new height of the chain.
[ "Calculate", "block", "hash", "using", "transaction", "ids", "and", "previous", "block", "hash", "to", "be", "stored", "in", "the", "next", "block", "." ]
python
train
38.742857
google/grumpy
third_party/stdlib/collections.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/collections.py#L219-L227
def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. If not specified, the value defaults to None. ''' self = cls() for key in iterable: self[key] = value return self
[ "def", "fromkeys", "(", "cls", ",", "iterable", ",", "value", "=", "None", ")", ":", "self", "=", "cls", "(", ")", "for", "key", "in", "iterable", ":", "self", "[", "key", "]", "=", "value", "return", "self" ]
OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. If not specified, the value defaults to None.
[ "OD", ".", "fromkeys", "(", "S", "[", "v", "]", ")", "-", ">", "New", "ordered", "dictionary", "with", "keys", "from", "S", ".", "If", "not", "specified", "the", "value", "defaults", "to", "None", "." ]
python
valid
30.444444
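Two quick calls showing the classmethod above, using the standard-library OrderedDict, whose behaviour the vendored copy mirrors; key order is preserved.

from collections import OrderedDict

OrderedDict.fromkeys("abc")          # OrderedDict([('a', None), ('b', None), ('c', None)])
OrderedDict.fromkeys(["x", "y"], 0)  # OrderedDict([('x', 0), ('y', 0)])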
ansible/tower-cli
tower_cli/resources/inventory.py
https://github.com/ansible/tower-cli/blob/a2b151fed93c47725018d3034848cb3a1814bed7/tower_cli/resources/inventory.py#L45-L62
def batch_update(self, pk=None, **kwargs): """Update all related inventory sources of the given inventory. Note global option --format is not available here, as the output would always be JSON-formatted. =====API DOCS===== Update all related inventory sources of the given inventory. :param pk: Primary key of the given inventory. :type pk: int :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects. :returns: A JSON object of update status of the given inventory. :rtype: dict =====API DOCS===== """ res = self.get(pk=pk, **kwargs) url = self.endpoint + '%d/%s/' % (res['id'], 'update_inventory_sources') return client.post(url, data={}).json()
[ "def", "batch_update", "(", "self", ",", "pk", "=", "None", ",", "*", "*", "kwargs", ")", ":", "res", "=", "self", ".", "get", "(", "pk", "=", "pk", ",", "*", "*", "kwargs", ")", "url", "=", "self", ".", "endpoint", "+", "'%d/%s/'", "%", "(", "res", "[", "'id'", "]", ",", "'update_inventory_sources'", ")", "return", "client", ".", "post", "(", "url", ",", "data", "=", "{", "}", ")", ".", "json", "(", ")" ]
Update all related inventory sources of the given inventory. Note global option --format is not available here, as the output would always be JSON-formatted. =====API DOCS===== Update all related inventory sources of the given inventory. :param pk: Primary key of the given inventory. :type pk: int :param `**kwargs`: Keyword arguments list of available fields used for searching resource objects. :returns: A JSON object of update status of the given inventory. :rtype: dict =====API DOCS=====
[ "Update", "all", "related", "inventory", "sources", "of", "the", "given", "inventory", "." ]
python
valid
43.722222
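A hedged sketch of invoking the resource method above through tower-cli's Python entry point; the inventory name is a placeholder, and get_resource("inventory") is assumed to return this resource.

import tower_cli

inventory = tower_cli.get_resource("inventory")
# Triggers an update of every inventory source attached to the named inventory.
status = inventory.batch_update(name="My Inventory")   # or pk=<inventory id>
print(status)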
diffeo/rejester
rejester/_registry.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_registry.py#L230-L241
def force_clear_lock(self): '''Kick out whoever currently owns the namespace global lock. This is intended as purely a last-resort tool. If another process has managed to get the global lock for a very long time, or if it requested the lock with a long expiration and then crashed, this can make the system functional again. If the original lock holder is still alive, its session calls may fail with exceptions. ''' return redis.Redis(connection_pool=self.pool).delete(self._lock_name)
[ "def", "force_clear_lock", "(", "self", ")", ":", "return", "redis", ".", "Redis", "(", "connection_pool", "=", "self", ".", "pool", ")", ".", "delete", "(", "self", ".", "_lock_name", ")" ]
Kick out whoever currently owns the namespace global lock. This is intended as purely a last-resort tool. If another process has managed to get the global lock for a very long time, or if it requested the lock with a long expiration and then crashed, this can make the system functional again. If the original lock holder is still alive, its session calls may fail with exceptions.
[ "Kick", "out", "whoever", "currently", "owns", "the", "namespace", "global", "lock", "." ]
python
train
45.916667
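An illustrative last-resort call of the method above; the import location and constructor argument are assumptions (rejester registries are normally built from a config mapping), so treat this purely as a sketch.

from rejester import Registry   # assumed import location

registry = Registry(config)     # config: mapping with namespace / registry addresses
registry.force_clear_lock()     # deletes the namespace-wide global lock key in redis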
persephone-tools/persephone
persephone/corpus_reader.py
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L158-L167
def untranscribed_batch_gen(self): """ A batch generator for all the untranscribed data. """ feat_fns = self.corpus.get_untranscribed_fns() fn_batches = self.make_batches(feat_fns) for fn_batch in fn_batches: batch_inputs, batch_inputs_lens = utils.load_batch_x(fn_batch, flatten=False) yield batch_inputs, batch_inputs_lens, fn_batch
[ "def", "untranscribed_batch_gen", "(", "self", ")", ":", "feat_fns", "=", "self", ".", "corpus", ".", "get_untranscribed_fns", "(", ")", "fn_batches", "=", "self", ".", "make_batches", "(", "feat_fns", ")", "for", "fn_batch", "in", "fn_batches", ":", "batch_inputs", ",", "batch_inputs_lens", "=", "utils", ".", "load_batch_x", "(", "fn_batch", ",", "flatten", "=", "False", ")", "yield", "batch_inputs", ",", "batch_inputs_lens", ",", "fn_batch" ]
A batch generator for all the untranscribed data.
[ "A", "batch", "generator", "for", "all", "the", "untranscribed", "data", "." ]
python
train
44.4
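A short sketch of consuming the generator above; reader stands for a CorpusReader instance and the model call is a placeholder for whatever decoding step follows.

# reader: a persephone CorpusReader over a corpus with untranscribed feature files
for batch_inputs, batch_inputs_lens, feat_fns in reader.untranscribed_batch_gen():
    # batch_inputs / batch_inputs_lens feed the acoustic model;
    # feat_fns identify which utterances are in this batch.
    hyps = model.transcribe_batch(batch_inputs, batch_inputs_lens)   # hypothetical model API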
Cognexa/cxflow
cxflow/cli/common.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/common.py#L149-L212
def create_hooks(config: dict, model: AbstractModel, dataset: AbstractDataset, output_dir: str) -> Iterable[AbstractHook]: """ Create hooks specified in ``config['hooks']`` list. Hook config entries may be one of the following types: .. code-block:: yaml :caption: A hook with default args specified only by its name as a string; e.g. hooks: - LogVariables - cxflow_tensorflow.WriteTensorBoard .. code-block:: yaml :caption: A hook with custom args as a dict name -> args; e.g. hooks: - StopAfter: n_epochs: 10 :param config: config dict :param model: model object to be passed to the hooks :param dataset: dataset object to be passed to hooks :param output_dir: training output dir available to the hooks :return: list of hook objects """ logging.info('Creating hooks') hooks = [] if 'hooks' in config: for hook_config in config['hooks']: if isinstance(hook_config, str): hook_config = {hook_config: {}} assert len(hook_config) == 1, 'Hook configuration must have exactly one key (fully qualified name).' hook_path, hook_params = next(iter(hook_config.items())) if hook_params is None: logging.warning('\t\t Empty config of `%s` hook', hook_path) hook_params = {} # workaround for ruamel.yaml expansion bug; see #222 hook_params = dict(hook_params.items()) hook_module, hook_class = parse_fully_qualified_name(hook_path) # find the hook module if not specified if hook_module is None: hook_module = get_class_module(CXF_HOOKS_MODULE, hook_class) logging.debug('\tFound hook module `%s` for class `%s`', hook_module, hook_class) if hook_module is None: raise ValueError('Can`t find hook module for hook class `{}`. ' 'Make sure it is defined under `{}` sub-modules.' .format(hook_class, CXF_HOOKS_MODULE)) # create hook kwargs hook_kwargs = {'dataset': dataset, 'model': model, 'output_dir': output_dir, **hook_params} # create new hook try: hook = create_object(hook_module, hook_class, kwargs=hook_kwargs) hooks.append(hook) logging.info('\t%s created', type(hooks[-1]).__name__) except (ValueError, KeyError, TypeError, NameError, AttributeError, AssertionError, ImportError) as ex: logging.error('\tFailed to create a hook from config `%s`', hook_config) raise ex return hooks
[ "def", "create_hooks", "(", "config", ":", "dict", ",", "model", ":", "AbstractModel", ",", "dataset", ":", "AbstractDataset", ",", "output_dir", ":", "str", ")", "->", "Iterable", "[", "AbstractHook", "]", ":", "logging", ".", "info", "(", "'Creating hooks'", ")", "hooks", "=", "[", "]", "if", "'hooks'", "in", "config", ":", "for", "hook_config", "in", "config", "[", "'hooks'", "]", ":", "if", "isinstance", "(", "hook_config", ",", "str", ")", ":", "hook_config", "=", "{", "hook_config", ":", "{", "}", "}", "assert", "len", "(", "hook_config", ")", "==", "1", ",", "'Hook configuration must have exactly one key (fully qualified name).'", "hook_path", ",", "hook_params", "=", "next", "(", "iter", "(", "hook_config", ".", "items", "(", ")", ")", ")", "if", "hook_params", "is", "None", ":", "logging", ".", "warning", "(", "'\\t\\t Empty config of `%s` hook'", ",", "hook_path", ")", "hook_params", "=", "{", "}", "# workaround for ruamel.yaml expansion bug; see #222", "hook_params", "=", "dict", "(", "hook_params", ".", "items", "(", ")", ")", "hook_module", ",", "hook_class", "=", "parse_fully_qualified_name", "(", "hook_path", ")", "# find the hook module if not specified", "if", "hook_module", "is", "None", ":", "hook_module", "=", "get_class_module", "(", "CXF_HOOKS_MODULE", ",", "hook_class", ")", "logging", ".", "debug", "(", "'\\tFound hook module `%s` for class `%s`'", ",", "hook_module", ",", "hook_class", ")", "if", "hook_module", "is", "None", ":", "raise", "ValueError", "(", "'Can`t find hook module for hook class `{}`. '", "'Make sure it is defined under `{}` sub-modules.'", ".", "format", "(", "hook_class", ",", "CXF_HOOKS_MODULE", ")", ")", "# create hook kwargs", "hook_kwargs", "=", "{", "'dataset'", ":", "dataset", ",", "'model'", ":", "model", ",", "'output_dir'", ":", "output_dir", ",", "*", "*", "hook_params", "}", "# create new hook", "try", ":", "hook", "=", "create_object", "(", "hook_module", ",", "hook_class", ",", "kwargs", "=", "hook_kwargs", ")", "hooks", ".", "append", "(", "hook", ")", "logging", ".", "info", "(", "'\\t%s created'", ",", "type", "(", "hooks", "[", "-", "1", "]", ")", ".", "__name__", ")", "except", "(", "ValueError", ",", "KeyError", ",", "TypeError", ",", "NameError", ",", "AttributeError", ",", "AssertionError", ",", "ImportError", ")", "as", "ex", ":", "logging", ".", "error", "(", "'\\tFailed to create a hook from config `%s`'", ",", "hook_config", ")", "raise", "ex", "return", "hooks" ]
Create hooks specified in ``config['hooks']`` list. Hook config entries may be one of the following types: .. code-block:: yaml :caption: A hook with default args specified only by its name as a string; e.g. hooks: - LogVariables - cxflow_tensorflow.WriteTensorBoard .. code-block:: yaml :caption: A hook with custom args as a dict name -> args; e.g. hooks: - StopAfter: n_epochs: 10 :param config: config dict :param model: model object to be passed to the hooks :param dataset: dataset object to be passed to hooks :param output_dir: training output dir available to the hooks :return: list of hook objects
[ "Create", "hooks", "specified", "in", "config", "[", "hooks", "]", "list", "." ]
python
train
42.59375
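A minimal call sketch for the function above, complementing the YAML snippets in its docstring; config, model and dataset are assumed to have been created earlier in the training entry point.

hooks = create_hooks(config, model, dataset, output_dir="log/run_0001")
for hook in hooks:
    print(type(hook).__name__)   # one instance per entry in config['hooks']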
graphql-python/graphql-core-next
graphql/utilities/build_client_schema.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/utilities/build_client_schema.py#L40-L351
def build_client_schema( introspection: Dict, assume_valid: bool = False ) -> GraphQLSchema: """Build a GraphQLSchema for use by client tools. Given the result of a client running the introspection query, creates and returns a GraphQLSchema instance which can be then used with all GraphQL-core-next tools, but cannot be used to execute a query, as introspection does not represent the "resolver", "parse" or "serialize" functions or any other server-internal mechanisms. This function expects a complete introspection result. Don't forget to check the "errors" field of a server response before calling this function. """ # Get the schema from the introspection result. schema_introspection = introspection["__schema"] # Given a type reference in introspection, return the GraphQLType instance, # preferring cached instances before building new instances. def get_type(type_ref: Dict) -> GraphQLType: kind = type_ref.get("kind") if kind == TypeKind.LIST.name: item_ref = type_ref.get("ofType") if not item_ref: raise TypeError("Decorated type deeper than introspection query.") return GraphQLList(get_type(item_ref)) elif kind == TypeKind.NON_NULL.name: nullable_ref = type_ref.get("ofType") if not nullable_ref: raise TypeError("Decorated type deeper than introspection query.") nullable_type = get_type(nullable_ref) return GraphQLNonNull(assert_nullable_type(nullable_type)) name = type_ref.get("name") if not name: raise TypeError(f"Unknown type reference: {inspect(type_ref)}") return get_named_type(name) def get_named_type(type_name: str) -> GraphQLNamedType: type_ = type_map.get(type_name) if not type_: raise TypeError( f"Invalid or incomplete schema, unknown type: {type_name}." " Ensure that a full introspection query is used in order" " to build a client schema." ) return type_ def get_input_type(type_ref: Dict) -> GraphQLInputType: input_type = get_type(type_ref) if not is_input_type(input_type): raise TypeError( "Introspection must provide input type for arguments," f" but received: {inspect(input_type)}." ) return cast(GraphQLInputType, input_type) def get_output_type(type_ref: Dict) -> GraphQLOutputType: output_type = get_type(type_ref) if not is_output_type(output_type): raise TypeError( "Introspection must provide output type for fields," f" but received: {inspect(output_type)}." ) return cast(GraphQLOutputType, output_type) def get_object_type(type_ref: Dict) -> GraphQLObjectType: object_type = get_type(type_ref) return assert_object_type(object_type) def get_interface_type(type_ref: Dict) -> GraphQLInterfaceType: interface_type = get_type(type_ref) return assert_interface_type(interface_type) # Given a type's introspection result, construct the correct GraphQLType instance. def build_type(type_: Dict) -> GraphQLNamedType: if type_ and "name" in type_ and "kind" in type_: builder = type_builders.get(cast(str, type_["kind"])) if builder: return cast(GraphQLNamedType, builder(type_)) raise TypeError( "Invalid or incomplete introspection result." 
" Ensure that a full introspection query is used in order" f" to build a client schema: {inspect(type_)}" ) def build_scalar_def(scalar_introspection: Dict) -> GraphQLScalarType: return GraphQLScalarType( name=scalar_introspection["name"], description=scalar_introspection.get("description"), serialize=lambda value: value, ) def build_object_def(object_introspection: Dict) -> GraphQLObjectType: interfaces = object_introspection.get("interfaces") if interfaces is None: raise TypeError( "Introspection result missing interfaces:" f" {inspect(object_introspection)}" ) return GraphQLObjectType( name=object_introspection["name"], description=object_introspection.get("description"), interfaces=lambda: [ get_interface_type(interface) for interface in cast(List[Dict], interfaces) ], fields=lambda: build_field_def_map(object_introspection), ) def build_interface_def(interface_introspection: Dict) -> GraphQLInterfaceType: return GraphQLInterfaceType( name=interface_introspection["name"], description=interface_introspection.get("description"), fields=lambda: build_field_def_map(interface_introspection), ) def build_union_def(union_introspection: Dict) -> GraphQLUnionType: possible_types = union_introspection.get("possibleTypes") if possible_types is None: raise TypeError( "Introspection result missing possibleTypes:" f" {inspect(union_introspection)}" ) return GraphQLUnionType( name=union_introspection["name"], description=union_introspection.get("description"), types=lambda: [ get_object_type(type_) for type_ in cast(List[Dict], possible_types) ], ) def build_enum_def(enum_introspection: Dict) -> GraphQLEnumType: if enum_introspection.get("enumValues") is None: raise TypeError( "Introspection result missing enumValues:" f" {inspect(enum_introspection)}" ) return GraphQLEnumType( name=enum_introspection["name"], description=enum_introspection.get("description"), values={ value_introspect["name"]: GraphQLEnumValue( description=value_introspect.get("description"), deprecation_reason=value_introspect.get("deprecationReason"), ) for value_introspect in enum_introspection["enumValues"] }, ) def build_input_object_def( input_object_introspection: Dict ) -> GraphQLInputObjectType: if input_object_introspection.get("inputFields") is None: raise TypeError( "Introspection result missing inputFields:" f" {inspect(input_object_introspection)}" ) return GraphQLInputObjectType( name=input_object_introspection["name"], description=input_object_introspection.get("description"), fields=lambda: build_input_value_def_map( input_object_introspection["inputFields"] ), ) type_builders: Dict[str, Callable[[Dict], GraphQLType]] = { TypeKind.SCALAR.name: build_scalar_def, TypeKind.OBJECT.name: build_object_def, TypeKind.INTERFACE.name: build_interface_def, TypeKind.UNION.name: build_union_def, TypeKind.ENUM.name: build_enum_def, TypeKind.INPUT_OBJECT.name: build_input_object_def, } def build_field(field_introspection: Dict) -> GraphQLField: if field_introspection.get("args") is None: raise TypeError( "Introspection result missing field args:" f" {inspect(field_introspection)}" ) return GraphQLField( get_output_type(field_introspection["type"]), args=build_arg_value_def_map(field_introspection["args"]), description=field_introspection.get("description"), deprecation_reason=field_introspection.get("deprecationReason"), ) def build_field_def_map(type_introspection: Dict) -> Dict[str, GraphQLField]: if type_introspection.get("fields") is None: raise TypeError( "Introspection result missing fields:" f" {type_introspection}" ) 
return { field_introspection["name"]: build_field(field_introspection) for field_introspection in type_introspection["fields"] } def build_arg_value(arg_introspection: Dict) -> GraphQLArgument: type_ = get_input_type(arg_introspection["type"]) default_value = arg_introspection.get("defaultValue") default_value = ( INVALID if default_value is None else value_from_ast(parse_value(default_value), type_) ) return GraphQLArgument( type_, default_value=default_value, description=arg_introspection.get("description"), ) def build_arg_value_def_map(arg_introspections: Dict) -> Dict[str, GraphQLArgument]: return { input_value_introspection["name"]: build_arg_value( input_value_introspection ) for input_value_introspection in arg_introspections } def build_input_value(input_value_introspection: Dict) -> GraphQLInputField: type_ = get_input_type(input_value_introspection["type"]) default_value = input_value_introspection.get("defaultValue") default_value = ( INVALID if default_value is None else value_from_ast(parse_value(default_value), type_) ) return GraphQLInputField( type_, default_value=default_value, description=input_value_introspection.get("description"), ) def build_input_value_def_map( input_value_introspections: Dict ) -> Dict[str, GraphQLInputField]: return { input_value_introspection["name"]: build_input_value( input_value_introspection ) for input_value_introspection in input_value_introspections } def build_directive(directive_introspection: Dict) -> GraphQLDirective: if directive_introspection.get("args") is None: raise TypeError( "Introspection result missing directive args:" f" {inspect(directive_introspection)}" ) if directive_introspection.get("locations") is None: raise TypeError( "Introspection result missing directive locations:" f" {inspect(directive_introspection)}" ) return GraphQLDirective( name=directive_introspection["name"], description=directive_introspection.get("description"), locations=list( cast( Sequence[DirectiveLocation], directive_introspection.get("locations"), ) ), args=build_arg_value_def_map(directive_introspection["args"]), ) # Iterate through all types, getting the type definition for each. type_map: Dict[str, GraphQLNamedType] = { type_introspection["name"]: build_type(type_introspection) for type_introspection in schema_introspection["types"] } for std_type_name, std_type in chain( specified_scalar_types.items(), introspection_types.items() ): type_map[std_type_name] = std_type # Get the root Query, Mutation, and Subscription types. query_type_ref = schema_introspection.get("queryType") query_type = None if query_type_ref is None else get_object_type(query_type_ref) mutation_type_ref = schema_introspection.get("mutationType") mutation_type = ( None if mutation_type_ref is None else get_object_type(mutation_type_ref) ) subscription_type_ref = schema_introspection.get("subscriptionType") subscription_type = ( None if subscription_type_ref is None else get_object_type(subscription_type_ref) ) # Get the directives supported by Introspection, assuming empty-set if directives # were not queried for. directive_introspections = schema_introspection.get("directives") directives = ( [ build_directive(directive_introspection) for directive_introspection in directive_introspections ] if directive_introspections else [] ) return GraphQLSchema( query=query_type, mutation=mutation_type, subscription=subscription_type, types=list(type_map.values()), directives=directives, assume_valid=assume_valid, )
[ "def", "build_client_schema", "(", "introspection", ":", "Dict", ",", "assume_valid", ":", "bool", "=", "False", ")", "->", "GraphQLSchema", ":", "# Get the schema from the introspection result.", "schema_introspection", "=", "introspection", "[", "\"__schema\"", "]", "# Given a type reference in introspection, return the GraphQLType instance,", "# preferring cached instances before building new instances.", "def", "get_type", "(", "type_ref", ":", "Dict", ")", "->", "GraphQLType", ":", "kind", "=", "type_ref", ".", "get", "(", "\"kind\"", ")", "if", "kind", "==", "TypeKind", ".", "LIST", ".", "name", ":", "item_ref", "=", "type_ref", ".", "get", "(", "\"ofType\"", ")", "if", "not", "item_ref", ":", "raise", "TypeError", "(", "\"Decorated type deeper than introspection query.\"", ")", "return", "GraphQLList", "(", "get_type", "(", "item_ref", ")", ")", "elif", "kind", "==", "TypeKind", ".", "NON_NULL", ".", "name", ":", "nullable_ref", "=", "type_ref", ".", "get", "(", "\"ofType\"", ")", "if", "not", "nullable_ref", ":", "raise", "TypeError", "(", "\"Decorated type deeper than introspection query.\"", ")", "nullable_type", "=", "get_type", "(", "nullable_ref", ")", "return", "GraphQLNonNull", "(", "assert_nullable_type", "(", "nullable_type", ")", ")", "name", "=", "type_ref", ".", "get", "(", "\"name\"", ")", "if", "not", "name", ":", "raise", "TypeError", "(", "f\"Unknown type reference: {inspect(type_ref)}\"", ")", "return", "get_named_type", "(", "name", ")", "def", "get_named_type", "(", "type_name", ":", "str", ")", "->", "GraphQLNamedType", ":", "type_", "=", "type_map", ".", "get", "(", "type_name", ")", "if", "not", "type_", ":", "raise", "TypeError", "(", "f\"Invalid or incomplete schema, unknown type: {type_name}.\"", "\" Ensure that a full introspection query is used in order\"", "\" to build a client schema.\"", ")", "return", "type_", "def", "get_input_type", "(", "type_ref", ":", "Dict", ")", "->", "GraphQLInputType", ":", "input_type", "=", "get_type", "(", "type_ref", ")", "if", "not", "is_input_type", "(", "input_type", ")", ":", "raise", "TypeError", "(", "\"Introspection must provide input type for arguments,\"", "f\" but received: {inspect(input_type)}.\"", ")", "return", "cast", "(", "GraphQLInputType", ",", "input_type", ")", "def", "get_output_type", "(", "type_ref", ":", "Dict", ")", "->", "GraphQLOutputType", ":", "output_type", "=", "get_type", "(", "type_ref", ")", "if", "not", "is_output_type", "(", "output_type", ")", ":", "raise", "TypeError", "(", "\"Introspection must provide output type for fields,\"", "f\" but received: {inspect(output_type)}.\"", ")", "return", "cast", "(", "GraphQLOutputType", ",", "output_type", ")", "def", "get_object_type", "(", "type_ref", ":", "Dict", ")", "->", "GraphQLObjectType", ":", "object_type", "=", "get_type", "(", "type_ref", ")", "return", "assert_object_type", "(", "object_type", ")", "def", "get_interface_type", "(", "type_ref", ":", "Dict", ")", "->", "GraphQLInterfaceType", ":", "interface_type", "=", "get_type", "(", "type_ref", ")", "return", "assert_interface_type", "(", "interface_type", ")", "# Given a type's introspection result, construct the correct GraphQLType instance.", "def", "build_type", "(", "type_", ":", "Dict", ")", "->", "GraphQLNamedType", ":", "if", "type_", "and", "\"name\"", "in", "type_", "and", "\"kind\"", "in", "type_", ":", "builder", "=", "type_builders", ".", "get", "(", "cast", "(", "str", ",", "type_", "[", "\"kind\"", "]", ")", ")", "if", "builder", ":", "return", "cast", "(", 
"GraphQLNamedType", ",", "builder", "(", "type_", ")", ")", "raise", "TypeError", "(", "\"Invalid or incomplete introspection result.\"", "\" Ensure that a full introspection query is used in order\"", "f\" to build a client schema: {inspect(type_)}\"", ")", "def", "build_scalar_def", "(", "scalar_introspection", ":", "Dict", ")", "->", "GraphQLScalarType", ":", "return", "GraphQLScalarType", "(", "name", "=", "scalar_introspection", "[", "\"name\"", "]", ",", "description", "=", "scalar_introspection", ".", "get", "(", "\"description\"", ")", ",", "serialize", "=", "lambda", "value", ":", "value", ",", ")", "def", "build_object_def", "(", "object_introspection", ":", "Dict", ")", "->", "GraphQLObjectType", ":", "interfaces", "=", "object_introspection", ".", "get", "(", "\"interfaces\"", ")", "if", "interfaces", "is", "None", ":", "raise", "TypeError", "(", "\"Introspection result missing interfaces:\"", "f\" {inspect(object_introspection)}\"", ")", "return", "GraphQLObjectType", "(", "name", "=", "object_introspection", "[", "\"name\"", "]", ",", "description", "=", "object_introspection", ".", "get", "(", "\"description\"", ")", ",", "interfaces", "=", "lambda", ":", "[", "get_interface_type", "(", "interface", ")", "for", "interface", "in", "cast", "(", "List", "[", "Dict", "]", ",", "interfaces", ")", "]", ",", "fields", "=", "lambda", ":", "build_field_def_map", "(", "object_introspection", ")", ",", ")", "def", "build_interface_def", "(", "interface_introspection", ":", "Dict", ")", "->", "GraphQLInterfaceType", ":", "return", "GraphQLInterfaceType", "(", "name", "=", "interface_introspection", "[", "\"name\"", "]", ",", "description", "=", "interface_introspection", ".", "get", "(", "\"description\"", ")", ",", "fields", "=", "lambda", ":", "build_field_def_map", "(", "interface_introspection", ")", ",", ")", "def", "build_union_def", "(", "union_introspection", ":", "Dict", ")", "->", "GraphQLUnionType", ":", "possible_types", "=", "union_introspection", ".", "get", "(", "\"possibleTypes\"", ")", "if", "possible_types", "is", "None", ":", "raise", "TypeError", "(", "\"Introspection result missing possibleTypes:\"", "f\" {inspect(union_introspection)}\"", ")", "return", "GraphQLUnionType", "(", "name", "=", "union_introspection", "[", "\"name\"", "]", ",", "description", "=", "union_introspection", ".", "get", "(", "\"description\"", ")", ",", "types", "=", "lambda", ":", "[", "get_object_type", "(", "type_", ")", "for", "type_", "in", "cast", "(", "List", "[", "Dict", "]", ",", "possible_types", ")", "]", ",", ")", "def", "build_enum_def", "(", "enum_introspection", ":", "Dict", ")", "->", "GraphQLEnumType", ":", "if", "enum_introspection", ".", "get", "(", "\"enumValues\"", ")", "is", "None", ":", "raise", "TypeError", "(", "\"Introspection result missing enumValues:\"", "f\" {inspect(enum_introspection)}\"", ")", "return", "GraphQLEnumType", "(", "name", "=", "enum_introspection", "[", "\"name\"", "]", ",", "description", "=", "enum_introspection", ".", "get", "(", "\"description\"", ")", ",", "values", "=", "{", "value_introspect", "[", "\"name\"", "]", ":", "GraphQLEnumValue", "(", "description", "=", "value_introspect", ".", "get", "(", "\"description\"", ")", ",", "deprecation_reason", "=", "value_introspect", ".", "get", "(", "\"deprecationReason\"", ")", ",", ")", "for", "value_introspect", "in", "enum_introspection", "[", "\"enumValues\"", "]", "}", ",", ")", "def", "build_input_object_def", "(", "input_object_introspection", ":", "Dict", ")", "->", 
"GraphQLInputObjectType", ":", "if", "input_object_introspection", ".", "get", "(", "\"inputFields\"", ")", "is", "None", ":", "raise", "TypeError", "(", "\"Introspection result missing inputFields:\"", "f\" {inspect(input_object_introspection)}\"", ")", "return", "GraphQLInputObjectType", "(", "name", "=", "input_object_introspection", "[", "\"name\"", "]", ",", "description", "=", "input_object_introspection", ".", "get", "(", "\"description\"", ")", ",", "fields", "=", "lambda", ":", "build_input_value_def_map", "(", "input_object_introspection", "[", "\"inputFields\"", "]", ")", ",", ")", "type_builders", ":", "Dict", "[", "str", ",", "Callable", "[", "[", "Dict", "]", ",", "GraphQLType", "]", "]", "=", "{", "TypeKind", ".", "SCALAR", ".", "name", ":", "build_scalar_def", ",", "TypeKind", ".", "OBJECT", ".", "name", ":", "build_object_def", ",", "TypeKind", ".", "INTERFACE", ".", "name", ":", "build_interface_def", ",", "TypeKind", ".", "UNION", ".", "name", ":", "build_union_def", ",", "TypeKind", ".", "ENUM", ".", "name", ":", "build_enum_def", ",", "TypeKind", ".", "INPUT_OBJECT", ".", "name", ":", "build_input_object_def", ",", "}", "def", "build_field", "(", "field_introspection", ":", "Dict", ")", "->", "GraphQLField", ":", "if", "field_introspection", ".", "get", "(", "\"args\"", ")", "is", "None", ":", "raise", "TypeError", "(", "\"Introspection result missing field args:\"", "f\" {inspect(field_introspection)}\"", ")", "return", "GraphQLField", "(", "get_output_type", "(", "field_introspection", "[", "\"type\"", "]", ")", ",", "args", "=", "build_arg_value_def_map", "(", "field_introspection", "[", "\"args\"", "]", ")", ",", "description", "=", "field_introspection", ".", "get", "(", "\"description\"", ")", ",", "deprecation_reason", "=", "field_introspection", ".", "get", "(", "\"deprecationReason\"", ")", ",", ")", "def", "build_field_def_map", "(", "type_introspection", ":", "Dict", ")", "->", "Dict", "[", "str", ",", "GraphQLField", "]", ":", "if", "type_introspection", ".", "get", "(", "\"fields\"", ")", "is", "None", ":", "raise", "TypeError", "(", "\"Introspection result missing fields:\"", "f\" {type_introspection}\"", ")", "return", "{", "field_introspection", "[", "\"name\"", "]", ":", "build_field", "(", "field_introspection", ")", "for", "field_introspection", "in", "type_introspection", "[", "\"fields\"", "]", "}", "def", "build_arg_value", "(", "arg_introspection", ":", "Dict", ")", "->", "GraphQLArgument", ":", "type_", "=", "get_input_type", "(", "arg_introspection", "[", "\"type\"", "]", ")", "default_value", "=", "arg_introspection", ".", "get", "(", "\"defaultValue\"", ")", "default_value", "=", "(", "INVALID", "if", "default_value", "is", "None", "else", "value_from_ast", "(", "parse_value", "(", "default_value", ")", ",", "type_", ")", ")", "return", "GraphQLArgument", "(", "type_", ",", "default_value", "=", "default_value", ",", "description", "=", "arg_introspection", ".", "get", "(", "\"description\"", ")", ",", ")", "def", "build_arg_value_def_map", "(", "arg_introspections", ":", "Dict", ")", "->", "Dict", "[", "str", ",", "GraphQLArgument", "]", ":", "return", "{", "input_value_introspection", "[", "\"name\"", "]", ":", "build_arg_value", "(", "input_value_introspection", ")", "for", "input_value_introspection", "in", "arg_introspections", "}", "def", "build_input_value", "(", "input_value_introspection", ":", "Dict", ")", "->", "GraphQLInputField", ":", "type_", "=", "get_input_type", "(", "input_value_introspection", "[", "\"type\"", "]", ")", 
"default_value", "=", "input_value_introspection", ".", "get", "(", "\"defaultValue\"", ")", "default_value", "=", "(", "INVALID", "if", "default_value", "is", "None", "else", "value_from_ast", "(", "parse_value", "(", "default_value", ")", ",", "type_", ")", ")", "return", "GraphQLInputField", "(", "type_", ",", "default_value", "=", "default_value", ",", "description", "=", "input_value_introspection", ".", "get", "(", "\"description\"", ")", ",", ")", "def", "build_input_value_def_map", "(", "input_value_introspections", ":", "Dict", ")", "->", "Dict", "[", "str", ",", "GraphQLInputField", "]", ":", "return", "{", "input_value_introspection", "[", "\"name\"", "]", ":", "build_input_value", "(", "input_value_introspection", ")", "for", "input_value_introspection", "in", "input_value_introspections", "}", "def", "build_directive", "(", "directive_introspection", ":", "Dict", ")", "->", "GraphQLDirective", ":", "if", "directive_introspection", ".", "get", "(", "\"args\"", ")", "is", "None", ":", "raise", "TypeError", "(", "\"Introspection result missing directive args:\"", "f\" {inspect(directive_introspection)}\"", ")", "if", "directive_introspection", ".", "get", "(", "\"locations\"", ")", "is", "None", ":", "raise", "TypeError", "(", "\"Introspection result missing directive locations:\"", "f\" {inspect(directive_introspection)}\"", ")", "return", "GraphQLDirective", "(", "name", "=", "directive_introspection", "[", "\"name\"", "]", ",", "description", "=", "directive_introspection", ".", "get", "(", "\"description\"", ")", ",", "locations", "=", "list", "(", "cast", "(", "Sequence", "[", "DirectiveLocation", "]", ",", "directive_introspection", ".", "get", "(", "\"locations\"", ")", ",", ")", ")", ",", "args", "=", "build_arg_value_def_map", "(", "directive_introspection", "[", "\"args\"", "]", ")", ",", ")", "# Iterate through all types, getting the type definition for each.", "type_map", ":", "Dict", "[", "str", ",", "GraphQLNamedType", "]", "=", "{", "type_introspection", "[", "\"name\"", "]", ":", "build_type", "(", "type_introspection", ")", "for", "type_introspection", "in", "schema_introspection", "[", "\"types\"", "]", "}", "for", "std_type_name", ",", "std_type", "in", "chain", "(", "specified_scalar_types", ".", "items", "(", ")", ",", "introspection_types", ".", "items", "(", ")", ")", ":", "type_map", "[", "std_type_name", "]", "=", "std_type", "# Get the root Query, Mutation, and Subscription types.", "query_type_ref", "=", "schema_introspection", ".", "get", "(", "\"queryType\"", ")", "query_type", "=", "None", "if", "query_type_ref", "is", "None", "else", "get_object_type", "(", "query_type_ref", ")", "mutation_type_ref", "=", "schema_introspection", ".", "get", "(", "\"mutationType\"", ")", "mutation_type", "=", "(", "None", "if", "mutation_type_ref", "is", "None", "else", "get_object_type", "(", "mutation_type_ref", ")", ")", "subscription_type_ref", "=", "schema_introspection", ".", "get", "(", "\"subscriptionType\"", ")", "subscription_type", "=", "(", "None", "if", "subscription_type_ref", "is", "None", "else", "get_object_type", "(", "subscription_type_ref", ")", ")", "# Get the directives supported by Introspection, assuming empty-set if directives", "# were not queried for.", "directive_introspections", "=", "schema_introspection", ".", "get", "(", "\"directives\"", ")", "directives", "=", "(", "[", "build_directive", "(", "directive_introspection", ")", "for", "directive_introspection", "in", "directive_introspections", "]", "if", "directive_introspections", 
"else", "[", "]", ")", "return", "GraphQLSchema", "(", "query", "=", "query_type", ",", "mutation", "=", "mutation_type", ",", "subscription", "=", "subscription_type", ",", "types", "=", "list", "(", "type_map", ".", "values", "(", ")", ")", ",", "directives", "=", "directives", ",", "assume_valid", "=", "assume_valid", ",", ")" ]
Build a GraphQLSchema for use by client tools. Given the result of a client running the introspection query, creates and returns a GraphQLSchema instance which can be then used with all GraphQL-core-next tools, but cannot be used to execute a query, as introspection does not represent the "resolver", "parse" or "serialize" functions or any other server-internal mechanisms. This function expects a complete introspection result. Don't forget to check the "errors" field of a server response before calling this function.
[ "Build", "a", "GraphQLSchema", "for", "use", "by", "client", "tools", "." ]
python
train
39.871795
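A minimal round-trip sketch of how the build_client_schema helper above is typically used, assuming the graphql-core-next package layout (build_schema, get_introspection_query, graphql_sync and print_schema exported from the top-level graphql module); the SDL schema is a made-up stand-in for a remote server.

from graphql import (
    build_schema, build_client_schema, get_introspection_query,
    graphql_sync, print_schema,
)

# A tiny SDL schema standing in for the server side.
server_schema = build_schema("""
    type Query {
      hello: String
    }
""")

# Run the full introspection query locally, as a client would do over HTTP.
result = graphql_sync(server_schema, get_introspection_query())
assert result.errors is None  # check "errors" before building, as the docstring warns

# Rebuild a schema usable by client tools (not for execution).
client_schema = build_client_schema(result.data)
print(print_schema(client_schema))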
gbowerman/azurerm
azurerm/acs.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/acs.py#L84-L101
def get_container_service(access_token, subscription_id, resource_group, service_name): '''Get details about an Azure Container Server Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. service_name (str): Name of container service. Returns: HTTP response. JSON model. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerService/ContainerServices/', service_name, '?api-version=', ACS_API]) return do_get(endpoint, access_token)
[ "def", "get_container_service", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "service_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourcegroups/'", ",", "resource_group", ",", "'/providers/Microsoft.ContainerService/ContainerServices/'", ",", "service_name", ",", "'?api-version='", ",", "ACS_API", "]", ")", "return", "do_get", "(", "endpoint", ",", "access_token", ")" ]
Get details about an Azure Container Server Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. service_name (str): Name of container service. Returns: HTTP response. JSON model.
[ "Get", "details", "about", "an", "Azure", "Container", "Server" ]
python
train
42.944444
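A hedged usage sketch for get_container_service; the credential values, resource group and cluster name are placeholders, and get_access_token is assumed to be the azurerm helper for obtaining a bearer token.

import azurerm

tenant_id, app_id, app_secret = '<tenant-id>', '<app-id>', '<app-secret>'
subscription_id = '<subscription-id>'

# Authenticate first (assumed azurerm helper), then query the ACS resource.
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
acs = azurerm.get_container_service(
    access_token, subscription_id, 'my-resource-group', 'my-acs-cluster')
print(acs)  # JSON model describing the container service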
ensime/ensime-vim
ensime_shared/typecheck.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/typecheck.py#L11-L15
def buffer_typechecks(self, call_id, payload): """Adds typecheck events to the buffer""" if self.currently_buffering_typechecks: for note in payload['notes']: self.buffered_notes.append(note)
[ "def", "buffer_typechecks", "(", "self", ",", "call_id", ",", "payload", ")", ":", "if", "self", ".", "currently_buffering_typechecks", ":", "for", "note", "in", "payload", "[", "'notes'", "]", ":", "self", ".", "buffered_notes", ".", "append", "(", "note", ")" ]
Adds typecheck events to the buffer
[ "Adds", "typecheck", "events", "to", "the", "buffer" ]
python
train
46.2
jorgenschaefer/elpy
elpy/jedibackend.py
https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/jedibackend.py#L272-L283
def pos_to_linecol(text, pos): """Return a tuple of line and column for offset pos in text. Lines are one-based, columns zero-based. This is how Jedi wants it. Don't ask me why. """ line_start = text.rfind("\n", 0, pos) + 1 line = text.count("\n", 0, line_start) + 1 col = pos - line_start return line, col
[ "def", "pos_to_linecol", "(", "text", ",", "pos", ")", ":", "line_start", "=", "text", ".", "rfind", "(", "\"\\n\"", ",", "0", ",", "pos", ")", "+", "1", "line", "=", "text", ".", "count", "(", "\"\\n\"", ",", "0", ",", "line_start", ")", "+", "1", "col", "=", "pos", "-", "line_start", "return", "line", ",", "col" ]
Return a tuple of line and column for offset pos in text. Lines are one-based, columns zero-based. This is how Jedi wants it. Don't ask me why.
[ "Return", "a", "tuple", "of", "line", "and", "column", "for", "offset", "pos", "in", "text", "." ]
python
train
27.5
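Because pos_to_linecol is a pure function, its one-based line / zero-based column convention is easy to verify in isolation; the helper is copied here verbatim so the checks run standalone.

def pos_to_linecol(text, pos):
    """Return (line, col) for offset pos; lines one-based, columns zero-based."""
    line_start = text.rfind("\n", 0, pos) + 1
    line = text.count("\n", 0, line_start) + 1
    col = pos - line_start
    return line, col

# Offset 7 points at the "o" of "world": second line, column 1.
assert pos_to_linecol("hello\nworld", 7) == (2, 1)
# Offsets on the first line keep line 1 and a column equal to the offset.
assert pos_to_linecol("hello\nworld", 3) == (1, 3)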
yandex/yandex-tank
yandextank/api/apiworker.py
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/api/apiworker.py#L135-L152
def get_default_configs(self): """ returns default configs list, from /etc, home dir and package_data""" # initialize basic defaults configs = [resource_filename(__name__, 'config/00-base.ini')] try: conf_files = sorted(os.listdir(self.baseconfigs_location)) for filename in conf_files: if fnmatch.fnmatch(filename, '*.ini'): configs += [ os.path.realpath( self.baseconfigs_location + os.sep + filename) ] except OSError: self.log.warn( self.baseconfigs_location + ' is not accessible to get configs list') configs += [os.path.expanduser('~/.yandex-tank')] return configs
[ "def", "get_default_configs", "(", "self", ")", ":", "# initialize basic defaults", "configs", "=", "[", "resource_filename", "(", "__name__", ",", "'config/00-base.ini'", ")", "]", "try", ":", "conf_files", "=", "sorted", "(", "os", ".", "listdir", "(", "self", ".", "baseconfigs_location", ")", ")", "for", "filename", "in", "conf_files", ":", "if", "fnmatch", ".", "fnmatch", "(", "filename", ",", "'*.ini'", ")", ":", "configs", "+=", "[", "os", ".", "path", ".", "realpath", "(", "self", ".", "baseconfigs_location", "+", "os", ".", "sep", "+", "filename", ")", "]", "except", "OSError", ":", "self", ".", "log", ".", "warn", "(", "self", ".", "baseconfigs_location", "+", "' is not accessible to get configs list'", ")", "configs", "+=", "[", "os", ".", "path", ".", "expanduser", "(", "'~/.yandex-tank'", ")", "]", "return", "configs" ]
returns default configs list, from /etc, home dir and package_data
[ "returns", "default", "configs", "list", "from", "/", "etc", "home", "dir", "and", "package_data" ]
python
test
42.833333
osrg/ryu
ryu/services/protocols/bgp/core_managers/configuration_manager.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/core_managers/configuration_manager.py#L80-L102
def on_remove_vrf_conf(self, evt): """Removes VRF table associated with given `vrf_conf`. Cleans up other links to this table as well. """ vrf_conf = evt.value # Detach VrfConf change listener. vrf_conf.remove_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf) self._table_manager.remove_vrf_by_vrf_conf(vrf_conf) # Update local RT NLRIs self._rt_manager.update_local_rt_nlris() self._signal_bus.vrf_removed(vrf_conf.route_dist) # Remove AttributeMaps under the removed vrf rd = vrf_conf.route_dist rf = vrf_conf.route_family peers = self._peer_manager.iterpeers for peer in peers: key = ':'.join([rd, rf]) peer.attribute_maps.pop(key, None)
[ "def", "on_remove_vrf_conf", "(", "self", ",", "evt", ")", ":", "vrf_conf", "=", "evt", ".", "value", "# Detach VrfConf change listener.", "vrf_conf", ".", "remove_listener", "(", "VrfConf", ".", "VRF_CHG_EVT", ",", "self", ".", "on_chg_vrf_conf", ")", "self", ".", "_table_manager", ".", "remove_vrf_by_vrf_conf", "(", "vrf_conf", ")", "# Update local RT NLRIs", "self", ".", "_rt_manager", ".", "update_local_rt_nlris", "(", ")", "self", ".", "_signal_bus", ".", "vrf_removed", "(", "vrf_conf", ".", "route_dist", ")", "# Remove AttributeMaps under the removed vrf", "rd", "=", "vrf_conf", ".", "route_dist", "rf", "=", "vrf_conf", ".", "route_family", "peers", "=", "self", ".", "_peer_manager", ".", "iterpeers", "for", "peer", "in", "peers", ":", "key", "=", "':'", ".", "join", "(", "[", "rd", ",", "rf", "]", ")", "peer", ".", "attribute_maps", ".", "pop", "(", "key", ",", "None", ")" ]
Removes VRF table associated with given `vrf_conf`. Cleans up other links to this table as well.
[ "Removes", "VRF", "table", "associated", "with", "given", "vrf_conf", "." ]
python
train
33.434783
readbeyond/aeneas
aeneas/audiofile.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/audiofile.py#L519-L547
def add_samples(self, samples, reverse=False): """ Concatenate the given new samples to the current audio data. This function initializes the memory if no audio data is present already. If ``reverse`` is ``True``, the new samples will be reversed and then concatenated. :param samples: the new samples to be concatenated :type samples: :class:`numpy.ndarray` (1D) :param bool reverse: if ``True``, concatenate new samples after reversing them .. versionadded:: 1.2.1 """ self.log(u"Adding samples...") samples_length = len(samples) current_length = self.__samples_length future_length = current_length + samples_length if (self.__samples is None) or (self.__samples_capacity < future_length): self.preallocate_memory(2 * future_length) if reverse: self.__samples[current_length:future_length] = samples[::-1] else: self.__samples[current_length:future_length] = samples[:] self.__samples_length = future_length self._update_length() self.log(u"Adding samples... done")
[ "def", "add_samples", "(", "self", ",", "samples", ",", "reverse", "=", "False", ")", ":", "self", ".", "log", "(", "u\"Adding samples...\"", ")", "samples_length", "=", "len", "(", "samples", ")", "current_length", "=", "self", ".", "__samples_length", "future_length", "=", "current_length", "+", "samples_length", "if", "(", "self", ".", "__samples", "is", "None", ")", "or", "(", "self", ".", "__samples_capacity", "<", "future_length", ")", ":", "self", ".", "preallocate_memory", "(", "2", "*", "future_length", ")", "if", "reverse", ":", "self", ".", "__samples", "[", "current_length", ":", "future_length", "]", "=", "samples", "[", ":", ":", "-", "1", "]", "else", ":", "self", ".", "__samples", "[", "current_length", ":", "future_length", "]", "=", "samples", "[", ":", "]", "self", ".", "__samples_length", "=", "future_length", "self", ".", "_update_length", "(", ")", "self", ".", "log", "(", "u\"Adding samples... done\"", ")" ]
Concatenate the given new samples to the current audio data. This function initializes the memory if no audio data is present already. If ``reverse`` is ``True``, the new samples will be reversed and then concatenated. :param samples: the new samples to be concatenated :type samples: :class:`numpy.ndarray` (1D) :param bool reverse: if ``True``, concatenate new samples after reversing them .. versionadded:: 1.2.1
[ "Concatenate", "the", "given", "new", "samples", "to", "the", "current", "audio", "data", "." ]
python
train
39.62069
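A toy, NumPy-only illustration of the same capacity-doubling append strategy (including the reverse option); this is not aeneas's AudioFile class, just a sketch of the buffering technique it relies on.

import numpy as np

class SampleBuffer:
    """Growable mono sample buffer using the doubling strategy sketched above."""

    def __init__(self):
        self._samples = None
        self._capacity = 0
        self._length = 0

    def add_samples(self, samples, reverse=False):
        future = self._length + len(samples)
        if self._samples is None or self._capacity < future:
            # Preallocate twice the needed room, copying any existing data over.
            grown = np.zeros(2 * future, dtype=samples.dtype)
            if self._samples is not None:
                grown[:self._length] = self._samples[:self._length]
            self._samples, self._capacity = grown, 2 * future
        self._samples[self._length:future] = samples[::-1] if reverse else samples
        self._length = future

    @property
    def audio_samples(self):
        return self._samples[:self._length]

buf = SampleBuffer()
buf.add_samples(np.array([0.1, 0.2, 0.3]))
buf.add_samples(np.array([0.9, 1.0]), reverse=True)
print(buf.audio_samples)  # [0.1 0.2 0.3 1.  0.9]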
all-umass/graphs
graphs/construction/directed.py
https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/directed.py#L26-L46
def _prune_edges(G, X, traj_lengths, pruning_thresh=0.1, verbose=False): '''Prune edges in graph G via cosine distance with trajectory edges.''' W = G.matrix('dense', copy=True) degree = G.degree(kind='out', weighted=False) i = 0 num_bad = 0 for n in traj_lengths: s, t = np.nonzero(W[i:i+n-1]) graph_edges = X[t] - X[s+i] traj_edges = np.diff(X[i:i+n], axis=0) traj_edges = np.repeat(traj_edges, degree[i:i+n-1], axis=0) theta = paired_distances(graph_edges, traj_edges, 'cosine') bad_edges = theta > pruning_thresh s, t = s[bad_edges], t[bad_edges] if verbose: # pragma: no cover num_bad += np.count_nonzero(W[s,t]) W[s,t] = 0 i += n if verbose: # pragma: no cover print('removed %d bad edges' % num_bad) return Graph.from_adj_matrix(W)
[ "def", "_prune_edges", "(", "G", ",", "X", ",", "traj_lengths", ",", "pruning_thresh", "=", "0.1", ",", "verbose", "=", "False", ")", ":", "W", "=", "G", ".", "matrix", "(", "'dense'", ",", "copy", "=", "True", ")", "degree", "=", "G", ".", "degree", "(", "kind", "=", "'out'", ",", "weighted", "=", "False", ")", "i", "=", "0", "num_bad", "=", "0", "for", "n", "in", "traj_lengths", ":", "s", ",", "t", "=", "np", ".", "nonzero", "(", "W", "[", "i", ":", "i", "+", "n", "-", "1", "]", ")", "graph_edges", "=", "X", "[", "t", "]", "-", "X", "[", "s", "+", "i", "]", "traj_edges", "=", "np", ".", "diff", "(", "X", "[", "i", ":", "i", "+", "n", "]", ",", "axis", "=", "0", ")", "traj_edges", "=", "np", ".", "repeat", "(", "traj_edges", ",", "degree", "[", "i", ":", "i", "+", "n", "-", "1", "]", ",", "axis", "=", "0", ")", "theta", "=", "paired_distances", "(", "graph_edges", ",", "traj_edges", ",", "'cosine'", ")", "bad_edges", "=", "theta", ">", "pruning_thresh", "s", ",", "t", "=", "s", "[", "bad_edges", "]", ",", "t", "[", "bad_edges", "]", "if", "verbose", ":", "# pragma: no cover", "num_bad", "+=", "np", ".", "count_nonzero", "(", "W", "[", "s", ",", "t", "]", ")", "W", "[", "s", ",", "t", "]", "=", "0", "i", "+=", "n", "if", "verbose", ":", "# pragma: no cover", "print", "(", "'removed %d bad edges'", "%", "num_bad", ")", "return", "Graph", ".", "from_adj_matrix", "(", "W", ")" ]
Prune edges in graph G via cosine distance with trajectory edges.
[ "Prune", "edges", "in", "graph", "G", "via", "cosine", "distance", "with", "trajectory", "edges", "." ]
python
train
37.428571
occrp-attic/exactitude
exactitude/name.py
https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/name.py#L8-L12
def clean_text(self, name, **kwargs): """Basic clean-up.""" name = strip_quotes(name) name = collapse_spaces(name) return name
[ "def", "clean_text", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "name", "=", "strip_quotes", "(", "name", ")", "name", "=", "collapse_spaces", "(", "name", ")", "return", "name" ]
Basic clean-up.
[ "Basic", "clean", "-", "up", "." ]
python
train
30.8
vinci1it2000/schedula
schedula/utils/alg.py
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/alg.py#L259-L418
def get_sub_node(dsp, path, node_attr='auto', solution=NONE, _level=0, _dsp_name=NONE): """ Returns a sub node of a dispatcher. :param dsp: A dispatcher object or a sub dispatch function. :type dsp: schedula.Dispatcher | SubDispatch :param path: A sequence of node ids or a single node id. Each id identifies a sub-level node. :type path: tuple, str :param node_attr: Output node attr. If the searched node does not have this attribute, all its attributes are returned. When 'auto', returns the "default" attributes of the searched node, which are: - for data node: its output, and if not exists, all its attributes. - for function and sub-dispatcher nodes: the 'function' attribute. :type node_attr: str | None :param solution: Parent Solution. :type solution: schedula.utils.Solution :param _level: Path level. :type _level: int :param _dsp_name: dsp name to show when the function raise a value error. :type _dsp_name: str :return: A sub node of a dispatcher and its path. :rtype: dict | object, tuple[str] **Example**: .. dispatcher:: o :opt: graph_attr={'ratio': '1'}, depth=-1 :code: >>> from schedula import Dispatcher >>> s_dsp = Dispatcher(name='Sub-dispatcher') >>> def fun(a, b): ... return a + b ... >>> s_dsp.add_function('a + b', fun, ['a', 'b'], ['c']) 'a + b' >>> dispatch = SubDispatch(s_dsp, ['c'], output_type='dict') >>> dsp = Dispatcher(name='Dispatcher') >>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b']) 'Sub-dispatcher' >>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}}) ... Get the sub node 'c' output or type:: >>> get_sub_node(dsp, ('Sub-dispatcher', 'c')) (4, ('Sub-dispatcher', 'c')) >>> get_sub_node(dsp, ('Sub-dispatcher', 'c'), node_attr='type') ('data', ('Sub-dispatcher', 'c')) Get the sub-dispatcher output: .. dispatcher:: sol :opt: graph_attr={'ratio': '1'}, depth=-1 :code: >>> sol, p = get_sub_node(dsp, ('Sub-dispatcher',), node_attr='output') >>> sol, p (Solution([('a', 3), ('b', 1), ('c', 4)]), ('Sub-dispatcher',)) """ path = list(path) if isinstance(dsp, SubDispatch): # Take the dispatcher obj. dsp = dsp.dsp if _dsp_name is NONE: # Set origin dispatcher name for warning purpose. _dsp_name = dsp.name if solution is NONE: # Set origin dispatcher name for warning purpose. solution = dsp.solution node_id = path[_level] # Node id at given level. try: node_id, node = _get_node(dsp.nodes, node_id) # Get dispatcher node. path[_level] = node_id except KeyError: if _level == len(path) - 1 and node_attr in ('auto', 'output') \ and solution is not EMPTY: try: # Get dispatcher node. node_id, node = _get_node(solution, node_id, False) path[_level] = node_id return node, tuple(path) except KeyError: pass msg = 'Path %s does not exist in %s dispatcher.' % (path, _dsp_name) raise ValueError(msg) _level += 1 # Next level. if _level < len(path): # Is not path leaf?. try: if node['type'] in ('function', 'dispatcher'): try: solution = solution.workflow.node[node_id]['solution'] except (KeyError, AttributeError): solution = EMPTY dsp = parent_func(node['function']) # Get parent function. else: raise KeyError except KeyError: msg = 'Node of path %s at level %i is not a function or ' \ 'sub-dispatcher node of %s ' \ 'dispatcher.' % (path, _level, _dsp_name) raise ValueError(msg) # Continue the node search. return get_sub_node(dsp, path, node_attr, solution, _level, _dsp_name) else: data, sol = EMPTY, solution # Return the sub node. if node_attr == 'auto' and node['type'] != 'data': # Auto: function. 
node_attr = 'function' elif node_attr == 'auto' and sol is not EMPTY and node_id in sol: data = sol[node_id] # Auto: data output. elif node_attr == 'output' and node['type'] != 'data': data = sol.workflow.nodes[node_id]['solution'] elif node_attr == 'output' and node['type'] == 'data': data = sol[node_id] elif node_attr == 'description': # Search and return node description. data = dsp.search_node_description(node_id)[0] elif node_attr == 'value_type' and node['type'] == 'data': # Search and return data node value's type. data = dsp.search_node_description(node_id, node_attr)[0] elif node_attr == 'default_value': data = dsp.default_values[node_id] elif node_attr == 'dsp': data = dsp elif node_attr == 'sol': data = sol if data is EMPTY: data = node.get(node_attr, node) return data, tuple(path)
[ "def", "get_sub_node", "(", "dsp", ",", "path", ",", "node_attr", "=", "'auto'", ",", "solution", "=", "NONE", ",", "_level", "=", "0", ",", "_dsp_name", "=", "NONE", ")", ":", "path", "=", "list", "(", "path", ")", "if", "isinstance", "(", "dsp", ",", "SubDispatch", ")", ":", "# Take the dispatcher obj.", "dsp", "=", "dsp", ".", "dsp", "if", "_dsp_name", "is", "NONE", ":", "# Set origin dispatcher name for warning purpose.", "_dsp_name", "=", "dsp", ".", "name", "if", "solution", "is", "NONE", ":", "# Set origin dispatcher name for warning purpose.", "solution", "=", "dsp", ".", "solution", "node_id", "=", "path", "[", "_level", "]", "# Node id at given level.", "try", ":", "node_id", ",", "node", "=", "_get_node", "(", "dsp", ".", "nodes", ",", "node_id", ")", "# Get dispatcher node.", "path", "[", "_level", "]", "=", "node_id", "except", "KeyError", ":", "if", "_level", "==", "len", "(", "path", ")", "-", "1", "and", "node_attr", "in", "(", "'auto'", ",", "'output'", ")", "and", "solution", "is", "not", "EMPTY", ":", "try", ":", "# Get dispatcher node.", "node_id", ",", "node", "=", "_get_node", "(", "solution", ",", "node_id", ",", "False", ")", "path", "[", "_level", "]", "=", "node_id", "return", "node", ",", "tuple", "(", "path", ")", "except", "KeyError", ":", "pass", "msg", "=", "'Path %s does not exist in %s dispatcher.'", "%", "(", "path", ",", "_dsp_name", ")", "raise", "ValueError", "(", "msg", ")", "_level", "+=", "1", "# Next level.", "if", "_level", "<", "len", "(", "path", ")", ":", "# Is not path leaf?.", "try", ":", "if", "node", "[", "'type'", "]", "in", "(", "'function'", ",", "'dispatcher'", ")", ":", "try", ":", "solution", "=", "solution", ".", "workflow", ".", "node", "[", "node_id", "]", "[", "'solution'", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "solution", "=", "EMPTY", "dsp", "=", "parent_func", "(", "node", "[", "'function'", "]", ")", "# Get parent function.", "else", ":", "raise", "KeyError", "except", "KeyError", ":", "msg", "=", "'Node of path %s at level %i is not a function or '", "'sub-dispatcher node of %s '", "'dispatcher.'", "%", "(", "path", ",", "_level", ",", "_dsp_name", ")", "raise", "ValueError", "(", "msg", ")", "# Continue the node search.", "return", "get_sub_node", "(", "dsp", ",", "path", ",", "node_attr", ",", "solution", ",", "_level", ",", "_dsp_name", ")", "else", ":", "data", ",", "sol", "=", "EMPTY", ",", "solution", "# Return the sub node.", "if", "node_attr", "==", "'auto'", "and", "node", "[", "'type'", "]", "!=", "'data'", ":", "# Auto: function.", "node_attr", "=", "'function'", "elif", "node_attr", "==", "'auto'", "and", "sol", "is", "not", "EMPTY", "and", "node_id", "in", "sol", ":", "data", "=", "sol", "[", "node_id", "]", "# Auto: data output.", "elif", "node_attr", "==", "'output'", "and", "node", "[", "'type'", "]", "!=", "'data'", ":", "data", "=", "sol", ".", "workflow", ".", "nodes", "[", "node_id", "]", "[", "'solution'", "]", "elif", "node_attr", "==", "'output'", "and", "node", "[", "'type'", "]", "==", "'data'", ":", "data", "=", "sol", "[", "node_id", "]", "elif", "node_attr", "==", "'description'", ":", "# Search and return node description.", "data", "=", "dsp", ".", "search_node_description", "(", "node_id", ")", "[", "0", "]", "elif", "node_attr", "==", "'value_type'", "and", "node", "[", "'type'", "]", "==", "'data'", ":", "# Search and return data node value's type.", "data", "=", "dsp", ".", "search_node_description", "(", "node_id", ",", "node_attr", ")", "[", "0", "]", "elif", 
"node_attr", "==", "'default_value'", ":", "data", "=", "dsp", ".", "default_values", "[", "node_id", "]", "elif", "node_attr", "==", "'dsp'", ":", "data", "=", "dsp", "elif", "node_attr", "==", "'sol'", ":", "data", "=", "sol", "if", "data", "is", "EMPTY", ":", "data", "=", "node", ".", "get", "(", "node_attr", ",", "node", ")", "return", "data", ",", "tuple", "(", "path", ")" ]
Returns a sub node of a dispatcher. :param dsp: A dispatcher object or a sub dispatch function. :type dsp: schedula.Dispatcher | SubDispatch :param path: A sequence of node ids or a single node id. Each id identifies a sub-level node. :type path: tuple, str :param node_attr: Output node attr. If the searched node does not have this attribute, all its attributes are returned. When 'auto', returns the "default" attributes of the searched node, which are: - for data node: its output, and if not exists, all its attributes. - for function and sub-dispatcher nodes: the 'function' attribute. :type node_attr: str | None :param solution: Parent Solution. :type solution: schedula.utils.Solution :param _level: Path level. :type _level: int :param _dsp_name: dsp name to show when the function raise a value error. :type _dsp_name: str :return: A sub node of a dispatcher and its path. :rtype: dict | object, tuple[str] **Example**: .. dispatcher:: o :opt: graph_attr={'ratio': '1'}, depth=-1 :code: >>> from schedula import Dispatcher >>> s_dsp = Dispatcher(name='Sub-dispatcher') >>> def fun(a, b): ... return a + b ... >>> s_dsp.add_function('a + b', fun, ['a', 'b'], ['c']) 'a + b' >>> dispatch = SubDispatch(s_dsp, ['c'], output_type='dict') >>> dsp = Dispatcher(name='Dispatcher') >>> dsp.add_function('Sub-dispatcher', dispatch, ['a'], ['b']) 'Sub-dispatcher' >>> o = dsp.dispatch(inputs={'a': {'a': 3, 'b': 1}}) ... Get the sub node 'c' output or type:: >>> get_sub_node(dsp, ('Sub-dispatcher', 'c')) (4, ('Sub-dispatcher', 'c')) >>> get_sub_node(dsp, ('Sub-dispatcher', 'c'), node_attr='type') ('data', ('Sub-dispatcher', 'c')) Get the sub-dispatcher output: .. dispatcher:: sol :opt: graph_attr={'ratio': '1'}, depth=-1 :code: >>> sol, p = get_sub_node(dsp, ('Sub-dispatcher',), node_attr='output') >>> sol, p (Solution([('a', 3), ('b', 1), ('c', 4)]), ('Sub-dispatcher',))
[ "Returns", "a", "sub", "node", "of", "a", "dispatcher", "." ]
python
train
32.7875
scikit-learn-contrib/forest-confidence-interval
forestci/calibration.py
https://github.com/scikit-learn-contrib/forest-confidence-interval/blob/401c63a74a27d775eff0f72b6c20ffd568491fe0/forestci/calibration.py#L109-L130
def gbayes(x0, g_est, sigma): """ Estimate Bayes posterior with Gaussian noise [Efron2014]_. Parameters ---------- x0: ndarray an observation g_est: float a prior density, as returned by gfit sigma: int noise estimate Returns ------- An array of the posterior estimate E[mu | x0] """ Kx = norm().pdf((g_est[0] - x0) / sigma) post = Kx * g_est[1] post /= sum(post) return sum(post * g_est[0])
[ "def", "gbayes", "(", "x0", ",", "g_est", ",", "sigma", ")", ":", "Kx", "=", "norm", "(", ")", ".", "pdf", "(", "(", "g_est", "[", "0", "]", "-", "x0", ")", "/", "sigma", ")", "post", "=", "Kx", "*", "g_est", "[", "1", "]", "post", "/=", "sum", "(", "post", ")", "return", "sum", "(", "post", "*", "g_est", "[", "0", "]", ")" ]
Estimate Bayes posterior with Gaussian noise [Efron2014]_. Parameters ---------- x0: ndarray an observation g_est: float a prior density, as returned by gfit sigma: int noise estimate Returns ------- An array of the posterior estimate E[mu | x0]
[ "Estimate", "Bayes", "posterior", "with", "Gaussian", "noise", "[", "Efron2014", "]", "_", "." ]
python
valid
20.772727
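A self-contained numeric check of the posterior-mean formula; the discretized standard-normal prior below is only a stand-in for whatever gfit would return, and with sigma = 1 the estimate should shrink the observation roughly halfway towards zero.

import numpy as np
from scipy.stats import norm

def gbayes(x0, g_est, sigma):
    Kx = norm().pdf((g_est[0] - x0) / sigma)
    post = Kx * g_est[1]
    post /= sum(post)
    return sum(post * g_est[0])

mu_grid = np.linspace(-3, 3, 121)        # support of the assumed prior
prior = np.exp(-0.5 * mu_grid ** 2)      # unnormalized N(0, 1) density
prior /= prior.sum()

# Conjugate-normal theory gives E[mu | x0 = 1] = 0.5 here; the grid approximation agrees.
print(gbayes(1.0, (mu_grid, prior), sigma=1.0))   # approx 0.5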
tensorflow/probability
tensorflow_probability/python/layers/distribution_layer.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1698-L1703
def params_size(num_components, event_shape=(), name=None): """The number of `params` needed to create a single distribution.""" return MixtureSameFamily.params_size( num_components, IndependentLogistic.params_size(event_shape, name=name), name=name)
[ "def", "params_size", "(", "num_components", ",", "event_shape", "=", "(", ")", ",", "name", "=", "None", ")", ":", "return", "MixtureSameFamily", ".", "params_size", "(", "num_components", ",", "IndependentLogistic", ".", "params_size", "(", "event_shape", ",", "name", "=", "name", ")", ",", "name", "=", "name", ")" ]
The number of `params` needed to create a single distribution.
[ "The", "number", "of", "params", "needed", "to", "create", "a", "single", "distribution", "." ]
python
test
46.166667
clalancette/pycdlib
pycdlib/udf.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/udf.py#L1130-L1151
def parse(self, data): # type: (bytes) -> None ''' Parse the passed in data into a UDF Implementation Use Volume Descriptor Implementation Use field. Parameters: data - The data to parse. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF Implementation Use Volume Descriptor Implementation Use field already initialized') (self.char_set, self.log_vol_ident, self.lv_info1, self.lv_info2, self.lv_info3, impl_ident, self.impl_use) = struct.unpack_from(self.FMT, data, 0) self.impl_ident = UDFEntityID() self.impl_ident.parse(impl_ident) self._initialized = True
[ "def", "parse", "(", "self", ",", "data", ")", ":", "# type: (bytes) -> None", "if", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'UDF Implementation Use Volume Descriptor Implementation Use field already initialized'", ")", "(", "self", ".", "char_set", ",", "self", ".", "log_vol_ident", ",", "self", ".", "lv_info1", ",", "self", ".", "lv_info2", ",", "self", ".", "lv_info3", ",", "impl_ident", ",", "self", ".", "impl_use", ")", "=", "struct", ".", "unpack_from", "(", "self", ".", "FMT", ",", "data", ",", "0", ")", "self", ".", "impl_ident", "=", "UDFEntityID", "(", ")", "self", ".", "impl_ident", ".", "parse", "(", "impl_ident", ")", "self", ".", "_initialized", "=", "True" ]
Parse the passed in data into a UDF Implementation Use Volume Descriptor Implementation Use field. Parameters: data - The data to parse. Returns: Nothing.
[ "Parse", "the", "passed", "in", "data", "into", "a", "UDF", "Implementation", "Use", "Volume", "Descriptor", "Implementation", "Use", "field", "." ]
python
train
33.181818
pudo/jsonmapping
jsonmapping/value.py
https://github.com/pudo/jsonmapping/blob/4cf0a20a393ba82e00651c6fd39522a67a0155de/jsonmapping/value.py#L7-L25
def extract_value(mapping, bind, data): """ Given a mapping and JSON schema spec, extract a value from ``data`` and apply certain transformations to normalize the value. """ columns = mapping.get('columns', [mapping.get('column')]) values = [data.get(c) for c in columns] for transform in mapping.get('transforms', []): # any added transforms must also be added to the schema. values = list(TRANSFORMS[transform](mapping, bind, values)) format_str = mapping.get('format') value = values[0] if len(values) else None if not is_empty(format_str): value = format_str % tuple('' if v is None else v for v in values) empty = is_empty(value) if empty: value = mapping.get('default') or bind.schema.get('default') return empty, convert_value(bind, value)
[ "def", "extract_value", "(", "mapping", ",", "bind", ",", "data", ")", ":", "columns", "=", "mapping", ".", "get", "(", "'columns'", ",", "[", "mapping", ".", "get", "(", "'column'", ")", "]", ")", "values", "=", "[", "data", ".", "get", "(", "c", ")", "for", "c", "in", "columns", "]", "for", "transform", "in", "mapping", ".", "get", "(", "'transforms'", ",", "[", "]", ")", ":", "# any added transforms must also be added to the schema.", "values", "=", "list", "(", "TRANSFORMS", "[", "transform", "]", "(", "mapping", ",", "bind", ",", "values", ")", ")", "format_str", "=", "mapping", ".", "get", "(", "'format'", ")", "value", "=", "values", "[", "0", "]", "if", "len", "(", "values", ")", "else", "None", "if", "not", "is_empty", "(", "format_str", ")", ":", "value", "=", "format_str", "%", "tuple", "(", "''", "if", "v", "is", "None", "else", "v", "for", "v", "in", "values", ")", "empty", "=", "is_empty", "(", "value", ")", "if", "empty", ":", "value", "=", "mapping", ".", "get", "(", "'default'", ")", "or", "bind", ".", "schema", ".", "get", "(", "'default'", ")", "return", "empty", ",", "convert_value", "(", "bind", ",", "value", ")" ]
Given a mapping and JSON schema spec, extract a value from ``data`` and apply certain transformations to normalize the value.
[ "Given", "a", "mapping", "and", "JSON", "schema", "spec", "extract", "a", "value", "from", "data", "and", "apply", "certain", "transformations", "to", "normalize", "the", "value", "." ]
python
train
42.473684
gijzelaerr/python-snap7
snap7/util.py
https://github.com/gijzelaerr/python-snap7/blob/a6db134c7a3a2ef187b9eca04669221d6fc634c3/snap7/util.py#L111-L128
def set_bool(_bytearray, byte_index, bool_index, value): """ Set boolean value on location in bytearray """ assert value in [0, 1, True, False] current_value = get_bool(_bytearray, byte_index, bool_index) index_value = 1 << bool_index # check if bool already has correct value if current_value == value: return if value: # make sure index_v is IN current byte _bytearray[byte_index] += index_value else: # make sure index_v is NOT in current byte _bytearray[byte_index] -= index_value
[ "def", "set_bool", "(", "_bytearray", ",", "byte_index", ",", "bool_index", ",", "value", ")", ":", "assert", "value", "in", "[", "0", ",", "1", ",", "True", ",", "False", "]", "current_value", "=", "get_bool", "(", "_bytearray", ",", "byte_index", ",", "bool_index", ")", "index_value", "=", "1", "<<", "bool_index", "# check if bool already has correct value", "if", "current_value", "==", "value", ":", "return", "if", "value", ":", "# make sure index_v is IN current byte", "_bytearray", "[", "byte_index", "]", "+=", "index_value", "else", ":", "# make sure index_v is NOT in current byte", "_bytearray", "[", "byte_index", "]", "-=", "index_value" ]
Set boolean value on location in bytearray
[ "Set", "boolean", "value", "on", "location", "in", "bytearray" ]
python
train
30.5
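A quick standalone demonstration of set_bool on a two-byte buffer; get_bool is re-sketched here only so the snippet runs on its own (in snap7.util it is defined alongside set_bool).

def get_bool(_bytearray, byte_index, bool_index):
    # Sketch of the companion getter: test the bit at bool_index.
    return bool(_bytearray[byte_index] & (1 << bool_index))

def set_bool(_bytearray, byte_index, bool_index, value):
    assert value in [0, 1, True, False]
    current_value = get_bool(_bytearray, byte_index, bool_index)
    index_value = 1 << bool_index
    if current_value == value:
        return
    if value:
        _bytearray[byte_index] += index_value
    else:
        _bytearray[byte_index] -= index_value

data = bytearray(2)
set_bool(data, 0, 2, True)      # turn bit 2 of byte 0 on
set_bool(data, 1, 7, True)      # turn bit 7 of byte 1 on
print(data[0], data[1])         # 4 128
set_bool(data, 0, 2, False)     # and off again
print(data[0])                  # 0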
Iotic-Labs/py-IoticAgent
src/IoticAgent/Core/Validation.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/Core/Validation.py#L231-L236
def __valid_url(cls, url): """Expects input to already be a valid string""" bits = urlparse(url) return ((bits.scheme == "http" or bits.scheme == "https") and _PATTERN_URL_PART.match(bits.netloc) and _PATTERN_URL_PART.match(bits.path))
[ "def", "__valid_url", "(", "cls", ",", "url", ")", ":", "bits", "=", "urlparse", "(", "url", ")", "return", "(", "(", "bits", ".", "scheme", "==", "\"http\"", "or", "bits", ".", "scheme", "==", "\"https\"", ")", "and", "_PATTERN_URL_PART", ".", "match", "(", "bits", ".", "netloc", ")", "and", "_PATTERN_URL_PART", ".", "match", "(", "bits", ".", "path", ")", ")" ]
Expects input to already be a valid string
[ "Expects", "input", "to", "already", "be", "a", "valid", "string" ]
python
train
47.666667
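A standalone sketch of the same urlparse-based validation; the record does not show _PATTERN_URL_PART, so a permissive placeholder pattern is assumed here purely for illustration.

import re
from urllib.parse import urlparse

# Placeholder for the library's real _PATTERN_URL_PART, which is not shown above.
_PATTERN_URL_PART = re.compile(r'^[A-Za-z0-9._~:/\-]*$')

def valid_url(url):
    bits = urlparse(url)
    return (bits.scheme in ('http', 'https')
            and bool(_PATTERN_URL_PART.match(bits.netloc))
            and bool(_PATTERN_URL_PART.match(bits.path)))

print(valid_url('https://example.com/api/v0'))   # True
print(valid_url('ftp://example.com/file'))       # False: scheme is not http(s)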
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAUtil/QADate_trade.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAUtil/QADate_trade.py#L7406-L7426
def QA_util_date_gap(date, gap, methods): ''' :param date: 字符串起始日 类型 str eg: 2018-11-11 :param gap: 整数 间隔多数个交易日 :param methods: gt大于 ,gte 大于等于, 小于lt ,小于等于lte , 等于=== :return: 字符串 eg:2000-01-01 ''' try: if methods in ['>', 'gt']: return trade_date_sse[trade_date_sse.index(date) + gap] elif methods in ['>=', 'gte']: return trade_date_sse[trade_date_sse.index(date) + gap - 1] elif methods in ['<', 'lt']: return trade_date_sse[trade_date_sse.index(date) - gap] elif methods in ['<=', 'lte']: return trade_date_sse[trade_date_sse.index(date) - gap + 1] elif methods in ['==', '=', 'eq']: return date except: return 'wrong date'
[ "def", "QA_util_date_gap", "(", "date", ",", "gap", ",", "methods", ")", ":", "try", ":", "if", "methods", "in", "[", "'>'", ",", "'gt'", "]", ":", "return", "trade_date_sse", "[", "trade_date_sse", ".", "index", "(", "date", ")", "+", "gap", "]", "elif", "methods", "in", "[", "'>='", ",", "'gte'", "]", ":", "return", "trade_date_sse", "[", "trade_date_sse", ".", "index", "(", "date", ")", "+", "gap", "-", "1", "]", "elif", "methods", "in", "[", "'<'", ",", "'lt'", "]", ":", "return", "trade_date_sse", "[", "trade_date_sse", ".", "index", "(", "date", ")", "-", "gap", "]", "elif", "methods", "in", "[", "'<='", ",", "'lte'", "]", ":", "return", "trade_date_sse", "[", "trade_date_sse", ".", "index", "(", "date", ")", "-", "gap", "+", "1", "]", "elif", "methods", "in", "[", "'=='", ",", "'='", ",", "'eq'", "]", ":", "return", "date", "except", ":", "return", "'wrong date'" ]
:param date: 字符串起始日 类型 str eg: 2018-11-11 :param gap: 整数 间隔多数个交易日 :param methods: gt大于 ,gte 大于等于, 小于lt ,小于等于lte , 等于=== :return: 字符串 eg:2000-01-01
[ ":", "param", "date", ":", "字符串起始日", "类型", "str", "eg", ":", "2018", "-", "11", "-", "11", ":", "param", "gap", ":", "整数", "间隔多数个交易日", ":", "param", "methods", ":", "gt大于", ",gte", "大于等于,", "小于lt", ",小于等于lte", ",", "等于", "===", ":", "return", ":", "字符串", "eg:2000", "-", "01", "-", "01" ]
python
train
35.619048
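A toy illustration of the gap lookup with a made-up five-day trading calendar; in QUANTAXIS the trade_date_sse list is the real exchange calendar, and only the 'gt'/'lt' branches are reproduced here.

trade_date_sse = ['2018-11-07', '2018-11-08', '2018-11-09',
                  '2018-11-12', '2018-11-13']   # hypothetical calendar slice

def date_gap(date, gap, methods):
    # Simplified: move forward ('gt') or backward ('lt') by `gap` trading days.
    if methods in ['>', 'gt']:
        return trade_date_sse[trade_date_sse.index(date) + gap]
    elif methods in ['<', 'lt']:
        return trade_date_sse[trade_date_sse.index(date) - gap]
    return date

print(date_gap('2018-11-09', 1, 'gt'))   # 2018-11-12: the next trading day skips the weekend
print(date_gap('2018-11-09', 2, 'lt'))   # 2018-11-07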
aio-libs/aioredis
aioredis/commands/server.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/server.py#L79-L82
def command_info(self, command, *commands): """Get array of specific Redis command details.""" return self.execute(b'COMMAND', b'INFO', command, *commands, encoding='utf-8')
[ "def", "command_info", "(", "self", ",", "command", ",", "*", "commands", ")", ":", "return", "self", ".", "execute", "(", "b'COMMAND'", ",", "b'INFO'", ",", "command", ",", "*", "commands", ",", "encoding", "=", "'utf-8'", ")" ]
Get array of specific Redis command details.
[ "Get", "array", "of", "specific", "Redis", "command", "details", "." ]
python
train
53.5
retext-project/retext
ReText/window.py
https://github.com/retext-project/retext/blob/ad70435341dd89c7a74742df9d1f9af70859a969/ReText/window.py#L499-L509
def tabModificationStateChanged(self, tab): ''' Perform all UI state changes that need to be done when the modification state of the current tab has changed. ''' if tab == self.currentTab: changed = tab.editBox.document().isModified() if self.autoSaveActive(tab): changed = False self.actionSave.setEnabled(changed) self.setWindowModified(changed)
[ "def", "tabModificationStateChanged", "(", "self", ",", "tab", ")", ":", "if", "tab", "==", "self", ".", "currentTab", ":", "changed", "=", "tab", ".", "editBox", ".", "document", "(", ")", ".", "isModified", "(", ")", "if", "self", ".", "autoSaveActive", "(", "tab", ")", ":", "changed", "=", "False", "self", ".", "actionSave", ".", "setEnabled", "(", "changed", ")", "self", ".", "setWindowModified", "(", "changed", ")" ]
Perform all UI state changes that need to be done when the modification state of the current tab has changed.
[ "Perform", "all", "UI", "state", "changes", "that", "need", "to", "be", "done", "when", "the", "modification", "state", "of", "the", "current", "tab", "has", "changed", "." ]
python
train
33
napalm-automation/napalm-nxos
napalm_nxos_ssh/nxos_ssh.py
https://github.com/napalm-automation/napalm-nxos/blob/936d641c99e068817abf247e0e5571fc31b3a92a/napalm_nxos_ssh/nxos_ssh.py#L1037-L1105
def get_arp_table(self): """ Get arp table information. Return a list of dictionaries having the following set of keys: * interface (string) * mac (string) * ip (string) * age (float) For example:: [ { 'interface' : 'MgmtEth0/RSP0/CPU0/0', 'mac' : '5c:5e:ab:da:3c:f0', 'ip' : '172.17.17.1', 'age' : 12.0 }, { 'interface': 'MgmtEth0/RSP0/CPU0/0', 'mac' : '66:0e:94:96:e0:ff', 'ip' : '172.17.17.2', 'age' : 14.0 } ] """ arp_table = [] command = 'show ip arp vrf default | exc INCOMPLETE' output = self.device.send_command(command) separator = r"^Address\s+Age.*Interface.*$" arp_list = re.split(separator, output, flags=re.M) if len(arp_list) != 2: raise ValueError("Error processing arp table output:\n\n{}".format(output)) arp_entries = arp_list[1].strip() for line in arp_entries.splitlines(): if len(line.split()) == 4: address, age, mac, interface = line.split() else: raise ValueError("Unexpected output from: {}".format(line.split())) if age == '-': age = -1.0 elif ':' not in age: # Cisco sometimes returns a sub second arp time 0.411797 try: age = float(age) except ValueError: age = -1.0 else: age = convert_hhmmss(age) age = float(age) age = round(age, 1) # Validate we matched correctly if not re.search(RE_IPADDR, address): raise ValueError("Invalid IP Address detected: {}".format(address)) if not re.search(RE_MAC, mac): raise ValueError("Invalid MAC Address detected: {}".format(mac)) entry = { 'interface': interface, 'mac': napalm_base.helpers.mac(mac), 'ip': address, 'age': age } arp_table.append(entry) return arp_table
[ "def", "get_arp_table", "(", "self", ")", ":", "arp_table", "=", "[", "]", "command", "=", "'show ip arp vrf default | exc INCOMPLETE'", "output", "=", "self", ".", "device", ".", "send_command", "(", "command", ")", "separator", "=", "r\"^Address\\s+Age.*Interface.*$\"", "arp_list", "=", "re", ".", "split", "(", "separator", ",", "output", ",", "flags", "=", "re", ".", "M", ")", "if", "len", "(", "arp_list", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"Error processing arp table output:\\n\\n{}\"", ".", "format", "(", "output", ")", ")", "arp_entries", "=", "arp_list", "[", "1", "]", ".", "strip", "(", ")", "for", "line", "in", "arp_entries", ".", "splitlines", "(", ")", ":", "if", "len", "(", "line", ".", "split", "(", ")", ")", "==", "4", ":", "address", ",", "age", ",", "mac", ",", "interface", "=", "line", ".", "split", "(", ")", "else", ":", "raise", "ValueError", "(", "\"Unexpected output from: {}\"", ".", "format", "(", "line", ".", "split", "(", ")", ")", ")", "if", "age", "==", "'-'", ":", "age", "=", "-", "1.0", "elif", "':'", "not", "in", "age", ":", "# Cisco sometimes returns a sub second arp time 0.411797", "try", ":", "age", "=", "float", "(", "age", ")", "except", "ValueError", ":", "age", "=", "-", "1.0", "else", ":", "age", "=", "convert_hhmmss", "(", "age", ")", "age", "=", "float", "(", "age", ")", "age", "=", "round", "(", "age", ",", "1", ")", "# Validate we matched correctly", "if", "not", "re", ".", "search", "(", "RE_IPADDR", ",", "address", ")", ":", "raise", "ValueError", "(", "\"Invalid IP Address detected: {}\"", ".", "format", "(", "address", ")", ")", "if", "not", "re", ".", "search", "(", "RE_MAC", ",", "mac", ")", ":", "raise", "ValueError", "(", "\"Invalid MAC Address detected: {}\"", ".", "format", "(", "mac", ")", ")", "entry", "=", "{", "'interface'", ":", "interface", ",", "'mac'", ":", "napalm_base", ".", "helpers", ".", "mac", "(", "mac", ")", ",", "'ip'", ":", "address", ",", "'age'", ":", "age", "}", "arp_table", ".", "append", "(", "entry", ")", "return", "arp_table" ]
Get arp table information. Return a list of dictionaries having the following set of keys: * interface (string) * mac (string) * ip (string) * age (float) For example:: [ { 'interface' : 'MgmtEth0/RSP0/CPU0/0', 'mac' : '5c:5e:ab:da:3c:f0', 'ip' : '172.17.17.1', 'age' : 12.0 }, { 'interface': 'MgmtEth0/RSP0/CPU0/0', 'mac' : '66:0e:94:96:e0:ff', 'ip' : '172.17.17.2', 'age' : 14.0 } ]
[ "Get", "arp", "table", "information", "." ]
python
train
33.956522
sepandhaghighi/pycm
pycm/pycm_overall_func.py
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_overall_func.py#L591-L607
def kappa_se_calc(PA, PE, POP): """ Calculate kappa standard error. :param PA: observed agreement among raters (overall accuracy) :type PA : float :param PE: hypothetical probability of chance agreement (random accuracy) :type PE : float :param POP: population :type POP:int :return: kappa standard error as float """ try: result = math.sqrt((PA * (1 - PA)) / (POP * ((1 - PE)**2))) return result except Exception: return "None"
[ "def", "kappa_se_calc", "(", "PA", ",", "PE", ",", "POP", ")", ":", "try", ":", "result", "=", "math", ".", "sqrt", "(", "(", "PA", "*", "(", "1", "-", "PA", ")", ")", "/", "(", "POP", "*", "(", "(", "1", "-", "PE", ")", "**", "2", ")", ")", ")", "return", "result", "except", "Exception", ":", "return", "\"None\"" ]
Calculate kappa standard error. :param PA: observed agreement among raters (overall accuracy) :type PA : float :param PE: hypothetical probability of chance agreement (random accuracy) :type PE : float :param POP: population :type POP:int :return: kappa standard error as float
[ "Calculate", "kappa", "standard", "error", "." ]
python
train
28.588235
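The formula computed above is SE_kappa = sqrt(PA(1 - PA) / (POP (1 - PE)^2)); with the round numbers PA = 0.8, PE = 0.5, POP = 100 it evaluates to sqrt(0.16 / 25) = 0.08, which the copy below confirms.

import math

def kappa_se_calc(PA, PE, POP):
    try:
        return math.sqrt((PA * (1 - PA)) / (POP * ((1 - PE) ** 2)))
    except Exception:
        return "None"

print(kappa_se_calc(0.8, 0.5, 100))   # approx 0.08
print(kappa_se_calc(0.8, 1.0, 100))   # "None": the zero denominator is swallowed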
yatiml/yatiml
yatiml/helpers.py
https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/helpers.py#L96-L118
def set_value(self, value: ScalarType) -> None: """Sets the value of the node to a scalar value. After this, is_scalar(type(value)) will return true. Args: value: The value to set this node to, a str, int, float, \ bool, or None. """ if isinstance(value, bool): value_str = 'true' if value else 'false' else: value_str = str(value) start_mark = self.yaml_node.start_mark end_mark = self.yaml_node.end_mark # If we're of a class type, then we want to keep that tag so that the # correct Constructor is called. If we're a built-in type, set the tag # to the appropriate YAML tag. tag = self.yaml_node.tag if tag.startswith('tag:yaml.org,2002:'): tag = scalar_type_to_tag[type(value)] new_node = yaml.ScalarNode(tag, value_str, start_mark, end_mark) self.yaml_node = new_node
[ "def", "set_value", "(", "self", ",", "value", ":", "ScalarType", ")", "->", "None", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "value_str", "=", "'true'", "if", "value", "else", "'false'", "else", ":", "value_str", "=", "str", "(", "value", ")", "start_mark", "=", "self", ".", "yaml_node", ".", "start_mark", "end_mark", "=", "self", ".", "yaml_node", ".", "end_mark", "# If we're of a class type, then we want to keep that tag so that the", "# correct Constructor is called. If we're a built-in type, set the tag", "# to the appropriate YAML tag.", "tag", "=", "self", ".", "yaml_node", ".", "tag", "if", "tag", ".", "startswith", "(", "'tag:yaml.org,2002:'", ")", ":", "tag", "=", "scalar_type_to_tag", "[", "type", "(", "value", ")", "]", "new_node", "=", "yaml", ".", "ScalarNode", "(", "tag", ",", "value_str", ",", "start_mark", ",", "end_mark", ")", "self", ".", "yaml_node", "=", "new_node" ]
Sets the value of the node to a scalar value. After this, is_scalar(type(value)) will return true. Args: value: The value to set this node to, a str, int, float, \ bool, or None.
[ "Sets", "the", "value", "of", "the", "node", "to", "a", "scalar", "value", "." ]
python
train
40.869565
pantsbuild/pex
pex/translator.py
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/translator.py#L133-L146
def translate(self, package, into=None): """From a binary package, translate to a local binary distribution.""" if not package.local: raise ValueError('BinaryTranslator cannot translate remote packages.') if not isinstance(package, self._package_type): return None if not package.compatible(self._supported_tags): TRACER.log('Target package %s is not compatible with %s' % ( package, self._supported_tags)) return None into = into or safe_mkdtemp() target_path = os.path.join(into, package.filename) safe_copy(package.local_path, target_path) return DistributionHelper.distribution_from_path(target_path)
[ "def", "translate", "(", "self", ",", "package", ",", "into", "=", "None", ")", ":", "if", "not", "package", ".", "local", ":", "raise", "ValueError", "(", "'BinaryTranslator cannot translate remote packages.'", ")", "if", "not", "isinstance", "(", "package", ",", "self", ".", "_package_type", ")", ":", "return", "None", "if", "not", "package", ".", "compatible", "(", "self", ".", "_supported_tags", ")", ":", "TRACER", ".", "log", "(", "'Target package %s is not compatible with %s'", "%", "(", "package", ",", "self", ".", "_supported_tags", ")", ")", "return", "None", "into", "=", "into", "or", "safe_mkdtemp", "(", ")", "target_path", "=", "os", ".", "path", ".", "join", "(", "into", ",", "package", ".", "filename", ")", "safe_copy", "(", "package", ".", "local_path", ",", "target_path", ")", "return", "DistributionHelper", ".", "distribution_from_path", "(", "target_path", ")" ]
From a binary package, translate to a local binary distribution.
[ "From", "a", "binary", "package", "translate", "to", "a", "local", "binary", "distribution", "." ]
python
train
46.928571
Netflix-Skunkworks/historical
historical/models.py
https://github.com/Netflix-Skunkworks/historical/blob/c3ebaa8388a3fe67e23a6c9c6b04c3e618497c4a/historical/models.py#L136-L152
def serialize_me(self, account_id, region, next_token=None): """Dumps the proper JSON for the schema. :param account_id: :param region: :param next_token: :return: """ payload = { 'account_id': account_id, 'region': region } if next_token: payload['next_token'] = next_token return self.dumps(payload).data
[ "def", "serialize_me", "(", "self", ",", "account_id", ",", "region", ",", "next_token", "=", "None", ")", ":", "payload", "=", "{", "'account_id'", ":", "account_id", ",", "'region'", ":", "region", "}", "if", "next_token", ":", "payload", "[", "'next_token'", "]", "=", "next_token", "return", "self", ".", "dumps", "(", "payload", ")", ".", "data" ]
Dumps the proper JSON for the schema. :param account_id: :param region: :param next_token: :return:
[ "Dumps", "the", "proper", "JSON", "for", "the", "schema", "." ]
python
train
24.058824
clinicedc/edc-notification
edc_notification/mailing_list_manager.py
https://github.com/clinicedc/edc-notification/blob/79e43a44261e37566c63a8780d80b0d8ece89cc9/edc_notification/mailing_list_manager.py#L138-L145
def delete(self): """Returns a response after attempting to delete the list. """ if not self.email_enabled: raise EmailNotEnabledError("See settings.EMAIL_ENABLED") return requests.delete( f"{self.api_url}/{self.address}", auth=("api", self.api_key) )
[ "def", "delete", "(", "self", ")", ":", "if", "not", "self", ".", "email_enabled", ":", "raise", "EmailNotEnabledError", "(", "\"See settings.EMAIL_ENABLED\"", ")", "return", "requests", ".", "delete", "(", "f\"{self.api_url}/{self.address}\"", ",", "auth", "=", "(", "\"api\"", ",", "self", ".", "api_key", ")", ")" ]
Returns a response after attempting to delete the list.
[ "Returns", "a", "response", "after", "attempting", "to", "delete", "the", "list", "." ]
python
train
38.5
mdickinson/refcycle
refcycle/object_graph.py
https://github.com/mdickinson/refcycle/blob/627fad74c74efc601209c96405f8118cd99b2241/refcycle/object_graph.py#L377-L381
def find_by_typename(self, typename): """ List of all objects whose type has the given name. """ return self.find_by(lambda obj: type(obj).__name__ == typename)
[ "def", "find_by_typename", "(", "self", ",", "typename", ")", ":", "return", "self", ".", "find_by", "(", "lambda", "obj", ":", "type", "(", "obj", ")", ".", "__name__", "==", "typename", ")" ]
List of all objects whose type has the given name.
[ "List", "of", "all", "objects", "whose", "type", "has", "the", "given", "name", "." ]
python
train
37.6
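The method above is a thin wrapper around a type-name filter; a standalone equivalent over a plain list (an illustration, not the refcycle API) is simply:

def find_by_typename(objects, typename):
    # Keep the objects whose concrete type carries the given name.
    return [obj for obj in objects if type(obj).__name__ == typename]

print(find_by_typename([1, "a", 2.5, "b"], "str"))  # ['a', 'b']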
geronimp/graftM
graftm/sequence_searcher.py
https://github.com/geronimp/graftM/blob/c82576517290167f605fd0bc4facd009cee29f48/graftm/sequence_searcher.py#L1015-L1108
def search_and_extract_nucleotides_matching_nucleotide_database(self, unpack, euk_check, search_method, maximum_range, threads, evalue, hmmsearch_output_table, hit_reads_fasta): '''As per nt_db_search() except slightly lower level. Search an input read set (unpack) and then extract the sequences that hit. Parameters ---------- hmmsearch_output_table: str path to hmmsearch output table hit_reads_fasta: str path to hit nucleotide sequences Returns ------- direction_information: dict {read_1: False ... read n: True} where True = Forward direction and False = Reverse direction result: DBSearchResult object containing file locations and hit information ''' if search_method == "hmmsearch": # First search the reads using the HMM search_result, table_list = self.nhmmer( hmmsearch_output_table, unpack, threads, evalue ) elif search_method == 'diamond': raise Exception("Diamond searches not supported for nucelotide databases yet") if maximum_range: hits = self._get_read_names( search_result, # define the span of hits maximum_range ) else: hits = self._get_sequence_directions(search_result) hit_readnames = hits.keys() if euk_check: euk_reads = self._check_euk_contamination(table_list) hit_readnames = set([read for read in hit_readnames if read not in euk_reads]) hits = {key:item for key, item in hits.iteritems() if key in hit_readnames} hit_read_count = [len(euk_reads), len(hit_readnames)] else: hit_read_count = [0, len(hit_readnames)] hit_reads_fasta, direction_information = self._extract_from_raw_reads( hit_reads_fasta, hit_readnames, unpack.read_file, unpack.format(), hits ) if not hit_readnames: result = DBSearchResult(None, search_result, hit_read_count, None) else: slash_endings=self._check_for_slash_endings(hit_readnames) result = DBSearchResult(hit_reads_fasta, search_result, hit_read_count, slash_endings) if maximum_range: n_hits = sum([len(x["strand"]) for x in hits.values()]) else: n_hits = len(hits) logging.info("%s read(s) detected" % n_hits) return result, direction_information
[ "def", "search_and_extract_nucleotides_matching_nucleotide_database", "(", "self", ",", "unpack", ",", "euk_check", ",", "search_method", ",", "maximum_range", ",", "threads", ",", "evalue", ",", "hmmsearch_output_table", ",", "hit_reads_fasta", ")", ":", "if", "search_method", "==", "\"hmmsearch\"", ":", "# First search the reads using the HMM", "search_result", ",", "table_list", "=", "self", ".", "nhmmer", "(", "hmmsearch_output_table", ",", "unpack", ",", "threads", ",", "evalue", ")", "elif", "search_method", "==", "'diamond'", ":", "raise", "Exception", "(", "\"Diamond searches not supported for nucelotide databases yet\"", ")", "if", "maximum_range", ":", "hits", "=", "self", ".", "_get_read_names", "(", "search_result", ",", "# define the span of hits", "maximum_range", ")", "else", ":", "hits", "=", "self", ".", "_get_sequence_directions", "(", "search_result", ")", "hit_readnames", "=", "hits", ".", "keys", "(", ")", "if", "euk_check", ":", "euk_reads", "=", "self", ".", "_check_euk_contamination", "(", "table_list", ")", "hit_readnames", "=", "set", "(", "[", "read", "for", "read", "in", "hit_readnames", "if", "read", "not", "in", "euk_reads", "]", ")", "hits", "=", "{", "key", ":", "item", "for", "key", ",", "item", "in", "hits", ".", "iteritems", "(", ")", "if", "key", "in", "hit_readnames", "}", "hit_read_count", "=", "[", "len", "(", "euk_reads", ")", ",", "len", "(", "hit_readnames", ")", "]", "else", ":", "hit_read_count", "=", "[", "0", ",", "len", "(", "hit_readnames", ")", "]", "hit_reads_fasta", ",", "direction_information", "=", "self", ".", "_extract_from_raw_reads", "(", "hit_reads_fasta", ",", "hit_readnames", ",", "unpack", ".", "read_file", ",", "unpack", ".", "format", "(", ")", ",", "hits", ")", "if", "not", "hit_readnames", ":", "result", "=", "DBSearchResult", "(", "None", ",", "search_result", ",", "hit_read_count", ",", "None", ")", "else", ":", "slash_endings", "=", "self", ".", "_check_for_slash_endings", "(", "hit_readnames", ")", "result", "=", "DBSearchResult", "(", "hit_reads_fasta", ",", "search_result", ",", "hit_read_count", ",", "slash_endings", ")", "if", "maximum_range", ":", "n_hits", "=", "sum", "(", "[", "len", "(", "x", "[", "\"strand\"", "]", ")", "for", "x", "in", "hits", ".", "values", "(", ")", "]", ")", "else", ":", "n_hits", "=", "len", "(", "hits", ")", "logging", ".", "info", "(", "\"%s read(s) detected\"", "%", "n_hits", ")", "return", "result", ",", "direction_information" ]
As per nt_db_search() except slightly lower level. Search an input read set (unpack) and then extract the sequences that hit. Parameters ---------- hmmsearch_output_table: str path to hmmsearch output table hit_reads_fasta: str path to hit nucleotide sequences Returns ------- direction_information: dict {read_1: False ... read n: True} where True = Forward direction and False = Reverse direction result: DBSearchResult object containing file locations and hit information
[ "As", "per", "nt_db_search", "()", "except", "slightly", "lower", "level", ".", "Search", "an", "input", "read", "set", "(", "unpack", ")", "and", "then", "extract", "the", "sequences", "that", "hit", "." ]
python
train
41.202128
MartinThoma/hwrt
hwrt/segmentation/beam.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/segmentation/beam.py#L137-L189
def _add_hypotheses_assuming_new_stroke(self, new_stroke, stroke_nr, new_beam): """ Get new guesses by assuming new_stroke is a new symbol. Parameters ---------- new_stroke : list of dicts A list of dicts [{'x': 12, 'y': 34, 'time': 56}, ...] which represent a point. stroke_nr : int Number of the stroke for segmentation new_beam : beam object """ guesses = single_clf.predict({'data': [new_stroke], 'id': None})[:self.m] for hyp in self.hypotheses: new_geometry = deepcopy(hyp['geometry']) most_right = new_geometry if len(hyp['symbols']) == 0: while 'right' in most_right: most_right = most_right['right'] most_right['right'] = {'symbol_index': len(hyp['symbols']), 'right': None} else: most_right = {'symbol_index': len(hyp['symbols']), 'right': None} for guess in guesses: sym = {'symbol': guess['semantics'], 'probability': guess['probability']} new_seg = deepcopy(hyp['segmentation']) new_seg.append([stroke_nr]) new_sym = deepcopy(hyp['symbols']) new_sym.append(sym) b = {'segmentation': new_seg, 'symbols': new_sym, 'geometry': new_geometry, 'probability': None } # spacial_rels = [] # TODO # for s1_indices, s2_indices in zip(b['segmentation'], # b['segmentation'][1:]): # tmp = [new_beam.history['data'][el] for el in s1_indices] # s1 = HandwrittenData(json.dumps(tmp)) # tmp = [new_beam.history['data'][el] for el in s2_indices] # s2 = HandwrittenData(json.dumps(tmp)) # rel = spacial_relationship.estimate(s1, s2) # spacial_rels.append(rel) # b['geometry'] = spacial_rels new_beam.hypotheses.append(b)
[ "def", "_add_hypotheses_assuming_new_stroke", "(", "self", ",", "new_stroke", ",", "stroke_nr", ",", "new_beam", ")", ":", "guesses", "=", "single_clf", ".", "predict", "(", "{", "'data'", ":", "[", "new_stroke", "]", ",", "'id'", ":", "None", "}", ")", "[", ":", "self", ".", "m", "]", "for", "hyp", "in", "self", ".", "hypotheses", ":", "new_geometry", "=", "deepcopy", "(", "hyp", "[", "'geometry'", "]", ")", "most_right", "=", "new_geometry", "if", "len", "(", "hyp", "[", "'symbols'", "]", ")", "==", "0", ":", "while", "'right'", "in", "most_right", ":", "most_right", "=", "most_right", "[", "'right'", "]", "most_right", "[", "'right'", "]", "=", "{", "'symbol_index'", ":", "len", "(", "hyp", "[", "'symbols'", "]", ")", ",", "'right'", ":", "None", "}", "else", ":", "most_right", "=", "{", "'symbol_index'", ":", "len", "(", "hyp", "[", "'symbols'", "]", ")", ",", "'right'", ":", "None", "}", "for", "guess", "in", "guesses", ":", "sym", "=", "{", "'symbol'", ":", "guess", "[", "'semantics'", "]", ",", "'probability'", ":", "guess", "[", "'probability'", "]", "}", "new_seg", "=", "deepcopy", "(", "hyp", "[", "'segmentation'", "]", ")", "new_seg", ".", "append", "(", "[", "stroke_nr", "]", ")", "new_sym", "=", "deepcopy", "(", "hyp", "[", "'symbols'", "]", ")", "new_sym", ".", "append", "(", "sym", ")", "b", "=", "{", "'segmentation'", ":", "new_seg", ",", "'symbols'", ":", "new_sym", ",", "'geometry'", ":", "new_geometry", ",", "'probability'", ":", "None", "}", "# spacial_rels = [] # TODO", "# for s1_indices, s2_indices in zip(b['segmentation'],", "# b['segmentation'][1:]):", "# tmp = [new_beam.history['data'][el] for el in s1_indices]", "# s1 = HandwrittenData(json.dumps(tmp))", "# tmp = [new_beam.history['data'][el] for el in s2_indices]", "# s2 = HandwrittenData(json.dumps(tmp))", "# rel = spacial_relationship.estimate(s1, s2)", "# spacial_rels.append(rel)", "# b['geometry'] = spacial_rels", "new_beam", ".", "hypotheses", ".", "append", "(", "b", ")" ]
Get new guesses by assuming new_stroke is a new symbol. Parameters ---------- new_stroke : list of dicts A list of dicts [{'x': 12, 'y': 34, 'time': 56}, ...] which represent a point. stroke_nr : int Number of the stroke for segmentation new_beam : beam object
[ "Get", "new", "guesses", "by", "assuming", "new_stroke", "is", "a", "new", "symbol", "." ]
python
train
45
ml4ai/delphi
delphi/apps/rest_api/api.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/apps/rest_api/api.py#L78-L86
def getICMPrimitives(uuid: str): """ returns all ICM primitives (TODO - needs filter support)""" primitives = [ p.deserialize() for p in CausalPrimitive.query.filter_by(model_id=uuid).all() ] for p in primitives: del p["model_id"] return jsonify(primitives)
[ "def", "getICMPrimitives", "(", "uuid", ":", "str", ")", ":", "primitives", "=", "[", "p", ".", "deserialize", "(", ")", "for", "p", "in", "CausalPrimitive", ".", "query", ".", "filter_by", "(", "model_id", "=", "uuid", ")", ".", "all", "(", ")", "]", "for", "p", "in", "primitives", ":", "del", "p", "[", "\"model_id\"", "]", "return", "jsonify", "(", "primitives", ")" ]
returns all ICM primitives (TODO - needs filter support)
[ "returns", "all", "ICM", "primitives", "(", "TODO", "-", "needs", "filter", "support", ")" ]
python
train
32.555556
tinybike/coinbridge
coinbridge/__init__.py
https://github.com/tinybike/coinbridge/blob/c9bde6f4196fecc09e8119f51dff8a26cfc1aee6/coinbridge/__init__.py#L184-L231
def record_tx(self, origin, destination, amount, outcome, destination_id=None): """Records a transaction in the database. Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send outcome (str, bool): the transaction hash if this is a "sendfrom" transaction; for "move", True if successful, False otherwise destination_id (str): the destination account label ("move" only) Returns: str or bool: the outcome (input) argument """ # "move" commands if destination_id: tx = db.Transaction( txtype="move", from_user_id=origin, to_user_id=destination_id, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]["ticker"], to_coin_address=destination, ) # "sendfrom" commands else: self.logger.debug(self.gettransaction(outcome)) confirmations = self.gettransaction(outcome)["confirmations"] last_confirmation = datetime.now() if confirmations else None tx = db.Transaction( txtype="sendfrom", from_user_id=origin, txhash=outcome, txdate=datetime.now(), amount=amount, currency=COINS[self.coin]["ticker"], to_coin_address=destination, confirmations=confirmations, last_confirmation=last_confirmation ) db.session.add(tx) db.session.commit() return outcome
[ "def", "record_tx", "(", "self", ",", "origin", ",", "destination", ",", "amount", ",", "outcome", ",", "destination_id", "=", "None", ")", ":", "# \"move\" commands", "if", "destination_id", ":", "tx", "=", "db", ".", "Transaction", "(", "txtype", "=", "\"move\"", ",", "from_user_id", "=", "origin", ",", "to_user_id", "=", "destination_id", ",", "txdate", "=", "datetime", ".", "now", "(", ")", ",", "amount", "=", "amount", ",", "currency", "=", "COINS", "[", "self", ".", "coin", "]", "[", "\"ticker\"", "]", ",", "to_coin_address", "=", "destination", ",", ")", "# \"sendfrom\" commands", "else", ":", "self", ".", "logger", ".", "debug", "(", "self", ".", "gettransaction", "(", "outcome", ")", ")", "confirmations", "=", "self", ".", "gettransaction", "(", "outcome", ")", "[", "\"confirmations\"", "]", "last_confirmation", "=", "datetime", ".", "now", "(", ")", "if", "confirmations", "else", "None", "tx", "=", "db", ".", "Transaction", "(", "txtype", "=", "\"sendfrom\"", ",", "from_user_id", "=", "origin", ",", "txhash", "=", "outcome", ",", "txdate", "=", "datetime", ".", "now", "(", ")", ",", "amount", "=", "amount", ",", "currency", "=", "COINS", "[", "self", ".", "coin", "]", "[", "\"ticker\"", "]", ",", "to_coin_address", "=", "destination", ",", "confirmations", "=", "confirmations", ",", "last_confirmation", "=", "last_confirmation", ")", "db", ".", "session", ".", "add", "(", "tx", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "outcome" ]
Records a transaction in the database. Args: origin (str): user_id of the sender destination (str): coin address or user_id of the recipient amount (str, Decimal, number): amount to send outcome (str, bool): the transaction hash if this is a "sendfrom" transaction; for "move", True if successful, False otherwise destination_id (str): the destination account label ("move" only) Returns: str or bool: the outcome (input) argument
[ "Records", "a", "transaction", "in", "the", "database", "." ]
python
train
36.895833
santoshphilip/eppy
eppy/json_functions.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/json_functions.py#L37-L51
def updateidf(idf, dct): """update idf using dct""" for key in list(dct.keys()): if key.startswith('idf.'): idftag, objkey, objname, field = key2elements(key) if objname == '': try: idfobj = idf.idfobjects[objkey.upper()][0] except IndexError as e: idfobj = idf.newidfobject(objkey.upper()) else: idfobj = idf.getobject(objkey.upper(), objname) if idfobj == None: idfobj = idf.newidfobject(objkey.upper(), Name=objname) idfobj[field] = dct[key]
[ "def", "updateidf", "(", "idf", ",", "dct", ")", ":", "for", "key", "in", "list", "(", "dct", ".", "keys", "(", ")", ")", ":", "if", "key", ".", "startswith", "(", "'idf.'", ")", ":", "idftag", ",", "objkey", ",", "objname", ",", "field", "=", "key2elements", "(", "key", ")", "if", "objname", "==", "''", ":", "try", ":", "idfobj", "=", "idf", ".", "idfobjects", "[", "objkey", ".", "upper", "(", ")", "]", "[", "0", "]", "except", "IndexError", "as", "e", ":", "idfobj", "=", "idf", ".", "newidfobject", "(", "objkey", ".", "upper", "(", ")", ")", "else", ":", "idfobj", "=", "idf", ".", "getobject", "(", "objkey", ".", "upper", "(", ")", ",", "objname", ")", "if", "idfobj", "==", "None", ":", "idfobj", "=", "idf", ".", "newidfobject", "(", "objkey", ".", "upper", "(", ")", ",", "Name", "=", "objname", ")", "idfobj", "[", "field", "]", "=", "dct", "[", "key", "]" ]
update idf using dct
[ "update", "idf", "using", "dct" ]
python
train
41.2
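The update dictionary consumed above is keyed by dotted strings that key2elements() splits into an object key, object name and field. The layout sketched below ("idf.<object key>.<object name>.<field>") is inferred from that call rather than confirmed by the source, and the object and field names are hypothetical:

dct = {
    # empty object name -> fall back to the first (or a new) object of that type
    "idf.VERSION..Version_Identifier": "8.9",
    "idf.BUILDING.Main Building.North_Axis": 30.0,
}
# updateidf(idf, dct)  # idf would be an eppy IDF instance loaded elsewhere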
threeML/astromodels
astromodels/xspec/factory.py
https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/xspec/factory.py#L38-L70
def find_model_dat(): """ Find the file containing the definition of all the models in Xspec (model.dat) and return its path """ # model.dat is in $HEADAS/../spectral headas_env = os.environ.get("HEADAS") assert headas_env is not None, ("You need to setup the HEADAS variable before importing this module." " See Heasoft documentation.") # Expand all variables and other things like ~ headas_env = os.path.expandvars(os.path.expanduser(headas_env)) # Lazy check that it exists assert os.path.exists(headas_env), "The HEADAS env. variable point to a non-existent directory: %s" % (headas_env) # Get one directory above HEADAS (i.e., $HEADAS/..) inferred_path = os.path.dirname(headas_env) # Now model.dat should be in $HEADAS/../spectral/manager final_path = os.path.join(inferred_path, 'spectral', 'manager', 'model.dat') # Check that model.dat exists assert os.path.exists(final_path), "Cannot find Xspec model definition file %s" % (final_path) return os.path.abspath(final_path)
[ "def", "find_model_dat", "(", ")", ":", "# model.dat is in $HEADAS/../spectral", "headas_env", "=", "os", ".", "environ", ".", "get", "(", "\"HEADAS\"", ")", "assert", "headas_env", "is", "not", "None", ",", "(", "\"You need to setup the HEADAS variable before importing this module.\"", "\" See Heasoft documentation.\"", ")", "# Expand all variables and other things like ~", "headas_env", "=", "os", ".", "path", ".", "expandvars", "(", "os", ".", "path", ".", "expanduser", "(", "headas_env", ")", ")", "# Lazy check that it exists", "assert", "os", ".", "path", ".", "exists", "(", "headas_env", ")", ",", "\"The HEADAS env. variable point to a non-existent directory: %s\"", "%", "(", "headas_env", ")", "# Get one directory above HEADAS (i.e., $HEADAS/..)", "inferred_path", "=", "os", ".", "path", ".", "dirname", "(", "headas_env", ")", "# Now model.dat should be in $HEADAS/../spectral/manager", "final_path", "=", "os", ".", "path", ".", "join", "(", "inferred_path", ",", "'spectral'", ",", "'manager'", ",", "'model.dat'", ")", "# Check that model.dat exists", "assert", "os", ".", "path", ".", "exists", "(", "final_path", ")", ",", "\"Cannot find Xspec model definition file %s\"", "%", "(", "final_path", ")", "return", "os", ".", "path", ".", "abspath", "(", "final_path", ")" ]
Find the file containing the definition of all the models in Xspec (model.dat) and return its path
[ "Find", "the", "file", "containing", "the", "definition", "of", "all", "the", "models", "in", "Xspec", "(", "model", ".", "dat", ")", "and", "return", "its", "path" ]
python
train
32.424242
shoebot/shoebot
lib/database/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/database/__init__.py#L65-L81
def create(self, name, overwrite=True): """Creates an SQLite database file. Creates an SQLite database with the given name. The .box file extension is added automatically. Overwrites any existing database by default. """ self._name = name.rstrip(".db") from os import unlink if overwrite: try: unlink(self._name + ".db") except: pass self._con = sqlite.connect(self._name + ".db") self._cur = self._con.cursor()
[ "def", "create", "(", "self", ",", "name", ",", "overwrite", "=", "True", ")", ":", "self", ".", "_name", "=", "name", ".", "rstrip", "(", "\".db\"", ")", "from", "os", "import", "unlink", "if", "overwrite", ":", "try", ":", "unlink", "(", "self", ".", "_name", "+", "\".db\"", ")", "except", ":", "pass", "self", ".", "_con", "=", "sqlite", ".", "connect", "(", "self", ".", "_name", "+", "\".db\"", ")", "self", ".", "_cur", "=", "self", ".", "_con", ".", "cursor", "(", ")" ]
Creates an SQLite database file. Creates an SQLite database with the given name. The .box file extension is added automatically. Overwrites any existing database by default.
[ "Creates", "an", "SQLite", "database", "file", ".", "Creates", "an", "SQLite", "database", "with", "the", "given", "name", ".", "The", ".", "box", "file", "extension", "is", "added", "automatically", ".", "Overwrites", "any", "existing", "database", "by", "default", "." ]
python
valid
31.941176
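A standalone sketch of the same create-with-overwrite behaviour using only the stdlib sqlite3 module; it also sidesteps the str.rstrip(".db") gotcha in the original, since rstrip strips characters rather than a suffix:

import os
import sqlite3

def create_db(name, overwrite=True):
    base, ext = os.path.splitext(name)   # treat ".db" as a suffix, not a set of characters
    path = base + ".db"
    if overwrite and os.path.exists(path):
        os.unlink(path)                  # drop any existing database file
    con = sqlite3.connect(path)
    return con, con.cursor()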
flowersteam/explauto
explauto/interest_model/tree.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/interest_model/tree.py#L265-L288
def sample_greedy(self): """ Sample a point in the leaf with the max progress. """ if self.leafnode: return self.sample_bounds() else: lp = self.lower.max_leaf_progress gp = self.greater.max_leaf_progress maxp = max(lp, gp) if self.sampling_mode['multiscale']: tp = self.progress if tp > maxp: return self.sample_bounds() if gp == maxp: sampling_mode = self.sampling_mode sampling_mode['mode'] = 'greedy' return self.greater.sample(sampling_mode=sampling_mode) else: sampling_mode = self.sampling_mode sampling_mode['mode'] = 'greedy' return self.lower.sample(sampling_mode=sampling_mode)
[ "def", "sample_greedy", "(", "self", ")", ":", "if", "self", ".", "leafnode", ":", "return", "self", ".", "sample_bounds", "(", ")", "else", ":", "lp", "=", "self", ".", "lower", ".", "max_leaf_progress", "gp", "=", "self", ".", "greater", ".", "max_leaf_progress", "maxp", "=", "max", "(", "lp", ",", "gp", ")", "if", "self", ".", "sampling_mode", "[", "'multiscale'", "]", ":", "tp", "=", "self", ".", "progress", "if", "tp", ">", "maxp", ":", "return", "self", ".", "sample_bounds", "(", ")", "if", "gp", "==", "maxp", ":", "sampling_mode", "=", "self", ".", "sampling_mode", "sampling_mode", "[", "'mode'", "]", "=", "'greedy'", "return", "self", ".", "greater", ".", "sample", "(", "sampling_mode", "=", "sampling_mode", ")", "else", ":", "sampling_mode", "=", "self", ".", "sampling_mode", "sampling_mode", "[", "'mode'", "]", "=", "'greedy'", "return", "self", ".", "lower", ".", "sample", "(", "sampling_mode", "=", "sampling_mode", ")" ]
Sample a point in the leaf with the max progress.
[ "Sample", "a", "point", "in", "the", "leaf", "with", "the", "max", "progress", "." ]
python
train
37.125
dwavesystems/dwave-system
dwave/embedding/chain_breaks.py
https://github.com/dwavesystems/dwave-system/blob/86a1698f15ccd8b0ece0ed868ee49292d3f67f5b/dwave/embedding/chain_breaks.py#L209-L256
def weighted_random(samples, chains): """Determine the sample values of chains by weighed random choice. Args: samples (array_like): Samples as a nS x nV array_like object where nS is the number of samples and nV is the number of variables. The values should all be 0/1 or -1/+1. chains (list[array_like]): List of chains of length nC where nC is the number of chains. Each chain should be an array_like collection of column indices in samples. Returns: tuple: A 2-tuple containing: :obj:`numpy.ndarray`: A nS x nC array of unembedded samples. The array has dtype 'int8'. Where there is a chain break, the value is chosen randomly, weighted by frequency of the chain's value. :obj:`numpy.ndarray`: Equivalent to :code:`np.arange(nS)` because all samples are kept and no samples are added. Examples: This example unembeds samples from a target graph that chains nodes 0 and 1 to represent one source node and nodes 2, 3, and 4 to represent another. The sample has broken chains for both source nodes. >>> import dimod >>> import numpy as np ... >>> chains = [(0, 1), (2, 3, 4)] >>> samples = np.array([[1, 0, 1, 0, 1]], dtype=np.int8) >>> unembedded, idx = dwave.embedding.weighted_random(samples, chains) # doctest: +SKIP >>> unembedded # doctest: +SKIP array([[1, 1]], dtype=int8) >>> idx # doctest: +SKIP array([0, 1]) """ samples = np.asarray(samples) if samples.ndim != 2: raise ValueError("expected samples to be a numpy 2D array") # it sufficies to choose a random index from each chain and use that to construct the matrix idx = [np.random.choice(chain) for chain in chains] num_samples, num_variables = samples.shape return samples[:, idx], np.arange(num_samples)
[ "def", "weighted_random", "(", "samples", ",", "chains", ")", ":", "samples", "=", "np", ".", "asarray", "(", "samples", ")", "if", "samples", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"expected samples to be a numpy 2D array\"", ")", "# it sufficies to choose a random index from each chain and use that to construct the matrix", "idx", "=", "[", "np", ".", "random", ".", "choice", "(", "chain", ")", "for", "chain", "in", "chains", "]", "num_samples", ",", "num_variables", "=", "samples", ".", "shape", "return", "samples", "[", ":", ",", "idx", "]", ",", "np", ".", "arange", "(", "num_samples", ")" ]
Determine the sample values of chains by weighed random choice. Args: samples (array_like): Samples as a nS x nV array_like object where nS is the number of samples and nV is the number of variables. The values should all be 0/1 or -1/+1. chains (list[array_like]): List of chains of length nC where nC is the number of chains. Each chain should be an array_like collection of column indices in samples. Returns: tuple: A 2-tuple containing: :obj:`numpy.ndarray`: A nS x nC array of unembedded samples. The array has dtype 'int8'. Where there is a chain break, the value is chosen randomly, weighted by frequency of the chain's value. :obj:`numpy.ndarray`: Equivalent to :code:`np.arange(nS)` because all samples are kept and no samples are added. Examples: This example unembeds samples from a target graph that chains nodes 0 and 1 to represent one source node and nodes 2, 3, and 4 to represent another. The sample has broken chains for both source nodes. >>> import dimod >>> import numpy as np ... >>> chains = [(0, 1), (2, 3, 4)] >>> samples = np.array([[1, 0, 1, 0, 1]], dtype=np.int8) >>> unembedded, idx = dwave.embedding.weighted_random(samples, chains) # doctest: +SKIP >>> unembedded # doctest: +SKIP array([[1, 1]], dtype=int8) >>> idx # doctest: +SKIP array([0, 1])
[ "Determine", "the", "sample", "values", "of", "chains", "by", "weighed", "random", "choice", "." ]
python
train
40.104167
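The whole unembedding step above reduces to choosing one random column per chain and slicing those columns out; a minimal reproduction of that core with illustrative values:

import numpy as np

samples = np.array([[1, 0, 1, 0, 1]], dtype=np.int8)
chains = [(0, 1), (2, 3, 4)]
idx = [np.random.choice(chain) for chain in chains]  # one randomly chosen column per chain
print(samples[:, idx])  # e.g. [[1 1]] or [[0 0]], depending on the draw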
satellogic/telluric
telluric/util/raster_utils.py
https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/util/raster_utils.py#L261-L360
def warp(source_file, destination_file, dst_crs=None, resolution=None, dimensions=None, src_bounds=None, dst_bounds=None, src_nodata=None, dst_nodata=None, target_aligned_pixels=False, check_invert_proj=True, creation_options=None, resampling=Resampling.cubic, **kwargs): """Warp a raster dataset. Parameters ------------ source_file: str, file object or pathlib.Path object Source file. destination_file: str, file object or pathlib.Path object Destination file. dst_crs: rasterio.crs.CRS, optional Target coordinate reference system. resolution: tuple (x resolution, y resolution) or float, optional Target resolution, in units of target coordinate reference system. dimensions: tuple (width, height), optional Output file size in pixels and lines. src_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output file from source bounds (in source georeferenced units). dst_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output file from destination bounds (in destination georeferenced units). src_nodata: int, float, or nan, optional Manually overridden source nodata. dst_nodata: int, float, or nan, optional Manually overridden destination nodata. target_aligned_pixels: bool, optional Align the output bounds based on the resolution. Default is `False`. check_invert_proj: bool, optional Constrain output to valid coordinate region in dst_crs. Default is `True`. creation_options: dict, optional Custom creation options. resampling: rasterio.enums.Resampling Reprojection resampling method. Default is `cubic`. kwargs: optional Additional arguments passed to transformation function. Returns --------- out: None Output is written to destination. """ with rasterio.Env(CHECK_WITH_INVERT_PROJ=check_invert_proj): with rasterio.open(source_file) as src: out_kwargs = src.profile.copy() dst_crs, dst_transform, dst_width, dst_height = calc_transform( src, dst_crs, resolution, dimensions, src_bounds, dst_bounds, target_aligned_pixels) # If src_nodata is not None, update the dst metadata NODATA # value to src_nodata (will be overridden by dst_nodata if it is not None. if src_nodata is not None: # Update the destination NODATA value out_kwargs.update({ 'nodata': src_nodata }) # Validate a manually set destination NODATA value. if dst_nodata is not None: if src_nodata is None and src.meta['nodata'] is None: raise ValueError('src_nodata must be provided because dst_nodata is not None') else: out_kwargs.update({'nodata': dst_nodata}) out_kwargs.update({ 'crs': dst_crs, 'transform': dst_transform, 'width': dst_width, 'height': dst_height }) # Adjust block size if necessary. if ('blockxsize' in out_kwargs and dst_width < out_kwargs['blockxsize']): del out_kwargs['blockxsize'] if ('blockysize' in out_kwargs and dst_height < out_kwargs['blockysize']): del out_kwargs['blockysize'] if creation_options is not None: out_kwargs.update(**creation_options) with rasterio.open(destination_file, 'w', **out_kwargs) as dst: reproject( source=rasterio.band(src, src.indexes), destination=rasterio.band(dst, dst.indexes), src_transform=src.transform, src_crs=src.crs, src_nodata=src_nodata, dst_transform=out_kwargs['transform'], dst_crs=out_kwargs['crs'], dst_nodata=dst_nodata, resampling=resampling, **kwargs)
[ "def", "warp", "(", "source_file", ",", "destination_file", ",", "dst_crs", "=", "None", ",", "resolution", "=", "None", ",", "dimensions", "=", "None", ",", "src_bounds", "=", "None", ",", "dst_bounds", "=", "None", ",", "src_nodata", "=", "None", ",", "dst_nodata", "=", "None", ",", "target_aligned_pixels", "=", "False", ",", "check_invert_proj", "=", "True", ",", "creation_options", "=", "None", ",", "resampling", "=", "Resampling", ".", "cubic", ",", "*", "*", "kwargs", ")", ":", "with", "rasterio", ".", "Env", "(", "CHECK_WITH_INVERT_PROJ", "=", "check_invert_proj", ")", ":", "with", "rasterio", ".", "open", "(", "source_file", ")", "as", "src", ":", "out_kwargs", "=", "src", ".", "profile", ".", "copy", "(", ")", "dst_crs", ",", "dst_transform", ",", "dst_width", ",", "dst_height", "=", "calc_transform", "(", "src", ",", "dst_crs", ",", "resolution", ",", "dimensions", ",", "src_bounds", ",", "dst_bounds", ",", "target_aligned_pixels", ")", "# If src_nodata is not None, update the dst metadata NODATA", "# value to src_nodata (will be overridden by dst_nodata if it is not None.", "if", "src_nodata", "is", "not", "None", ":", "# Update the destination NODATA value", "out_kwargs", ".", "update", "(", "{", "'nodata'", ":", "src_nodata", "}", ")", "# Validate a manually set destination NODATA value.", "if", "dst_nodata", "is", "not", "None", ":", "if", "src_nodata", "is", "None", "and", "src", ".", "meta", "[", "'nodata'", "]", "is", "None", ":", "raise", "ValueError", "(", "'src_nodata must be provided because dst_nodata is not None'", ")", "else", ":", "out_kwargs", ".", "update", "(", "{", "'nodata'", ":", "dst_nodata", "}", ")", "out_kwargs", ".", "update", "(", "{", "'crs'", ":", "dst_crs", ",", "'transform'", ":", "dst_transform", ",", "'width'", ":", "dst_width", ",", "'height'", ":", "dst_height", "}", ")", "# Adjust block size if necessary.", "if", "(", "'blockxsize'", "in", "out_kwargs", "and", "dst_width", "<", "out_kwargs", "[", "'blockxsize'", "]", ")", ":", "del", "out_kwargs", "[", "'blockxsize'", "]", "if", "(", "'blockysize'", "in", "out_kwargs", "and", "dst_height", "<", "out_kwargs", "[", "'blockysize'", "]", ")", ":", "del", "out_kwargs", "[", "'blockysize'", "]", "if", "creation_options", "is", "not", "None", ":", "out_kwargs", ".", "update", "(", "*", "*", "creation_options", ")", "with", "rasterio", ".", "open", "(", "destination_file", ",", "'w'", ",", "*", "*", "out_kwargs", ")", "as", "dst", ":", "reproject", "(", "source", "=", "rasterio", ".", "band", "(", "src", ",", "src", ".", "indexes", ")", ",", "destination", "=", "rasterio", ".", "band", "(", "dst", ",", "dst", ".", "indexes", ")", ",", "src_transform", "=", "src", ".", "transform", ",", "src_crs", "=", "src", ".", "crs", ",", "src_nodata", "=", "src_nodata", ",", "dst_transform", "=", "out_kwargs", "[", "'transform'", "]", ",", "dst_crs", "=", "out_kwargs", "[", "'crs'", "]", ",", "dst_nodata", "=", "dst_nodata", ",", "resampling", "=", "resampling", ",", "*", "*", "kwargs", ")" ]
Warp a raster dataset. Parameters ------------ source_file: str, file object or pathlib.Path object Source file. destination_file: str, file object or pathlib.Path object Destination file. dst_crs: rasterio.crs.CRS, optional Target coordinate reference system. resolution: tuple (x resolution, y resolution) or float, optional Target resolution, in units of target coordinate reference system. dimensions: tuple (width, height), optional Output file size in pixels and lines. src_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output file from source bounds (in source georeferenced units). dst_bounds: tuple (xmin, ymin, xmax, ymax), optional Georeferenced extent of output file from destination bounds (in destination georeferenced units). src_nodata: int, float, or nan, optional Manually overridden source nodata. dst_nodata: int, float, or nan, optional Manually overridden destination nodata. target_aligned_pixels: bool, optional Align the output bounds based on the resolution. Default is `False`. check_invert_proj: bool, optional Constrain output to valid coordinate region in dst_crs. Default is `True`. creation_options: dict, optional Custom creation options. resampling: rasterio.enums.Resampling Reprojection resampling method. Default is `cubic`. kwargs: optional Additional arguments passed to transformation function. Returns --------- out: None Output is written to destination.
[ "Warp", "a", "raster", "dataset", "." ]
python
train
41.6
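A hypothetical call reprojecting a raster to WGS84; the file names are placeholders and only keyword arguments documented above are used:

from rasterio.crs import CRS
from rasterio.enums import Resampling

warp("input.tif", "reprojected.tif",
     dst_crs=CRS.from_epsg(4326),
     resampling=Resampling.nearest)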
browniebroke/deezer-python
deezer/client.py
https://github.com/browniebroke/deezer-python/blob/fb869c3617045b22e7124e4b783ec1a68d283ac3/deezer/client.py#L128-L138
def get_object( self, object_t, object_id=None, relation=None, parent=None, **kwargs ): """ Actually query the Deezer API to retrieve the object :returns: json dictionary """ url = self.object_url(object_t, object_id, relation, **kwargs) response = self.session.get(url) return self._process_json(response.json(), parent)
[ "def", "get_object", "(", "self", ",", "object_t", ",", "object_id", "=", "None", ",", "relation", "=", "None", ",", "parent", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "object_url", "(", "object_t", ",", "object_id", ",", "relation", ",", "*", "*", "kwargs", ")", "response", "=", "self", ".", "session", ".", "get", "(", "url", ")", "return", "self", ".", "_process_json", "(", "response", ".", "json", "(", ")", ",", "parent", ")" ]
Actually query the Deezer API to retrieve the object :returns: json dictionary
[ "Actually", "query", "the", "Deezer", "API", "to", "retrieve", "the", "object" ]
python
train
34.545455
ArchiveTeam/wpull
wpull/application/tasks/log.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/application/tasks/log.py#L67-L83
def _setup_console_logger(cls, session: AppSession, args, stderr):
        '''Set up the console logger.

        A handler with a formatter is added to the root logger.
        '''
        stream = new_encoded_stream(args, stderr)

        logger = logging.getLogger()
        session.console_log_handler = handler = logging.StreamHandler(stream)

        formatter = logging.Formatter('%(levelname)s %(message)s')
        log_filter = logging.Filter('wpull')

        handler.setFormatter(formatter)
        handler.setLevel(args.verbosity or logging.INFO)
        handler.addFilter(log_filter)
        logger.addHandler(handler)
[ "def", "_setup_console_logger", "(", "cls", ",", "session", ":", "AppSession", ",", "args", ",", "stderr", ")", ":", "stream", "=", "new_encoded_stream", "(", "args", ",", "stderr", ")", "logger", "=", "logging", ".", "getLogger", "(", ")", "session", ".", "console_log_handler", "=", "handler", "=", "logging", ".", "StreamHandler", "(", "stream", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(levelname)s %(message)s'", ")", "log_filter", "=", "logging", ".", "Filter", "(", "'wpull'", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "handler", ".", "setLevel", "(", "args", ".", "verbosity", "or", "logging", ".", "INFO", ")", "handler", ".", "addFilter", "(", "log_filter", ")", "logger", ".", "addHandler", "(", "handler", ")" ]
Set up the console logger.

A handler with a formatter is added to the root logger.
[ "Set", "up", "the", "console", "logger", "." ]
python
train
36.411765
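Stripped of wpull's AppSession plumbing, the wiring above amounts to a stream handler with a formatter, a level and a name filter attached to the root logger; a standalone sketch:

import logging
import sys

handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
handler.setLevel(logging.INFO)              # args.verbosity would normally decide this
handler.addFilter(logging.Filter('wpull'))  # only pass records from the 'wpull' logger tree
logging.getLogger().addHandler(handler)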
ynop/audiomate
audiomate/annotations/label_list.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/annotations/label_list.py#L520-L627
def split(self, cutting_points, shift_times=False, overlap=0.0): """ Split the label-list into x parts and return them as new label-lists. x is defined by the number of cutting-points(``x == len(cutting_points) + 1``) The result is a list of label-lists corresponding to each part. Label-list 0 contains labels between ``0`` and ``cutting_points[0]``. Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``. And so on. Args: cutting_points(list): List of floats defining the points in seconds, where the label-list is splitted. shift_times(bool): If True, start and end-time are shifted in splitted label-lists. So the start is relative to the cutting point and not to the beginning of the original label-list. overlap(float): Amount of overlap in seconds. This amount is subtracted from a start-cutting-point, and added to a end-cutting-point. Returns: list: A list of of: class: `audiomate.annotations.LabelList`. Example: >>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>> Label('c', 11, 15), >>>]) >>> >>> res = ll.split([4.1, 8.9, 12.0]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels [ Label('a', 4.1, 5.0), Label('b', 5.0, 8.9) ] >>> res[2].labels [ Label('b', 8.9, 10.0), Label('c', 11.0, 12.0) ] >>> res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times = True``, the times are adjusted to be relative to the cutting-points for every label-list but the first. >>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>>]) >>> >>> res = ll.split([4.6]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels [ Label('a', 0.0, 0.4), Label('b', 0.4, 5.4) ] """ if len(cutting_points) == 0: raise ValueError('At least one cutting-point is needed!') # we have to loop in sorted order cutting_points = sorted(cutting_points) splits = [] iv_start = 0.0 for i in range(len(cutting_points) + 1): if i < len(cutting_points): iv_end = cutting_points[i] else: iv_end = float('inf') # get all intervals intersecting range intervals = self.label_tree.overlap( iv_start - overlap, iv_end + overlap ) cp_splits = LabelList(idx=self.idx) # Extract labels from intervals with updated times for iv in intervals: label = copy.deepcopy(iv.data) label.start = max(0, iv_start - overlap, label.start) label.end = min(iv_end + overlap, label.end) if shift_times: orig_start = max(0, iv_start - overlap) label.start -= orig_start label.end -= orig_start cp_splits.add(label) splits.append(cp_splits) iv_start = iv_end return splits
[ "def", "split", "(", "self", ",", "cutting_points", ",", "shift_times", "=", "False", ",", "overlap", "=", "0.0", ")", ":", "if", "len", "(", "cutting_points", ")", "==", "0", ":", "raise", "ValueError", "(", "'At least one cutting-point is needed!'", ")", "# we have to loop in sorted order", "cutting_points", "=", "sorted", "(", "cutting_points", ")", "splits", "=", "[", "]", "iv_start", "=", "0.0", "for", "i", "in", "range", "(", "len", "(", "cutting_points", ")", "+", "1", ")", ":", "if", "i", "<", "len", "(", "cutting_points", ")", ":", "iv_end", "=", "cutting_points", "[", "i", "]", "else", ":", "iv_end", "=", "float", "(", "'inf'", ")", "# get all intervals intersecting range", "intervals", "=", "self", ".", "label_tree", ".", "overlap", "(", "iv_start", "-", "overlap", ",", "iv_end", "+", "overlap", ")", "cp_splits", "=", "LabelList", "(", "idx", "=", "self", ".", "idx", ")", "# Extract labels from intervals with updated times", "for", "iv", "in", "intervals", ":", "label", "=", "copy", ".", "deepcopy", "(", "iv", ".", "data", ")", "label", ".", "start", "=", "max", "(", "0", ",", "iv_start", "-", "overlap", ",", "label", ".", "start", ")", "label", ".", "end", "=", "min", "(", "iv_end", "+", "overlap", ",", "label", ".", "end", ")", "if", "shift_times", ":", "orig_start", "=", "max", "(", "0", ",", "iv_start", "-", "overlap", ")", "label", ".", "start", "-=", "orig_start", "label", ".", "end", "-=", "orig_start", "cp_splits", ".", "add", "(", "label", ")", "splits", ".", "append", "(", "cp_splits", ")", "iv_start", "=", "iv_end", "return", "splits" ]
Split the label-list into x parts and return them as new label-lists. x is defined by the number of cutting-points(``x == len(cutting_points) + 1``) The result is a list of label-lists corresponding to each part. Label-list 0 contains labels between ``0`` and ``cutting_points[0]``. Label-list 1 contains labels between ``cutting_points[0]`` and ``cutting_points[1]``. And so on. Args: cutting_points(list): List of floats defining the points in seconds, where the label-list is splitted. shift_times(bool): If True, start and end-time are shifted in splitted label-lists. So the start is relative to the cutting point and not to the beginning of the original label-list. overlap(float): Amount of overlap in seconds. This amount is subtracted from a start-cutting-point, and added to a end-cutting-point. Returns: list: A list of of: class: `audiomate.annotations.LabelList`. Example: >>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>> Label('c', 11, 15), >>>]) >>> >>> res = ll.split([4.1, 8.9, 12.0]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.1)] >>> res[1].labels [ Label('a', 4.1, 5.0), Label('b', 5.0, 8.9) ] >>> res[2].labels [ Label('b', 8.9, 10.0), Label('c', 11.0, 12.0) ] >>> res[3].labels [Label('c', 12.0, 15.0)] If ``shift_times = True``, the times are adjusted to be relative to the cutting-points for every label-list but the first. >>> ll = LabelList(labels=[ >>> Label('a', 0, 5), >>> Label('b', 5, 10), >>>]) >>> >>> res = ll.split([4.6]) >>> len(res) 4 >>> res[0].labels [Label('a', 0.0, 4.6)] >>> res[1].labels [ Label('a', 0.0, 0.4), Label('b', 0.4, 5.4) ]
[ "Split", "the", "label", "-", "list", "into", "x", "parts", "and", "return", "them", "as", "new", "label", "-", "lists", ".", "x", "is", "defined", "by", "the", "number", "of", "cutting", "-", "points", "(", "x", "==", "len", "(", "cutting_points", ")", "+", "1", ")" ]
python
train
33.472222
sdispater/orator
orator/query/grammars/sqlite_grammar.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/query/grammars/sqlite_grammar.py#L26-L64
def compile_insert(self, query, values): """ Compile insert statement into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :param values: The insert values :type values: dict or list :return: The compiled insert :rtype: str """ table = self.wrap_table(query.from__) if not isinstance(values, list): values = [values] # If there is only one row to insert, we just use the normal grammar if len(values) == 1: return super(SQLiteQueryGrammar, self).compile_insert(query, values) names = self.columnize(values[0].keys()) columns = [] # SQLite requires us to build the multi-row insert as a listing of select with # unions joining them together. So we'll build out this list of columns and # then join them all together with select unions to complete the queries. for column in values[0].keys(): columns.append("%s AS %s" % (self.get_marker(), self.wrap(column))) columns = [", ".join(columns)] * len(values) return "INSERT INTO %s (%s) SELECT %s" % ( table, names, " UNION ALL SELECT ".join(columns), )
[ "def", "compile_insert", "(", "self", ",", "query", ",", "values", ")", ":", "table", "=", "self", ".", "wrap_table", "(", "query", ".", "from__", ")", "if", "not", "isinstance", "(", "values", ",", "list", ")", ":", "values", "=", "[", "values", "]", "# If there is only one row to insert, we just use the normal grammar", "if", "len", "(", "values", ")", "==", "1", ":", "return", "super", "(", "SQLiteQueryGrammar", ",", "self", ")", ".", "compile_insert", "(", "query", ",", "values", ")", "names", "=", "self", ".", "columnize", "(", "values", "[", "0", "]", ".", "keys", "(", ")", ")", "columns", "=", "[", "]", "# SQLite requires us to build the multi-row insert as a listing of select with", "# unions joining them together. So we'll build out this list of columns and", "# then join them all together with select unions to complete the queries.", "for", "column", "in", "values", "[", "0", "]", ".", "keys", "(", ")", ":", "columns", ".", "append", "(", "\"%s AS %s\"", "%", "(", "self", ".", "get_marker", "(", ")", ",", "self", ".", "wrap", "(", "column", ")", ")", ")", "columns", "=", "[", "\", \"", ".", "join", "(", "columns", ")", "]", "*", "len", "(", "values", ")", "return", "\"INSERT INTO %s (%s) SELECT %s\"", "%", "(", "table", ",", "names", ",", "\" UNION ALL SELECT \"", ".", "join", "(", "columns", ")", ",", ")" ]
Compile insert statement into SQL :param query: A QueryBuilder instance :type query: QueryBuilder :param values: The insert values :type values: dict or list :return: The compiled insert :rtype: str
[ "Compile", "insert", "statement", "into", "SQL" ]
python
train
31.74359
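For multi-row inserts the grammar above chains SELECTs with UNION ALL instead of using VALUES. Assuming '?' placeholders and double-quote identifier wrapping (both actually depend on get_marker() and wrap(), which are not shown), the emitted SQL would look roughly like this:

rows = [{"name": "alice"}, {"name": "bob"}]  # hypothetical insert payload
select = '? AS "name"'
sql = 'INSERT INTO "users" ("name") SELECT %s' % " UNION ALL SELECT ".join([select] * len(rows))
print(sql)
# INSERT INTO "users" ("name") SELECT ? AS "name" UNION ALL SELECT ? AS "name"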
orb-framework/orb
orb/core/column_types/data.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/column_types/data.py#L80-L96
def dbRestore(self, db_value, context=None):
        """
        Converts a stored database value to Python.

        :param db_value: <variant>
        :param context: <orb.Context>

        :return: <variant>
        """
        if db_value is not None:
            try:
                return rest.unjsonify(db_value)
            except StandardError:
                log.exception('Failed to restore json')
                raise orb.errors.DataStoreError('Failed to restore json.')
        else:
            return db_value
[ "def", "dbRestore", "(", "self", ",", "db_value", ",", "context", "=", "None", ")", ":", "if", "db_value", "is", "not", "None", ":", "try", ":", "return", "rest", ".", "unjsonify", "(", "db_value", ")", "except", "StandardError", ":", "log", ".", "exception", "(", "'Failed to restore json'", ")", "raise", "orb", ".", "errors", ".", "DataStoreError", "(", "'Failed to restore json.'", ")", "else", ":", "return", "db_value" ]
Converts a stored database value to Python.

:param db_value: <variant>
:param context: <orb.Context>

:return: <variant>
[ "Converts", "a", "stored", "database", "value", "to", "Python", "." ]
python
train
30.058824
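A simplified stdlib equivalent of the restore above, assuming rest.unjsonify() behaves roughly like json.loads; orb's DataStoreError is replaced by a plain RuntimeError here:

import json

def db_restore(db_value):
    if db_value is None:
        return None
    try:
        return json.loads(db_value)
    except ValueError as exc:
        raise RuntimeError('Failed to restore json.') from exc

print(db_restore('{"a": 1}'))  # {'a': 1}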