Column schema (name: type, observed range across rows):
repo: string, length 7 to 55
path: string, length 4 to 223
url: string, length 87 to 315
code: string, length 75 to 104k
code_tokens: list
docstring: string, length 1 to 46.9k
docstring_tokens: list
language: string, 1 distinct value
partition: string, 3 distinct values
avg_line_len: float64, 7.91 to 980
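The rows that follow are code-search records, one Python function per row, using the columns above. As a minimal sketch of how such a row could be typed and processed in Python: the class name CodeSearchRecord, the helper average_line_length, and the assumption that avg_line_len is the mean number of characters per line of the code field are illustrative choices, not definitions stated anywhere in the dump.

from statistics import mean
from typing import List, TypedDict


class CodeSearchRecord(TypedDict):
    """One row of the dump, using the column names listed above."""
    repo: str
    path: str
    url: str
    code: str
    code_tokens: List[str]
    docstring: str
    docstring_tokens: List[str]
    language: str        # only one distinct value appears in this dump: "python"
    partition: str       # one of the three splits: "train", "valid", "test"
    avg_line_len: float


def average_line_length(code: str) -> float:
    """Assumed definition of avg_line_len: mean character count per line of `code`."""
    lines = code.splitlines() or [""]
    return float(mean(len(line) for line in lines))

Whether the published avg_line_len values count newline characters or use a slightly different formula cannot be verified from the dump alone, so this helper should be read as an approximation of that column.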
atlassian-api/atlassian-python-api
atlassian/jira.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L1167-L1174
def delete_agile_board(self, board_id): """ Delete agile board by id :param board_id: :return: """ url = 'rest/agile/1.0/board/{}'.format(str(board_id)) return self.delete(url)
[ "def", "delete_agile_board", "(", "self", ",", "board_id", ")", ":", "url", "=", "'rest/agile/1.0/board/{}'", ".", "format", "(", "str", "(", "board_id", ")", ")", "return", "self", ".", "delete", "(", "url", ")" ]
Delete agile board by id :param board_id: :return:
[ "Delete", "agile", "board", "by", "id", ":", "param", "board_id", ":", ":", "return", ":" ]
python
train
28.125
sassoftware/sas_kernel
sas_kernel/kernel.py
https://github.com/sassoftware/sas_kernel/blob/ed63dceb9d1d51157b465f4892ffb793c1c32307/sas_kernel/kernel.py#L131-L179
def do_execute_direct(self, code: str, silent: bool = False) -> [str, dict]: """ This is the main method that takes code from the Jupyter cell and submits it to the SAS server :param code: code from the cell :param silent: :return: str with either the log or list """ if not code.strip(): return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} if self.mva is None: self._allow_stdin = True self._start_sas() if self.lst_len < 0: self._get_lst_len() if code.startswith('Obfuscated SAS Code'): logger.debug("decoding string") tmp1 = code.split() decode = base64.b64decode(tmp1[-1]) code = decode.decode('utf-8') if code.startswith('showSASLog_11092015') == False and code.startswith("CompleteshowSASLog_11092015") == False: logger.debug("code type: " + str(type(code))) logger.debug("code length: " + str(len(code))) logger.debug("code string: " + code) if code.startswith("/*SASKernelTest*/"): res = self.mva.submit(code, "text") else: res = self.mva.submit(code, prompt=self.promptDict) self.promptDict = {} if res['LOG'].find("SAS process has terminated unexpectedly") > -1: print(res['LOG'], '\n' "Restarting SAS session on your behalf") self.do_shutdown(True) return res['LOG'] output = res['LST'] log = res['LOG'] return self._which_display(log, output) elif code.startswith("CompleteshowSASLog_11092015") == True and code.startswith('showSASLog_11092015') == False: full_log = highlight(self.mva.saslog(), SASLogLexer(), HtmlFormatter(full=True, style=SASLogStyle, lineseparator="<br>", title="Full SAS Log")) return full_log.replace('\n', ' ') else: return self.cachedlog.replace('\n', ' ')
[ "def", "do_execute_direct", "(", "self", ",", "code", ":", "str", ",", "silent", ":", "bool", "=", "False", ")", "->", "[", "str", ",", "dict", "]", ":", "if", "not", "code", ".", "strip", "(", ")", ":", "return", "{", "'status'", ":", "'ok'", ",", "'execution_count'", ":", "self", ".", "execution_count", ",", "'payload'", ":", "[", "]", ",", "'user_expressions'", ":", "{", "}", "}", "if", "self", ".", "mva", "is", "None", ":", "self", ".", "_allow_stdin", "=", "True", "self", ".", "_start_sas", "(", ")", "if", "self", ".", "lst_len", "<", "0", ":", "self", ".", "_get_lst_len", "(", ")", "if", "code", ".", "startswith", "(", "'Obfuscated SAS Code'", ")", ":", "logger", ".", "debug", "(", "\"decoding string\"", ")", "tmp1", "=", "code", ".", "split", "(", ")", "decode", "=", "base64", ".", "b64decode", "(", "tmp1", "[", "-", "1", "]", ")", "code", "=", "decode", ".", "decode", "(", "'utf-8'", ")", "if", "code", ".", "startswith", "(", "'showSASLog_11092015'", ")", "==", "False", "and", "code", ".", "startswith", "(", "\"CompleteshowSASLog_11092015\"", ")", "==", "False", ":", "logger", ".", "debug", "(", "\"code type: \"", "+", "str", "(", "type", "(", "code", ")", ")", ")", "logger", ".", "debug", "(", "\"code length: \"", "+", "str", "(", "len", "(", "code", ")", ")", ")", "logger", ".", "debug", "(", "\"code string: \"", "+", "code", ")", "if", "code", ".", "startswith", "(", "\"/*SASKernelTest*/\"", ")", ":", "res", "=", "self", ".", "mva", ".", "submit", "(", "code", ",", "\"text\"", ")", "else", ":", "res", "=", "self", ".", "mva", ".", "submit", "(", "code", ",", "prompt", "=", "self", ".", "promptDict", ")", "self", ".", "promptDict", "=", "{", "}", "if", "res", "[", "'LOG'", "]", ".", "find", "(", "\"SAS process has terminated unexpectedly\"", ")", ">", "-", "1", ":", "print", "(", "res", "[", "'LOG'", "]", ",", "'\\n'", "\"Restarting SAS session on your behalf\"", ")", "self", ".", "do_shutdown", "(", "True", ")", "return", "res", "[", "'LOG'", "]", "output", "=", "res", "[", "'LST'", "]", "log", "=", "res", "[", "'LOG'", "]", "return", "self", ".", "_which_display", "(", "log", ",", "output", ")", "elif", "code", ".", "startswith", "(", "\"CompleteshowSASLog_11092015\"", ")", "==", "True", "and", "code", ".", "startswith", "(", "'showSASLog_11092015'", ")", "==", "False", ":", "full_log", "=", "highlight", "(", "self", ".", "mva", ".", "saslog", "(", ")", ",", "SASLogLexer", "(", ")", ",", "HtmlFormatter", "(", "full", "=", "True", ",", "style", "=", "SASLogStyle", ",", "lineseparator", "=", "\"<br>\"", ",", "title", "=", "\"Full SAS Log\"", ")", ")", "return", "full_log", ".", "replace", "(", "'\\n'", ",", "' '", ")", "else", ":", "return", "self", ".", "cachedlog", ".", "replace", "(", "'\\n'", ",", "' '", ")" ]
This is the main method that takes code from the Jupyter cell and submits it to the SAS server :param code: code from the cell :param silent: :return: str with either the log or list
[ "This", "is", "the", "main", "method", "that", "takes", "code", "from", "the", "Jupyter", "cell", "and", "submits", "it", "to", "the", "SAS", "server" ]
python
train
43.734694
internetarchive/doublethink
doublethink/services.py
https://github.com/internetarchive/doublethink/blob/f7fc7da725c9b572d473c717b3dad9af98a7a2b4/doublethink/services.py#L163-L173
def unregister(self, id): ''' Remove the service with id `id` from the service registry. ''' result = self.rr.table(self.table).get(id).delete().run() if result != { 'deleted':1, 'errors':0,'inserted':0, 'replaced':0,'skipped':0,'unchanged':0}: self.logger.warn( 'unexpected result attempting to delete id=%s from ' 'rethinkdb services table: %s', id, result)
[ "def", "unregister", "(", "self", ",", "id", ")", ":", "result", "=", "self", ".", "rr", ".", "table", "(", "self", ".", "table", ")", ".", "get", "(", "id", ")", ".", "delete", "(", ")", ".", "run", "(", ")", "if", "result", "!=", "{", "'deleted'", ":", "1", ",", "'errors'", ":", "0", ",", "'inserted'", ":", "0", ",", "'replaced'", ":", "0", ",", "'skipped'", ":", "0", ",", "'unchanged'", ":", "0", "}", ":", "self", ".", "logger", ".", "warn", "(", "'unexpected result attempting to delete id=%s from '", "'rethinkdb services table: %s'", ",", "id", ",", "result", ")" ]
Remove the service with id `id` from the service registry.
[ "Remove", "the", "service", "with", "id", "id", "from", "the", "service", "registry", "." ]
python
train
43
RedFantom/ttkwidgets
ttkwidgets/timeline.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/timeline.py#L817-L824
def _left_click(self, event): """Function bound to left click event for marker canvas""" self.update_active() iid = self.current_iid if iid is None: return args = (iid, event.x_root, event.y_root) self.call_callbacks(iid, "left_callback", args)
[ "def", "_left_click", "(", "self", ",", "event", ")", ":", "self", ".", "update_active", "(", ")", "iid", "=", "self", ".", "current_iid", "if", "iid", "is", "None", ":", "return", "args", "=", "(", "iid", ",", "event", ".", "x_root", ",", "event", ".", "y_root", ")", "self", ".", "call_callbacks", "(", "iid", ",", "\"left_callback\"", ",", "args", ")" ]
Function bound to left click event for marker canvas
[ "Function", "bound", "to", "left", "click", "event", "for", "marker", "canvas" ]
python
train
37.125
cloudtools/stacker
stacker/actions/build.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/actions/build.py#L62-L76
def should_submit(stack): """Tests whether a stack should be submitted to CF for update/create Args: stack (:class:`stacker.stack.Stack`): The stack object to check. Returns: bool: If the stack should be submitted, return True. """ if stack.enabled: return True logger.debug("Stack %s is not enabled. Skipping.", stack.name) return False
[ "def", "should_submit", "(", "stack", ")", ":", "if", "stack", ".", "enabled", ":", "return", "True", "logger", ".", "debug", "(", "\"Stack %s is not enabled. Skipping.\"", ",", "stack", ".", "name", ")", "return", "False" ]
Tests whether a stack should be submitted to CF for update/create Args: stack (:class:`stacker.stack.Stack`): The stack object to check. Returns: bool: If the stack should be submitted, return True.
[ "Tests", "whether", "a", "stack", "should", "be", "submitted", "to", "CF", "for", "update", "/", "create" ]
python
train
25.333333
SuperCowPowers/workbench
workbench/server/els_indexer.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/els_indexer.py#L29-L50
def index_data(self, data, index_name, doc_type): """Take an arbitrary dictionary of data and index it with ELS. Args: data: data to be Indexed. Should be a dictionary. index_name: Name of the index. doc_type: The type of the document. Raises: RuntimeError: When the Indexing fails. """ # Index the data (which needs to be a dict/object) if it's not # we're going to toss an exception if not isinstance(data, dict): raise RuntimeError('Index failed, data needs to be a dict!') try: self.els_search.index(index=index_name, doc_type=doc_type, body=data) except Exception, error: print 'Index failed: %s' % str(error) raise RuntimeError('Index failed: %s' % str(error))
[ "def", "index_data", "(", "self", ",", "data", ",", "index_name", ",", "doc_type", ")", ":", "# Index the data (which needs to be a dict/object) if it's not", "# we're going to toss an exception", "if", "not", "isinstance", "(", "data", ",", "dict", ")", ":", "raise", "RuntimeError", "(", "'Index failed, data needs to be a dict!'", ")", "try", ":", "self", ".", "els_search", ".", "index", "(", "index", "=", "index_name", ",", "doc_type", "=", "doc_type", ",", "body", "=", "data", ")", "except", "Exception", ",", "error", ":", "print", "'Index failed: %s'", "%", "str", "(", "error", ")", "raise", "RuntimeError", "(", "'Index failed: %s'", "%", "str", "(", "error", ")", ")" ]
Take an arbitrary dictionary of data and index it with ELS. Args: data: data to be Indexed. Should be a dictionary. index_name: Name of the index. doc_type: The type of the document. Raises: RuntimeError: When the Indexing fails.
[ "Take", "an", "arbitrary", "dictionary", "of", "data", "and", "index", "it", "with", "ELS", "." ]
python
train
37.136364
rocky/python3-trepan
trepan/processor/parse/scanner.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/processor/parse/scanner.py#L107-L111
def t_direction(self, s): r'^[+-]$' # Used in the "list" command self.add_token('DIRECTION', s) self.pos += len(s)
[ "def", "t_direction", "(", "self", ",", "s", ")", ":", "# Used in the \"list\" command", "self", ".", "add_token", "(", "'DIRECTION'", ",", "s", ")", "self", ".", "pos", "+=", "len", "(", "s", ")" ]
r'^[+-]$
[ "r", "^", "[", "+", "-", "]", "$" ]
python
test
28.4
darothen/xbpch
xbpch/uff.py
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/uff.py#L110-L123
def skipline(self): """ Skip the next line and returns position and size of line. Raises IOError if pre- and suffix of line do not match. """ position = self.tell() prefix = self._fix() self.seek(prefix, 1) # skip content suffix = self._fix() if prefix != suffix: raise IOError(_FIX_ERROR) return position, prefix
[ "def", "skipline", "(", "self", ")", ":", "position", "=", "self", ".", "tell", "(", ")", "prefix", "=", "self", ".", "_fix", "(", ")", "self", ".", "seek", "(", "prefix", ",", "1", ")", "# skip content", "suffix", "=", "self", ".", "_fix", "(", ")", "if", "prefix", "!=", "suffix", ":", "raise", "IOError", "(", "_FIX_ERROR", ")", "return", "position", ",", "prefix" ]
Skip the next line and returns position and size of line. Raises IOError if pre- and suffix of line do not match.
[ "Skip", "the", "next", "line", "and", "returns", "position", "and", "size", "of", "line", ".", "Raises", "IOError", "if", "pre", "-", "and", "suffix", "of", "line", "do", "not", "match", "." ]
python
train
28.214286
deepmipt/DeepPavlov
deeppavlov/metrics/accuracy.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/accuracy.py#L24-L37
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float: """ Calculate accuracy in terms of absolute coincidence Args: y_true: array of true values y_predicted: array of predicted values Returns: portion of absolutely coincidental samples """ examples_len = len(y_true) correct = sum([y1 == y2 for y1, y2 in zip(y_true, y_predicted)]) return correct / examples_len if examples_len else 0
[ "def", "accuracy", "(", "y_true", ":", "[", "list", ",", "np", ".", "ndarray", "]", ",", "y_predicted", ":", "[", "list", ",", "np", ".", "ndarray", "]", ")", "->", "float", ":", "examples_len", "=", "len", "(", "y_true", ")", "correct", "=", "sum", "(", "[", "y1", "==", "y2", "for", "y1", ",", "y2", "in", "zip", "(", "y_true", ",", "y_predicted", ")", "]", ")", "return", "correct", "/", "examples_len", "if", "examples_len", "else", "0" ]
Calculate accuracy in terms of absolute coincidence Args: y_true: array of true values y_predicted: array of predicted values Returns: portion of absolutely coincidental samples
[ "Calculate", "accuracy", "in", "terms", "of", "absolute", "coincidence" ]
python
test
32.785714
controversial/livejson
livejson.py
https://github.com/controversial/livejson/blob/91021de60903d2d8b2cfb7d8d8910bcf27ec003b/livejson.py#L214-L233
def data(self, data): """Overwrite the file with new data. You probably shouldn't do this yourself, it's easy to screw up your whole file with this.""" if self.is_caching: self.cache = data else: fcontents = self.file_contents with open(self.path, "w") as f: try: # Write the file. Keep user settings about indentation, etc indent = self.indent if self.pretty else None json.dump(data, f, sort_keys=self.sort_keys, indent=indent) except Exception as e: # Rollback to prevent data loss f.seek(0) f.truncate() f.write(fcontents) # And re-raise the exception raise e self._updateType()
[ "def", "data", "(", "self", ",", "data", ")", ":", "if", "self", ".", "is_caching", ":", "self", ".", "cache", "=", "data", "else", ":", "fcontents", "=", "self", ".", "file_contents", "with", "open", "(", "self", ".", "path", ",", "\"w\"", ")", "as", "f", ":", "try", ":", "# Write the file. Keep user settings about indentation, etc", "indent", "=", "self", ".", "indent", "if", "self", ".", "pretty", "else", "None", "json", ".", "dump", "(", "data", ",", "f", ",", "sort_keys", "=", "self", ".", "sort_keys", ",", "indent", "=", "indent", ")", "except", "Exception", "as", "e", ":", "# Rollback to prevent data loss", "f", ".", "seek", "(", "0", ")", "f", ".", "truncate", "(", ")", "f", ".", "write", "(", "fcontents", ")", "# And re-raise the exception", "raise", "e", "self", ".", "_updateType", "(", ")" ]
Overwrite the file with new data. You probably shouldn't do this yourself, it's easy to screw up your whole file with this.
[ "Overwrite", "the", "file", "with", "new", "data", ".", "You", "probably", "shouldn", "t", "do", "this", "yourself", "it", "s", "easy", "to", "screw", "up", "your", "whole", "file", "with", "this", "." ]
python
valid
42.55
apple/turicreate
src/unity/python/turicreate/toolkits/_decision_tree.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_decision_tree.py#L80-L123
def get_decision(self, child, is_missing = False): """ Get the decision from this node to a child node. Parameters ---------- child: Node A child node of this node. Returns ------- dict: A dictionary that describes how to get from this node to the child node. """ # Child does exist and there is a path to the child. value = self.value feature = self.split_feature_column index = self.split_feature_index if not is_missing: if self.left_id == child.node_id: if self.node_type in ["float", "integer"]: sign = "<" else: sign = "=" else: if self.node_type in ["float", "integer"]: sign = ">=" else: sign = "!=" else: sign = "missing" value = None return { "node_id" : self.node_id, "node_type" : self.node_type, "feature" : feature, "index" : index, "sign" : sign, "value" : value, "child_id" : child.node_id, "is_missing" : is_missing }
[ "def", "get_decision", "(", "self", ",", "child", ",", "is_missing", "=", "False", ")", ":", "# Child does exist and there is a path to the child.", "value", "=", "self", ".", "value", "feature", "=", "self", ".", "split_feature_column", "index", "=", "self", ".", "split_feature_index", "if", "not", "is_missing", ":", "if", "self", ".", "left_id", "==", "child", ".", "node_id", ":", "if", "self", ".", "node_type", "in", "[", "\"float\"", ",", "\"integer\"", "]", ":", "sign", "=", "\"<\"", "else", ":", "sign", "=", "\"=\"", "else", ":", "if", "self", ".", "node_type", "in", "[", "\"float\"", ",", "\"integer\"", "]", ":", "sign", "=", "\">=\"", "else", ":", "sign", "=", "\"!=\"", "else", ":", "sign", "=", "\"missing\"", "value", "=", "None", "return", "{", "\"node_id\"", ":", "self", ".", "node_id", ",", "\"node_type\"", ":", "self", ".", "node_type", ",", "\"feature\"", ":", "feature", ",", "\"index\"", ":", "index", ",", "\"sign\"", ":", "sign", ",", "\"value\"", ":", "value", ",", "\"child_id\"", ":", "child", ".", "node_id", ",", "\"is_missing\"", ":", "is_missing", "}" ]
Get the decision from this node to a child node. Parameters ---------- child: Node A child node of this node. Returns ------- dict: A dictionary that describes how to get from this node to the child node.
[ "Get", "the", "decision", "from", "this", "node", "to", "a", "child", "node", "." ]
python
train
28.727273
h2oai/h2o-3
scripts/run.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/scripts/run.py#L645-L651
def get_port(self): """ Return a port to use to talk to this cluster. """ if len(self.client_nodes) > 0: node = self.client_nodes[0] else: node = self.nodes[0] return node.get_port()
[ "def", "get_port", "(", "self", ")", ":", "if", "len", "(", "self", ".", "client_nodes", ")", ">", "0", ":", "node", "=", "self", ".", "client_nodes", "[", "0", "]", "else", ":", "node", "=", "self", ".", "nodes", "[", "0", "]", "return", "node", ".", "get_port", "(", ")" ]
Return a port to use to talk to this cluster.
[ "Return", "a", "port", "to", "use", "to", "talk", "to", "this", "cluster", "." ]
python
test
33.142857
google/grr
grr/server/grr_response_server/databases/mem_clients.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_clients.py#L416-L419
def CountClientPlatformReleasesByLabel(self, day_buckets): """Computes client-activity stats for OS-release strings in the DB.""" return self._CountClientStatisticByLabel( day_buckets, lambda client_info: client_info.last_snapshot.Uname())
[ "def", "CountClientPlatformReleasesByLabel", "(", "self", ",", "day_buckets", ")", ":", "return", "self", ".", "_CountClientStatisticByLabel", "(", "day_buckets", ",", "lambda", "client_info", ":", "client_info", ".", "last_snapshot", ".", "Uname", "(", ")", ")" ]
Computes client-activity stats for OS-release strings in the DB.
[ "Computes", "client", "-", "activity", "stats", "for", "OS", "-", "release", "strings", "in", "the", "DB", "." ]
python
train
63
OpenTreeOfLife/peyotl
peyotl/git_storage/sharded_doc_store.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/git_storage/sharded_doc_store.py#L38-L42
def get_public_url(self, doc_id, branch='master'): """Returns a GitHub URL for the doc in question (study, collection, ...) """ name, path_frag = self.get_repo_and_path_fragment(doc_id) return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag
[ "def", "get_public_url", "(", "self", ",", "doc_id", ",", "branch", "=", "'master'", ")", ":", "name", ",", "path_frag", "=", "self", ".", "get_repo_and_path_fragment", "(", "doc_id", ")", "return", "'https://raw.githubusercontent.com/OpenTreeOfLife/'", "+", "name", "+", "'/'", "+", "branch", "+", "'/'", "+", "path_frag" ]
Returns a GitHub URL for the doc in question (study, collection, ...)
[ "Returns", "a", "GitHub", "URL", "for", "the", "doc", "in", "question", "(", "study", "collection", "...", ")" ]
python
train
62.4
coghost/izen
izen/helper.py
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/helper.py#L708-L726
def check_sum(buf, csum): """ 检查数据的校验和 :param buf: :type buf: :param csum: :type csum: :return: :rtype: """ csum = csum.encode('utf-8') _csum = ord(buf[0]) for x in buf[1:]: _csum ^= ord(x) _csum = binascii.b2a_hex(chr(_csum).encode('utf-8')).upper() if _csum != csum: sys.stderr.write('csum not matched: ({} {})\n'.format(_csum, csum)) return _csum == csum
[ "def", "check_sum", "(", "buf", ",", "csum", ")", ":", "csum", "=", "csum", ".", "encode", "(", "'utf-8'", ")", "_csum", "=", "ord", "(", "buf", "[", "0", "]", ")", "for", "x", "in", "buf", "[", "1", ":", "]", ":", "_csum", "^=", "ord", "(", "x", ")", "_csum", "=", "binascii", ".", "b2a_hex", "(", "chr", "(", "_csum", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "upper", "(", ")", "if", "_csum", "!=", "csum", ":", "sys", ".", "stderr", ".", "write", "(", "'csum not matched: ({} {})\\n'", ".", "format", "(", "_csum", ",", "csum", ")", ")", "return", "_csum", "==", "csum" ]
检查数据的校验和 :param buf: :type buf: :param csum: :type csum: :return: :rtype:
[ "检查数据的校验和", ":", "param", "buf", ":", ":", "type", "buf", ":", ":", "param", "csum", ":", ":", "type", "csum", ":", ":", "return", ":", ":", "rtype", ":" ]
python
train
21.894737
Drekin/win-unicode-console
win_unicode_console/raw_input.py
https://github.com/Drekin/win-unicode-console/blob/6beb5df5219ac1e5495b415d286e634a27720b2e/win_unicode_console/raw_input.py#L71-L100
def raw_input(prompt=""): """raw_input([prompt]) -> string Read a string from standard input. The trailing newline is stripped. If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError. On Unix, GNU readline is used if enabled. The prompt string, if given, is printed without a trailing newline before reading.""" sys.stderr.flush() tty = STDIN.is_a_TTY() and STDOUT.is_a_TTY() if RETURN_UNICODE: if tty: line_bytes = readline(prompt) line = stdin_decode(line_bytes) else: line = stdio_readline(prompt) else: if tty: line = readline(prompt) else: line_unicode = stdio_readline(prompt) line = stdin_encode(line_unicode) if line: return line[:-1] # strip strailing "\n" else: raise EOFError
[ "def", "raw_input", "(", "prompt", "=", "\"\"", ")", ":", "sys", ".", "stderr", ".", "flush", "(", ")", "tty", "=", "STDIN", ".", "is_a_TTY", "(", ")", "and", "STDOUT", ".", "is_a_TTY", "(", ")", "if", "RETURN_UNICODE", ":", "if", "tty", ":", "line_bytes", "=", "readline", "(", "prompt", ")", "line", "=", "stdin_decode", "(", "line_bytes", ")", "else", ":", "line", "=", "stdio_readline", "(", "prompt", ")", "else", ":", "if", "tty", ":", "line", "=", "readline", "(", "prompt", ")", "else", ":", "line_unicode", "=", "stdio_readline", "(", "prompt", ")", "line", "=", "stdin_encode", "(", "line_unicode", ")", "if", "line", ":", "return", "line", "[", ":", "-", "1", "]", "# strip strailing \"\\n\"", "else", ":", "raise", "EOFError" ]
raw_input([prompt]) -> string Read a string from standard input. The trailing newline is stripped. If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError. On Unix, GNU readline is used if enabled. The prompt string, if given, is printed without a trailing newline before reading.
[ "raw_input", "(", "[", "prompt", "]", ")", "-", ">", "string" ]
python
train
24.233333
Laufire/ec
ec/modules/core.py
https://github.com/Laufire/ec/blob/63e84a1daef9234487d7de538e5da233a7d13071/ec/modules/core.py#L45-L74
def execCommand(Argv, collect_missing): r"""Executes the given task with parameters. """ try: return _execCommand(Argv, collect_missing) except Exception as e: if Settings['errorHandler']: Settings['errorHandler'](e) if Settings['debug']: # #ToDo: Have an option to debug through stderr. The issue is, the way to make pdb.post_mortem, to use stderr, like pdb.set_trace is unknown. import pdb pdb.post_mortem(sys.exc_info()[2]) if not Settings['silent']: # Debug, then log the trace. import traceback etype, value, tb = sys.exc_info() tb = tb.tb_next.tb_next # remove the ec - calls from the traceback, to make it more understandable message = ''.join(traceback.format_exception(etype, value, tb))[:-1] else: if isinstance(e, HandledException): # let the modes handle the HandledException raise e message = str(e) # provide a succinct error message raise HandledException(message)
[ "def", "execCommand", "(", "Argv", ",", "collect_missing", ")", ":", "try", ":", "return", "_execCommand", "(", "Argv", ",", "collect_missing", ")", "except", "Exception", "as", "e", ":", "if", "Settings", "[", "'errorHandler'", "]", ":", "Settings", "[", "'errorHandler'", "]", "(", "e", ")", "if", "Settings", "[", "'debug'", "]", ":", "# #ToDo: Have an option to debug through stderr. The issue is, the way to make pdb.post_mortem, to use stderr, like pdb.set_trace is unknown.\r", "import", "pdb", "pdb", ".", "post_mortem", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "if", "not", "Settings", "[", "'silent'", "]", ":", "# Debug, then log the trace.\r", "import", "traceback", "etype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "tb", "=", "tb", ".", "tb_next", ".", "tb_next", "# remove the ec - calls from the traceback, to make it more understandable\r", "message", "=", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "etype", ",", "value", ",", "tb", ")", ")", "[", ":", "-", "1", "]", "else", ":", "if", "isinstance", "(", "e", ",", "HandledException", ")", ":", "# let the modes handle the HandledException\r", "raise", "e", "message", "=", "str", "(", "e", ")", "# provide a succinct error message\r", "raise", "HandledException", "(", "message", ")" ]
r"""Executes the given task with parameters.
[ "r", "Executes", "the", "given", "task", "with", "parameters", "." ]
python
train
32.8
bitesofcode/projexui
projexui/widgets/xchart/xchartscene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchartscene.py#L27-L38
def addDataset(self, dataset): """ Creates a new dataset instance for this scene. :param dataset | <XChartDataset> :return <XChartDatasetItem> """ item = XChartDatasetItem() self.addItem(item) item.setDataset(dataset) return item
[ "def", "addDataset", "(", "self", ",", "dataset", ")", ":", "item", "=", "XChartDatasetItem", "(", ")", "self", ".", "addItem", "(", "item", ")", "item", ".", "setDataset", "(", "dataset", ")", "return", "item" ]
Creates a new dataset instance for this scene. :param dataset | <XChartDataset> :return <XChartDatasetItem>
[ "Creates", "a", "new", "dataset", "instance", "for", "this", "scene", ".", ":", "param", "dataset", "|", "<XChartDataset", ">", ":", "return", "<XChartDatasetItem", ">" ]
python
train
27.333333
tensorflow/tensorboard
tensorboard/plugins/hparams/backend_context.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/backend_context.py#L152-L192
def _compute_hparam_info_from_values(self, name, values): """Builds an HParamInfo message from the hparam name and list of values. Args: name: string. The hparam name. values: list of google.protobuf.Value messages. The list of values for the hparam. Returns: An api_pb2.HParamInfo message. """ # Figure out the type from the values. # Ignore values whose type is not listed in api_pb2.DataType # If all values have the same type, then that is the type used. # Otherwise, the returned type is DATA_TYPE_STRING. result = api_pb2.HParamInfo(name=name, type=api_pb2.DATA_TYPE_UNSET) distinct_values = set( _protobuf_value_to_string(v) for v in values if _protobuf_value_type(v)) for v in values: v_type = _protobuf_value_type(v) if not v_type: continue if result.type == api_pb2.DATA_TYPE_UNSET: result.type = v_type elif result.type != v_type: result.type = api_pb2.DATA_TYPE_STRING if result.type == api_pb2.DATA_TYPE_STRING: # A string result.type does not change, so we can exit the loop. break # If we couldn't figure out a type, then we can't compute the hparam_info. if result.type == api_pb2.DATA_TYPE_UNSET: return None # If the result is a string, set the domain to be the distinct values if # there aren't too many of them. if (result.type == api_pb2.DATA_TYPE_STRING and len(distinct_values) <= self._max_domain_discrete_len): result.domain_discrete.extend(distinct_values) return result
[ "def", "_compute_hparam_info_from_values", "(", "self", ",", "name", ",", "values", ")", ":", "# Figure out the type from the values.", "# Ignore values whose type is not listed in api_pb2.DataType", "# If all values have the same type, then that is the type used.", "# Otherwise, the returned type is DATA_TYPE_STRING.", "result", "=", "api_pb2", ".", "HParamInfo", "(", "name", "=", "name", ",", "type", "=", "api_pb2", ".", "DATA_TYPE_UNSET", ")", "distinct_values", "=", "set", "(", "_protobuf_value_to_string", "(", "v", ")", "for", "v", "in", "values", "if", "_protobuf_value_type", "(", "v", ")", ")", "for", "v", "in", "values", ":", "v_type", "=", "_protobuf_value_type", "(", "v", ")", "if", "not", "v_type", ":", "continue", "if", "result", ".", "type", "==", "api_pb2", ".", "DATA_TYPE_UNSET", ":", "result", ".", "type", "=", "v_type", "elif", "result", ".", "type", "!=", "v_type", ":", "result", ".", "type", "=", "api_pb2", ".", "DATA_TYPE_STRING", "if", "result", ".", "type", "==", "api_pb2", ".", "DATA_TYPE_STRING", ":", "# A string result.type does not change, so we can exit the loop.", "break", "# If we couldn't figure out a type, then we can't compute the hparam_info.", "if", "result", ".", "type", "==", "api_pb2", ".", "DATA_TYPE_UNSET", ":", "return", "None", "# If the result is a string, set the domain to be the distinct values if", "# there aren't too many of them.", "if", "(", "result", ".", "type", "==", "api_pb2", ".", "DATA_TYPE_STRING", "and", "len", "(", "distinct_values", ")", "<=", "self", ".", "_max_domain_discrete_len", ")", ":", "result", ".", "domain_discrete", ".", "extend", "(", "distinct_values", ")", "return", "result" ]
Builds an HParamInfo message from the hparam name and list of values. Args: name: string. The hparam name. values: list of google.protobuf.Value messages. The list of values for the hparam. Returns: An api_pb2.HParamInfo message.
[ "Builds", "an", "HParamInfo", "message", "from", "the", "hparam", "name", "and", "list", "of", "values", "." ]
python
train
37.878049
fbcotter/py3nvml
py3nvml/py3nvml.py
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L4791-L4826
def nvmlDeviceSetAccountingMode(handle, mode): r""" /** * Enables or disables per process accounting. * * For Kepler &tm; or newer fully supported devices. * Requires root/admin permissions. * * @note This setting is not persistent and will default to disabled after driver unloads. * Enable persistence mode to be sure the setting doesn't switch off to disabled. * * @note Enabling accounting mode has no negative impact on the GPU performance. * * @note Disabling accounting clears all accounting pids information. * * See \ref nvmlDeviceGetAccountingMode * See \ref nvmlDeviceGetAccountingStats * See \ref nvmlDeviceClearAccountingPids * * @param device The identifier of the target device * @param mode The target accounting mode * * @return * - \ref NVML_SUCCESS if the new mode has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode """ fn = _nvmlGetFunctionPointer("nvmlDeviceSetAccountingMode") ret = fn(handle, _nvmlEnableState_t(mode)) _nvmlCheckReturn(ret) return None
[ "def", "nvmlDeviceSetAccountingMode", "(", "handle", ",", "mode", ")", ":", "fn", "=", "_nvmlGetFunctionPointer", "(", "\"nvmlDeviceSetAccountingMode\"", ")", "ret", "=", "fn", "(", "handle", ",", "_nvmlEnableState_t", "(", "mode", ")", ")", "_nvmlCheckReturn", "(", "ret", ")", "return", "None" ]
r""" /** * Enables or disables per process accounting. * * For Kepler &tm; or newer fully supported devices. * Requires root/admin permissions. * * @note This setting is not persistent and will default to disabled after driver unloads. * Enable persistence mode to be sure the setting doesn't switch off to disabled. * * @note Enabling accounting mode has no negative impact on the GPU performance. * * @note Disabling accounting clears all accounting pids information. * * See \ref nvmlDeviceGetAccountingMode * See \ref nvmlDeviceGetAccountingStats * See \ref nvmlDeviceClearAccountingPids * * @param device The identifier of the target device * @param mode The target accounting mode * * @return * - \ref NVML_SUCCESS if the new mode has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode
[ "r", "/", "**", "*", "Enables", "or", "disables", "per", "process", "accounting", ".", "*", "*", "For", "Kepler", "&tm", ";", "or", "newer", "fully", "supported", "devices", ".", "*", "Requires", "root", "/", "admin", "permissions", ".", "*", "*" ]
python
train
46.138889
scraperwiki/dumptruck
dumptruck/dumptruck.py
https://github.com/scraperwiki/dumptruck/blob/ac5855e34d4dffc7e53a13ff925ccabda19604fc/dumptruck/dumptruck.py#L329-L357
def save_var(self, key, value, **kwargs): 'Save one variable to the database.' # Check whether Highwall's variables table exists self.__check_or_create_vars_table() column_type = get_column_type(value) tmp = quote(self.__vars_table_tmp) self.execute(u'DROP TABLE IF EXISTS %s' % tmp, commit = False) # This is vulnerable to injection self.execute(u'CREATE TABLE %s (`value` %s)' % (tmp, column_type), commit = False) # This is ugly self.execute(u'INSERT INTO %s (`value`) VALUES (?)' % tmp, [value], commit = False) table = (quote(self.__vars_table), tmp) params = [key, column_type] self.execute(u''' INSERT OR REPLACE INTO %s (`key`, `type`, `value`) SELECT ? AS key, ? AS type, value FROM %s ''' % table, params) self.execute(u'DROP TABLE %s' % tmp, commit = False) self.__commit_if_necessary(kwargs)
[ "def", "save_var", "(", "self", ",", "key", ",", "value", ",", "*", "*", "kwargs", ")", ":", "# Check whether Highwall's variables table exists", "self", ".", "__check_or_create_vars_table", "(", ")", "column_type", "=", "get_column_type", "(", "value", ")", "tmp", "=", "quote", "(", "self", ".", "__vars_table_tmp", ")", "self", ".", "execute", "(", "u'DROP TABLE IF EXISTS %s'", "%", "tmp", ",", "commit", "=", "False", ")", "# This is vulnerable to injection", "self", ".", "execute", "(", "u'CREATE TABLE %s (`value` %s)'", "%", "(", "tmp", ",", "column_type", ")", ",", "commit", "=", "False", ")", "# This is ugly", "self", ".", "execute", "(", "u'INSERT INTO %s (`value`) VALUES (?)'", "%", "tmp", ",", "[", "value", "]", ",", "commit", "=", "False", ")", "table", "=", "(", "quote", "(", "self", ".", "__vars_table", ")", ",", "tmp", ")", "params", "=", "[", "key", ",", "column_type", "]", "self", ".", "execute", "(", "u'''\nINSERT OR REPLACE INTO %s (`key`, `type`, `value`)\n SELECT\n ? AS key,\n ? AS type,\n value\n FROM %s\n'''", "%", "table", ",", "params", ")", "self", ".", "execute", "(", "u'DROP TABLE %s'", "%", "tmp", ",", "commit", "=", "False", ")", "self", ".", "__commit_if_necessary", "(", "kwargs", ")" ]
Save one variable to the database.
[ "Save", "one", "variable", "to", "the", "database", "." ]
python
train
29.551724
budacom/trading-bots
trading_bots/utils.py
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/utils.py#L40-L42
def truncate(value: Decimal, n_digits: int) -> Decimal: """Truncates a value to a number of decimals places""" return Decimal(math.trunc(value * (10 ** n_digits))) / (10 ** n_digits)
[ "def", "truncate", "(", "value", ":", "Decimal", ",", "n_digits", ":", "int", ")", "->", "Decimal", ":", "return", "Decimal", "(", "math", ".", "trunc", "(", "value", "*", "(", "10", "**", "n_digits", ")", ")", ")", "/", "(", "10", "**", "n_digits", ")" ]
Truncates a value to a number of decimals places
[ "Truncates", "a", "value", "to", "a", "number", "of", "decimals", "places" ]
python
train
62.666667
atlassian-api/atlassian-python-api
atlassian/service_desk.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/service_desk.py#L268-L280
def create_organization(self, name): """ To create an organization Jira administrator global permission or agent permission is required depending on the settings :param name: str :return: Organization data """ log.warning('Creating organization...') url = 'rest/servicedeskapi/organization' data = {'name': name} return self.post(url, headers=self.experimental_headers, data=data)
[ "def", "create_organization", "(", "self", ",", "name", ")", ":", "log", ".", "warning", "(", "'Creating organization...'", ")", "url", "=", "'rest/servicedeskapi/organization'", "data", "=", "{", "'name'", ":", "name", "}", "return", "self", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "data", "=", "data", ")" ]
To create an organization Jira administrator global permission or agent permission is required depending on the settings :param name: str :return: Organization data
[ "To", "create", "an", "organization", "Jira", "administrator", "global", "permission", "or", "agent", "permission", "is", "required", "depending", "on", "the", "settings" ]
python
train
34.615385
BlueBrain/NeuroM
neurom/fst/_neuritefunc.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/fst/_neuritefunc.py#L69-L71
def n_bifurcation_points(neurites, neurite_type=NeuriteType.all): '''number of bifurcation points in a collection of neurites''' return n_sections(neurites, neurite_type=neurite_type, iterator_type=Tree.ibifurcation_point)
[ "def", "n_bifurcation_points", "(", "neurites", ",", "neurite_type", "=", "NeuriteType", ".", "all", ")", ":", "return", "n_sections", "(", "neurites", ",", "neurite_type", "=", "neurite_type", ",", "iterator_type", "=", "Tree", ".", "ibifurcation_point", ")" ]
number of bifurcation points in a collection of neurites
[ "number", "of", "bifurcation", "points", "in", "a", "collection", "of", "neurites" ]
python
train
76
janpipek/physt
physt/binnings.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/binnings.py#L106-L120
def is_consecutive(self, rtol: float = 1.e-5, atol: float = 1.e-8) -> bool: """Whether all bins are in a growing order. Parameters ---------- rtol, atol : numpy tolerance parameters """ if self.inconsecutive_allowed: if self._consecutive is None: if self._numpy_bins is not None: self._consecutive = True self._consecutive = is_consecutive(self.bins, rtol, atol) return self._consecutive else: return True
[ "def", "is_consecutive", "(", "self", ",", "rtol", ":", "float", "=", "1.e-5", ",", "atol", ":", "float", "=", "1.e-8", ")", "->", "bool", ":", "if", "self", ".", "inconsecutive_allowed", ":", "if", "self", ".", "_consecutive", "is", "None", ":", "if", "self", ".", "_numpy_bins", "is", "not", "None", ":", "self", ".", "_consecutive", "=", "True", "self", ".", "_consecutive", "=", "is_consecutive", "(", "self", ".", "bins", ",", "rtol", ",", "atol", ")", "return", "self", ".", "_consecutive", "else", ":", "return", "True" ]
Whether all bins are in a growing order. Parameters ---------- rtol, atol : numpy tolerance parameters
[ "Whether", "all", "bins", "are", "in", "a", "growing", "order", "." ]
python
train
35.733333
csaez/wishlib
wishlib/si/siwrapper.py
https://github.com/csaez/wishlib/blob/c212fa7875006a332a4cefbf69885ced9647bc2f/wishlib/si/siwrapper.py#L108-L126
def update(self): """ This method should be called when you want to ensure all cached attributes are in sync with the actual object attributes at runtime. This happens because attributes could store mutable objects and be modified outside the scope of this class. The most common idiom that isn't automagically caught is mutating a list or dictionary. Lets say 'user' object have an attribute named 'friends' containing a list, calling 'user.friends.append(new_friend)' only get the attribute, SIWrapper isn't aware that the object returned was modified and the cached data is not updated. """ self.holder = siget(self.holder.FullName) # fix dispatch issues for key, value in self.__dict__.iteritems(): key = self.namespace + key if self._validate_key(key): if not self.holder.Parameters(key): self.holder.AddParameter3(key, C.siString) self.holder.Parameters(key).Value = encode(value)
[ "def", "update", "(", "self", ")", ":", "self", ".", "holder", "=", "siget", "(", "self", ".", "holder", ".", "FullName", ")", "# fix dispatch issues", "for", "key", ",", "value", "in", "self", ".", "__dict__", ".", "iteritems", "(", ")", ":", "key", "=", "self", ".", "namespace", "+", "key", "if", "self", ".", "_validate_key", "(", "key", ")", ":", "if", "not", "self", ".", "holder", ".", "Parameters", "(", "key", ")", ":", "self", ".", "holder", ".", "AddParameter3", "(", "key", ",", "C", ".", "siString", ")", "self", ".", "holder", ".", "Parameters", "(", "key", ")", ".", "Value", "=", "encode", "(", "value", ")" ]
This method should be called when you want to ensure all cached attributes are in sync with the actual object attributes at runtime. This happens because attributes could store mutable objects and be modified outside the scope of this class. The most common idiom that isn't automagically caught is mutating a list or dictionary. Lets say 'user' object have an attribute named 'friends' containing a list, calling 'user.friends.append(new_friend)' only get the attribute, SIWrapper isn't aware that the object returned was modified and the cached data is not updated.
[ "This", "method", "should", "be", "called", "when", "you", "want", "to", "ensure", "all", "cached", "attributes", "are", "in", "sync", "with", "the", "actual", "object", "attributes", "at", "runtime", ".", "This", "happens", "because", "attributes", "could", "store", "mutable", "objects", "and", "be", "modified", "outside", "the", "scope", "of", "this", "class", ".", "The", "most", "common", "idiom", "that", "isn", "t", "automagically", "caught", "is", "mutating", "a", "list", "or", "dictionary", ".", "Lets", "say", "user", "object", "have", "an", "attribute", "named", "friends", "containing", "a", "list", "calling", "user", ".", "friends", ".", "append", "(", "new_friend", ")", "only", "get", "the", "attribute", "SIWrapper", "isn", "t", "aware", "that", "the", "object", "returned", "was", "modified", "and", "the", "cached", "data", "is", "not", "updated", "." ]
python
train
55.210526
jaseg/python-mpv
mpv.py
https://github.com/jaseg/python-mpv/blob/7117de4005cc470a45efd9cf2e9657bdf63a9079/mpv.py#L808-L814
def property_observer(self, name): """Function decorator to register a property observer. See ``MPV.observe_property`` for details.""" def wrapper(fun): self.observe_property(name, fun) fun.unobserve_mpv_properties = lambda: self.unobserve_property(name, fun) return fun return wrapper
[ "def", "property_observer", "(", "self", ",", "name", ")", ":", "def", "wrapper", "(", "fun", ")", ":", "self", ".", "observe_property", "(", "name", ",", "fun", ")", "fun", ".", "unobserve_mpv_properties", "=", "lambda", ":", "self", ".", "unobserve_property", "(", "name", ",", "fun", ")", "return", "fun", "return", "wrapper" ]
Function decorator to register a property observer. See ``MPV.observe_property`` for details.
[ "Function", "decorator", "to", "register", "a", "property", "observer", ".", "See", "MPV", ".", "observe_property", "for", "details", "." ]
python
train
48.428571
PmagPy/PmagPy
programs/apwp.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/apwp.py#L13-L117
def main(): """ NAME apwp.py DESCRIPTION returns predicted paleolatitudes, directions and pole latitude/longitude from apparent polar wander paths of Besse and Courtillot (2002). SYNTAX apwp.py [command line options][< filename] OPTIONS -h prints help message and quits -i allows interactive data entry f file: read plate, lat, lon, age data from file -F output_file: write output to output_file -P [NA, SA, AF, IN, EU, AU, ANT, GL] plate -lat LAT specify present latitude (positive = North; negative=South) -lon LON specify present longitude (positive = East, negative=West) -age AGE specify Age in Ma Note: must have all -P, -lat, -lon, -age or none. OUTPUT Age Paleolat. Dec. Inc. Pole_lat. Pole_Long. """ infile,outfile,data,indata="","",[],[] if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-F' in sys.argv: ind=sys.argv.index('-F') outfile=sys.argv[ind+1] out=open(outfile,'w') if '-i' in sys.argv: print("Welcome to paleolatitude calculator\n") while 1: data=[] print("pick a plate: NA, SA, AF, IN, EU, AU, ANT, GL \n cntl-D to quit") try: plate=input("Plate\n").upper() except: print("Goodbye \n") sys.exit() lat=float(input( "Site latitude\n")) lon=float(input(" Site longitude\n")) age=float(input(" Age\n")) data=[plate,lat,lon,age] print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.") print(spitout(data)) elif '-f' in sys.argv: ind=sys.argv.index('-f') infile=sys.argv[ind+1] f=open(infile,'r') inp=f.readlines() elif '-P' in sys.argv: ind=sys.argv.index('-P') plate=sys.argv[ind+1].upper() if '-lat' in sys.argv: ind=sys.argv.index('-lat') lat=float(sys.argv[ind+1]) else: print(main.__doc__) sys.exit() if '-lon' in sys.argv: ind=sys.argv.index('-lon') lon=float(sys.argv[ind+1]) else: print(main.__doc__) sys.exit() if '-age' in sys.argv: ind=sys.argv.index('-age') age=float(sys.argv[ind+1]) else: print(main.__doc__) sys.exit() data=[plate,lat,lon,age] outstring=spitout(data) if outfile=="": print("Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.") print(outstring) else: out.write(outstring) sys.exit() else: inp=sys.stdin.readlines() # read from standard input if len(inp)>0: for line in inp: data=[] rec=line.split() data.append(rec[0]) for k in range(1,4): data.append(float(rec[k])) indata.append(data) if len(indata)>0: for line in indata: outstring=spitout(line) if outfile=="": print(outstring) else: out.write(outstring) else: print('no input data') sys.exit()
[ "def", "main", "(", ")", ":", "infile", ",", "outfile", ",", "data", ",", "indata", "=", "\"\"", ",", "\"\"", ",", "[", "]", ",", "[", "]", "if", "'-h'", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-F'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-F'", ")", "outfile", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "out", "=", "open", "(", "outfile", ",", "'w'", ")", "if", "'-i'", "in", "sys", ".", "argv", ":", "print", "(", "\"Welcome to paleolatitude calculator\\n\"", ")", "while", "1", ":", "data", "=", "[", "]", "print", "(", "\"pick a plate: NA, SA, AF, IN, EU, AU, ANT, GL \\n cntl-D to quit\"", ")", "try", ":", "plate", "=", "input", "(", "\"Plate\\n\"", ")", ".", "upper", "(", ")", "except", ":", "print", "(", "\"Goodbye \\n\"", ")", "sys", ".", "exit", "(", ")", "lat", "=", "float", "(", "input", "(", "\"Site latitude\\n\"", ")", ")", "lon", "=", "float", "(", "input", "(", "\" Site longitude\\n\"", ")", ")", "age", "=", "float", "(", "input", "(", "\" Age\\n\"", ")", ")", "data", "=", "[", "plate", ",", "lat", ",", "lon", ",", "age", "]", "print", "(", "\"Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.\"", ")", "print", "(", "spitout", "(", "data", ")", ")", "elif", "'-f'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "infile", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "f", "=", "open", "(", "infile", ",", "'r'", ")", "inp", "=", "f", ".", "readlines", "(", ")", "elif", "'-P'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-P'", ")", "plate", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", ".", "upper", "(", ")", "if", "'-lat'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-lat'", ")", "lat", "=", "float", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "else", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-lon'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-lon'", ")", "lon", "=", "float", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "else", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-age'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-age'", ")", "age", "=", "float", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "else", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "data", "=", "[", "plate", ",", "lat", ",", "lon", ",", "age", "]", "outstring", "=", "spitout", "(", "data", ")", "if", "outfile", "==", "\"\"", ":", "print", "(", "\"Age Paleolat. Dec. Inc. Pole_lat. 
Pole_Long.\"", ")", "print", "(", "outstring", ")", "else", ":", "out", ".", "write", "(", "outstring", ")", "sys", ".", "exit", "(", ")", "else", ":", "inp", "=", "sys", ".", "stdin", ".", "readlines", "(", ")", "# read from standard input", "if", "len", "(", "inp", ")", ">", "0", ":", "for", "line", "in", "inp", ":", "data", "=", "[", "]", "rec", "=", "line", ".", "split", "(", ")", "data", ".", "append", "(", "rec", "[", "0", "]", ")", "for", "k", "in", "range", "(", "1", ",", "4", ")", ":", "data", ".", "append", "(", "float", "(", "rec", "[", "k", "]", ")", ")", "indata", ".", "append", "(", "data", ")", "if", "len", "(", "indata", ")", ">", "0", ":", "for", "line", "in", "indata", ":", "outstring", "=", "spitout", "(", "line", ")", "if", "outfile", "==", "\"\"", ":", "print", "(", "outstring", ")", "else", ":", "out", ".", "write", "(", "outstring", ")", "else", ":", "print", "(", "'no input data'", ")", "sys", ".", "exit", "(", ")" ]
NAME apwp.py DESCRIPTION returns predicted paleolatitudes, directions and pole latitude/longitude from apparent polar wander paths of Besse and Courtillot (2002). SYNTAX apwp.py [command line options][< filename] OPTIONS -h prints help message and quits -i allows interactive data entry f file: read plate, lat, lon, age data from file -F output_file: write output to output_file -P [NA, SA, AF, IN, EU, AU, ANT, GL] plate -lat LAT specify present latitude (positive = North; negative=South) -lon LON specify present longitude (positive = East, negative=West) -age AGE specify Age in Ma Note: must have all -P, -lat, -lon, -age or none. OUTPUT Age Paleolat. Dec. Inc. Pole_lat. Pole_Long.
[ "NAME", "apwp", ".", "py" ]
python
train
30.314286
rameshg87/pyremotevbox
pyremotevbox/ZSI/generate/wsdl2dispatch.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/wsdl2dispatch.py#L143-L157
def setUpImports(self): '''set import statements ''' i = self.imports print >>i, 'from pyremotevbox.ZSI.schema import GED, GTD' print >>i, 'from pyremotevbox.ZSI.TCcompound import ComplexType, Struct' module = self.getTypesModuleName() package = self.getTypesModulePath() if package: module = '%s.%s' %(package, module) print >>i, 'from %s import *' %(module) print >>i, 'from %s import %s' %(self.base_module_name, self.base_class_name)
[ "def", "setUpImports", "(", "self", ")", ":", "i", "=", "self", ".", "imports", "print", ">>", "i", ",", "'from pyremotevbox.ZSI.schema import GED, GTD'", "print", ">>", "i", ",", "'from pyremotevbox.ZSI.TCcompound import ComplexType, Struct'", "module", "=", "self", ".", "getTypesModuleName", "(", ")", "package", "=", "self", ".", "getTypesModulePath", "(", ")", "if", "package", ":", "module", "=", "'%s.%s'", "%", "(", "package", ",", "module", ")", "print", ">>", "i", ",", "'from %s import *'", "%", "(", "module", ")", "print", ">>", "i", ",", "'from %s import %s'", "%", "(", "self", ".", "base_module_name", ",", "self", ".", "base_class_name", ")" ]
set import statements
[ "set", "import", "statements" ]
python
train
36.133333
bokeh/bokeh
bokeh/sphinxext/bokeh_plot.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/sphinxext/bokeh_plot.py#L212-L222
def setup(app): ''' Required Sphinx extension setup function. ''' # These two are deprecated and no longer have any effect, to be removed 2.0 app.add_config_value('bokeh_plot_pyfile_include_dirs', [], 'html') app.add_config_value('bokeh_plot_use_relative_paths', False, 'html') app.add_directive('bokeh-plot', BokehPlotDirective) app.add_config_value('bokeh_missing_google_api_key_ok', True, 'html') app.connect('builder-inited', builder_inited) app.connect('build-finished', build_finished)
[ "def", "setup", "(", "app", ")", ":", "# These two are deprecated and no longer have any effect, to be removed 2.0", "app", ".", "add_config_value", "(", "'bokeh_plot_pyfile_include_dirs'", ",", "[", "]", ",", "'html'", ")", "app", ".", "add_config_value", "(", "'bokeh_plot_use_relative_paths'", ",", "False", ",", "'html'", ")", "app", ".", "add_directive", "(", "'bokeh-plot'", ",", "BokehPlotDirective", ")", "app", ".", "add_config_value", "(", "'bokeh_missing_google_api_key_ok'", ",", "True", ",", "'html'", ")", "app", ".", "connect", "(", "'builder-inited'", ",", "builder_inited", ")", "app", ".", "connect", "(", "'build-finished'", ",", "build_finished", ")" ]
Required Sphinx extension setup function.
[ "Required", "Sphinx", "extension", "setup", "function", "." ]
python
train
46.818182
apache/incubator-mxnet
python/mxnet/callback.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/callback.py#L27-L52
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False): """Callback to checkpoint Module to prefix every epoch. Parameters ---------- mod : subclass of BaseModule The module to checkpoint. prefix : str The file prefix for this checkpoint. period : int How many epochs to wait before checkpointing. Defaults to 1. save_optimizer_states : bool Indicates whether or not to save optimizer states for continued training. Returns ------- callback : function The callback function that can be passed as iter_end_callback to fit. """ period = int(max(1, period)) # pylint: disable=unused-argument def _callback(iter_no, sym=None, arg=None, aux=None): """The checkpoint function.""" if (iter_no + 1) % period == 0: mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states) return _callback
[ "def", "module_checkpoint", "(", "mod", ",", "prefix", ",", "period", "=", "1", ",", "save_optimizer_states", "=", "False", ")", ":", "period", "=", "int", "(", "max", "(", "1", ",", "period", ")", ")", "# pylint: disable=unused-argument", "def", "_callback", "(", "iter_no", ",", "sym", "=", "None", ",", "arg", "=", "None", ",", "aux", "=", "None", ")", ":", "\"\"\"The checkpoint function.\"\"\"", "if", "(", "iter_no", "+", "1", ")", "%", "period", "==", "0", ":", "mod", ".", "save_checkpoint", "(", "prefix", ",", "iter_no", "+", "1", ",", "save_optimizer_states", ")", "return", "_callback" ]
Callback to checkpoint Module to prefix every epoch. Parameters ---------- mod : subclass of BaseModule The module to checkpoint. prefix : str The file prefix for this checkpoint. period : int How many epochs to wait before checkpointing. Defaults to 1. save_optimizer_states : bool Indicates whether or not to save optimizer states for continued training. Returns ------- callback : function The callback function that can be passed as iter_end_callback to fit.
[ "Callback", "to", "checkpoint", "Module", "to", "prefix", "every", "epoch", "." ]
python
train
35
qbicsoftware/mtb-parser-lib
mtbparser/snv_item.py
https://github.com/qbicsoftware/mtb-parser-lib/blob/e8b96e34b27e457ea7def2927fe44017fa173ba7/mtbparser/snv_item.py#L6-L11
def _format_dict(self, info_dict): """Replaces empty content with 'NA's""" for key, value in info_dict.items(): if not value: info_dict[key] = "NA" return info_dict
[ "def", "_format_dict", "(", "self", ",", "info_dict", ")", ":", "for", "key", ",", "value", "in", "info_dict", ".", "items", "(", ")", ":", "if", "not", "value", ":", "info_dict", "[", "key", "]", "=", "\"NA\"", "return", "info_dict" ]
Replaces empty content with 'NA's
[ "Replaces", "empty", "content", "with", "NA", "s" ]
python
train
35.166667
mottosso/be
be/vendor/requests/packages/urllib3/_collections.py
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/vendor/requests/packages/urllib3/_collections.py#L290-L295
def iteritems(self): """Iterate over all header lines, including duplicate ones.""" for key in self: vals = _dict_getitem(self, key) for val in vals[1:]: yield vals[0], val
[ "def", "iteritems", "(", "self", ")", ":", "for", "key", "in", "self", ":", "vals", "=", "_dict_getitem", "(", "self", ",", "key", ")", "for", "val", "in", "vals", "[", "1", ":", "]", ":", "yield", "vals", "[", "0", "]", ",", "val" ]
Iterate over all header lines, including duplicate ones.
[ "Iterate", "over", "all", "header", "lines", "including", "duplicate", "ones", "." ]
python
train
37.166667
aodag/WebDispatch
webdispatch/base.py
https://github.com/aodag/WebDispatch/blob/55f8658a2b4100498e098a80303a346c3940f1bc/webdispatch/base.py#L44-L49
def on_view_not_found( self, environ: Dict[str, Any], start_response: Callable) -> Iterable[bytes]: # pragma: nocover """ called when view is not found""" raise NotImplementedError()
[ "def", "on_view_not_found", "(", "self", ",", "environ", ":", "Dict", "[", "str", ",", "Any", "]", ",", "start_response", ":", "Callable", ")", "->", "Iterable", "[", "bytes", "]", ":", "# pragma: nocover", "raise", "NotImplementedError", "(", ")" ]
called when view is not found
[ "called", "when", "view", "is", "not", "found" ]
python
train
38.333333
lbusoni/pysilico
pysilico/gui/image_show_widget/image_show_basic_widget.py
https://github.com/lbusoni/pysilico/blob/44872c8c202bedc8af5d7ac0cd2971912a59a365/pysilico/gui/image_show_widget/image_show_basic_widget.py#L504-L513
def _getProcessedImage(self): """Returns the image data after it has been processed by any normalization options in use. This method also sets the attributes self.levelMin and self.levelMax to indicate the range of data in the image.""" if self.imageDisp is None: self.imageDisp = self.image self.levelMin, self.levelMax = self._quickLevels( self.imageDisp) #list( map(float, self._quickLevels(self.imageDisp))) return self.imageDisp
[ "def", "_getProcessedImage", "(", "self", ")", ":", "if", "self", ".", "imageDisp", "is", "None", ":", "self", ".", "imageDisp", "=", "self", ".", "image", "self", ".", "levelMin", ",", "self", ".", "levelMax", "=", "self", ".", "_quickLevels", "(", "self", ".", "imageDisp", ")", "#list( map(float, self._quickLevels(self.imageDisp)))", "return", "self", ".", "imageDisp" ]
Returns the image data after it has been processed by any normalization options in use. This method also sets the attributes self.levelMin and self.levelMax to indicate the range of data in the image.
[ "Returns", "the", "image", "data", "after", "it", "has", "been", "processed", "by", "any", "normalization", "options", "in", "use", ".", "This", "method", "also", "sets", "the", "attributes", "self", ".", "levelMin", "and", "self", ".", "levelMax", "to", "indicate", "the", "range", "of", "data", "in", "the", "image", "." ]
python
train
51.7
jkwill87/mapi
mapi/endpoints.py
https://github.com/jkwill87/mapi/blob/730bf57c12aecaf49e18c15bf2b35af7f554b3cc/mapi/endpoints.py#L260-L273
def tvdb_login(api_key): """ Logs into TVDb using the provided api key Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister Online docs: api.thetvdb.com/swagger#!/Authentication/post_login= """ url = "https://api.thetvdb.com/login" body = {"apikey": api_key} status, content = _request_json(url, body=body, cache=False) if status == 401: raise MapiProviderException("invalid api key") elif status != 200 or not content.get("token"): raise MapiNetworkException("TVDb down or unavailable?") return content["token"]
[ "def", "tvdb_login", "(", "api_key", ")", ":", "url", "=", "\"https://api.thetvdb.com/login\"", "body", "=", "{", "\"apikey\"", ":", "api_key", "}", "status", ",", "content", "=", "_request_json", "(", "url", ",", "body", "=", "body", ",", "cache", "=", "False", ")", "if", "status", "==", "401", ":", "raise", "MapiProviderException", "(", "\"invalid api key\"", ")", "elif", "status", "!=", "200", "or", "not", "content", ".", "get", "(", "\"token\"", ")", ":", "raise", "MapiNetworkException", "(", "\"TVDb down or unavailable?\"", ")", "return", "content", "[", "\"token\"", "]" ]
Logs into TVDb using the provided api key Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister Online docs: api.thetvdb.com/swagger#!/Authentication/post_login=
[ "Logs", "into", "TVDb", "using", "the", "provided", "api", "key" ]
python
train
41.285714
tensorforce/tensorforce
tensorforce/core/optimizers/optimizer.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/core/optimizers/optimizer.py#L75-L90
def apply_step(self, variables, deltas): """ Applies the given (and already calculated) step deltas to the variable values. Args: variables: List of variables. deltas: List of deltas of same length. Returns: The step-applied operation. A tf.group of tf.assign_add ops. """ if len(variables) != len(deltas): raise TensorForceError("Invalid variables and deltas lists.") return tf.group( *(tf.assign_add(ref=variable, value=delta) for variable, delta in zip(variables, deltas)) )
[ "def", "apply_step", "(", "self", ",", "variables", ",", "deltas", ")", ":", "if", "len", "(", "variables", ")", "!=", "len", "(", "deltas", ")", ":", "raise", "TensorForceError", "(", "\"Invalid variables and deltas lists.\"", ")", "return", "tf", ".", "group", "(", "*", "(", "tf", ".", "assign_add", "(", "ref", "=", "variable", ",", "value", "=", "delta", ")", "for", "variable", ",", "delta", "in", "zip", "(", "variables", ",", "deltas", ")", ")", ")" ]
Applies the given (and already calculated) step deltas to the variable values. Args: variables: List of variables. deltas: List of deltas of same length. Returns: The step-applied operation. A tf.group of tf.assign_add ops.
[ "Applies", "the", "given", "(", "and", "already", "calculated", ")", "step", "deltas", "to", "the", "variable", "values", "." ]
python
valid
36.75
frmdstryr/enamlx
enamlx/qt/qt_table_view.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_table_view.py#L194-L198
def data_changed(self, change): """ Notify the model that data has changed in this cell! """ index = self.index if index: self.view.model.dataChanged.emit(index, index)
[ "def", "data_changed", "(", "self", ",", "change", ")", ":", "index", "=", "self", ".", "index", "if", "index", ":", "self", ".", "view", ".", "model", ".", "dataChanged", ".", "emit", "(", "index", ",", "index", ")" ]
Notify the model that data has changed in this cell!
[ "Notify", "the", "model", "that", "data", "has", "changed", "in", "this", "cell!" ]
python
train
40
HPENetworking/PYHPEIMC
pyhpeimc/plat/alarms.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/alarms.py#L20-L62
def get_dev_alarms(auth, url, devid=None, devip=None): """ function takes the devId of a specific device and issues a RESTFUL call to get the current alarms for the target device. :param devid: int or str value of the target device :param devip: str of ipv4 address of the target device :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return:list of dictionaries containing the alarms for this device :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.alarms import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> dev_alarms = get_dev_alarms(auth.creds, auth.url, devip='10.101.0.221') >>> assert 'ackStatus' in dev_alarms[0] """ # checks to see if the imc credentials are already available if devip is not None: devid = get_dev_details(devip, auth, url)['id'] f_url = url + "/imcrs/fault/alarm?operatorName=admin&deviceId=" + \ str(devid) + "&desc=false" response = requests.get(f_url, auth=auth, headers=HEADERS) try: if response.status_code == 200: dev_alarm = (json.loads(response.text)) if 'alarm' in dev_alarm: return dev_alarm['alarm'] else: return "Device has no alarms" except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + ' get_dev_alarms: An Error has occured'
[ "def", "get_dev_alarms", "(", "auth", ",", "url", ",", "devid", "=", "None", ",", "devip", "=", "None", ")", ":", "# checks to see if the imc credentials are already available", "if", "devip", "is", "not", "None", ":", "devid", "=", "get_dev_details", "(", "devip", ",", "auth", ",", "url", ")", "[", "'id'", "]", "f_url", "=", "url", "+", "\"/imcrs/fault/alarm?operatorName=admin&deviceId=\"", "+", "str", "(", "devid", ")", "+", "\"&desc=false\"", "response", "=", "requests", ".", "get", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ")", "try", ":", "if", "response", ".", "status_code", "==", "200", ":", "dev_alarm", "=", "(", "json", ".", "loads", "(", "response", ".", "text", ")", ")", "if", "'alarm'", "in", "dev_alarm", ":", "return", "dev_alarm", "[", "'alarm'", "]", "else", ":", "return", "\"Device has no alarms\"", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "error", ":", "return", "\"Error:\\n\"", "+", "str", "(", "error", ")", "+", "' get_dev_alarms: An Error has occured'" ]
function takes the devId of a specific device and issues a RESTFUL call to get the current alarms for the target device. :param devid: int or str value of the target device :param devip: str of ipv4 address of the target device :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return:list of dictionaries containing the alarms for this device :rtype: list >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.alarms import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> dev_alarms = get_dev_alarms(auth.creds, auth.url, devip='10.101.0.221') >>> assert 'ackStatus' in dev_alarms[0]
[ "function", "takes", "the", "devId", "of", "a", "specific", "device", "and", "issues", "a", "RESTFUL", "call", "to", "get", "the", "current", "alarms", "for", "the", "target", "device", "." ]
python
train
35.976744
briancappello/flask-unchained
flask_unchained/bundles/security/views/security_controller.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/views/security_controller.py#L198-L242
def reset_password(self, token): """ View function verify a users reset password token from the email we sent to them. It also handles the form for them to set a new password. Supports html and json requests. """ expired, invalid, user = \ self.security_utils_service.reset_password_token_status(token) if invalid: self.flash( _('flask_unchained.bundles.security:flash.invalid_reset_password_token'), category='error') return self.redirect('SECURITY_INVALID_RESET_TOKEN_REDIRECT') elif expired: self.security_service.send_reset_password_instructions(user) self.flash(_('flask_unchained.bundles.security:flash.password_reset_expired', email=user.email, within=app.config.SECURITY_RESET_PASSWORD_WITHIN), category='error') return self.redirect('SECURITY_EXPIRED_RESET_TOKEN_REDIRECT') spa_redirect = app.config.SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT if request.method == 'GET' and spa_redirect: return self.redirect(spa_redirect, token=token, _external=True) form = self._get_form('SECURITY_RESET_PASSWORD_FORM') if form.validate_on_submit(): self.security_service.reset_password(user, form.password.data) self.security_service.login_user(user) self.after_this_request(self._commit) self.flash(_('flask_unchained.bundles.security:flash.password_reset'), category='success') if request.is_json: return self.jsonify({'token': user.get_auth_token(), 'user': user}) return self.redirect('SECURITY_POST_RESET_REDIRECT_ENDPOINT', 'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT') elif form.errors and request.is_json: return self.errors(form.errors) return self.render('reset_password', reset_password_form=form, reset_password_token=token, **self.security.run_ctx_processor('reset_password'))
[ "def", "reset_password", "(", "self", ",", "token", ")", ":", "expired", ",", "invalid", ",", "user", "=", "self", ".", "security_utils_service", ".", "reset_password_token_status", "(", "token", ")", "if", "invalid", ":", "self", ".", "flash", "(", "_", "(", "'flask_unchained.bundles.security:flash.invalid_reset_password_token'", ")", ",", "category", "=", "'error'", ")", "return", "self", ".", "redirect", "(", "'SECURITY_INVALID_RESET_TOKEN_REDIRECT'", ")", "elif", "expired", ":", "self", ".", "security_service", ".", "send_reset_password_instructions", "(", "user", ")", "self", ".", "flash", "(", "_", "(", "'flask_unchained.bundles.security:flash.password_reset_expired'", ",", "email", "=", "user", ".", "email", ",", "within", "=", "app", ".", "config", ".", "SECURITY_RESET_PASSWORD_WITHIN", ")", ",", "category", "=", "'error'", ")", "return", "self", ".", "redirect", "(", "'SECURITY_EXPIRED_RESET_TOKEN_REDIRECT'", ")", "spa_redirect", "=", "app", ".", "config", ".", "SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT", "if", "request", ".", "method", "==", "'GET'", "and", "spa_redirect", ":", "return", "self", ".", "redirect", "(", "spa_redirect", ",", "token", "=", "token", ",", "_external", "=", "True", ")", "form", "=", "self", ".", "_get_form", "(", "'SECURITY_RESET_PASSWORD_FORM'", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "self", ".", "security_service", ".", "reset_password", "(", "user", ",", "form", ".", "password", ".", "data", ")", "self", ".", "security_service", ".", "login_user", "(", "user", ")", "self", ".", "after_this_request", "(", "self", ".", "_commit", ")", "self", ".", "flash", "(", "_", "(", "'flask_unchained.bundles.security:flash.password_reset'", ")", ",", "category", "=", "'success'", ")", "if", "request", ".", "is_json", ":", "return", "self", ".", "jsonify", "(", "{", "'token'", ":", "user", ".", "get_auth_token", "(", ")", ",", "'user'", ":", "user", "}", ")", "return", "self", ".", "redirect", "(", "'SECURITY_POST_RESET_REDIRECT_ENDPOINT'", ",", "'SECURITY_POST_LOGIN_REDIRECT_ENDPOINT'", ")", "elif", "form", ".", "errors", "and", "request", ".", "is_json", ":", "return", "self", ".", "errors", "(", "form", ".", "errors", ")", "return", "self", ".", "render", "(", "'reset_password'", ",", "reset_password_form", "=", "form", ",", "reset_password_token", "=", "token", ",", "*", "*", "self", ".", "security", ".", "run_ctx_processor", "(", "'reset_password'", ")", ")" ]
View function verify a users reset password token from the email we sent to them. It also handles the form for them to set a new password. Supports html and json requests.
[ "View", "function", "verify", "a", "users", "reset", "password", "token", "from", "the", "email", "we", "sent", "to", "them", ".", "It", "also", "handles", "the", "form", "for", "them", "to", "set", "a", "new", "password", ".", "Supports", "html", "and", "json", "requests", "." ]
python
train
49.288889
django-notifications/django-notifications
notifications/views.py
https://github.com/django-notifications/django-notifications/blob/c271193215a053d6477d5ceca6f2524793bf9cb1/notifications/views.py#L143-L187
def live_unread_notification_list(request): ''' Return a json with a unread notification list ''' try: user_is_authenticated = request.user.is_authenticated() except TypeError: # Django >= 1.11 user_is_authenticated = request.user.is_authenticated if not user_is_authenticated: data = { 'unread_count': 0, 'unread_list': [] } return JsonResponse(data) default_num_to_fetch = get_config()['NUM_TO_FETCH'] try: # If they don't specify, make it 5. num_to_fetch = request.GET.get('max', default_num_to_fetch) num_to_fetch = int(num_to_fetch) if not (1 <= num_to_fetch <= 100): num_to_fetch = default_num_to_fetch except ValueError: # If casting to an int fails. num_to_fetch = default_num_to_fetch unread_list = [] for notification in request.user.notifications.unread()[0:num_to_fetch]: struct = model_to_dict(notification) struct['slug'] = id2slug(notification.id) if notification.actor: struct['actor'] = str(notification.actor) if notification.target: struct['target'] = str(notification.target) if notification.action_object: struct['action_object'] = str(notification.action_object) if notification.data: struct['data'] = notification.data unread_list.append(struct) if request.GET.get('mark_as_read'): notification.mark_as_read() data = { 'unread_count': request.user.notifications.unread().count(), 'unread_list': unread_list } return JsonResponse(data)
[ "def", "live_unread_notification_list", "(", "request", ")", ":", "try", ":", "user_is_authenticated", "=", "request", ".", "user", ".", "is_authenticated", "(", ")", "except", "TypeError", ":", "# Django >= 1.11", "user_is_authenticated", "=", "request", ".", "user", ".", "is_authenticated", "if", "not", "user_is_authenticated", ":", "data", "=", "{", "'unread_count'", ":", "0", ",", "'unread_list'", ":", "[", "]", "}", "return", "JsonResponse", "(", "data", ")", "default_num_to_fetch", "=", "get_config", "(", ")", "[", "'NUM_TO_FETCH'", "]", "try", ":", "# If they don't specify, make it 5.", "num_to_fetch", "=", "request", ".", "GET", ".", "get", "(", "'max'", ",", "default_num_to_fetch", ")", "num_to_fetch", "=", "int", "(", "num_to_fetch", ")", "if", "not", "(", "1", "<=", "num_to_fetch", "<=", "100", ")", ":", "num_to_fetch", "=", "default_num_to_fetch", "except", "ValueError", ":", "# If casting to an int fails.", "num_to_fetch", "=", "default_num_to_fetch", "unread_list", "=", "[", "]", "for", "notification", "in", "request", ".", "user", ".", "notifications", ".", "unread", "(", ")", "[", "0", ":", "num_to_fetch", "]", ":", "struct", "=", "model_to_dict", "(", "notification", ")", "struct", "[", "'slug'", "]", "=", "id2slug", "(", "notification", ".", "id", ")", "if", "notification", ".", "actor", ":", "struct", "[", "'actor'", "]", "=", "str", "(", "notification", ".", "actor", ")", "if", "notification", ".", "target", ":", "struct", "[", "'target'", "]", "=", "str", "(", "notification", ".", "target", ")", "if", "notification", ".", "action_object", ":", "struct", "[", "'action_object'", "]", "=", "str", "(", "notification", ".", "action_object", ")", "if", "notification", ".", "data", ":", "struct", "[", "'data'", "]", "=", "notification", ".", "data", "unread_list", ".", "append", "(", "struct", ")", "if", "request", ".", "GET", ".", "get", "(", "'mark_as_read'", ")", ":", "notification", ".", "mark_as_read", "(", ")", "data", "=", "{", "'unread_count'", ":", "request", ".", "user", ".", "notifications", ".", "unread", "(", ")", ".", "count", "(", ")", ",", "'unread_list'", ":", "unread_list", "}", "return", "JsonResponse", "(", "data", ")" ]
Return a json with a unread notification list
[ "Return", "a", "json", "with", "a", "unread", "notification", "list" ]
python
train
36.066667
econ-ark/HARK
HARK/ConsumptionSaving/ConsAggShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsAggShockModel.py#L655-L858
def solveConsAggMarkov(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,MrkvArray, PermGroFac,PermGroFacAgg,aXtraGrid,BoroCnstArt,Mgrid, AFunc,Rfunc,wFunc,DeprFac): ''' Solve one period of a consumption-saving problem with idiosyncratic and aggregate shocks (transitory and permanent). Moreover, the macroeconomic state follows a Markov process that determines the income distribution and aggregate permanent growth factor. This is a basic solver that can't handle cubic splines, nor can it calculate a value function. Parameters ---------- solution_next : ConsumerSolution The solution to the succeeding one period problem. IncomeDstn : [[np.array]] A list of lists, each containing five arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, idisyncratic permanent shocks, idiosyncratic transitory shocks, aggregate permanent shocks, aggregate transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. MrkvArray : np.array Markov transition matrix between discrete macroeconomic states. MrkvArray[i,j] is probability of being in state j next period conditional on being in state i this period. PermGroFac : float Expected permanent income growth factor at the end of this period, for the *individual*'s productivity. PermGroFacAgg : [float] Expected aggregate productivity growth in each Markov macro state. aXtraGrid : np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. BoroCnstArt : float Artificial borrowing constraint; minimum allowable end-of-period asset-to- permanent-income ratio. Unlike other models, this *can't* be None. Mgrid : np.array A grid of aggregate market resourses to permanent income in the economy. AFunc : [function] Aggregate savings as a function of aggregate market resources, for each Markov macro state. Rfunc : function The net interest factor on assets as a function of capital ratio k. wFunc : function The wage rate for labor as a function of capital-to-labor ratio k. DeprFac : float Capital Depreciation Rate Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (linear interpolation over linear interpola- tions) and marginal value function vPfunc. ''' # Get sizes of grids aCount = aXtraGrid.size Mcount = Mgrid.size StateCount = MrkvArray.shape[0] # Loop through next period's states, assuming we reach each one at a time. # Construct EndOfPrdvP_cond functions for each state. 
EndOfPrdvPfunc_cond = [] BoroCnstNat_cond = [] for j in range(StateCount): # Unpack next period's solution vPfuncNext = solution_next.vPfunc[j] mNrmMinNext = solution_next.mNrmMin[j] # Unpack the income shocks ShkPrbsNext = IncomeDstn[j][0] PermShkValsNext = IncomeDstn[j][1] TranShkValsNext = IncomeDstn[j][2] PermShkAggValsNext = IncomeDstn[j][3] TranShkAggValsNext = IncomeDstn[j][4] ShkCount = ShkPrbsNext.size aXtra_tiled = np.tile(np.reshape(aXtraGrid,(1,aCount,1)),(Mcount,1,ShkCount)) # Make tiled versions of the income shocks # Dimension order: Mnow, aNow, Shk ShkPrbsNext_tiled = np.tile(np.reshape(ShkPrbsNext,(1,1,ShkCount)),(Mcount,aCount,1)) PermShkValsNext_tiled = np.tile(np.reshape(PermShkValsNext,(1,1,ShkCount)),(Mcount,aCount,1)) TranShkValsNext_tiled = np.tile(np.reshape(TranShkValsNext,(1,1,ShkCount)),(Mcount,aCount,1)) PermShkAggValsNext_tiled = np.tile(np.reshape(PermShkAggValsNext,(1,1,ShkCount)),(Mcount,aCount,1)) TranShkAggValsNext_tiled = np.tile(np.reshape(TranShkAggValsNext,(1,1,ShkCount)),(Mcount,aCount,1)) # Make a tiled grid of end-of-period aggregate assets. These lines use # next prd state j's aggregate saving rule to get a relevant set of Aagg, # which will be used to make an interpolated EndOfPrdvP_cond function. # After constructing these functions, we will use the aggregate saving # rule for *current* state i to get values of Aagg at which to evaluate # these conditional marginal value functions. In the strange, maybe even # impossible case where the aggregate saving rules differ wildly across # macro states *and* there is "anti-persistence", so that the macro state # is very likely to change each period, then this procedure will lead to # an inaccurate solution because the grid of Aagg values on which the # conditional marginal value functions are constructed is not relevant # to the values at which it will actually be evaluated. AaggGrid = AFunc[j](Mgrid) AaggNow_tiled = np.tile(np.reshape(AaggGrid,(Mcount,1,1)),(1,aCount,ShkCount)) # Calculate returns to capital and labor in the next period kNext_array = AaggNow_tiled/(PermGroFacAgg[j]*PermShkAggValsNext_tiled) # Next period's aggregate capital to labor ratio kNextEff_array = kNext_array/TranShkAggValsNext_tiled # Same thing, but account for *transitory* shock R_array = Rfunc(kNextEff_array) # Interest factor on aggregate assets Reff_array = R_array/LivPrb # Effective interest factor on individual assets *for survivors* wEff_array = wFunc(kNextEff_array)*TranShkAggValsNext_tiled # Effective wage rate (accounts for labor supply) PermShkTotal_array = PermGroFac*PermGroFacAgg[j]*PermShkValsNext_tiled*PermShkAggValsNext_tiled # total / combined permanent shock Mnext_array = kNext_array*R_array + wEff_array # next period's aggregate market resources # Find the natural borrowing constraint for each value of M in the Mgrid. # There is likely a faster way to do this, but someone needs to do the math: # is aNrmMin determined by getting the worst shock of all four types? 
aNrmMin_candidates = PermGroFac*PermGroFacAgg[j]*PermShkValsNext_tiled[:,0,:]*PermShkAggValsNext_tiled[:,0,:]/Reff_array[:,0,:]*\ (mNrmMinNext(Mnext_array[:,0,:]) - wEff_array[:,0,:]*TranShkValsNext_tiled[:,0,:]) aNrmMin_vec = np.max(aNrmMin_candidates,axis=1) BoroCnstNat_vec = aNrmMin_vec aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1,1)),(1,aCount,ShkCount)) aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled # Calculate market resources next period (and a constant array of capital-to-labor ratio) mNrmNext_array = Reff_array*aNrmNow_tiled/PermShkTotal_array + TranShkValsNext_tiled*wEff_array # Find marginal value next period at every income shock realization and every aggregate market resource gridpoint vPnext_array = Reff_array*PermShkTotal_array**(-CRRA)*vPfuncNext(mNrmNext_array,Mnext_array) # Calculate expectated marginal value at the end of the period at every asset gridpoint EndOfPrdvP = DiscFac*LivPrb*np.sum(vPnext_array*ShkPrbsNext_tiled,axis=2) # Make the conditional end-of-period marginal value function BoroCnstNat = LinearInterp(np.insert(AaggGrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0)) EndOfPrdvPnvrs = np.concatenate((np.zeros((Mcount,1)),EndOfPrdvP**(-1./CRRA)),axis=1) EndOfPrdvPnvrsFunc_base = BilinearInterp(np.transpose(EndOfPrdvPnvrs),np.insert(aXtraGrid,0,0.0),AaggGrid) EndOfPrdvPnvrsFunc = VariableLowerBoundFunc2D(EndOfPrdvPnvrsFunc_base,BoroCnstNat) EndOfPrdvPfunc_cond.append(MargValueFunc2D(EndOfPrdvPnvrsFunc,CRRA)) BoroCnstNat_cond.append(BoroCnstNat) # Prepare some objects that are the same across all current states aXtra_tiled = np.tile(np.reshape(aXtraGrid,(1,aCount)),(Mcount,1)) cFuncCnst = BilinearInterp(np.array([[0.0,0.0],[1.0,1.0]]),np.array([BoroCnstArt,BoroCnstArt+1.0]),np.array([0.0,1.0])) # Now loop through *this* period's discrete states, calculating end-of-period # marginal value (weighting across state transitions), then construct consumption # and marginal value function for each state. 
cFuncNow = [] vPfuncNow = [] mNrmMinNow = [] for i in range(StateCount): # Find natural borrowing constraint for this state by Aagg AaggNow = AFunc[i](Mgrid) aNrmMin_candidates = np.zeros((StateCount,Mcount)) + np.nan for j in range(StateCount): if MrkvArray[i,j] > 0.: # Irrelevant if transition is impossible aNrmMin_candidates[j,:] = BoroCnstNat_cond[j](AaggNow) aNrmMin_vec = np.nanmax(aNrmMin_candidates,axis=0) BoroCnstNat_vec = aNrmMin_vec # Make tiled grids of aNrm and Aagg aNrmMin_tiled = np.tile(np.reshape(aNrmMin_vec,(Mcount,1)),(1,aCount)) aNrmNow_tiled = aNrmMin_tiled + aXtra_tiled AaggNow_tiled = np.tile(np.reshape(AaggNow,(Mcount,1)),(1,aCount)) # Loop through feasible transitions and calculate end-of-period marginal value EndOfPrdvP = np.zeros((Mcount,aCount)) for j in range(StateCount): if MrkvArray[i,j] > 0.: temp = EndOfPrdvPfunc_cond[j](aNrmNow_tiled,AaggNow_tiled) EndOfPrdvP += MrkvArray[i,j]*temp # Calculate consumption and the endogenous mNrm gridpoints for this state cNrmNow = EndOfPrdvP**(-1./CRRA) mNrmNow = aNrmNow_tiled + cNrmNow # Loop through the values in Mgrid and make a piecewise linear consumption function for each cFuncBaseByM_list = [] for n in range(Mcount): c_temp = np.insert(cNrmNow[n,:],0,0.0) # Add point at bottom m_temp = np.insert(mNrmNow[n,:] - BoroCnstNat_vec[n],0,0.0) cFuncBaseByM_list.append(LinearInterp(m_temp,c_temp)) # Add the M-specific consumption function to the list # Construct the unconstrained consumption function by combining the M-specific functions BoroCnstNat = LinearInterp(np.insert(Mgrid,0,0.0),np.insert(BoroCnstNat_vec,0,0.0)) cFuncBase = LinearInterpOnInterp1D(cFuncBaseByM_list,Mgrid) cFuncUnc = VariableLowerBoundFunc2D(cFuncBase,BoroCnstNat) # Combine the constrained consumption function with unconstrained component cFuncNow.append(LowerEnvelope2D(cFuncUnc,cFuncCnst)) # Make the minimum m function as the greater of the natural and artificial constraints mNrmMinNow.append(UpperEnvelope(BoroCnstNat,ConstantFunction(BoroCnstArt))) # Construct the marginal value function using the envelope condition vPfuncNow.append(MargValueFunc2D(cFuncNow[-1],CRRA)) # Pack up and return the solution solution_now = ConsumerSolution(cFunc=cFuncNow,vPfunc=vPfuncNow,mNrmMin=mNrmMinNow) return solution_now
[ "def", "solveConsAggMarkov", "(", "solution_next", ",", "IncomeDstn", ",", "LivPrb", ",", "DiscFac", ",", "CRRA", ",", "MrkvArray", ",", "PermGroFac", ",", "PermGroFacAgg", ",", "aXtraGrid", ",", "BoroCnstArt", ",", "Mgrid", ",", "AFunc", ",", "Rfunc", ",", "wFunc", ",", "DeprFac", ")", ":", "# Get sizes of grids", "aCount", "=", "aXtraGrid", ".", "size", "Mcount", "=", "Mgrid", ".", "size", "StateCount", "=", "MrkvArray", ".", "shape", "[", "0", "]", "# Loop through next period's states, assuming we reach each one at a time.", "# Construct EndOfPrdvP_cond functions for each state.", "EndOfPrdvPfunc_cond", "=", "[", "]", "BoroCnstNat_cond", "=", "[", "]", "for", "j", "in", "range", "(", "StateCount", ")", ":", "# Unpack next period's solution", "vPfuncNext", "=", "solution_next", ".", "vPfunc", "[", "j", "]", "mNrmMinNext", "=", "solution_next", ".", "mNrmMin", "[", "j", "]", "# Unpack the income shocks", "ShkPrbsNext", "=", "IncomeDstn", "[", "j", "]", "[", "0", "]", "PermShkValsNext", "=", "IncomeDstn", "[", "j", "]", "[", "1", "]", "TranShkValsNext", "=", "IncomeDstn", "[", "j", "]", "[", "2", "]", "PermShkAggValsNext", "=", "IncomeDstn", "[", "j", "]", "[", "3", "]", "TranShkAggValsNext", "=", "IncomeDstn", "[", "j", "]", "[", "4", "]", "ShkCount", "=", "ShkPrbsNext", ".", "size", "aXtra_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "aXtraGrid", ",", "(", "1", ",", "aCount", ",", "1", ")", ")", ",", "(", "Mcount", ",", "1", ",", "ShkCount", ")", ")", "# Make tiled versions of the income shocks", "# Dimension order: Mnow, aNow, Shk", "ShkPrbsNext_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "ShkPrbsNext", ",", "(", "1", ",", "1", ",", "ShkCount", ")", ")", ",", "(", "Mcount", ",", "aCount", ",", "1", ")", ")", "PermShkValsNext_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "PermShkValsNext", ",", "(", "1", ",", "1", ",", "ShkCount", ")", ")", ",", "(", "Mcount", ",", "aCount", ",", "1", ")", ")", "TranShkValsNext_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "TranShkValsNext", ",", "(", "1", ",", "1", ",", "ShkCount", ")", ")", ",", "(", "Mcount", ",", "aCount", ",", "1", ")", ")", "PermShkAggValsNext_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "PermShkAggValsNext", ",", "(", "1", ",", "1", ",", "ShkCount", ")", ")", ",", "(", "Mcount", ",", "aCount", ",", "1", ")", ")", "TranShkAggValsNext_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "TranShkAggValsNext", ",", "(", "1", ",", "1", ",", "ShkCount", ")", ")", ",", "(", "Mcount", ",", "aCount", ",", "1", ")", ")", "# Make a tiled grid of end-of-period aggregate assets. These lines use", "# next prd state j's aggregate saving rule to get a relevant set of Aagg,", "# which will be used to make an interpolated EndOfPrdvP_cond function.", "# After constructing these functions, we will use the aggregate saving", "# rule for *current* state i to get values of Aagg at which to evaluate", "# these conditional marginal value functions. 
In the strange, maybe even", "# impossible case where the aggregate saving rules differ wildly across", "# macro states *and* there is \"anti-persistence\", so that the macro state", "# is very likely to change each period, then this procedure will lead to", "# an inaccurate solution because the grid of Aagg values on which the", "# conditional marginal value functions are constructed is not relevant", "# to the values at which it will actually be evaluated.", "AaggGrid", "=", "AFunc", "[", "j", "]", "(", "Mgrid", ")", "AaggNow_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "AaggGrid", ",", "(", "Mcount", ",", "1", ",", "1", ")", ")", ",", "(", "1", ",", "aCount", ",", "ShkCount", ")", ")", "# Calculate returns to capital and labor in the next period", "kNext_array", "=", "AaggNow_tiled", "/", "(", "PermGroFacAgg", "[", "j", "]", "*", "PermShkAggValsNext_tiled", ")", "# Next period's aggregate capital to labor ratio", "kNextEff_array", "=", "kNext_array", "/", "TranShkAggValsNext_tiled", "# Same thing, but account for *transitory* shock", "R_array", "=", "Rfunc", "(", "kNextEff_array", ")", "# Interest factor on aggregate assets", "Reff_array", "=", "R_array", "/", "LivPrb", "# Effective interest factor on individual assets *for survivors*", "wEff_array", "=", "wFunc", "(", "kNextEff_array", ")", "*", "TranShkAggValsNext_tiled", "# Effective wage rate (accounts for labor supply)", "PermShkTotal_array", "=", "PermGroFac", "*", "PermGroFacAgg", "[", "j", "]", "*", "PermShkValsNext_tiled", "*", "PermShkAggValsNext_tiled", "# total / combined permanent shock", "Mnext_array", "=", "kNext_array", "*", "R_array", "+", "wEff_array", "# next period's aggregate market resources", "# Find the natural borrowing constraint for each value of M in the Mgrid.", "# There is likely a faster way to do this, but someone needs to do the math:", "# is aNrmMin determined by getting the worst shock of all four types?", "aNrmMin_candidates", "=", "PermGroFac", "*", "PermGroFacAgg", "[", "j", "]", "*", "PermShkValsNext_tiled", "[", ":", ",", "0", ",", ":", "]", "*", "PermShkAggValsNext_tiled", "[", ":", ",", "0", ",", ":", "]", "/", "Reff_array", "[", ":", ",", "0", ",", ":", "]", "*", "(", "mNrmMinNext", "(", "Mnext_array", "[", ":", ",", "0", ",", ":", "]", ")", "-", "wEff_array", "[", ":", ",", "0", ",", ":", "]", "*", "TranShkValsNext_tiled", "[", ":", ",", "0", ",", ":", "]", ")", "aNrmMin_vec", "=", "np", ".", "max", "(", "aNrmMin_candidates", ",", "axis", "=", "1", ")", "BoroCnstNat_vec", "=", "aNrmMin_vec", "aNrmMin_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "aNrmMin_vec", ",", "(", "Mcount", ",", "1", ",", "1", ")", ")", ",", "(", "1", ",", "aCount", ",", "ShkCount", ")", ")", "aNrmNow_tiled", "=", "aNrmMin_tiled", "+", "aXtra_tiled", "# Calculate market resources next period (and a constant array of capital-to-labor ratio)", "mNrmNext_array", "=", "Reff_array", "*", "aNrmNow_tiled", "/", "PermShkTotal_array", "+", "TranShkValsNext_tiled", "*", "wEff_array", "# Find marginal value next period at every income shock realization and every aggregate market resource gridpoint", "vPnext_array", "=", "Reff_array", "*", "PermShkTotal_array", "**", "(", "-", "CRRA", ")", "*", "vPfuncNext", "(", "mNrmNext_array", ",", "Mnext_array", ")", "# Calculate expectated marginal value at the end of the period at every asset gridpoint", "EndOfPrdvP", "=", "DiscFac", "*", "LivPrb", "*", "np", ".", "sum", "(", "vPnext_array", "*", "ShkPrbsNext_tiled", ",", "axis", "=", "2", ")", "# Make the 
conditional end-of-period marginal value function", "BoroCnstNat", "=", "LinearInterp", "(", "np", ".", "insert", "(", "AaggGrid", ",", "0", ",", "0.0", ")", ",", "np", ".", "insert", "(", "BoroCnstNat_vec", ",", "0", ",", "0.0", ")", ")", "EndOfPrdvPnvrs", "=", "np", ".", "concatenate", "(", "(", "np", ".", "zeros", "(", "(", "Mcount", ",", "1", ")", ")", ",", "EndOfPrdvP", "**", "(", "-", "1.", "/", "CRRA", ")", ")", ",", "axis", "=", "1", ")", "EndOfPrdvPnvrsFunc_base", "=", "BilinearInterp", "(", "np", ".", "transpose", "(", "EndOfPrdvPnvrs", ")", ",", "np", ".", "insert", "(", "aXtraGrid", ",", "0", ",", "0.0", ")", ",", "AaggGrid", ")", "EndOfPrdvPnvrsFunc", "=", "VariableLowerBoundFunc2D", "(", "EndOfPrdvPnvrsFunc_base", ",", "BoroCnstNat", ")", "EndOfPrdvPfunc_cond", ".", "append", "(", "MargValueFunc2D", "(", "EndOfPrdvPnvrsFunc", ",", "CRRA", ")", ")", "BoroCnstNat_cond", ".", "append", "(", "BoroCnstNat", ")", "# Prepare some objects that are the same across all current states", "aXtra_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "aXtraGrid", ",", "(", "1", ",", "aCount", ")", ")", ",", "(", "Mcount", ",", "1", ")", ")", "cFuncCnst", "=", "BilinearInterp", "(", "np", ".", "array", "(", "[", "[", "0.0", ",", "0.0", "]", ",", "[", "1.0", ",", "1.0", "]", "]", ")", ",", "np", ".", "array", "(", "[", "BoroCnstArt", ",", "BoroCnstArt", "+", "1.0", "]", ")", ",", "np", ".", "array", "(", "[", "0.0", ",", "1.0", "]", ")", ")", "# Now loop through *this* period's discrete states, calculating end-of-period", "# marginal value (weighting across state transitions), then construct consumption", "# and marginal value function for each state.", "cFuncNow", "=", "[", "]", "vPfuncNow", "=", "[", "]", "mNrmMinNow", "=", "[", "]", "for", "i", "in", "range", "(", "StateCount", ")", ":", "# Find natural borrowing constraint for this state by Aagg", "AaggNow", "=", "AFunc", "[", "i", "]", "(", "Mgrid", ")", "aNrmMin_candidates", "=", "np", ".", "zeros", "(", "(", "StateCount", ",", "Mcount", ")", ")", "+", "np", ".", "nan", "for", "j", "in", "range", "(", "StateCount", ")", ":", "if", "MrkvArray", "[", "i", ",", "j", "]", ">", "0.", ":", "# Irrelevant if transition is impossible", "aNrmMin_candidates", "[", "j", ",", ":", "]", "=", "BoroCnstNat_cond", "[", "j", "]", "(", "AaggNow", ")", "aNrmMin_vec", "=", "np", ".", "nanmax", "(", "aNrmMin_candidates", ",", "axis", "=", "0", ")", "BoroCnstNat_vec", "=", "aNrmMin_vec", "# Make tiled grids of aNrm and Aagg", "aNrmMin_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "aNrmMin_vec", ",", "(", "Mcount", ",", "1", ")", ")", ",", "(", "1", ",", "aCount", ")", ")", "aNrmNow_tiled", "=", "aNrmMin_tiled", "+", "aXtra_tiled", "AaggNow_tiled", "=", "np", ".", "tile", "(", "np", ".", "reshape", "(", "AaggNow", ",", "(", "Mcount", ",", "1", ")", ")", ",", "(", "1", ",", "aCount", ")", ")", "# Loop through feasible transitions and calculate end-of-period marginal value", "EndOfPrdvP", "=", "np", ".", "zeros", "(", "(", "Mcount", ",", "aCount", ")", ")", "for", "j", "in", "range", "(", "StateCount", ")", ":", "if", "MrkvArray", "[", "i", ",", "j", "]", ">", "0.", ":", "temp", "=", "EndOfPrdvPfunc_cond", "[", "j", "]", "(", "aNrmNow_tiled", ",", "AaggNow_tiled", ")", "EndOfPrdvP", "+=", "MrkvArray", "[", "i", ",", "j", "]", "*", "temp", "# Calculate consumption and the endogenous mNrm gridpoints for this state", "cNrmNow", "=", "EndOfPrdvP", "**", "(", "-", "1.", "/", "CRRA", ")", "mNrmNow", "=", "aNrmNow_tiled", "+", "cNrmNow", "# 
Loop through the values in Mgrid and make a piecewise linear consumption function for each", "cFuncBaseByM_list", "=", "[", "]", "for", "n", "in", "range", "(", "Mcount", ")", ":", "c_temp", "=", "np", ".", "insert", "(", "cNrmNow", "[", "n", ",", ":", "]", ",", "0", ",", "0.0", ")", "# Add point at bottom", "m_temp", "=", "np", ".", "insert", "(", "mNrmNow", "[", "n", ",", ":", "]", "-", "BoroCnstNat_vec", "[", "n", "]", ",", "0", ",", "0.0", ")", "cFuncBaseByM_list", ".", "append", "(", "LinearInterp", "(", "m_temp", ",", "c_temp", ")", ")", "# Add the M-specific consumption function to the list", "# Construct the unconstrained consumption function by combining the M-specific functions", "BoroCnstNat", "=", "LinearInterp", "(", "np", ".", "insert", "(", "Mgrid", ",", "0", ",", "0.0", ")", ",", "np", ".", "insert", "(", "BoroCnstNat_vec", ",", "0", ",", "0.0", ")", ")", "cFuncBase", "=", "LinearInterpOnInterp1D", "(", "cFuncBaseByM_list", ",", "Mgrid", ")", "cFuncUnc", "=", "VariableLowerBoundFunc2D", "(", "cFuncBase", ",", "BoroCnstNat", ")", "# Combine the constrained consumption function with unconstrained component", "cFuncNow", ".", "append", "(", "LowerEnvelope2D", "(", "cFuncUnc", ",", "cFuncCnst", ")", ")", "# Make the minimum m function as the greater of the natural and artificial constraints", "mNrmMinNow", ".", "append", "(", "UpperEnvelope", "(", "BoroCnstNat", ",", "ConstantFunction", "(", "BoroCnstArt", ")", ")", ")", "# Construct the marginal value function using the envelope condition", "vPfuncNow", ".", "append", "(", "MargValueFunc2D", "(", "cFuncNow", "[", "-", "1", "]", ",", "CRRA", ")", ")", "# Pack up and return the solution", "solution_now", "=", "ConsumerSolution", "(", "cFunc", "=", "cFuncNow", ",", "vPfunc", "=", "vPfuncNow", ",", "mNrmMin", "=", "mNrmMinNow", ")", "return", "solution_now" ]
Solve one period of a consumption-saving problem with idiosyncratic and aggregate shocks (transitory and permanent). Moreover, the macroeconomic state follows a Markov process that determines the income distribution and aggregate permanent growth factor. This is a basic solver that can't handle cubic splines, nor can it calculate a value function. Parameters ---------- solution_next : ConsumerSolution The solution to the succeeding one period problem. IncomeDstn : [[np.array]] A list of lists, each containing five arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, idisyncratic permanent shocks, idiosyncratic transitory shocks, aggregate permanent shocks, aggregate transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. MrkvArray : np.array Markov transition matrix between discrete macroeconomic states. MrkvArray[i,j] is probability of being in state j next period conditional on being in state i this period. PermGroFac : float Expected permanent income growth factor at the end of this period, for the *individual*'s productivity. PermGroFacAgg : [float] Expected aggregate productivity growth in each Markov macro state. aXtraGrid : np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. BoroCnstArt : float Artificial borrowing constraint; minimum allowable end-of-period asset-to- permanent-income ratio. Unlike other models, this *can't* be None. Mgrid : np.array A grid of aggregate market resourses to permanent income in the economy. AFunc : [function] Aggregate savings as a function of aggregate market resources, for each Markov macro state. Rfunc : function The net interest factor on assets as a function of capital ratio k. wFunc : function The wage rate for labor as a function of capital-to-labor ratio k. DeprFac : float Capital Depreciation Rate Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (linear interpolation over linear interpola- tions) and marginal value function vPfunc.
[ "Solve", "one", "period", "of", "a", "consumption", "-", "saving", "problem", "with", "idiosyncratic", "and", "aggregate", "shocks", "(", "transitory", "and", "permanent", ")", ".", "Moreover", "the", "macroeconomic", "state", "follows", "a", "Markov", "process", "that", "determines", "the", "income", "distribution", "and", "aggregate", "permanent", "growth", "factor", ".", "This", "is", "a", "basic", "solver", "that", "can", "t", "handle", "cubic", "splines", "nor", "can", "it", "calculate", "a", "value", "function", "." ]
python
train
54.892157
dh1tw/pyhamtools
pyhamtools/lookuplib.py
https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/lookuplib.py#L627-L673
def lookup_zone_exception(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)): """ Returns a CQ Zone if an exception exists for the given callsign Args: callsign (string): Amateur radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: Value of the the CQ Zone exception which exists for this callsign (at the given time) Raises: KeyError: No matching callsign found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN. >>> from pyhamtools import LookupLib >>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey") >>> print my_lookuplib.lookup_zone_exception("DP0GVN") 38 The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore in CQ Zone 38 Note: This method is available for - clublogxml - redis """ callsign = callsign.strip().upper() if self._lookuptype == "clublogxml": return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index) elif self._lookuptype == "redis": data_dict, index = self._get_dicts_from_redis("_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign) return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index) #no matching case raise KeyError
[ "def", "lookup_zone_exception", "(", "self", ",", "callsign", ",", "timestamp", "=", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "tzinfo", "=", "UTC", ")", ")", ":", "callsign", "=", "callsign", ".", "strip", "(", ")", ".", "upper", "(", ")", "if", "self", ".", "_lookuptype", "==", "\"clublogxml\"", ":", "return", "self", ".", "_check_zone_exception_for_date", "(", "callsign", ",", "timestamp", ",", "self", ".", "_zone_exceptions", ",", "self", ".", "_zone_exceptions_index", ")", "elif", "self", ".", "_lookuptype", "==", "\"redis\"", ":", "data_dict", ",", "index", "=", "self", ".", "_get_dicts_from_redis", "(", "\"_zone_ex_\"", ",", "\"_zone_ex_index_\"", ",", "self", ".", "_redis_prefix", ",", "callsign", ")", "return", "self", ".", "_check_zone_exception_for_date", "(", "callsign", ",", "timestamp", ",", "data_dict", ",", "index", ")", "#no matching case", "raise", "KeyError" ]
Returns a CQ Zone if an exception exists for the given callsign Args: callsign (string): Amateur radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: Value of the the CQ Zone exception which exists for this callsign (at the given time) Raises: KeyError: No matching callsign found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN. >>> from pyhamtools import LookupLib >>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey") >>> print my_lookuplib.lookup_zone_exception("DP0GVN") 38 The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore in CQ Zone 38 Note: This method is available for - clublogxml - redis
[ "Returns", "a", "CQ", "Zone", "if", "an", "exception", "exists", "for", "the", "given", "callsign" ]
python
train
35.212766
s1s1ty/py-jsonq
pyjsonq/query.py
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L32-L44
def __parse_json_data(self, data): """Process Json data :@param data :@type data: json/dict :throws TypeError """ if isinstance(data, dict) or isinstance(data, list): self._raw_data = data self._json_data = copy.deepcopy(self._raw_data) else: raise TypeError("Provided Data is not json")
[ "def", "__parse_json_data", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", "or", "isinstance", "(", "data", ",", "list", ")", ":", "self", ".", "_raw_data", "=", "data", "self", ".", "_json_data", "=", "copy", ".", "deepcopy", "(", "self", ".", "_raw_data", ")", "else", ":", "raise", "TypeError", "(", "\"Provided Data is not json\"", ")" ]
Process Json data :@param data :@type data: json/dict :throws TypeError
[ "Process", "Json", "data" ]
python
train
28.384615
usc-isi-i2/etk
etk/extractors/dbpedia_spotlight_extractor.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/dbpedia_spotlight_extractor.py#L35-L58
def extract(self, text: str, confidence=0.5, filter=['Person', 'Place', 'Organisation']) -> List[Extraction]: """ Extract with the input text, confidence and fields filter to be used. Args: text (str): text input to be annotated confidence (float): the confidence of the annotation filter (List[str]): the fields that to be extracted Returns: List[Extraction] """ filter = ','.join(filter) search_data = [('confidence', confidence), ('text', text), ('types', filter)] search_headers = {'Accept': 'application/json'} r = requests.post(self._search_url, data=search_data, headers=search_headers) results = r.json() last_results = self._combiner(results) return last_results
[ "def", "extract", "(", "self", ",", "text", ":", "str", ",", "confidence", "=", "0.5", ",", "filter", "=", "[", "'Person'", ",", "'Place'", ",", "'Organisation'", "]", ")", "->", "List", "[", "Extraction", "]", ":", "filter", "=", "','", ".", "join", "(", "filter", ")", "search_data", "=", "[", "(", "'confidence'", ",", "confidence", ")", ",", "(", "'text'", ",", "text", ")", ",", "(", "'types'", ",", "filter", ")", "]", "search_headers", "=", "{", "'Accept'", ":", "'application/json'", "}", "r", "=", "requests", ".", "post", "(", "self", ".", "_search_url", ",", "data", "=", "search_data", ",", "headers", "=", "search_headers", ")", "results", "=", "r", ".", "json", "(", ")", "last_results", "=", "self", ".", "_combiner", "(", "results", ")", "return", "last_results" ]
Extract with the input text, confidence and fields filter to be used. Args: text (str): text input to be annotated confidence (float): the confidence of the annotation filter (List[str]): the fields that to be extracted Returns: List[Extraction]
[ "Extract", "with", "the", "input", "text", "confidence", "and", "fields", "filter", "to", "be", "used", ".", "Args", ":", "text", "(", "str", ")", ":", "text", "input", "to", "be", "annotated", "confidence", "(", "float", ")", ":", "the", "confidence", "of", "the", "annotation", "filter", "(", "List", "[", "str", "]", ")", ":", "the", "fields", "that", "to", "be", "extracted" ]
python
train
38.375
iotile/coretools
iotilecore/iotile/core/hw/transport/adapter/legacy.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapter/legacy.py#L384-L413
def debug_sync(self, conn_id, cmd_name, cmd_args, progress_callback): """Asynchronously complete a named debug command. The command name and arguments are passed to the underlying device adapter and interpreted there. If the command is long running, progress_callback may be used to provide status updates. Callback is called when the command has finished. Args: conn_id (int): A unique identifier that will refer to this connection cmd_name (string): the name of the debug command we want to invoke cmd_args (dict): any arguments that we want to send with this command. progress_callback (callable): A function to be called with status on our progress, called as: progress_callback(done_count, total_count) """ done = threading.Event() result = {} def _debug_done(conn_id, adapter_id, success, retval, reason): result['success'] = success result['failure_reason'] = reason result['return_value'] = retval done.set() self.debug_async(conn_id, cmd_name, cmd_args, progress_callback, _debug_done) done.wait() return result
[ "def", "debug_sync", "(", "self", ",", "conn_id", ",", "cmd_name", ",", "cmd_args", ",", "progress_callback", ")", ":", "done", "=", "threading", ".", "Event", "(", ")", "result", "=", "{", "}", "def", "_debug_done", "(", "conn_id", ",", "adapter_id", ",", "success", ",", "retval", ",", "reason", ")", ":", "result", "[", "'success'", "]", "=", "success", "result", "[", "'failure_reason'", "]", "=", "reason", "result", "[", "'return_value'", "]", "=", "retval", "done", ".", "set", "(", ")", "self", ".", "debug_async", "(", "conn_id", ",", "cmd_name", ",", "cmd_args", ",", "progress_callback", ",", "_debug_done", ")", "done", ".", "wait", "(", ")", "return", "result" ]
Asynchronously complete a named debug command. The command name and arguments are passed to the underlying device adapter and interpreted there. If the command is long running, progress_callback may be used to provide status updates. Callback is called when the command has finished. Args: conn_id (int): A unique identifier that will refer to this connection cmd_name (string): the name of the debug command we want to invoke cmd_args (dict): any arguments that we want to send with this command. progress_callback (callable): A function to be called with status on our progress, called as: progress_callback(done_count, total_count)
[ "Asynchronously", "complete", "a", "named", "debug", "command", "." ]
python
train
40.533333
pantsbuild/pants
src/python/pants/base/worker_pool.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/base/worker_pool.py#L57-L80
def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None): """Submit work to be executed in the background. :param work: The work to execute. :param workunit_parent: If specified, work is accounted for under this workunit. :param on_success: If specified, a callable taking a single argument, which will be a list of return values of each invocation, in order. Called only if all work succeeded. :param on_failure: If specified, a callable taking a single argument, which is an exception thrown in the work. :return: `multiprocessing.pool.MapResult` Don't do work in on_success: not only will it block the result handling thread, but that thread is not a worker and doesn't have a logging context etc. Use it just to submit further work to the pool. """ if work is None or len(work.args_tuples) == 0: # map_async hangs on 0-length iterables. if on_success: on_success([]) else: def do_work(*args): self._do_work(work.func, *args, workunit_name=work.workunit_name, workunit_parent=workunit_parent, on_failure=on_failure) return self._pool.map_async(do_work, work.args_tuples, chunksize=1, callback=on_success)
[ "def", "submit_async_work", "(", "self", ",", "work", ",", "workunit_parent", "=", "None", ",", "on_success", "=", "None", ",", "on_failure", "=", "None", ")", ":", "if", "work", "is", "None", "or", "len", "(", "work", ".", "args_tuples", ")", "==", "0", ":", "# map_async hangs on 0-length iterables.", "if", "on_success", ":", "on_success", "(", "[", "]", ")", "else", ":", "def", "do_work", "(", "*", "args", ")", ":", "self", ".", "_do_work", "(", "work", ".", "func", ",", "*", "args", ",", "workunit_name", "=", "work", ".", "workunit_name", ",", "workunit_parent", "=", "workunit_parent", ",", "on_failure", "=", "on_failure", ")", "return", "self", ".", "_pool", ".", "map_async", "(", "do_work", ",", "work", ".", "args_tuples", ",", "chunksize", "=", "1", ",", "callback", "=", "on_success", ")" ]
Submit work to be executed in the background. :param work: The work to execute. :param workunit_parent: If specified, work is accounted for under this workunit. :param on_success: If specified, a callable taking a single argument, which will be a list of return values of each invocation, in order. Called only if all work succeeded. :param on_failure: If specified, a callable taking a single argument, which is an exception thrown in the work. :return: `multiprocessing.pool.MapResult` Don't do work in on_success: not only will it block the result handling thread, but that thread is not a worker and doesn't have a logging context etc. Use it just to submit further work to the pool.
[ "Submit", "work", "to", "be", "executed", "in", "the", "background", "." ]
python
train
52.541667
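submit_async_work builds on multiprocessing.pool's map_async with a completion callback. A minimal standard-library sketch of that mechanism, using a ThreadPool and a toy work function; none of the names below are the pants API itself.

from multiprocessing.pool import ThreadPool

def work(args):
    x, y = args
    return x + y

def on_success(results):
    # Called once with the list of per-invocation return values, in order.
    print('all done:', results)

pool = ThreadPool(processes=2)
async_result = pool.map_async(work, [(1, 2), (3, 4)], chunksize=1, callback=on_success)
async_result.wait()  # block until the work (and the callback) has run
pool.close()
pool.join()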
apple/turicreate
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/topic_model/topic_model.py#L303-L338
def _get_summary_struct(self): """ Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<label>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object. """ section_titles=['Schema','Settings'] vocab_length = len(self.vocabulary) verbose = self.verbose == 1 sections=[ [ ('Vocabulary Size',_precomputed_field(vocab_length)) ], [ ('Number of Topics', 'num_topics'), ('alpha','alpha'), ('beta','beta'), ('Iterations', 'num_iterations'), ('Training time', 'training_time'), ('Verbose', _precomputed_field(verbose)) ] ] return (sections, section_titles)
[ "def", "_get_summary_struct", "(", "self", ")", ":", "section_titles", "=", "[", "'Schema'", ",", "'Settings'", "]", "vocab_length", "=", "len", "(", "self", ".", "vocabulary", ")", "verbose", "=", "self", ".", "verbose", "==", "1", "sections", "=", "[", "[", "(", "'Vocabulary Size'", ",", "_precomputed_field", "(", "vocab_length", ")", ")", "]", ",", "[", "(", "'Number of Topics'", ",", "'num_topics'", ")", ",", "(", "'alpha'", ",", "'alpha'", ")", ",", "(", "'beta'", ",", "'beta'", ")", ",", "(", "'Iterations'", ",", "'num_iterations'", ")", ",", "(", "'Training time'", ",", "'training_time'", ")", ",", "(", "'Verbose'", ",", "_precomputed_field", "(", "verbose", ")", ")", "]", "]", "return", "(", "sections", ",", "section_titles", ")" ]
Returns a structured description of the model, including (where relevant) the schema of the training data, description of the training data, training statistics, and model hyperparameters. Returns ------- sections : list (of list of tuples) A list of summary sections. Each section is a list. Each item in a section list is a tuple of the form: ('<label>','<field>') section_titles: list A list of section titles. The order matches that of the 'sections' object.
[ "Returns", "a", "structured", "description", "of", "the", "model", "including", "(", "where", "relevant", ")", "the", "schema", "of", "the", "training", "data", "description", "of", "the", "training", "data", "training", "statistics", "and", "model", "hyperparameters", "." ]
python
train
36.777778
phoebe-project/phoebe2
phoebe/backend/backends.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/backends.py#L404-L427
def run(self, b, compute, times=[], **kwargs): """ if within mpirun, workers should call _run_worker instead of run """ self.run_checks(b, compute, times, **kwargs) logger.debug("rank:{}/{} calling get_packet_and_syns".format(mpi.myrank, mpi.nprocs)) packet, new_syns = self.get_packet_and_syns(b, compute, times, **kwargs) if mpi.enabled: # broadcast the packet to ALL workers mpi.comm.bcast(packet, root=0) # now even the master can become a worker and take on a chunk packet['b'] = b rpacketlists = self._run_chunk(**packet) # now receive all packetlists rpacketlists_per_worker = mpi.comm.gather(rpacketlists, root=0) else: rpacketlists_per_worker = [self._run_chunk(**packet)] return self._fill_syns(new_syns, rpacketlists_per_worker)
[ "def", "run", "(", "self", ",", "b", ",", "compute", ",", "times", "=", "[", "]", ",", "*", "*", "kwargs", ")", ":", "self", ".", "run_checks", "(", "b", ",", "compute", ",", "times", ",", "*", "*", "kwargs", ")", "logger", ".", "debug", "(", "\"rank:{}/{} calling get_packet_and_syns\"", ".", "format", "(", "mpi", ".", "myrank", ",", "mpi", ".", "nprocs", ")", ")", "packet", ",", "new_syns", "=", "self", ".", "get_packet_and_syns", "(", "b", ",", "compute", ",", "times", ",", "*", "*", "kwargs", ")", "if", "mpi", ".", "enabled", ":", "# broadcast the packet to ALL workers", "mpi", ".", "comm", ".", "bcast", "(", "packet", ",", "root", "=", "0", ")", "# now even the master can become a worker and take on a chunk", "packet", "[", "'b'", "]", "=", "b", "rpacketlists", "=", "self", ".", "_run_chunk", "(", "*", "*", "packet", ")", "# now receive all packetlists", "rpacketlists_per_worker", "=", "mpi", ".", "comm", ".", "gather", "(", "rpacketlists", ",", "root", "=", "0", ")", "else", ":", "rpacketlists_per_worker", "=", "[", "self", ".", "_run_chunk", "(", "*", "*", "packet", ")", "]", "return", "self", ".", "_fill_syns", "(", "new_syns", ",", "rpacketlists_per_worker", ")" ]
if within mpirun, workers should call _run_worker instead of run
[ "if", "within", "mpirun", "workers", "should", "call", "_run_worker", "instead", "of", "run" ]
python
train
37.083333
garyelephant/pygrok
pygrok/pygrok.py
https://github.com/garyelephant/pygrok/blob/de9e3f92f5a52f0fc101aaa0f694f52aee6afba8/pygrok/pygrok.py#L33-L57
def match(self, text): """If text is matched with pattern, return variable names specified(%{pattern:variable name}) in pattern and their corresponding values.If not matched, return None. custom patterns can be passed in by custom_patterns(pattern name, pattern regular expression pair) or custom_patterns_dir. """ match_obj = None if self.fullmatch: match_obj = self.regex_obj.fullmatch(text) else: match_obj = self.regex_obj.search(text) if match_obj == None: return None matches = match_obj.groupdict() for key,match in matches.items(): try: if self.type_mapper[key] == 'int': matches[key] = int(match) if self.type_mapper[key] == 'float': matches[key] = float(match) except (TypeError, KeyError) as e: pass return matches
[ "def", "match", "(", "self", ",", "text", ")", ":", "match_obj", "=", "None", "if", "self", ".", "fullmatch", ":", "match_obj", "=", "self", ".", "regex_obj", ".", "fullmatch", "(", "text", ")", "else", ":", "match_obj", "=", "self", ".", "regex_obj", ".", "search", "(", "text", ")", "if", "match_obj", "==", "None", ":", "return", "None", "matches", "=", "match_obj", ".", "groupdict", "(", ")", "for", "key", ",", "match", "in", "matches", ".", "items", "(", ")", ":", "try", ":", "if", "self", ".", "type_mapper", "[", "key", "]", "==", "'int'", ":", "matches", "[", "key", "]", "=", "int", "(", "match", ")", "if", "self", ".", "type_mapper", "[", "key", "]", "==", "'float'", ":", "matches", "[", "key", "]", "=", "float", "(", "match", ")", "except", "(", "TypeError", ",", "KeyError", ")", "as", "e", ":", "pass", "return", "matches" ]
If text is matched with pattern, return variable names specified
(%{pattern:variable name}) in pattern and their corresponding values.
If not matched, return None.
Custom patterns can be passed in by custom_patterns (pattern name,
pattern regular expression pair) or custom_patterns_dir.
[ "If", "text", "is", "matched", "with", "pattern", "return", "variable", "names", "specified", "(", "%", "{", "pattern", ":", "variable", "name", "}", ")", "in", "pattern", "and", "their", "corresponding", "values", ".", "If", "not", "matched", "return", "None", ".", "custom", "patterns", "can", "be", "passed", "in", "by", "custom_patterns", "(", "pattern", "name", "pattern", "regular", "expression", "pair", ")", "or", "custom_patterns_dir", "." ]
python
test
37.88
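The match() method above amounts to named-group regex matching plus optional int/float coercion. A plain `re` sketch of the same idea; this is not pygrok itself, and the pattern and type map are invented for illustration.

import re

pattern = re.compile(r'(?P<client>\d+\.\d+\.\d+\.\d+) - (?P<bytes>\d+)')
type_mapper = {'bytes': 'int'}

def match(text):
    m = pattern.search(text)
    if m is None:
        return None
    groups = m.groupdict()
    for key, value in groups.items():
        # Coerce values whose names are registered in the type map.
        if type_mapper.get(key) == 'int':
            groups[key] = int(value)
        elif type_mapper.get(key) == 'float':
            groups[key] = float(value)
    return groups

print(match('10.0.0.1 - 512'))  # {'client': '10.0.0.1', 'bytes': 512}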
pantsbuild/pants
src/python/pants/backend/jvm/targets/jarable.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/targets/jarable.py#L32-L43
def get_artifact_info(self): """Returns a tuple composed of a :class:`pants.java.jar.JarDependency` describing the jar for this target and a bool indicating if this target is exportable. """ exported = bool(self.provides) org = self.provides.org if exported else 'internal' name = self.provides.name if exported else self.identifier # TODO(John Sirois): This should return something less than a JarDependency encapsulating just # the org and name. Perhaps a JarFamily? return JarDependency(org=org, name=name, rev=None), exported
[ "def", "get_artifact_info", "(", "self", ")", ":", "exported", "=", "bool", "(", "self", ".", "provides", ")", "org", "=", "self", ".", "provides", ".", "org", "if", "exported", "else", "'internal'", "name", "=", "self", ".", "provides", ".", "name", "if", "exported", "else", "self", ".", "identifier", "# TODO(John Sirois): This should return something less than a JarDependency encapsulating just", "# the org and name. Perhaps a JarFamily?", "return", "JarDependency", "(", "org", "=", "org", ",", "name", "=", "name", ",", "rev", "=", "None", ")", ",", "exported" ]
Returns a tuple composed of a :class:`pants.java.jar.JarDependency` describing the jar for this target and a bool indicating if this target is exportable.
[ "Returns", "a", "tuple", "composed", "of", "a", ":", "class", ":", "pants", ".", "java", ".", "jar", ".", "JarDependency", "describing", "the", "jar", "for", "this", "target", "and", "a", "bool", "indicating", "if", "this", "target", "is", "exportable", "." ]
python
train
46.416667
sethmlarson/trytravis
trytravis.py
https://github.com/sethmlarson/trytravis/blob/d92ed708fe71d8db93a6df8077d23ee39ec0364e/trytravis.py#L128-L138
def _load_github_repo(): """ Loads the GitHub repository from the users config. """ if 'TRAVIS' in os.environ: raise RuntimeError('Detected that we are running in Travis. ' 'Stopping to prevent infinite loops.') try: with open(os.path.join(config_dir, 'repo'), 'r') as f: return f.read() except (OSError, IOError): raise RuntimeError('Could not find your repository. ' 'Have you ran `trytravis --repo`?')
[ "def", "_load_github_repo", "(", ")", ":", "if", "'TRAVIS'", "in", "os", ".", "environ", ":", "raise", "RuntimeError", "(", "'Detected that we are running in Travis. '", "'Stopping to prevent infinite loops.'", ")", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "config_dir", ",", "'repo'", ")", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "raise", "RuntimeError", "(", "'Could not find your repository. '", "'Have you ran `trytravis --repo`?'", ")" ]
Loads the GitHub repository from the user's config.
[ "Loads", "the", "GitHub", "repository", "from", "the", "users", "config", "." ]
python
train
45.454545
cons3rt/pycons3rt
pycons3rt/awsapi/ec2util.py
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/awsapi/ec2util.py#L256-L311
def allocate_elastic_ip(self): """Allocates an elastic IP address :return: Dict with allocation ID and Public IP that were created :raises: AWSAPIError, EC2UtilError """ log = logging.getLogger(self.cls_logger + '.allocate_elastic_ip') # Attempt to allocate a new elastic IP log.info('Attempting to allocate an elastic IP...') try: response = self.client.allocate_address( DryRun=False, Domain='vpc' ) except ClientError: _, ex, trace = sys.exc_info() msg = 'Unable to allocate a new elastic IP address\n{e}'.format(e=str(ex)) log.error(msg) raise AWSAPIError, msg, trace allocation_id = response['AllocationId'] public_ip = response['PublicIp'] log.info('Allocated Elastic IP with ID {a} and Public IP address {p}'. format(a=allocation_id, p=public_ip)) # Verify the Address was allocated successfully log.info('Verifying the elastic IP address was allocated and is available ' 'for use...') ready = False verification_timer = [2]*60 + [5]*60 + [10]*18 num_checks = len(verification_timer) for i in range(0, num_checks): wait_time = verification_timer[i] try: self.client.describe_addresses( DryRun=False, AllocationIds=[allocation_id] ) except ClientError: _, ex, trace = sys.exc_info() log.info('Elastic IP address {p} with Allocation ID {a} is not available for use, trying again in ' '{w} sec...\n{e}'.format(p=public_ip, a=allocation_id, w=wait_time, e=str(ex))) time.sleep(wait_time) else: log.info('Elastic IP {p} with Allocation ID {a} is available for use'.format( p=public_ip, a=allocation_id)) ready = True break if ready: return {'AllocationId': allocation_id, 'PublicIp': public_ip} else: msg = 'Unable to verify existence of new Elastic IP {p} with Allocation ID: {a}'. \ format(p=public_ip, a=allocation_id) log.error(msg) raise EC2UtilError(msg)
[ "def", "allocate_elastic_ip", "(", "self", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "self", ".", "cls_logger", "+", "'.allocate_elastic_ip'", ")", "# Attempt to allocate a new elastic IP", "log", ".", "info", "(", "'Attempting to allocate an elastic IP...'", ")", "try", ":", "response", "=", "self", ".", "client", ".", "allocate_address", "(", "DryRun", "=", "False", ",", "Domain", "=", "'vpc'", ")", "except", "ClientError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'Unable to allocate a new elastic IP address\\n{e}'", ".", "format", "(", "e", "=", "str", "(", "ex", ")", ")", "log", ".", "error", "(", "msg", ")", "raise", "AWSAPIError", ",", "msg", ",", "trace", "allocation_id", "=", "response", "[", "'AllocationId'", "]", "public_ip", "=", "response", "[", "'PublicIp'", "]", "log", ".", "info", "(", "'Allocated Elastic IP with ID {a} and Public IP address {p}'", ".", "format", "(", "a", "=", "allocation_id", ",", "p", "=", "public_ip", ")", ")", "# Verify the Address was allocated successfully", "log", ".", "info", "(", "'Verifying the elastic IP address was allocated and is available '", "'for use...'", ")", "ready", "=", "False", "verification_timer", "=", "[", "2", "]", "*", "60", "+", "[", "5", "]", "*", "60", "+", "[", "10", "]", "*", "18", "num_checks", "=", "len", "(", "verification_timer", ")", "for", "i", "in", "range", "(", "0", ",", "num_checks", ")", ":", "wait_time", "=", "verification_timer", "[", "i", "]", "try", ":", "self", ".", "client", ".", "describe_addresses", "(", "DryRun", "=", "False", ",", "AllocationIds", "=", "[", "allocation_id", "]", ")", "except", "ClientError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "log", ".", "info", "(", "'Elastic IP address {p} with Allocation ID {a} is not available for use, trying again in '", "'{w} sec...\\n{e}'", ".", "format", "(", "p", "=", "public_ip", ",", "a", "=", "allocation_id", ",", "w", "=", "wait_time", ",", "e", "=", "str", "(", "ex", ")", ")", ")", "time", ".", "sleep", "(", "wait_time", ")", "else", ":", "log", ".", "info", "(", "'Elastic IP {p} with Allocation ID {a} is available for use'", ".", "format", "(", "p", "=", "public_ip", ",", "a", "=", "allocation_id", ")", ")", "ready", "=", "True", "break", "if", "ready", ":", "return", "{", "'AllocationId'", ":", "allocation_id", ",", "'PublicIp'", ":", "public_ip", "}", "else", ":", "msg", "=", "'Unable to verify existence of new Elastic IP {p} with Allocation ID: {a}'", ".", "format", "(", "p", "=", "public_ip", ",", "a", "=", "allocation_id", ")", "log", ".", "error", "(", "msg", ")", "raise", "EC2UtilError", "(", "msg", ")" ]
Allocates an elastic IP address :return: Dict with allocation ID and Public IP that were created :raises: AWSAPIError, EC2UtilError
[ "Allocates", "an", "elastic", "IP", "address" ]
python
train
41.964286
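The method wraps two EC2 calls, allocate_address and describe_addresses. A short boto3 sketch of those calls, assuming AWS credentials and a default region are already configured; it retries far less exhaustively than the method above and is not the pycons3rt wrapper.

import time

import boto3
from botocore.exceptions import ClientError

ec2 = boto3.client('ec2')
response = ec2.allocate_address(Domain='vpc')
allocation_id = response['AllocationId']
public_ip = response['PublicIp']

for wait_time in (2, 5, 10):
    try:
        ec2.describe_addresses(AllocationIds=[allocation_id])
    except ClientError:
        time.sleep(wait_time)  # address not visible yet, try again
    else:
        print('Elastic IP {p} ({a}) is ready'.format(p=public_ip, a=allocation_id))
        break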
swistakm/graceful
src/graceful/serializers.py
https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/serializers.py#L41-L66
def _get_fields(mcs, bases, namespace): """Create fields dictionary to be used in resource class namespace. Pop all field objects from attributes dict (namespace) and store them under _field_storage_key atrribute. Also collect all fields from base classes in order that ensures fields can be overriden. Args: bases: all base classes of created serializer class namespace (dict): namespace as dictionary of attributes """ fields = [ (name, namespace.pop(name)) for name, attribute in list(namespace.items()) if isinstance(attribute, BaseField) ] for base in reversed(bases): if hasattr(base, mcs._fields_storage_key): fields = list( getattr(base, mcs._fields_storage_key).items() ) + fields return OrderedDict(fields)
[ "def", "_get_fields", "(", "mcs", ",", "bases", ",", "namespace", ")", ":", "fields", "=", "[", "(", "name", ",", "namespace", ".", "pop", "(", "name", ")", ")", "for", "name", ",", "attribute", "in", "list", "(", "namespace", ".", "items", "(", ")", ")", "if", "isinstance", "(", "attribute", ",", "BaseField", ")", "]", "for", "base", "in", "reversed", "(", "bases", ")", ":", "if", "hasattr", "(", "base", ",", "mcs", ".", "_fields_storage_key", ")", ":", "fields", "=", "list", "(", "getattr", "(", "base", ",", "mcs", ".", "_fields_storage_key", ")", ".", "items", "(", ")", ")", "+", "fields", "return", "OrderedDict", "(", "fields", ")" ]
Create fields dictionary to be used in resource class namespace.

Pop all field objects from attributes dict (namespace) and store them
under the _field_storage_key attribute. Also collect all fields from base
classes in an order that ensures fields can be overridden.

Args:
    bases: all base classes of created serializer class
    namespace (dict): namespace as dictionary of attributes
[ "Create", "fields", "dictionary", "to", "be", "used", "in", "resource", "class", "namespace", "." ]
python
train
35.076923
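_get_fields is the classic metaclass trick of collecting declared descriptor fields, including those inherited from base classes. A self-contained sketch of that pattern with a toy Field class (not graceful's BaseField):

from collections import OrderedDict

class Field(object):
    pass

class Meta(type):
    def __new__(mcs, name, bases, namespace):
        # Pull declared Field instances out of the class body...
        fields = [(key, namespace.pop(key))
                  for key, value in list(namespace.items())
                  if isinstance(value, Field)]
        # ...then prepend fields inherited from base classes so they can be overridden.
        for base in reversed(bases):
            fields = list(getattr(base, '_fields', {}).items()) + fields
        namespace['_fields'] = OrderedDict(fields)
        return super(Meta, mcs).__new__(mcs, name, bases, namespace)

class Base(object, metaclass=Meta):
    name = Field()

class Child(Base):
    age = Field()

print(list(Child._fields))  # ['name', 'age']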
aio-libs/aiomcache
aiomcache/client.py
https://github.com/aio-libs/aiomcache/blob/75d44b201aea91bc2856b10940922d5ebfbfcd7b/aiomcache/client.py#L153-L162
def gets(self, conn, key, default=None): """Gets a single value from the server together with the cas token. :param key: ``bytes``, is the key for the item being fetched :param default: default value if there is no value. :return: ``bytes``, ``bytes tuple with the value and the cas """ values, cas_tokens = yield from self._multi_get( conn, key, with_cas=True) return values.get(key, default), cas_tokens.get(key)
[ "def", "gets", "(", "self", ",", "conn", ",", "key", ",", "default", "=", "None", ")", ":", "values", ",", "cas_tokens", "=", "yield", "from", "self", ".", "_multi_get", "(", "conn", ",", "key", ",", "with_cas", "=", "True", ")", "return", "values", ".", "get", "(", "key", ",", "default", ")", ",", "cas_tokens", ".", "get", "(", "key", ")" ]
Gets a single value from the server together with the cas token.

:param key: ``bytes``, is the key for the item being fetched
:param default: default value if there is no value.
:return: ``bytes``, ``bytes`` tuple with the value and the cas token
[ "Gets", "a", "single", "value", "from", "the", "server", "together", "with", "the", "cas", "token", "." ]
python
train
47.4
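Hedged usage sketch: in aiomcache the public Client injects the conn argument via a decorator, so callers only pass the key. Assumes a memcached server on localhost:11211 and a reasonably recent aiomcache; the exact call style may differ between library versions.

import asyncio

import aiomcache

async def main():
    client = aiomcache.Client('127.0.0.1', 11211)
    await client.set(b'answer', b'42')
    value, cas_token = await client.gets(b'answer')
    print(value, cas_token)

asyncio.run(main())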
SeleniumHQ/selenium
py/selenium/webdriver/common/touch_actions.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/common/touch_actions.py#L154-L166
def flick(self, xspeed, yspeed): """ Flicks, starting anywhere on the screen. :Args: - xspeed: The X speed in pixels per second. - yspeed: The Y speed in pixels per second. """ self._actions.append(lambda: self._driver.execute( Command.FLICK, { 'xspeed': int(xspeed), 'yspeed': int(yspeed)})) return self
[ "def", "flick", "(", "self", ",", "xspeed", ",", "yspeed", ")", ":", "self", ".", "_actions", ".", "append", "(", "lambda", ":", "self", ".", "_driver", ".", "execute", "(", "Command", ".", "FLICK", ",", "{", "'xspeed'", ":", "int", "(", "xspeed", ")", ",", "'yspeed'", ":", "int", "(", "yspeed", ")", "}", ")", ")", "return", "self" ]
Flicks, starting anywhere on the screen. :Args: - xspeed: The X speed in pixels per second. - yspeed: The Y speed in pixels per second.
[ "Flicks", "starting", "anywhere", "on", "the", "screen", "." ]
python
train
31
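Hedged usage sketch: flick() only queues the gesture, perform() sends it. Assumes a touch-enabled driver/device is available; the Chrome driver, URL and speeds below are arbitrary choices, not requirements of the API.

from selenium import webdriver
from selenium.webdriver.common.touch_actions import TouchActions

driver = webdriver.Chrome()
driver.get('https://example.com')
TouchActions(driver).flick(0, -300).perform()  # flick with a vertical speed of 300 px/s
driver.quit()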
cnt-dev/cnt.rulebase
cnt/rulebase/rules/interval_based_operations/interval_based_replacer.py
https://github.com/cnt-dev/cnt.rulebase/blob/d1c767c356d8ee05b23ec5b04aaac84784ee547c/cnt/rulebase/rules/interval_based_operations/interval_based_replacer.py#L48-L75
def _result(self) -> ResultLazyType: """ ``self.config.replacer_function``(``Callable[[str], str]``) must exists. """ config = cast(IntervalsCollectionBasedReplacerConfig, self.config) diff_acc = 0 for interval, aggregated_mark in self.continuous_intervals(): start, end = interval processed_start = start + diff_acc processed_end = end + diff_acc segment = self.input_sequence[start:end] if aggregated_mark is not None: processed_segment = config.labeler2repl[cast(Type[workflow.IntervalLabeler], aggregated_mark)](segment) if not processed_segment: # segment is removed. processed_end = processed_start else: processed_end = processed_start + len(processed_segment) diff_acc += len(processed_segment) - len(segment) segment = processed_segment yield segment, (interval, (processed_start, processed_end), aggregated_mark is not None)
[ "def", "_result", "(", "self", ")", "->", "ResultLazyType", ":", "config", "=", "cast", "(", "IntervalsCollectionBasedReplacerConfig", ",", "self", ".", "config", ")", "diff_acc", "=", "0", "for", "interval", ",", "aggregated_mark", "in", "self", ".", "continuous_intervals", "(", ")", ":", "start", ",", "end", "=", "interval", "processed_start", "=", "start", "+", "diff_acc", "processed_end", "=", "end", "+", "diff_acc", "segment", "=", "self", ".", "input_sequence", "[", "start", ":", "end", "]", "if", "aggregated_mark", "is", "not", "None", ":", "processed_segment", "=", "config", ".", "labeler2repl", "[", "cast", "(", "Type", "[", "workflow", ".", "IntervalLabeler", "]", ",", "aggregated_mark", ")", "]", "(", "segment", ")", "if", "not", "processed_segment", ":", "# segment is removed.", "processed_end", "=", "processed_start", "else", ":", "processed_end", "=", "processed_start", "+", "len", "(", "processed_segment", ")", "diff_acc", "+=", "len", "(", "processed_segment", ")", "-", "len", "(", "segment", ")", "segment", "=", "processed_segment", "yield", "segment", ",", "(", "interval", ",", "(", "processed_start", ",", "processed_end", ")", ",", "aggregated_mark", "is", "not", "None", ")" ]
``self.config.replacer_function``(``Callable[[str], str]``) must exist.
[ "self", ".", "config", ".", "replacer_function", "(", "Callable", "[[", "str", "]", "str", "]", ")", "must", "exists", "." ]
python
train
40.5
klmitch/bark
bark/conversions.py
https://github.com/klmitch/bark/blob/6e0e002d55f01fee27e3e45bb86e30af1bfeef36/bark/conversions.py#L411-L428
def convert(self, request, response, data): """ Performs the desired Conversion. :param request: The webob Request object describing the request. :param response: The webob Response object describing the response. :param data: The data dictionary returned by the prepare() method. :returns: A string, the results of which are the desired conversion. """ # Notes are in bark.notes dictionary return self.escape(request.environ.get('bark.notes', {}).get( self.modifier.param, '-'))
[ "def", "convert", "(", "self", ",", "request", ",", "response", ",", "data", ")", ":", "# Notes are in bark.notes dictionary", "return", "self", ".", "escape", "(", "request", ".", "environ", ".", "get", "(", "'bark.notes'", ",", "{", "}", ")", ".", "get", "(", "self", ".", "modifier", ".", "param", ",", "'-'", ")", ")" ]
Performs the desired Conversion. :param request: The webob Request object describing the request. :param response: The webob Response object describing the response. :param data: The data dictionary returned by the prepare() method. :returns: A string, the results of which are the desired conversion.
[ "Performs", "the", "desired", "Conversion", "." ]
python
train
35.388889
delph-in/pydelphin
delphin/itsdb.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L1648-L1683
def match_rows(rows1, rows2, key, sort_keys=True): """ Yield triples of `(value, left_rows, right_rows)` where `left_rows` and `right_rows` are lists of rows that share the same column value for *key*. This means that both *rows1* and *rows2* must have a column with the same name *key*. .. warning:: Both *rows1* and *rows2* will exist in memory for this operation, so it is not recommended for very large tables on low-memory systems. Args: rows1: a :class:`Table` or list of :class:`Record` objects rows2: a :class:`Table` or list of :class:`Record` objects key (str): the column name on which to match sort_keys (bool): if `True`, yield matching rows sorted by the matched key instead of the original order """ matched = OrderedDict() for i, rows in enumerate([rows1, rows2]): for row in rows: val = row[key] try: data = matched[val] except KeyError: matched[val] = ([], []) data = matched[val] data[i].append(row) vals = matched.keys() if sort_keys: vals = sorted(vals, key=safe_int) for val in vals: left, right = matched[val] yield (val, left, right)
[ "def", "match_rows", "(", "rows1", ",", "rows2", ",", "key", ",", "sort_keys", "=", "True", ")", ":", "matched", "=", "OrderedDict", "(", ")", "for", "i", ",", "rows", "in", "enumerate", "(", "[", "rows1", ",", "rows2", "]", ")", ":", "for", "row", "in", "rows", ":", "val", "=", "row", "[", "key", "]", "try", ":", "data", "=", "matched", "[", "val", "]", "except", "KeyError", ":", "matched", "[", "val", "]", "=", "(", "[", "]", ",", "[", "]", ")", "data", "=", "matched", "[", "val", "]", "data", "[", "i", "]", ".", "append", "(", "row", ")", "vals", "=", "matched", ".", "keys", "(", ")", "if", "sort_keys", ":", "vals", "=", "sorted", "(", "vals", ",", "key", "=", "safe_int", ")", "for", "val", "in", "vals", ":", "left", ",", "right", "=", "matched", "[", "val", "]", "yield", "(", "val", ",", "left", ",", "right", ")" ]
Yield triples of `(value, left_rows, right_rows)` where `left_rows` and `right_rows` are lists of rows that share the same column value for *key*. This means that both *rows1* and *rows2* must have a column with the same name *key*. .. warning:: Both *rows1* and *rows2* will exist in memory for this operation, so it is not recommended for very large tables on low-memory systems. Args: rows1: a :class:`Table` or list of :class:`Record` objects rows2: a :class:`Table` or list of :class:`Record` objects key (str): the column name on which to match sort_keys (bool): if `True`, yield matching rows sorted by the matched key instead of the original order
[ "Yield", "triples", "of", "(", "value", "left_rows", "right_rows", ")", "where", "left_rows", "and", "right_rows", "are", "lists", "of", "rows", "that", "share", "the", "same", "column", "value", "for", "*", "key", "*", ".", "This", "means", "that", "both", "*", "rows1", "*", "and", "*", "rows2", "*", "must", "have", "a", "column", "with", "the", "same", "name", "*", "key", "*", "." ]
python
train
35.277778
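A toy demonstration of the grouping behaviour documented above, using plain dicts in place of itsdb Records; the column values are invented, and pydelphin is assumed to be installed.

from delphin.itsdb import match_rows

rows1 = [{'i-id': '10', 'text': 'a'}, {'i-id': '20', 'text': 'b'}]
rows2 = [{'i-id': '20', 'text': 'B'}, {'i-id': '30', 'text': 'C'}]

for value, left, right in match_rows(rows1, rows2, 'i-id'):
    print(value, [r['text'] for r in left], [r['text'] for r in right])
# 10 ['a'] []
# 20 ['b'] ['B']
# 30 [] ['C']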
osrg/ryu
ryu/lib/stplib.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/stplib.py#L566-L603
def _spanning_tree_algorithm(self): """ Update tree roles. - Root bridge: all port is DESIGNATED_PORT. - Non root bridge: select one ROOT_PORT and some DESIGNATED_PORT, and the other port is set to NON_DESIGNATED_PORT.""" port_roles = {} root_port = self._select_root_port() if root_port is None: # My bridge is a root bridge. self.logger.info('Root bridge.', extra=self.dpid_str) root_priority = self.root_priority root_times = self.root_times for port_no in self.ports: if self.ports[port_no].state is not PORT_STATE_DISABLE: port_roles[port_no] = DESIGNATED_PORT else: # Other bridge is a root bridge. self.logger.info('Non root bridge.', extra=self.dpid_str) root_priority = root_port.designated_priority root_times = root_port.designated_times port_roles[root_port.ofport.port_no] = ROOT_PORT d_ports = self._select_designated_port(root_port) for port_no in d_ports: port_roles[port_no] = DESIGNATED_PORT for port in self.ports.values(): if port.state is not PORT_STATE_DISABLE: port_roles.setdefault(port.ofport.port_no, NON_DESIGNATED_PORT) return port_roles, root_priority, root_times
[ "def", "_spanning_tree_algorithm", "(", "self", ")", ":", "port_roles", "=", "{", "}", "root_port", "=", "self", ".", "_select_root_port", "(", ")", "if", "root_port", "is", "None", ":", "# My bridge is a root bridge.", "self", ".", "logger", ".", "info", "(", "'Root bridge.'", ",", "extra", "=", "self", ".", "dpid_str", ")", "root_priority", "=", "self", ".", "root_priority", "root_times", "=", "self", ".", "root_times", "for", "port_no", "in", "self", ".", "ports", ":", "if", "self", ".", "ports", "[", "port_no", "]", ".", "state", "is", "not", "PORT_STATE_DISABLE", ":", "port_roles", "[", "port_no", "]", "=", "DESIGNATED_PORT", "else", ":", "# Other bridge is a root bridge.", "self", ".", "logger", ".", "info", "(", "'Non root bridge.'", ",", "extra", "=", "self", ".", "dpid_str", ")", "root_priority", "=", "root_port", ".", "designated_priority", "root_times", "=", "root_port", ".", "designated_times", "port_roles", "[", "root_port", ".", "ofport", ".", "port_no", "]", "=", "ROOT_PORT", "d_ports", "=", "self", ".", "_select_designated_port", "(", "root_port", ")", "for", "port_no", "in", "d_ports", ":", "port_roles", "[", "port_no", "]", "=", "DESIGNATED_PORT", "for", "port", "in", "self", ".", "ports", ".", "values", "(", ")", ":", "if", "port", ".", "state", "is", "not", "PORT_STATE_DISABLE", ":", "port_roles", ".", "setdefault", "(", "port", ".", "ofport", ".", "port_no", ",", "NON_DESIGNATED_PORT", ")", "return", "port_roles", ",", "root_priority", ",", "root_times" ]
Update tree roles.
 - Root bridge: all ports are DESIGNATED_PORT.
 - Non root bridge: select one ROOT_PORT and some DESIGNATED_PORT,
   and the other ports are set to NON_DESIGNATED_PORT.
[ "Update", "tree", "roles", ".", "-", "Root", "bridge", ":", "all", "port", "is", "DESIGNATED_PORT", ".", "-", "Non", "root", "bridge", ":", "select", "one", "ROOT_PORT", "and", "some", "DESIGNATED_PORT", "and", "the", "other", "port", "is", "set", "to", "NON_DESIGNATED_PORT", "." ]
python
train
38.684211
autokey/autokey
lib/autokey/model.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/model.py#L970-L984
def _remove_non_serializable_store_entries(store: Store) -> dict: """ Copy all serializable data into a new dict, and skip the rest. This makes sure to keep the items during runtime, even if the user edits and saves the script. """ cleaned_store_data = {} for key, value in store.items(): if Script._is_serializable(key) and Script._is_serializable(value): cleaned_store_data[key] = value else: _logger.info("Skip non-serializable item in the local script store. Key: '{}', Value: '{}'. " "This item cannot be saved and therefore will be lost when autokey quits.".format( key, value )) return cleaned_store_data
[ "def", "_remove_non_serializable_store_entries", "(", "store", ":", "Store", ")", "->", "dict", ":", "cleaned_store_data", "=", "{", "}", "for", "key", ",", "value", "in", "store", ".", "items", "(", ")", ":", "if", "Script", ".", "_is_serializable", "(", "key", ")", "and", "Script", ".", "_is_serializable", "(", "value", ")", ":", "cleaned_store_data", "[", "key", "]", "=", "value", "else", ":", "_logger", ".", "info", "(", "\"Skip non-serializable item in the local script store. Key: '{}', Value: '{}'. \"", "\"This item cannot be saved and therefore will be lost when autokey quits.\"", ".", "format", "(", "key", ",", "value", ")", ")", "return", "cleaned_store_data" ]
Copy all serializable data into a new dict, and skip the rest. This makes sure to keep the items during runtime, even if the user edits and saves the script.
[ "Copy", "all", "serializable", "data", "into", "a", "new", "dict", "and", "skip", "the", "rest", ".", "This", "makes", "sure", "to", "keep", "the", "items", "during", "runtime", "even", "if", "the", "user", "edits", "and", "saves", "the", "script", "." ]
python
train
52.4
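The same idea in generic form: keep only the entries of a dict that survive a pickle round-trip. autokey uses its own _is_serializable check, so this is just an analogous standard-library sketch, not the autokey implementation.

import pickle

def keep_picklable(store):
    cleaned = {}
    for key, value in store.items():
        try:
            pickle.dumps(key)
            pickle.dumps(value)
        except Exception:
            # Non-serializable entries are reported and skipped, mirroring the log message above.
            print('Skipping non-picklable entry: {!r}'.format(key))
        else:
            cleaned[key] = value
    return cleaned

store = {'count': 3, 'handle': open(__file__)}
print(sorted(keep_picklable(store)))  # ['count']  (the open file handle is dropped)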
gc3-uzh-ch/elasticluster
elasticluster/providers/ansible_provider.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/ansible_provider.py#L158-L179
def setup_cluster(self, cluster, extra_args=tuple()): """ Configure the cluster by running an Ansible playbook. The ElastiCluster configuration attribute `<kind>_groups` determines, for each node kind, what Ansible groups nodes of that kind are assigned to. :param cluster: cluster to configure :type cluster: :py:class:`elasticluster.cluster.Cluster` :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: ``True`` on success, ``False`` otherwise. Please note, if nothing has to be configured, then ``True`` is returned. :raises: `ConfigurationError` if the playbook can not be found or is corrupt. """ return self._run_playbook(cluster, self._playbook_path, extra_args)
[ "def", "setup_cluster", "(", "self", ",", "cluster", ",", "extra_args", "=", "tuple", "(", ")", ")", ":", "return", "self", ".", "_run_playbook", "(", "cluster", ",", "self", ".", "_playbook_path", ",", "extra_args", ")" ]
Configure the cluster by running an Ansible playbook. The ElastiCluster configuration attribute `<kind>_groups` determines, for each node kind, what Ansible groups nodes of that kind are assigned to. :param cluster: cluster to configure :type cluster: :py:class:`elasticluster.cluster.Cluster` :param list extra_args: List of additional command-line arguments that are appended to each invocation of the setup program. :return: ``True`` on success, ``False`` otherwise. Please note, if nothing has to be configured, then ``True`` is returned. :raises: `ConfigurationError` if the playbook can not be found or is corrupt.
[ "Configure", "the", "cluster", "by", "running", "an", "Ansible", "playbook", "." ]
python
train
40.181818
googleapis/oauth2client
oauth2client/service_account.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/service_account.py#L495-L520
def create_with_claims(self, claims): """Create credentials that specify additional claims. Args: claims: dict, key-value pairs for claims. Returns: ServiceAccountCredentials, a copy of the current service account credentials with updated claims to use when obtaining access tokens. """ new_kwargs = dict(self._kwargs) new_kwargs.update(claims) result = self.__class__(self._service_account_email, self._signer, scopes=self._scopes, private_key_id=self._private_key_id, client_id=self.client_id, user_agent=self._user_agent, **new_kwargs) result.token_uri = self.token_uri result.revoke_uri = self.revoke_uri result._private_key_pkcs8_pem = self._private_key_pkcs8_pem result._private_key_pkcs12 = self._private_key_pkcs12 result._private_key_password = self._private_key_password return result
[ "def", "create_with_claims", "(", "self", ",", "claims", ")", ":", "new_kwargs", "=", "dict", "(", "self", ".", "_kwargs", ")", "new_kwargs", ".", "update", "(", "claims", ")", "result", "=", "self", ".", "__class__", "(", "self", ".", "_service_account_email", ",", "self", ".", "_signer", ",", "scopes", "=", "self", ".", "_scopes", ",", "private_key_id", "=", "self", ".", "_private_key_id", ",", "client_id", "=", "self", ".", "client_id", ",", "user_agent", "=", "self", ".", "_user_agent", ",", "*", "*", "new_kwargs", ")", "result", ".", "token_uri", "=", "self", ".", "token_uri", "result", ".", "revoke_uri", "=", "self", ".", "revoke_uri", "result", ".", "_private_key_pkcs8_pem", "=", "self", ".", "_private_key_pkcs8_pem", "result", ".", "_private_key_pkcs12", "=", "self", ".", "_private_key_pkcs12", "result", ".", "_private_key_password", "=", "self", ".", "_private_key_password", "return", "result" ]
Create credentials that specify additional claims. Args: claims: dict, key-value pairs for claims. Returns: ServiceAccountCredentials, a copy of the current service account credentials with updated claims to use when obtaining access tokens.
[ "Create", "credentials", "that", "specify", "additional", "claims", "." ]
python
valid
42.884615
OCHA-DAP/hdx-python-api
src/hdx/data/dataset.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/dataset.py#L835-L847
def get_dataset_date(self, date_format=None): # type: (Optional[str]) -> Optional[str] """Get dataset date as string in specified format. For range returns start date. If no format is supplied, an ISO 8601 string is returned. Args: date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None. Returns: Optional[str]: Dataset date string or None if no date is set """ dataset_date = self.get_dataset_date_as_datetime() return self._get_formatted_date(dataset_date, date_format)
[ "def", "get_dataset_date", "(", "self", ",", "date_format", "=", "None", ")", ":", "# type: (Optional[str]) -> Optional[str]", "dataset_date", "=", "self", ".", "get_dataset_date_as_datetime", "(", ")", "return", "self", ".", "_get_formatted_date", "(", "dataset_date", ",", "date_format", ")" ]
Get dataset date as string in specified format. For range returns start date. If no format is supplied, an ISO 8601 string is returned. Args: date_format (Optional[str]): Date format. None is taken to be ISO 8601. Defaults to None. Returns: Optional[str]: Dataset date string or None if no date is set
[ "Get", "dataset", "date", "as", "string", "in", "specified", "format", ".", "For", "range", "returns", "start", "date", ".", "If", "no", "format", "is", "supplied", "an", "ISO", "8601", "string", "is", "returned", "." ]
python
train
44.846154
readbeyond/aeneas
aeneas/runtimeconfiguration.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/runtimeconfiguration.py#L1119-L1138
def set_tts(self, level): """ Set the values for :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS` and :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS_PATH` matching the given granularity level. Currently supported levels: * ``1`` (paragraph) * ``2`` (sentence) * ``3`` (word) :param int level: the desired granularity level """ if level in self.TTS_GRANULARITY_MAP.keys(): tts_key, tts_path_key = self.TTS_GRANULARITY_MAP[level] self[self.TTS] = self[tts_key] self[self.TTS_PATH] = self[tts_path_key]
[ "def", "set_tts", "(", "self", ",", "level", ")", ":", "if", "level", "in", "self", ".", "TTS_GRANULARITY_MAP", ".", "keys", "(", ")", ":", "tts_key", ",", "tts_path_key", "=", "self", ".", "TTS_GRANULARITY_MAP", "[", "level", "]", "self", "[", "self", ".", "TTS", "]", "=", "self", "[", "tts_key", "]", "self", "[", "self", ".", "TTS_PATH", "]", "=", "self", "[", "tts_path_key", "]" ]
Set the values for :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS` and :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS_PATH` matching the given granularity level. Currently supported levels: * ``1`` (paragraph) * ``2`` (sentence) * ``3`` (word) :param int level: the desired granularity level
[ "Set", "the", "values", "for", ":", "data", ":", "~aeneas", ".", "runtimeconfiguration", ".", "RuntimeConfiguration", ".", "TTS", "and", ":", "data", ":", "~aeneas", ".", "runtimeconfiguration", ".", "RuntimeConfiguration", ".", "TTS_PATH", "matching", "the", "given", "granularity", "level", "." ]
python
train
32.5
pybel/pybel
src/pybel/canonicalize.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/canonicalize.py#L186-L201
def _to_bel_lines_header(graph) -> Iterable[str]: """Iterate the lines of a BEL graph's corresponding BEL script's header. :param pybel.BELGraph graph: A BEL graph """ yield '# This document was created by PyBEL v{} and bel-resources v{} on {}\n'.format( VERSION, bel_resources.constants.VERSION, time.asctime() ) yield from make_knowledge_header( namespace_url=graph.namespace_url, namespace_patterns=graph.namespace_pattern, annotation_url=graph.annotation_url, annotation_patterns=graph.annotation_pattern, annotation_list=graph.annotation_list, **graph.document )
[ "def", "_to_bel_lines_header", "(", "graph", ")", "->", "Iterable", "[", "str", "]", ":", "yield", "'# This document was created by PyBEL v{} and bel-resources v{} on {}\\n'", ".", "format", "(", "VERSION", ",", "bel_resources", ".", "constants", ".", "VERSION", ",", "time", ".", "asctime", "(", ")", ")", "yield", "from", "make_knowledge_header", "(", "namespace_url", "=", "graph", ".", "namespace_url", ",", "namespace_patterns", "=", "graph", ".", "namespace_pattern", ",", "annotation_url", "=", "graph", ".", "annotation_url", ",", "annotation_patterns", "=", "graph", ".", "annotation_pattern", ",", "annotation_list", "=", "graph", ".", "annotation_list", ",", "*", "*", "graph", ".", "document", ")" ]
Iterate the lines of a BEL graph's corresponding BEL script's header. :param pybel.BELGraph graph: A BEL graph
[ "Iterate", "the", "lines", "of", "a", "BEL", "graph", "s", "corresponding", "BEL", "script", "s", "header", "." ]
python
train
39.8125
ivanprjcts/sdklib
sdklib/http/base.py
https://github.com/ivanprjcts/sdklib/blob/7ba4273a05c40e2e338f49f2dd564920ed98fcab/sdklib/http/base.py#L353-L363
def set_default_host(cls, value): """ Default: "http://127.0.0.1:80" A string that will be automatically included at the beginning of the url generated for doing each http request. """ if value is None: cls.DEFAULT_HOST = "http://127.0.0.1:80" else: scheme, host, port = get_hostname_parameters_from_url(value) cls.DEFAULT_HOST = "%s://%s:%s" % (scheme, host, port)
[ "def", "set_default_host", "(", "cls", ",", "value", ")", ":", "if", "value", "is", "None", ":", "cls", ".", "DEFAULT_HOST", "=", "\"http://127.0.0.1:80\"", "else", ":", "scheme", ",", "host", ",", "port", "=", "get_hostname_parameters_from_url", "(", "value", ")", "cls", ".", "DEFAULT_HOST", "=", "\"%s://%s:%s\"", "%", "(", "scheme", ",", "host", ",", "port", ")" ]
Default: "http://127.0.0.1:80" A string that will be automatically included at the beginning of the url generated for doing each http request.
[ "Default", ":", "http", ":", "//", "127", ".", "0", ".", "0", ".", "1", ":", "80" ]
python
train
40
apache/spark
python/pyspark/cloudpickle.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L1060-L1113
def _fill_function(*args): """Fills in the rest of function data into the skeleton function object The skeleton itself is create by _make_skel_func(). """ if len(args) == 2: func = args[0] state = args[1] elif len(args) == 5: # Backwards compat for cloudpickle v0.4.0, after which the `module` # argument was introduced func = args[0] keys = ['globals', 'defaults', 'dict', 'closure_values'] state = dict(zip(keys, args[1:])) elif len(args) == 6: # Backwards compat for cloudpickle v0.4.1, after which the function # state was passed as a dict to the _fill_function it-self. func = args[0] keys = ['globals', 'defaults', 'dict', 'module', 'closure_values'] state = dict(zip(keys, args[1:])) else: raise ValueError('Unexpected _fill_value arguments: %r' % (args,)) # - At pickling time, any dynamic global variable used by func is # serialized by value (in state['globals']). # - At unpickling time, func's __globals__ attribute is initialized by # first retrieving an empty isolated namespace that will be shared # with other functions pickled from the same original module # by the same CloudPickler instance and then updated with the # content of state['globals'] to populate the shared isolated # namespace with all the global variables that are specifically # referenced for this function. func.__globals__.update(state['globals']) func.__defaults__ = state['defaults'] func.__dict__ = state['dict'] if 'annotations' in state: func.__annotations__ = state['annotations'] if 'doc' in state: func.__doc__ = state['doc'] if 'name' in state: func.__name__ = state['name'] if 'module' in state: func.__module__ = state['module'] if 'qualname' in state: func.__qualname__ = state['qualname'] cells = func.__closure__ if cells is not None: for cell, value in zip(cells, state['closure_values']): if value is not _empty_cell_value: cell_set(cell, value) return func
[ "def", "_fill_function", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "2", ":", "func", "=", "args", "[", "0", "]", "state", "=", "args", "[", "1", "]", "elif", "len", "(", "args", ")", "==", "5", ":", "# Backwards compat for cloudpickle v0.4.0, after which the `module`", "# argument was introduced", "func", "=", "args", "[", "0", "]", "keys", "=", "[", "'globals'", ",", "'defaults'", ",", "'dict'", ",", "'closure_values'", "]", "state", "=", "dict", "(", "zip", "(", "keys", ",", "args", "[", "1", ":", "]", ")", ")", "elif", "len", "(", "args", ")", "==", "6", ":", "# Backwards compat for cloudpickle v0.4.1, after which the function", "# state was passed as a dict to the _fill_function it-self.", "func", "=", "args", "[", "0", "]", "keys", "=", "[", "'globals'", ",", "'defaults'", ",", "'dict'", ",", "'module'", ",", "'closure_values'", "]", "state", "=", "dict", "(", "zip", "(", "keys", ",", "args", "[", "1", ":", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unexpected _fill_value arguments: %r'", "%", "(", "args", ",", ")", ")", "# - At pickling time, any dynamic global variable used by func is", "# serialized by value (in state['globals']).", "# - At unpickling time, func's __globals__ attribute is initialized by", "# first retrieving an empty isolated namespace that will be shared", "# with other functions pickled from the same original module", "# by the same CloudPickler instance and then updated with the", "# content of state['globals'] to populate the shared isolated", "# namespace with all the global variables that are specifically", "# referenced for this function.", "func", ".", "__globals__", ".", "update", "(", "state", "[", "'globals'", "]", ")", "func", ".", "__defaults__", "=", "state", "[", "'defaults'", "]", "func", ".", "__dict__", "=", "state", "[", "'dict'", "]", "if", "'annotations'", "in", "state", ":", "func", ".", "__annotations__", "=", "state", "[", "'annotations'", "]", "if", "'doc'", "in", "state", ":", "func", ".", "__doc__", "=", "state", "[", "'doc'", "]", "if", "'name'", "in", "state", ":", "func", ".", "__name__", "=", "state", "[", "'name'", "]", "if", "'module'", "in", "state", ":", "func", ".", "__module__", "=", "state", "[", "'module'", "]", "if", "'qualname'", "in", "state", ":", "func", ".", "__qualname__", "=", "state", "[", "'qualname'", "]", "cells", "=", "func", ".", "__closure__", "if", "cells", "is", "not", "None", ":", "for", "cell", ",", "value", "in", "zip", "(", "cells", ",", "state", "[", "'closure_values'", "]", ")", ":", "if", "value", "is", "not", "_empty_cell_value", ":", "cell_set", "(", "cell", ",", "value", ")", "return", "func" ]
Fills in the rest of function data into the skeleton function object

The skeleton itself is created by _make_skel_func().
[ "Fills", "in", "the", "rest", "of", "function", "data", "into", "the", "skeleton", "function", "object" ]
python
train
39.185185
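The reason this machinery exists is that the standard pickle module refuses dynamically created functions and closures; cloudpickle serialises their code, globals, defaults and closure cells separately and reassembles them with helpers like _fill_function. A small demonstration, assuming a cloudpickle module is importable (either the standalone package or the copy vendored in pyspark):

import pickle

def make_adder(n):
    def add(x):
        return x + n
    return add

add5 = make_adder(5)

try:
    pickle.dumps(add5)  # nested function with a closure cell
except Exception as exc:
    print('plain pickle fails:', type(exc).__name__)

import cloudpickle  # assumption: installed separately or available as pyspark's vendored copy

restored = pickle.loads(cloudpickle.dumps(add5))
print(restored(10))  # 15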
datakortet/dkfileutils
dkfileutils/path.py
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/path.py#L99-L121
def touch(self, mode=0o666, exist_ok=True): """Create this file with the given access mode, if it doesn't exist. Based on: https://github.com/python/cpython/blob/master/Lib/pathlib.py) """ if exist_ok: # First try to bump modification time # Implementation note: GNU touch uses the UTIME_NOW option of # the utimensat() / futimens() functions. try: os.utime(self, None) except OSError: # Avoid exception chaining pass else: return flags = os.O_CREAT | os.O_WRONLY if not exist_ok: flags |= os.O_EXCL fd = os.open(self, flags, mode) os.close(fd)
[ "def", "touch", "(", "self", ",", "mode", "=", "0o666", ",", "exist_ok", "=", "True", ")", ":", "if", "exist_ok", ":", "# First try to bump modification time", "# Implementation note: GNU touch uses the UTIME_NOW option of", "# the utimensat() / futimens() functions.", "try", ":", "os", ".", "utime", "(", "self", ",", "None", ")", "except", "OSError", ":", "# Avoid exception chaining", "pass", "else", ":", "return", "flags", "=", "os", ".", "O_CREAT", "|", "os", ".", "O_WRONLY", "if", "not", "exist_ok", ":", "flags", "|=", "os", ".", "O_EXCL", "fd", "=", "os", ".", "open", "(", "self", ",", "flags", ",", "mode", ")", "os", ".", "close", "(", "fd", ")" ]
Create this file with the given access mode, if it doesn't exist.

Based on: https://github.com/python/cpython/blob/master/Lib/pathlib.py
[ "Create", "this", "file", "with", "the", "given", "access", "mode", "if", "it", "doesn", "t", "exist", ".", "Based", "on", ":", "https", ":", "//", "github", ".", "com", "/", "python", "/", "cpython", "/", "blob", "/", "master", "/", "Lib", "/", "pathlib", ".", "py", ")" ]
python
train
33.869565
dhondta/tinyscript
tinyscript/argreparse.py
https://github.com/dhondta/tinyscript/blob/624a0718db698899e7bc3ba6ac694baed251e81d/tinyscript/argreparse.py#L422-L440
def config_args(self, section="main"): """ Additional method for feeding input arguments from a config file. :param section: current config section name """ if self._config_parsed: return for a in self._filtered_actions("config"): for o in a.option_strings: try: i = sys.argv.index(o) sys.argv.pop(i) # remove the option string sys.argv.pop(i) # remove the value that follows except ValueError: pass for a in self._sorted_actions(): self._set_arg(a, section, True) self._config_parsed = True
[ "def", "config_args", "(", "self", ",", "section", "=", "\"main\"", ")", ":", "if", "self", ".", "_config_parsed", ":", "return", "for", "a", "in", "self", ".", "_filtered_actions", "(", "\"config\"", ")", ":", "for", "o", "in", "a", ".", "option_strings", ":", "try", ":", "i", "=", "sys", ".", "argv", ".", "index", "(", "o", ")", "sys", ".", "argv", ".", "pop", "(", "i", ")", "# remove the option string", "sys", ".", "argv", ".", "pop", "(", "i", ")", "# remove the value that follows", "except", "ValueError", ":", "pass", "for", "a", "in", "self", ".", "_sorted_actions", "(", ")", ":", "self", ".", "_set_arg", "(", "a", ",", "section", ",", "True", ")", "self", ".", "_config_parsed", "=", "True" ]
Additional method for feeding input arguments from a config file. :param section: current config section name
[ "Additional", "method", "for", "feeding", "input", "arguments", "from", "a", "config", "file", ".", ":", "param", "section", ":", "current", "config", "section", "name" ]
python
train
36.631579
oauthlib/oauthlib
oauthlib/oauth1/rfc5849/request_validator.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/request_validator.py#L190-L196
def check_verifier(self, verifier): """Checks that the verifier contains only safe characters and is no shorter than lower and no longer than upper. """ lower, upper = self.verifier_length return (set(verifier) <= self.safe_characters and lower <= len(verifier) <= upper)
[ "def", "check_verifier", "(", "self", ",", "verifier", ")", ":", "lower", ",", "upper", "=", "self", ".", "verifier_length", "return", "(", "set", "(", "verifier", ")", "<=", "self", ".", "safe_characters", "and", "lower", "<=", "len", "(", "verifier", ")", "<=", "upper", ")" ]
Checks that the verifier contains only safe characters and is no shorter than lower and no longer than upper.
[ "Checks", "that", "the", "verifier", "contains", "only", "safe", "characters", "and", "is", "no", "shorter", "than", "lower", "and", "no", "longer", "than", "upper", "." ]
python
train
45.857143
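The check reduces to one set comparison and one length comparison. A self-contained version with made-up bounds and character set for illustration (oauthlib derives these from the validator instance):

import string

safe_characters = set(string.ascii_letters + string.digits)
verifier_length = (20, 30)  # made-up lower/upper bounds

def check_verifier(verifier):
    lower, upper = verifier_length
    return set(verifier) <= safe_characters and lower <= len(verifier) <= upper

print(check_verifier('abc'))      # False: too short
print(check_verifier('a' * 25))   # True
print(check_verifier('!' * 25))   # False: unsafe character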
googlemaps/google-maps-services-python
googlemaps/convert.py
https://github.com/googlemaps/google-maps-services-python/blob/7ed40b4d8df63479794c46ce29d03ed6083071d7/googlemaps/convert.py#L83-L107
def normalize_lat_lng(arg): """Take the various lat/lng representations and return a tuple. Accepts various representations: 1) dict with two entries - "lat" and "lng" 2) list or tuple - e.g. (-33, 151) or [-33, 151] :param arg: The lat/lng pair. :type arg: dict or list or tuple :rtype: tuple (lat, lng) """ if isinstance(arg, dict): if "lat" in arg and "lng" in arg: return arg["lat"], arg["lng"] if "latitude" in arg and "longitude" in arg: return arg["latitude"], arg["longitude"] # List or tuple. if _is_list(arg): return arg[0], arg[1] raise TypeError( "Expected a lat/lng dict or tuple, " "but got %s" % type(arg).__name__)
[ "def", "normalize_lat_lng", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "dict", ")", ":", "if", "\"lat\"", "in", "arg", "and", "\"lng\"", "in", "arg", ":", "return", "arg", "[", "\"lat\"", "]", ",", "arg", "[", "\"lng\"", "]", "if", "\"latitude\"", "in", "arg", "and", "\"longitude\"", "in", "arg", ":", "return", "arg", "[", "\"latitude\"", "]", ",", "arg", "[", "\"longitude\"", "]", "# List or tuple.", "if", "_is_list", "(", "arg", ")", ":", "return", "arg", "[", "0", "]", ",", "arg", "[", "1", "]", "raise", "TypeError", "(", "\"Expected a lat/lng dict or tuple, \"", "\"but got %s\"", "%", "type", "(", "arg", ")", ".", "__name__", ")" ]
Take the various lat/lng representations and return a tuple. Accepts various representations: 1) dict with two entries - "lat" and "lng" 2) list or tuple - e.g. (-33, 151) or [-33, 151] :param arg: The lat/lng pair. :type arg: dict or list or tuple :rtype: tuple (lat, lng)
[ "Take", "the", "various", "lat", "/", "lng", "representations", "and", "return", "a", "tuple", "." ]
python
train
28.92
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/licenses_api.py
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/licenses_api.py#L366-L390
def get_specific(self, id, **kwargs): """ Get specific License This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_specific(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: License id (required) :return: LicenseSingleton If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_specific_with_http_info(id, **kwargs) else: (data) = self.get_specific_with_http_info(id, **kwargs) return data
[ "def", "get_specific", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "get_specific_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "get_specific_with_http_info", "(", "id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Get specific License This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_specific(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: License id (required) :return: LicenseSingleton If the method is called asynchronously, returns the request thread.
[ "Get", "specific", "License", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a", "callback", "function", "to", "be", "invoked", "when", "receiving", "the", "response", ".", ">>>", "def", "callback_function", "(", "response", ")", ":", ">>>", "pprint", "(", "response", ")", ">>>", ">>>", "thread", "=", "api", ".", "get_specific", "(", "id", "callback", "=", "callback_function", ")" ]
python
train
39
Azure/azure-cli-extensions
src/storage-preview/azext_storage_preview/_format.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/storage-preview/azext_storage_preview/_format.py#L72-L90
def transform_file_output(result): """ Transform to convert SDK file/dir list output to something that more clearly distinguishes between files and directories. """ from collections import OrderedDict new_result = [] iterable = result if isinstance(result, list) else result.get('items', result) for item in iterable: new_entry = OrderedDict() entity_type = item['type'] # type property is added by transform_file_directory_result is_dir = entity_type == 'dir' new_entry['Name'] = item['name'] + '/' if is_dir else item['name'] new_entry['Content Length'] = ' ' if is_dir else item['properties']['contentLength'] new_entry['Type'] = item['type'] new_entry['Last Modified'] = item['properties']['lastModified'] or ' ' new_result.append(new_entry) return sorted(new_result, key=lambda k: k['Name'])
[ "def", "transform_file_output", "(", "result", ")", ":", "from", "collections", "import", "OrderedDict", "new_result", "=", "[", "]", "iterable", "=", "result", "if", "isinstance", "(", "result", ",", "list", ")", "else", "result", ".", "get", "(", "'items'", ",", "result", ")", "for", "item", "in", "iterable", ":", "new_entry", "=", "OrderedDict", "(", ")", "entity_type", "=", "item", "[", "'type'", "]", "# type property is added by transform_file_directory_result", "is_dir", "=", "entity_type", "==", "'dir'", "new_entry", "[", "'Name'", "]", "=", "item", "[", "'name'", "]", "+", "'/'", "if", "is_dir", "else", "item", "[", "'name'", "]", "new_entry", "[", "'Content Length'", "]", "=", "' '", "if", "is_dir", "else", "item", "[", "'properties'", "]", "[", "'contentLength'", "]", "new_entry", "[", "'Type'", "]", "=", "item", "[", "'type'", "]", "new_entry", "[", "'Last Modified'", "]", "=", "item", "[", "'properties'", "]", "[", "'lastModified'", "]", "or", "' '", "new_result", ".", "append", "(", "new_entry", ")", "return", "sorted", "(", "new_result", ",", "key", "=", "lambda", "k", ":", "k", "[", "'Name'", "]", ")" ]
Transform to convert SDK file/dir list output to something that more clearly distinguishes between files and directories.
[ "Transform", "to", "convert", "SDK", "file", "/", "dir", "list", "output", "to", "something", "that", "more", "clearly", "distinguishes", "between", "files", "and", "directories", "." ]
python
train
46
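A minimal usage sketch (not part of the repo): the input dicts below are hand-built to mirror the keys the transform above reads ('type', 'name', 'properties'), so the concrete values are illustrative assumptions, and the function is assumed to be in scope.

items = [
    {'type': 'dir', 'name': 'logs',
     'properties': {'contentLength': None, 'lastModified': None}},
    {'type': 'file', 'name': 'report.csv',
     'properties': {'contentLength': 2048, 'lastModified': '2019-01-01T00:00:00+00:00'}},
]
for row in transform_file_output(items):
    # Directories get a trailing '/' and a blank Content Length; files keep their size.
    print(dict(row))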
astropy/pyregion
pyregion/core.py
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L223-L242
def parse(region_string): """Parse DS9 region string into a ShapeList. Parameters ---------- region_string : str Region string Returns ------- shapes : `ShapeList` List of `~pyregion.Shape` """ rp = RegionParser() ss = rp.parse(region_string) sss1 = rp.convert_attr(ss) sss2 = _check_wcs(sss1) shape_list, comment_list = rp.filter_shape2(sss2) return ShapeList(shape_list, comment_list=comment_list)
[ "def", "parse", "(", "region_string", ")", ":", "rp", "=", "RegionParser", "(", ")", "ss", "=", "rp", ".", "parse", "(", "region_string", ")", "sss1", "=", "rp", ".", "convert_attr", "(", "ss", ")", "sss2", "=", "_check_wcs", "(", "sss1", ")", "shape_list", ",", "comment_list", "=", "rp", ".", "filter_shape2", "(", "sss2", ")", "return", "ShapeList", "(", "shape_list", ",", "comment_list", "=", "comment_list", ")" ]
Parse DS9 region string into a ShapeList. Parameters ---------- region_string : str Region string Returns ------- shapes : `ShapeList` List of `~pyregion.Shape`
[ "Parse", "DS9", "region", "string", "into", "a", "ShapeList", "." ]
python
train
22.75
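A hedged example of calling the parser above; the DS9 region string and the Shape attribute names (name, coord_list) are assumptions about pyregion's public API rather than something stated in this record.

from pyregion import parse   # parse() is assumed to be re-exported at package level

shapes = parse('image;circle(100,100,20) # color=red')
for shape in shapes:
    # Each entry is a pyregion Shape; print its type and numeric parameters.
    print(shape.name, shape.coord_list)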
apache/spark
python/pyspark/sql/readwriter.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L521-L569
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None, predicates=None, properties=None): """ Construct a :class:`DataFrame` representing the database table named ``table`` accessible via JDBC URL ``url`` and connection ``properties``. Partitions of the table will be retrieved in parallel if either ``column`` or ``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartitions`` is needed when ``column`` is specified. If both ``column`` and ``predicates`` are specified, ``column`` will be used. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: the name of the table :param column: the name of an integer column that will be used for partitioning; if this parameter is specified, then ``numPartitions``, ``lowerBound`` (inclusive), and ``upperBound`` (exclusive) will form partition strides for generated WHERE clause expressions used to split the column ``column`` evenly :param lowerBound: the minimum value of ``column`` used to decide partition stride :param upperBound: the maximum value of ``column`` used to decide partition stride :param numPartitions: the number of partitions :param predicates: a list of expressions suitable for inclusion in WHERE clauses; each one defines one partition of the :class:`DataFrame` :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } :return: a DataFrame """ if properties is None: properties = dict() jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)() for k in properties: jprop.setProperty(k, properties[k]) if column is not None: assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified" assert upperBound is not None, "upperBound can not be None when ``column`` is specified" assert numPartitions is not None, \ "numPartitions can not be None when ``column`` is specified" return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound), int(numPartitions), jprop)) if predicates is not None: gateway = self._spark._sc._gateway jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates) return self._df(self._jreader.jdbc(url, table, jpredicates, jprop)) return self._df(self._jreader.jdbc(url, table, jprop))
[ "def", "jdbc", "(", "self", ",", "url", ",", "table", ",", "column", "=", "None", ",", "lowerBound", "=", "None", ",", "upperBound", "=", "None", ",", "numPartitions", "=", "None", ",", "predicates", "=", "None", ",", "properties", "=", "None", ")", ":", "if", "properties", "is", "None", ":", "properties", "=", "dict", "(", ")", "jprop", "=", "JavaClass", "(", "\"java.util.Properties\"", ",", "self", ".", "_spark", ".", "_sc", ".", "_gateway", ".", "_gateway_client", ")", "(", ")", "for", "k", "in", "properties", ":", "jprop", ".", "setProperty", "(", "k", ",", "properties", "[", "k", "]", ")", "if", "column", "is", "not", "None", ":", "assert", "lowerBound", "is", "not", "None", ",", "\"lowerBound can not be None when ``column`` is specified\"", "assert", "upperBound", "is", "not", "None", ",", "\"upperBound can not be None when ``column`` is specified\"", "assert", "numPartitions", "is", "not", "None", ",", "\"numPartitions can not be None when ``column`` is specified\"", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "column", ",", "int", "(", "lowerBound", ")", ",", "int", "(", "upperBound", ")", ",", "int", "(", "numPartitions", ")", ",", "jprop", ")", ")", "if", "predicates", "is", "not", "None", ":", "gateway", "=", "self", ".", "_spark", ".", "_sc", ".", "_gateway", "jpredicates", "=", "utils", ".", "toJArray", "(", "gateway", ",", "gateway", ".", "jvm", ".", "java", ".", "lang", ".", "String", ",", "predicates", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "jpredicates", ",", "jprop", ")", ")", "return", "self", ".", "_df", "(", "self", ".", "_jreader", ".", "jdbc", "(", "url", ",", "table", ",", "jprop", ")", ")" ]
Construct a :class:`DataFrame` representing the database table named ``table`` accessible via JDBC URL ``url`` and connection ``properties``. Partitions of the table will be retrieved in parallel if either ``column`` or ``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartitions`` is needed when ``column`` is specified. If both ``column`` and ``predicates`` are specified, ``column`` will be used. .. note:: Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash your external database systems. :param url: a JDBC URL of the form ``jdbc:subprotocol:subname`` :param table: the name of the table :param column: the name of an integer column that will be used for partitioning; if this parameter is specified, then ``numPartitions``, ``lowerBound`` (inclusive), and ``upperBound`` (exclusive) will form partition strides for generated WHERE clause expressions used to split the column ``column`` evenly :param lowerBound: the minimum value of ``column`` used to decide partition stride :param upperBound: the maximum value of ``column`` used to decide partition stride :param numPartitions: the number of partitions :param predicates: a list of expressions suitable for inclusion in WHERE clauses; each one defines one partition of the :class:`DataFrame` :param properties: a dictionary of JDBC database connection arguments. Normally at least properties "user" and "password" with their corresponding values. For example { 'user' : 'SYSTEM', 'password' : 'mypassword' } :return: a DataFrame
[ "Construct", "a", ":", "class", ":", "DataFrame", "representing", "the", "database", "table", "named", "table", "accessible", "via", "JDBC", "URL", "url", "and", "connection", "properties", "." ]
python
train
62.612245
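A sketch of the partitioned-read contract described in the docstring above; the JDBC URL, table name and credentials are placeholders, not real endpoints.

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.read.jdbc(
    url='jdbc:postgresql://dbhost:5432/sales',
    table='orders',
    column='order_id', lowerBound=1, upperBound=1000000, numPartitions=8,
    properties={'user': 'reader', 'password': 'secret'},
)
# lowerBound/upperBound only shape the partition strides; they do not filter rows.
print(df.rdd.getNumPartitions())   # expect 8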
horazont/aioxmpp
aioxmpp/bookmarks/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/bookmarks/service.py#L141-L265
def _diff_emit_update(self, new_bookmarks): """ Diff the bookmark cache and the new bookmark state, emit signals as needed and set the bookmark cache to the new data. """ self.logger.debug("diffing %s, %s", self._bookmark_cache, new_bookmarks) def subdivide(level, old, new): """ Subdivide the bookmarks according to the data item ``bookmark.secondary[level]`` and emit the appropriate events. """ if len(old) == len(new) == 1: old_entry = old.pop() new_entry = new.pop() if old_entry == new_entry: pass else: self.on_bookmark_changed(old_entry, new_entry) return ([], []) elif len(old) == 0: return ([], new) elif len(new) == 0: return (old, []) else: try: groups = {} for entry in old: group = groups.setdefault( entry.secondary[level], ([], []) ) group[0].append(entry) for entry in new: group = groups.setdefault( entry.secondary[level], ([], []) ) group[1].append(entry) except IndexError: # the classification is exhausted, this means # all entries in this bin are equal by the # defininition of bookmark equivalence! common = min(len(old), len(new)) assert old[:common] == new[:common] return (old[common:], new[common:]) old_unhandled, new_unhandled = [], [] for old, new in groups.values(): unhandled = subdivide(level+1, old, new) old_unhandled += unhandled[0] new_unhandled += unhandled[1] # match up unhandleds as changes as early as possible i = -1 for i, (old_entry, new_entry) in enumerate( zip(old_unhandled, new_unhandled)): self.logger.debug("changed %s -> %s", old_entry, new_entry) self.on_bookmark_changed(old_entry, new_entry) i += 1 return old_unhandled[i:], new_unhandled[i:] # group the bookmarks into groups whose elements may transform # among one another by on_bookmark_changed events. This information # is given by the type of the bookmark and the .primary property changable_groups = {} for item in self._bookmark_cache: group = changable_groups.setdefault( (type(item), item.primary), ([], []) ) group[0].append(item) for item in new_bookmarks: group = changable_groups.setdefault( (type(item), item.primary), ([], []) ) group[1].append(item) for old, new in changable_groups.values(): # the first branches are fast paths which should catch # most cases – especially all cases where each bare jid of # a conference bookmark or each url of an url bookmark is # only used in one bookmark if len(old) == len(new) == 1: old_entry = old.pop() new_entry = new.pop() if old_entry == new_entry: # the bookmark is unchanged, do not emit an event pass else: self.logger.debug("changed %s -> %s", old_entry, new_entry) self.on_bookmark_changed(old_entry, new_entry) elif len(new) == 0: for removed in old: self.logger.debug("removed %s", removed) self.on_bookmark_removed(removed) elif len(old) == 0: for added in new: self.logger.debug("added %s", added) self.on_bookmark_added(added) else: old, new = subdivide(0, old, new) assert len(old) == 0 or len(new) == 0 for removed in old: self.logger.debug("removed %s", removed) self.on_bookmark_removed(removed) for added in new: self.logger.debug("added %s", added) self.on_bookmark_added(added) self._bookmark_cache = new_bookmarks
[ "def", "_diff_emit_update", "(", "self", ",", "new_bookmarks", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"diffing %s, %s\"", ",", "self", ".", "_bookmark_cache", ",", "new_bookmarks", ")", "def", "subdivide", "(", "level", ",", "old", ",", "new", ")", ":", "\"\"\"\n Subdivide the bookmarks according to the data item\n ``bookmark.secondary[level]`` and emit the appropriate\n events.\n \"\"\"", "if", "len", "(", "old", ")", "==", "len", "(", "new", ")", "==", "1", ":", "old_entry", "=", "old", ".", "pop", "(", ")", "new_entry", "=", "new", ".", "pop", "(", ")", "if", "old_entry", "==", "new_entry", ":", "pass", "else", ":", "self", ".", "on_bookmark_changed", "(", "old_entry", ",", "new_entry", ")", "return", "(", "[", "]", ",", "[", "]", ")", "elif", "len", "(", "old", ")", "==", "0", ":", "return", "(", "[", "]", ",", "new", ")", "elif", "len", "(", "new", ")", "==", "0", ":", "return", "(", "old", ",", "[", "]", ")", "else", ":", "try", ":", "groups", "=", "{", "}", "for", "entry", "in", "old", ":", "group", "=", "groups", ".", "setdefault", "(", "entry", ".", "secondary", "[", "level", "]", ",", "(", "[", "]", ",", "[", "]", ")", ")", "group", "[", "0", "]", ".", "append", "(", "entry", ")", "for", "entry", "in", "new", ":", "group", "=", "groups", ".", "setdefault", "(", "entry", ".", "secondary", "[", "level", "]", ",", "(", "[", "]", ",", "[", "]", ")", ")", "group", "[", "1", "]", ".", "append", "(", "entry", ")", "except", "IndexError", ":", "# the classification is exhausted, this means", "# all entries in this bin are equal by the", "# defininition of bookmark equivalence!", "common", "=", "min", "(", "len", "(", "old", ")", ",", "len", "(", "new", ")", ")", "assert", "old", "[", ":", "common", "]", "==", "new", "[", ":", "common", "]", "return", "(", "old", "[", "common", ":", "]", ",", "new", "[", "common", ":", "]", ")", "old_unhandled", ",", "new_unhandled", "=", "[", "]", ",", "[", "]", "for", "old", ",", "new", "in", "groups", ".", "values", "(", ")", ":", "unhandled", "=", "subdivide", "(", "level", "+", "1", ",", "old", ",", "new", ")", "old_unhandled", "+=", "unhandled", "[", "0", "]", "new_unhandled", "+=", "unhandled", "[", "1", "]", "# match up unhandleds as changes as early as possible", "i", "=", "-", "1", "for", "i", ",", "(", "old_entry", ",", "new_entry", ")", "in", "enumerate", "(", "zip", "(", "old_unhandled", ",", "new_unhandled", ")", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"changed %s -> %s\"", ",", "old_entry", ",", "new_entry", ")", "self", ".", "on_bookmark_changed", "(", "old_entry", ",", "new_entry", ")", "i", "+=", "1", "return", "old_unhandled", "[", "i", ":", "]", ",", "new_unhandled", "[", "i", ":", "]", "# group the bookmarks into groups whose elements may transform", "# among one another by on_bookmark_changed events. 
This information", "# is given by the type of the bookmark and the .primary property", "changable_groups", "=", "{", "}", "for", "item", "in", "self", ".", "_bookmark_cache", ":", "group", "=", "changable_groups", ".", "setdefault", "(", "(", "type", "(", "item", ")", ",", "item", ".", "primary", ")", ",", "(", "[", "]", ",", "[", "]", ")", ")", "group", "[", "0", "]", ".", "append", "(", "item", ")", "for", "item", "in", "new_bookmarks", ":", "group", "=", "changable_groups", ".", "setdefault", "(", "(", "type", "(", "item", ")", ",", "item", ".", "primary", ")", ",", "(", "[", "]", ",", "[", "]", ")", ")", "group", "[", "1", "]", ".", "append", "(", "item", ")", "for", "old", ",", "new", "in", "changable_groups", ".", "values", "(", ")", ":", "# the first branches are fast paths which should catch", "# most cases – especially all cases where each bare jid of", "# a conference bookmark or each url of an url bookmark is", "# only used in one bookmark", "if", "len", "(", "old", ")", "==", "len", "(", "new", ")", "==", "1", ":", "old_entry", "=", "old", ".", "pop", "(", ")", "new_entry", "=", "new", ".", "pop", "(", ")", "if", "old_entry", "==", "new_entry", ":", "# the bookmark is unchanged, do not emit an event", "pass", "else", ":", "self", ".", "logger", ".", "debug", "(", "\"changed %s -> %s\"", ",", "old_entry", ",", "new_entry", ")", "self", ".", "on_bookmark_changed", "(", "old_entry", ",", "new_entry", ")", "elif", "len", "(", "new", ")", "==", "0", ":", "for", "removed", "in", "old", ":", "self", ".", "logger", ".", "debug", "(", "\"removed %s\"", ",", "removed", ")", "self", ".", "on_bookmark_removed", "(", "removed", ")", "elif", "len", "(", "old", ")", "==", "0", ":", "for", "added", "in", "new", ":", "self", ".", "logger", ".", "debug", "(", "\"added %s\"", ",", "added", ")", "self", ".", "on_bookmark_added", "(", "added", ")", "else", ":", "old", ",", "new", "=", "subdivide", "(", "0", ",", "old", ",", "new", ")", "assert", "len", "(", "old", ")", "==", "0", "or", "len", "(", "new", ")", "==", "0", "for", "removed", "in", "old", ":", "self", ".", "logger", ".", "debug", "(", "\"removed %s\"", ",", "removed", ")", "self", ".", "on_bookmark_removed", "(", "removed", ")", "for", "added", "in", "new", ":", "self", ".", "logger", ".", "debug", "(", "\"added %s\"", ",", "added", ")", "self", ".", "on_bookmark_added", "(", "added", ")", "self", ".", "_bookmark_cache", "=", "new_bookmarks" ]
Diff the bookmark cache and the new bookmark state, emit signals as needed and set the bookmark cache to the new data.
[ "Diff", "the", "bookmark", "cache", "and", "the", "new", "bookmark", "state", "emit", "signals", "as", "needed", "and", "set", "the", "bookmark", "cache", "to", "the", "new", "data", "." ]
python
train
37.824
jimzhan/pyx
rex/core/cache.py
https://github.com/jimzhan/pyx/blob/819e8251323a7923e196c0c438aa8524f5aaee6e/rex/core/cache.py#L16-L40
def memorize(func): """ Simply memorize the calculated result :data:`func`. previously returned. Simply cached all calculated results from the decorated method/function into a global `dict`. """ @wraps(func) def wrapped_func(*args, **kwargs): if (len(args) > 0 and len(kwargs) > 0): cacheKey = list(args) cacheKey.append(kwargs) elif (len(args) > 0): cacheKey = args else: cacheKey = func.__name__ global __cache__ result = __cache__.get(cacheKey) if result is None: result = func(*args, **kwargs) __cache__[cacheKey] = result return result return wrapped_func
[ "def", "memorize", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "(", "len", "(", "args", ")", ">", "0", "and", "len", "(", "kwargs", ")", ">", "0", ")", ":", "cacheKey", "=", "list", "(", "args", ")", "cacheKey", ".", "append", "(", "kwargs", ")", "elif", "(", "len", "(", "args", ")", ">", "0", ")", ":", "cacheKey", "=", "args", "else", ":", "cacheKey", "=", "func", ".", "__name__", "global", "__cache__", "result", "=", "__cache__", ".", "get", "(", "cacheKey", ")", "if", "result", "is", "None", ":", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "__cache__", "[", "cacheKey", "]", "=", "result", "return", "result", "return", "wrapped_func" ]
Simply memorize the calculated result that :data:`func` previously returned. Simply caches all calculated results from the decorated method/function into a global `dict`.
[ "Simply", "memorize", "the", "calculated", "result", ":", "data", ":", "func", ".", "previously", "returned", "." ]
python
train
28.52
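A usage sketch for the decorator above, with an invented slow_square function. One observation from reading the code (not from its docs): the keyword-argument branch builds a list key, which is unhashable, so caching is only reliable for positional calls.

import time

@memorize                      # assumes the decorator above is in scope
def slow_square(n):
    time.sleep(1)              # stand-in for an expensive computation
    return n * n

print(slow_square(12))         # computed on the first call
print(slow_square(12))         # returned from the module-level __cache__ dict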
horejsek/python-fastjsonschema
fastjsonschema/ref_resolver.py
https://github.com/horejsek/python-fastjsonschema/blob/8c38d0f91fa5d928ff629080cdb75ab23f96590f/fastjsonschema/ref_resolver.py#L20-L36
def resolve_path(schema, fragment): """ Return definition from path. Path is unescaped according https://tools.ietf.org/html/rfc6901 """ fragment = fragment.lstrip('/') parts = unquote(fragment).split('/') if fragment else [] for part in parts: part = part.replace('~1', '/').replace('~0', '~') if isinstance(schema, list): schema = schema[int(part)] elif part in schema: schema = schema[part] else: raise JsonSchemaException('Unresolvable ref: {}'.format(part)) return schema
[ "def", "resolve_path", "(", "schema", ",", "fragment", ")", ":", "fragment", "=", "fragment", ".", "lstrip", "(", "'/'", ")", "parts", "=", "unquote", "(", "fragment", ")", ".", "split", "(", "'/'", ")", "if", "fragment", "else", "[", "]", "for", "part", "in", "parts", ":", "part", "=", "part", ".", "replace", "(", "'~1'", ",", "'/'", ")", ".", "replace", "(", "'~0'", ",", "'~'", ")", "if", "isinstance", "(", "schema", ",", "list", ")", ":", "schema", "=", "schema", "[", "int", "(", "part", ")", "]", "elif", "part", "in", "schema", ":", "schema", "=", "schema", "[", "part", "]", "else", ":", "raise", "JsonSchemaException", "(", "'Unresolvable ref: {}'", ".", "format", "(", "part", ")", ")", "return", "schema" ]
Return definition from path. Path is unescaped according to https://tools.ietf.org/html/rfc6901
[ "Return", "definition", "from", "path", ".", "Path", "is", "unescaped", "according", "https", ":", "//", "tools", ".", "ietf", ".", "org", "/", "html", "/", "rfc6901" ]
python
train
33.941176
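A small illustration of the RFC 6901 unescaping done above: '~1' decodes to '/' and '~0' to '~', so a key that literally contains a slash is addressed as 'a~1b'. This assumes resolve_path is in scope.

schema = {'definitions': {'a/b': {'type': 'string'}}}
print(resolve_path(schema, '/definitions/a~1b'))   # -> {'type': 'string'}
print(resolve_path(schema, ''))                    # empty fragment returns the whole schema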
mitsei/dlkit
dlkit/json_/grading/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L2709-L2734
def delete_grade_entry(self, grade_entry_id): """Deletes the ``GradeEntry`` identified by the given ``Id``. arg: grade_entry_id (osid.id.Id): the ``Id`` of the ``GradeEntry`` to delete raise: NotFound - a ``GradeEntry`` was not found identified by the given ``Id`` raise: NullArgument - ``grade_entry_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.delete_resource_template collection = JSONClientValidated('grading', collection='GradeEntry', runtime=self._runtime) if not isinstance(grade_entry_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') grade_entry_map = collection.find_one( dict({'_id': ObjectId(grade_entry_id.get_identifier())}, **self._view_filter())) objects.GradeEntry(osid_object_map=grade_entry_map, runtime=self._runtime, proxy=self._proxy)._delete() collection.delete_one({'_id': ObjectId(grade_entry_id.get_identifier())})
[ "def", "delete_grade_entry", "(", "self", ",", "grade_entry_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.delete_resource_template", "collection", "=", "JSONClientValidated", "(", "'grading'", ",", "collection", "=", "'GradeEntry'", ",", "runtime", "=", "self", ".", "_runtime", ")", "if", "not", "isinstance", "(", "grade_entry_id", ",", "ABCId", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the argument is not a valid OSID Id'", ")", "grade_entry_map", "=", "collection", ".", "find_one", "(", "dict", "(", "{", "'_id'", ":", "ObjectId", "(", "grade_entry_id", ".", "get_identifier", "(", ")", ")", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", "objects", ".", "GradeEntry", "(", "osid_object_map", "=", "grade_entry_map", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", ".", "_delete", "(", ")", "collection", ".", "delete_one", "(", "{", "'_id'", ":", "ObjectId", "(", "grade_entry_id", ".", "get_identifier", "(", ")", ")", "}", ")" ]
Deletes the ``GradeEntry`` identified by the given ``Id``. arg: grade_entry_id (osid.id.Id): the ``Id`` of the ``GradeEntry`` to delete raise: NotFound - a ``GradeEntry`` was not found identified by the given ``Id`` raise: NullArgument - ``grade_entry_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Deletes", "the", "GradeEntry", "identified", "by", "the", "given", "Id", "." ]
python
train
51.346154
cloud9ers/gurumate
environment/lib/python2.7/site-packages/MySQL_python-1.2.4c1-py2.7-linux-x86_64.egg/MySQLdb/cursors.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/MySQL_python-1.2.4c1-py2.7-linux-x86_64.egg/MySQLdb/cursors.py#L427-L435
def fetchmany(self, size=None): """Fetch up to size rows from the cursor. Result set may be smaller than size. If size is not defined, cursor.arraysize is used.""" self._check_executed() r = self._fetch_row(size or self.arraysize) self.rownumber = self.rownumber + len(r) if not r: self._warning_check() return r
[ "def", "fetchmany", "(", "self", ",", "size", "=", "None", ")", ":", "self", ".", "_check_executed", "(", ")", "r", "=", "self", ".", "_fetch_row", "(", "size", "or", "self", ".", "arraysize", ")", "self", ".", "rownumber", "=", "self", ".", "rownumber", "+", "len", "(", "r", ")", "if", "not", "r", ":", "self", ".", "_warning_check", "(", ")", "return", "r" ]
Fetch up to size rows from the cursor. Result set may be smaller than size. If size is not defined, cursor.arraysize is used.
[ "Fetch", "up", "to", "size", "rows", "from", "the", "cursor", ".", "Result", "set", "may", "be", "smaller", "than", "size", ".", "If", "size", "is", "not", "defined", "cursor", ".", "arraysize", "is", "used", "." ]
python
test
41.333333
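A typical batching loop around fetchmany(); the connection parameters below are placeholders.

import MySQLdb

conn = MySQLdb.connect(host='localhost', user='app', passwd='secret', db='shop')
cur = conn.cursor()
cur.execute("SELECT id, name FROM products")
while True:
    rows = cur.fetchmany(500)    # falls back to cur.arraysize when size is omitted
    if not rows:
        break
    for row in rows:
        print(row)
conn.close()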
blockadeio/analyst_toolbench
blockade/common/utils.py
https://github.com/blockadeio/analyst_toolbench/blob/159b6f8cf8a91c5ff050f1579636ea90ab269863/blockade/common/utils.py#L4-L17
def clean_indicators(indicators): """Remove any extra details from indicators.""" output = list() for indicator in indicators: strip = ['http://', 'https://'] for item in strip: indicator = indicator.replace(item, '') indicator = indicator.strip('.').strip() parts = indicator.split('/') if len(parts) > 0: indicator = parts.pop(0) output.append(indicator) output = list(set(output)) return output
[ "def", "clean_indicators", "(", "indicators", ")", ":", "output", "=", "list", "(", ")", "for", "indicator", "in", "indicators", ":", "strip", "=", "[", "'http://'", ",", "'https://'", "]", "for", "item", "in", "strip", ":", "indicator", "=", "indicator", ".", "replace", "(", "item", ",", "''", ")", "indicator", "=", "indicator", ".", "strip", "(", "'.'", ")", ".", "strip", "(", ")", "parts", "=", "indicator", ".", "split", "(", "'/'", ")", "if", "len", "(", "parts", ")", ">", "0", ":", "indicator", "=", "parts", ".", "pop", "(", "0", ")", "output", ".", "append", "(", "indicator", ")", "output", "=", "list", "(", "set", "(", "output", ")", ")", "return", "output" ]
Remove any extra details from indicators.
[ "Remove", "any", "extra", "details", "from", "indicators", "." ]
python
train
34
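A quick sketch of what the cleaning above produces for a few raw indicator strings; the example domains are invented, and clean_indicators is assumed to be in scope.

raw = [
    'https://evil.example.com/payload.php',
    'http://evil.example.com',
    'evil.example.com.',
]
# Scheme and path are stripped and duplicates collapse via set(), so only the
# bare host remains (order is not guaranteed).
print(clean_indicators(raw))   # -> ['evil.example.com']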
bluedazzle/wechat_sender
wechat_sender/listener.py
https://github.com/bluedazzle/wechat_sender/blob/21d861735509153d6b34408157911c25a5d7018b/wechat_sender/listener.py#L295-L326
def listen(bot, receivers=None, token=None, port=10245, status_report=False, status_receiver=None, status_interval=DEFAULT_REPORT_TIME): """ 传入 bot 实例并启动 wechat_sender 服务 :param bot: (必填|Bot对象) - wxpy 的 Bot 对象实例 :param receivers: (选填|wxpy.Chat 对象|Chat 对象列表) - 消息接收者,wxpy 的 Chat 对象实例, 或 Chat 对象列表,如果为 list 第一个 Chat 为默认接收者。如果为 Chat 对象,则默认接收者也是此对象。 不填为当前 bot 对象的文件接收者 :param token: (选填|str) - 信令,防止 receiver 被非法滥用,建议加上 token 防止非法使用,如果使用 token 请在初始化 `Sender()` 时也使用统一 token,否则无法发送。token 建议为 32 位及以上的无规律字符串 :param port: (选填|int) - 监听端口, 监听端口默认为 10245 ,如有冲突或特殊需要请自行指定,需要和 `Sender()` 统一 :param status_report: (选填|bool) - 是否开启状态报告,如果开启,wechat_sender 将会定时发送状态信息到 status_receiver :param status_receiver: (选填|Chat 对象) - 指定 status_receiver,不填将会发送状态消息给默认接收者 :param status_interval: (选填|int|datetime.timedelta) - 指定状态报告发送间隔时间,为 integer 时代表毫秒 """ global glb periodic_list = [] app = Application() wxbot = WxBot(bot, receivers, status_receiver) register_listener_handle(wxbot) process = psutil.Process() app.listen(port) if status_report: if isinstance(status_interval, datetime.timedelta): status_interval = status_interval.seconds * 1000 check_periodic = tornado.ioloop.PeriodicCallback(functools.partial(check_bot, SYSTEM_TASK), status_interval) check_periodic.start() periodic_list.append(check_periodic) glb = Global(wxbot=wxbot, run_info=process, periodic_list=periodic_list, ioloop=tornado.ioloop.IOLoop.instance(), token=token) tornado.ioloop.IOLoop.current().start()
[ "def", "listen", "(", "bot", ",", "receivers", "=", "None", ",", "token", "=", "None", ",", "port", "=", "10245", ",", "status_report", "=", "False", ",", "status_receiver", "=", "None", ",", "status_interval", "=", "DEFAULT_REPORT_TIME", ")", ":", "global", "glb", "periodic_list", "=", "[", "]", "app", "=", "Application", "(", ")", "wxbot", "=", "WxBot", "(", "bot", ",", "receivers", ",", "status_receiver", ")", "register_listener_handle", "(", "wxbot", ")", "process", "=", "psutil", ".", "Process", "(", ")", "app", ".", "listen", "(", "port", ")", "if", "status_report", ":", "if", "isinstance", "(", "status_interval", ",", "datetime", ".", "timedelta", ")", ":", "status_interval", "=", "status_interval", ".", "seconds", "*", "1000", "check_periodic", "=", "tornado", ".", "ioloop", ".", "PeriodicCallback", "(", "functools", ".", "partial", "(", "check_bot", ",", "SYSTEM_TASK", ")", ",", "status_interval", ")", "check_periodic", ".", "start", "(", ")", "periodic_list", ".", "append", "(", "check_periodic", ")", "glb", "=", "Global", "(", "wxbot", "=", "wxbot", ",", "run_info", "=", "process", ",", "periodic_list", "=", "periodic_list", ",", "ioloop", "=", "tornado", ".", "ioloop", ".", "IOLoop", ".", "instance", "(", ")", ",", "token", "=", "token", ")", "tornado", ".", "ioloop", ".", "IOLoop", ".", "current", "(", ")", ".", "start", "(", ")" ]
Pass in a bot instance and start the wechat_sender service :param bot: (required|Bot object) - a wxpy Bot object instance :param receivers: (optional|wxpy.Chat object|list of Chat objects) - message receivers, a wxpy Chat object instance or a list of Chat objects; if a list, the first Chat is the default receiver, and if a single Chat object, that object is also the default receiver. Defaults to the current bot's file helper when omitted :param token: (optional|str) - token that keeps the receiver from being abused; adding one is recommended, and the same token must be used when initializing `Sender()`, otherwise sending fails. A random string of 32 or more characters is suggested :param port: (optional|int) - listening port, 10245 by default; change it in case of conflicts or special needs and keep it consistent with `Sender()` :param status_report: (optional|bool) - whether to enable status reports; if enabled, wechat_sender will periodically send status information to status_receiver :param status_receiver: (optional|Chat object) - the status_receiver; status messages go to the default receiver when omitted :param status_interval: (optional|int|datetime.timedelta) - interval between status reports; an integer value is interpreted as milliseconds
[ "传入", "bot", "实例并启动", "wechat_sender", "服务" ]
python
train
49.5
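A wiring sketch for the listener above; the token and interval are placeholders, and the wechat_sender import path is an assumption. The token and port must match the ones given to Sender().

from wxpy import Bot
from wechat_sender import listen       # assumed public import for the function above

bot = Bot(console_qr=True)             # log in the wxpy bot first
listen(bot, token='replace-with-a-32-char-random-token', port=10245,
       status_report=True, status_interval=5 * 60 * 1000)   # report every 5 minutes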
lionheart/django-pyodbc
django_pyodbc/operations.py
https://github.com/lionheart/django-pyodbc/blob/46adda7b0bfabfa2640f72592c6f6f407f78b363/django_pyodbc/operations.py#L485-L524
def convert_values(self, value, field): """ Coerce the value returned by the database backend into a consistent type that is compatible with the field type. In our case, cater for the fact that SQL Server < 2008 has no separate Date and Time data types. TODO: See how we'll handle this for SQL Server >= 2008 """ if value is None: return None if field and field.get_internal_type() == 'DateTimeField': if isinstance(value, string_types) and value: value = parse_datetime(value) return value elif field and field.get_internal_type() == 'DateField': if isinstance(value, datetime.datetime): value = value.date() # extract date elif isinstance(value, string_types): value = parse_date(value) elif field and field.get_internal_type() == 'TimeField': if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1): value = value.time() # extract time elif isinstance(value, string_types): # If the value is a string, parse it using parse_time. value = parse_time(value) # Some cases (for example when select_related() is used) aren't # caught by the DateField case above and date fields arrive from # the DB as datetime instances. # Implement a workaround stealing the idea from the Oracle # backend. It's not perfect so the same warning applies (i.e. if a # query results in valid date+time values with the time part set # to midnight, this workaround can surprise us by converting them # to the datetime.date Python type). elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0: value = value.date() # Force floats to the correct type elif value is not None and field and field.get_internal_type() == 'FloatField': value = float(value) return value
[ "def", "convert_values", "(", "self", ",", "value", ",", "field", ")", ":", "if", "value", "is", "None", ":", "return", "None", "if", "field", "and", "field", ".", "get_internal_type", "(", ")", "==", "'DateTimeField'", ":", "if", "isinstance", "(", "value", ",", "string_types", ")", "and", "value", ":", "value", "=", "parse_datetime", "(", "value", ")", "return", "value", "elif", "field", "and", "field", ".", "get_internal_type", "(", ")", "==", "'DateField'", ":", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "value", "=", "value", ".", "date", "(", ")", "# extract date", "elif", "isinstance", "(", "value", ",", "string_types", ")", ":", "value", "=", "parse_date", "(", "value", ")", "elif", "field", "and", "field", ".", "get_internal_type", "(", ")", "==", "'TimeField'", ":", "if", "(", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", "and", "value", ".", "year", "==", "1900", "and", "value", ".", "month", "==", "value", ".", "day", "==", "1", ")", ":", "value", "=", "value", ".", "time", "(", ")", "# extract time", "elif", "isinstance", "(", "value", ",", "string_types", ")", ":", "# If the value is a string, parse it using parse_time.", "value", "=", "parse_time", "(", "value", ")", "# Some cases (for example when select_related() is used) aren't", "# caught by the DateField case above and date fields arrive from", "# the DB as datetime instances.", "# Implement a workaround stealing the idea from the Oracle", "# backend. It's not perfect so the same warning applies (i.e. if a", "# query results in valid date+time values with the time part set", "# to midnight, this workaround can surprise us by converting them", "# to the datetime.date Python type).", "elif", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", "and", "value", ".", "hour", "==", "value", ".", "minute", "==", "value", ".", "second", "==", "value", ".", "microsecond", "==", "0", ":", "value", "=", "value", ".", "date", "(", ")", "# Force floats to the correct type", "elif", "value", "is", "not", "None", "and", "field", "and", "field", ".", "get_internal_type", "(", ")", "==", "'FloatField'", ":", "value", "=", "float", "(", "value", ")", "return", "value" ]
Coerce the value returned by the database backend into a consistent type that is compatible with the field type. In our case, cater for the fact that SQL Server < 2008 has no separate Date and Time data types. TODO: See how we'll handle this for SQL Server >= 2008
[ "Coerce", "the", "value", "returned", "by", "the", "database", "backend", "into", "a", "consistent", "type", "that", "is", "compatible", "with", "the", "field", "type", "." ]
python
train
52.275
loganasherjones/yapconf
yapconf/items.py
https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/items.py#L294-L342
def get_config_value(self, overrides, skip_environment=False): """Get the configuration value from all overrides. Iterates over all overrides given to see if a value can be pulled out from them. It will convert each of these values to ensure they are the correct type. Args: overrides: A list of tuples where each tuple is a label and a dictionary representing a configuration. skip_environment: Skip looking through the environment. Returns: The converted configuration value. Raises: YapconfItemNotFound: If an item is required but could not be found in the configuration. YapconfItemError: If a possible value was found but the type cannot be determined. YapconfValueError: If a possible value is found but during conversion, an exception was raised. """ label, override, key = self._search_overrides( overrides, skip_environment ) if override is None and self.default is None and self.required: raise YapconfItemNotFound( 'Could not find config value for {0}'.format(self.fq_name), self ) if override is None: self.logger.debug( 'Config value not found for {0}, falling back to default.' .format(self.name) ) value = self.default else: value = override[key] if value is None: return value converted_value = self.convert_config_value(value, label) self._validate_value(converted_value) return converted_value
[ "def", "get_config_value", "(", "self", ",", "overrides", ",", "skip_environment", "=", "False", ")", ":", "label", ",", "override", ",", "key", "=", "self", ".", "_search_overrides", "(", "overrides", ",", "skip_environment", ")", "if", "override", "is", "None", "and", "self", ".", "default", "is", "None", "and", "self", ".", "required", ":", "raise", "YapconfItemNotFound", "(", "'Could not find config value for {0}'", ".", "format", "(", "self", ".", "fq_name", ")", ",", "self", ")", "if", "override", "is", "None", ":", "self", ".", "logger", ".", "debug", "(", "'Config value not found for {0}, falling back to default.'", ".", "format", "(", "self", ".", "name", ")", ")", "value", "=", "self", ".", "default", "else", ":", "value", "=", "override", "[", "key", "]", "if", "value", "is", "None", ":", "return", "value", "converted_value", "=", "self", ".", "convert_config_value", "(", "value", ",", "label", ")", "self", ".", "_validate_value", "(", "converted_value", ")", "return", "converted_value" ]
Get the configuration value from all overrides. Iterates over all overrides given to see if a value can be pulled out from them. It will convert each of these values to ensure they are the correct type. Args: overrides: A list of tuples where each tuple is a label and a dictionary representing a configuration. skip_environment: Skip looking through the environment. Returns: The converted configuration value. Raises: YapconfItemNotFound: If an item is required but could not be found in the configuration. YapconfItemError: If a possible value was found but the type cannot be determined. YapconfValueError: If a possible value is found but during conversion, an exception was raised.
[ "Get", "the", "configuration", "value", "from", "all", "overrides", "." ]
python
train
34.77551
bxlab/bx-python
lib/bx/bitset_builders.py
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/bitset_builders.py#L146-L155
def binned_bitsets_by_chrom( f, chrom, chrom_col=0, start_col=1, end_col=2): """Read a file by chrom name into a bitset""" bitset = BinnedBitSet( MAX ) for line in f: if line.startswith("#"): continue fields = line.split() if fields[chrom_col] == chrom: start, end = int( fields[start_col] ), int( fields[end_col] ) bitset.set_range( start, end-start ) return bitset
[ "def", "binned_bitsets_by_chrom", "(", "f", ",", "chrom", ",", "chrom_col", "=", "0", ",", "start_col", "=", "1", ",", "end_col", "=", "2", ")", ":", "bitset", "=", "BinnedBitSet", "(", "MAX", ")", "for", "line", "in", "f", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "fields", "=", "line", ".", "split", "(", ")", "if", "fields", "[", "chrom_col", "]", "==", "chrom", ":", "start", ",", "end", "=", "int", "(", "fields", "[", "start_col", "]", ")", ",", "int", "(", "fields", "[", "end_col", "]", ")", "bitset", ".", "set_range", "(", "start", ",", "end", "-", "start", ")", "return", "bitset" ]
Read a file by chrom name into a bitset
[ "Read", "a", "file", "by", "chrom", "name", "into", "a", "bitset" ]
python
train
42.1
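A usage sketch assuming a BED-like file whose first three columns are chrom, start and end; the import path follows this record's module path, and count_range() is an assumption about the bx-python BitSet API.

from bx.bitset_builders import binned_bitsets_by_chrom

with open('regions.bed') as handle:
    bits = binned_bitsets_by_chrom(handle, 'chr1')

# How many of the 5000 bases starting at position 10000 fall inside a region?
print(bits.count_range(10000, 5000))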
CalebBell/thermo
thermo/identifiers.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/identifiers.py#L359-L528
def CAS_from_any(ID, autoload=False): '''Looks up the CAS number of a chemical by searching and testing for the string being any of the following types of chemical identifiers: * Name, in IUPAC form or common form or a synonym registered in PubChem * InChI name, prefixed by 'InChI=1S/' or 'InChI=1/' * InChI key, prefixed by 'InChIKey=' * PubChem CID, prefixed by 'PubChem=' * SMILES (prefix with 'SMILES=' to ensure smiles parsing; ex. 'C' will return Carbon as it is an element whereas the SMILES interpretation for 'C' is methane) * CAS number (obsolete numbers may point to the current number) If the input is an ID representing an element, the following additional inputs may be specified as * Atomic symbol (ex 'Na') * Atomic number (as a string) Parameters ---------- ID : str One of the name formats described above Returns ------- CASRN : string A three-piece, dash-separated set of numbers Notes ----- An exception is raised if the name cannot be identified. The PubChem database includes a wide variety of other synonyms, but these may not be present for all chemcials. Examples -------- >>> CAS_from_any('water') '7732-18-5' >>> CAS_from_any('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3') '64-17-5' >>> CAS_from_any('CCCCCCCCCC') '124-18-5' >>> CAS_from_any('InChIKey=LFQSCWFLJHTTHZ-UHFFFAOYSA-N') '64-17-5' >>> CAS_from_any('pubchem=702') '64-17-5' >>> CAS_from_any('O') # only elements can be specified by symbol '17778-80-2' ''' ID = ID.strip() ID_lower = ID.lower() if ID in periodic_table: if periodic_table[ID].number not in homonuclear_elemental_gases: return periodic_table[ID].CAS else: for i in [periodic_table.symbol_to_elements, periodic_table.number_to_elements, periodic_table.CAS_to_elements]: if i == periodic_table.number_to_elements: if int(ID in i): return periodic_table[int(ID)].CAS else: if ID in i: return periodic_table[ID].CAS if checkCAS(ID): CAS_lookup = pubchem_db.search_CAS(ID, autoload) if CAS_lookup: return CAS_lookup.CASs # handle the case of synonyms CAS_alternate_loopup = pubchem_db.search_name(ID, autoload) if CAS_alternate_loopup: return CAS_alternate_loopup.CASs if not autoload: return CAS_from_any(ID, autoload=True) raise Exception('A valid CAS number was recognized, but is not in the database') ID_len = len(ID) if ID_len > 9: inchi_search = False # normal upper case is 'InChI=1S/' if ID_lower[0:9] == 'inchi=1s/': inchi_search = ID[9:] elif ID_lower[0:8] == 'inchi=1/': inchi_search = ID[8:] if inchi_search: inchi_lookup = pubchem_db.search_InChI(inchi_search, autoload) if inchi_lookup: return inchi_lookup.CASs else: if not autoload: return CAS_from_any(ID, autoload=True) raise Exception('A valid InChI name was recognized, but it is not in the database') if ID_lower[0:9] == 'inchikey=': inchi_key_lookup = pubchem_db.search_InChI_key(ID[9:], autoload) if inchi_key_lookup: return inchi_key_lookup.CASs else: if not autoload: return CAS_from_any(ID, autoload=True) raise Exception('A valid InChI Key was recognized, but it is not in the database') if ID_len > 8: if ID_lower[0:8] == 'pubchem=': pubchem_lookup = pubchem_db.search_pubchem(ID[8:], autoload) if pubchem_lookup: return pubchem_lookup.CASs else: if not autoload: return CAS_from_any(ID, autoload=True) raise Exception('A PubChem integer identifier was recognized, but it is not in the database.') if ID_len > 7: if ID_lower[0:7] == 'smiles=': smiles_lookup = pubchem_db.search_smiles(ID[7:], autoload) if smiles_lookup: return smiles_lookup.CASs else: if not autoload: return CAS_from_any(ID, autoload=True) raise Exception('A SMILES 
identifier was recognized, but it is not in the database.') # Try the smiles lookup anyway # Parsing SMILES is an option, but this is faster # Pybel API also prints messages to console on failure smiles_lookup = pubchem_db.search_smiles(ID, autoload) if smiles_lookup: return smiles_lookup.CASs try: formula_query = pubchem_db.search_formula(serialize_formula(ID), autoload) if formula_query and type(formula_query) == ChemicalMetadata: return formula_query.CASs except: pass # Try a direct lookup with the name - the fastest name_lookup = pubchem_db.search_name(ID, autoload) if name_lookup: return name_lookup.CASs # Permutate through various name options ID_no_space = ID.replace(' ', '') ID_no_space_dash = ID_no_space.replace('-', '') for name in [ID, ID_no_space, ID_no_space_dash]: for name2 in [name, name.lower()]: name_lookup = pubchem_db.search_name(name2, autoload) if name_lookup: return name_lookup.CASs if ID[-1] == ')' and '(' in ID:# # Try to matck in the form 'water (H2O)' first_identifier, second_identifier = ID[0:-1].split('(', 1) try: CAS1 = CAS_from_any(first_identifier) CAS2 = CAS_from_any(second_identifier) assert CAS1 == CAS2 return CAS1 except: pass if not autoload: return CAS_from_any(ID, autoload=True) raise Exception('Chemical name not recognized')
[ "def", "CAS_from_any", "(", "ID", ",", "autoload", "=", "False", ")", ":", "ID", "=", "ID", ".", "strip", "(", ")", "ID_lower", "=", "ID", ".", "lower", "(", ")", "if", "ID", "in", "periodic_table", ":", "if", "periodic_table", "[", "ID", "]", ".", "number", "not", "in", "homonuclear_elemental_gases", ":", "return", "periodic_table", "[", "ID", "]", ".", "CAS", "else", ":", "for", "i", "in", "[", "periodic_table", ".", "symbol_to_elements", ",", "periodic_table", ".", "number_to_elements", ",", "periodic_table", ".", "CAS_to_elements", "]", ":", "if", "i", "==", "periodic_table", ".", "number_to_elements", ":", "if", "int", "(", "ID", "in", "i", ")", ":", "return", "periodic_table", "[", "int", "(", "ID", ")", "]", ".", "CAS", "else", ":", "if", "ID", "in", "i", ":", "return", "periodic_table", "[", "ID", "]", ".", "CAS", "if", "checkCAS", "(", "ID", ")", ":", "CAS_lookup", "=", "pubchem_db", ".", "search_CAS", "(", "ID", ",", "autoload", ")", "if", "CAS_lookup", ":", "return", "CAS_lookup", ".", "CASs", "# handle the case of synonyms", "CAS_alternate_loopup", "=", "pubchem_db", ".", "search_name", "(", "ID", ",", "autoload", ")", "if", "CAS_alternate_loopup", ":", "return", "CAS_alternate_loopup", ".", "CASs", "if", "not", "autoload", ":", "return", "CAS_from_any", "(", "ID", ",", "autoload", "=", "True", ")", "raise", "Exception", "(", "'A valid CAS number was recognized, but is not in the database'", ")", "ID_len", "=", "len", "(", "ID", ")", "if", "ID_len", ">", "9", ":", "inchi_search", "=", "False", "# normal upper case is 'InChI=1S/'", "if", "ID_lower", "[", "0", ":", "9", "]", "==", "'inchi=1s/'", ":", "inchi_search", "=", "ID", "[", "9", ":", "]", "elif", "ID_lower", "[", "0", ":", "8", "]", "==", "'inchi=1/'", ":", "inchi_search", "=", "ID", "[", "8", ":", "]", "if", "inchi_search", ":", "inchi_lookup", "=", "pubchem_db", ".", "search_InChI", "(", "inchi_search", ",", "autoload", ")", "if", "inchi_lookup", ":", "return", "inchi_lookup", ".", "CASs", "else", ":", "if", "not", "autoload", ":", "return", "CAS_from_any", "(", "ID", ",", "autoload", "=", "True", ")", "raise", "Exception", "(", "'A valid InChI name was recognized, but it is not in the database'", ")", "if", "ID_lower", "[", "0", ":", "9", "]", "==", "'inchikey='", ":", "inchi_key_lookup", "=", "pubchem_db", ".", "search_InChI_key", "(", "ID", "[", "9", ":", "]", ",", "autoload", ")", "if", "inchi_key_lookup", ":", "return", "inchi_key_lookup", ".", "CASs", "else", ":", "if", "not", "autoload", ":", "return", "CAS_from_any", "(", "ID", ",", "autoload", "=", "True", ")", "raise", "Exception", "(", "'A valid InChI Key was recognized, but it is not in the database'", ")", "if", "ID_len", ">", "8", ":", "if", "ID_lower", "[", "0", ":", "8", "]", "==", "'pubchem='", ":", "pubchem_lookup", "=", "pubchem_db", ".", "search_pubchem", "(", "ID", "[", "8", ":", "]", ",", "autoload", ")", "if", "pubchem_lookup", ":", "return", "pubchem_lookup", ".", "CASs", "else", ":", "if", "not", "autoload", ":", "return", "CAS_from_any", "(", "ID", ",", "autoload", "=", "True", ")", "raise", "Exception", "(", "'A PubChem integer identifier was recognized, but it is not in the database.'", ")", "if", "ID_len", ">", "7", ":", "if", "ID_lower", "[", "0", ":", "7", "]", "==", "'smiles='", ":", "smiles_lookup", "=", "pubchem_db", ".", "search_smiles", "(", "ID", "[", "7", ":", "]", ",", "autoload", ")", "if", "smiles_lookup", ":", "return", "smiles_lookup", ".", "CASs", "else", ":", "if", "not", "autoload", ":", "return", "CAS_from_any", "(", 
"ID", ",", "autoload", "=", "True", ")", "raise", "Exception", "(", "'A SMILES identifier was recognized, but it is not in the database.'", ")", "# Try the smiles lookup anyway", "# Parsing SMILES is an option, but this is faster", "# Pybel API also prints messages to console on failure", "smiles_lookup", "=", "pubchem_db", ".", "search_smiles", "(", "ID", ",", "autoload", ")", "if", "smiles_lookup", ":", "return", "smiles_lookup", ".", "CASs", "try", ":", "formula_query", "=", "pubchem_db", ".", "search_formula", "(", "serialize_formula", "(", "ID", ")", ",", "autoload", ")", "if", "formula_query", "and", "type", "(", "formula_query", ")", "==", "ChemicalMetadata", ":", "return", "formula_query", ".", "CASs", "except", ":", "pass", "# Try a direct lookup with the name - the fastest", "name_lookup", "=", "pubchem_db", ".", "search_name", "(", "ID", ",", "autoload", ")", "if", "name_lookup", ":", "return", "name_lookup", ".", "CASs", "# Permutate through various name options", "ID_no_space", "=", "ID", ".", "replace", "(", "' '", ",", "''", ")", "ID_no_space_dash", "=", "ID_no_space", ".", "replace", "(", "'-'", ",", "''", ")", "for", "name", "in", "[", "ID", ",", "ID_no_space", ",", "ID_no_space_dash", "]", ":", "for", "name2", "in", "[", "name", ",", "name", ".", "lower", "(", ")", "]", ":", "name_lookup", "=", "pubchem_db", ".", "search_name", "(", "name2", ",", "autoload", ")", "if", "name_lookup", ":", "return", "name_lookup", ".", "CASs", "if", "ID", "[", "-", "1", "]", "==", "')'", "and", "'('", "in", "ID", ":", "#", "# Try to matck in the form 'water (H2O)'", "first_identifier", ",", "second_identifier", "=", "ID", "[", "0", ":", "-", "1", "]", ".", "split", "(", "'('", ",", "1", ")", "try", ":", "CAS1", "=", "CAS_from_any", "(", "first_identifier", ")", "CAS2", "=", "CAS_from_any", "(", "second_identifier", ")", "assert", "CAS1", "==", "CAS2", "return", "CAS1", "except", ":", "pass", "if", "not", "autoload", ":", "return", "CAS_from_any", "(", "ID", ",", "autoload", "=", "True", ")", "raise", "Exception", "(", "'Chemical name not recognized'", ")" ]
Looks up the CAS number of a chemical by searching and testing for the string being any of the following types of chemical identifiers: * Name, in IUPAC form or common form or a synonym registered in PubChem * InChI name, prefixed by 'InChI=1S/' or 'InChI=1/' * InChI key, prefixed by 'InChIKey=' * PubChem CID, prefixed by 'PubChem=' * SMILES (prefix with 'SMILES=' to ensure smiles parsing; ex. 'C' will return Carbon as it is an element whereas the SMILES interpretation for 'C' is methane) * CAS number (obsolete numbers may point to the current number) If the input is an ID representing an element, the following additional inputs may be specified as * Atomic symbol (ex 'Na') * Atomic number (as a string) Parameters ---------- ID : str One of the name formats described above Returns ------- CASRN : string A three-piece, dash-separated set of numbers Notes ----- An exception is raised if the name cannot be identified. The PubChem database includes a wide variety of other synonyms, but these may not be present for all chemcials. Examples -------- >>> CAS_from_any('water') '7732-18-5' >>> CAS_from_any('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3') '64-17-5' >>> CAS_from_any('CCCCCCCCCC') '124-18-5' >>> CAS_from_any('InChIKey=LFQSCWFLJHTTHZ-UHFFFAOYSA-N') '64-17-5' >>> CAS_from_any('pubchem=702') '64-17-5' >>> CAS_from_any('O') # only elements can be specified by symbol '17778-80-2'
[ "Looks", "up", "the", "CAS", "number", "of", "a", "chemical", "by", "searching", "and", "testing", "for", "the", "string", "being", "any", "of", "the", "following", "types", "of", "chemical", "identifiers", ":", "*", "Name", "in", "IUPAC", "form", "or", "common", "form", "or", "a", "synonym", "registered", "in", "PubChem", "*", "InChI", "name", "prefixed", "by", "InChI", "=", "1S", "/", "or", "InChI", "=", "1", "/", "*", "InChI", "key", "prefixed", "by", "InChIKey", "=", "*", "PubChem", "CID", "prefixed", "by", "PubChem", "=", "*", "SMILES", "(", "prefix", "with", "SMILES", "=", "to", "ensure", "smiles", "parsing", ";", "ex", ".", "C", "will", "return", "Carbon", "as", "it", "is", "an", "element", "whereas", "the", "SMILES", "interpretation", "for", "C", "is", "methane", ")", "*", "CAS", "number", "(", "obsolete", "numbers", "may", "point", "to", "the", "current", "number", ")" ]
python
valid
35.770588
fusionbox/django-ogmios
ogmios/__init__.py
https://github.com/fusionbox/django-ogmios/blob/65b818d6059acd90aee5e874f31be8bee7d36ca6/ogmios/__init__.py#L92-L101
def get_recipients(self, name): """ For example get_recipients('to') """ to_str = self.render_string(self.data[name]) formatted_emails = [ email.utils.formataddr(addr_pair) for addr_pair in email.utils.getaddresses([to_str]) ] return [i for i in formatted_emails if i]
[ "def", "get_recipients", "(", "self", ",", "name", ")", ":", "to_str", "=", "self", ".", "render_string", "(", "self", ".", "data", "[", "name", "]", ")", "formatted_emails", "=", "[", "email", ".", "utils", ".", "formataddr", "(", "addr_pair", ")", "for", "addr_pair", "in", "email", ".", "utils", ".", "getaddresses", "(", "[", "to_str", "]", ")", "]", "return", "[", "i", "for", "i", "in", "formatted_emails", "if", "i", "]" ]
For example get_recipients('to')
[ "For", "example", "get_recipients", "(", "to", ")" ]
python
train
33.9
openstack/networking-cisco
networking_cisco/apps/saf/server/dfa_openstack_helper.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_openstack_helper.py#L184-L198
def get_network_by_name(self, nwk_name): """Search for a openstack network by name. """ ret_net_lst = [] try: body = {} net_list = self.neutronclient.list_networks(body=body) net_list = net_list.get('networks') for net in net_list: if net.get('name') == nwk_name: ret_net_lst.append(net) except Exception as exc: LOG.error("Failed to get network by name %(name)s, " "Exc %(exc)s", {'name': nwk_name, 'exc': str(exc)}) return ret_net_lst
[ "def", "get_network_by_name", "(", "self", ",", "nwk_name", ")", ":", "ret_net_lst", "=", "[", "]", "try", ":", "body", "=", "{", "}", "net_list", "=", "self", ".", "neutronclient", ".", "list_networks", "(", "body", "=", "body", ")", "net_list", "=", "net_list", ".", "get", "(", "'networks'", ")", "for", "net", "in", "net_list", ":", "if", "net", ".", "get", "(", "'name'", ")", "==", "nwk_name", ":", "ret_net_lst", ".", "append", "(", "net", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "error", "(", "\"Failed to get network by name %(name)s, \"", "\"Exc %(exc)s\"", ",", "{", "'name'", ":", "nwk_name", ",", "'exc'", ":", "str", "(", "exc", ")", "}", ")", "return", "ret_net_lst" ]
Search for an OpenStack network by name.
[ "Search", "for", "a", "openstack", "network", "by", "name", "." ]
python
train
40.133333
PyGithub/PyGithub
github/PullRequestComment.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/PullRequestComment.py#L180-L195
def edit(self, body): """ :calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_ :param body: string :rtype: None """ assert isinstance(body, (str, unicode)), body post_parameters = { "body": body, } headers, data = self._requester.requestJsonAndCheck( "PATCH", self.url, input=post_parameters ) self._useAttributes(data)
[ "def", "edit", "(", "self", ",", "body", ")", ":", "assert", "isinstance", "(", "body", ",", "(", "str", ",", "unicode", ")", ")", ",", "body", "post_parameters", "=", "{", "\"body\"", ":", "body", ",", "}", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"PATCH\"", ",", "self", ".", "url", ",", "input", "=", "post_parameters", ")", "self", ".", "_useAttributes", "(", "data", ")" ]
:calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_ :param body: string :rtype: None
[ ":", "calls", ":", "PATCH", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "pulls", "/", "comments", "/", ":", "number", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "pulls", "/", "comments", ">", "_", ":", "param", "body", ":", "string", ":", "rtype", ":", "None" ]
python
train
30.9375
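A hedged end-to-end sketch with PyGithub; the repository, PR number and token are placeholders, and get_review_comments() is assumed to be the call that yields PullRequestComment objects.

from github import Github

gh = Github('personal-access-token')
pull = gh.get_repo('octocat/Hello-World').get_pull(42)
for comment in pull.get_review_comments():
    if 'typo' in comment.body:
        comment.edit('Fixed wording.')   # PATCHes the review comment body as above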
Bystroushaak/pyDHTMLParser
src/dhtmlparser/htmlelement/shared.py
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/htmlelement/shared.py#L39-L72
def _closeElements(childs, HTMLElement): """ Create `endtags` to elements which looks like openers, but doesn't have proper :attr:`HTMLElement.endtag`. Args: childs (list): List of childs (:class:`HTMLElement` obj) - typically from :attr:`HTMLElement.childs` property. Returns: list: List of closed elements. """ out = [] # close all unclosed pair tags for e in childs: if not e.isTag(): out.append(e) continue if not e.isNonPairTag() and not e.isEndTag() and not e.isComment() \ and e.endtag is None: e.childs = _closeElements(e.childs, HTMLElement) out.append(e) out.append(HTMLElement("</" + e.getTagName() + ">")) # join opener and endtag e.endtag = out[-1] out[-1].openertag = e else: out.append(e) return out
[ "def", "_closeElements", "(", "childs", ",", "HTMLElement", ")", ":", "out", "=", "[", "]", "# close all unclosed pair tags", "for", "e", "in", "childs", ":", "if", "not", "e", ".", "isTag", "(", ")", ":", "out", ".", "append", "(", "e", ")", "continue", "if", "not", "e", ".", "isNonPairTag", "(", ")", "and", "not", "e", ".", "isEndTag", "(", ")", "and", "not", "e", ".", "isComment", "(", ")", "and", "e", ".", "endtag", "is", "None", ":", "e", ".", "childs", "=", "_closeElements", "(", "e", ".", "childs", ",", "HTMLElement", ")", "out", ".", "append", "(", "e", ")", "out", ".", "append", "(", "HTMLElement", "(", "\"</\"", "+", "e", ".", "getTagName", "(", ")", "+", "\">\"", ")", ")", "# join opener and endtag", "e", ".", "endtag", "=", "out", "[", "-", "1", "]", "out", "[", "-", "1", "]", ".", "openertag", "=", "e", "else", ":", "out", ".", "append", "(", "e", ")", "return", "out" ]
Create `endtags` to elements which look like openers, but don't have a proper
:attr:`HTMLElement.endtag`.

Args:
    childs (list): List of childs (:class:`HTMLElement` obj) - typically
           from :attr:`HTMLElement.childs` property.

Returns:
    list: List of closed elements.
[ "Create", "endtags", "to", "elements", "which", "look", "like", "openers", "but", "don", "t", "have", "a", "proper", ":", "attr", ":", "HTMLElement", ".", "endtag", "." ]
python
train
26.529412
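A small sketch of what the helper does to a list of bare openers; the import locations are assumed from the module path above, and the printed output only shows that each opener gains a synthesized end tag.

from dhtmlparser import HTMLElement                      # assumed package-root export
from dhtmlparser.htmlelement.shared import _closeElements

# Two pair tags with no end tags; the helper appends synthesized </...> elements
# and links each opener and its end tag to one another.
childs = [HTMLElement("<div>"), HTMLElement("<p>")]
for element in _closeElements(childs, HTMLElement):
    if element.isTag():
        print(element.getTagName(), element.endtag is not None)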
iotile/typedargs
typedargs/typeinfo.py
https://github.com/iotile/typedargs/blob/0a5091a664b9b4d836e091e9ba583e944f438fd8/typedargs/typeinfo.py#L350-L366
def load_external_types(self, path): """ Given a path to a python package or module, load that module, search for all defined variables inside of it that do not start with _ or __ and inject them into the type system. If any of the types cannot be injected, silently ignore them unless verbose is True. If path points to a module it should not contain the trailing .py since this is added automatically by the python import system """ folder, filename = os.path.split(path) try: fileobj, pathname, description = imp.find_module(filename, [folder]) mod = imp.load_module(filename, fileobj, pathname, description) except ImportError as exc: raise ArgumentError("could not import module in order to load external types", module_path=path, parent_directory=folder, module_name=filename, error=str(exc)) self.load_type_module(mod)
[ "def", "load_external_types", "(", "self", ",", "path", ")", ":", "folder", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "path", ")", "try", ":", "fileobj", ",", "pathname", ",", "description", "=", "imp", ".", "find_module", "(", "filename", ",", "[", "folder", "]", ")", "mod", "=", "imp", ".", "load_module", "(", "filename", ",", "fileobj", ",", "pathname", ",", "description", ")", "except", "ImportError", "as", "exc", ":", "raise", "ArgumentError", "(", "\"could not import module in order to load external types\"", ",", "module_path", "=", "path", ",", "parent_directory", "=", "folder", ",", "module_name", "=", "filename", ",", "error", "=", "str", "(", "exc", ")", ")", "self", ".", "load_type_module", "(", "mod", ")" ]
Given a path to a python package or module, load that module, search for all defined variables inside of it that do not start with _ or __ and inject them into the type system. If any of the types cannot be injected, silently ignore them unless verbose is True. If path points to a module it should not contain the trailing .py since this is added automatically by the python import system
[ "Given", "a", "path", "to", "a", "python", "package", "or", "module", "load", "that", "module", "search", "for", "all", "defined", "variables", "inside", "of", "it", "that", "do", "not", "start", "with", "_", "or", "__", "and", "inject", "them", "into", "the", "type", "system", ".", "If", "any", "of", "the", "types", "cannot", "be", "injected", "silently", "ignore", "them", "unless", "verbose", "is", "True", ".", "If", "path", "points", "to", "a", "module", "it", "should", "not", "contain", "the", "trailing", ".", "py", "since", "this", "is", "added", "automatically", "by", "the", "python", "import", "system" ]
python
test
54.705882
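A hedged sketch of pulling extra type definitions in from disk; `type_system` is a placeholder for whichever TypeSystem instance the application already uses, and the path is illustrative (note it omits the trailing .py, as the docstring requires).

from typedargs.exceptions import ArgumentError   # exception import path assumed

# `type_system` stands for an existing TypeSystem instance; the module lives at
# /opt/my_app/custom_types.py but is referenced without its .py suffix.
try:
    type_system.load_external_types('/opt/my_app/custom_types')
except ArgumentError as err:
    # Raised when the module cannot be imported at all.
    print('failed to load external types:', err)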
davedoesdev/python-jwt
python_jwt/__init__.py
https://github.com/davedoesdev/python-jwt/blob/5c753a26955cc666f00f6ff8e601406d95071368/python_jwt/__init__.py#L209-L224
def process_jwt(jwt): """ Process a JSON Web Token without verifying it. Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key. :param jwt: The JSON Web Token to verify. :type jwt: str or unicode :rtype: tuple :returns: ``(header, claims)`` """ header, claims, _ = jwt.split('.') parsed_header = json_decode(base64url_decode(header)) parsed_claims = json_decode(base64url_decode(claims)) return parsed_header, parsed_claims
[ "def", "process_jwt", "(", "jwt", ")", ":", "header", ",", "claims", ",", "_", "=", "jwt", ".", "split", "(", "'.'", ")", "parsed_header", "=", "json_decode", "(", "base64url_decode", "(", "header", ")", ")", "parsed_claims", "=", "json_decode", "(", "base64url_decode", "(", "claims", ")", ")", "return", "parsed_header", ",", "parsed_claims" ]
Process a JSON Web Token without verifying it. Call this before :func:`verify_jwt` if you need access to the header or claims in the token before verifying it. For example, the claims might identify the issuer such that you can retrieve the appropriate public key. :param jwt: The JSON Web Token to verify. :type jwt: str or unicode :rtype: tuple :returns: ``(header, claims)``
[ "Process", "a", "JSON", "Web", "Token", "without", "verifying", "it", "." ]
python
train
38.875
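A sketch of the peek-then-verify flow the docstring describes; the token string and the issuer-to-key mapping are placeholders.

import python_jwt as jwt

# Placeholders: `token` is a compact JWS string, `keys_by_issuer` maps issuer
# names to their public keys.
header, claims = jwt.process_jwt(token)               # no signature check yet
pub_key = keys_by_issuer[claims['iss']]               # pick the right key
header, claims = jwt.verify_jwt(token, pub_key, ['RS256'])   # now verify properly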
oauthlib/oauthlib
oauthlib/oauth2/rfc6749/endpoints/revocation.py
https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth2/rfc6749/endpoints/revocation.py#L87-L126
def validate_revocation_request(self, request): """Ensure the request is valid. The client constructs the request by including the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: token (REQUIRED). The token that the client wants to get revoked. token_type_hint (OPTIONAL). A hint about the type of the token submitted for revocation. Clients MAY pass this parameter in order to help the authorization server to optimize the token lookup. If the server is unable to locate the token using the given hint, it MUST extend its search accross all of its supported token types. An authorization server MAY ignore this parameter, particularly if it is able to detect the token type automatically. This specification defines two such values: * access_token: An Access Token as defined in [RFC6749], `section 1.4`_ * refresh_token: A Refresh Token as defined in [RFC6749], `section 1.5`_ Specific implementations, profiles, and extensions of this specification MAY define other values for this parameter using the registry defined in `Section 4.1.2`_. The client also includes its authentication credentials as described in `Section 2.3`_. of [`RFC6749`_]. .. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4 .. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5 .. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3 .. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2 .. _`RFC6749`: https://tools.ietf.org/html/rfc6749 """ self._raise_on_missing_token(request) self._raise_on_invalid_client(request) self._raise_on_unsupported_token(request)
[ "def", "validate_revocation_request", "(", "self", ",", "request", ")", ":", "self", ".", "_raise_on_missing_token", "(", "request", ")", "self", ".", "_raise_on_invalid_client", "(", "request", ")", "self", ".", "_raise_on_unsupported_token", "(", "request", ")" ]
Ensure the request is valid. The client constructs the request by including the following parameters using the "application/x-www-form-urlencoded" format in the HTTP request entity-body: token (REQUIRED). The token that the client wants to get revoked. token_type_hint (OPTIONAL). A hint about the type of the token submitted for revocation. Clients MAY pass this parameter in order to help the authorization server to optimize the token lookup. If the server is unable to locate the token using the given hint, it MUST extend its search accross all of its supported token types. An authorization server MAY ignore this parameter, particularly if it is able to detect the token type automatically. This specification defines two such values: * access_token: An Access Token as defined in [RFC6749], `section 1.4`_ * refresh_token: A Refresh Token as defined in [RFC6749], `section 1.5`_ Specific implementations, profiles, and extensions of this specification MAY define other values for this parameter using the registry defined in `Section 4.1.2`_. The client also includes its authentication credentials as described in `Section 2.3`_. of [`RFC6749`_]. .. _`section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4 .. _`section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5 .. _`section 2.3`: https://tools.ietf.org/html/rfc6749#section-2.3 .. _`Section 4.1.2`: https://tools.ietf.org/html/draft-ietf-oauth-revocation-11#section-4.1.2 .. _`RFC6749`: https://tools.ietf.org/html/rfc6749
[ "Ensure", "the", "request", "is", "valid", "." ]
python
train
49.1
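In practice this validation runs inside the endpoint's revocation flow rather than being called directly; a hedged sketch follows, where `MyRequestValidator` is a hypothetical RequestValidator implementation supplied by the application and the URL, token and hint values are placeholders.

from oauthlib.oauth2 import Server

# `MyRequestValidator` is a hypothetical oauthlib RequestValidator subclass.
server = Server(MyRequestValidator())
headers, body, status = server.create_revocation_response(
    'https://provider.example.com/revoke',
    http_method='POST',
    body='token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token',
    headers={'Content-Type': 'application/x-www-form-urlencoded'},
)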
yvesalexandre/bandicoot
bandicoot/helper/tools.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/tools.py#L210-L234
def percent_overlapping_calls(records, min_gab=300): """ Return the percentage of calls that overlap with the next call. Parameters ---------- records : list The records for a single user. min_gab : int Number of seconds that the calls must overlap to be considered an issue. Defaults to 5 minutes. """ calls = [r for r in records if r.interaction == "call"] if len(calls) == 0: return 0. overlapping_calls = 0 for i, r in enumerate(calls): if i <= len(calls) - 2: if r.datetime + timedelta(seconds=r.call_duration - min_gab) >= calls[i + 1].datetime: overlapping_calls += 1 return (float(overlapping_calls) / len(calls))
[ "def", "percent_overlapping_calls", "(", "records", ",", "min_gab", "=", "300", ")", ":", "calls", "=", "[", "r", "for", "r", "in", "records", "if", "r", ".", "interaction", "==", "\"call\"", "]", "if", "len", "(", "calls", ")", "==", "0", ":", "return", "0.", "overlapping_calls", "=", "0", "for", "i", ",", "r", "in", "enumerate", "(", "calls", ")", ":", "if", "i", "<=", "len", "(", "calls", ")", "-", "2", ":", "if", "r", ".", "datetime", "+", "timedelta", "(", "seconds", "=", "r", ".", "call_duration", "-", "min_gab", ")", ">=", "calls", "[", "i", "+", "1", "]", ".", "datetime", ":", "overlapping_calls", "+=", "1", "return", "(", "float", "(", "overlapping_calls", ")", "/", "len", "(", "calls", ")", ")" ]
Return the percentage of calls that overlap with the next call. Parameters ---------- records : list The records for a single user. min_gab : int Number of seconds that the calls must overlap to be considered an issue. Defaults to 5 minutes.
[ "Return", "the", "percentage", "of", "calls", "that", "overlap", "with", "the", "next", "call", "." ]
python
train
28.72
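A sketch of computing the metric for a single user loaded with bandicoot's CSV reader; the user id and records directory are illustrative.

import bandicoot as bc
from bandicoot.helper.tools import percent_overlapping_calls

# 'ego' is the user id and 'records/' the directory holding ego.csv (illustrative).
user = bc.read_csv('ego', 'records/')
ratio = percent_overlapping_calls(user.records, min_gab=300)
print('{:.1%} of calls overlap the following call'.format(ratio))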
facetoe/zenpy
zenpy/__init__.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/__init__.py#L210-L214
def set_cache_implementation(self, cache_name, impl_name, maxsize, **kwargs): """ Changes the cache implementation for the named cache """ self._get_cache(cache_name).set_cache_impl(impl_name, maxsize, **kwargs)
[ "def", "set_cache_implementation", "(", "self", ",", "cache_name", ",", "impl_name", ",", "maxsize", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_get_cache", "(", "cache_name", ")", ".", "set_cache_impl", "(", "impl_name", ",", "maxsize", ",", "*", "*", "kwargs", ")" ]
Changes the cache implementation for the named cache
[ "Changes", "the", "cache", "implementation", "for", "the", "named", "cache" ]
python
train
47.8
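A hedged sketch of swapping the backend used by one of Zenpy's named caches; the credentials are placeholders, and the cache name 'user' and implementation name 'lru' are assumed values rather than a documented list.

from zenpy import Zenpy

# Placeholders for real Zendesk credentials.
zenpy_client = Zenpy(subdomain='example', email='agent@example.com', token='API_TOKEN')
# 'user' (cache name) and 'lru' (implementation) are assumptions; 10000 caps the size.
zenpy_client.set_cache_implementation('user', 'lru', 10000)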
pantsbuild/pants
src/python/pants/task/simple_codegen_task.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/task/simple_codegen_task.py#L312-L375
def _inject_synthetic_target(self, vt, sources): """Create, inject, and return a synthetic target for the given target and workdir. :param vt: A codegen input VersionedTarget to inject a synthetic target for. :param sources: A FilesetWithSpec to inject for the target. """ target = vt.target # NB: For stability, the injected target exposes the stable-symlinked `vt.results_dir`, # rather than the hash-named `vt.current_results_dir`. synthetic_target_dir = self.synthetic_target_dir(target, vt.results_dir) synthetic_target_type = self.synthetic_target_type(target) synthetic_extra_dependencies = self.synthetic_target_extra_dependencies(target, synthetic_target_dir) copied_attributes = {} for attribute in self._copy_target_attributes: copied_attributes[attribute] = getattr(target, attribute) if self._supports_exports(synthetic_target_type): extra_exports = self.synthetic_target_extra_exports(target, synthetic_target_dir) extra_exports_not_in_extra_dependencies = set(extra_exports).difference( set(synthetic_extra_dependencies)) if len(extra_exports_not_in_extra_dependencies) > 0: raise self.MismatchedExtraExports( 'Extra synthetic exports included targets not in the extra dependencies: {}. Affected target: {}' .format(extra_exports_not_in_extra_dependencies, target)) extra_export_specs = {e.address.spec for e in extra_exports} original_export_specs = self._original_export_specs(target) union = set(original_export_specs).union(extra_export_specs) copied_attributes['exports'] = sorted(union) synthetic_target = self.context.add_new_target( address=self._get_synthetic_address(target, synthetic_target_dir), target_type=synthetic_target_type, dependencies=synthetic_extra_dependencies, sources=sources, derived_from=target, **copied_attributes ) build_graph = self.context.build_graph # NB(pl): This bypasses the convenience function (Target.inject_dependency) in order # to improve performance. Note that we can walk the transitive dependee subgraph once # for transitive invalidation rather than walking a smaller subgraph for every single # dependency injected. for dependent_address in build_graph.dependents_of(target.address): build_graph.inject_dependency( dependent=dependent_address, dependency=synthetic_target.address, ) # NB(pl): See the above comment. The same note applies. for concrete_dependency_address in build_graph.dependencies_of(target.address): build_graph.inject_dependency( dependent=synthetic_target.address, dependency=concrete_dependency_address, ) if target in self.context.target_roots: self.context.target_roots.append(synthetic_target) return synthetic_target
[ "def", "_inject_synthetic_target", "(", "self", ",", "vt", ",", "sources", ")", ":", "target", "=", "vt", ".", "target", "# NB: For stability, the injected target exposes the stable-symlinked `vt.results_dir`,", "# rather than the hash-named `vt.current_results_dir`.", "synthetic_target_dir", "=", "self", ".", "synthetic_target_dir", "(", "target", ",", "vt", ".", "results_dir", ")", "synthetic_target_type", "=", "self", ".", "synthetic_target_type", "(", "target", ")", "synthetic_extra_dependencies", "=", "self", ".", "synthetic_target_extra_dependencies", "(", "target", ",", "synthetic_target_dir", ")", "copied_attributes", "=", "{", "}", "for", "attribute", "in", "self", ".", "_copy_target_attributes", ":", "copied_attributes", "[", "attribute", "]", "=", "getattr", "(", "target", ",", "attribute", ")", "if", "self", ".", "_supports_exports", "(", "synthetic_target_type", ")", ":", "extra_exports", "=", "self", ".", "synthetic_target_extra_exports", "(", "target", ",", "synthetic_target_dir", ")", "extra_exports_not_in_extra_dependencies", "=", "set", "(", "extra_exports", ")", ".", "difference", "(", "set", "(", "synthetic_extra_dependencies", ")", ")", "if", "len", "(", "extra_exports_not_in_extra_dependencies", ")", ">", "0", ":", "raise", "self", ".", "MismatchedExtraExports", "(", "'Extra synthetic exports included targets not in the extra dependencies: {}. Affected target: {}'", ".", "format", "(", "extra_exports_not_in_extra_dependencies", ",", "target", ")", ")", "extra_export_specs", "=", "{", "e", ".", "address", ".", "spec", "for", "e", "in", "extra_exports", "}", "original_export_specs", "=", "self", ".", "_original_export_specs", "(", "target", ")", "union", "=", "set", "(", "original_export_specs", ")", ".", "union", "(", "extra_export_specs", ")", "copied_attributes", "[", "'exports'", "]", "=", "sorted", "(", "union", ")", "synthetic_target", "=", "self", ".", "context", ".", "add_new_target", "(", "address", "=", "self", ".", "_get_synthetic_address", "(", "target", ",", "synthetic_target_dir", ")", ",", "target_type", "=", "synthetic_target_type", ",", "dependencies", "=", "synthetic_extra_dependencies", ",", "sources", "=", "sources", ",", "derived_from", "=", "target", ",", "*", "*", "copied_attributes", ")", "build_graph", "=", "self", ".", "context", ".", "build_graph", "# NB(pl): This bypasses the convenience function (Target.inject_dependency) in order", "# to improve performance. Note that we can walk the transitive dependee subgraph once", "# for transitive invalidation rather than walking a smaller subgraph for every single", "# dependency injected.", "for", "dependent_address", "in", "build_graph", ".", "dependents_of", "(", "target", ".", "address", ")", ":", "build_graph", ".", "inject_dependency", "(", "dependent", "=", "dependent_address", ",", "dependency", "=", "synthetic_target", ".", "address", ",", ")", "# NB(pl): See the above comment. The same note applies.", "for", "concrete_dependency_address", "in", "build_graph", ".", "dependencies_of", "(", "target", ".", "address", ")", ":", "build_graph", ".", "inject_dependency", "(", "dependent", "=", "synthetic_target", ".", "address", ",", "dependency", "=", "concrete_dependency_address", ",", ")", "if", "target", "in", "self", ".", "context", ".", "target_roots", ":", "self", ".", "context", ".", "target_roots", ".", "append", "(", "synthetic_target", ")", "return", "synthetic_target" ]
Create, inject, and return a synthetic target for the given target and workdir. :param vt: A codegen input VersionedTarget to inject a synthetic target for. :param sources: A FilesetWithSpec to inject for the target.
[ "Create", "inject", "and", "return", "a", "synthetic", "target", "for", "the", "given", "target", "and", "workdir", "." ]
python
train
44.375
mbrenig/SheetSync
sheetsync/__init__.py
https://github.com/mbrenig/SheetSync/blob/110e10c1f4388a91e5087cba6ea5517e04df8680/sheetsync/__init__.py#L867-L879
def sync(self, raw_data, row_change_callback=None): """ Equivalent to the inject method but will delete rows from the google spreadsheet if their key is not found in the input (raw_data) dictionary. Args: raw_data (dict): See inject method row_change_callback (Optional) (func): See inject method Returns: UpdateResults (object): See inject method """ return self._update(raw_data, row_change_callback, delete_rows=True)
[ "def", "sync", "(", "self", ",", "raw_data", ",", "row_change_callback", "=", "None", ")", ":", "return", "self", ".", "_update", "(", "raw_data", ",", "row_change_callback", ",", "delete_rows", "=", "True", ")" ]
Equivalent to the inject method but will delete rows from the google spreadsheet if their key is not found in the input (raw_data) dictionary. Args: raw_data (dict): See inject method row_change_callback (Optional) (func): See inject method Returns: UpdateResults (object): See inject method
[ "Equivalent", "to", "the", "inject", "method", "but", "will", "delete", "rows", "from", "the", "google", "spreadsheet", "if", "their", "key", "is", "not", "found", "in", "the", "input", "(", "raw_data", ")", "dictionary", ".", "Args", ":", "raw_data", "(", "dict", ")", ":", "See", "inject", "method", "row_change_callback", "(", "Optional", ")", "(", "func", ")", ":", "See", "inject", "method", "Returns", ":", "UpdateResults", "(", "object", ")", ":", "See", "inject", "method" ]
python
train
39.923077
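A hedged sketch of a full sync; the Sheet constructor arguments differ across sheetsync versions, so the credentials object, spreadsheet key and worksheet name below are placeholders only.

from sheetsync import Sheet

# Placeholders: `google_creds`, the document key and the worksheet name all depend
# on your own setup and sheetsync version.
sheet = Sheet(credentials=google_creds,
              document_key='1aBcDeFgHiJkLmNoP',
              worksheet_name='Staff')
data = {'E001': {'Name': 'Ada', 'Team': 'Platform'},
        'E002': {'Name': 'Grace', 'Team': 'Compilers'}}
results = sheet.sync(data)   # spreadsheet rows whose keys are missing from `data` get deleted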
StorjOld/pyp2p
pyp2p/rendezvous_server.py
https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/rendezvous_server.py#L151-L177
def synchronize_simultaneous(self, node_ip): """ Because adjacent mappings for certain NAT types can be stolen by other connections, the purpose of this function is to ensure the last connection by a passive simultaneous node is recent compared to the time for a candidate to increase the chance that the precited mappings remain active for the TCP hole punching attempt. """ for candidate in self.factory.candidates[node_ip]: # Only if candidate is connected. if not candidate["con"].connected: continue # Synchronise simultaneous node. if candidate["time"] -\ self.factory.nodes["simultaneous"][node_ip]["time"] >\ self.challege_timeout: msg = "RECONNECT" self.factory.nodes["simultaneous"][node_ip]["con"].\ send_line(msg) return self.cleanup_candidates(node_ip) self.propogate_candidates(node_ip)
[ "def", "synchronize_simultaneous", "(", "self", ",", "node_ip", ")", ":", "for", "candidate", "in", "self", ".", "factory", ".", "candidates", "[", "node_ip", "]", ":", "# Only if candidate is connected.\r", "if", "not", "candidate", "[", "\"con\"", "]", ".", "connected", ":", "continue", "# Synchronise simultaneous node.\r", "if", "candidate", "[", "\"time\"", "]", "-", "self", ".", "factory", ".", "nodes", "[", "\"simultaneous\"", "]", "[", "node_ip", "]", "[", "\"time\"", "]", ">", "self", ".", "challege_timeout", ":", "msg", "=", "\"RECONNECT\"", "self", ".", "factory", ".", "nodes", "[", "\"simultaneous\"", "]", "[", "node_ip", "]", "[", "\"con\"", "]", ".", "send_line", "(", "msg", ")", "return", "self", ".", "cleanup_candidates", "(", "node_ip", ")", "self", ".", "propogate_candidates", "(", "node_ip", ")" ]
Because adjacent mappings for certain NAT
types can be stolen by other
connections, the purpose of this function
is to ensure the last connection by a
passive simultaneous node is recent
compared to the time for a candidate to
increase the chance that the predicted
mappings remain active for the TCP hole
punching attempt.
[ "Because", "adjacent", "mappings", "for", "certain", "NAT", "types", "can", "be", "stolen", "by", "other", "connections", "the", "purpose", "of", "this", "function", "is", "to", "ensure", "the", "last", "connection", "by", "a", "passive", "simultaneous", "node", "is", "recent", "compared", "to", "the", "time", "for", "a", "candidate", "to", "increase", "the", "chance", "that", "the", "predicted", "mappings", "remain", "active", "for", "the", "TCP", "hole", "punching", "attempt", "." ]
python
train
39.666667
mitsei/dlkit
dlkit/records/assessment/qti/numeric_response_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/qti/numeric_response_records.py#L905-L931
def _init_metadata(self): """stub""" DecimalValuesFormRecord._init_metadata(self) IntegerValuesFormRecord._init_metadata(self) TextAnswerFormRecord._init_metadata(self) super(MultiLanguageCalculationInteractionFeedbackAndFilesAnswerFormRecord, self)._init_metadata() self._tolerance_mode_metadata = { 'element_id': Id(self.my_osid_object_form._authority, self.my_osid_object_form._namespace, 'tolerance_mode'), 'element_label': 'tolerance_mode', 'instructions': 'enter the tolerance mode', 'required': True, 'read_only': False, 'linked': False, 'array': False, 'default_string_values': [{ 'text': '', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }], 'syntax': 'STRING', 'minimum_string_length': 0, 'maximum_string_length': 1024, 'string_set': [] }
[ "def", "_init_metadata", "(", "self", ")", ":", "DecimalValuesFormRecord", ".", "_init_metadata", "(", "self", ")", "IntegerValuesFormRecord", ".", "_init_metadata", "(", "self", ")", "TextAnswerFormRecord", ".", "_init_metadata", "(", "self", ")", "super", "(", "MultiLanguageCalculationInteractionFeedbackAndFilesAnswerFormRecord", ",", "self", ")", ".", "_init_metadata", "(", ")", "self", ".", "_tolerance_mode_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'tolerance_mode'", ")", ",", "'element_label'", ":", "'tolerance_mode'", ",", "'instructions'", ":", "'enter the tolerance mode'", ",", "'required'", ":", "True", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'default_string_values'", ":", "[", "{", "'text'", ":", "''", ",", "'languageTypeId'", ":", "str", "(", "DEFAULT_LANGUAGE_TYPE", ")", ",", "'scriptTypeId'", ":", "str", "(", "DEFAULT_SCRIPT_TYPE", ")", ",", "'formatTypeId'", ":", "str", "(", "DEFAULT_FORMAT_TYPE", ")", ",", "}", "]", ",", "'syntax'", ":", "'STRING'", ",", "'minimum_string_length'", ":", "0", ",", "'maximum_string_length'", ":", "1024", ",", "'string_set'", ":", "[", "]", "}" ]
stub
[ "stub" ]
python
train
42.222222