Dataset schema (one record per row; lengths are min - max over the split):

Column            | Type          | Lengths / classes
------------------+---------------+-------------------
repo              | string        | 7 - 54 chars
path              | string        | 4 - 192 chars
url               | string        | 87 - 284 chars
code              | string        | 78 - 104k chars
code_tokens       | sequence      |
docstring         | string        | 1 - 46.9k chars
docstring_tokens  | sequence      |
language          | stringclasses | 1 value
partition         | stringclasses | 3 values
bcbio/bcbio-nextgen
bcbio/rnaseq/ericscript.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/ericscript.py#L125-L129
def _get_ericscript_path(self):
    """Retrieve PATH to the isolated eriscript anaconda environment.
    """
    es = utils.which(os.path.join(utils.get_bcbio_bin(), self.EXECUTABLE))
    return os.path.dirname(os.path.realpath(es))
[ "def", "_get_ericscript_path", "(", "self", ")", ":", "es", "=", "utils", ".", "which", "(", "os", ".", "path", ".", "join", "(", "utils", ".", "get_bcbio_bin", "(", ")", ",", "self", ".", "EXECUTABLE", ")", ")", "return", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "es", ")", ")" ]
Retrieve PATH to the isolated eriscript anaconda environment.
[ "Retrieve", "PATH", "to", "the", "isolated", "eriscript", "anaconda", "environment", "." ]
python
train
skorokithakis/shortuuid
shortuuid/main.py
https://github.com/skorokithakis/shortuuid/blob/4da632a986c3a43f75c7df64f27a90bbf7ff8039/shortuuid/main.py#L123-L128
def encoded_length(self, num_bytes=16):
    """
    Returns the string length of the shortened UUID.
    """
    factor = math.log(256) / math.log(self._alpha_len)
    return int(math.ceil(factor * num_bytes))
[ "def", "encoded_length", "(", "self", ",", "num_bytes", "=", "16", ")", ":", "factor", "=", "math", ".", "log", "(", "256", ")", "/", "math", ".", "log", "(", "self", ".", "_alpha_len", ")", "return", "int", "(", "math", ".", "ceil", "(", "factor", "*", "num_bytes", ")", ")" ]
Returns the string length of the shortened UUID.
[ "Returns", "the", "string", "length", "of", "the", "shortened", "UUID", "." ]
python
train
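A worked check of the record above: shortuuid's default alphabet has 57 characters, so factor = log(256)/log(57) ≈ 1.37, and a 16-byte UUID encodes to ceil(16 × 1.37) = 22 characters. A minimal standalone sketch (alphabet size assumed to be the default 57):

import math

def encoded_length(num_bytes=16, alpha_len=57):   # 57 = assumed default alphabet size
    # characters needed to represent num_bytes bytes in base alpha_len
    factor = math.log(256) / math.log(alpha_len)
    return int(math.ceil(factor * num_bytes))

print(encoded_length())   # 22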
gagneurlab/concise
concise/preprocessing/sequence.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L32-L38
def one_hot2string(arr, vocab):
    """Convert a one-hot encoded array back to string
    """
    tokens = one_hot2token(arr)
    indexToLetter = _get_index_dict(vocab)
    return [''.join([indexToLetter[x] for x in row]) for row in tokens]
[ "def", "one_hot2string", "(", "arr", ",", "vocab", ")", ":", "tokens", "=", "one_hot2token", "(", "arr", ")", "indexToLetter", "=", "_get_index_dict", "(", "vocab", ")", "return", "[", "''", ".", "join", "(", "[", "indexToLetter", "[", "x", "]", "for", "x", "in", "row", "]", ")", "for", "row", "in", "tokens", "]" ]
Convert a one-hot encoded array back to string
[ "Convert", "a", "one", "-", "hot", "encoded", "array", "back", "to", "string" ]
python
train
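one_hot2token and _get_index_dict are concise internals; a self-contained sketch of the same decode step, assuming a DNA vocabulary:

import numpy as np

vocab = ['A', 'C', 'G', 'T']
# one sequence of length 3, one-hot encoded as A, C, G
arr = np.array([[[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 1, 0]]])
tokens = arr.argmax(axis=-1)   # index of the hot position, per letter
print([''.join(vocab[i] for i in row) for row in tokens])   # ['ACG']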
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L6527-L6535
def alias(self, annotationtype, set, fallback=False):
    """Return the alias for a set (if applicable, returns the unaltered set otherwise iff fallback is enabled)"""
    if inspect.isclass(annotationtype): annotationtype = annotationtype.ANNOTATIONTYPE
    if annotationtype in self.set_alias and set in self.set_alias[annotationtype]:
        return self.set_alias[annotationtype][set]
    elif fallback:
        return set
    else:
        raise KeyError("No alias for set " + set)
[ "def", "alias", "(", "self", ",", "annotationtype", ",", "set", ",", "fallback", "=", "False", ")", ":", "if", "inspect", ".", "isclass", "(", "annotationtype", ")", ":", "annotationtype", "=", "annotationtype", ".", "ANNOTATIONTYPE", "if", "annotationtype", "in", "self", ".", "set_alias", "and", "set", "in", "self", ".", "set_alias", "[", "annotationtype", "]", ":", "return", "self", ".", "set_alias", "[", "annotationtype", "]", "[", "set", "]", "elif", "fallback", ":", "return", "set", "else", ":", "raise", "KeyError", "(", "\"No alias for set \"", "+", "set", ")" ]
Return the alias for a set (if applicable, returns the unaltered set otherwise iff fallback is enabled)
[ "Return", "the", "alias", "for", "a", "set", "(", "if", "applicable", "returns", "the", "unaltered", "set", "otherwise", "iff", "fallback", "is", "enabled", ")" ]
python
train
googleapis/google-cloud-python
storage/google/cloud/storage/bucket.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/storage/google/cloud/storage/bucket.py#L1670-L1699
def get_iam_policy(self, client=None):
    """Retrieve the IAM policy for the bucket.

    See
    https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: Optional. The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.

    :rtype: :class:`google.api_core.iam.Policy`
    :returns: the policy instance, based on the resource returned from
              the ``getIamPolicy`` API request.
    """
    client = self._require_client(client)
    query_params = {}

    if self.user_project is not None:
        query_params["userProject"] = self.user_project

    info = client._connection.api_request(
        method="GET",
        path="%s/iam" % (self.path,),
        query_params=query_params,
        _target_object=None,
    )
    return Policy.from_api_repr(info)
[ "def", "get_iam_policy", "(", "self", ",", "client", "=", "None", ")", ":", "client", "=", "self", ".", "_require_client", "(", "client", ")", "query_params", "=", "{", "}", "if", "self", ".", "user_project", "is", "not", "None", ":", "query_params", "[", "\"userProject\"", "]", "=", "self", ".", "user_project", "info", "=", "client", ".", "_connection", ".", "api_request", "(", "method", "=", "\"GET\"", ",", "path", "=", "\"%s/iam\"", "%", "(", "self", ".", "path", ",", ")", ",", "query_params", "=", "query_params", ",", "_target_object", "=", "None", ",", ")", "return", "Policy", ".", "from_api_repr", "(", "info", ")" ]
Retrieve the IAM policy for the bucket.

See
https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy

If :attr:`user_project` is set, bills the API request to that project.

:type client: :class:`~google.cloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
    to the ``client`` stored on the current bucket.

:rtype: :class:`google.api_core.iam.Policy`
:returns: the policy instance, based on the resource returned from
    the ``getIamPolicy`` API request.
[ "Retrieve", "the", "IAM", "policy", "for", "the", "bucket", "." ]
python
train
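A hedged usage sketch via the public google-cloud-storage client (the bucket name is hypothetical):

from google.cloud import storage

client = storage.Client()
bucket = client.bucket('my-bucket')   # hypothetical bucket name
policy = bucket.get_iam_policy()      # issues GET <bucket path>/iam
for role in policy:
    print(role, sorted(policy[role]))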
watson-developer-cloud/python-sdk
ibm_watson/speech_to_text_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L3039-L3046
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'customizations') and self.customizations is not None:
        _dict['customizations'] = [
            x._to_dict() for x in self.customizations
        ]
    return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'customizations'", ")", "and", "self", ".", "customizations", "is", "not", "None", ":", "_dict", "[", "'customizations'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "customizations", "]", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
esheldon/fitsio
fitsio/hdu/table.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L515-L560
def resize(self, nrows, front=False):
    """
    Resize the table to the given size, removing or adding rows as
    necessary. Note if expanding the table at the end, it is more
    efficient to use the append function than resizing and then
    writing.

    New added rows are zerod, except for 'i1', 'u2' and 'u4'
    data types which get -128,32768,2147483648 respectively

    parameters
    ----------
    nrows: int
        new size of table
    front: bool, optional
        If True, add or remove rows from the front. Default is False
    """
    nrows_current = self.get_nrows()
    if nrows == nrows_current:
        return

    if nrows < nrows_current:
        rowdiff = nrows_current - nrows
        if front:
            # delete from the front
            start = 0
            stop = rowdiff
        else:
            # delete from the back
            start = nrows
            stop = nrows_current

        self.delete_rows(slice(start, stop))
    else:
        rowdiff = nrows - nrows_current
        if front:
            # in this case zero is what we want, since the code inserts
            firstrow = 0
        else:
            firstrow = nrows_current
        self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)

    self._update_info()
[ "def", "resize", "(", "self", ",", "nrows", ",", "front", "=", "False", ")", ":", "nrows_current", "=", "self", ".", "get_nrows", "(", ")", "if", "nrows", "==", "nrows_current", ":", "return", "if", "nrows", "<", "nrows_current", ":", "rowdiff", "=", "nrows_current", "-", "nrows", "if", "front", ":", "# delete from the front", "start", "=", "0", "stop", "=", "rowdiff", "else", ":", "# delete from the back", "start", "=", "nrows", "stop", "=", "nrows_current", "self", ".", "delete_rows", "(", "slice", "(", "start", ",", "stop", ")", ")", "else", ":", "rowdiff", "=", "nrows", "-", "nrows_current", "if", "front", ":", "# in this case zero is what we want, since the code inserts", "firstrow", "=", "0", "else", ":", "firstrow", "=", "nrows_current", "self", ".", "_FITS", ".", "insert_rows", "(", "self", ".", "_ext", "+", "1", ",", "firstrow", ",", "rowdiff", ")", "self", ".", "_update_info", "(", ")" ]
Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.

New added rows are zerod, except for 'i1', 'u2' and 'u4'
data types which get -128,32768,2147483648 respectively

parameters
----------
nrows: int
    new size of table
front: bool, optional
    If True, add or remove rows from the front. Default is False
[ "Resize", "the", "table", "to", "the", "given", "size", "removing", "or", "adding", "rows", "as", "necessary", ".", "Note", "if", "expanding", "the", "table", "at", "the", "end", "it", "is", "more", "efficient", "to", "use", "the", "append", "function", "than", "resizing", "and", "then", "writing", "." ]
python
train
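A hedged usage sketch of resize() on a fitsio table HDU (file name and extension are hypothetical):

import fitsio

with fitsio.FITS('data.fits', 'rw') as fits:   # hypothetical file
    tbl = fits[1]                              # first table extension
    tbl.resize(tbl.get_nrows() + 100)          # grow: new rows are zeroed
    tbl.resize(50)                             # shrink: rows dropped from the back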
tonybaloney/wily
wily/commands/report.py
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/commands/report.py#L19-L188
def report(
    config,
    path,
    metrics,
    n,
    output,
    include_message=False,
    format=ReportFormat.CONSOLE,
    console_format=None,
):
    """
    Show information about the cache and runtime.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type path: ``str``

    :param metrics: Name of the metric to report on
    :type metrics: ``str``

    :param n: Number of items to list
    :type n: ``int``

    :param output: Output path
    :type output: ``Path``

    :param include_message: Include revision messages
    :type include_message: ``bool``

    :param format: Output format
    :type format: ``ReportFormat``

    :param console_format: Grid format style for tabulate
    :type console_format: ``str``
    """
    logger.debug("Running report command")
    logger.info(f"-----------History for {metrics}------------")

    data = []
    metric_metas = []

    for metric in metrics:
        operator, metric = resolve_metric_as_tuple(metric)
        key = metric.name
        operator = operator.name
        # Set the delta colors depending on the metric type
        if metric.measure == MetricType.AimHigh:
            good_color = 32
            bad_color = 31
        elif metric.measure == MetricType.AimLow:
            good_color = 31
            bad_color = 32
        elif metric.measure == MetricType.Informational:
            good_color = 33
            bad_color = 33
        metric_meta = {
            "key": key,
            "operator": operator,
            "good_color": good_color,
            "bad_color": bad_color,
            "title": metric.description,
            "type": metric.type,
        }
        metric_metas.append(metric_meta)

    state = State(config)
    for archiver in state.archivers:
        # We have to do it backwards to get the deltas between releases
        history = state.index[archiver].revisions[:n][::-1]
        last = {}
        for rev in history:
            vals = []
            for meta in metric_metas:
                try:
                    logger.debug(
                        f"Fetching metric {meta['key']} for {meta['operator']} in {path}"
                    )
                    val = rev.get(config, archiver, meta["operator"], path, meta["key"])
                    last_val = last.get(meta["key"], None)
                    # Measure the difference between this value and the last
                    if meta["type"] in (int, float):
                        if last_val:
                            delta = val - last_val
                        else:
                            delta = 0
                        last[meta["key"]] = val
                    else:
                        # TODO : Measure ranking increases/decreases for str types?
                        delta = 0

                    if delta == 0:
                        delta_col = delta
                    elif delta < 0:
                        delta_col = f"\u001b[{meta['good_color']}m{delta:n}\u001b[0m"
                    else:
                        delta_col = f"\u001b[{meta['bad_color']}m+{delta:n}\u001b[0m"

                    if meta["type"] in (int, float):
                        k = f"{val:n} ({delta_col})"
                    else:
                        k = f"{val}"
                except KeyError as e:
                    k = f"Not found {e}"
                vals.append(k)
            if include_message:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.message[:MAX_MESSAGE_WIDTH],
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                        *vals,
                    )
                )
            else:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                        *vals,
                    )
                )

    descriptions = [meta["title"] for meta in metric_metas]
    if include_message:
        headers = ("Revision", "Message", "Author", "Date", *descriptions)
    else:
        headers = ("Revision", "Author", "Date", *descriptions)

    if format == ReportFormat.HTML:
        if output.is_file and output.suffix == ".html":
            report_path = output.parents[0]
            report_output = output
        else:
            report_path = output
            report_output = output.joinpath("index.html")

        report_path.mkdir(exist_ok=True, parents=True)

        templates_dir = (Path(__file__).parents[1] / "templates").resolve()
        report_template = Template((templates_dir / "report_template.html").read_text())

        table_headers = "".join([f"<th>{header}</th>" for header in headers])
        table_content = ""
        for line in data[::-1]:
            table_content += "<tr>"
            for element in line:
                element = element.replace("[32m", "<span class='green-color'>")
                element = element.replace("[31m", "<span class='red-color'>")
                element = element.replace("[33m", "<span class='orange-color'>")
                element = element.replace("[0m", "</span>")
                table_content += f"<td>{element}</td>"
            table_content += "</tr>"

        report_template = report_template.safe_substitute(
            headers=table_headers, content=table_content
        )

        with report_output.open("w") as output:
            output.write(report_template)

        try:
            copytree(str(templates_dir / "css"), str(report_path / "css"))
        except FileExistsError:
            pass

        logger.info(f"wily report was saved to {report_path}")
    else:
        print(
            # But it still makes more sense to show the newest at the top, so reverse again
            tabulate.tabulate(
                headers=headers, tabular_data=data[::-1], tablefmt=console_format
            )
        )
[ "def", "report", "(", "config", ",", "path", ",", "metrics", ",", "n", ",", "output", ",", "include_message", "=", "False", ",", "format", "=", "ReportFormat", ".", "CONSOLE", ",", "console_format", "=", "None", ",", ")", ":", "logger", ".", "debug", "(", "\"Running report command\"", ")", "logger", ".", "info", "(", "f\"-----------History for {metrics}------------\"", ")", "data", "=", "[", "]", "metric_metas", "=", "[", "]", "for", "metric", "in", "metrics", ":", "operator", ",", "metric", "=", "resolve_metric_as_tuple", "(", "metric", ")", "key", "=", "metric", ".", "name", "operator", "=", "operator", ".", "name", "# Set the delta colors depending on the metric type", "if", "metric", ".", "measure", "==", "MetricType", ".", "AimHigh", ":", "good_color", "=", "32", "bad_color", "=", "31", "elif", "metric", ".", "measure", "==", "MetricType", ".", "AimLow", ":", "good_color", "=", "31", "bad_color", "=", "32", "elif", "metric", ".", "measure", "==", "MetricType", ".", "Informational", ":", "good_color", "=", "33", "bad_color", "=", "33", "metric_meta", "=", "{", "\"key\"", ":", "key", ",", "\"operator\"", ":", "operator", ",", "\"good_color\"", ":", "good_color", ",", "\"bad_color\"", ":", "bad_color", ",", "\"title\"", ":", "metric", ".", "description", ",", "\"type\"", ":", "metric", ".", "type", ",", "}", "metric_metas", ".", "append", "(", "metric_meta", ")", "state", "=", "State", "(", "config", ")", "for", "archiver", "in", "state", ".", "archivers", ":", "# We have to do it backwards to get the deltas between releases", "history", "=", "state", ".", "index", "[", "archiver", "]", ".", "revisions", "[", ":", "n", "]", "[", ":", ":", "-", "1", "]", "last", "=", "{", "}", "for", "rev", "in", "history", ":", "vals", "=", "[", "]", "for", "meta", "in", "metric_metas", ":", "try", ":", "logger", ".", "debug", "(", "f\"Fetching metric {meta['key']} for {meta['operator']} in {path}\"", ")", "val", "=", "rev", ".", "get", "(", "config", ",", "archiver", ",", "meta", "[", "\"operator\"", "]", ",", "path", ",", "meta", "[", "\"key\"", "]", ")", "last_val", "=", "last", ".", "get", "(", "meta", "[", "\"key\"", "]", ",", "None", ")", "# Measure the difference between this value and the last", "if", "meta", "[", "\"type\"", "]", "in", "(", "int", ",", "float", ")", ":", "if", "last_val", ":", "delta", "=", "val", "-", "last_val", "else", ":", "delta", "=", "0", "last", "[", "meta", "[", "\"key\"", "]", "]", "=", "val", "else", ":", "# TODO : Measure ranking increases/decreases for str types?", "delta", "=", "0", "if", "delta", "==", "0", ":", "delta_col", "=", "delta", "elif", "delta", "<", "0", ":", "delta_col", "=", "f\"\\u001b[{meta['good_color']}m{delta:n}\\u001b[0m\"", "else", ":", "delta_col", "=", "f\"\\u001b[{meta['bad_color']}m+{delta:n}\\u001b[0m\"", "if", "meta", "[", "\"type\"", "]", "in", "(", "int", ",", "float", ")", ":", "k", "=", "f\"{val:n} ({delta_col})\"", "else", ":", "k", "=", "f\"{val}\"", "except", "KeyError", "as", "e", ":", "k", "=", "f\"Not found {e}\"", "vals", ".", "append", "(", "k", ")", "if", "include_message", ":", "data", ".", "append", "(", "(", "format_revision", "(", "rev", ".", "revision", ".", "key", ")", ",", "rev", ".", "revision", ".", "message", "[", ":", "MAX_MESSAGE_WIDTH", "]", ",", "rev", ".", "revision", ".", "author_name", ",", "format_date", "(", "rev", ".", "revision", ".", "date", ")", ",", "*", "vals", ",", ")", ")", "else", ":", "data", ".", "append", "(", "(", "format_revision", "(", "rev", ".", "revision", ".", "key", ")", ",", "rev", 
".", "revision", ".", "author_name", ",", "format_date", "(", "rev", ".", "revision", ".", "date", ")", ",", "*", "vals", ",", ")", ")", "descriptions", "=", "[", "meta", "[", "\"title\"", "]", "for", "meta", "in", "metric_metas", "]", "if", "include_message", ":", "headers", "=", "(", "\"Revision\"", ",", "\"Message\"", ",", "\"Author\"", ",", "\"Date\"", ",", "*", "descriptions", ")", "else", ":", "headers", "=", "(", "\"Revision\"", ",", "\"Author\"", ",", "\"Date\"", ",", "*", "descriptions", ")", "if", "format", "==", "ReportFormat", ".", "HTML", ":", "if", "output", ".", "is_file", "and", "output", ".", "suffix", "==", "\".html\"", ":", "report_path", "=", "output", ".", "parents", "[", "0", "]", "report_output", "=", "output", "else", ":", "report_path", "=", "output", "report_output", "=", "output", ".", "joinpath", "(", "\"index.html\"", ")", "report_path", ".", "mkdir", "(", "exist_ok", "=", "True", ",", "parents", "=", "True", ")", "templates_dir", "=", "(", "Path", "(", "__file__", ")", ".", "parents", "[", "1", "]", "/", "\"templates\"", ")", ".", "resolve", "(", ")", "report_template", "=", "Template", "(", "(", "templates_dir", "/", "\"report_template.html\"", ")", ".", "read_text", "(", ")", ")", "table_headers", "=", "\"\"", ".", "join", "(", "[", "f\"<th>{header}</th>\"", "for", "header", "in", "headers", "]", ")", "table_content", "=", "\"\"", "for", "line", "in", "data", "[", ":", ":", "-", "1", "]", ":", "table_content", "+=", "\"<tr>\"", "for", "element", "in", "line", ":", "element", "=", "element", ".", "replace", "(", "\"[32m\"", ",", "\"<span class='green-color'>\"", ")", "element", "=", "element", ".", "replace", "(", "\"[31m\"", ",", "\"<span class='red-color'>\"", ")", "element", "=", "element", ".", "replace", "(", "\"[33m\"", ",", "\"<span class='orange-color'>\"", ")", "element", "=", "element", ".", "replace", "(", "\"[0m\"", ",", "\"</span>\"", ")", "table_content", "+=", "f\"<td>{element}</td>\"", "table_content", "+=", "\"</tr>\"", "report_template", "=", "report_template", ".", "safe_substitute", "(", "headers", "=", "table_headers", ",", "content", "=", "table_content", ")", "with", "report_output", ".", "open", "(", "\"w\"", ")", "as", "output", ":", "output", ".", "write", "(", "report_template", ")", "try", ":", "copytree", "(", "str", "(", "templates_dir", "/", "\"css\"", ")", ",", "str", "(", "report_path", "/", "\"css\"", ")", ")", "except", "FileExistsError", ":", "pass", "logger", ".", "info", "(", "f\"wily report was saved to {report_path}\"", ")", "else", ":", "print", "(", "# But it still makes more sense to show the newest at the top, so reverse again", "tabulate", ".", "tabulate", "(", "headers", "=", "headers", ",", "tabular_data", "=", "data", "[", ":", ":", "-", "1", "]", ",", "tablefmt", "=", "console_format", ")", ")" ]
Show information about the cache and runtime.

:param config: The configuration
:type config: :class:`wily.config.WilyConfig`

:param path: The path to the file
:type path: ``str``

:param metrics: Name of the metric to report on
:type metrics: ``str``

:param n: Number of items to list
:type n: ``int``

:param output: Output path
:type output: ``Path``

:param include_message: Include revision messages
:type include_message: ``bool``

:param format: Output format
:type format: ``ReportFormat``

:param console_format: Grid format style for tabulate
:type console_format: ``str``
[ "Show", "information", "about", "the", "cache", "and", "runtime", "." ]
python
train
boriel/zxbasic
arch/zx48k/optimizer.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L1635-L1652
def goes_requires(self, regs):
    """ Returns whether any of the goes_to block requires any of
    the given registers.
    """
    if len(self) and self.mem[-1].inst == 'call' and self.mem[-1].condition_flag is None:
        for block in self.calls:
            if block.is_used(regs, 0):
                return True

            d = block.destroys()
            if not len([x for x in regs if x not in d]):
                return False  # If all registers are destroyed then they're not used

    for block in self.goes_to:
        if block.is_used(regs, 0):
            return True

    return False
[ "def", "goes_requires", "(", "self", ",", "regs", ")", ":", "if", "len", "(", "self", ")", "and", "self", ".", "mem", "[", "-", "1", "]", ".", "inst", "==", "'call'", "and", "self", ".", "mem", "[", "-", "1", "]", ".", "condition_flag", "is", "None", ":", "for", "block", "in", "self", ".", "calls", ":", "if", "block", ".", "is_used", "(", "regs", ",", "0", ")", ":", "return", "True", "d", "=", "block", ".", "destroys", "(", ")", "if", "not", "len", "(", "[", "x", "for", "x", "in", "regs", "if", "x", "not", "in", "d", "]", ")", ":", "return", "False", "# If all registers are destroyed then they're not used", "for", "block", "in", "self", ".", "goes_to", ":", "if", "block", ".", "is_used", "(", "regs", ",", "0", ")", ":", "return", "True", "return", "False" ]
Returns whether any of the goes_to block requires any of the given registers.
[ "Returns", "whether", "any", "of", "the", "goes_to", "block", "requires", "any", "of", "the", "given", "registers", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/storage/linux/ceph.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/storage/linux/ceph.py#L958-L967
def image_mapped(name):
    """Determine whether a RADOS block device is mapped locally."""
    try:
        out = check_output(['rbd', 'showmapped'])
        if six.PY3:
            out = out.decode('UTF-8')
    except CalledProcessError:
        return False

    return name in out
[ "def", "image_mapped", "(", "name", ")", ":", "try", ":", "out", "=", "check_output", "(", "[", "'rbd'", ",", "'showmapped'", "]", ")", "if", "six", ".", "PY3", ":", "out", "=", "out", ".", "decode", "(", "'UTF-8'", ")", "except", "CalledProcessError", ":", "return", "False", "return", "name", "in", "out" ]
Determine whether a RADOS block device is mapped locally.
[ "Determine", "whether", "a", "RADOS", "block", "device", "is", "mapped", "locally", "." ]
python
train
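Since image_mapped() simply substring-matches the `rbd showmapped` output, a usage sketch is one call (the image name is hypothetical):

from charmhelpers.contrib.storage.linux.ceph import image_mapped

if not image_mapped('myimage'):   # hypothetical RBD image name
    print('RBD image is not mapped on this machine')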
SchroterQuentin/django-search-listview
search_listview/list.py
https://github.com/SchroterQuentin/django-search-listview/blob/8b027a6908dc30c6ebc613bb4fde6b1ba40124a3/search_listview/list.py#L192-L201
def associate_model(model, field):
    """
    Return the model associate to the ForeignKey or
    ManyToMany relation
    """
    class_field = model._meta.get_field(field)
    if hasattr(class_field, "field"):
        return class_field.field.related.related_model
    else:
        return class_field.related_model
[ "def", "associate_model", "(", "model", ",", "field", ")", ":", "class_field", "=", "model", ".", "_meta", ".", "get_field", "(", "field", ")", "if", "hasattr", "(", "class_field", ",", "\"field\"", ")", ":", "return", "class_field", ".", "field", ".", "related", ".", "related_model", "else", ":", "return", "class_field", ".", "related_model" ]
Return the model associate to the ForeignKey or ManyToMany relation
[ "Return", "the", "model", "associate", "to", "the", "ForeignKey", "or", "ManyToMany", "relation" ]
python
train
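A hedged sketch with hypothetical Django models (a Book with a ManyToMany field 'authors' pointing at Author); associate_model resolves the field name to the related model class:

from myapp.models import Author, Book            # hypothetical app and models
from search_listview.list import associate_model

related = associate_model(Book, 'authors')       # ManyToMany relation
assert related is Author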
yyuu/botornado
boto/ec2/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L1371-L1406
def detach_volume(self, volume_id, instance_id=None,
                  device=None, force=False):
    """
    Detach an EBS volume from an EC2 instance.

    :type volume_id: str
    :param volume_id: The ID of the EBS volume to be attached.

    :type instance_id: str
    :param instance_id: The ID of the EC2 instance from which it will
                        be detached.

    :type device: str
    :param device: The device on the instance through which the
                   volume is exposted (e.g. /dev/sdh)

    :type force: bool
    :param force: Forces detachment if the previous detachment
                  attempt did not occur cleanly. This option can lead
                  to data loss or a corrupted file system. Use this
                  option only as a last resort to detach a volume from
                  a failed instance. The instance will not have an
                  opportunity to flush file system caches nor file
                  system meta data. If you use this option, you must
                  perform file system check and repair procedures.

    :rtype: bool
    :return: True if successful
    """
    params = {'VolumeId' : volume_id}
    if instance_id:
        params['InstanceId'] = instance_id
    if device:
        params['Device'] = device
    if force:
        params['Force'] = 'true'
    return self.get_status('DetachVolume', params, verb='POST')
[ "def", "detach_volume", "(", "self", ",", "volume_id", ",", "instance_id", "=", "None", ",", "device", "=", "None", ",", "force", "=", "False", ")", ":", "params", "=", "{", "'VolumeId'", ":", "volume_id", "}", "if", "instance_id", ":", "params", "[", "'InstanceId'", "]", "=", "instance_id", "if", "device", ":", "params", "[", "'Device'", "]", "=", "device", "if", "force", ":", "params", "[", "'Force'", "]", "=", "'true'", "return", "self", ".", "get_status", "(", "'DetachVolume'", ",", "params", ",", "verb", "=", "'POST'", ")" ]
Detach an EBS volume from an EC2 instance.

:type volume_id: str
:param volume_id: The ID of the EBS volume to be attached.

:type instance_id: str
:param instance_id: The ID of the EC2 instance from which it will
    be detached.

:type device: str
:param device: The device on the instance through which the
    volume is exposted (e.g. /dev/sdh)

:type force: bool
:param force: Forces detachment if the previous detachment
    attempt did not occur cleanly. This option can lead to data loss
    or a corrupted file system. Use this option only as a last resort
    to detach a volume from a failed instance. The instance will not
    have an opportunity to flush file system caches nor file system
    meta data. If you use this option, you must perform file system
    check and repair procedures.

:rtype: bool
:return: True if successful
[ "Detach", "an", "EBS", "volume", "from", "an", "EC2", "instance", "." ]
python
train
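A hedged usage sketch with a classic boto EC2 connection (all IDs are hypothetical):

import boto

conn = boto.connect_ec2()   # credentials taken from the environment
ok = conn.detach_volume('vol-12345678',
                        instance_id='i-87654321',
                        device='/dev/sdh')
print(ok)   # True if the DetachVolume request succeeded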
arista-eosplus/pyeapi
pyeapi/api/switchports.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/api/switchports.py#L344-L377
def set_trunk_groups(self, intf, value=None, default=False, disable=False):
    """Configures the switchport trunk group value

    Args:
        intf (str): The interface identifier to configure.
        value (str): The set of values to configure the trunk group
        default (bool): Configures the trunk group default value
        disable (bool): Negates all trunk group settings

    Returns:
        True if the config operation succeeds otherwise False
    """
    if default:
        cmd = 'default switchport trunk group'
        return self.configure_interface(intf, cmd)

    if disable:
        cmd = 'no switchport trunk group'
        return self.configure_interface(intf, cmd)

    current_value = self.get(intf)['trunk_groups']
    failure = False

    value = make_iterable(value)

    for name in set(value).difference(current_value):
        if not self.add_trunk_group(intf, name):
            failure = True

    for name in set(current_value).difference(value):
        if not self.remove_trunk_group(intf, name):
            failure = True

    return not failure
[ "def", "set_trunk_groups", "(", "self", ",", "intf", ",", "value", "=", "None", ",", "default", "=", "False", ",", "disable", "=", "False", ")", ":", "if", "default", ":", "cmd", "=", "'default switchport trunk group'", "return", "self", ".", "configure_interface", "(", "intf", ",", "cmd", ")", "if", "disable", ":", "cmd", "=", "'no switchport trunk group'", "return", "self", ".", "configure_interface", "(", "intf", ",", "cmd", ")", "current_value", "=", "self", ".", "get", "(", "intf", ")", "[", "'trunk_groups'", "]", "failure", "=", "False", "value", "=", "make_iterable", "(", "value", ")", "for", "name", "in", "set", "(", "value", ")", ".", "difference", "(", "current_value", ")", ":", "if", "not", "self", ".", "add_trunk_group", "(", "intf", ",", "name", ")", ":", "failure", "=", "True", "for", "name", "in", "set", "(", "current_value", ")", ".", "difference", "(", "value", ")", ":", "if", "not", "self", ".", "remove_trunk_group", "(", "intf", ",", "name", ")", ":", "failure", "=", "True", "return", "not", "failure" ]
Configures the switchport trunk group value

Args:
    intf (str): The interface identifier to configure.
    value (str): The set of values to configure the trunk group
    default (bool): Configures the trunk group default value
    disable (bool): Negates all trunk group settings

Returns:
    True if the config operation succeeds otherwise False
[ "Configures", "the", "switchport", "trunk", "group", "value" ]
python
train
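A hedged usage sketch; the connection profile and group names are assumptions. Note the method converges the interface onto exactly the given set, adding missing groups and removing extra ones:

import pyeapi

node = pyeapi.connect_to('veos01')          # hypothetical eapi.conf profile
switchports = node.api('switchports')
ok = switchports.set_trunk_groups('Ethernet1', value=['tg1', 'tg2'])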
scarface-4711/denonavr
denonavr/denonavr.py
https://github.com/scarface-4711/denonavr/blob/59a136e27b43cb1d1e140cf67705087b3aa377cd/denonavr/denonavr.py#L1439-L1471
def set_sound_mode(self, sound_mode):
    """
    Set sound_mode of device.

    Valid values depend on the device and should be taken from
    "sound_mode_list".
    Return "True" on success and "False" on fail.
    """
    if sound_mode == ALL_ZONE_STEREO:
        if self._set_all_zone_stereo(True):
            self._sound_mode_raw = ALL_ZONE_STEREO
            return True
        else:
            return False
    if self._sound_mode_raw == ALL_ZONE_STEREO:
        if not self._set_all_zone_stereo(False):
            return False
    # For selection of sound mode other names then at receiving sound modes
    # have to be used
    # Therefore source mapping is needed to get sound_mode
    # Create command URL and send command via HTTP GET
    command_url = self._urls.command_sel_sound_mode + sound_mode
    # sent command
    try:
        if self.send_get_command(command_url):
            self._sound_mode_raw = self._sound_mode_dict[sound_mode][0]
            return True
        else:
            return False
    except requests.exceptions.RequestException:
        _LOGGER.error("Connection error: sound mode function %s not set.",
                      sound_mode)
        return False
[ "def", "set_sound_mode", "(", "self", ",", "sound_mode", ")", ":", "if", "sound_mode", "==", "ALL_ZONE_STEREO", ":", "if", "self", ".", "_set_all_zone_stereo", "(", "True", ")", ":", "self", ".", "_sound_mode_raw", "=", "ALL_ZONE_STEREO", "return", "True", "else", ":", "return", "False", "if", "self", ".", "_sound_mode_raw", "==", "ALL_ZONE_STEREO", ":", "if", "not", "self", ".", "_set_all_zone_stereo", "(", "False", ")", ":", "return", "False", "# For selection of sound mode other names then at receiving sound modes", "# have to be used", "# Therefore source mapping is needed to get sound_mode", "# Create command URL and send command via HTTP GET", "command_url", "=", "self", ".", "_urls", ".", "command_sel_sound_mode", "+", "sound_mode", "# sent command", "try", ":", "if", "self", ".", "send_get_command", "(", "command_url", ")", ":", "self", ".", "_sound_mode_raw", "=", "self", ".", "_sound_mode_dict", "[", "sound_mode", "]", "[", "0", "]", "return", "True", "else", ":", "return", "False", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "_LOGGER", ".", "error", "(", "\"Connection error: sound mode function %s not set.\"", ",", "sound_mode", ")", "return", "False" ]
Set sound_mode of device. Valid values depend on the device and should be taken from "sound_mode_list". Return "True" on success and "False" on fail.
[ "Set", "sound_mode", "of", "device", "." ]
python
train
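A hedged usage sketch; the receiver address and mode name are assumptions, and update()/sound_mode_list are the usual companions of this API in the library:

import denonavr

d = denonavr.DenonAVR('192.168.1.50')   # hypothetical receiver address
d.update()                              # refresh state, incl. sound_mode_list
if 'MUSIC' in d.sound_mode_list:        # hypothetical mode name
    d.set_sound_mode('MUSIC')           # True on success, False on failure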
dmbee/seglearn
seglearn/transform.py
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/transform.py#L1301-L1319
def _retrieve_indices(cols):
    '''
    Retrieve a list of indices corresponding to the provided column specification.
    '''
    if isinstance(cols, int):
        return [cols]
    elif isinstance(cols, slice):
        start = cols.start if cols.start else 0
        stop = cols.stop
        step = cols.step if cols.step else 1
        return list(range(start, stop, step))
    elif isinstance(cols, list) and cols:
        if isinstance(cols[0], bool):
            return np.flatnonzero(np.asarray(cols))
        elif isinstance(cols[0], int):
            return cols
    else:
        raise TypeError('No valid column specifier. Only a scalar, list or slice of all'
                        'integers or a boolean mask are allowed.')
[ "def", "_retrieve_indices", "(", "cols", ")", ":", "if", "isinstance", "(", "cols", ",", "int", ")", ":", "return", "[", "cols", "]", "elif", "isinstance", "(", "cols", ",", "slice", ")", ":", "start", "=", "cols", ".", "start", "if", "cols", ".", "start", "else", "0", "stop", "=", "cols", ".", "stop", "step", "=", "cols", ".", "step", "if", "cols", ".", "step", "else", "1", "return", "list", "(", "range", "(", "start", ",", "stop", ",", "step", ")", ")", "elif", "isinstance", "(", "cols", ",", "list", ")", "and", "cols", ":", "if", "isinstance", "(", "cols", "[", "0", "]", ",", "bool", ")", ":", "return", "np", ".", "flatnonzero", "(", "np", ".", "asarray", "(", "cols", ")", ")", "elif", "isinstance", "(", "cols", "[", "0", "]", ",", "int", ")", ":", "return", "cols", "else", ":", "raise", "TypeError", "(", "'No valid column specifier. Only a scalar, list or slice of all'", "'integers or a boolean mask are allowed.'", ")" ]
Retrieve a list of indices corresponding to the provided column specification.
[ "Retrieve", "a", "list", "of", "indices", "corresponding", "to", "the", "provided", "column", "specification", "." ]
python
train
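The three accepted specifier forms, traced through the private helper above (assuming it is in scope):

import numpy as np   # needed for the boolean-mask branch

_retrieve_indices(3)                    # scalar       -> [3]
_retrieve_indices(slice(1, 7, 2))       # slice        -> [1, 3, 5]
_retrieve_indices([True, False, True])  # boolean mask -> array([0, 2])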
mitsei/dlkit
dlkit/services/assessment.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/assessment.py#L397-L408
def _set_bank_view(self, session):
    """Sets the underlying bank view to match current view"""
    if self._bank_view == COMPARATIVE:
        try:
            session.use_comparative_bank_view()
        except AttributeError:
            pass
    else:
        try:
            session.use_plenary_bank_view()
        except AttributeError:
            pass
[ "def", "_set_bank_view", "(", "self", ",", "session", ")", ":", "if", "self", ".", "_bank_view", "==", "COMPARATIVE", ":", "try", ":", "session", ".", "use_comparative_bank_view", "(", ")", "except", "AttributeError", ":", "pass", "else", ":", "try", ":", "session", ".", "use_plenary_bank_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Sets the underlying bank view to match current view
[ "Sets", "the", "underlying", "bank", "view", "to", "match", "current", "view" ]
python
train
pandas-dev/pandas
pandas/core/resample.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1334-L1373
def _get_resampler(self, obj, kind=None):
    """
    Return my resampler or raise if we have an invalid axis.

    Parameters
    ----------
    obj : input object
    kind : string, optional
        'period','timestamp','timedelta' are valid

    Returns
    -------
    a Resampler

    Raises
    ------
    TypeError if incompatible axis
    """
    self._set_grouper(obj)
    ax = self.ax
    if isinstance(ax, DatetimeIndex):
        return DatetimeIndexResampler(obj,
                                      groupby=self,
                                      kind=kind,
                                      axis=self.axis)
    elif isinstance(ax, PeriodIndex) or kind == 'period':
        return PeriodIndexResampler(obj,
                                    groupby=self,
                                    kind=kind,
                                    axis=self.axis)
    elif isinstance(ax, TimedeltaIndex):
        return TimedeltaIndexResampler(obj,
                                       groupby=self,
                                       axis=self.axis)

    raise TypeError("Only valid with DatetimeIndex, "
                    "TimedeltaIndex or PeriodIndex, "
                    "but got an instance of %r" % type(ax).__name__)
[ "def", "_get_resampler", "(", "self", ",", "obj", ",", "kind", "=", "None", ")", ":", "self", ".", "_set_grouper", "(", "obj", ")", "ax", "=", "self", ".", "ax", "if", "isinstance", "(", "ax", ",", "DatetimeIndex", ")", ":", "return", "DatetimeIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "kind", "=", "kind", ",", "axis", "=", "self", ".", "axis", ")", "elif", "isinstance", "(", "ax", ",", "PeriodIndex", ")", "or", "kind", "==", "'period'", ":", "return", "PeriodIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "kind", "=", "kind", ",", "axis", "=", "self", ".", "axis", ")", "elif", "isinstance", "(", "ax", ",", "TimedeltaIndex", ")", ":", "return", "TimedeltaIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "axis", "=", "self", ".", "axis", ")", "raise", "TypeError", "(", "\"Only valid with DatetimeIndex, \"", "\"TimedeltaIndex or PeriodIndex, \"", "\"but got an instance of %r\"", "%", "type", "(", "ax", ")", ".", "__name__", ")" ]
Return my resampler or raise if we have an invalid axis.

Parameters
----------
obj : input object
kind : string, optional
    'period','timestamp','timedelta' are valid

Returns
-------
a Resampler

Raises
------
TypeError if incompatible axis
[ "Return", "my", "resampler", "or", "raise", "if", "we", "have", "an", "invalid", "axis", "." ]
python
train
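_get_resampler is internal plumbing; users reach it through .resample(), which dispatches on the index type exactly as above:

import pandas as pd

s = pd.Series(range(6),
              index=pd.date_range('2019-01-01', periods=6, freq='D'))
print(s.resample('2D').sum())   # DatetimeIndex -> DatetimeIndexResampler

s2 = pd.Series(range(3))        # plain RangeIndex
# s2.resample('2D')             # raises the TypeError shown above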
tradenity/python-sdk
tradenity/resources/free_shipping.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/free_shipping.py#L728-L750
def list_all_free_shippings(cls, **kwargs):
    """List FreeShippings

    Return a list of FreeShippings

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_free_shippings(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[FreeShipping]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_free_shippings_with_http_info(**kwargs)
    else:
        (data) = cls._list_all_free_shippings_with_http_info(**kwargs)
        return data
[ "def", "list_all_free_shippings", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_free_shippings_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_list_all_free_shippings_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
List FreeShippings

Return a list of FreeShippings

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True

>>> thread = api.list_all_free_shippings(async=True)
>>> result = thread.get()

:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[FreeShipping]
    If the method is called asynchronously, returns the request thread.
[ "List", "FreeShippings" ]
python
train
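A hedged usage sketch for the classmethod above. Since `async` is a reserved word from Python 3.7 on, the asynchronous flag is passed via a kwargs dict here:

from tradenity.resources.free_shipping import FreeShipping

# synchronous: returns a page of FreeShipping resources
page = FreeShipping.list_all_free_shippings(page=1, size=20)

# asynchronous: returns the request thread
thread = FreeShipping.list_all_free_shippings(**{'async': True})
result = thread.get()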
projectshift/shift-boiler
boiler/user/session_interface.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/session_interface.py#L45-L59
def save_session(self, *args, **kwargs):
    """
    Save session
    Skip setting session cookie if requested via g.stateless_sessions
    """
    # do not send session cookie
    if g.get('stateless_sessions'):
        return

    # send cookie
    return super(BoilerSessionInterface, self).save_session(
        *args, **kwargs
    )
[ "def", "save_session", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# do not send session cookie", "if", "g", ".", "get", "(", "'stateless_sessions'", ")", ":", "return", "# send cookie", "return", "super", "(", "BoilerSessionInterface", ",", "self", ")", ".", "save_session", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Save session
Skip setting session cookie if requested via g.stateless_sessions
[ "Save", "session", "Skip", "setting", "session", "cookie", "if", "requested", "via", "g", ".", "stateless_sessions" ]
python
train
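A minimal sketch of how a view opts out of the session cookie, assuming the Flask app is wired up with BoilerSessionInterface:

from flask import Flask, g

app = Flask(__name__)   # assumed to use BoilerSessionInterface

@app.route('/api/ping')
def ping():
    # flag picked up by save_session() above: no Set-Cookie on this response
    g.stateless_sessions = True
    return 'pong'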
tensorflow/tensorboard
tensorboard/backend/event_processing/event_accumulator.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/event_accumulator.py#L675-L716
def _Purge(self, event, by_tags):
    """Purge all events that have occurred after the given event.step.

    If by_tags is True, purge all events that occurred after the given
    event.step, but only for the tags that the event has. Non-sequential
    event.steps suggest that a TensorFlow restart occurred, and we discard
    the out-of-order events to display a consistent view in TensorBoard.

    Discarding by tags is the safer method, when we are unsure whether a
    restart has occurred, given that threading in supervisor can cause events
    of different tags to arrive with unsynchronized step values.

    If by_tags is False, then purge all events with event.step greater than
    the given event.step. This can be used when we are certain that a
    TensorFlow restart has occurred and these events can be discarded.

    Args:
      event: The event to use as reference for the purge. All events with
        the same tags, but with a greater event.step will be purged.
      by_tags: Bool to dictate whether to discard all out-of-order events or
        only those that are associated with the given reference event.
    """
    ## Keep data in reservoirs that has a step less than event.step
    _NotExpired = lambda x: x.step < event.step

    if by_tags:
        def _ExpiredPerTag(value):
            return [getattr(self, x).FilterItems(_NotExpired, value.tag)
                    for x in self.accumulated_attrs]

        expired_per_tags = [_ExpiredPerTag(value)
                            for value in event.summary.value]
        expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
    else:
        expired_per_type = [getattr(self, x).FilterItems(_NotExpired)
                            for x in self.accumulated_attrs]

    if sum(expired_per_type) > 0:
        purge_msg = _GetPurgeMessage(self.most_recent_step,
                                     self.most_recent_wall_time, event.step,
                                     event.wall_time, *expired_per_type)
        logger.warn(purge_msg)
[ "def", "_Purge", "(", "self", ",", "event", ",", "by_tags", ")", ":", "## Keep data in reservoirs that has a step less than event.step", "_NotExpired", "=", "lambda", "x", ":", "x", ".", "step", "<", "event", ".", "step", "if", "by_tags", ":", "def", "_ExpiredPerTag", "(", "value", ")", ":", "return", "[", "getattr", "(", "self", ",", "x", ")", ".", "FilterItems", "(", "_NotExpired", ",", "value", ".", "tag", ")", "for", "x", "in", "self", ".", "accumulated_attrs", "]", "expired_per_tags", "=", "[", "_ExpiredPerTag", "(", "value", ")", "for", "value", "in", "event", ".", "summary", ".", "value", "]", "expired_per_type", "=", "[", "sum", "(", "x", ")", "for", "x", "in", "zip", "(", "*", "expired_per_tags", ")", "]", "else", ":", "expired_per_type", "=", "[", "getattr", "(", "self", ",", "x", ")", ".", "FilterItems", "(", "_NotExpired", ")", "for", "x", "in", "self", ".", "accumulated_attrs", "]", "if", "sum", "(", "expired_per_type", ")", ">", "0", ":", "purge_msg", "=", "_GetPurgeMessage", "(", "self", ".", "most_recent_step", ",", "self", ".", "most_recent_wall_time", ",", "event", ".", "step", ",", "event", ".", "wall_time", ",", "*", "expired_per_type", ")", "logger", ".", "warn", "(", "purge_msg", ")" ]
Purge all events that have occurred after the given event.step.

If by_tags is True, purge all events that occurred after the given
event.step, but only for the tags that the event has. Non-sequential
event.steps suggest that a TensorFlow restart occurred, and we discard
the out-of-order events to display a consistent view in TensorBoard.

Discarding by tags is the safer method, when we are unsure whether a
restart has occurred, given that threading in supervisor can cause events
of different tags to arrive with unsynchronized step values.

If by_tags is False, then purge all events with event.step greater than
the given event.step. This can be used when we are certain that a
TensorFlow restart has occurred and these events can be discarded.

Args:
  event: The event to use as reference for the purge. All events with
    the same tags, but with a greater event.step will be purged.
  by_tags: Bool to dictate whether to discard all out-of-order events or
    only those that are associated with the given reference event.
[ "Purge", "all", "events", "that", "have", "occurred", "after", "the", "given", "event", ".", "step", "." ]
python
train
fedora-infra/fedora-messaging
fedora_messaging/twisted/protocol.py
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/twisted/protocol.py#L915-L931
def pauseProducing(self):
    """
    Pause the reception of messages by canceling all existing consumers.
    This does not disconnect from the server.

    Message reception can be resumed with :meth:`resumeProducing`.

    Returns:
        Deferred: fired when the production is paused.
    """
    if not self._running:
        return
    # Exit the read loop and cancel the consumer on the server.
    self._running = False
    for consumer in self._consumers.values():
        yield consumer.channel.basic_cancel(consumer_tag=consumer.tag)
    _legacy_twisted_log.msg("Paused retrieval of messages for the server queue")
[ "def", "pauseProducing", "(", "self", ")", ":", "if", "not", "self", ".", "_running", ":", "return", "# Exit the read loop and cancel the consumer on the server.", "self", ".", "_running", "=", "False", "for", "consumer", "in", "self", ".", "_consumers", ".", "values", "(", ")", ":", "yield", "consumer", ".", "channel", ".", "basic_cancel", "(", "consumer_tag", "=", "consumer", ".", "tag", ")", "_legacy_twisted_log", ".", "msg", "(", "\"Paused retrieval of messages for the server queue\"", ")" ]
Pause the reception of messages by canceling all existing consumers.
This does not disconnect from the server.

Message reception can be resumed with :meth:`resumeProducing`.

Returns:
    Deferred: fired when the production is paused.
[ "Pause", "the", "reception", "of", "messages", "by", "canceling", "all", "existing", "consumers", ".", "This", "does", "not", "disconnect", "from", "the", "server", "." ]
python
train
thiagopbueno/tf-rddlsim
tfrddlsim/simulation/policy_simulator.py
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L226-L272
def trajectory(self,
        horizon: int,
        initial_state: Optional[StateTensor] = None) -> TrajectoryOutput:
    '''Returns the ops for the trajectory generation with given `horizon`
    and `initial_state`.

    The simulation returns states, actions and interms as a
    sequence of tensors (i.e., all representations are factored).
    The reward is a batch sized tensor.
    The trajectoty output is a tuple: (initial_state, states, actions, interms, rewards).
    If initial state is None, use default compiler's initial state.

    Note:
        All tensors have shape: (batch_size, horizon, fluent_shape).
        Except initial state that has shape: (batch_size, fluent_shape).

    Args:
        horizon (int): The number of simulation timesteps.
        initial_state (Optional[Sequence[tf.Tensor]]): The initial state tensors.

    Returns:
        Tuple[StateTensor, StatesTensor, ActionsTensor, IntermsTensor, tf.Tensor]: Trajectory output tuple.
    '''
    if initial_state is None:
        initial_state = self._cell.initial_state()

    with self.graph.as_default():
        self.inputs = self.timesteps(horizon)
        outputs, _ = tf.nn.dynamic_rnn(
            self._cell,
            self.inputs,
            initial_state=initial_state,
            dtype=tf.float32,
            scope="trajectory")
        states, actions, interms, rewards = outputs

        # fluent types
        state_dtype = map(rddl2tf.utils.range_type_to_dtype,
                          self._cell._compiler.rddl.state_range_type)
        states = self._output(states, state_dtype)
        interm_dtype = map(rddl2tf.utils.range_type_to_dtype,
                           self._cell._compiler.rddl.interm_range_type)
        interms = self._output(interms, interm_dtype)
        action_dtype = map(rddl2tf.utils.range_type_to_dtype,
                           self._cell._compiler.rddl.action_range_type)
        actions = self._output(actions, action_dtype)

    outputs = (initial_state, states, actions, interms, rewards)

    return outputs
[ "def", "trajectory", "(", "self", ",", "horizon", ":", "int", ",", "initial_state", ":", "Optional", "[", "StateTensor", "]", "=", "None", ")", "->", "TrajectoryOutput", ":", "if", "initial_state", "is", "None", ":", "initial_state", "=", "self", ".", "_cell", ".", "initial_state", "(", ")", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "self", ".", "inputs", "=", "self", ".", "timesteps", "(", "horizon", ")", "outputs", ",", "_", "=", "tf", ".", "nn", ".", "dynamic_rnn", "(", "self", ".", "_cell", ",", "self", ".", "inputs", ",", "initial_state", "=", "initial_state", ",", "dtype", "=", "tf", ".", "float32", ",", "scope", "=", "\"trajectory\"", ")", "states", ",", "actions", ",", "interms", ",", "rewards", "=", "outputs", "# fluent types", "state_dtype", "=", "map", "(", "rddl2tf", ".", "utils", ".", "range_type_to_dtype", ",", "self", ".", "_cell", ".", "_compiler", ".", "rddl", ".", "state_range_type", ")", "states", "=", "self", ".", "_output", "(", "states", ",", "state_dtype", ")", "interm_dtype", "=", "map", "(", "rddl2tf", ".", "utils", ".", "range_type_to_dtype", ",", "self", ".", "_cell", ".", "_compiler", ".", "rddl", ".", "interm_range_type", ")", "interms", "=", "self", ".", "_output", "(", "interms", ",", "interm_dtype", ")", "action_dtype", "=", "map", "(", "rddl2tf", ".", "utils", ".", "range_type_to_dtype", ",", "self", ".", "_cell", ".", "_compiler", ".", "rddl", ".", "action_range_type", ")", "actions", "=", "self", ".", "_output", "(", "actions", ",", "action_dtype", ")", "outputs", "=", "(", "initial_state", ",", "states", ",", "actions", ",", "interms", ",", "rewards", ")", "return", "outputs" ]
Returns the ops for the trajectory generation with given `horizon`
and `initial_state`.

The simulation returns states, actions and interms as a sequence of
tensors (i.e., all representations are factored). The reward is a
batch sized tensor. The trajectoty output is a tuple:
(initial_state, states, actions, interms, rewards).
If initial state is None, use default compiler's initial state.

Note:
    All tensors have shape: (batch_size, horizon, fluent_shape).
    Except initial state that has shape: (batch_size, fluent_shape).

Args:
    horizon (int): The number of simulation timesteps.
    initial_state (Optional[Sequence[tf.Tensor]]): The initial state tensors.

Returns:
    Tuple[StateTensor, StatesTensor, ActionsTensor, IntermsTensor, tf.Tensor]: Trajectory output tuple.
[ "Returns", "the", "ops", "for", "the", "trajectory", "generation", "with", "given", "horizon", "and", "initial_state", "." ]
python
train
bernii/querystring-parser
querystring_parser/parser.py
https://github.com/bernii/querystring-parser/blob/1d3b652512d55622a37b5f5712909ea41490454b/querystring_parser/parser.py#L55-L68
def get_key(s):
    '''
    Get data between [ and ] remove ' if exist
    @param s: string to process
    '''
    start = s.find("[")
    end = s.find("]")
    if start == -1 or end == -1:
        return None

    if s[start + 1] == "'":
        start += 1
    if s[end - 1] == "'":
        end -= 1

    return s[start + 1:end]
[ "def", "get_key", "(", "s", ")", ":", "start", "=", "s", ".", "find", "(", "\"[\"", ")", "end", "=", "s", ".", "find", "(", "\"]\"", ")", "if", "start", "==", "-", "1", "or", "end", "==", "-", "1", ":", "return", "None", "if", "s", "[", "start", "+", "1", "]", "==", "\"'\"", ":", "start", "+=", "1", "if", "s", "[", "end", "-", "1", "]", "==", "\"'\"", ":", "end", "-=", "1", "return", "s", "[", "start", "+", "1", ":", "end", "]" ]
Get data between [ and ] remove ' if exist

@param s: string to process
[ "Get", "data", "between", "[", "and", "]", "remove", "if", "exist" ]
python
train
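Tracing get_key() on typical querystring fragments (assuming the function above is in scope):

get_key("foo[bar]")     # -> 'bar'
get_key("foo['bar']")   # -> 'bar' (surrounding quotes stripped)
get_key("foo")          # -> None (no brackets present)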
spacetelescope/synphot_refactor
synphot/observation.py
https://github.com/spacetelescope/synphot_refactor/blob/9c064f3cff0c41dd8acadc0f67c6350931275b9f/synphot/observation.py#L142-L196
def _init_bins(self, binset):
    """Calculated binned wavelength centers, edges, and flux.

    By contrast, the native waveset and flux should be considered
    samples of a continuous function.

    Thus, it makes sense to interpolate ``self.waveset`` and
    ``self(self.waveset)``, but not `binset` and `binflux`.
    """
    if binset is None:
        if self.bandpass.waveset is not None:
            self._binset = self.bandpass.waveset
        elif self.spectrum.waveset is not None:
            self._binset = self.spectrum.waveset
            log.info('Bandpass waveset is undefined; '
                     'Using source spectrum waveset instead.')
        else:
            raise exceptions.UndefinedBinset(
                'Both source spectrum and bandpass have undefined '
                'waveset; Provide binset manually.')
    else:
        self._binset = self._validate_wavelengths(binset)

    # binset must be in ascending order for calcbinflux()
    # to work properly.
    if self._binset[0] > self._binset[-1]:
        self._binset = self._binset[::-1]

    self._bin_edges = binning.calculate_bin_edges(self._binset)

    # Merge bin edges and centers in with the natural waveset
    spwave = utils.merge_wavelengths(
        self._bin_edges.value, self._binset.value)
    if self.waveset is not None:
        spwave = utils.merge_wavelengths(spwave, self.waveset.value)

    # Throw out invalid wavelengths after merging.
    spwave = spwave[spwave > 0]

    # Compute indices associated to each endpoint.
    indices = np.searchsorted(spwave, self._bin_edges.value)
    i_beg = indices[:-1]
    i_end = indices[1:]

    # Prepare integration variables.
    flux = self(spwave)
    avflux = (flux.value[1:] + flux.value[:-1]) * 0.5
    deltaw = spwave[1:] - spwave[:-1]

    # Sum over each bin.
    binflux, intwave = binning.calcbinflux(
        self._binset.size, i_beg, i_end, avflux, deltaw)

    self._binflux = binflux * flux.unit
[ "def", "_init_bins", "(", "self", ",", "binset", ")", ":", "if", "binset", "is", "None", ":", "if", "self", ".", "bandpass", ".", "waveset", "is", "not", "None", ":", "self", ".", "_binset", "=", "self", ".", "bandpass", ".", "waveset", "elif", "self", ".", "spectrum", ".", "waveset", "is", "not", "None", ":", "self", ".", "_binset", "=", "self", ".", "spectrum", ".", "waveset", "log", ".", "info", "(", "'Bandpass waveset is undefined; '", "'Using source spectrum waveset instead.'", ")", "else", ":", "raise", "exceptions", ".", "UndefinedBinset", "(", "'Both source spectrum and bandpass have undefined '", "'waveset; Provide binset manually.'", ")", "else", ":", "self", ".", "_binset", "=", "self", ".", "_validate_wavelengths", "(", "binset", ")", "# binset must be in ascending order for calcbinflux()", "# to work properly.", "if", "self", ".", "_binset", "[", "0", "]", ">", "self", ".", "_binset", "[", "-", "1", "]", ":", "self", ".", "_binset", "=", "self", ".", "_binset", "[", ":", ":", "-", "1", "]", "self", ".", "_bin_edges", "=", "binning", ".", "calculate_bin_edges", "(", "self", ".", "_binset", ")", "# Merge bin edges and centers in with the natural waveset", "spwave", "=", "utils", ".", "merge_wavelengths", "(", "self", ".", "_bin_edges", ".", "value", ",", "self", ".", "_binset", ".", "value", ")", "if", "self", ".", "waveset", "is", "not", "None", ":", "spwave", "=", "utils", ".", "merge_wavelengths", "(", "spwave", ",", "self", ".", "waveset", ".", "value", ")", "# Throw out invalid wavelengths after merging.", "spwave", "=", "spwave", "[", "spwave", ">", "0", "]", "# Compute indices associated to each endpoint.", "indices", "=", "np", ".", "searchsorted", "(", "spwave", ",", "self", ".", "_bin_edges", ".", "value", ")", "i_beg", "=", "indices", "[", ":", "-", "1", "]", "i_end", "=", "indices", "[", "1", ":", "]", "# Prepare integration variables.", "flux", "=", "self", "(", "spwave", ")", "avflux", "=", "(", "flux", ".", "value", "[", "1", ":", "]", "+", "flux", ".", "value", "[", ":", "-", "1", "]", ")", "*", "0.5", "deltaw", "=", "spwave", "[", "1", ":", "]", "-", "spwave", "[", ":", "-", "1", "]", "# Sum over each bin.", "binflux", ",", "intwave", "=", "binning", ".", "calcbinflux", "(", "self", ".", "_binset", ".", "size", ",", "i_beg", ",", "i_end", ",", "avflux", ",", "deltaw", ")", "self", ".", "_binflux", "=", "binflux", "*", "flux", ".", "unit" ]
Calculated binned wavelength centers, edges, and flux.

By contrast, the native waveset and flux should be considered
samples of a continuous function. Thus, it makes sense to
interpolate ``self.waveset`` and ``self(self.waveset)``, but not
`binset` and `binflux`.
[ "Calculated", "binned", "wavelength", "centers", "edges", "and", "flux", "." ]
python
train
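The `_init_bins` method above reduces to a standard pattern: merge the bin edges into the sample grid, trapezoid-integrate the flux over each bin, and divide by the bin width. A minimal NumPy-only sketch of that pattern; the function and variable names here are illustrative, not part of synphot:

```python
import numpy as np

def bin_flux(wave, flux, edges):
    """Average flux in each bin via trapezoid integration (illustrative sketch)."""
    # Merge bin edges into the native sample grid so every bin boundary is a sample.
    grid = np.union1d(wave, edges)
    f = np.interp(grid, wave, flux)
    # Trapezoid areas and widths of each elementary interval.
    areas = 0.5 * (f[1:] + f[:-1]) * np.diff(grid)
    # Indices of the bin edges within the merged grid.
    idx = np.searchsorted(grid, edges)
    # Integrate each bin, then normalize by its width to get the mean flux.
    binned = np.array([areas[i:j].sum() for i, j in zip(idx[:-1], idx[1:])])
    return binned / np.diff(edges)

wave = np.linspace(1000.0, 2000.0, 101)
flux = np.exp(-((wave - 1500.0) / 200.0) ** 2)
edges = np.linspace(1000.0, 2000.0, 11)
print(bin_flux(wave, flux, edges))
```

Merging the edges into the grid first guarantees every bin boundary is an exact sample point, so no bin's integral leaks into its neighbor.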
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_rally.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_rally.py#L116-L138
def cmd_rally_alt(self, args): '''handle rally alt change''' if (len(args) < 2): print("Usage: rally alt RALLYNUM newAlt <newBreakAlt>") return if not self.have_list: print("Please list rally points first") return idx = int(args[0]) if idx <= 0 or idx > self.rallyloader.rally_count(): print("Invalid rally point number %u" % idx) return new_alt = int(args[1]) new_break_alt = None if (len(args) > 2): new_break_alt = int(args[2]) self.rallyloader.set_alt(idx, new_alt, new_break_alt) self.send_rally_point(idx-1) self.fetch_rally_point(idx-1) self.rallyloader.reindex()
[ "def", "cmd_rally_alt", "(", "self", ",", "args", ")", ":", "if", "(", "len", "(", "args", ")", "<", "2", ")", ":", "print", "(", "\"Usage: rally alt RALLYNUM newAlt <newBreakAlt>\"", ")", "return", "if", "not", "self", ".", "have_list", ":", "print", "(", "\"Please list rally points first\"", ")", "return", "idx", "=", "int", "(", "args", "[", "0", "]", ")", "if", "idx", "<=", "0", "or", "idx", ">", "self", ".", "rallyloader", ".", "rally_count", "(", ")", ":", "print", "(", "\"Invalid rally point number %u\"", "%", "idx", ")", "return", "new_alt", "=", "int", "(", "args", "[", "1", "]", ")", "new_break_alt", "=", "None", "if", "(", "len", "(", "args", ")", ">", "2", ")", ":", "new_break_alt", "=", "int", "(", "args", "[", "2", "]", ")", "self", ".", "rallyloader", ".", "set_alt", "(", "idx", ",", "new_alt", ",", "new_break_alt", ")", "self", ".", "send_rally_point", "(", "idx", "-", "1", ")", "self", ".", "fetch_rally_point", "(", "idx", "-", "1", ")", "self", ".", "rallyloader", ".", "reindex", "(", ")" ]
handle rally alt change
[ "handle", "rally", "alt", "change" ]
python
train
dotzero/tilda-api-python
tilda/base.py
https://github.com/dotzero/tilda-api-python/blob/0ab984e0236cbfb676b0fbddc1ab37202d92e0a8/tilda/base.py#L54-L70
def to_json(self): """ Returns: str: """ data = dict() for key, value in self.__dict__.items(): if value: if hasattr(value, 'to_dict'): data[key] = value.to_dict() elif isinstance(value, datetime): data[key] = value.strftime('%Y-%m-%d %H:%M:%S') else: data[key] = value return json.dumps(data)
[ "def", "to_json", "(", "self", ")", ":", "data", "=", "dict", "(", ")", "for", "key", ",", "value", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "if", "value", ":", "if", "hasattr", "(", "value", ",", "'to_dict'", ")", ":", "data", "[", "key", "]", "=", "value", ".", "to_dict", "(", ")", "elif", "isinstance", "(", "value", ",", "datetime", ")", ":", "data", "[", "key", "]", "=", "value", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "else", ":", "data", "[", "key", "]", "=", "value", "return", "json", ".", "dumps", "(", "data", ")" ]
Returns: str:
[ "Returns", ":", "str", ":" ]
python
train
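The `to_json` pattern above (walk `__dict__`, recurse through `to_dict`, format datetimes) can be packaged as a mixin. A self-contained sketch under the same conventions; the `Page` class is made up for illustration:

```python
import json
from datetime import datetime

class Serializable:
    """Mixin that JSON-encodes non-empty attributes (illustrative sketch)."""

    def to_dict(self):
        data = {}
        for key, value in self.__dict__.items():
            if not value:
                continue  # skip falsy fields, mirroring the record's `if value:` guard
            if hasattr(value, 'to_dict'):
                data[key] = value.to_dict()      # recurse into nested objects
            elif isinstance(value, datetime):
                data[key] = value.strftime('%Y-%m-%d %H:%M:%S')
            else:
                data[key] = value
        return data

    def to_json(self):
        return json.dumps(self.to_dict())

class Page(Serializable):
    def __init__(self, title, published):
        self.title = title
        self.published = published

print(Page('Home', datetime(2020, 1, 2, 3, 4, 5)).to_json())
# {"title": "Home", "published": "2020-01-02 03:04:05"}
```

Note that the falsy-value skip drops empty strings and `None` fields from the output, exactly as the record above does.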
madjar/pyramid_persona
pyramid_persona/views.py
https://github.com/madjar/pyramid_persona/blob/b8cc34780e399904bfb8ec907bb11589ac52d9fa/pyramid_persona/views.py#L12-L23
def verify_login(request): """Verifies the assertion and the csrf token in the given request. Returns the email of the user if everything is valid, otherwise raises a HTTPBadRequest""" verifier = request.registry['persona.verifier'] try: data = verifier.verify(request.POST['assertion']) except (ValueError, browserid.errors.TrustError) as e: logger.info('Failed persona login: %s (%s)', e, type(e).__name__) raise HTTPBadRequest('Invalid assertion') return data['email']
[ "def", "verify_login", "(", "request", ")", ":", "verifier", "=", "request", ".", "registry", "[", "'persona.verifier'", "]", "try", ":", "data", "=", "verifier", ".", "verify", "(", "request", ".", "POST", "[", "'assertion'", "]", ")", "except", "(", "ValueError", ",", "browserid", ".", "errors", ".", "TrustError", ")", "as", "e", ":", "logger", ".", "info", "(", "'Failed persona login: %s (%s)'", ",", "e", ",", "type", "(", "e", ")", ".", "__name__", ")", "raise", "HTTPBadRequest", "(", "'Invalid assertion'", ")", "return", "data", "[", "'email'", "]" ]
Verifies the assertion and the csrf token in the given request. Returns the email of the user if everything is valid, otherwise raises a HTTPBadRequest
[ "Verifies", "the", "assertion", "and", "the", "csrf", "token", "in", "the", "given", "request", "." ]
python
train
belbio/bel
bel/utils.py
https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/utils.py#L245-L259
def elapsed(self): """ Return the current elapsed time since start If the `elapsed` property is called in the context manager scope, the elapsed time bewteen start and property access is returned. However, if it is accessed outside of the context manager scope, it returns the elapsed time bewteen entering and exiting the scope. The `elapsed` property can thus be accessed at different points within the context manager scope, to time different parts of the block. """ if self.end is None: # if elapsed is called in the context manager scope return (self() - self.start) * self.factor else: # if elapsed is called out of the context manager scope return (self.end - self.start) * self.factor
[ "def", "elapsed", "(", "self", ")", ":", "if", "self", ".", "end", "is", "None", ":", "# if elapsed is called in the context manager scope", "return", "(", "self", "(", ")", "-", "self", ".", "start", ")", "*", "self", ".", "factor", "else", ":", "# if elapsed is called out of the context manager scope", "return", "(", "self", ".", "end", "-", "self", ".", "start", ")", "*", "self", ".", "factor" ]
Return the current elapsed time since start If the `elapsed` property is called in the context manager scope, the elapsed time bewteen start and property access is returned. However, if it is accessed outside of the context manager scope, it returns the elapsed time bewteen entering and exiting the scope. The `elapsed` property can thus be accessed at different points within the context manager scope, to time different parts of the block.
[ "Return", "the", "current", "elapsed", "time", "since", "start", "If", "the", "elapsed", "property", "is", "called", "in", "the", "context", "manager", "scope", "the", "elapsed", "time", "bewteen", "start", "and", "property", "access", "is", "returned", ".", "However", "if", "it", "is", "accessed", "outside", "of", "the", "context", "manager", "scope", "it", "returns", "the", "elapsed", "time", "bewteen", "entering", "and", "exiting", "the", "scope", ".", "The", "elapsed", "property", "can", "thus", "be", "accessed", "at", "different", "points", "within", "the", "context", "manager", "scope", "to", "time", "different", "parts", "of", "the", "block", "." ]
python
train
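The two-phase `elapsed` above can be exercised with a runnable stand-in; `time.perf_counter` and the `factor` handling here are assumptions about the record's clock wrapper, kept deliberately simple:

```python
import time

class Timer:
    """Context manager whose `elapsed` works inside and outside the block (sketch)."""

    def __init__(self, factor=1.0):
        self.factor = factor
        self.start = None
        self.end = None

    def __call__(self):
        return time.perf_counter()   # stand-in for the record's clock callable

    def __enter__(self):
        self.start = self()
        self.end = None
        return self

    def __exit__(self, *exc):
        self.end = self()

    @property
    def elapsed(self):
        if self.end is None:          # inside the with-block: time so far
            return (self() - self.start) * self.factor
        return (self.end - self.start) * self.factor  # after exit: total time

with Timer() as t:
    time.sleep(0.05)
    print('so far:', t.elapsed)
print('total:', t.elapsed)
```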
botswana-harvard/edc-registration
edc_registration/model_mixins/updates_or_creates_registered_subject_model_mixin.py
https://github.com/botswana-harvard/edc-registration/blob/3daca624a496945fd4536488f6f80790bbecc081/edc_registration/model_mixins/updates_or_creates_registered_subject_model_mixin.py#L82-L100
def registration_options(self): """Gathers values for common attributes between the registration model and this instance. """ registration_options = {} rs = self.registration_model() for k, v in self.__dict__.items(): if k not in DEFAULT_BASE_FIELDS + ['_state']: try: getattr(rs, k) registration_options.update({k: v}) except AttributeError: pass registration_identifier = registration_options.get( 'registration_identifier') if registration_identifier: registration_options['registration_identifier'] = self.to_string( registration_identifier) return registration_options
[ "def", "registration_options", "(", "self", ")", ":", "registration_options", "=", "{", "}", "rs", "=", "self", ".", "registration_model", "(", ")", "for", "k", ",", "v", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "if", "k", "not", "in", "DEFAULT_BASE_FIELDS", "+", "[", "'_state'", "]", ":", "try", ":", "getattr", "(", "rs", ",", "k", ")", "registration_options", ".", "update", "(", "{", "k", ":", "v", "}", ")", "except", "AttributeError", ":", "pass", "registration_identifier", "=", "registration_options", ".", "get", "(", "'registration_identifier'", ")", "if", "registration_identifier", ":", "registration_options", "[", "'registration_identifier'", "]", "=", "self", ".", "to_string", "(", "registration_identifier", ")", "return", "registration_options" ]
Gathers values for common attributes between the registration model and this instance.
[ "Gathers", "values", "for", "common", "attributes", "between", "the", "registration", "model", "and", "this", "instance", "." ]
python
train
jeffknupp/sandman
sandman/model/utils.py
https://github.com/jeffknupp/sandman/blob/253ea4d15cbccd9f0016d66fedd7478614cc0b2f/sandman/model/utils.py#L110-L125
def register_internal_data(cls): """Register a new class, *cls*, with various internal data structures. :params `sandman.model.Model` cls: class to register """ with app.app_context(): if getattr(cls, 'endpoint', None) is None: orig_class = cls cls = type('Sandman' + cls.__name__, (cls, Model), {}) cls.__from_class__ = orig_class current_app.class_references[cls.__tablename__] = cls current_app.class_references[cls.__name__] = cls current_app.class_references[cls.endpoint()] = cls if not getattr(cls, '__related_tables__', None): cls.__related_tables__ = set()
[ "def", "register_internal_data", "(", "cls", ")", ":", "with", "app", ".", "app_context", "(", ")", ":", "if", "getattr", "(", "cls", ",", "'endpoint'", ",", "None", ")", "is", "None", ":", "orig_class", "=", "cls", "cls", "=", "type", "(", "'Sandman'", "+", "cls", ".", "__name__", ",", "(", "cls", ",", "Model", ")", ",", "{", "}", ")", "cls", ".", "__from_class__", "=", "orig_class", "current_app", ".", "class_references", "[", "cls", ".", "__tablename__", "]", "=", "cls", "current_app", ".", "class_references", "[", "cls", ".", "__name__", "]", "=", "cls", "current_app", ".", "class_references", "[", "cls", ".", "endpoint", "(", ")", "]", "=", "cls", "if", "not", "getattr", "(", "cls", ",", "'__related_tables__'", ",", "None", ")", ":", "cls", ".", "__related_tables__", "=", "set", "(", ")" ]
Register a new class, *cls*, with various internal data structures. :params `sandman.model.Model` cls: class to register
[ "Register", "a", "new", "class", "*", "cls", "*", "with", "various", "internal", "data", "structures", "." ]
python
train
steder/pundler
pundler/core.py
https://github.com/steder/pundler/blob/68d730b08e46d5f7b8781017c9bba87c7378509d/pundler/core.py#L34-L44
def get_requirement_files(args=None): """ Get the "best" requirements file we can find """ if args and args.input_filename: return [args.input_filename] paths = [] for regex in settings.REQUIREMENTS_SOURCE_GLOBS: paths.extend(glob.glob(regex)) return paths
[ "def", "get_requirement_files", "(", "args", "=", "None", ")", ":", "if", "args", "and", "args", ".", "input_filename", ":", "return", "[", "args", ".", "input_filename", "]", "paths", "=", "[", "]", "for", "regex", "in", "settings", ".", "REQUIREMENTS_SOURCE_GLOBS", ":", "paths", ".", "extend", "(", "glob", ".", "glob", "(", "regex", ")", ")", "return", "paths" ]
Get the "best" requirements file we can find
[ "Get", "the", "best", "requirements", "file", "we", "can", "find" ]
python
train
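A runnable rendering of the same fallback logic, with made-up glob patterns standing in for `settings.REQUIREMENTS_SOURCE_GLOBS`:

```python
import glob

# Assumed patterns for illustration; the real values live in pundler's settings.
REQUIREMENTS_SOURCE_GLOBS = ['requirements*.txt', 'requirements/*.txt']

def get_requirement_files(input_filename=None):
    if input_filename:                 # an explicit file always wins
        return [input_filename]
    paths = []
    for pattern in REQUIREMENTS_SOURCE_GLOBS:
        paths.extend(glob.glob(pattern))
    return paths

print(get_requirement_files('reqs.txt'))   # ['reqs.txt']
print(get_requirement_files())             # whatever matches in the current directory
```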
fhs/pyhdf
pyhdf/VS.py
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/VS.py#L1062-L1159
def storedata(self, fieldName, values, data_type, vName, vClass): """Create and initialize a single field vdata, returning the vdata reference number. Args:: fieldName Name of the single field in the vadata to create values Sequence of values to store in the field;. Each value can itself be a sequence, in which case the field will be multivalued (all second-level sequences must be of the same length) data_type Values type (one of HC.xxx constants). All values must be of the same type vName Name of the vdata to create vClass Vdata class (string) Returns:: vdata reference number C library equivalent : VHstoredata / VHstoredatam """ # See if the field is multi-valued. nrecs = len(values) if type(values[0]) in [list, tuple]: order = len(values[0]) # Replace input list with a flattened list. newValues = [] for el in values: for e in el: newValues.append(e) values = newValues else: order = 1 n_values = nrecs * order if data_type == HC.CHAR8: buf = _C.array_byte(n_values) # Allow values to be passed as a string. # Noop if a list is passed. values = list(values) for n in range(n_values): values[n] = ord(values[n]) elif data_type in [HC.UCHAR8, HC.UINT8]: buf = _C.array_byte(n_values) elif data_type == HC.INT8: # SWIG refuses negative values here. We found that if we # pass them as byte values, it will work. buf = _C.array_int8(n_values) values = list(values) for n in range(n_values): v = values[n] if v >= 0: v &= 0x7f else: v = abs(v) & 0x7f if v: v = 256 - v else: v = 128 # -128 in 2s complement values[n] = v elif data_type == HC.INT16: buf = _C.array_int16(n_values) elif data_type == HC.UINT16: buf = _C.array_uint16(n_values) elif data_type == HC.INT32: buf = _C.array_int32(n_values) elif data_type == HC.UINT32: buf = _C.array_uint32(n_values) elif data_type == HC.FLOAT32: buf = _C.array_float32(n_values) elif data_type == HC.FLOAT64: buf = _C.array_float64(n_values) else: raise HDF4Error("storedata: illegal or unimplemented data_type") for n in range(n_values): buf[n] = values[n] if order == 1: vd = _C.VHstoredata(self._hdf_inst._id, fieldName, buf, nrecs, data_type, vName, vClass) else: vd = _C.VHstoredatam(self._hdf_inst._id, fieldName, buf, nrecs, data_type, vName, vClass, order) _checkErr('storedata', vd, 'cannot create vdata') return vd
[ "def", "storedata", "(", "self", ",", "fieldName", ",", "values", ",", "data_type", ",", "vName", ",", "vClass", ")", ":", "# See if the field is multi-valued.", "nrecs", "=", "len", "(", "values", ")", "if", "type", "(", "values", "[", "0", "]", ")", "in", "[", "list", ",", "tuple", "]", ":", "order", "=", "len", "(", "values", "[", "0", "]", ")", "# Replace input list with a flattened list.", "newValues", "=", "[", "]", "for", "el", "in", "values", ":", "for", "e", "in", "el", ":", "newValues", ".", "append", "(", "e", ")", "values", "=", "newValues", "else", ":", "order", "=", "1", "n_values", "=", "nrecs", "*", "order", "if", "data_type", "==", "HC", ".", "CHAR8", ":", "buf", "=", "_C", ".", "array_byte", "(", "n_values", ")", "# Allow values to be passed as a string.", "# Noop if a list is passed.", "values", "=", "list", "(", "values", ")", "for", "n", "in", "range", "(", "n_values", ")", ":", "values", "[", "n", "]", "=", "ord", "(", "values", "[", "n", "]", ")", "elif", "data_type", "in", "[", "HC", ".", "UCHAR8", ",", "HC", ".", "UINT8", "]", ":", "buf", "=", "_C", ".", "array_byte", "(", "n_values", ")", "elif", "data_type", "==", "HC", ".", "INT8", ":", "# SWIG refuses negative values here. We found that if we", "# pass them as byte values, it will work.", "buf", "=", "_C", ".", "array_int8", "(", "n_values", ")", "values", "=", "list", "(", "values", ")", "for", "n", "in", "range", "(", "n_values", ")", ":", "v", "=", "values", "[", "n", "]", "if", "v", ">=", "0", ":", "v", "&=", "0x7f", "else", ":", "v", "=", "abs", "(", "v", ")", "&", "0x7f", "if", "v", ":", "v", "=", "256", "-", "v", "else", ":", "v", "=", "128", "# -128 in 2s complement", "values", "[", "n", "]", "=", "v", "elif", "data_type", "==", "HC", ".", "INT16", ":", "buf", "=", "_C", ".", "array_int16", "(", "n_values", ")", "elif", "data_type", "==", "HC", ".", "UINT16", ":", "buf", "=", "_C", ".", "array_uint16", "(", "n_values", ")", "elif", "data_type", "==", "HC", ".", "INT32", ":", "buf", "=", "_C", ".", "array_int32", "(", "n_values", ")", "elif", "data_type", "==", "HC", ".", "UINT32", ":", "buf", "=", "_C", ".", "array_uint32", "(", "n_values", ")", "elif", "data_type", "==", "HC", ".", "FLOAT32", ":", "buf", "=", "_C", ".", "array_float32", "(", "n_values", ")", "elif", "data_type", "==", "HC", ".", "FLOAT64", ":", "buf", "=", "_C", ".", "array_float64", "(", "n_values", ")", "else", ":", "raise", "HDF4Error", "(", "\"storedata: illegal or unimplemented data_type\"", ")", "for", "n", "in", "range", "(", "n_values", ")", ":", "buf", "[", "n", "]", "=", "values", "[", "n", "]", "if", "order", "==", "1", ":", "vd", "=", "_C", ".", "VHstoredata", "(", "self", ".", "_hdf_inst", ".", "_id", ",", "fieldName", ",", "buf", ",", "nrecs", ",", "data_type", ",", "vName", ",", "vClass", ")", "else", ":", "vd", "=", "_C", ".", "VHstoredatam", "(", "self", ".", "_hdf_inst", ".", "_id", ",", "fieldName", ",", "buf", ",", "nrecs", ",", "data_type", ",", "vName", ",", "vClass", ",", "order", ")", "_checkErr", "(", "'storedata'", ",", "vd", ",", "'cannot create vdata'", ")", "return", "vd" ]
Create and initialize a single field vdata, returning the vdata reference number. Args:: fieldName Name of the single field in the vadata to create values Sequence of values to store in the field;. Each value can itself be a sequence, in which case the field will be multivalued (all second-level sequences must be of the same length) data_type Values type (one of HC.xxx constants). All values must be of the same type vName Name of the vdata to create vClass Vdata class (string) Returns:: vdata reference number C library equivalent : VHstoredata / VHstoredatam
[ "Create", "and", "initialize", "a", "single", "field", "vdata", "returning", "the", "vdata", "reference", "number", "." ]
python
train
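The INT8 branch above re-encodes negative values as their two's-complement byte so SWIG will accept them. A quick self-check that the record's branchy logic agrees with the usual `v & 0xFF` idiom for every representable int8 value:

```python
def twos_complement_byte(v):
    """Re-encode a signed byte the way the record's INT8 branch does."""
    if v >= 0:
        return v & 0x7f
    v = abs(v) & 0x7f
    return 256 - v if v else 128   # abs(-128) & 0x7f == 0, which maps to 128

# For every representable int8 value the result equals the plain mask idiom.
assert all(twos_complement_byte(v) == (v & 0xFF) for v in range(-128, 128))
print("two's-complement re-encoding matches v & 0xFF")
```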
UpCloudLtd/upcloud-python-api
upcloud_api/cloud_manager/tag_mixin.py
https://github.com/UpCloudLtd/upcloud-python-api/blob/954b0ad7c4b932b2be31a95d88975f6b0eeac8ed/upcloud_api/cloud_manager/tag_mixin.py#L28-L38
def create_tag(self, name, description=None, servers=[]): """ Create a new Tag. Only name is mandatory. Returns the created Tag object. """ servers = [str(server) for server in servers] body = {'tag': Tag(name, description, servers).to_dict()} res = self.request('POST', '/tag', body) return Tag(cloud_manager=self, **res['tag'])
[ "def", "create_tag", "(", "self", ",", "name", ",", "description", "=", "None", ",", "servers", "=", "[", "]", ")", ":", "servers", "=", "[", "str", "(", "server", ")", "for", "server", "in", "servers", "]", "body", "=", "{", "'tag'", ":", "Tag", "(", "name", ",", "description", ",", "servers", ")", ".", "to_dict", "(", ")", "}", "res", "=", "self", ".", "request", "(", "'POST'", ",", "'/tag'", ",", "body", ")", "return", "Tag", "(", "cloud_manager", "=", "self", ",", "*", "*", "res", "[", "'tag'", "]", ")" ]
Create a new Tag. Only name is mandatory. Returns the created Tag object.
[ "Create", "a", "new", "Tag", ".", "Only", "name", "is", "mandatory", "." ]
python
train
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L177-L200
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0): """ Gets a list of CommitInfo objects. Params: * repo_name: If only `repo_name` is given, all commits in the repo are returned. * to_commit: Optional. Only the ancestors of `to`, including `to` itself, are considered. * from_commit: Optional. Only the descendants of `from`, including `from` itself, are considered. * number: Optional. Determines how many commits are returned. If `number` is 0, all commits that match the aforementioned criteria are returned. """ req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number) if to_commit is not None: req.to.CopyFrom(commit_from(to_commit)) if from_commit is not None: getattr(req, 'from').CopyFrom(commit_from(from_commit)) res = self.stub.ListCommit(req, metadata=self.metadata) if hasattr(res, 'commit_info'): return res.commit_info return []
[ "def", "list_commit", "(", "self", ",", "repo_name", ",", "to_commit", "=", "None", ",", "from_commit", "=", "None", ",", "number", "=", "0", ")", ":", "req", "=", "proto", ".", "ListCommitRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ",", "number", "=", "number", ")", "if", "to_commit", "is", "not", "None", ":", "req", ".", "to", ".", "CopyFrom", "(", "commit_from", "(", "to_commit", ")", ")", "if", "from_commit", "is", "not", "None", ":", "getattr", "(", "req", ",", "'from'", ")", ".", "CopyFrom", "(", "commit_from", "(", "from_commit", ")", ")", "res", "=", "self", ".", "stub", ".", "ListCommit", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "if", "hasattr", "(", "res", ",", "'commit_info'", ")", ":", "return", "res", ".", "commit_info", "return", "[", "]" ]
Gets a list of CommitInfo objects. Params: * repo_name: If only `repo_name` is given, all commits in the repo are returned. * to_commit: Optional. Only the ancestors of `to`, including `to` itself, are considered. * from_commit: Optional. Only the descendants of `from`, including `from` itself, are considered. * number: Optional. Determines how many commits are returned. If `number` is 0, all commits that match the aforementioned criteria are returned.
[ "Gets", "a", "list", "of", "CommitInfo", "objects", "." ]
python
train
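`from` is a Python keyword, which is why the record reads that protobuf field with `getattr(req, 'from')` instead of attribute syntax. A tiny sketch of the same reserved-word access on an ordinary object; the class is a stand-in, not the real request message:

```python
class ListCommitRequest:
    """Stand-in object with a field named after a Python keyword."""
    pass

req = ListCommitRequest()
# `req.from = ...` is a SyntaxError, so reserved-word fields must go through
# setattr/getattr instead:
setattr(req, 'from', 'branch-a')
setattr(req, 'to', 'branch-b')
print(getattr(req, 'from'), '->', req.to)   # branch-a -> branch-b
```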
raiden-network/raiden-contracts
raiden_contracts/utils/private_key.py
https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/utils/private_key.py#L27-L68
def get_private_key(key_path, password_path=None): """Open a JSON-encoded private key and return it If a password file is provided, uses it to decrypt the key. If not, the password is asked interactively. Raw hex-encoded private keys are supported, but deprecated.""" assert key_path, key_path if not os.path.exists(key_path): log.fatal('%s: no such file', key_path) return None if not check_permission_safety(key_path): log.fatal('Private key file %s must be readable only by its owner.', key_path) return None if password_path and not check_permission_safety(password_path): log.fatal('Password file %s must be readable only by its owner.', password_path) return None with open(key_path) as keyfile: private_key = keyfile.readline().strip() if is_hex(private_key) and len(decode_hex(private_key)) == 32: log.warning('Private key in raw format. Consider switching to JSON-encoded') else: keyfile.seek(0) try: json_data = json.load(keyfile) if password_path: with open(password_path) as password_file: password = password_file.readline().strip() else: password = getpass.getpass('Enter the private key password: ') if json_data['crypto']['kdf'] == 'pbkdf2': password = password.encode() # type: ignore private_key = encode_hex(decode_keyfile_json(json_data, password)) except ValueError: log.fatal('Invalid private key format or password!') return None return private_key
[ "def", "get_private_key", "(", "key_path", ",", "password_path", "=", "None", ")", ":", "assert", "key_path", ",", "key_path", "if", "not", "os", ".", "path", ".", "exists", "(", "key_path", ")", ":", "log", ".", "fatal", "(", "'%s: no such file'", ",", "key_path", ")", "return", "None", "if", "not", "check_permission_safety", "(", "key_path", ")", ":", "log", ".", "fatal", "(", "'Private key file %s must be readable only by its owner.'", ",", "key_path", ")", "return", "None", "if", "password_path", "and", "not", "check_permission_safety", "(", "password_path", ")", ":", "log", ".", "fatal", "(", "'Password file %s must be readable only by its owner.'", ",", "password_path", ")", "return", "None", "with", "open", "(", "key_path", ")", "as", "keyfile", ":", "private_key", "=", "keyfile", ".", "readline", "(", ")", ".", "strip", "(", ")", "if", "is_hex", "(", "private_key", ")", "and", "len", "(", "decode_hex", "(", "private_key", ")", ")", "==", "32", ":", "log", ".", "warning", "(", "'Private key in raw format. Consider switching to JSON-encoded'", ")", "else", ":", "keyfile", ".", "seek", "(", "0", ")", "try", ":", "json_data", "=", "json", ".", "load", "(", "keyfile", ")", "if", "password_path", ":", "with", "open", "(", "password_path", ")", "as", "password_file", ":", "password", "=", "password_file", ".", "readline", "(", ")", ".", "strip", "(", ")", "else", ":", "password", "=", "getpass", ".", "getpass", "(", "'Enter the private key password: '", ")", "if", "json_data", "[", "'crypto'", "]", "[", "'kdf'", "]", "==", "'pbkdf2'", ":", "password", "=", "password", ".", "encode", "(", ")", "# type: ignore", "private_key", "=", "encode_hex", "(", "decode_keyfile_json", "(", "json_data", ",", "password", ")", ")", "except", "ValueError", ":", "log", ".", "fatal", "(", "'Invalid private key format or password!'", ")", "return", "None", "return", "private_key" ]
Open a JSON-encoded private key and return it If a password file is provided, uses it to decrypt the key. If not, the password is asked interactively. Raw hex-encoded private keys are supported, but deprecated.
[ "Open", "a", "JSON", "-", "encoded", "private", "key", "and", "return", "it" ]
python
train
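The record depends on a `check_permission_safety` helper it does not define. A plausible stat-based sketch (an assumption, not the project's actual implementation) that rejects files readable or writable by group or others; POSIX permission semantics assumed:

```python
import os
import stat

def check_permission_safety(path):
    """Sketch: True only if group/other have no permission bits set."""
    mode = os.stat(path).st_mode
    return (mode & (stat.S_IRWXG | stat.S_IRWXO)) == 0

# Example: a key written with 0o600 passes, 0o644 does not (POSIX only).
with open('demo.key', 'w') as f:
    f.write('secret')
os.chmod('demo.key', 0o600)
print(check_permission_safety('demo.key'))   # True
os.chmod('demo.key', 0o644)
print(check_permission_safety('demo.key'))   # False
os.remove('demo.key')
```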
ncolony/ncolony
ncolony/httpcheck.py
https://github.com/ncolony/ncolony/blob/6ac71bda1de6706fb34244ae4972e36db5f062d3/ncolony/httpcheck.py#L197-L213
def makeService(opt): """Make a service :params opt: dictionary-like object with 'freq', 'config' and 'messages' :returns: twisted.application.internet.TimerService that at opt['freq'] checks for stale processes in opt['config'], and sends restart messages through opt['messages'] """ restarter, path = beatcheck.parseConfig(opt) pool = client.HTTPConnectionPool(reactor) agent = client.Agent(reactor=reactor, pool=pool) settings = Settings(reactor=reactor, agent=agent) states = {} checker = functools.partial(check, settings, states, path) httpcheck = tainternet.TimerService(opt['freq'], run, restarter, checker) httpcheck.setName('httpcheck') return heart.wrapHeart(httpcheck)
[ "def", "makeService", "(", "opt", ")", ":", "restarter", ",", "path", "=", "beatcheck", ".", "parseConfig", "(", "opt", ")", "pool", "=", "client", ".", "HTTPConnectionPool", "(", "reactor", ")", "agent", "=", "client", ".", "Agent", "(", "reactor", "=", "reactor", ",", "pool", "=", "pool", ")", "settings", "=", "Settings", "(", "reactor", "=", "reactor", ",", "agent", "=", "agent", ")", "states", "=", "{", "}", "checker", "=", "functools", ".", "partial", "(", "check", ",", "settings", ",", "states", ",", "path", ")", "httpcheck", "=", "tainternet", ".", "TimerService", "(", "opt", "[", "'freq'", "]", ",", "run", ",", "restarter", ",", "checker", ")", "httpcheck", ".", "setName", "(", "'httpcheck'", ")", "return", "heart", ".", "wrapHeart", "(", "httpcheck", ")" ]
Make a service :params opt: dictionary-like object with 'freq', 'config' and 'messages' :returns: twisted.application.internet.TimerService that at opt['freq'] checks for stale processes in opt['config'], and sends restart messages through opt['messages']
[ "Make", "a", "service" ]
python
test
cloud-custodian/cloud-custodian
tools/c7n_gcp/c7n_gcp/mu.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_gcp/c7n_gcp/mu.py#L658-L680
def ensure_sink(self): """Ensure the log sink and its pub sub topic exist.""" topic_info = self.pubsub.ensure_topic() scope, sink_path, sink_info = self.get_sink(topic_info) client = self.session.client('logging', 'v2', '%s.sinks' % scope) try: sink = client.execute_command('get', {'sinkName': sink_path}) except HttpError as e: if e.resp.status != 404: raise sink = client.execute_command('create', sink_info) else: delta = delta_resource(sink, sink_info['body']) if delta: sink_info['updateMask'] = ','.join(delta) sink_info['sinkName'] = sink_path sink_info.pop('parent') sink = client.execute_command('update', sink_info) else: return sink_path self.pubsub.ensure_iam(publisher=sink['writerIdentity']) return sink_path
[ "def", "ensure_sink", "(", "self", ")", ":", "topic_info", "=", "self", ".", "pubsub", ".", "ensure_topic", "(", ")", "scope", ",", "sink_path", ",", "sink_info", "=", "self", ".", "get_sink", "(", "topic_info", ")", "client", "=", "self", ".", "session", ".", "client", "(", "'logging'", ",", "'v2'", ",", "'%s.sinks'", "%", "scope", ")", "try", ":", "sink", "=", "client", ".", "execute_command", "(", "'get'", ",", "{", "'sinkName'", ":", "sink_path", "}", ")", "except", "HttpError", "as", "e", ":", "if", "e", ".", "resp", ".", "status", "!=", "404", ":", "raise", "sink", "=", "client", ".", "execute_command", "(", "'create'", ",", "sink_info", ")", "else", ":", "delta", "=", "delta_resource", "(", "sink", ",", "sink_info", "[", "'body'", "]", ")", "if", "delta", ":", "sink_info", "[", "'updateMask'", "]", "=", "','", ".", "join", "(", "delta", ")", "sink_info", "[", "'sinkName'", "]", "=", "sink_path", "sink_info", ".", "pop", "(", "'parent'", ")", "sink", "=", "client", ".", "execute_command", "(", "'update'", ",", "sink_info", ")", "else", ":", "return", "sink_path", "self", ".", "pubsub", ".", "ensure_iam", "(", "publisher", "=", "sink", "[", "'writerIdentity'", "]", ")", "return", "sink_path" ]
Ensure the log sink and its pub sub topic exist.
[ "Ensure", "the", "log", "sink", "and", "its", "pub", "sub", "topic", "exist", "." ]
python
train
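`ensure_sink` follows the common get-or-create-then-patch reconciliation loop against a REST resource. A provider-agnostic sketch with an in-memory stand-in client; `NotFound`, `FakeClient`, and `ensure_resource` are illustrative names, not the Cloud Custodian API:

```python
class NotFound(Exception):
    pass

class FakeClient:
    """In-memory stand-in for the logging-sink client (illustrative only)."""
    def __init__(self):
        self.store = {}
    def get(self, name):
        if name not in self.store:
            raise NotFound(name)
        return self.store[name]
    def create(self, name, body):
        self.store[name] = dict(body)
        return self.store[name]
    def update(self, name, delta):
        self.store[name].update(delta)
        return self.store[name]

def ensure_resource(client, name, desired):
    """Get-or-create, then patch only the fields that drifted (sketch)."""
    try:
        current = client.get(name)
    except NotFound:
        return client.create(name, desired)
    delta = {k: v for k, v in desired.items() if current.get(k) != v}
    if delta:
        return client.update(name, delta)   # send only the changed fields
    return current

c = FakeClient()
print(ensure_resource(c, 'sink', {'dest': 'topic-a'}))   # created
print(ensure_resource(c, 'sink', {'dest': 'topic-b'}))   # updated in place
```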
NicolasLM/spinach
spinach/contrib/spinachd/mail.py
https://github.com/NicolasLM/spinach/blob/0122f916643101eab5cdc1f3da662b9446e372aa/spinach/contrib/spinachd/mail.py#L38-L43
def deserialize_email_messages(messages: List[str]): """Deserialize EmailMessages passed as task argument.""" return [ pickle.loads(zlib.decompress(base64.b64decode(m))) for m in messages ]
[ "def", "deserialize_email_messages", "(", "messages", ":", "List", "[", "str", "]", ")", ":", "return", "[", "pickle", ".", "loads", "(", "zlib", ".", "decompress", "(", "base64", ".", "b64decode", "(", "m", ")", ")", ")", "for", "m", "in", "messages", "]" ]
Deserialize EmailMessages passed as task argument.
[ "Deserialize", "EmailMessages", "passed", "as", "task", "argument", "." ]
python
train
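The record shows only the decode side; writing out the matching encode direction (pickle, then zlib, then base64) makes the round trip visible. A sketch; the real spinach serializer may differ in detail:

```python
import base64
import pickle
import zlib

def serialize_messages(messages):
    """Inverse of the record above: pickle, compress, then base64-encode."""
    return [
        base64.b64encode(zlib.compress(pickle.dumps(m))).decode('ascii')
        for m in messages
    ]

def deserialize_messages(messages):
    return [pickle.loads(zlib.decompress(base64.b64decode(m))) for m in messages]

payload = serialize_messages([{'to': 'a@example.com', 'body': 'hi'}])
print(deserialize_messages(payload))   # [{'to': 'a@example.com', 'body': 'hi'}]
```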
inasafe/inasafe
extras/xml_tools.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/extras/xml_tools.py#L219-L256
def xml2object(xml, verbose=False): """Generate XML object model from XML file or XML text This is the inverse operation to the __str__ representation (up to whitespace). Input xml can be either an * xml file * open xml file object Return XML_document instance. """ # FIXME - can we allow xml to be string? # This would depend on minidom's parse function # Input tests if isinstance(xml, basestring): fid = open(xml) else: fid = xml try: dom = parse(fid) except Exception as e: # Throw filename into dom exception msg = 'XML file "%s" could not be parsed.\n' %fid.name msg += 'Error message from parser: "%s"' %str(e) raise Exception, msg try: xml_object = dom2object(dom) except Exception as e: msg = 'Could not convert %s into XML object.\n' %fid.name msg += str(e) raise Exception, msg return xml_object
[ "def", "xml2object", "(", "xml", ",", "verbose", "=", "False", ")", ":", "# FIXME - can we allow xml to be string?", "# This would depend on minidom's parse function", "# Input tests", "if", "isinstance", "(", "xml", ",", "basestring", ")", ":", "fid", "=", "open", "(", "xml", ")", "else", ":", "fid", "=", "xml", "try", ":", "dom", "=", "parse", "(", "fid", ")", "except", "Exception", "as", "e", ":", "# Throw filename into dom exception", "msg", "=", "'XML file \"%s\" could not be parsed.\\n'", "%", "fid", ".", "name", "msg", "+=", "'Error message from parser: \"%s\"'", "%", "str", "(", "e", ")", "raise", "Exception", ",", "msg", "try", ":", "xml_object", "=", "dom2object", "(", "dom", ")", "except", "Exception", "as", "e", ":", "msg", "=", "'Could not convert %s into XML object.\\n'", "%", "fid", ".", "name", "msg", "+=", "str", "(", "e", ")", "raise", "Exception", ",", "msg", "return", "xml_object" ]
Generate XML object model from XML file or XML text This is the inverse operation to the __str__ representation (up to whitespace). Input xml can be either an * xml file * open xml file object Return XML_document instance.
[ "Generate", "XML", "object", "model", "from", "XML", "file", "or", "XML", "text" ]
python
train
saltstack/salt
salt/modules/bridge.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bridge.py#L290-L305
def _bsd_stp(br, state, iface): ''' Internal, sets STP state. On BSD-like, it is required to specify the STP physical interface ''' kernel = __grains__['kernel'] if kernel == 'NetBSD': cmd = _tool_path('brconfig') else: cmd = _tool_path('ifconfig') if not br or not iface: return False return __salt__['cmd.run']('{0} {1} {2} {3}'.format(cmd, br, state, iface), python_shell=False)
[ "def", "_bsd_stp", "(", "br", ",", "state", ",", "iface", ")", ":", "kernel", "=", "__grains__", "[", "'kernel'", "]", "if", "kernel", "==", "'NetBSD'", ":", "cmd", "=", "_tool_path", "(", "'brconfig'", ")", "else", ":", "cmd", "=", "_tool_path", "(", "'ifconfig'", ")", "if", "not", "br", "or", "not", "iface", ":", "return", "False", "return", "__salt__", "[", "'cmd.run'", "]", "(", "'{0} {1} {2} {3}'", ".", "format", "(", "cmd", ",", "br", ",", "state", ",", "iface", ")", ",", "python_shell", "=", "False", ")" ]
Internal, sets STP state. On BSD-like, it is required to specify the STP physical interface
[ "Internal", "sets", "STP", "state", ".", "On", "BSD", "-", "like", "it", "is", "required", "to", "specify", "the", "STP", "physical", "interface" ]
python
train
diefans/objective
src/objective/core.py
https://github.com/diefans/objective/blob/e2de37f1cd4f5ad147ab3a5dee7dffd6806f2f88/src/objective/core.py#L283-L309
def _resolve_value(self, value, environment=None): """Resolve the value. Either apply missing or leave the value as is. :param value: the value either from deserialize or serialize :param environment: an optional environment """ # here we care about Undefined values if value == values.Undefined: if isinstance(self._missing, type) and issubclass(self._missing, Missing): # instantiate the missing thing missing = self._missing(self, environment) # invoke missing callback # the default is to raise a MissingValue() exception value = missing(value) elif hasattr(self._missing, "__call__"): value = self._missing() else: # we just assign any value value = self._missing return value
[ "def", "_resolve_value", "(", "self", ",", "value", ",", "environment", "=", "None", ")", ":", "# here we care about Undefined values", "if", "value", "==", "values", ".", "Undefined", ":", "if", "isinstance", "(", "self", ".", "_missing", ",", "type", ")", "and", "issubclass", "(", "self", ".", "_missing", ",", "Missing", ")", ":", "# instantiate the missing thing", "missing", "=", "self", ".", "_missing", "(", "self", ",", "environment", ")", "# invoke missing callback", "# the default is to raise a MissingValue() exception", "value", "=", "missing", "(", "value", ")", "elif", "hasattr", "(", "self", ".", "_missing", ",", "\"__call__\"", ")", ":", "value", "=", "self", ".", "_missing", "(", ")", "else", ":", "# we just assign any value", "value", "=", "self", ".", "_missing", "return", "value" ]
Resolve the value. Either apply missing or leave the value as is. :param value: the value either from deserialize or serialize :param environment: an optional environment
[ "Resolve", "the", "value", "." ]
python
train
osrg/ryu
ryu/lib/ovs/bridge.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/bridge.py#L164-L175
def get_controller(self): """ Gets the configured OpenFlow controller address. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl get-controller <bridge> """ command = ovs_vsctl.VSCtlCommand('get-controller', [self.br_name]) self.run_command([command]) result = command.result return result[0] if len(result) == 1 else result
[ "def", "get_controller", "(", "self", ")", ":", "command", "=", "ovs_vsctl", ".", "VSCtlCommand", "(", "'get-controller'", ",", "[", "self", ".", "br_name", "]", ")", "self", ".", "run_command", "(", "[", "command", "]", ")", "result", "=", "command", ".", "result", "return", "result", "[", "0", "]", "if", "len", "(", "result", ")", "==", "1", "else", "result" ]
Gets the configured OpenFlow controller address. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl get-controller <bridge>
[ "Gets", "the", "configured", "OpenFlow", "controller", "address", "." ]
python
train
Shapeways/coyote_framework
coyote_framework/webdriver/webdriverwrapper/WebDriverWrapper.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebDriverWrapper.py#L827-L851
def wait_until_text_is_not_empty(self, locator, timeout=None): """ Waits for an element's text to not be empty @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used to find element @type timeout: int @param timeout: the maximum number of seconds the driver will wait before timing out @rtype: webdriverwrapper.WebElementWrapper @return: Returns the element found """ timeout = timeout if timeout is not None else self.timeout self.wait_for(locator) # first check that element exists def wait(): ''' Wait function passed to executor ''' WebDriverWait(self.driver, timeout).until(lambda d: len(self.find(locator).text()) > 0) return self.find(locator) return self.execute_and_handle_webdriver_exceptions( wait, timeout, locator, 'Timeout waiting for element to contain some text')
[ "def", "wait_until_text_is_not_empty", "(", "self", ",", "locator", ",", "timeout", "=", "None", ")", ":", "timeout", "=", "timeout", "if", "timeout", "is", "not", "None", "else", "self", ".", "timeout", "self", ".", "wait_for", "(", "locator", ")", "# first check that element exists", "def", "wait", "(", ")", ":", "'''\n Wait function passed to executor\n '''", "WebDriverWait", "(", "self", ".", "driver", ",", "timeout", ")", ".", "until", "(", "lambda", "d", ":", "len", "(", "self", ".", "find", "(", "locator", ")", ".", "text", "(", ")", ")", ">", "0", ")", "return", "self", ".", "find", "(", "locator", ")", "return", "self", ".", "execute_and_handle_webdriver_exceptions", "(", "wait", ",", "timeout", ",", "locator", ",", "'Timeout waiting for element to contain some text'", ")" ]
Waits for an element's text to not be empty @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used to find element @type timeout: int @param timeout: the maximum number of seconds the driver will wait before timing out @rtype: webdriverwrapper.WebElementWrapper @return: Returns the element found
[ "Waits", "for", "an", "element", "s", "text", "to", "not", "be", "empty" ]
python
train
fracpete/python-weka-wrapper3
python/weka/flow/source.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/source.py#L173-L183
def quickinfo(self): """ Returns a short string describing some of the options of the actor. :return: the info, None if not available :rtype: str """ return "dir: " + str(self.config["dir"]) \ + ", files: " + str(self.config["list_files"]) \ + ", dirs: " + str(self.resolve_option("list_dirs")) \ + ", recursive: " + str(self.config["recursive"])
[ "def", "quickinfo", "(", "self", ")", ":", "return", "\"dir: \"", "+", "str", "(", "self", ".", "config", "[", "\"dir\"", "]", ")", "+", "\", files: \"", "+", "str", "(", "self", ".", "config", "[", "\"list_files\"", "]", ")", "+", "\", dirs: \"", "+", "str", "(", "self", ".", "resolve_option", "(", "\"list_dirs\"", ")", ")", "+", "\", recursive: \"", "+", "str", "(", "self", ".", "config", "[", "\"recursive\"", "]", ")" ]
Returns a short string describing some of the options of the actor. :return: the info, None if not available :rtype: str
[ "Returns", "a", "short", "string", "describing", "some", "of", "the", "options", "of", "the", "actor", "." ]
python
train
Alveo/pyalveo
pyalveo/pyalveo.py
https://github.com/Alveo/pyalveo/blob/1e9eec22bc031bc9a08066f9966565a546e6242e/pyalveo/pyalveo.py#L1509-L1518
def get_contribution(self, url): """Get the details of a particular contribution given it's url""" result = self.api_request(url) # add the contrib id into the metadata result['id'] = os.path.split(result['url'])[1] return result
[ "def", "get_contribution", "(", "self", ",", "url", ")", ":", "result", "=", "self", ".", "api_request", "(", "url", ")", "# add the contrib id into the metadata", "result", "[", "'id'", "]", "=", "os", ".", "path", ".", "split", "(", "result", "[", "'url'", "]", ")", "[", "1", "]", "return", "result" ]
Get the details of a particular contribution given it's url
[ "Get", "the", "details", "of", "a", "particular", "contribution", "given", "it", "s", "url" ]
python
train
sveetch/py-css-styleguide
py_css_styleguide/parser.py
https://github.com/sveetch/py-css-styleguide/blob/5acc693f71b2fa7d944d7fed561ae0a7699ccd0f/py_css_styleguide/parser.py#L29-L52
def digest_prelude(self, rule): """ Walk on rule prelude (aka CSS selector) tokens to return a string of the value name (from css selector). Actually only simple selector and selector with descendant combinator are supported. Using any other selector kind may leads to unexpected issues. Arguments: rule (tinycss2.ast.QualifiedRule): Qualified rule object as returned by tinycss2. Returns: string: Selector name. If it's a descendant combinator, items are joined with ``__``. """ name = [] for token in rule.prelude: if token.type == 'ident': name.append(token.value) return "__".join(name)
[ "def", "digest_prelude", "(", "self", ",", "rule", ")", ":", "name", "=", "[", "]", "for", "token", "in", "rule", ".", "prelude", ":", "if", "token", ".", "type", "==", "'ident'", ":", "name", ".", "append", "(", "token", ".", "value", ")", "return", "\"__\"", ".", "join", "(", "name", ")" ]
Walk on rule prelude (aka CSS selector) tokens to return a string of the value name (from css selector). Actually only simple selector and selector with descendant combinator are supported. Using any other selector kind may leads to unexpected issues. Arguments: rule (tinycss2.ast.QualifiedRule): Qualified rule object as returned by tinycss2. Returns: string: Selector name. If it's a descendant combinator, items are joined with ``__``.
[ "Walk", "on", "rule", "prelude", "(", "aka", "CSS", "selector", ")", "tokens", "to", "return", "a", "string", "of", "the", "value", "name", "(", "from", "css", "selector", ")", "." ]
python
train
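A small driver showing the ident-joining behavior of `digest_prelude` on a descendant selector, assuming `tinycss2` is installed; `parse_stylesheet` and the token `.type`/`.value` attributes match the record's own usage:

```python
import tinycss2

rules = tinycss2.parse_stylesheet('.styleguide-color .primary { color: red; }',
                                  skip_whitespace=True, skip_comments=True)
for rule in rules:
    if rule.type != 'qualified-rule':
        continue
    # Keep only ident tokens from the prelude; '.' and whitespace are dropped.
    name = [tok.value for tok in rule.prelude if tok.type == 'ident']
    print('__'.join(name))   # styleguide-color__primary
```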
cloudendpoints/endpoints-python
endpoints/openapi_generator.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/openapi_generator.py#L576-L611
def __request_message_descriptor(self, request_kind, message_type, method_id, path): """Describes the parameters and body of the request. Args: request_kind: The type of request being made. message_type: messages.Message or ResourceContainer class. The message to describe. method_id: string, Unique method identifier (e.g. 'myapi.items.method') path: string, HTTP path to method. Returns: Dictionary describing the request. Raises: ValueError: if the method path and request required fields do not match """ if isinstance(message_type, resource_container.ResourceContainer): base_message_type = message_type.body_message_class() if (request_kind == self.__NO_BODY and base_message_type != message_types.VoidMessage()): msg = ('Method %s specifies a body message in its ResourceContainer, but ' 'is a HTTP method type that cannot accept a body.') % method_id raise api_exceptions.ApiConfigurationError(msg) else: base_message_type = message_type if (request_kind != self.__NO_BODY and base_message_type != message_types.VoidMessage()): self.__request_schema[method_id] = self.__parser.add_message( base_message_type.__class__) params = self.__params_descriptor(message_type, request_kind, path, method_id) return params
[ "def", "__request_message_descriptor", "(", "self", ",", "request_kind", ",", "message_type", ",", "method_id", ",", "path", ")", ":", "if", "isinstance", "(", "message_type", ",", "resource_container", ".", "ResourceContainer", ")", ":", "base_message_type", "=", "message_type", ".", "body_message_class", "(", ")", "if", "(", "request_kind", "==", "self", ".", "__NO_BODY", "and", "base_message_type", "!=", "message_types", ".", "VoidMessage", "(", ")", ")", ":", "msg", "=", "(", "'Method %s specifies a body message in its ResourceContainer, but '", "'is a HTTP method type that cannot accept a body.'", ")", "%", "method_id", "raise", "api_exceptions", ".", "ApiConfigurationError", "(", "msg", ")", "else", ":", "base_message_type", "=", "message_type", "if", "(", "request_kind", "!=", "self", ".", "__NO_BODY", "and", "base_message_type", "!=", "message_types", ".", "VoidMessage", "(", ")", ")", ":", "self", ".", "__request_schema", "[", "method_id", "]", "=", "self", ".", "__parser", ".", "add_message", "(", "base_message_type", ".", "__class__", ")", "params", "=", "self", ".", "__params_descriptor", "(", "message_type", ",", "request_kind", ",", "path", ",", "method_id", ")", "return", "params" ]
Describes the parameters and body of the request. Args: request_kind: The type of request being made. message_type: messages.Message or ResourceContainer class. The message to describe. method_id: string, Unique method identifier (e.g. 'myapi.items.method') path: string, HTTP path to method. Returns: Dictionary describing the request. Raises: ValueError: if the method path and request required fields do not match
[ "Describes", "the", "parameters", "and", "body", "of", "the", "request", "." ]
python
train
foobarbecue/afterflight
afterflight/logbrowse/views.py
https://github.com/foobarbecue/afterflight/blob/7085f719593f88999dce93f35caec5f15d2991b6/afterflight/logbrowse/views.py#L40-L58
def upload_progress(request): """ AJAX view adapted from django-progressbarupload Return the upload progress and total length values """ if 'X-Progress-ID' in request.GET: progress_id = request.GET['X-Progress-ID'] elif 'X-Progress-ID' in request.META: progress_id = request.META['X-Progress-ID'] if 'logfilename' in request.GET: logfilename = request.GET['logfilename'] elif 'logfilename' in request.META: logfilename = request.META['logfilename'] cache_key = "%s_%s" % (request.META['REMOTE_ADDR'], progress_id) data = cache.get(cache_key) if not data: data = cache.get(logfilename.replace(' ','_')) return HttpResponse(json.dumps(data))
[ "def", "upload_progress", "(", "request", ")", ":", "if", "'X-Progress-ID'", "in", "request", ".", "GET", ":", "progress_id", "=", "request", ".", "GET", "[", "'X-Progress-ID'", "]", "elif", "'X-Progress-ID'", "in", "request", ".", "META", ":", "progress_id", "=", "request", ".", "META", "[", "'X-Progress-ID'", "]", "if", "'logfilename'", "in", "request", ".", "GET", ":", "logfilename", "=", "request", ".", "GET", "[", "'logfilename'", "]", "elif", "'logfilename'", "in", "request", ".", "META", ":", "logfilename", "=", "request", ".", "META", "[", "'logfilename'", "]", "cache_key", "=", "\"%s_%s\"", "%", "(", "request", ".", "META", "[", "'REMOTE_ADDR'", "]", ",", "progress_id", ")", "data", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "not", "data", ":", "data", "=", "cache", ".", "get", "(", "logfilename", ".", "replace", "(", "' '", ",", "'_'", ")", ")", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "data", ")", ")" ]
AJAX view adapted from django-progressbarupload Return the upload progress and total length values
[ "AJAX", "view", "adapted", "from", "django", "-", "progressbarupload" ]
python
train
Miserlou/Zappa
zappa/core.py
https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L1320-L1432
def deploy_lambda_alb( self, lambda_arn, lambda_name, alb_vpc_config, timeout ):
        """
        The `zappa deploy` functionality for ALB infrastructure.
        """
        if not alb_vpc_config:
            raise EnvironmentError('When creating an ALB, alb_vpc_config must be filled out in zappa_settings.')
        if 'SubnetIds' not in alb_vpc_config:
            raise EnvironmentError('When creating an ALB, you must supply two subnets in different availability zones.')
        if 'SecurityGroupIds' not in alb_vpc_config:
            alb_vpc_config["SecurityGroupIds"] = []
        if not alb_vpc_config.get('CertificateArn'):
            raise EnvironmentError('When creating an ALB, you must supply a CertificateArn for the HTTPS listener.')

        print("Deploying ALB infrastructure...")

        # Create load balancer
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_load_balancer
        kwargs = dict(
            Name=lambda_name,
            Subnets=alb_vpc_config["SubnetIds"],
            SecurityGroups=alb_vpc_config["SecurityGroupIds"],
            # TODO: Scheme can also be "internal" we need to add a new option for this.
            Scheme="internet-facing",
            # TODO: Tags might be a useful means of stock-keeping zappa-generated assets.
            #Tags=[],
            Type="application",
            # TODO: can be ipv4 or dualstack (for ipv4 and ipv6) ipv4 is required for internal Scheme.
            IpAddressType="ipv4"
        )
        response = self.elbv2_client.create_load_balancer(**kwargs)
        if not(response["LoadBalancers"]) or len(response["LoadBalancers"]) != 1:
            raise EnvironmentError("Failure to create application load balancer. Response was in unexpected format. Response was: {}".format(repr(response)))
        if response["LoadBalancers"][0]['State']['Code'] == 'failed':
            raise EnvironmentError("Failure to create application load balancer. Response reported a failed state: {}".format(response["LoadBalancers"][0]['State']['Reason']))

        load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
        load_balancer_dns = response["LoadBalancers"][0]["DNSName"]
        load_balancer_vpc = response["LoadBalancers"][0]["VpcId"]
        waiter = self.elbv2_client.get_waiter('load_balancer_available')

        # Match the lambda timeout on the load balancer.
        self.elbv2_client.modify_load_balancer_attributes(
            LoadBalancerArn=load_balancer_arn,
            Attributes=[{
                'Key': 'idle_timeout.timeout_seconds',
                'Value': str(timeout)
            }]
        )

        # Create/associate target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_target_group
        kwargs = dict(
            Name=lambda_name,
            TargetType="lambda",
            # TODO: Add options for health checks
        )
        response = self.elbv2_client.create_target_group(**kwargs)
        if not(response["TargetGroups"]) or len(response["TargetGroups"]) != 1:
            raise EnvironmentError("Failure to create application load balancer target group. Response was in unexpected format. Response was: {}".format(repr(response)))
        target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]

        # Enable multi-value headers by default.
        response = self.elbv2_client.modify_target_group_attributes(
            TargetGroupArn=target_group_arn,
            Attributes=[
                {
                    'Key': 'lambda.multi_value_headers.enabled',
                    'Value': 'true'
                },
            ]
        )

        # Allow execute permissions from target group to lambda.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.add_permission
        kwargs = dict(
            Action="lambda:InvokeFunction",
            FunctionName="{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS),
            Principal="elasticloadbalancing.amazonaws.com",
            SourceArn=target_group_arn,
            StatementId=lambda_name
        )
        response = self.lambda_client.add_permission(**kwargs)

        # Register target group to lambda association.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.register_targets
        kwargs = dict(
            TargetGroupArn=target_group_arn,
            Targets=[{"Id": "{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS)}]
        )
        response = self.elbv2_client.register_targets(**kwargs)

        # Bind listener to load balancer with default rule to target group.
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener
        kwargs = dict(
            # TODO: Listeners support custom ssl certificates (Certificates). For now we leave this default.
            Certificates=[{"CertificateArn": alb_vpc_config['CertificateArn']}],
            DefaultActions=[{
                "Type": "forward",
                "TargetGroupArn": target_group_arn,
            }],
            LoadBalancerArn=load_balancer_arn,
            Protocol="HTTPS",
            # TODO: Add option for custom ports
            Port=443,
            # TODO: Listeners support custom ssl security policy (SslPolicy). For now we leave this default.
        )
        response = self.elbv2_client.create_listener(**kwargs)

        print("ALB created with DNS: {}".format(load_balancer_dns))
        print("Note it may take several minutes for load balancer to become available.")
[ "def", "deploy_lambda_alb", "(", "self", ",", "lambda_arn", ",", "lambda_name", ",", "alb_vpc_config", ",", "timeout", ")", ":", "if", "not", "alb_vpc_config", ":", "raise", "EnvironmentError", "(", "'When creating an ALB, alb_vpc_config must be filled out in zappa_settings.'", ")", "if", "'SubnetIds'", "not", "in", "alb_vpc_config", ":", "raise", "EnvironmentError", "(", "'When creating an ALB, you must supply two subnets in different availability zones.'", ")", "if", "'SecurityGroupIds'", "not", "in", "alb_vpc_config", ":", "alb_vpc_config", "[", "\"SecurityGroupIds\"", "]", "=", "[", "]", "if", "not", "alb_vpc_config", ".", "get", "(", "'CertificateArn'", ")", ":", "raise", "EnvironmentError", "(", "'When creating an ALB, you must supply a CertificateArn for the HTTPS listener.'", ")", "print", "(", "\"Deploying ALB infrastructure...\"", ")", "# Create load balancer", "# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_load_balancer", "kwargs", "=", "dict", "(", "Name", "=", "lambda_name", ",", "Subnets", "=", "alb_vpc_config", "[", "\"SubnetIds\"", "]", ",", "SecurityGroups", "=", "alb_vpc_config", "[", "\"SecurityGroupIds\"", "]", ",", "# TODO: Scheme can also be \"internal\" we need to add a new option for this.", "Scheme", "=", "\"internet-facing\"", ",", "# TODO: Tags might be a useful means of stock-keeping zappa-generated assets.", "#Tags=[],", "Type", "=", "\"application\"", ",", "# TODO: can be ipv4 or dualstack (for ipv4 and ipv6) ipv4 is required for internal Scheme.", "IpAddressType", "=", "\"ipv4\"", ")", "response", "=", "self", ".", "elbv2_client", ".", "create_load_balancer", "(", "*", "*", "kwargs", ")", "if", "not", "(", "response", "[", "\"LoadBalancers\"", "]", ")", "or", "len", "(", "response", "[", "\"LoadBalancers\"", "]", ")", "!=", "1", ":", "raise", "EnvironmentError", "(", "\"Failure to create application load balancer. Response was in unexpected format. Response was: {}\"", ".", "format", "(", "repr", "(", "response", ")", ")", ")", "if", "response", "[", "\"LoadBalancers\"", "]", "[", "0", "]", "[", "'State'", "]", "[", "'Code'", "]", "==", "'failed'", ":", "raise", "EnvironmentError", "(", "\"Failure to create application load balancer. 
Response reported a failed state: {}\"", ".", "format", "(", "response", "[", "\"LoadBalancers\"", "]", "[", "0", "]", "[", "'State'", "]", "[", "'Reason'", "]", ")", ")", "load_balancer_arn", "=", "response", "[", "\"LoadBalancers\"", "]", "[", "0", "]", "[", "\"LoadBalancerArn\"", "]", "load_balancer_dns", "=", "response", "[", "\"LoadBalancers\"", "]", "[", "0", "]", "[", "\"DNSName\"", "]", "load_balancer_vpc", "=", "response", "[", "\"LoadBalancers\"", "]", "[", "0", "]", "[", "\"VpcId\"", "]", "waiter", "=", "self", ".", "elbv2_client", ".", "get_waiter", "(", "'load_balancer_available'", ")", "# Match the lambda timeout on the load balancer.", "self", ".", "elbv2_client", ".", "modify_load_balancer_attributes", "(", "LoadBalancerArn", "=", "load_balancer_arn", ",", "Attributes", "=", "[", "{", "'Key'", ":", "'idle_timeout.timeout_seconds'", ",", "'Value'", ":", "str", "(", "timeout", ")", "}", "]", ")", "# Create/associate target group.", "# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_target_group", "kwargs", "=", "dict", "(", "Name", "=", "lambda_name", ",", "TargetType", "=", "\"lambda\"", ",", "# TODO: Add options for health checks", ")", "response", "=", "self", ".", "elbv2_client", ".", "create_target_group", "(", "*", "*", "kwargs", ")", "if", "not", "(", "response", "[", "\"TargetGroups\"", "]", ")", "or", "len", "(", "response", "[", "\"TargetGroups\"", "]", ")", "!=", "1", ":", "raise", "EnvironmentError", "(", "\"Failure to create application load balancer target group. Response was in unexpected format. Response was: {}\"", ".", "format", "(", "repr", "(", "response", ")", ")", ")", "target_group_arn", "=", "response", "[", "\"TargetGroups\"", "]", "[", "0", "]", "[", "\"TargetGroupArn\"", "]", "# Enable multi-value headers by default.", "response", "=", "self", ".", "elbv2_client", ".", "modify_target_group_attributes", "(", "TargetGroupArn", "=", "target_group_arn", ",", "Attributes", "=", "[", "{", "'Key'", ":", "'lambda.multi_value_headers.enabled'", ",", "'Value'", ":", "'true'", "}", ",", "]", ")", "# Allow execute permissions from target group to lambda.", "# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.add_permission", "kwargs", "=", "dict", "(", "Action", "=", "\"lambda:InvokeFunction\"", ",", "FunctionName", "=", "\"{}:{}\"", ".", "format", "(", "lambda_arn", ",", "ALB_LAMBDA_ALIAS", ")", ",", "Principal", "=", "\"elasticloadbalancing.amazonaws.com\"", ",", "SourceArn", "=", "target_group_arn", ",", "StatementId", "=", "lambda_name", ")", "response", "=", "self", ".", "lambda_client", ".", "add_permission", "(", "*", "*", "kwargs", ")", "# Register target group to lambda association.", "# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.register_targets", "kwargs", "=", "dict", "(", "TargetGroupArn", "=", "target_group_arn", ",", "Targets", "=", "[", "{", "\"Id\"", ":", "\"{}:{}\"", ".", "format", "(", "lambda_arn", ",", "ALB_LAMBDA_ALIAS", ")", "}", "]", ")", "response", "=", "self", ".", "elbv2_client", ".", "register_targets", "(", "*", "*", "kwargs", ")", "# Bind listener to load balancer with default rule to target group.", "# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener", "kwargs", "=", "dict", "(", "# TODO: Listeners support custom ssl certificates (Certificates). 
For now we leave this default.", "Certificates", "=", "[", "{", "\"CertificateArn\"", ":", "alb_vpc_config", "[", "'CertificateArn'", "]", "}", "]", ",", "DefaultActions", "=", "[", "{", "\"Type\"", ":", "\"forward\"", ",", "\"TargetGroupArn\"", ":", "target_group_arn", ",", "}", "]", ",", "LoadBalancerArn", "=", "load_balancer_arn", ",", "Protocol", "=", "\"HTTPS\"", ",", "# TODO: Add option for custom ports", "Port", "=", "443", ",", "# TODO: Listeners support custom ssl security policy (SslPolicy). For now we leave this default.", ")", "response", "=", "self", ".", "elbv2_client", ".", "create_listener", "(", "*", "*", "kwargs", ")", "print", "(", "\"ALB created with DNS: {}\"", ".", "format", "(", "load_balancer_dns", ")", ")", "print", "(", "\"Note it may take several minutes for load balancer to become available.\"", ")" ]
The `zappa deploy` functionality for ALB infrastructure.
[ "The", "zappa", "deploy", "functionality", "for", "ALB", "infrastructure", "." ]
python
train
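As a quick illustration of the `alb_vpc_config` shape this method validates, here is a minimal sketch; every subnet ID, security group, and certificate ARN below is a placeholder assumption, not a value from the original record:

```python
# Hypothetical zappa_settings fragment satisfying the checks above:
# two subnets in different AZs, optional security groups, and a
# CertificateArn for the HTTPS listener.
alb_vpc_config = {
    "SubnetIds": ["subnet-aaaa1111", "subnet-bbbb2222"],
    "SecurityGroupIds": ["sg-cccc3333"],  # may be omitted; defaults to []
    "CertificateArn": "arn:aws:acm:us-east-1:123456789012:certificate/example",
}
```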
pypa/pipenv
pipenv/vendor/pexpect/spawnbase.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pexpect/spawnbase.py#L192-L238
def compile_pattern_list(self, patterns): '''This compiles a pattern-string or a list of pattern-strings. Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of those. Patterns may also be None which results in an empty list (you might do this if waiting for an EOF or TIMEOUT condition without expecting any pattern). This is used by expect() when calling expect_list(). Thus expect() is nothing more than:: cpl = self.compile_pattern_list(pl) return self.expect_list(cpl, timeout) If you are using expect() within a loop it may be more efficient to compile the patterns first and then call expect_list(). This avoids calls in a loop to compile_pattern_list():: cpl = self.compile_pattern_list(my_pattern) while some_condition: ... i = self.expect_list(cpl, timeout) ... ''' if patterns is None: return [] if not isinstance(patterns, list): patterns = [patterns] # Allow dot to match \n compile_flags = re.DOTALL if self.ignorecase: compile_flags = compile_flags | re.IGNORECASE compiled_pattern_list = [] for idx, p in enumerate(patterns): if isinstance(p, self.allowed_string_types): p = self._coerce_expect_string(p) compiled_pattern_list.append(re.compile(p, compile_flags)) elif p is EOF: compiled_pattern_list.append(EOF) elif p is TIMEOUT: compiled_pattern_list.append(TIMEOUT) elif isinstance(p, type(re.compile(''))): compiled_pattern_list.append(p) else: self._pattern_type_err(p) return compiled_pattern_list
[ "def", "compile_pattern_list", "(", "self", ",", "patterns", ")", ":", "if", "patterns", "is", "None", ":", "return", "[", "]", "if", "not", "isinstance", "(", "patterns", ",", "list", ")", ":", "patterns", "=", "[", "patterns", "]", "# Allow dot to match \\n", "compile_flags", "=", "re", ".", "DOTALL", "if", "self", ".", "ignorecase", ":", "compile_flags", "=", "compile_flags", "|", "re", ".", "IGNORECASE", "compiled_pattern_list", "=", "[", "]", "for", "idx", ",", "p", "in", "enumerate", "(", "patterns", ")", ":", "if", "isinstance", "(", "p", ",", "self", ".", "allowed_string_types", ")", ":", "p", "=", "self", ".", "_coerce_expect_string", "(", "p", ")", "compiled_pattern_list", ".", "append", "(", "re", ".", "compile", "(", "p", ",", "compile_flags", ")", ")", "elif", "p", "is", "EOF", ":", "compiled_pattern_list", ".", "append", "(", "EOF", ")", "elif", "p", "is", "TIMEOUT", ":", "compiled_pattern_list", ".", "append", "(", "TIMEOUT", ")", "elif", "isinstance", "(", "p", ",", "type", "(", "re", ".", "compile", "(", "''", ")", ")", ")", ":", "compiled_pattern_list", ".", "append", "(", "p", ")", "else", ":", "self", ".", "_pattern_type_err", "(", "p", ")", "return", "compiled_pattern_list" ]
This compiles a pattern-string or a list of pattern-strings. Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of those. Patterns may also be None which results in an empty list (you might do this if waiting for an EOF or TIMEOUT condition without expecting any pattern). This is used by expect() when calling expect_list(). Thus expect() is nothing more than:: cpl = self.compile_pattern_list(pl) return self.expect_list(cpl, timeout) If you are using expect() within a loop it may be more efficient to compile the patterns first and then call expect_list(). This avoids calls in a loop to compile_pattern_list():: cpl = self.compile_pattern_list(my_pattern) while some_condition: ... i = self.expect_list(cpl, timeout) ...
[ "This", "compiles", "a", "pattern", "-", "string", "or", "a", "list", "of", "pattern", "-", "strings", ".", "Patterns", "must", "be", "a", "StringType", "EOF", "TIMEOUT", "SRE_Pattern", "or", "a", "list", "of", "those", ".", "Patterns", "may", "also", "be", "None", "which", "results", "in", "an", "empty", "list", "(", "you", "might", "do", "this", "if", "waiting", "for", "an", "EOF", "or", "TIMEOUT", "condition", "without", "expecting", "any", "pattern", ")", "." ]
python
train
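A short sketch of the loop pattern the docstring recommends, assuming a local `bash` and an illustrative prompt regex (neither is part of the record):

```python
import pexpect

child = pexpect.spawn("bash", encoding="utf-8")
# Compile once, outside the loop, as the docstring suggests.
cpl = child.compile_pattern_list([pexpect.TIMEOUT, r"\$ "])
for _ in range(3):
    child.sendline("echo hi")
    i = child.expect_list(cpl, timeout=5)  # no per-iteration recompilation
    if i == 0:  # index 0 is pexpect.TIMEOUT in the list above
        break
```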
tjvr/skip
skip/__init__.py
https://github.com/tjvr/skip/blob/ac84f7198079732bf22c3b8cbc0dc1a073b1d539/skip/__init__.py#L134-L141
def push_script(self, scriptable, script, callback=None): """Run the script and add it to the list of threads.""" if script in self.threads: self.threads[script].finish() thread = Thread(self.run_script(scriptable, script), scriptable, callback) self.new_threads[script] = thread return thread
[ "def", "push_script", "(", "self", ",", "scriptable", ",", "script", ",", "callback", "=", "None", ")", ":", "if", "script", "in", "self", ".", "threads", ":", "self", ".", "threads", "[", "script", "]", ".", "finish", "(", ")", "thread", "=", "Thread", "(", "self", ".", "run_script", "(", "scriptable", ",", "script", ")", ",", "scriptable", ",", "callback", ")", "self", ".", "new_threads", "[", "script", "]", "=", "thread", "return", "thread" ]
Run the script and add it to the list of threads.
[ "Run", "the", "script", "and", "add", "it", "to", "the", "list", "of", "threads", "." ]
python
train
suds-community/suds
suds/mx/literal.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/mx/literal.py#L207-L226
def skip(self, content): """ Get whether to skip this I{content}. Should be skipped when the content is optional and value is either None or an empty list. @param content: Content to skip. @type content: L{Object} @return: True if content is to be skipped. @rtype: bool """ if self.optional(content): v = content.value if v is None: return True if isinstance(v, (list, tuple)) and not v: return True return False
[ "def", "skip", "(", "self", ",", "content", ")", ":", "if", "self", ".", "optional", "(", "content", ")", ":", "v", "=", "content", ".", "value", "if", "v", "is", "None", ":", "return", "True", "if", "isinstance", "(", "v", ",", "(", "list", ",", "tuple", ")", ")", "and", "not", "v", ":", "return", "True", "return", "False" ]
Get whether to skip this I{content}. Should be skipped when the content is optional and value is either None or an empty list. @param content: Content to skip. @type content: L{Object} @return: True if content is to be skipped. @rtype: bool
[ "Get", "whether", "to", "skip", "this", "I", "{", "content", "}", "." ]
python
train
seomoz/qless-py
qless/job.py
https://github.com/seomoz/qless-py/blob/3eda4ffcd4c0016c9a7e44f780d6155e1a354dda/qless/job.py#L215-L235
def fail(self, group, message): '''Mark the particular job as failed, with the provided type, and a more specific message. By `type`, we mean some phrase that might be one of several categorical modes of failure. The `message` is something more job-specific, like perhaps a traceback. This method should __not__ be used to note that a job has been dropped or has failed in a transient way. This method __should__ be used to note that a job has something really wrong with it that must be remedied. The motivation behind the `type` is so that similar errors can be grouped together. Optionally, updated data can be provided for the job. A job in any state can be marked as failed. If it has been given to a worker as a job, then its subsequent requests to heartbeat or complete that job will fail. Failed jobs are kept until they are canceled or completed. __Returns__ the id of the failed job if successful, or `False` on failure.''' logger.warn('Failing %s (%s): %s', self.jid, group, message) return self.client('fail', self.jid, self.client.worker_name, group, message, json.dumps(self.data)) or False
[ "def", "fail", "(", "self", ",", "group", ",", "message", ")", ":", "logger", ".", "warn", "(", "'Failing %s (%s): %s'", ",", "self", ".", "jid", ",", "group", ",", "message", ")", "return", "self", ".", "client", "(", "'fail'", ",", "self", ".", "jid", ",", "self", ".", "client", ".", "worker_name", ",", "group", ",", "message", ",", "json", ".", "dumps", "(", "self", ".", "data", ")", ")", "or", "False" ]
Mark the particular job as failed, with the provided type, and a more specific message. By `type`, we mean some phrase that might be one of several categorical modes of failure. The `message` is something more job-specific, like perhaps a traceback. This method should __not__ be used to note that a job has been dropped or has failed in a transient way. This method __should__ be used to note that a job has something really wrong with it that must be remedied. The motivation behind the `type` is so that similar errors can be grouped together. Optionally, updated data can be provided for the job. A job in any state can be marked as failed. If it has been given to a worker as a job, then its subsequent requests to heartbeat or complete that job will fail. Failed jobs are kept until they are canceled or completed. __Returns__ the id of the failed job if successful, or `False` on failure.
[ "Mark", "the", "particular", "job", "as", "failed", "with", "the", "provided", "type", "and", "a", "more", "specific", "message", ".", "By", "type", "we", "mean", "some", "phrase", "that", "might", "be", "one", "of", "several", "categorical", "modes", "of", "failure", ".", "The", "message", "is", "something", "more", "job", "-", "specific", "like", "perhaps", "a", "traceback", "." ]
python
train
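A hedged usage sketch, assuming a client constructed as in qless-py's README and an invented queue name and `deliver()` helper:

```python
import qless

client = qless.client()  # assumed default: local redis
job = client.queues["emails"].pop()
if job is not None and not deliver(job.data):  # deliver() is hypothetical
    # group categorizes the failure; message carries job-specific detail
    job.fail("smtp-error", "permanent rejection from upstream server")
```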
rduplain/jeni-python
jeni.py
https://github.com/rduplain/jeni-python/blob/feca12ce5e4f0438ae5d7bec59d61826063594f1/jeni.py#L850-L860
def class_in_progress(stack=None): """True if currently inside a class definition, else False.""" if stack is None: stack = inspect.stack() for frame in stack: statement_list = frame[4] if statement_list is None: continue if statement_list[0].strip().startswith('class '): return True return False
[ "def", "class_in_progress", "(", "stack", "=", "None", ")", ":", "if", "stack", "is", "None", ":", "stack", "=", "inspect", ".", "stack", "(", ")", "for", "frame", "in", "stack", ":", "statement_list", "=", "frame", "[", "4", "]", "if", "statement_list", "is", "None", ":", "continue", "if", "statement_list", "[", "0", "]", ".", "strip", "(", ")", ".", "startswith", "(", "'class '", ")", ":", "return", "True", "return", "False" ]
True if currently inside a class definition, else False.
[ "True", "if", "currently", "inside", "a", "class", "definition", "else", "False", "." ]
python
train
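Since `class_in_progress()` walks `inspect.stack()` looking for a statement starting with `class `, a small script demonstrates both outcomes; the import path is an assumption based on the module file above:

```python
from jeni import class_in_progress  # assumed import; defined in jeni.py

class Example(object):
    # Evaluated while the 'class Example' statement is on the stack -> True
    defined_in_class = class_in_progress()

print(Example.defined_in_class)  # True
print(class_in_progress())       # False at module level
```

Note that frames without source context (for example in a REPL) are skipped by the `continue` branch, so the check is best demonstrated from a file.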
jay-johnson/network-pipeline
network_pipeline/build_training_request.py
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/build_training_request.py#L17-L216
def build_training_request( csv_file=ev( "CSV_FILE", "/tmp/cleaned_attack_scans.csv"), meta_file=ev( "CSV_META_FILE", "/tmp/cleaned_metadata.json"), predict_feature=ev( "PREDICT_FEATURE", "label_value"), ignore_features=[ "label_name", "ip_src", # need to make this an int "ip_dst", # need to make this an int "eth_src", # need to make this an int "eth_dst" # need to make this an int ], seed=None, test_size=float(ev( "TEST_SIZE", "0.20")), preproc_rules=None): """build_training_request :param csv_file: csv file built with prepare_dataset.py :param meta_file: metadata file built with prepare_dataset.py :param predict_feature: feature (column) to predict :param ignore_features: features to remove from the csv before the split of test + train data :param seed: integer to seed :param test_size: percent of records to split into test vs train :param preproc_rules: future preprocessing rules hooks """ last_step = "not started" res = { "status": INVALID, "err": "", "csv_file": csv_file, "meta_file": meta_file, "meta_data": None, "seed": None, "test_size": test_size, "predict_feature": predict_feature, "features_to_process": [], "ignore_features": ignore_features, "X_train": None, "X_test": None, "Y_train": None, "Y_test": None } try: last_step = ("building seed={}").format( seed) log.debug(last_step) use_seed = seed if not use_seed: use_seed = 9 res["seed"] = np.random.seed(use_seed) last_step = ("Loading csv={}").format( csv_file) log.info(last_step) if not os.path.exists(csv_file): res["status"] = ERROR res["err"] = ("Unable to find csv_file={}").format( csv_file) log.error(res["err"]) return res # end of checking for a valid csv file on disk if not os.path.exists(meta_file): res["status"] = ERROR res["err"] = ("Unable to find meta_file={}").format( meta_file) log.error(res["err"]) return res # end of checking for a valid metadata file on disk # load csv file into pandas dataframe df = pd.read_csv(csv_file) features_to_process = [] meta_data = {} try: last_step = ("opening metadata={}").format( meta_file) log.debug(last_step) meta_data = json.loads( open(meta_file, "r").read() ) res["meta_data"] = meta_data if "post_proc_rules" in meta_data: if "drop_columns" in meta_data["post_proc_rules"]: log.debug(("Found drop_columns={}") .format( meta_data["post_proc_rules"]["drop_columns"] )) for ign in meta_data["post_proc_rules"]["drop_columns"]: ignore_features.append(ign) except Exception as e: res["error"] = ("Failed building ignore_features: " "ignore_features={} meta={} meta_data={} " "last_step='{}' ex='{}'").format( ignore_features, meta_file, meta_data, last_step, e) log.error(res["error"]) res["status"] = ERROR return res # end of trying to lookup the meta data file # for non-int/float features to ignore last_step = ("metadata={} df has " "columns={} ignore={}").format( meta_file, df.columns.values, ignore_features) log.info(last_step) for feature in df.columns.values: keep_it = True for ign in ignore_features: if feature == ign: keep_it = False if keep_it: if feature != predict_feature: features_to_process.append(feature) # end of for all features to process last_step = ("Done post-processing " "Predicting={} with features={} " "ignore_features={} records={}").format( predict_feature, features_to_process, ignore_features, len(df.index)) log.info(last_step) res["predict_feature"] = predict_feature res["ignore_features"] = [] for k in ignore_features: if k not in res["ignore_features"]: res["ignore_features"].append(k) res["features_to_process"] = [] for k in features_to_process: if k not in res["features_to_process"]: if k != predict_feature: res["features_to_process"].append(k) # split the data into training (res["X_train"], res["X_test"], res["Y_train"], res["Y_test"]) = train_test_split( df[features_to_process], df[predict_feature], test_size=test_size, random_state=res["seed"]) last_step = ("Done splitting rows={} into " "X_train={} X_test={} " "Y_train={} Y_test={}").format( len(df.index), len(res["X_train"]), len(res["X_test"]), len(res["Y_train"]), len(res["Y_test"])) log.info(("Success: {}") .format(last_step)) res["err"] = "" res["status"] = VALID except Exception as e: res["status"] = ERROR res["err"] = ("Failed build_training_request " "step='{}' with ex='{}'").format( last_step, e) log.error(("build_training_request: {}") .format(res["err"])) # end of try/ex return res
[ "def", "build_training_request", "(", "csv_file", "=", "ev", "(", "\"CSV_FILE\"", ",", "\"/tmp/cleaned_attack_scans.csv\"", ")", ",", "meta_file", "=", "ev", "(", "\"CSV_META_FILE\"", ",", "\"/tmp/cleaned_metadata.json\"", ")", ",", "predict_feature", "=", "ev", "(", "\"PREDICT_FEATURE\"", ",", "\"label_value\"", ")", ",", "ignore_features", "=", "[", "\"label_name\"", ",", "\"ip_src\"", ",", "# need to make this an int", "\"ip_dst\"", ",", "# need to make this an int", "\"eth_src\"", ",", "# need to make this an int", "\"eth_dst\"", "# need to make this an int", "]", ",", "seed", "=", "None", ",", "test_size", "=", "float", "(", "ev", "(", "\"TEST_SIZE\"", ",", "\"0.20\"", ")", ")", ",", "preproc_rules", "=", "None", ")", ":", "last_step", "=", "\"not started\"", "res", "=", "{", "\"status\"", ":", "INVALID", ",", "\"err\"", ":", "\"\"", ",", "\"csv_file\"", ":", "csv_file", ",", "\"meta_file\"", ":", "meta_file", ",", "\"meta_data\"", ":", "None", ",", "\"seed\"", ":", "None", ",", "\"test_size\"", ":", "test_size", ",", "\"predict_feature\"", ":", "predict_feature", ",", "\"features_to_process\"", ":", "[", "]", ",", "\"ignore_features\"", ":", "ignore_features", ",", "\"X_train\"", ":", "None", ",", "\"X_test\"", ":", "None", ",", "\"Y_train\"", ":", "None", ",", "\"Y_test\"", ":", "None", "}", "try", ":", "last_step", "=", "(", "\"building seed={}\"", ")", ".", "format", "(", "seed", ")", "log", ".", "debug", "(", "last_step", ")", "use_seed", "=", "seed", "if", "not", "use_seed", ":", "use_seed", "=", "9", "res", "[", "\"seed\"", "]", "=", "np", ".", "random", ".", "seed", "(", "use_seed", ")", "last_step", "=", "(", "\"Loading csv={}\"", ")", ".", "format", "(", "csv_file", ")", "log", ".", "info", "(", "last_step", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "csv_file", ")", ":", "res", "[", "\"status\"", "]", "=", "ERROR", "res", "[", "\"err\"", "]", "=", "(", "\"Unable to find csv_file={}\"", ")", ".", "format", "(", "csv_file", ")", "log", ".", "error", "(", "res", "[", "\"err\"", "]", ")", "return", "res", "# end of checking for a valid csv file on disk", "if", "not", "os", ".", "path", ".", "exists", "(", "meta_file", ")", ":", "res", "[", "\"status\"", "]", "=", "ERROR", "res", "[", "\"err\"", "]", "=", "(", "\"Unable to find meta_file={}\"", ")", ".", "format", "(", "meta_file", ")", "log", ".", "error", "(", "res", "[", "\"err\"", "]", ")", "return", "res", "# end of checking for a valid metadata file on disk", "# load csv file into pandas dataframe", "df", "=", "pd", ".", "read_csv", "(", "csv_file", ")", "features_to_process", "=", "[", "]", "meta_data", "=", "{", "}", "try", ":", "last_step", "=", "(", "\"opening metadata={}\"", ")", ".", "format", "(", "meta_file", ")", "log", ".", "debug", "(", "last_step", ")", "meta_data", "=", "json", ".", "loads", "(", "open", "(", "meta_file", ",", "\"r\"", ")", ".", "read", "(", ")", ")", "res", "[", "\"meta_data\"", "]", "=", "meta_data", "if", "\"post_proc_rules\"", "in", "meta_data", ":", "if", "\"drop_columns\"", "in", "meta_data", "[", "\"post_proc_rules\"", "]", ":", "log", ".", "debug", "(", "(", "\"Found drop_columns={}\"", ")", ".", "format", "(", "meta_data", "[", "\"post_proc_rules\"", "]", "[", "\"drop_columns\"", "]", ")", ")", "for", "ign", "in", "meta_data", "[", "\"post_proc_rules\"", "]", "[", "\"drop_columns\"", "]", ":", "ignore_features", ".", "append", "(", "ign", ")", "except", "Exception", "as", "e", ":", "res", "[", "\"error\"", "]", "=", "(", "\"Failed building ignore_features: \"", 
"\"ignore_features={} meta={} meta_data={} \"", "\"last_step='{}' ex='{}'\"", ")", ".", "format", "(", "ignore_features", ",", "meta_file", ",", "meta_data", ",", "last_step", ",", "e", ")", "log", ".", "error", "(", "res", "[", "\"error\"", "]", ")", "res", "[", "\"status\"", "]", "=", "ERROR", "return", "res", "# end of trying to lookup the meta data file", "# for non-int/float features to ignore", "last_step", "=", "(", "\"metadata={} df has \"", "\"columns={} ignore={}\"", ")", ".", "format", "(", "meta_file", ",", "df", ".", "columns", ".", "values", ",", "ignore_features", ")", "log", ".", "info", "(", "last_step", ")", "for", "feature", "in", "df", ".", "columns", ".", "values", ":", "keep_it", "=", "True", "for", "ign", "in", "ignore_features", ":", "if", "feature", "==", "ign", ":", "keep_it", "=", "False", "if", "keep_it", ":", "if", "feature", "!=", "predict_feature", ":", "features_to_process", ".", "append", "(", "feature", ")", "# end of for all features to process", "last_step", "=", "(", "\"Done post-procecessing \"", "\"Predicting={} with features={} \"", "\"ignore_features={} records={}\"", ")", ".", "format", "(", "predict_feature", ",", "features_to_process", ",", "ignore_features", ",", "len", "(", "df", ".", "index", ")", ")", "log", ".", "info", "(", "last_step", ")", "res", "[", "\"predict_feature\"", "]", "=", "predict_feature", "res", "[", "\"ignore_features\"", "]", "=", "[", "]", "for", "k", "in", "ignore_features", ":", "if", "k", "not", "in", "res", "[", "\"ignore_features\"", "]", ":", "res", "[", "\"ignore_features\"", "]", ".", "append", "(", "k", ")", "res", "[", "\"features_to_process\"", "]", "=", "[", "]", "for", "k", "in", "features_to_process", ":", "if", "k", "not", "in", "res", "[", "\"features_to_process\"", "]", ":", "if", "k", "!=", "predict_feature", ":", "res", "[", "\"features_to_process\"", "]", ".", "append", "(", "k", ")", "# split the data into training", "(", "res", "[", "\"X_train\"", "]", ",", "res", "[", "\"X_test\"", "]", ",", "res", "[", "\"Y_train\"", "]", ",", "res", "[", "\"Y_test\"", "]", ")", "=", "train_test_split", "(", "df", "[", "features_to_process", "]", ",", "df", "[", "predict_feature", "]", ",", "test_size", "=", "test_size", ",", "random_state", "=", "res", "[", "\"seed\"", "]", ")", "last_step", "=", "(", "\"Done splitting rows={} into \"", "\"X_train={} X_test={} \"", "\"Y_train={} Y_test={}\"", ")", ".", "format", "(", "len", "(", "df", ".", "index", ")", ",", "len", "(", "res", "[", "\"X_train\"", "]", ")", ",", "len", "(", "res", "[", "\"X_test\"", "]", ")", ",", "len", "(", "res", "[", "\"Y_train\"", "]", ")", ",", "len", "(", "res", "[", "\"Y_test\"", "]", ")", ")", "log", ".", "info", "(", "(", "\"Success: {}\"", ")", ".", "format", "(", "last_step", ")", ")", "res", "[", "\"err\"", "]", "=", "\"\"", "res", "[", "\"status\"", "]", "=", "VALID", "except", "Exception", "as", "e", ":", "res", "[", "\"status\"", "]", "=", "ERROR", "res", "[", "\"err\"", "]", "=", "(", "\"Failed build_training_request \"", "\"step='{}' with ex='{}'\"", ")", ".", "format", "(", "last_step", ",", "e", ")", "log", ".", "error", "(", "(", "\"build_training_request: {}\"", ")", ".", "format", "(", "res", "[", "\"err\"", "]", ")", ")", "# end of try/ex", "return", "res" ]
build_training_request :param csv_file: csv file built with prepare_dataset.py :param meta_file: metadata file built with prepare_dataset.py :param predict_feature: feature (column) to predict :param ignore_features: features to remove from the csv before the split of test + train data :param seed: integer to seed :param test_size: percent of records to split into test vs train :param preproc_rules: future preprocessing rules hooks
[ "build_training_request" ]
python
train
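A hypothetical invocation; the paths are the function's own defaults and `VALID` is assumed importable from the same module as the status constants used above:

```python
res = build_training_request(
    csv_file="/tmp/cleaned_attack_scans.csv",
    meta_file="/tmp/cleaned_metadata.json",
    test_size=0.20)
if res["status"] == VALID:
    X_train, Y_train = res["X_train"], res["Y_train"]  # ready for fitting
else:
    print("failed: {}".format(res["err"]))
```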
gwpy/gwpy
gwpy/segments/io/hdf5.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/io/hdf5.py#L105-L128
def _get_flag_group(h5f, path): """Determine the group to use in order to read a flag """ # if user chose the path, just use it if path: return h5f[path] # if the user gave us the group directly, use it if _is_flag_group(h5f): return h5f # otherwise try and find a single group that matches try: path, = _find_flag_groups(h5f) except ValueError: pass else: return h5f[path] # if not exactly 1 valid group in the file, complain raise ValueError( "please pass a valid HDF5 Group, or specify the HDF5 Group " "path via the ``path=`` keyword argument", )
[ "def", "_get_flag_group", "(", "h5f", ",", "path", ")", ":", "# if user chose the path, just use it", "if", "path", ":", "return", "h5f", "[", "path", "]", "# if the user gave us the group directly, use it", "if", "_is_flag_group", "(", "h5f", ")", ":", "return", "h5f", "# otherwise try and find a single group that matches", "try", ":", "path", ",", "=", "_find_flag_groups", "(", "h5f", ")", "except", "ValueError", ":", "pass", "else", ":", "return", "h5f", "[", "path", "]", "# if not exactly 1 valid group in the file, complain", "raise", "ValueError", "(", "\"please pass a valid HDF5 Group, or specify the HDF5 Group \"", "\"path via the ``path=`` keyword argument\"", ",", ")" ]
Determine the group to use in order to read a flag
[ "Determine", "the", "group", "to", "use", "in", "order", "to", "read", "a", "flag" ]
python
train
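This private helper backs flag reads from HDF5; a sketch of the public call it serves, with an invented file name and flag path (the `path=` keyword is the one this helper's error message documents):

```python
from gwpy.segments import DataQualityFlag

# Without path=, the file must contain exactly one flag group.
flag = DataQualityFlag.read("segments.h5", path="X1:DMT-ANALYSIS_READY:1")
```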
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/hub.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/hub.py#L943-L968
def unregister_engine(self, ident, msg): """Unregister an engine that explicitly requested to leave.""" try: eid = msg['content']['id'] except: self.log.error("registration::bad engine id for unregistration: %r", ident, exc_info=True) return self.log.info("registration::unregister_engine(%r)", eid) # print (eid) uuid = self.keytable[eid] content=dict(id=eid, queue=uuid.decode('ascii')) self.dead_engines.add(uuid) # self.ids.remove(eid) # uuid = self.keytable.pop(eid) # # ec = self.engines.pop(eid) # self.hearts.pop(ec.heartbeat) # self.by_ident.pop(ec.queue) # self.completed.pop(eid) handleit = lambda : self._handle_stranded_msgs(eid, uuid) dc = ioloop.DelayedCallback(handleit, self.registration_timeout, self.loop) dc.start() ############## TODO: HANDLE IT ################ if self.notifier: self.session.send(self.notifier, "unregistration_notification", content=content)
[ "def", "unregister_engine", "(", "self", ",", "ident", ",", "msg", ")", ":", "try", ":", "eid", "=", "msg", "[", "'content'", "]", "[", "'id'", "]", "except", ":", "self", ".", "log", ".", "error", "(", "\"registration::bad engine id for unregistration: %r\"", ",", "ident", ",", "exc_info", "=", "True", ")", "return", "self", ".", "log", ".", "info", "(", "\"registration::unregister_engine(%r)\"", ",", "eid", ")", "# print (eid)", "uuid", "=", "self", ".", "keytable", "[", "eid", "]", "content", "=", "dict", "(", "id", "=", "eid", ",", "queue", "=", "uuid", ".", "decode", "(", "'ascii'", ")", ")", "self", ".", "dead_engines", ".", "add", "(", "uuid", ")", "# self.ids.remove(eid)", "# uuid = self.keytable.pop(eid)", "#", "# ec = self.engines.pop(eid)", "# self.hearts.pop(ec.heartbeat)", "# self.by_ident.pop(ec.queue)", "# self.completed.pop(eid)", "handleit", "=", "lambda", ":", "self", ".", "_handle_stranded_msgs", "(", "eid", ",", "uuid", ")", "dc", "=", "ioloop", ".", "DelayedCallback", "(", "handleit", ",", "self", ".", "registration_timeout", ",", "self", ".", "loop", ")", "dc", ".", "start", "(", ")", "############## TODO: HANDLE IT ################", "if", "self", ".", "notifier", ":", "self", ".", "session", ".", "send", "(", "self", ".", "notifier", ",", "\"unregistration_notification\"", ",", "content", "=", "content", ")" ]
Unregister an engine that explicitly requested to leave.
[ "Unregister", "an", "engine", "that", "explicitly", "requested", "to", "leave", "." ]
python
test
useblocks/groundwork
groundwork/patterns/gw_recipes_pattern.py
https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_recipes_pattern.py#L82-L89
def get(self, name=None): """ Gets a list of all recipes, which are registered by the current plugin. If a name is provided, only the requested recipe is returned or None. :param: name: Name of the recipe """ return self.__app.recipes.get(name, self._plugin)
[ "def", "get", "(", "self", ",", "name", "=", "None", ")", ":", "return", "self", ".", "__app", ".", "recipes", ".", "get", "(", "name", ",", "self", ".", "_plugin", ")" ]
Gets a list of all recipes, which are registered by the current plugin. If a name is provided, only the requested recipe is returned or None. :param: name: Name of the recipe
[ "Gets", "a", "list", "of", "all", "recipes", "which", "are", "registered", "by", "the", "current", "plugin", ".", "If", "a", "name", "is", "provided", "only", "the", "requested", "recipe", "is", "returned", "or", "None", "." ]
python
train
F-Secure/see
plugins/agent.py
https://github.com/F-Secure/see/blob/3e053e52a45229f96a12db9e98caf7fb3880e811/plugins/agent.py#L85-L95
def respond(self, output): """Generates server response.""" response = {'exit_code': output.code, 'command_output': output.log} self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() self.wfile.write(bytes(json.dumps(response), "utf8"))
[ "def", "respond", "(", "self", ",", "output", ")", ":", "response", "=", "{", "'exit_code'", ":", "output", ".", "code", ",", "'command_output'", ":", "output", ".", "log", "}", "self", ".", "send_response", "(", "200", ")", "self", ".", "send_header", "(", "'Content-type'", ",", "'application/json'", ")", "self", ".", "end_headers", "(", ")", "self", ".", "wfile", ".", "write", "(", "bytes", "(", "json", ".", "dumps", "(", "response", ")", ",", "\"utf8\"", ")", ")" ]
Generates server response.
[ "Generates", "server", "response", "." ]
python
train
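A self-contained sketch of a handler wired around this method; the `Output` namedtuple stands in for whatever object carries `.code` and `.log` in the real agent:

```python
import json
from collections import namedtuple
from http.server import BaseHTTPRequestHandler, HTTPServer

Output = namedtuple("Output", ("code", "log"))  # assumed shape only

class AgentHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.respond(Output(code=0, log="ok"))

    def respond(self, output):
        response = {"exit_code": output.code, "command_output": output.log}
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        self.wfile.write(bytes(json.dumps(response), "utf8"))

# HTTPServer(("", 8080), AgentHandler).serve_forever()
```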
tcalmant/ipopo
pelix/rsa/remoteserviceadmin.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/rsa/remoteserviceadmin.py#L970-L982
def get_exception(self): # type: () -> Optional[Tuple[Any, Any, Any]] """ Returns the exception associated to the export :return: An exception tuple, if any """ with self.__lock: return ( self.__updateexception if self.__updateexception or self.__closed else self.__exportref.get_exception() )
[ "def", "get_exception", "(", "self", ")", ":", "# type: () -> Optional[Tuple[Any, Any, Any]]", "with", "self", ".", "__lock", ":", "return", "(", "self", ".", "__updateexception", "if", "self", ".", "__updateexception", "or", "self", ".", "__closed", "else", "self", ".", "__exportref", ".", "get_exception", "(", ")", ")" ]
Returns the exception associated to the export :return: An exception tuple, if any
[ "Returns", "the", "exception", "associated", "to", "the", "export" ]
python
train
ouroboroscoding/format-oc-python
FormatOC/__init__.py
https://github.com/ouroboroscoding/format-oc-python/blob/c160b46fe4ff2c92333c776991c712de23991225/FormatOC/__init__.py#L1956-L1983
def clean(self, value): """Clean Uses the valid method to check which type the value is, and then calls the correct version of clean on that node Arguments: value {mixed} -- The value to clean Returns: mixed """ # If the value is None and it's optional, return as is if value is None and self._optional: return None # Go through each of the nodes for i in range(len(self._nodes)): # If it's valid if self._nodes[i].valid(value): # Use its clean return self._nodes[i].clean(value) # Something went wrong raise ValueError('value', value)
[ "def", "clean", "(", "self", ",", "value", ")", ":", "# If the value is None and it's optional, return as is", "if", "value", "is", "None", "and", "self", ".", "_optional", ":", "return", "None", "# Go through each of the nodes", "for", "i", "in", "range", "(", "len", "(", "self", ".", "_nodes", ")", ")", ":", "# If it's valid", "if", "self", ".", "_nodes", "[", "i", "]", ".", "valid", "(", "value", ")", ":", "# Use its clean", "return", "self", ".", "_nodes", "[", "i", "]", ".", "clean", "(", "value", ")", "# Something went wrong", "raise", "ValueError", "(", "'value'", ",", "value", ")" ]
Clean Uses the valid method to check which type the value is, and then calls the correct version of clean on that node Arguments: value {mixed} -- The value to clean Returns: mixed
[ "Clean" ]
python
train
henzk/django-productline
django_productline/tasks.py
https://github.com/henzk/django-productline/blob/24ff156924c1a8c07b99cbb8a1de0a42b8d81f60/django_productline/tasks.py#L270-L279
def import_context(target_zip): """ Overwrite old context.json, use context.json from target_zip :param target_zip: :return: """ context_path = tasks.get_context_path() with zipfile.ZipFile(target_zip) as unzipped_data: with open(context_path, 'w') as context: context.write(unzipped_data.read('context.json'))
[ "def", "import_context", "(", "target_zip", ")", ":", "context_path", "=", "tasks", ".", "get_context_path", "(", ")", "with", "zipfile", ".", "ZipFile", "(", "target_zip", ")", "as", "unzipped_data", ":", "with", "open", "(", "context_path", ",", "'w'", ")", "as", "context", ":", "context", ".", "write", "(", "unzipped_data", ".", "read", "(", "'context.json'", ")", ")" ]
Overwrite old context.json, use context.json from target_zip :param target_zip: :return:
[ "Overwrite", "old", "context", ".", "json", "use", "context", ".", "json", "from", "target_zip", ":", "param", "target_zip", ":", ":", "return", ":" ]
python
train
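A sketch of the archive shape `import_context` expects; the zip path and context payload are placeholders:

```python
import zipfile

# The target zip must contain a top-level 'context.json' entry.
with zipfile.ZipFile("/tmp/export.zip", "w") as zf:
    zf.writestr("context.json", '{"SITE_ID": 1}')

import_context("/tmp/export.zip")  # replaces the current context.json
```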
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L290-L338
def clear_tc(self, owner, data, clear_type): """Delete threat intel from ThreatConnect platform. Args: owner (str): The ThreatConnect owner. data (dict): The data for the threat intel to clear. clear_type (str): The type of clear action. """ batch = self.tcex.batch(owner, action='Delete') tc_type = data.get('type') path = data.get('path') if tc_type in self.tcex.group_types: name = self.tcex.playbook.read(data.get('name')) name = self.path_data(name, path) if name is not None: print( 'Deleting ThreatConnect Group: {}{}{}'.format( c.Style.BRIGHT, c.Fore.MAGENTA, name ) ) self.log.info( '[{}] Deleting ThreatConnect {} with name: {}.'.format( clear_type, tc_type, name ) ) batch.group(tc_type, name) elif tc_type in self.tcex.indicator_types: if data.get('summary') is not None: summary = self.tcex.playbook.read(data.get('summary')) else: resource = self.tcex.resource(tc_type) summary = resource.summary(data) summary = self.path_data(summary, path) if summary is not None: print( 'Deleting ThreatConnect Indicator: {}{}{}'.format( c.Style.BRIGHT, c.Fore.MAGENTA, summary ) ) self.log.info( '[{}] Deleting ThreatConnect {} with value: {}.'.format( clear_type, tc_type, summary ) ) batch.indicator(tc_type, summary) batch_results = batch.submit() self.log.debug('[{}] Batch Results: {}'.format(clear_type, batch_results)) for error in batch_results.get('errors') or []: self.log.error('[{}] Batch Error: {}'.format(clear_type, error))
[ "def", "clear_tc", "(", "self", ",", "owner", ",", "data", ",", "clear_type", ")", ":", "batch", "=", "self", ".", "tcex", ".", "batch", "(", "owner", ",", "action", "=", "'Delete'", ")", "tc_type", "=", "data", ".", "get", "(", "'type'", ")", "path", "=", "data", ".", "get", "(", "'path'", ")", "if", "tc_type", "in", "self", ".", "tcex", ".", "group_types", ":", "name", "=", "self", ".", "tcex", ".", "playbook", ".", "read", "(", "data", ".", "get", "(", "'name'", ")", ")", "name", "=", "self", ".", "path_data", "(", "name", ",", "path", ")", "if", "name", "is", "not", "None", ":", "print", "(", "'Deleting ThreatConnect Group: {}{}{}'", ".", "format", "(", "c", ".", "Style", ".", "BRIGHT", ",", "c", ".", "Fore", ".", "MAGENTA", ",", "name", ")", ")", "self", ".", "log", ".", "info", "(", "'[{}] Deleting ThreatConnect {} with name: {}.'", ".", "format", "(", "clear_type", ",", "tc_type", ",", "name", ")", ")", "batch", ".", "group", "(", "tc_type", ",", "name", ")", "elif", "tc_type", "in", "self", ".", "tcex", ".", "indicator_types", ":", "if", "data", ".", "get", "(", "'summary'", ")", "is", "not", "None", ":", "summary", "=", "self", ".", "tcex", ".", "playbook", ".", "read", "(", "data", ".", "get", "(", "'summary'", ")", ")", "else", ":", "resource", "=", "self", ".", "tcex", ".", "resource", "(", "tc_type", ")", "summary", "=", "resource", ".", "summary", "(", "data", ")", "summary", "=", "self", ".", "path_data", "(", "summary", ",", "path", ")", "if", "summary", "is", "not", "None", ":", "print", "(", "'Deleting ThreatConnect Indicator: {}{}{}'", ".", "format", "(", "c", ".", "Style", ".", "BRIGHT", ",", "c", ".", "Fore", ".", "MAGENTA", ",", "summary", ")", ")", "self", ".", "log", ".", "info", "(", "'[{}] Deleting ThreatConnect {} with value: {}.'", ".", "format", "(", "clear_type", ",", "tc_type", ",", "summary", ")", ")", "batch", ".", "indicator", "(", "tc_type", ",", "summary", ")", "batch_results", "=", "batch", ".", "submit", "(", ")", "self", ".", "log", ".", "debug", "(", "'[{}] Batch Results: {}'", ".", "format", "(", "clear_type", ",", "batch_results", ")", ")", "for", "error", "in", "batch_results", ".", "get", "(", "'errors'", ")", "or", "[", "]", ":", "self", ".", "log", ".", "error", "(", "'[{}] Batch Error: {}'", ".", "format", "(", "clear_type", ",", "error", ")", ")" ]
Delete threat intel from ThreatConnect platform. Args: owner (str): The ThreatConnect owner. data (dict): The data for the threat intel to clear. clear_type (str): The type of clear action.
[ "Delete", "threat", "intel", "from", "ThreatConnect", "platform", "." ]
python
train
rkhleics/wagtailmenus
wagtailmenus/templatetags/menu_tags.py
https://github.com/rkhleics/wagtailmenus/blob/a41f240bed0d362e0d4dd4ef04a230f2b1827a93/wagtailmenus/templatetags/menu_tags.py#L147-L200
def sub_menu( context, menuitem_or_page, use_specific=None, allow_repeating_parents=None, apply_active_classes=None, template='', use_absolute_page_urls=None, add_sub_menus_inline=None, **kwargs ): """ Retrieve the children pages for the `menuitem_or_page` provided, turn them into menu items, and render them to a template. """ validate_supplied_values('sub_menu', use_specific=use_specific, menuitem_or_page=menuitem_or_page) max_levels = context.get( 'max_levels', settings.DEFAULT_CHILDREN_MENU_MAX_LEVELS ) if use_specific is None: use_specific = context.get( 'use_specific', constants.USE_SPECIFIC_AUTO) if apply_active_classes is None: apply_active_classes = context.get('apply_active_classes', True) if allow_repeating_parents is None: allow_repeating_parents = context.get('allow_repeating_parents', True) if use_absolute_page_urls is None: use_absolute_page_urls = context.get('use_absolute_page_urls', False) if add_sub_menus_inline is None: add_sub_menus_inline = context.get('add_sub_menus_inline', False) if isinstance(menuitem_or_page, Page): parent_page = menuitem_or_page else: parent_page = menuitem_or_page.link_page original_menu = context.get('original_menu_instance') if original_menu is None: raise SubMenuUsageError() menu_class = original_menu.get_sub_menu_class() return menu_class.render_from_tag( context=context, parent_page=parent_page, max_levels=max_levels, use_specific=use_specific, apply_active_classes=apply_active_classes, allow_repeating_parents=allow_repeating_parents, use_absolute_page_urls=use_absolute_page_urls, add_sub_menus_inline=add_sub_menus_inline, template_name=template, **kwargs )
[ "def", "sub_menu", "(", "context", ",", "menuitem_or_page", ",", "use_specific", "=", "None", ",", "allow_repeating_parents", "=", "None", ",", "apply_active_classes", "=", "None", ",", "template", "=", "''", ",", "use_absolute_page_urls", "=", "None", ",", "add_sub_menus_inline", "=", "None", ",", "*", "*", "kwargs", ")", ":", "validate_supplied_values", "(", "'sub_menu'", ",", "use_specific", "=", "use_specific", ",", "menuitem_or_page", "=", "menuitem_or_page", ")", "max_levels", "=", "context", ".", "get", "(", "'max_levels'", ",", "settings", ".", "DEFAULT_CHILDREN_MENU_MAX_LEVELS", ")", "if", "use_specific", "is", "None", ":", "use_specific", "=", "context", ".", "get", "(", "'use_specific'", ",", "constants", ".", "USE_SPECIFIC_AUTO", ")", "if", "apply_active_classes", "is", "None", ":", "apply_active_classes", "=", "context", ".", "get", "(", "'apply_active_classes'", ",", "True", ")", "if", "allow_repeating_parents", "is", "None", ":", "allow_repeating_parents", "=", "context", ".", "get", "(", "'allow_repeating_parents'", ",", "True", ")", "if", "use_absolute_page_urls", "is", "None", ":", "use_absolute_page_urls", "=", "context", ".", "get", "(", "'use_absolute_page_urls'", ",", "False", ")", "if", "add_sub_menus_inline", "is", "None", ":", "add_sub_menus_inline", "=", "context", ".", "get", "(", "'add_sub_menus_inline'", ",", "False", ")", "if", "isinstance", "(", "menuitem_or_page", ",", "Page", ")", ":", "parent_page", "=", "menuitem_or_page", "else", ":", "parent_page", "=", "menuitem_or_page", ".", "link_page", "original_menu", "=", "context", ".", "get", "(", "'original_menu_instance'", ")", "if", "original_menu", "is", "None", ":", "raise", "SubMenuUsageError", "(", ")", "menu_class", "=", "original_menu", ".", "get_sub_menu_class", "(", ")", "return", "menu_class", ".", "render_from_tag", "(", "context", "=", "context", ",", "parent_page", "=", "parent_page", ",", "max_levels", "=", "max_levels", ",", "use_specific", "=", "use_specific", ",", "apply_active_classes", "=", "apply_active_classes", ",", "allow_repeating_parents", "=", "allow_repeating_parents", ",", "use_absolute_page_urls", "=", "use_absolute_page_urls", ",", "add_sub_menus_inline", "=", "add_sub_menus_inline", ",", "template_name", "=", "template", ",", "*", "*", "kwargs", ")" ]
Retrieve the children pages for the `menuitem_or_page` provided, turn them into menu items, and render them to a template.
[ "Retrieve", "the", "children", "pages", "for", "the", "menuitem_or_page", "provided", "turn", "them", "into", "menu", "items", "and", "render", "them", "to", "a", "template", "." ]
python
train
arve0/leicaexperiment
leicaexperiment/experiment.py
https://github.com/arve0/leicaexperiment/blob/c0393c4d51984a506f813319efb66e54c4f2a426/leicaexperiment/experiment.py#L719-L758
def attributes(path): """Get attributes from path based on format --[A-Z]. Returns namedtuple with upper case attributes equal to what is found in path (string) and lower case as int. If path holds several occurrences of the same character, only the last one is kept. >>> attrs = attributes('/folder/file--X00-X01.tif') >>> print(attrs) namedtuple('attributes', 'X x')('01', 1) >>> print(attrs.x) 1 Parameters ---------- path : string Returns ------- collections.namedtuple """ # number of characters set to numbers have changed in LAS AF X !! matches = re.findall('--([A-Z]{1})([0-9]{2,4})', path) keys = [] values = [] for k,v in matches: if k in keys: # keep only last key i = keys.index(k) del keys[i] del values[i] keys.append(k) values.append(v) lower_keys = [k.lower() for k in keys] int_values= [int(v) for v in values] attributes = namedtuple('attributes', keys + lower_keys) return attributes(*values + int_values)
[ "def", "attributes", "(", "path", ")", ":", "# number of characters set to numbers have changed in LAS AF X !!", "matches", "=", "re", ".", "findall", "(", "'--([A-Z]{1})([0-9]{2,4})'", ",", "path", ")", "keys", "=", "[", "]", "values", "=", "[", "]", "for", "k", ",", "v", "in", "matches", ":", "if", "k", "in", "keys", ":", "# keep only last key", "i", "=", "keys", ".", "index", "(", "k", ")", "del", "keys", "[", "i", "]", "del", "values", "[", "i", "]", "keys", ".", "append", "(", "k", ")", "values", ".", "append", "(", "v", ")", "lower_keys", "=", "[", "k", ".", "lower", "(", ")", "for", "k", "in", "keys", "]", "int_values", "=", "[", "int", "(", "v", ")", "for", "v", "in", "values", "]", "attributes", "=", "namedtuple", "(", "'attributes'", ",", "keys", "+", "lower_keys", ")", "return", "attributes", "(", "*", "values", "+", "int_values", ")" ]
Get attributes from path based on format --[A-Z]. Returns namedtuple with upper case attributes equal to what is found in path (string) and lower case as int. If path holds several occurrences of the same character, only the last one is kept. >>> attrs = attributes('/folder/file--X00-X01.tif') >>> print(attrs) namedtuple('attributes', 'X x')('01', 1) >>> print(attrs.x) 1 Parameters ---------- path : string Returns ------- collections.namedtuple
[ "Get", "attributes", "from", "path", "based", "on", "format", "--", "[", "A", "-", "Z", "]", ".", "Returns", "namedtuple", "with", "upper", "case", "attributes", "equal", "to", "what", "is", "found", "in", "path", "(", "string", ")", "and", "lower", "case", "as", "int", ".", "If", "path", "holds", "several", "occurrences", "of", "the", "same", "character", "only", "the", "last", "one", "is", "kept", "." ]
python
valid
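Extending the docstring's example with a repeated key; the file name is invented, and the last occurrence of a duplicated letter wins:

```python
attrs = attributes('/scans/well--U00--V01--X02--X03.ome.tif')
print(attrs.U, attrs.u)  # '00' 0
print(attrs.X, attrs.x)  # '03' 3  (the later X03 replaces X02)
```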
zhanglab/psamm
psamm/datasource/native.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/datasource/native.py#L1487-L1497
def write_reactions(self, stream, reactions, properties=None): """Write iterable of reactions as YAML object to stream. Args: stream: File-like object. reactions: Iterable of reaction entries. properties: Set of reaction properties to output (or None to output all). """ self._write_entries( stream, reactions, self.convert_reaction_entry, properties)
[ "def", "write_reactions", "(", "self", ",", "stream", ",", "reactions", ",", "properties", "=", "None", ")", ":", "self", ".", "_write_entries", "(", "stream", ",", "reactions", ",", "self", ".", "convert_reaction_entry", ",", "properties", ")" ]
Write iterable of reactions as YAML object to stream. Args: stream: File-like object. reactions: Iterable of reaction entries. properties: Set of reaction properties to output (or None to output all).
[ "Write", "iterable", "of", "reactions", "as", "YAML", "object", "to", "stream", "." ]
python
train
econ-ark/HARK
HARK/cAndCwithStickyE/StickyEmodel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/cAndCwithStickyE/StickyEmodel.py#L98-L131
def getShocks(self): ''' Gets permanent and transitory shocks (combining idiosyncratic and aggregate shocks), but only consumers who update their macroeconomic beliefs this period incorporate all previously unnoticed aggregate permanent shocks. Agents correctly observe the level of all real variables (market resources, consumption, assets, etc), but misperceive the aggregate productivity level. Parameters ---------- None Returns ------- None ''' # The strange syntax here is so that both StickyEconsumerType and StickyEmarkovConsumerType # run the getShocks method of their first superclass: AggShockConsumerType and # AggShockMarkovConsumerType respectively. This will be simplified in Python 3. super(self.__class__,self).getShocks() # Get permanent and transitory combined shocks newborns = self.t_age == 0 self.TranShkNow[newborns] = self.TranShkAggNow*self.wRteNow # Turn off idiosyncratic shocks for newborns self.PermShkNow[newborns] = self.PermShkAggNow self.getUpdaters() # Randomly draw which agents will update their beliefs # Calculate innovation to the productivity level perception error pLvlErrNew = self.getpLvlError() self.pLvlErrNow *= pLvlErrNew # Perception error accumulation # Calculate (mis)perceptions of the permanent shock PermShkPcvd = self.PermShkNow/pLvlErrNew PermShkPcvd[self.update] *= self.pLvlErrNow[self.update] # Updaters see the true permanent shock and all missed news self.pLvlErrNow[self.update] = 1.0 self.PermShkNow = PermShkPcvd
[ "def", "getShocks", "(", "self", ")", ":", "# The strange syntax here is so that both StickyEconsumerType and StickyEmarkovConsumerType", "# run the getShocks method of their first superclass: AggShockConsumerType and", "# AggShockMarkovConsumerType respectively. This will be simplified in Python 3.", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "getShocks", "(", ")", "# Get permanent and transitory combined shocks", "newborns", "=", "self", ".", "t_age", "==", "0", "self", ".", "TranShkNow", "[", "newborns", "]", "=", "self", ".", "TranShkAggNow", "*", "self", ".", "wRteNow", "# Turn off idiosyncratic shocks for newborns", "self", ".", "PermShkNow", "[", "newborns", "]", "=", "self", ".", "PermShkAggNow", "self", ".", "getUpdaters", "(", ")", "# Randomly draw which agents will update their beliefs", "# Calculate innovation to the productivity level perception error", "pLvlErrNew", "=", "self", ".", "getpLvlError", "(", ")", "self", ".", "pLvlErrNow", "*=", "pLvlErrNew", "# Perception error accumulation", "# Calculate (mis)perceptions of the permanent shock", "PermShkPcvd", "=", "self", ".", "PermShkNow", "/", "pLvlErrNew", "PermShkPcvd", "[", "self", ".", "update", "]", "*=", "self", ".", "pLvlErrNow", "[", "self", ".", "update", "]", "# Updaters see the true permanent shock and all missed news", "self", ".", "pLvlErrNow", "[", "self", ".", "update", "]", "=", "1.0", "self", ".", "PermShkNow", "=", "PermShkPcvd" ]
Gets permanent and transitory shocks (combining idiosyncratic and aggregate shocks), but only consumers who update their macroeconomic beliefs this period incorporate all previously unnoticed aggregate permanent shocks. Agents correctly observe the level of all real variables (market resources, consumption, assets, etc), but misperceive the aggregate productivity level. Parameters ---------- None Returns ------- None
[ "Gets", "permanent", "and", "transitory", "shocks", "(", "combining", "idiosyncratic", "and", "aggregate", "shocks", ")", "but", "only", "consumers", "who", "update", "their", "macroeconomic", "beliefs", "this", "period", "incorporate", "all", "previously", "unnoticed", "aggregate", "permanent", "shocks", ".", "Agents", "correctly", "observe", "the", "level", "of", "all", "real", "variables", "(", "market", "resources", "consumption", "assets", "etc", ")", "but", "misperceive", "the", "aggregate", "productivity", "level", "." ]
python
train
eng-tools/sfsimodels
sfsimodels/loader.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/loader.py#L33-L53
def load_soil_sample_data(sp): """ Sample data for the Soil object :param sp: Soil Object :return: """ # soil sp.g_mod = 60.0e6 # [Pa] sp.phi = 30 # [degrees] sp.relative_density = .40 # [decimal] sp.gwl = 2. # [m], ground water level sp.unit_dry_weight = 17000 # [N/m3] sp.unit_sat_weight = 18000 # [N/m3] sp.unit_weight_water = 9800 # [N/m3] sp.cohesion = 10.0 # [Pa] sp.poissons_ratio = 0.22 sp.e_min = 0.55 sp.e_max = 0.95 sp.e_critical0 = 0.79 # Jin et al. 2015 sp.p_critical0 = 0.7 # Jin et al. 2015 sp.lamb_crl = 0.015
[ "def", "load_soil_sample_data", "(", "sp", ")", ":", "# soil", "sp", ".", "g_mod", "=", "60.0e6", "# [Pa]", "sp", ".", "phi", "=", "30", "# [degrees]", "sp", ".", "relative_density", "=", ".40", "# [decimal]", "sp", ".", "gwl", "=", "2.", "# [m], ground water level", "sp", ".", "unit_dry_weight", "=", "17000", "# [N/m3]", "sp", ".", "unit_sat_weight", "=", "18000", "# [N/m3]", "sp", ".", "unit_weight_water", "=", "9800", "# [N/m3]", "sp", ".", "cohesion", "=", "10.0", "# [Pa]", "sp", ".", "poissons_ratio", "=", "0.22", "sp", ".", "e_min", "=", "0.55", "sp", ".", "e_max", "=", "0.95", "sp", ".", "e_critical0", "=", "0.79", "# Jin et al. 2015", "sp", ".", "p_critical0", "=", "0.7", "# Jin et al. 2015", "sp", ".", "lamb_crl", "=", "0.015" ]
Sample data for the Soil object :param sp: Soil Object :return:
[ "Sample", "data", "for", "the", "Soil", "object", ":", "param", "sp", ":", "Soil", "Object", ":", "return", ":" ]
python
train
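A hedged sketch of populating a soil object; `sfsimodels.models.Soil` is an assumed location for the Soil class:

```python
from sfsimodels import models  # assumed module layout

sp = models.Soil()
load_soil_sample_data(sp)
print(sp.g_mod, sp.phi)  # 60000000.0 30
```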
ayust/kitnirc
kitnirc/modular.py
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/modular.py#L73-L83
def handle_event(self, event, client, args): """Dispatch an event to its handler. Note: the handler does not receive the event which triggered its call. If you want to handle more than one event, it's recommended to put the shared handling in a separate function, and create wrapper handlers that call the shared function. """ handler = self.event_handlers.get(event) if handler: return handler(client, *args)
[ "def", "handle_event", "(", "self", ",", "event", ",", "client", ",", "args", ")", ":", "handler", "=", "self", ".", "event_handlers", ".", "get", "(", "event", ")", "if", "handler", ":", "return", "handler", "(", "client", ",", "*", "args", ")" ]
Dispatch an event to its handler. Note: the handler does not receive the event which triggered its call. If you want to handle more than one event, it's recommended to put the shared handling in a separate function, and create wrapper handlers that call the shared function.
[ "Dispatch", "an", "event", "to", "its", "handler", "." ]
python
train
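A sketch of how a module might populate `event_handlers` for this dispatcher; the base class location, event name, and reply call are illustrative assumptions:

```python
from kitnirc.modular import Module  # assumed base class

class EchoModule(Module):
    def __init__(self, *args, **kwargs):
        super(EchoModule, self).__init__(*args, **kwargs)
        self.event_handlers = {
            # handlers receive (client, *args); the event name itself
            # is never passed along
            "PRIVMSG": self.on_privmsg,
        }

    def on_privmsg(self, client, actor, recipient, message):
        client.msg(recipient, message)  # hypothetical echo behavior
```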
gem/oq-engine
openquake/hazardlib/gsim/munson_thurber_1997.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/munson_thurber_1997.py#L111-L151
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # assign constant log10e = np.log10(np.e) # Distance term R = np.sqrt(dists.rjb ** 2 + 11.29 ** 2) # Magnitude term M = rup.mag - 6 # Site term only distinguishes between lava and ash; # since ash sites have Vs30 in the range 60-200m/s, # we use this upper value as class separator S = np.zeros(R.shape) S[sites.vs30 <= 200] = 1 # Mean ground motion (natural log) # call super mean, stddevs = super().get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) if rup.mag > 7. and rup.mag <= 7.7: mean = (0.171 * (1 - M)) / log10e + mean elif rup.mag > 7.7: mean = (0.1512 + 0.387 * (1 - M)) / log10e + mean # define natural log of SA 0.3 sec and 0.2 sec if isinstance(imt, SA): if imt.period == 0.3: mean = np.log(2.2) + mean if imt.period == 0.2: mean = np.log(2.5) + mean return mean, stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# assign constant", "log10e", "=", "np", ".", "log10", "(", "np", ".", "e", ")", "# Distance term", "R", "=", "np", ".", "sqrt", "(", "dists", ".", "rjb", "**", "2", "+", "11.29", "**", "2", ")", "# Magnitude term", "M", "=", "rup", ".", "mag", "-", "6", "# Site term only distinguishes between lava and ash;", "# since ash sites have Vs30 in the range 60-200m/s,", "# we use this upper value as class separator", "S", "=", "np", ".", "zeros", "(", "R", ".", "shape", ")", "S", "[", "sites", ".", "vs30", "<=", "200", "]", "=", "1", "# Mean ground motion (natural log)", "# call super", "mean", ",", "stddevs", "=", "super", "(", ")", ".", "get_mean_and_stddevs", "(", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", "if", "rup", ".", "mag", ">", "7.", "and", "rup", ".", "mag", "<=", "7.7", ":", "mean", "=", "(", "0.171", "*", "(", "1", "-", "M", ")", ")", "/", "log10e", "+", "mean", "elif", "rup", ".", "mag", ">", "7.7", ":", "mean", "=", "(", "0.1512", "+", "0.387", "*", "(", "1", "-", "M", ")", ")", "/", "log10e", "+", "mean", "# define natural log of SA 0.3 sec and 0.2 sec", "if", "isinstance", "(", "imt", ",", "SA", ")", ":", "if", "imt", ".", "period", "==", "0.3", ":", "mean", "=", "np", ".", "log", "(", "2.2", ")", "+", "mean", "if", "imt", ".", "period", "==", "0.2", ":", "mean", "=", "np", ".", "log", "(", "2.5", ")", "+", "mean", "return", "mean", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
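A standalone numeric illustration of the distance, magnitude, and site terms computed above (the inputs are arbitrary test values, and the `super()` call that produces the base mean is out of scope here):

```python
import numpy as np

rjb = np.array([5.0, 20.0])          # Joyner-Boore distances, km
vs30 = np.array([150.0, 760.0])      # site shear-wave velocities, m/s
R = np.sqrt(rjb ** 2 + 11.29 ** 2)   # effective distance term
M = 7.2 - 6                          # magnitude term for an Mw 7.2 rupture
S = np.zeros(R.shape)
S[vs30 <= 200] = 1                   # ash (vs30 <= 200 m/s) vs lava sites
```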
pycontribs/pyrax
pyrax/object_storage.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L1446-L1465
def copy_object(self, container, obj, new_container, new_obj_name=None, content_type=None): """ Copies the object to the new container, optionally giving it a new name. If you copy to the same container, you must supply a different name. Returns the etag of the newly-copied object. You can optionally change the content_type of the object by supplying that in the 'content_type' parameter. """ nm = new_obj_name or utils.get_name(obj) uri = "/%s/%s" % (utils.get_name(new_container), nm) copy_from = "/%s/%s" % (utils.get_name(container), utils.get_name(obj)) headers = {"X-Copy-From": copy_from, "Content-Length": "0"} if content_type: headers["Content-Type"] = content_type resp, resp_body = self.api.method_put(uri, headers=headers) return resp.headers.get("etag")
[ "def", "copy_object", "(", "self", ",", "container", ",", "obj", ",", "new_container", ",", "new_obj_name", "=", "None", ",", "content_type", "=", "None", ")", ":", "nm", "=", "new_obj_name", "or", "utils", ".", "get_name", "(", "obj", ")", "uri", "=", "\"/%s/%s\"", "%", "(", "utils", ".", "get_name", "(", "new_container", ")", ",", "nm", ")", "copy_from", "=", "\"/%s/%s\"", "%", "(", "utils", ".", "get_name", "(", "container", ")", ",", "utils", ".", "get_name", "(", "obj", ")", ")", "headers", "=", "{", "\"X-Copy-From\"", ":", "copy_from", ",", "\"Content-Length\"", ":", "\"0\"", "}", "if", "content_type", ":", "headers", "[", "\"Content-Type\"", "]", "=", "content_type", "resp", ",", "resp_body", "=", "self", ".", "api", ".", "method_put", "(", "uri", ",", "headers", "=", "headers", ")", "return", "resp", ".", "headers", ".", "get", "(", "\"etag\"", ")" ]
Copies the object to the new container, optionally giving it a new name. If you copy to the same container, you must supply a different name. Returns the etag of the newly-copied object. You can optionally change the content_type of the object by supplying that in the 'content_type' parameter.
[ "Copies", "the", "object", "to", "the", "new", "container", "optionally", "giving", "it", "a", "new", "name", ".", "If", "you", "copy", "to", "the", "same", "container", "you", "must", "supply", "a", "different", "name", "." ]
python
train
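The copy_object record above builds a Swift-style server-side copy: a PUT to the destination path carrying an X-Copy-From header that names the source. A minimal sketch of just that request construction; build_copy_request is a hypothetical helper, not part of pyrax:

```python
def build_copy_request(container, obj_name, new_container, new_obj_name=None,
                       content_type=None):
    # Destination path; the object keeps its name unless a new one is given.
    name = new_obj_name or obj_name
    uri = "/%s/%s" % (new_container, name)
    # X-Copy-From tells the server where to copy from; the body is empty.
    headers = {"X-Copy-From": "/%s/%s" % (container, obj_name),
               "Content-Length": "0"}
    if content_type:
        headers["Content-Type"] = content_type
    return "PUT", uri, headers

print(build_copy_request("photos", "cat.jpg", "backup"))
# ('PUT', '/backup/cat.jpg', {'X-Copy-From': '/photos/cat.jpg', 'Content-Length': '0'})
```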
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxfile_functions.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxfile_functions.py#L97-L135
def download_dxfile(dxid, filename, chunksize=dxfile.DEFAULT_BUFFER_SIZE, append=False, show_progress=False, project=None, describe_output=None, **kwargs): ''' :param dxid: DNAnexus file ID or DXFile (file handler) object :type dxid: string or DXFile :param filename: Local filename :type filename: string :param append: If True, appends to the local file (default is to truncate local file if it exists) :type append: boolean :param project: project to use as context for this download (may affect which billing account is billed for this download). If None or DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server. :type project: str or None :param describe_output: (experimental) output of the file-xxxx/describe API call, if available. It will make it possible to skip another describe API call. It should contain the default fields of the describe API call output and the "parts" field, not included in the output by default. :type describe_output: dict or None Downloads the remote file referenced by *dxid* and saves it to *filename*. Example:: download_dxfile("file-xxxx", "localfilename.fastq") ''' # retry the inner loop while there are retriable errors part_retry_counter = defaultdict(lambda: 3) success = False while not success: success = _download_dxfile(dxid, filename, part_retry_counter, chunksize=chunksize, append=append, show_progress=show_progress, project=project, describe_output=describe_output, **kwargs)
[ "def", "download_dxfile", "(", "dxid", ",", "filename", ",", "chunksize", "=", "dxfile", ".", "DEFAULT_BUFFER_SIZE", ",", "append", "=", "False", ",", "show_progress", "=", "False", ",", "project", "=", "None", ",", "describe_output", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# retry the inner loop while there are retriable errors", "part_retry_counter", "=", "defaultdict", "(", "lambda", ":", "3", ")", "success", "=", "False", "while", "not", "success", ":", "success", "=", "_download_dxfile", "(", "dxid", ",", "filename", ",", "part_retry_counter", ",", "chunksize", "=", "chunksize", ",", "append", "=", "append", ",", "show_progress", "=", "show_progress", ",", "project", "=", "project", ",", "describe_output", "=", "describe_output", ",", "*", "*", "kwargs", ")" ]
:param dxid: DNAnexus file ID or DXFile (file handler) object :type dxid: string or DXFile :param filename: Local filename :type filename: string :param append: If True, appends to the local file (default is to truncate local file if it exists) :type append: boolean :param project: project to use as context for this download (may affect which billing account is billed for this download). If None or DXFile.NO_PROJECT_HINT, no project hint is supplied to the API server. :type project: str or None :param describe_output: (experimental) output of the file-xxxx/describe API call, if available. It will make it possible to skip another describe API call. It should contain the default fields of the describe API call output and the "parts" field, not included in the output by default. :type describe_output: dict or None Downloads the remote file referenced by *dxid* and saves it to *filename*. Example:: download_dxfile("file-xxxx", "localfilename.fastq")
[ ":", "param", "dxid", ":", "DNAnexus", "file", "ID", "or", "DXFile", "(", "file", "handler", ")", "object", ":", "type", "dxid", ":", "string", "or", "DXFile", ":", "param", "filename", ":", "Local", "filename", ":", "type", "filename", ":", "string", ":", "param", "append", ":", "If", "True", "appends", "to", "the", "local", "file", "(", "default", "is", "to", "truncate", "local", "file", "if", "it", "exists", ")", ":", "type", "append", ":", "boolean", ":", "param", "project", ":", "project", "to", "use", "as", "context", "for", "this", "download", "(", "may", "affect", "which", "billing", "account", "is", "billed", "for", "this", "download", ")", ".", "If", "None", "or", "DXFile", ".", "NO_PROJECT_HINT", "no", "project", "hint", "is", "supplied", "to", "the", "API", "server", ".", ":", "type", "project", ":", "str", "or", "None", ":", "param", "describe_output", ":", "(", "experimental", ")", "output", "of", "the", "file", "-", "xxxx", "/", "describe", "API", "call", "if", "available", ".", "It", "will", "make", "it", "possible", "to", "skip", "another", "describe", "API", "call", ".", "It", "should", "contain", "the", "default", "fields", "of", "the", "describe", "API", "call", "output", "and", "the", "parts", "field", "not", "included", "in", "the", "output", "by", "default", ".", ":", "type", "describe_output", ":", "dict", "or", "None" ]
python
train
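download_dxfile above wraps an inner download in a retry loop with a per-part budget of three attempts (the defaultdict(lambda: 3)). A self-contained sketch of that pattern; the names and the fetch_part callable are hypothetical:

```python
from collections import defaultdict

def download_with_retries(parts, fetch_part):
    # Each part gets its own budget of 3 attempts; the outer loop
    # repeats until every part has been fetched successfully.
    retry_budget = defaultdict(lambda: 3)
    done = set()
    while len(done) < len(parts):
        for part in parts:
            if part in done:
                continue
            if fetch_part(part):
                done.add(part)
            else:
                retry_budget[part] -= 1
                if retry_budget[part] <= 0:
                    raise RuntimeError("part %r failed too many times" % (part,))
    return done

attempts = {}
def flaky_fetch(part):                     # hypothetical: fails once per part
    attempts[part] = attempts.get(part, 0) + 1
    return attempts[part] > 1

print(download_with_retries(['p1', 'p2'], flaky_fetch))   # {'p1', 'p2'}
```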
knipknap/SpiffWorkflow
SpiffWorkflow/serializer/prettyxml.py
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/serializer/prettyxml.py#L100-L129
def deserialize_logical(self, node): """ Reads the logical tag from the given node, returns a Condition object. node -- the xml node (xml.dom.minidom.Node) """ term1_attrib = node.getAttribute('left-field') term1_value = node.getAttribute('left-value') op = node.nodeName.lower() term2_attrib = node.getAttribute('right-field') term2_value = node.getAttribute('right-value') if op not in _op_map: _exc('Invalid operator') if term1_attrib != '' and term1_value != '': _exc('Both, left-field and left-value attributes found') elif term1_attrib == '' and term1_value == '': _exc('left-field or left-value attribute required') elif term1_value != '': left = term1_value else: left = operators.Attrib(term1_attrib) if term2_attrib != '' and term2_value != '': _exc('Both, right-field and right-value attributes found') elif term2_attrib == '' and term2_value == '': _exc('right-field or right-value attribute required') elif term2_value != '': right = term2_value else: right = operators.Attrib(term2_attrib) return _op_map[op](left, right)
[ "def", "deserialize_logical", "(", "self", ",", "node", ")", ":", "term1_attrib", "=", "node", ".", "getAttribute", "(", "'left-field'", ")", "term1_value", "=", "node", ".", "getAttribute", "(", "'left-value'", ")", "op", "=", "node", ".", "nodeName", ".", "lower", "(", ")", "term2_attrib", "=", "node", ".", "getAttribute", "(", "'right-field'", ")", "term2_value", "=", "node", ".", "getAttribute", "(", "'right-value'", ")", "if", "op", "not", "in", "_op_map", ":", "_exc", "(", "'Invalid operator'", ")", "if", "term1_attrib", "!=", "''", "and", "term1_value", "!=", "''", ":", "_exc", "(", "'Both, left-field and left-value attributes found'", ")", "elif", "term1_attrib", "==", "''", "and", "term1_value", "==", "''", ":", "_exc", "(", "'left-field or left-value attribute required'", ")", "elif", "term1_value", "!=", "''", ":", "left", "=", "term1_value", "else", ":", "left", "=", "operators", ".", "Attrib", "(", "term1_attrib", ")", "if", "term2_attrib", "!=", "''", "and", "term2_value", "!=", "''", ":", "_exc", "(", "'Both, right-field and right-value attributes found'", ")", "elif", "term2_attrib", "==", "''", "and", "term2_value", "==", "''", ":", "_exc", "(", "'right-field or right-value attribute required'", ")", "elif", "term2_value", "!=", "''", ":", "right", "=", "term2_value", "else", ":", "right", "=", "operators", ".", "Attrib", "(", "term2_attrib", ")", "return", "_op_map", "[", "op", "]", "(", "left", ",", "right", ")" ]
Reads the logical tag from the given node, returns a Condition object. node -- the xml node (xml.dom.minidom.Node)
[ "Reads", "the", "logical", "tag", "from", "the", "given", "node", "returns", "a", "Condition", "object", "." ]
python
valid
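deserialize_logical above reads operator tags where each side is either a literal (*-value) or an attribute lookup (*-field), and exactly one of the pair must be set. A small stdlib demo of the node shape it expects; the tag name 'equals' is an assumed key of the module's _op_map:

```python
from xml.dom.minidom import parseString

node = parseString(
    '<equals left-field="status" right-value="done"/>').documentElement
print(node.nodeName.lower())              # 'equals' -> looked up in _op_map
print(node.getAttribute('left-field'))    # 'status' -> wrapped in operators.Attrib
print(node.getAttribute('right-value'))   # 'done'   -> used as a literal value
print(node.getAttribute('left-value'))    # ''       -> absent attrs come back empty
```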
SBRG/ssbio
ssbio/protein/sequence/properties/tmhmm.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/tmhmm.py#L101-L169
def label_TM_tmhmm_residue_numbers_and_leaflets(tmhmm_seq): """Determine the residue numbers of the TM-helix residues that cross the membrane and label them by leaflet. Args: tmhmm_seq: g.protein.representative_sequence.seq_record.letter_annotations['TM-tmhmm'] Returns: leaflet_dict: a dictionary with leaflet_variable : [residue list] where the variable is inside or outside TM_boundary dict: outputs a dictionar with : TM helix number : [TM helix residue start , TM helix residue end] TODO: untested method! """ TM_number_dict = {} T_index = [] T_residue = [] residue_count = 1 for residue_label in tmhmm_seq: if residue_label == 'T': T_residue.append(residue_count) residue_count = residue_count + 1 TM_number_dict.update({'T_residue': T_residue}) # finding the TM boundaries T_residue_list = TM_number_dict['T_residue'] count = 0 max_count = len(T_residue_list) - 1 TM_helix_count = 0 TM_boundary_dict = {} while count <= max_count: # first residue = TM start if count == 0: TM_start = T_residue_list[count] count = count + 1 continue # Last residue = TM end elif count == max_count: TM_end = T_residue_list[count] TM_helix_count = TM_helix_count + 1 TM_boundary_dict.update({'TM_helix_' + str(TM_helix_count): [TM_start, TM_end]}) break # middle residues need to be start or end elif T_residue_list[count] != T_residue_list[count + 1] - 1: TM_end = T_residue_list[count] TM_helix_count = TM_helix_count + 1 TM_boundary_dict.update({'TM_helix_' + str(TM_helix_count): [TM_start, TM_end]}) # new TM_start TM_start = T_residue_list[count + 1] count = count + 1 # assign leaflet to proper TM residues O or I leaflet_dict = {} for leaflet in ['O', 'I']: leaflet_list = [] for TM_helix, TM_residues in TM_boundary_dict.items(): for residue_num in TM_residues: tmhmm_seq_index = residue_num - 1 previous_residue = tmhmm_seq_index - 1 next_residue = tmhmm_seq_index + 1 # identify if the previous or next residue closest to the TM helix start/end is the proper leaflet if tmhmm_seq[previous_residue] == leaflet or tmhmm_seq[next_residue] == leaflet: leaflet_list.append(residue_num) leaflet_dict.update({'tmhmm_leaflet_' + leaflet: leaflet_list}) return TM_boundary_dict, leaflet_dict
[ "def", "label_TM_tmhmm_residue_numbers_and_leaflets", "(", "tmhmm_seq", ")", ":", "TM_number_dict", "=", "{", "}", "T_index", "=", "[", "]", "T_residue", "=", "[", "]", "residue_count", "=", "1", "for", "residue_label", "in", "tmhmm_seq", ":", "if", "residue_label", "==", "'T'", ":", "T_residue", ".", "append", "(", "residue_count", ")", "residue_count", "=", "residue_count", "+", "1", "TM_number_dict", ".", "update", "(", "{", "'T_residue'", ":", "T_residue", "}", ")", "# finding the TM boundaries", "T_residue_list", "=", "TM_number_dict", "[", "'T_residue'", "]", "count", "=", "0", "max_count", "=", "len", "(", "T_residue_list", ")", "-", "1", "TM_helix_count", "=", "0", "TM_boundary_dict", "=", "{", "}", "while", "count", "<=", "max_count", ":", "# first residue = TM start", "if", "count", "==", "0", ":", "TM_start", "=", "T_residue_list", "[", "count", "]", "count", "=", "count", "+", "1", "continue", "# Last residue = TM end", "elif", "count", "==", "max_count", ":", "TM_end", "=", "T_residue_list", "[", "count", "]", "TM_helix_count", "=", "TM_helix_count", "+", "1", "TM_boundary_dict", ".", "update", "(", "{", "'TM_helix_'", "+", "str", "(", "TM_helix_count", ")", ":", "[", "TM_start", ",", "TM_end", "]", "}", ")", "break", "# middle residues need to be start or end", "elif", "T_residue_list", "[", "count", "]", "!=", "T_residue_list", "[", "count", "+", "1", "]", "-", "1", ":", "TM_end", "=", "T_residue_list", "[", "count", "]", "TM_helix_count", "=", "TM_helix_count", "+", "1", "TM_boundary_dict", ".", "update", "(", "{", "'TM_helix_'", "+", "str", "(", "TM_helix_count", ")", ":", "[", "TM_start", ",", "TM_end", "]", "}", ")", "# new TM_start", "TM_start", "=", "T_residue_list", "[", "count", "+", "1", "]", "count", "=", "count", "+", "1", "# assign leaflet to proper TM residues O or I", "leaflet_dict", "=", "{", "}", "for", "leaflet", "in", "[", "'O'", ",", "'I'", "]", ":", "leaflet_list", "=", "[", "]", "for", "TM_helix", ",", "TM_residues", "in", "TM_boundary_dict", ".", "items", "(", ")", ":", "for", "residue_num", "in", "TM_residues", ":", "tmhmm_seq_index", "=", "residue_num", "-", "1", "previous_residue", "=", "tmhmm_seq_index", "-", "1", "next_residue", "=", "tmhmm_seq_index", "+", "1", "# identify if the previous or next residue closest to the TM helix start/end is the proper leaflet", "if", "tmhmm_seq", "[", "previous_residue", "]", "==", "leaflet", "or", "tmhmm_seq", "[", "next_residue", "]", "==", "leaflet", ":", "leaflet_list", ".", "append", "(", "residue_num", ")", "leaflet_dict", ".", "update", "(", "{", "'tmhmm_leaflet_'", "+", "leaflet", ":", "leaflet_list", "}", ")", "return", "TM_boundary_dict", ",", "leaflet_dict" ]
Determine the residue numbers of the TM-helix residues that cross the membrane and label them by leaflet. Args: tmhmm_seq: g.protein.representative_sequence.seq_record.letter_annotations['TM-tmhmm'] Returns: leaflet_dict: a dictionary with leaflet_variable : [residue list] where the variable is inside or outside TM_boundary dict: outputs a dictionary with: TM helix number : [TM helix residue start , TM helix residue end] TODO: untested method!
[ "Determine", "the", "residue", "numbers", "of", "the", "TM", "-", "helix", "residues", "that", "cross", "the", "membrane", "and", "label", "them", "by", "leaflet", "." ]
python
train
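The boundary scan above walks the 'T' residues and splits them into helices wherever the residue numbering jumps. An equivalent, much shorter sketch using itertools.groupby over a toy label string; this is an illustration, not the library's code:

```python
from itertools import groupby

def tm_helix_boundaries(tmhmm_seq):
    # Group consecutive 'T' labels (1-based residue numbering) into
    # [start, end] helix spans -- the same result the while-loop computes.
    helices = {}
    count = 0
    for label, run in groupby(enumerate(tmhmm_seq, start=1),
                              key=lambda pair: pair[1]):
        run = list(run)
        if label == 'T':
            count += 1
            helices['TM_helix_%d' % count] = [run[0][0], run[-1][0]]
    return helices

print(tm_helix_boundaries('IIITTTTOOOTTTTIII'))
# {'TM_helix_1': [4, 7], 'TM_helix_2': [11, 14]}
```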
cqparts/cqparts
src/cqparts/search.py
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/search.py#L151-L220
def common_criteria(**common): """ Wrap a function to always call with the given ``common`` named parameters. :property common: criteria common to your function call :return: decorator function :rtype: :class:`function` .. doctest:: >>> import cqparts >>> from cqparts.search import register, search, find >>> from cqparts.search import common_criteria >>> # Somebody elses (boring) library may register with... >>> @register(a='one', b='two') ... class BoringThing(cqparts.Part): ... pass >>> # But your library is awesome; only registering with unique criteria... >>> lib_criteria = { ... 'author': 'your_name', ... 'libname': 'awesome_things', ... } >>> awesome_register = common_criteria(**lib_criteria)(register) >>> @awesome_register(a='one', b='two') # identical to BoringThing ... class AwesomeThing(cqparts.Part): ... pass >>> # So lets try a search >>> len(search(a='one', b='two')) # doctest: +SKIP 2 >>> # oops, that returned both classes >>> # To narrow it down, we add something unique: >>> len(search(a='one', b='two', libname='awesome_things')) # finds only yours # doctest: +SKIP 1 >>> # or, we could use common_criteria again... >>> awesome_search = common_criteria(**lib_criteria)(search) >>> awesome_find = common_criteria(**lib_criteria)(find) >>> len(awesome_search(a='one', b='two')) # doctest: +SKIP 1 >>> awesome_find(a='one', b='two').__name__ 'AwesomeThing' A good universal way to apply unique criteria is with .. testcode:: import cadquery, cqparts from cqparts.search import register, common_criteria _register = common_criteria(module=__name__)(register) @_register(shape='cube', scale='unit') class Cube(cqparts.Part): # just an example... def make(self): return cadquery.Workplane('XY').box(1, 1, 1) """ def decorator(func): def inner(*args, **kwargs): merged_kwargs = copy(common) merged_kwargs.update(kwargs) return func(*args, **merged_kwargs) return inner return decorator
[ "def", "common_criteria", "(", "*", "*", "common", ")", ":", "def", "decorator", "(", "func", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "merged_kwargs", "=", "copy", "(", "common", ")", "merged_kwargs", ".", "update", "(", "kwargs", ")", "return", "func", "(", "*", "args", ",", "*", "*", "merged_kwargs", ")", "return", "inner", "return", "decorator" ]
Wrap a function to always call with the given ``common`` named parameters. :property common: criteria common to your function call :return: decorator function :rtype: :class:`function` .. doctest:: >>> import cqparts >>> from cqparts.search import register, search, find >>> from cqparts.search import common_criteria >>> # Somebody elses (boring) library may register with... >>> @register(a='one', b='two') ... class BoringThing(cqparts.Part): ... pass >>> # But your library is awesome; only registering with unique criteria... >>> lib_criteria = { ... 'author': 'your_name', ... 'libname': 'awesome_things', ... } >>> awesome_register = common_criteria(**lib_criteria)(register) >>> @awesome_register(a='one', b='two') # identical to BoringThing ... class AwesomeThing(cqparts.Part): ... pass >>> # So lets try a search >>> len(search(a='one', b='two')) # doctest: +SKIP 2 >>> # oops, that returned both classes >>> # To narrow it down, we add something unique: >>> len(search(a='one', b='two', libname='awesome_things')) # finds only yours # doctest: +SKIP 1 >>> # or, we could use common_criteria again... >>> awesome_search = common_criteria(**lib_criteria)(search) >>> awesome_find = common_criteria(**lib_criteria)(find) >>> len(awesome_search(a='one', b='two')) # doctest: +SKIP 1 >>> awesome_find(a='one', b='two').__name__ 'AwesomeThing' A good universal way to apply unique criteria is with .. testcode:: import cadquery, cqparts from cqparts.search import register, common_criteria _register = common_criteria(module=__name__)(register) @_register(shape='cube', scale='unit') class Cube(cqparts.Part): # just an example... def make(self): return cadquery.Workplane('XY').box(1, 1, 1)
[ "Wrap", "a", "function", "to", "always", "call", "with", "the", "given", "common", "named", "parameters", "." ]
python
train
CartoDB/cartoframes
cartoframes/datasets.py
https://github.com/CartoDB/cartoframes/blob/c94238a545f3dec45963dac3892540942b6f0df8/cartoframes/datasets.py#L313-L324
def _encode_decode_decorator(func): """decorator for encoding and decoding geoms""" def wrapper(*args): """error catching""" try: processed_geom = func(*args) return processed_geom except ImportError as err: raise ImportError('The Python package `shapely` needs to be ' 'installed to encode or decode geometries. ' '({})'.format(err)) return wrapper
[ "def", "_encode_decode_decorator", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ")", ":", "\"\"\"error catching\"\"\"", "try", ":", "processed_geom", "=", "func", "(", "*", "args", ")", "return", "processed_geom", "except", "ImportError", "as", "err", ":", "raise", "ImportError", "(", "'The Python package `shapely` needs to be '", "'installed to encode or decode geometries. '", "'({})'", ".", "format", "(", "err", ")", ")", "return", "wrapper" ]
decorator for encoding and decoding geoms
[ "decorator", "for", "encoding", "and", "decoding", "geoms" ]
python
train
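The decorator above converts a missing shapely import into a friendlier ImportError. A standalone copy with a hypothetical decorated function showing how the re-raise fires; the shapely call assumes shapely's wkb.loads with hex=True:

```python
def _encode_decode_decorator(func):          # standalone copy for illustration
    def wrapper(*args):
        try:
            return func(*args)
        except ImportError as err:
            raise ImportError('The Python package `shapely` needs to be '
                              'installed to encode or decode geometries. '
                              '({})'.format(err))
    return wrapper

@_encode_decode_decorator
def decode_geom(wkb_hex):                    # hypothetical usage
    from shapely import wkb       # an ImportError here is re-raised with a hint
    return wkb.loads(wkb_hex, hex=True)
```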
cltl/KafNafParserPy
KafNafParserPy/feature_extractor/constituency.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/feature_extractor/constituency.py#L218-L248
def get_deepest_subsumer(self,list_terms): ''' Returns the labels of the deepest node that subsumes all the terms in the list of terms id's provided ''' #To store with how many terms every nonterminal appears count_per_no_terminal = defaultdict(int) #To store the total deep of each noter for all the term ides (as we want the deepest) total_deep_per_no_terminal = defaultdict(int) for term_id in list_terms: terminal_id = self.terminal_for_term.get(term_id) path = self.paths_for_terminal[terminal_id][0] print(term_id, path) for c,noter in enumerate(path): count_per_no_terminal[noter] += 1 total_deep_per_no_terminal[noter] += c deepest_and_common = None deepest = 10000 for noterid, this_total in total_deep_per_no_terminal.items(): if count_per_no_terminal.get(noterid,-1) == len(list_terms): ##Only the nontarms that ocurr with all the term ids in the input if this_total < deepest: deepest = this_total deepest_and_common = noterid label = None if deepest_and_common is not None: label = self.label_for_nonter[deepest_and_common] return deepest_and_common, label
[ "def", "get_deepest_subsumer", "(", "self", ",", "list_terms", ")", ":", "#To store with how many terms every nonterminal appears", "count_per_no_terminal", "=", "defaultdict", "(", "int", ")", "#To store the total deep of each noter for all the term ides (as we want the deepest)", "total_deep_per_no_terminal", "=", "defaultdict", "(", "int", ")", "for", "term_id", "in", "list_terms", ":", "terminal_id", "=", "self", ".", "terminal_for_term", ".", "get", "(", "term_id", ")", "path", "=", "self", ".", "paths_for_terminal", "[", "terminal_id", "]", "[", "0", "]", "print", "(", "term_id", ",", "path", ")", "for", "c", ",", "noter", "in", "enumerate", "(", "path", ")", ":", "count_per_no_terminal", "[", "noter", "]", "+=", "1", "total_deep_per_no_terminal", "[", "noter", "]", "+=", "c", "deepest_and_common", "=", "None", "deepest", "=", "10000", "for", "noterid", ",", "this_total", "in", "total_deep_per_no_terminal", ".", "items", "(", ")", ":", "if", "count_per_no_terminal", ".", "get", "(", "noterid", ",", "-", "1", ")", "==", "len", "(", "list_terms", ")", ":", "##Only the nontarms that ocurr with all the term ids in the input", "if", "this_total", "<", "deepest", ":", "deepest", "=", "this_total", "deepest_and_common", "=", "noterid", "label", "=", "None", "if", "deepest_and_common", "is", "not", "None", ":", "label", "=", "self", ".", "label_for_nonter", "[", "deepest_and_common", "]", "return", "deepest_and_common", ",", "label" ]
Returns the labels of the deepest node that subsumes all the terms in the list of term ids provided
[ "Returns", "the", "labels", "of", "the", "deepest", "node", "that", "subsumes", "all", "the", "terms", "in", "the", "list", "of", "term", "ids", "provided" ]
python
train
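get_deepest_subsumer above counts, for every nonterminal, how many input terms it dominates and how deep it sits on each path; among the nodes covering all terms, the smallest summed depth wins. A compact sketch of that counting scheme, assuming paths are ordered leaf-to-root (the record does not state the ordering):

```python
from collections import defaultdict

def deepest_common_subsumer(paths):
    # paths: term id -> list of nonterminal ids from leaf to root (assumed).
    count = defaultdict(int)
    total_depth = defaultdict(int)
    for path in paths.values():
        for depth, node in enumerate(path):
            count[node] += 1
            total_depth[node] += depth
    # Only nodes seen on every path subsume all terms; the one with the
    # smallest summed distance from the leaves is the deepest of them.
    common = [n for n in total_depth if count[n] == len(paths)]
    return min(common, key=total_depth.get) if common else None

paths = {'t1': ['np1', 's1'], 't2': ['vp1', 's1']}
print(deepest_common_subsumer(paths))   # 's1'
```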
pyviz/holoviews
holoviews/core/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/util.py#L733-L746
def _process_underscores(self, tokens): "Strip underscores to make sure the number is correct after join" groups = [[str(''.join(el))] if b else list(el) for (b,el) in itertools.groupby(tokens, lambda k: k=='_')] flattened = [el for group in groups for el in group] processed = [] for token in flattened: if token == '_': continue if token.startswith('_'): token = str(token[1:]) if token.endswith('_'): token = str(token[:-1]) processed.append(token) return processed
[ "def", "_process_underscores", "(", "self", ",", "tokens", ")", ":", "groups", "=", "[", "[", "str", "(", "''", ".", "join", "(", "el", ")", ")", "]", "if", "b", "else", "list", "(", "el", ")", "for", "(", "b", ",", "el", ")", "in", "itertools", ".", "groupby", "(", "tokens", ",", "lambda", "k", ":", "k", "==", "'_'", ")", "]", "flattened", "=", "[", "el", "for", "group", "in", "groups", "for", "el", "in", "group", "]", "processed", "=", "[", "]", "for", "token", "in", "flattened", ":", "if", "token", "==", "'_'", ":", "continue", "if", "token", ".", "startswith", "(", "'_'", ")", ":", "token", "=", "str", "(", "token", "[", "1", ":", "]", ")", "if", "token", ".", "endswith", "(", "'_'", ")", ":", "token", "=", "str", "(", "token", "[", ":", "-", "1", "]", ")", "processed", ".", "append", "(", "token", ")", "return", "processed" ]
Strip underscores to make sure the number is correct after join
[ "Strip", "underscores", "to", "make", "sure", "the", "number", "is", "correct", "after", "join" ]
python
train
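A quick demo of why the stripping matters: after the runs of underscores are joined and trimmed, a later '_'.join reproduces exactly the original number of underscores. The function body is copied from the record; the join step is an assumption about how the result is used:

```python
from itertools import groupby

def _process_underscores(tokens):
    # standalone copy of the method above
    groups = [[str(''.join(el))] if b else list(el)
              for (b, el) in groupby(tokens, lambda k: k == '_')]
    flattened = [el for group in groups for el in group]
    processed = []
    for token in flattened:
        if token == '_':
            continue
        if token.startswith('_'):
            token = str(token[1:])
        if token.endswith('_'):
            token = str(token[:-1])
        processed.append(token)
    return processed

print('_'.join(_process_underscores(['a', '_', 'b'])))       # a_b
print('_'.join(_process_underscores(['a', '_', '_', 'b'])))  # a__b
```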
nens/turn
turn/tools.py
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/tools.py#L56-L83
def lock(resources, *args, **kwargs): """ Lock resources from the command line, for example for maintenance. """ # all resources are locked if nothing is specified if not resources: client = redis.Redis(decode_responses=True, **kwargs) resources = find_resources(client) if not resources: return # create one process per pid locker = Locker(**kwargs) while len(resources) > 1: pid = os.fork() resources = resources[:1] if pid else resources[1:] # at this point there is only one resource - lock it down resource = resources[0] try: print('{}: acquiring'.format(resource)) with locker.lock(resource, label='lock tool'): print('{}: locked'.format(resource)) try: signal.pause() except KeyboardInterrupt: print('{}: released'.format(resource)) except KeyboardInterrupt: print('{}: canceled'.format(resource))
[ "def", "lock", "(", "resources", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# all resources are locked if nothing is specified", "if", "not", "resources", ":", "client", "=", "redis", ".", "Redis", "(", "decode_responses", "=", "True", ",", "*", "*", "kwargs", ")", "resources", "=", "find_resources", "(", "client", ")", "if", "not", "resources", ":", "return", "# create one process per pid", "locker", "=", "Locker", "(", "*", "*", "kwargs", ")", "while", "len", "(", "resources", ")", ">", "1", ":", "pid", "=", "os", ".", "fork", "(", ")", "resources", "=", "resources", "[", ":", "1", "]", "if", "pid", "else", "resources", "[", "1", ":", "]", "# at this point there is only one resource - lock it down", "resource", "=", "resources", "[", "0", "]", "try", ":", "print", "(", "'{}: acquiring'", ".", "format", "(", "resource", ")", ")", "with", "locker", ".", "lock", "(", "resource", ",", "label", "=", "'lock tool'", ")", ":", "print", "(", "'{}: locked'", ".", "format", "(", "resource", ")", ")", "try", ":", "signal", ".", "pause", "(", ")", "except", "KeyboardInterrupt", ":", "print", "(", "'{}: released'", ".", "format", "(", "resource", ")", ")", "except", "KeyboardInterrupt", ":", "print", "(", "'{}: canceled'", ".", "format", "(", "resource", ")", ")" ]
Lock resources from the command line, for example for maintenance.
[ "Lock", "resources", "from", "the", "command", "line", "for", "example", "for", "maintenance", "." ]
python
train
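The lock tool above fans out one process per resource with a fork loop: the parent keeps the head of the list, the child keeps the tail. A minimal POSIX-only sketch of that pattern (os.fork is unavailable on Windows):

```python
import os

def fan_out(items):
    # Repeatedly fork until every process holds exactly one item:
    # the parent keeps the head of the list, the child keeps the tail.
    while len(items) > 1:
        pid = os.fork()
        items = items[:1] if pid else items[1:]
    return items[0]

# Each of the three resulting processes prints a different resource name.
print('process %d handles %s' % (os.getpid(), fan_out(['a', 'b', 'c'])))
```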
alvinwan/TexSoup
TexSoup/data.py
https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/data.py#L413-L433
def find(self, name=None, **attrs): r"""First descendant node matching criteria. Returns None if no descendant node found. :return: descendant node matching criteria :rtype: Union[None,TexExpr] >>> from TexSoup import TexSoup >>> soup = TexSoup(r''' ... \section{Ooo} ... \textit{eee} ... \textit{ooo}''') >>> soup.find('textit') \textit{eee} >>> soup.find('textbf') """ try: return next(self.find_all(name, **attrs)) except StopIteration: return None
[ "def", "find", "(", "self", ",", "name", "=", "None", ",", "*", "*", "attrs", ")", ":", "try", ":", "return", "next", "(", "self", ".", "find_all", "(", "name", ",", "*", "*", "attrs", ")", ")", "except", "StopIteration", ":", "return", "None" ]
r"""First descendant node matching criteria. Returns None if no descendant node found. :return: descendant node matching criteria :rtype: Union[None,TexExpr] >>> from TexSoup import TexSoup >>> soup = TexSoup(r''' ... \section{Ooo} ... \textit{eee} ... \textit{ooo}''') >>> soup.find('textit') \textit{eee} >>> soup.find('textbf')
[ "r", "First", "descendant", "node", "matching", "criteria", "." ]
python
train
bunq/sdk_python
bunq/sdk/json/adapters.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/json/adapters.py#L260-L285
def deserialize(cls, target_class, obj): """ :type target_class: context.InstallationContext|type :type obj: dict :rtype: context.InstallationContext """ installation_context = target_class.__new__(target_class) private_key_client = security.rsa_key_from_string( obj[cls._FIELD_PRIVATE_KEY_CLIENT] ) public_key_client = security.rsa_key_from_string( obj[cls._FIELD_PUBLIC_KEY_CLIENT] ) public_key_server = security.rsa_key_from_string( obj[cls._FIELD_PUBLIC_KEY_SERVER] ) installation_context.__dict__ = { cls._ATTRIBUTE_TOKEN: obj[cls._FIELD_TOKEN], cls._ATTRIBUTE_PRIVATE_KEY_CLIENT: private_key_client, cls._ATTRIBUTE_PUBLIC_KEY_CLIENT: public_key_client, cls._ATTRIBUTE_PUBLIC_KEY_SERVER: public_key_server, } return installation_context
[ "def", "deserialize", "(", "cls", ",", "target_class", ",", "obj", ")", ":", "installation_context", "=", "target_class", ".", "__new__", "(", "target_class", ")", "private_key_client", "=", "security", ".", "rsa_key_from_string", "(", "obj", "[", "cls", ".", "_FIELD_PRIVATE_KEY_CLIENT", "]", ")", "public_key_client", "=", "security", ".", "rsa_key_from_string", "(", "obj", "[", "cls", ".", "_FIELD_PUBLIC_KEY_CLIENT", "]", ")", "public_key_server", "=", "security", ".", "rsa_key_from_string", "(", "obj", "[", "cls", ".", "_FIELD_PUBLIC_KEY_SERVER", "]", ")", "installation_context", ".", "__dict__", "=", "{", "cls", ".", "_ATTRIBUTE_TOKEN", ":", "obj", "[", "cls", ".", "_FIELD_TOKEN", "]", ",", "cls", ".", "_ATTRIBUTE_PRIVATE_KEY_CLIENT", ":", "private_key_client", ",", "cls", ".", "_ATTRIBUTE_PUBLIC_KEY_CLIENT", ":", "public_key_client", ",", "cls", ".", "_ATTRIBUTE_PUBLIC_KEY_SERVER", ":", "public_key_server", ",", "}", "return", "installation_context" ]
:type target_class: context.InstallationContext|type :type obj: dict :rtype: context.InstallationContext
[ ":", "type", "target_class", ":", "context", ".", "InstallationContext|type", ":", "type", "obj", ":", "dict" ]
python
train
krischer/mtspec
mtspec/multitaper.py
https://github.com/krischer/mtspec/blob/06561b6370f13fcb2e731470ba0f7314f4b2362d/mtspec/multitaper.py#L623-L749
def mt_deconvolve(data_a, data_b, delta, nfft=None, time_bandwidth=None, number_of_tapers=None, weights="adaptive", demean=True, fmax=0.0): """ Deconvolve two time series using multitapers. This uses the eigencoefficients and the weights from the multitaper spectral estimations and more or less follows this paper: .. |br| raw:: html <br /> **Receiver Functions from Multiple-Taper Spectral Correlation Estimates** *Jeffrey Park, Vadim Levin* |br| Bulletin of the Seismological Society of America Dec 2000, 90 (6) 1507-1520 http://dx.doi.org/10.1785/0119990122 :type data_a: :class:`numpy.ndarray` :param data_a: Data for first time series. :type data_b: :class:`numpy.ndarray` :param data_b: Data for second time series. :type delta: float :param delta: Sample spacing of the data. :type nfft: int :param nfft: Number of points for the FFT. If ``nfft == None``, no zero padding will be applied before the FFT. :type time_bandwidth: float :param time_bandwidth: Time-bandwidth product. Common values are 2, 3, 4, and numbers in between. :type number_of_tapers: int :param number_of_tapers: Number of tapers to use. Defaults to ``int(2*time_bandwidth) - 1``. This is maximum senseful amount. More tapers will have no great influence on the final spectrum but increase the calculation time. Use fewer tapers for a faster calculation. :type weights: str :param weights: ``"adaptive"`` or ``"constant"`` weights. :type deman: bool :param demean: Force the complex TF to be demeaned. :type fmax: float :param fmax: Maximum frequency for lowpass cosine filter. Set this to zero to not have a filter. :return: Returns a dictionary with 5 :class:`numpy.ndarray`'s. See the note below. .. note:: Returns a dictionary with five arrays: * ``"deconvolved"``: Deconvolved time series. * ``"spectrum_a"``: Spectrum of the first time series. * ``"spectrum_b"``: Spectrum of the second time series. * ``"spectral_ratio"``: The ratio of both spectra. * ``"frequencies"``: The used frequency bins for the spectra. """ npts = len(data_a) if len(data_b) != npts: raise ValueError("Input arrays must have the same length!") if nfft is None: nfft = npts elif nfft < npts: raise ValueError("nfft must be larger then the number of samples in " "the array.") # Deconvolution utilizes the 32bit version. mt = _MtspecType("float32") # Use the optimal number of tapers in case no number is specified. if number_of_tapers is None: number_of_tapers = int(2 * time_bandwidth) - 1 # Transform the data to work with the library. data_a = np.require(data_a, mt.float, requirements=[mt.order]) data_b = np.require(data_b, mt.float, requirements=[mt.order]) nf = nfft // 2 + 1 # Internally uses integers if demean: demean = 1 else: demean = 0 # iad = 0 are adaptive, iad = 1 are constant weight - this is # counter intuitive. if weights == "constant": adaptive = 1 elif weights == "adaptive": adaptive = 0 else: raise ValueError('Weights must be either "adaptive" or "constant".') tfun = mt.empty(nfft) freq = mt.empty(nf) spec_ratio = mt.empty(nf) speci = mt.empty(nf) specj = mt.empty(nf) mtspeclib.mt_deconv_( C.byref(C.c_int(int(npts))), C.byref(C.c_int(int(nfft))), C.byref(C.c_float(float(delta))), mt.p(data_a), mt.p(data_b), C.byref(C.c_float(float(time_bandwidth))), C.byref(C.c_int(int(number_of_tapers))), C.byref(C.c_int(int(nf))), C.byref(C.c_int(adaptive)), mt.p(freq), mt.p(tfun), mt.p(spec_ratio), mt.p(speci), mt.p(specj), C.byref(C.c_int(demean)), C.byref(C.c_float(fmax))) return { "frequencies": freq, "deconvolved": tfun, "spectral_ratio": spec_ratio, "spectrum_a": speci, "spectrum_b": specj }
[ "def", "mt_deconvolve", "(", "data_a", ",", "data_b", ",", "delta", ",", "nfft", "=", "None", ",", "time_bandwidth", "=", "None", ",", "number_of_tapers", "=", "None", ",", "weights", "=", "\"adaptive\"", ",", "demean", "=", "True", ",", "fmax", "=", "0.0", ")", ":", "npts", "=", "len", "(", "data_a", ")", "if", "len", "(", "data_b", ")", "!=", "npts", ":", "raise", "ValueError", "(", "\"Input arrays must have the same length!\"", ")", "if", "nfft", "is", "None", ":", "nfft", "=", "npts", "elif", "nfft", "<", "npts", ":", "raise", "ValueError", "(", "\"nfft must be larger then the number of samples in \"", "\"the array.\"", ")", "# Deconvolution utilizes the 32bit version.", "mt", "=", "_MtspecType", "(", "\"float32\"", ")", "# Use the optimal number of tapers in case no number is specified.", "if", "number_of_tapers", "is", "None", ":", "number_of_tapers", "=", "int", "(", "2", "*", "time_bandwidth", ")", "-", "1", "# Transform the data to work with the library.", "data_a", "=", "np", ".", "require", "(", "data_a", ",", "mt", ".", "float", ",", "requirements", "=", "[", "mt", ".", "order", "]", ")", "data_b", "=", "np", ".", "require", "(", "data_b", ",", "mt", ".", "float", ",", "requirements", "=", "[", "mt", ".", "order", "]", ")", "nf", "=", "nfft", "//", "2", "+", "1", "# Internally uses integers", "if", "demean", ":", "demean", "=", "1", "else", ":", "demean", "=", "0", "# iad = 0 are adaptive, iad = 1 are constant weight - this is", "# counter intuitive.", "if", "weights", "==", "\"constant\"", ":", "adaptive", "=", "1", "elif", "weights", "==", "\"adaptive\"", ":", "adaptive", "=", "0", "else", ":", "raise", "ValueError", "(", "'Weights must be either \"adaptive\" or \"constant\".'", ")", "tfun", "=", "mt", ".", "empty", "(", "nfft", ")", "freq", "=", "mt", ".", "empty", "(", "nf", ")", "spec_ratio", "=", "mt", ".", "empty", "(", "nf", ")", "speci", "=", "mt", ".", "empty", "(", "nf", ")", "specj", "=", "mt", ".", "empty", "(", "nf", ")", "mtspeclib", ".", "mt_deconv_", "(", "C", ".", "byref", "(", "C", ".", "c_int", "(", "int", "(", "npts", ")", ")", ")", ",", "C", ".", "byref", "(", "C", ".", "c_int", "(", "int", "(", "nfft", ")", ")", ")", ",", "C", ".", "byref", "(", "C", ".", "c_float", "(", "float", "(", "delta", ")", ")", ")", ",", "mt", ".", "p", "(", "data_a", ")", ",", "mt", ".", "p", "(", "data_b", ")", ",", "C", ".", "byref", "(", "C", ".", "c_float", "(", "float", "(", "time_bandwidth", ")", ")", ")", ",", "C", ".", "byref", "(", "C", ".", "c_int", "(", "int", "(", "number_of_tapers", ")", ")", ")", ",", "C", ".", "byref", "(", "C", ".", "c_int", "(", "int", "(", "nf", ")", ")", ")", ",", "C", ".", "byref", "(", "C", ".", "c_int", "(", "adaptive", ")", ")", ",", "mt", ".", "p", "(", "freq", ")", ",", "mt", ".", "p", "(", "tfun", ")", ",", "mt", ".", "p", "(", "spec_ratio", ")", ",", "mt", ".", "p", "(", "speci", ")", ",", "mt", ".", "p", "(", "specj", ")", ",", "C", ".", "byref", "(", "C", ".", "c_int", "(", "demean", ")", ")", ",", "C", ".", "byref", "(", "C", ".", "c_float", "(", "fmax", ")", ")", ")", "return", "{", "\"frequencies\"", ":", "freq", ",", "\"deconvolved\"", ":", "tfun", ",", "\"spectral_ratio\"", ":", "spec_ratio", ",", "\"spectrum_a\"", ":", "speci", ",", "\"spectrum_b\"", ":", "specj", "}" ]
Deconvolve two time series using multitapers. This uses the eigencoefficients and the weights from the multitaper spectral estimations and more or less follows this paper: .. |br| raw:: html <br /> **Receiver Functions from Multiple-Taper Spectral Correlation Estimates** *Jeffrey Park, Vadim Levin* |br| Bulletin of the Seismological Society of America Dec 2000, 90 (6) 1507-1520 http://dx.doi.org/10.1785/0119990122 :type data_a: :class:`numpy.ndarray` :param data_a: Data for first time series. :type data_b: :class:`numpy.ndarray` :param data_b: Data for second time series. :type delta: float :param delta: Sample spacing of the data. :type nfft: int :param nfft: Number of points for the FFT. If ``nfft == None``, no zero padding will be applied before the FFT. :type time_bandwidth: float :param time_bandwidth: Time-bandwidth product. Common values are 2, 3, 4, and numbers in between. :type number_of_tapers: int :param number_of_tapers: Number of tapers to use. Defaults to ``int(2*time_bandwidth) - 1``. This is maximum senseful amount. More tapers will have no great influence on the final spectrum but increase the calculation time. Use fewer tapers for a faster calculation. :type weights: str :param weights: ``"adaptive"`` or ``"constant"`` weights. :type deman: bool :param demean: Force the complex TF to be demeaned. :type fmax: float :param fmax: Maximum frequency for lowpass cosine filter. Set this to zero to not have a filter. :return: Returns a dictionary with 5 :class:`numpy.ndarray`'s. See the note below. .. note:: Returns a dictionary with five arrays: * ``"deconvolved"``: Deconvolved time series. * ``"spectrum_a"``: Spectrum of the first time series. * ``"spectrum_b"``: Spectrum of the second time series. * ``"spectral_ratio"``: The ratio of both spectra. * ``"frequencies"``: The used frequency bins for the spectra.
[ "Deconvolve", "two", "time", "series", "using", "multitapers", "." ]
python
train
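A hedged usage sketch for mt_deconvolve; it assumes the mtspec package is installed and exports the function at the top level, and the synthetic data, parameter values, and the ordering of the two series are illustrative only:

```python
import numpy as np
from mtspec import mt_deconvolve   # assumes the mtspec package is installed

delta = 0.01                                  # 100 Hz sampling
parent = np.random.randn(1024)
child = np.convolve(parent, [0.5, 0.3, 0.2], mode='same')

result = mt_deconvolve(child, parent, delta,
                       time_bandwidth=4.0, number_of_tapers=7,
                       weights='adaptive', demean=True)
impulse = result['deconvolved']        # time-domain deconvolution of the pair
freqs = result['frequencies']          # frequency bins of the spectra below
ratio = result['spectral_ratio']       # per-bin ratio of the two spectra
```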
dpkp/kafka-python
kafka/client_async.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/client_async.py#L440-L457
def is_disconnected(self, node_id): """Check whether the node connection has been disconnected or failed. A disconnected node has either been closed or has failed. Connection failures are usually transient and can be resumed in the next ready() call, but there are cases where transient failures need to be caught and re-acted upon. Arguments: node_id (int): the id of the node to check Returns: bool: True iff the node exists and is disconnected """ conn = self._conns.get(node_id) if conn is None: return False return conn.disconnected()
[ "def", "is_disconnected", "(", "self", ",", "node_id", ")", ":", "conn", "=", "self", ".", "_conns", ".", "get", "(", "node_id", ")", "if", "conn", "is", "None", ":", "return", "False", "return", "conn", ".", "disconnected", "(", ")" ]
Check whether the node connection has been disconnected or failed. A disconnected node has either been closed or has failed. Connection failures are usually transient and can be resumed in the next ready() call, but there are cases where transient failures need to be caught and re-acted upon. Arguments: node_id (int): the id of the node to check Returns: bool: True iff the node exists and is disconnected
[ "Check", "whether", "the", "node", "connection", "has", "been", "disconnected", "or", "failed", "." ]
python
train
hellock/icrawler
icrawler/utils/proxy_pool.py
https://github.com/hellock/icrawler/blob/38c925758fd3d3e568d3ecc993f77bc0acfa4788/icrawler/utils/proxy_pool.py#L177-L189
def add_proxy(self, proxy): """Add a valid proxy into pool You must call `add_proxy` method to add a proxy into pool instead of directly operate the `proxies` variable. """ protocol = proxy.protocol addr = proxy.addr if addr in self.proxies: self.proxies[protocol][addr].last_checked = proxy.last_checked else: self.proxies[protocol][addr] = proxy self.addr_list[protocol].append(addr)
[ "def", "add_proxy", "(", "self", ",", "proxy", ")", ":", "protocol", "=", "proxy", ".", "protocol", "addr", "=", "proxy", ".", "addr", "if", "addr", "in", "self", ".", "proxies", ":", "self", ".", "proxies", "[", "protocol", "]", "[", "addr", "]", ".", "last_checked", "=", "proxy", ".", "last_checked", "else", ":", "self", ".", "proxies", "[", "protocol", "]", "[", "addr", "]", "=", "proxy", "self", ".", "addr_list", "[", "protocol", "]", ".", "append", "(", "addr", ")" ]
Add a valid proxy into pool You must call `add_proxy` method to add a proxy into pool instead of directly operate the `proxies` variable.
[ "Add", "a", "valid", "proxy", "into", "pool" ]
python
train
vmlaker/mpipe
src/Stage.py
https://github.com/vmlaker/mpipe/blob/5a1804cf64271931f0cd3e4fff3e2b38291212dd/src/Stage.py#L42-L55
def get(self, timeout=None): """Retrieve results from all the output tubes.""" valid = False result = None for tube in self._output_tubes: if timeout: valid, result = tube.get(timeout) if valid: result = result[0] else: result = tube.get()[0] if timeout: return valid, result return result
[ "def", "get", "(", "self", ",", "timeout", "=", "None", ")", ":", "valid", "=", "False", "result", "=", "None", "for", "tube", "in", "self", ".", "_output_tubes", ":", "if", "timeout", ":", "valid", ",", "result", "=", "tube", ".", "get", "(", "timeout", ")", "if", "valid", ":", "result", "=", "result", "[", "0", "]", "else", ":", "result", "=", "tube", ".", "get", "(", ")", "[", "0", "]", "if", "timeout", ":", "return", "valid", ",", "result", "return", "result" ]
Retrieve results from all the output tubes.
[ "Retrieve", "results", "from", "all", "the", "output", "tubes", "." ]
python
train
mitsei/dlkit
dlkit/json_/grading/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/objects.py#L659-L677
def set_lowest_numeric_score(self, score): """Sets the lowest numeric score. arg: score (decimal): the lowest numeric score raise: InvalidArgument - ``score`` is invalid raise: NoAccess - ``score`` cannot be modified *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score if self.get_lowest_numeric_score_metadata().is_read_only(): raise errors.NoAccess() try: score = float(score) except ValueError: raise errors.InvalidArgument() if not self._is_valid_decimal(score, self.get_lowest_numeric_score_metadata()): raise errors.InvalidArgument() self._my_map['lowestNumericScore'] = score
[ "def", "set_lowest_numeric_score", "(", "self", ",", "score", ")", ":", "# Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score", "if", "self", ".", "get_lowest_numeric_score_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "try", ":", "score", "=", "float", "(", "score", ")", "except", "ValueError", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "if", "not", "self", ".", "_is_valid_decimal", "(", "score", ",", "self", ".", "get_lowest_numeric_score_metadata", "(", ")", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", ")", "self", ".", "_my_map", "[", "'lowestNumericScore'", "]", "=", "score" ]
Sets the lowest numeric score. arg: score (decimal): the lowest numeric score raise: InvalidArgument - ``score`` is invalid raise: NoAccess - ``score`` cannot be modified *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "lowest", "numeric", "score", "." ]
python
train
ninuxorg/nodeshot
nodeshot/core/metrics/signals.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/metrics/signals.py#L17-L25
def user_loggedin(sender, **kwargs): """ collect metrics about user logins """ values = { 'value': 1, 'path': kwargs['request'].path, 'user_id': str(kwargs['user'].pk), 'username': kwargs['user'].username, } write('user_logins', values=values)
[ "def", "user_loggedin", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "values", "=", "{", "'value'", ":", "1", ",", "'path'", ":", "kwargs", "[", "'request'", "]", ".", "path", ",", "'user_id'", ":", "str", "(", "kwargs", "[", "'user'", "]", ".", "pk", ")", ",", "'username'", ":", "kwargs", "[", "'user'", "]", ".", "username", ",", "}", "write", "(", "'user_logins'", ",", "values", "=", "values", ")" ]
collect metrics about user logins
[ "collect", "metrics", "about", "user", "logins" ]
python
train
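The receiver above has the (sender, **kwargs) shape of Django's user_logged_in signal, which supplies exactly the request and user keyword arguments it reads. A minimal wiring sketch, assuming a standard Django setup:

```python
from django.contrib.auth.signals import user_logged_in

# Connect the receiver so every successful login writes a metric point.
user_logged_in.connect(user_loggedin)
```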
pypa/pipenv
pipenv/vendor/requirementslib/models/requirements.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/requirements.py#L483-L493
def parse_hashes(self): # type: () -> None """ Parse hashes from *self.line* and set them on the current object. :returns: Nothing :rtype: None """ line, hashes = self.split_hashes(self.line) self.hashes = hashes self.line = line
[ "def", "parse_hashes", "(", "self", ")", ":", "# type: () -> None", "line", ",", "hashes", "=", "self", ".", "split_hashes", "(", "self", ".", "line", ")", "self", ".", "hashes", "=", "hashes", "self", ".", "line", "=", "line" ]
Parse hashes from *self.line* and set them on the current object. :returns: Nothing :rtype: None
[ "Parse", "hashes", "from", "*", "self", ".", "line", "*", "and", "set", "them", "on", "the", "current", "object", ".", ":", "returns", ":", "Nothing", ":", "rtype", ":", "None" ]
python
train
StagPython/StagPy
stagpy/time_series.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/time_series.py#L55-L93
def get_time_series(sdat, var, tstart, tend): """Extract or compute and rescale a time series. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. var (str): time series name, a key of :data:`stagpy.phyvars.TIME` or :data:`stagpy.phyvars.TIME_EXTRA`. tstart (float): starting time of desired series. Set to None to start at the beginning of available data. tend (float): ending time of desired series. Set to None to stop at the end of available data. Returns: tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`: series, time, meta series is the requested time series, time the time at which it is evaluated (set to None if it is the one of time series output by StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance holding metadata of the requested variable. """ tseries = sdat.tseries_between(tstart, tend) if var in tseries.columns: series = tseries[var] time = None if var in phyvars.TIME: meta = phyvars.TIME[var] else: meta = phyvars.Vart(var, None, '1') elif var in phyvars.TIME_EXTRA: meta = phyvars.TIME_EXTRA[var] series, time = meta.description(sdat, tstart, tend) meta = phyvars.Vart(misc.baredoc(meta.description), meta.kind, meta.dim) else: raise UnknownTimeVarError(var) series, _ = sdat.scale(series, meta.dim) if time is not None: time, _ = sdat.scale(time, 's') return series, time, meta
[ "def", "get_time_series", "(", "sdat", ",", "var", ",", "tstart", ",", "tend", ")", ":", "tseries", "=", "sdat", ".", "tseries_between", "(", "tstart", ",", "tend", ")", "if", "var", "in", "tseries", ".", "columns", ":", "series", "=", "tseries", "[", "var", "]", "time", "=", "None", "if", "var", "in", "phyvars", ".", "TIME", ":", "meta", "=", "phyvars", ".", "TIME", "[", "var", "]", "else", ":", "meta", "=", "phyvars", ".", "Vart", "(", "var", ",", "None", ",", "'1'", ")", "elif", "var", "in", "phyvars", ".", "TIME_EXTRA", ":", "meta", "=", "phyvars", ".", "TIME_EXTRA", "[", "var", "]", "series", ",", "time", "=", "meta", ".", "description", "(", "sdat", ",", "tstart", ",", "tend", ")", "meta", "=", "phyvars", ".", "Vart", "(", "misc", ".", "baredoc", "(", "meta", ".", "description", ")", ",", "meta", ".", "kind", ",", "meta", ".", "dim", ")", "else", ":", "raise", "UnknownTimeVarError", "(", "var", ")", "series", ",", "_", "=", "sdat", ".", "scale", "(", "series", ",", "meta", ".", "dim", ")", "if", "time", "is", "not", "None", ":", "time", ",", "_", "=", "sdat", ".", "scale", "(", "time", ",", "'s'", ")", "return", "series", ",", "time", ",", "meta" ]
Extract or compute and rescale a time series. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. var (str): time series name, a key of :data:`stagpy.phyvars.TIME` or :data:`stagpy.phyvars.TIME_EXTRA`. tstart (float): starting time of desired series. Set to None to start at the beginning of available data. tend (float): ending time of desired series. Set to None to stop at the end of available data. Returns: tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`: series, time, meta series is the requested time series, time the time at which it is evaluated (set to None if it is the one of time series output by StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance holding metadata of the requested variable.
[ "Extract", "or", "compute", "and", "rescale", "a", "time", "series", "." ]
python
train
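A hedged usage sketch for get_time_series; the run path is a placeholder and 'Tmean' is assumed to be a key of phyvars.TIME, neither confirmed by the record:

```python
from stagpy.stagyydata import StagyyData
from stagpy.time_series import get_time_series

sdat = StagyyData('path/to/stagyy_run')      # placeholder path
series, time, meta = get_time_series(sdat, 'Tmean', tstart=None, tend=None)
# `time` is None when the variable is a plain column of the StagYY
# time-series output; `meta.dim` carries the dimension used for scaling.
```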
CivicSpleen/ambry
ambry/identity.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/identity.py#L1597-L1603
def add_partition(self, p): """Add a partition identity as a child of a dataset identity.""" if not self.partitions: self.partitions = {} self.partitions[p.vid] = p
[ "def", "add_partition", "(", "self", ",", "p", ")", ":", "if", "not", "self", ".", "partitions", ":", "self", ".", "partitions", "=", "{", "}", "self", ".", "partitions", "[", "p", ".", "vid", "]", "=", "p" ]
Add a partition identity as a child of a dataset identity.
[ "Add", "a", "partition", "identity", "as", "a", "child", "of", "a", "dataset", "identity", "." ]
python
train
greenbender/pynntp
nntp/nntp.py
https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/nntp.py#L254-L301
def __info_gzip_gen(self): """Generator for the lines of a compressed info (textual) response. Compressed responses are an extension to the NNTP protocol supported by some usenet servers to reduce the bandwidth of heavily used range style commands that can return large amounts of textual data. This function handles gzip compressed responses that have the terminating line inside or outside the compressed data. From experience if the 'XFEATURE COMPRESS GZIP' command causes the terminating '.\\r\\n' to follow the compressed data and 'XFEATURE COMPRESS GZIP TERMINATOR' causes the terminator to be the last part of the compressed data (i.e the reply the gzipped version of the original reply - terminating line included) This function will produce that same output as the __info_plain_gen() function. In other words it takes care of decompression. Yields: A line of the info response. Raises: NNTPError: If data is required to be read from the socket and fails. NNTPDataError: If decompression fails. """ self.__generating = True inflate = zlib.decompressobj(15+32) done, buf = False, fifo.Fifo() while not done: try: data = inflate.decompress(next(self.__buf_gen())) except zlib.error: raise NNTPDataError("Decompression failed") if data: buf.write(data) if inflate.unused_data: buf.write(inflate.unused_data) for line in buf: if line == ".\r\n": done = True break if line.startswith("."): yield line[1:] yield line self.__generating = False
[ "def", "__info_gzip_gen", "(", "self", ")", ":", "self", ".", "__generating", "=", "True", "inflate", "=", "zlib", ".", "decompressobj", "(", "15", "+", "32", ")", "done", ",", "buf", "=", "False", ",", "fifo", ".", "Fifo", "(", ")", "while", "not", "done", ":", "try", ":", "data", "=", "inflate", ".", "decompress", "(", "next", "(", "self", ".", "__buf_gen", "(", ")", ")", ")", "except", "zlib", ".", "error", ":", "raise", "NNTPDataError", "(", "\"Decompression failed\"", ")", "if", "data", ":", "buf", ".", "write", "(", "data", ")", "if", "inflate", ".", "unused_data", ":", "buf", ".", "write", "(", "inflate", ".", "unused_data", ")", "for", "line", "in", "buf", ":", "if", "line", "==", "\".\\r\\n\"", ":", "done", "=", "True", "break", "if", "line", ".", "startswith", "(", "\".\"", ")", ":", "yield", "line", "[", "1", ":", "]", "yield", "line", "self", ".", "__generating", "=", "False" ]
Generator for the lines of a compressed info (textual) response. Compressed responses are an extension to the NNTP protocol supported by some usenet servers to reduce the bandwidth of heavily used range style commands that can return large amounts of textual data. This function handles gzip compressed responses that have the terminating line inside or outside the compressed data. From experience, the 'XFEATURE COMPRESS GZIP' command causes the terminating '.\\r\\n' to follow the compressed data, and 'XFEATURE COMPRESS GZIP TERMINATOR' causes the terminator to be the last part of the compressed data (i.e. the reply is the gzipped version of the original reply - terminating line included). This function will produce the same output as the __info_plain_gen() function. In other words it takes care of decompression. Yields: A line of the info response. Raises: NNTPError: If data is required to be read from the socket and fails. NNTPDataError: If decompression fails.
[ "Generator", "for", "the", "lines", "of", "a", "compressed", "info", "(", "textual", ")", "response", "." ]
python
test
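The generator above relies on zlib.decompressobj(15 + 32), where the extra 32 in wbits makes zlib auto-detect a gzip or zlib header. A self-contained stdlib demo of that mode on a fake response body:

```python
import gzip
import zlib

payload = b'224 Overview follows\r\nsome line\r\n.\r\n'
compressed = gzip.compress(payload)

# wbits = 15 + 32 tells zlib to auto-detect a gzip or zlib header --
# the same mode the generator above uses for compressed responses.
inflate = zlib.decompressobj(15 + 32)
data = inflate.decompress(compressed) + inflate.flush()
assert data == payload
```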
chaoss/grimoirelab-elk
grimoire_elk/enriched/jira.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/jira.py#L159-L178
def get_identities(self, item): """Return the identities from an item""" item = item['data'] for field in ["assignee", "reporter", "creator"]: if field not in item["fields"]: continue if item["fields"][field]: user = self.get_sh_identity(item["fields"][field]) yield user comments = item.get('comments_data', []) for comment in comments: if 'author' in comment and comment['author']: user = self.get_sh_identity(comment['author']) yield user if 'updateAuthor' in comment and comment['updateAuthor']: user = self.get_sh_identity(comment['updateAuthor']) yield user
[ "def", "get_identities", "(", "self", ",", "item", ")", ":", "item", "=", "item", "[", "'data'", "]", "for", "field", "in", "[", "\"assignee\"", ",", "\"reporter\"", ",", "\"creator\"", "]", ":", "if", "field", "not", "in", "item", "[", "\"fields\"", "]", ":", "continue", "if", "item", "[", "\"fields\"", "]", "[", "field", "]", ":", "user", "=", "self", ".", "get_sh_identity", "(", "item", "[", "\"fields\"", "]", "[", "field", "]", ")", "yield", "user", "comments", "=", "item", ".", "get", "(", "'comments_data'", ",", "[", "]", ")", "for", "comment", "in", "comments", ":", "if", "'author'", "in", "comment", "and", "comment", "[", "'author'", "]", ":", "user", "=", "self", ".", "get_sh_identity", "(", "comment", "[", "'author'", "]", ")", "yield", "user", "if", "'updateAuthor'", "in", "comment", "and", "comment", "[", "'updateAuthor'", "]", ":", "user", "=", "self", ".", "get_sh_identity", "(", "comment", "[", "'updateAuthor'", "]", ")", "yield", "user" ]
Return the identities from an item
[ "Return", "the", "identities", "from", "an", "item" ]
python
train
Robpol86/libnl
libnl/genl/genl.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/genl/genl.py#L44-L64
def genl_send_simple(sk, family, cmd, version, flags): """Send a Generic Netlink message consisting only of a header. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L84 This function is a shortcut for sending a Generic Netlink message without any message payload. The message will only consist of the Netlink and Generic Netlink headers. The header is constructed based on the specified parameters and passed on to nl_send_simple() to send it on the specified socket. Positional arguments: sk -- Generic Netlink socket (nl_sock class instance). family -- numeric family identifier (integer). cmd -- numeric command identifier (integer). version -- interface version (integer). flags -- additional Netlink message flags (integer). Returns: 0 on success or a negative error code. """ hdr = genlmsghdr(cmd=cmd, version=version) return int(nl_send_simple(sk, family, flags, hdr, hdr.SIZEOF))
[ "def", "genl_send_simple", "(", "sk", ",", "family", ",", "cmd", ",", "version", ",", "flags", ")", ":", "hdr", "=", "genlmsghdr", "(", "cmd", "=", "cmd", ",", "version", "=", "version", ")", "return", "int", "(", "nl_send_simple", "(", "sk", ",", "family", ",", "flags", ",", "hdr", ",", "hdr", ".", "SIZEOF", ")", ")" ]
Send a Generic Netlink message consisting only of a header.

https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L84

This function is a shortcut for sending a Generic Netlink message
without any message payload. The message will only consist of the
Netlink and Generic Netlink headers. The header is constructed based on
the specified parameters and passed on to nl_send_simple() to send it on
the specified socket.

Positional arguments:
sk -- Generic Netlink socket (nl_sock class instance).
family -- numeric family identifier (integer).
cmd -- numeric command identifier (integer).
version -- interface version (integer).
flags -- additional Netlink message flags (integer).

Returns:
0 on success or a negative error code.
[ "Send", "a", "Generic", "Netlink", "message", "consisting", "only", "of", "a", "header", "." ]
python
train
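A hypothetical caller sketch, assuming this pure-Python libnl port is installed; the import paths follow the repository layout above, and the numeric constants are the conventional nlctrl values, not taken from this entry.

from libnl.genl.genl import genl_connect, genl_send_simple
from libnl.socket_ import nl_socket_alloc

GENL_ID_CTRL = 0x10        # conventional nlctrl family id (assumption)
CTRL_CMD_GETFAMILY = 3     # dump registered Generic Netlink families
NLM_F_REQUEST = 0x1
NLM_F_DUMP = 0x300

sk = nl_socket_alloc()
genl_connect(sk)
# Header-only request: ask the controller to list every registered family.
ret = genl_send_simple(sk, GENL_ID_CTRL, CTRL_CMD_GETFAMILY, 1,
                       NLM_F_REQUEST | NLM_F_DUMP)
print("genl_send_simple returned", ret)  # 0 on success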
quasipedia/simpleactors
simpleactors.py
https://github.com/quasipedia/simpleactors/blob/4253da2d10b3df080b5e7b3fbee03aa6dd10db07/simpleactors.py#L115-L121
def run(self):
    '''Run until there are no events to be processed.'''
    # We left-append rather than emit (right-append) because some message
    # may have been already queued for execution before the director runs.
    global_event_queue.appendleft((INITIATE, self, (), {}))
    while global_event_queue:
        self.process_event(global_event_queue.popleft())
[ "def", "run", "(", "self", ")", ":", "# We left-append rather than emit (right-append) because some message", "# may have been already queued for execution before the director runs.", "global_event_queue", ".", "appendleft", "(", "(", "INITIATE", ",", "self", ",", "(", ")", ",", "{", "}", ")", ")", "while", "global_event_queue", ":", "self", ".", "process_event", "(", "global_event_queue", ".", "popleft", "(", ")", ")" ]
Run until there are no events to be processed.
[ "Run", "until", "there", "are", "no", "events", "to", "be", "processed", "." ]
python
train
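A self-contained miniature of the same loop, showing why the left-append matters: INITIATE is processed before any message queued prior to run(). The director class here is a stand-in, not the library's.

from collections import deque

INITIATE = "initiate"
global_event_queue = deque()

class MiniDirector:
    def process_event(self, event):
        message, emitter, args, kwargs = event
        print("processing:", message)

    def run(self):
        # Left-append so INITIATE runs before anything already queued.
        global_event_queue.appendleft((INITIATE, self, (), {}))
        while global_event_queue:
            self.process_event(global_event_queue.popleft())

global_event_queue.append(("hello", None, (), {}))
MiniDirector().run()  # processing: initiate / processing: hello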
project-rig/rig
rig/machine_control/machine_controller.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L1373-L1491
def load_application(self, *args, **kwargs):
    """Load an application to a set of application cores.

    This method guarantees that once it returns, all required cores will
    have been loaded. If this is not possible after a small number of
    attempts, a :py:exc:`.SpiNNakerLoadingError` will be raised.

    This method can be called in either of the following ways::

        load_application("/path/to/app.aplx", {(x, y): {core, ...}, ...})
        load_application({"/path/to/app.aplx": {(x, y): {core, ...}, ...},
                          ...})

    Note that the latter format is the same format produced by
    :py:func:`~rig.place_and_route.util.build_application_map`.

    Parameters
    ----------
    app_id : int
    wait : bool
        Leave the application in a wait state after successfully loading
        it.
    n_tries : int
        Number of attempts to make to load the application.
    app_start_delay : float
        Time to pause (in seconds) after loading to ensure that the
        application successfully reaches the wait state before checking
        for success.
    use_count : bool
        If True (the default) then the targets dictionary will be assumed
        to represent _all_ the cores that will be loaded and a faster
        method to determine whether all applications have been loaded
        correctly will be used. If False a fallback method will be used.

    Raises
    ------
    rig.machine_control.machine_controller.SpiNNakerLoadingError
        This exception is raised after some cores failed to load after
        ``n_tries`` attempts.
    """
    # Get keyword arguments
    app_id = kwargs.pop("app_id")
    wait = kwargs.pop("wait")
    n_tries = kwargs.pop("n_tries")
    app_start_delay = kwargs.pop("app_start_delay")
    use_count = kwargs.pop("use_count", True)

    # Coerce the arguments into a single form. If there are two arguments
    # then assume that we have filename and a map of chips and cores;
    # otherwise there should be ONE argument which is of the form of the
    # return value of `build_application_map`.
    application_map = {}
    if len(args) == 1:
        application_map = args[0]
    elif len(args) == 2:
        application_map = {args[0]: args[1]}
    else:
        raise TypeError(
            "load_application: accepts either 1 or 2 positional arguments:"
            "a map of filenames to targets OR a single filename and its"
            "targets"
        )

    # Count the number of cores being loaded
    core_count = sum(
        len(cores) for ts in six.itervalues(application_map)
        for cores in six.itervalues(ts)
    )

    # Mark all targets as unloaded
    unloaded = application_map

    # Try to load the applications, then determine which are unloaded
    tries = 0
    while unloaded != {} and tries <= n_tries:
        tries += 1

        # Load all unloaded applications, then pause to ensure they reach
        # the wait state
        self.flood_fill_aplx(unloaded, app_id=app_id, wait=True)
        time.sleep(app_start_delay)

        # If running in "fast" mode then check that the correct number of
        # cores are in the "wait" state, if so then break out of this loop.
        if (use_count and
                core_count == self.count_cores_in_state("wait", app_id)):
            unloaded = {}
            continue

        # Query each target in turn to determine if it is loaded or
        # otherwise. If it is loaded (in the wait state) then remove it
        # from the unloaded list.
        new_unloadeds = dict()
        for app_name, targets in iteritems(unloaded):
            unloaded_targets = {}
            for (x, y), cores in iteritems(targets):
                unloaded_cores = set()
                for p in cores:
                    # Read the struct value vcpu->cpu_state, if it is
                    # anything BUT wait then we mark this core as unloaded.
                    state = consts.AppState(
                        self.read_vcpu_struct_field("cpu_state", x, y, p)
                    )
                    if state is not consts.AppState.wait:
                        unloaded_cores.add(p)

                if len(unloaded_cores) > 0:
                    unloaded_targets[(x, y)] = unloaded_cores

            if len(unloaded_targets) > 0:
                new_unloadeds[app_name] = unloaded_targets

        unloaded = new_unloadeds

    # If there are still unloaded cores then we bail
    if unloaded != {}:
        raise SpiNNakerLoadingError(unloaded)

    # If not waiting then send the start signal
    if not wait:
        self.send_signal("start", app_id)
[ "def", "load_application", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Get keyword arguments", "app_id", "=", "kwargs", ".", "pop", "(", "\"app_id\"", ")", "wait", "=", "kwargs", ".", "pop", "(", "\"wait\"", ")", "n_tries", "=", "kwargs", ".", "pop", "(", "\"n_tries\"", ")", "app_start_delay", "=", "kwargs", ".", "pop", "(", "\"app_start_delay\"", ")", "use_count", "=", "kwargs", ".", "pop", "(", "\"use_count\"", ",", "True", ")", "# Coerce the arguments into a single form. If there are two arguments", "# then assume that we have filename and a map of chips and cores;", "# otherwise there should be ONE argument which is of the form of the", "# return value of `build_application_map`.", "application_map", "=", "{", "}", "if", "len", "(", "args", ")", "==", "1", ":", "application_map", "=", "args", "[", "0", "]", "elif", "len", "(", "args", ")", "==", "2", ":", "application_map", "=", "{", "args", "[", "0", "]", ":", "args", "[", "1", "]", "}", "else", ":", "raise", "TypeError", "(", "\"load_application: accepts either 1 or 2 positional arguments:\"", "\"a map of filenames to targets OR a single filename and its\"", "\"targets\"", ")", "# Count the number of cores being loaded", "core_count", "=", "sum", "(", "len", "(", "cores", ")", "for", "ts", "in", "six", ".", "itervalues", "(", "application_map", ")", "for", "cores", "in", "six", ".", "itervalues", "(", "ts", ")", ")", "# Mark all targets as unloaded", "unloaded", "=", "application_map", "# Try to load the applications, then determine which are unloaded", "tries", "=", "0", "while", "unloaded", "!=", "{", "}", "and", "tries", "<=", "n_tries", ":", "tries", "+=", "1", "# Load all unloaded applications, then pause to ensure they reach", "# the wait state", "self", ".", "flood_fill_aplx", "(", "unloaded", ",", "app_id", "=", "app_id", ",", "wait", "=", "True", ")", "time", ".", "sleep", "(", "app_start_delay", ")", "# If running in \"fast\" mode then check that the correct number of", "# cores are in the \"wait\" state, if so then break out of this loop.", "if", "(", "use_count", "and", "core_count", "==", "self", ".", "count_cores_in_state", "(", "\"wait\"", ",", "app_id", ")", ")", ":", "unloaded", "=", "{", "}", "continue", "# Query each target in turn to determine if it is loaded or", "# otherwise. 
If it is loaded (in the wait state) then remove it", "# from the unloaded list.", "new_unloadeds", "=", "dict", "(", ")", "for", "app_name", ",", "targets", "in", "iteritems", "(", "unloaded", ")", ":", "unloaded_targets", "=", "{", "}", "for", "(", "x", ",", "y", ")", ",", "cores", "in", "iteritems", "(", "targets", ")", ":", "unloaded_cores", "=", "set", "(", ")", "for", "p", "in", "cores", ":", "# Read the struct value vcpu->cpu_state, if it is", "# anything BUT wait then we mark this core as unloaded.", "state", "=", "consts", ".", "AppState", "(", "self", ".", "read_vcpu_struct_field", "(", "\"cpu_state\"", ",", "x", ",", "y", ",", "p", ")", ")", "if", "state", "is", "not", "consts", ".", "AppState", ".", "wait", ":", "unloaded_cores", ".", "add", "(", "p", ")", "if", "len", "(", "unloaded_cores", ")", ">", "0", ":", "unloaded_targets", "[", "(", "x", ",", "y", ")", "]", "=", "unloaded_cores", "if", "len", "(", "unloaded_targets", ")", ">", "0", ":", "new_unloadeds", "[", "app_name", "]", "=", "unloaded_targets", "unloaded", "=", "new_unloadeds", "# If there are still unloaded cores then we bail", "if", "unloaded", "!=", "{", "}", ":", "raise", "SpiNNakerLoadingError", "(", "unloaded", ")", "# If not waiting then send the start signal", "if", "not", "wait", ":", "self", ".", "send_signal", "(", "\"start\"", ",", "app_id", ")" ]
Load an application to a set of application cores.

This method guarantees that once it returns, all required cores will
have been loaded. If this is not possible after a small number of
attempts, a :py:exc:`.SpiNNakerLoadingError` will be raised.

This method can be called in either of the following ways::

    load_application("/path/to/app.aplx", {(x, y): {core, ...}, ...})
    load_application({"/path/to/app.aplx": {(x, y): {core, ...}, ...},
                      ...})

Note that the latter format is the same format produced by
:py:func:`~rig.place_and_route.util.build_application_map`.

Parameters
----------
app_id : int
wait : bool
    Leave the application in a wait state after successfully loading
    it.
n_tries : int
    Number of attempts to make to load the application.
app_start_delay : float
    Time to pause (in seconds) after loading to ensure that the
    application successfully reaches the wait state before checking for
    success.
use_count : bool
    If True (the default) then the targets dictionary will be assumed
    to represent _all_ the cores that will be loaded and a faster
    method to determine whether all applications have been loaded
    correctly will be used. If False a fallback method will be used.

Raises
------
rig.machine_control.machine_controller.SpiNNakerLoadingError
    This exception is raised after some cores failed to load after
    ``n_tries`` attempts.
[ "Load", "an", "application", "to", "a", "set", "of", "application", "cores", "." ]
python
train
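A hypothetical call in both accepted forms; the hostname, APLX path, and target map are invented, and the keyword values simply follow the docstring above.

from rig.machine_control import MachineController

mc = MachineController("spinnaker-board.example")  # hostname is invented
targets = {(0, 0): {1, 2}, (1, 0): {1}}

# Single-binary form...
mc.load_application("./app.aplx", targets, app_id=33, wait=False,
                    n_tries=2, app_start_delay=0.1)

# ...or the build_application_map form.
mc.load_application({"./app.aplx": targets}, app_id=34, wait=True,
                    n_tries=2, app_start_delay=0.1, use_count=True)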
dmlc/gluon-nlp
scripts/parsing/common/data.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/parsing/common/data.py#L288-L303
def rel2id(self, xs):
    """Map relation(s) to id(s)

    Parameters
    ----------
    xs : str or list
        relation

    Returns
    -------
    int or list
        id(s) of relation
    """
    if isinstance(xs, list):
        return [self._rel2id[x] for x in xs]
    return self._rel2id[xs]
[ "def", "rel2id", "(", "self", ",", "xs", ")", ":", "if", "isinstance", "(", "xs", ",", "list", ")", ":", "return", "[", "self", ".", "_rel2id", "[", "x", "]", "for", "x", "in", "xs", "]", "return", "self", ".", "_rel2id", "[", "xs", "]" ]
Map relation(s) to id(s)

Parameters
----------
xs : str or list
    relation

Returns
-------
int or list
    id(s) of relation
[ "Map", "relation", "(", "s", ")", "to", "id", "(", "s", ")" ]
python
train
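A self-contained stand-in for the vocabulary object; the _rel2id table is fabricated, but the accessor reproduces the branch above.

class RelVocab:
    def __init__(self, relations):
        # Fabricated label-to-id table standing in for the real vocabulary.
        self._rel2id = {rel: i for i, rel in enumerate(relations)}

    def rel2id(self, xs):
        if isinstance(xs, list):
            return [self._rel2id[x] for x in xs]
        return self._rel2id[xs]

vocab = RelVocab(["nsubj", "dobj", "amod"])
print(vocab.rel2id("dobj"))             # 1
print(vocab.rel2id(["nsubj", "amod"]))  # [0, 2]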
saltstack/salt
salt/modules/btrfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/btrfs.py#L894-L1031
def subvolume_list(path, parent_id=False, absolute=False,
                   ogeneration=False, generation=False,
                   subvolumes=False, uuid=False, parent_uuid=False,
                   sent_subvolume_uuid=False, snapshots=False,
                   readonly=False, deleted=False, generation_cmp=None,
                   ogeneration_cmp=None, sort=None):
    '''
    List the subvolumes present in the filesystem.

    path
        Mount point for the subvolume

    parent_id
        Print parent ID

    absolute
        Print all the subvolumes in the filesystem and distinguish
        between absolute and relative path with respect to the given
        <path>

    ogeneration
        Print the ogeneration of the subvolume

    generation
        Print the generation of the subvolume

    subvolumes
        Print only subvolumes below specified <path>

    uuid
        Print the UUID of the subvolume

    parent_uuid
        Print the parent uuid of subvolumes (and snapshots)

    sent_subvolume_uuid
        Print the UUID of the sent subvolume, where the subvolume is
        the result of a receive operation

    snapshots
        Only snapshot subvolumes in the filesystem will be listed

    readonly
        Only readonly subvolumes in the filesystem will be listed

    deleted
        Only deleted subvolumes that are not yet cleaned

    generation_cmp
        List subvolumes in the filesystem whose generation is >=, <= or
        = value. '+' means >= value, '-' means <= value; if there is
        neither '+' nor '-', it means = value

    ogeneration_cmp
        List subvolumes in the filesystem whose ogeneration is >=, <=
        or = value

    sort
        List subvolumes in order by specified items. Possible values:
        * rootid
        * gen
        * ogen
        * path
        You can add '+' or '-' in front of each item: '+' means
        ascending, '-' means descending. The default is ascending. You
        can combine them in a list.

    CLI Example:

    .. code-block:: bash

       salt '*' btrfs.subvolume_list /var/volumes/tmp
       salt '*' btrfs.subvolume_list /var/volumes/tmp path=True
       salt '*' btrfs.subvolume_list /var/volumes/tmp sort='[-rootid]'
    '''
    if sort and type(sort) is not list:
        raise CommandExecutionError('Sort parameter must be a list')

    valid_sorts = [
        ''.join((order, attrib)) for order, attrib in itertools.product(
            ('-', '', '+'), ('rootid', 'gen', 'ogen', 'path'))
    ]
    if sort and not all(s in valid_sorts for s in sort):
        raise CommandExecutionError('Value for sort not recognized')

    cmd = ['btrfs', 'subvolume', 'list']

    params = ((parent_id, '-p'),
              (absolute, '-a'),
              (ogeneration, '-c'),
              (generation, '-g'),
              (subvolumes, '-o'),
              (uuid, '-u'),
              (parent_uuid, '-q'),
              (sent_subvolume_uuid, '-R'),
              (snapshots, '-s'),
              (readonly, '-r'),
              (deleted, '-d'))
    cmd.extend(p[1] for p in params if p[0])

    if generation_cmp:
        cmd.extend(['-G', generation_cmp])

    if ogeneration_cmp:
        cmd.extend(['-C', ogeneration_cmp])

    # We already validated the content of the list
    if sort:
        cmd.append('--sort={}'.format(','.join(sort)))

    cmd.append(path)

    res = __salt__['cmd.run_all'](cmd)
    salt.utils.fsutils._verify_run(res)

    # Parse the output. ID and gen are always at the begining, and
    # path is always at the end. There is only one column that
    # contains space (top level), and the path value can also have
    # spaces. The issue is that we do not know how many spaces do we
    # have in the path name, so any classic solution based on split
    # will fail.
    #
    # This list is in order.
    columns = ('ID', 'gen', 'cgen', 'parent', 'top level', 'otime',
               'parent_uuid', 'received_uuid', 'uuid', 'path')
    result = []
    for line in res['stdout'].splitlines():
        table = {}
        for key in columns:
            value, line = _pop(line, key, key == 'path')
            if value:
                table[key.lower()] = value
        # If line is not empty here, we are not able to parse it
        if not line:
            result.append(table)

    return result
[ "def", "subvolume_list", "(", "path", ",", "parent_id", "=", "False", ",", "absolute", "=", "False", ",", "ogeneration", "=", "False", ",", "generation", "=", "False", ",", "subvolumes", "=", "False", ",", "uuid", "=", "False", ",", "parent_uuid", "=", "False", ",", "sent_subvolume_uuid", "=", "False", ",", "snapshots", "=", "False", ",", "readonly", "=", "False", ",", "deleted", "=", "False", ",", "generation_cmp", "=", "None", ",", "ogeneration_cmp", "=", "None", ",", "sort", "=", "None", ")", ":", "if", "sort", "and", "type", "(", "sort", ")", "is", "not", "list", ":", "raise", "CommandExecutionError", "(", "'Sort parameter must be a list'", ")", "valid_sorts", "=", "[", "''", ".", "join", "(", "(", "order", ",", "attrib", ")", ")", "for", "order", ",", "attrib", "in", "itertools", ".", "product", "(", "(", "'-'", ",", "''", ",", "'+'", ")", ",", "(", "'rootid'", ",", "'gen'", ",", "'ogen'", ",", "'path'", ")", ")", "]", "if", "sort", "and", "not", "all", "(", "s", "in", "valid_sorts", "for", "s", "in", "sort", ")", ":", "raise", "CommandExecutionError", "(", "'Value for sort not recognized'", ")", "cmd", "=", "[", "'btrfs'", ",", "'subvolume'", ",", "'list'", "]", "params", "=", "(", "(", "parent_id", ",", "'-p'", ")", ",", "(", "absolute", ",", "'-a'", ")", ",", "(", "ogeneration", ",", "'-c'", ")", ",", "(", "generation", ",", "'-g'", ")", ",", "(", "subvolumes", ",", "'-o'", ")", ",", "(", "uuid", ",", "'-u'", ")", ",", "(", "parent_uuid", ",", "'-q'", ")", ",", "(", "sent_subvolume_uuid", ",", "'-R'", ")", ",", "(", "snapshots", ",", "'-s'", ")", ",", "(", "readonly", ",", "'-r'", ")", ",", "(", "deleted", ",", "'-d'", ")", ")", "cmd", ".", "extend", "(", "p", "[", "1", "]", "for", "p", "in", "params", "if", "p", "[", "0", "]", ")", "if", "generation_cmp", ":", "cmd", ".", "extend", "(", "[", "'-G'", ",", "generation_cmp", "]", ")", "if", "ogeneration_cmp", ":", "cmd", ".", "extend", "(", "[", "'-C'", ",", "ogeneration_cmp", "]", ")", "# We already validated the content of the list", "if", "sort", ":", "cmd", ".", "append", "(", "'--sort={}'", ".", "format", "(", "','", ".", "join", "(", "sort", ")", ")", ")", "cmd", ".", "append", "(", "path", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "salt", ".", "utils", ".", "fsutils", ".", "_verify_run", "(", "res", ")", "# Parse the output. ID and gen are always at the begining, and", "# path is always at the end. There is only one column that", "# contains space (top level), and the path value can also have", "# spaces. The issue is that we do not know how many spaces do we", "# have in the path name, so any classic solution based on split", "# will fail.", "#", "# This list is in order.", "columns", "=", "(", "'ID'", ",", "'gen'", ",", "'cgen'", ",", "'parent'", ",", "'top level'", ",", "'otime'", ",", "'parent_uuid'", ",", "'received_uuid'", ",", "'uuid'", ",", "'path'", ")", "result", "=", "[", "]", "for", "line", "in", "res", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "table", "=", "{", "}", "for", "key", "in", "columns", ":", "value", ",", "line", "=", "_pop", "(", "line", ",", "key", ",", "key", "==", "'path'", ")", "if", "value", ":", "table", "[", "key", ".", "lower", "(", ")", "]", "=", "value", "# If line is not empty here, we are not able to parse it", "if", "not", "line", ":", "result", ".", "append", "(", "table", ")", "return", "result" ]
List the subvolumes present in the filesystem.

path
    Mount point for the subvolume

parent_id
    Print parent ID

absolute
    Print all the subvolumes in the filesystem and distinguish between
    absolute and relative path with respect to the given <path>

ogeneration
    Print the ogeneration of the subvolume

generation
    Print the generation of the subvolume

subvolumes
    Print only subvolumes below specified <path>

uuid
    Print the UUID of the subvolume

parent_uuid
    Print the parent uuid of subvolumes (and snapshots)

sent_subvolume_uuid
    Print the UUID of the sent subvolume, where the subvolume is the
    result of a receive operation

snapshots
    Only snapshot subvolumes in the filesystem will be listed

readonly
    Only readonly subvolumes in the filesystem will be listed

deleted
    Only deleted subvolumes that are not yet cleaned

generation_cmp
    List subvolumes in the filesystem whose generation is >=, <= or =
    value. '+' means >= value, '-' means <= value; if there is neither
    '+' nor '-', it means = value

ogeneration_cmp
    List subvolumes in the filesystem whose ogeneration is >=, <= or =
    value

sort
    List subvolumes in order by specified items. Possible values:
    * rootid
    * gen
    * ogen
    * path
    You can add '+' or '-' in front of each item: '+' means ascending,
    '-' means descending. The default is ascending. You can combine
    them in a list.

CLI Example:

.. code-block:: bash

   salt '*' btrfs.subvolume_list /var/volumes/tmp
   salt '*' btrfs.subvolume_list /var/volumes/tmp path=True
   salt '*' btrfs.subvolume_list /var/volumes/tmp sort='[-rootid]'
[ "List", "the", "subvolumes", "present", "in", "the", "filesystem", "." ]
python
train
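A simplified, self-contained illustration of the column-by-column parsing strategy the comments describe; this is a stand-in for the module's _pop helper (whose real implementation is not shown in this entry), with a reduced column list. Keys are consumed left to right, and only the final 'path' value may contain spaces.

def parse_subvolume_line(line):
    columns = ('ID', 'gen', 'top level', 'path')
    table = {}
    for key in columns:
        prefix = key + ' '
        if line.startswith(prefix):
            rest = line[len(prefix):]
            if key == 'path':            # last column: keep any spaces
                value, line = rest, ''
            else:
                value, _, line = rest.partition(' ')
            table[key.lower()] = value
    return table

print(parse_subvolume_line('ID 257 gen 8 top level 5 path my sub/vol'))
# {'id': '257', 'gen': '8', 'top level': '5', 'path': 'my sub/vol'}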
thomasdelaet/python-velbus
velbus/messages/temp_sensor_status.py
https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/temp_sensor_status.py#L42-L88
def populate(self, priority, address, rtr, data):
    """
    -DB1 last bit = local_control
    -DB1 bit 2+3 = status_mode
    -DB1 bit 4 = auto send
    -DB1 bit 5+6+7 = mode
    -DB1 bit 8 = cool
    -DB2 = program (not used)
    -DB3 last bit = heater
    -DB3 bit 2 = boost
    -DB3 bit 3 = pump
    -DB3 bit 4 = pump
    -DB4 bit 5 = alarm 1
    -DB4 bit 6 = alarm 2
    -DB4 bit 7 = alarm 3
    -DB4 bit 8 = alarm 4
    -DB5 current temp = current temp
    -DB6 target temp = target temp
    -DB7-8 sleep timer = 0=off >0=x min
    :return: None
    """
    assert isinstance(data, bytes)
    self.needs_no_rtr(rtr)
    self.needs_data(data, 7)
    self.set_attributes(priority, address, rtr)
    self.local_control = (data[0] & 0x01)
    self.status_mode = (data[0] & 0x206)
    self._status_str = DSTATUS[self.status_mode]
    self.auto_send = (data[0] & 0x08)
    self.mode = (data[0] & 0x70)
    self.mode_str = DMODE[self.mode]
    self.cool = (data[0] & 0x80)
    self.heater = (data[2] & 0x01)
    self.boost = (data[2] & 0x02)
    self.pump = (data[2] & 0x04)
    self.cool = (data[2] & 0x08)
    self.alarm1 = (data[2] & 0x10)
    self.alarm2 = (data[2] & 0x20)
    self.alarm3 = (data[2] & 0x40)
    self.alarm4 = (data[2] & 0x80)
    self.current_temp = data[3] / 2
    self.target_temp = data[4] / 2
    self.sleep_timer = (data[5] << 8) + data[6]
[ "def", "populate", "(", "self", ",", "priority", ",", "address", ",", "rtr", ",", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "bytes", ")", "self", ".", "needs_no_rtr", "(", "rtr", ")", "self", ".", "needs_data", "(", "data", ",", "7", ")", "self", ".", "set_attributes", "(", "priority", ",", "address", ",", "rtr", ")", "self", ".", "local_control", "=", "(", "data", "[", "0", "]", "&", "0x01", ")", "self", ".", "status_mode", "=", "(", "data", "[", "0", "]", "&", "0x206", ")", "self", ".", "_status_str", "=", "DSTATUS", "[", "self", ".", "status_mode", "]", "self", ".", "auto_send", "=", "(", "data", "[", "0", "]", "&", "0x08", ")", "self", ".", "mode", "=", "(", "data", "[", "0", "]", "&", "0x70", ")", "self", ".", "mode_str", "=", "DMODE", "[", "self", ".", "mode", "]", "self", ".", "cool", "=", "(", "data", "[", "0", "]", "&", "0x80", ")", "self", ".", "heater", "=", "(", "data", "[", "2", "]", "&", "0x01", ")", "self", ".", "boost", "=", "(", "data", "[", "2", "]", "&", "0x02", ")", "self", ".", "pump", "=", "(", "data", "[", "2", "]", "&", "0x04", ")", "self", ".", "cool", "=", "(", "data", "[", "2", "]", "&", "0x08", ")", "self", ".", "alarm1", "=", "(", "data", "[", "2", "]", "&", "0x10", ")", "self", ".", "alarm2", "=", "(", "data", "[", "2", "]", "&", "0x20", ")", "self", ".", "alarm3", "=", "(", "data", "[", "2", "]", "&", "0x40", ")", "self", ".", "alarm4", "=", "(", "data", "[", "2", "]", "&", "0x80", ")", "self", ".", "current_temp", "=", "data", "[", "3", "]", "/", "2", "self", ".", "target_temp", "=", "data", "[", "4", "]", "/", "2", "self", ".", "sleep_timer", "=", "(", "data", "[", "5", "]", "<<", "8", ")", "+", "data", "[", "6", "]" ]
-DB1 last bit = local_control
-DB1 bit 2+3 = status_mode
-DB1 bit 4 = auto send
-DB1 bit 5+6+7 = mode
-DB1 bit 8 = cool
-DB2 = program (not used)
-DB3 last bit = heater
-DB3 bit 2 = boost
-DB3 bit 3 = pump
-DB3 bit 4 = pump
-DB4 bit 5 = alarm 1
-DB4 bit 6 = alarm 2
-DB4 bit 7 = alarm 3
-DB4 bit 8 = alarm 4
-DB5 current temp = current temp
-DB6 target temp = target temp
-DB7-8 sleep timer = 0=off >0=x min
:return: None
[ "-", "DB1", "last", "bit", "=", "local_control", "-", "DB1", "bit", "2", "+", "3", "=", "status_mode", "-", "DB1", "bit", "4", "=", "auto", "send", "-", "DB1", "bit", "5", "+", "6", "+", "7", "=", "mode", "-", "DB1", "bit", "8", "=", "cool", "-", "DB2", "=", "program", "(", "not", "used", ")", "-", "DB3", "last", "bit", "=", "heater", "-", "DB3", "bit", "2", "=", "boost", "-", "DB3", "bit", "3", "=", "pump", "-", "DB3", "bit", "4", "=", "pump", "-", "DB4", "bit", "5", "=", "alarm", "1", "-", "DB4", "bit", "6", "=", "alarm", "2", "-", "DB4", "bit", "7", "=", "alarm", "3", "-", "DB4", "bit", "8", "=", "alarm", "4", "-", "DB5", "current", "temp", "=", "current", "temp", "-", "DB6", "target", "temp", "=", "target", "temp", "-", "DB7", "-", "8", "sleep", "timer", "=", "0", "=", "off", ">", "0", "=", "x", "min", ":", "return", ":", "None" ]
python
train
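A self-contained sketch of the same bit decoding over a fabricated 7-byte payload; the byte values are invented, while the masks and half-degree scaling follow the code above.

data = bytes([0b0101_0001, 0x00, 0b0000_0101, 42, 44, 0x00, 0x0F])

local_control = bool(data[0] & 0x01)    # DB1, last bit
mode = data[0] & 0x70                   # DB1, bits 5-7
heater = bool(data[2] & 0x01)           # DB3, last bit
pump = bool(data[2] & 0x04)             # DB3, bit 3
current_temp = data[3] / 2              # half-degree resolution
target_temp = data[4] / 2
sleep_timer = (data[5] << 8) + data[6]  # minutes, 0 = off

print(local_control, heater, pump, current_temp, target_temp, sleep_timer)
# True True True 21.0 22.0 15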
zhanglab/psamm
psamm/fluxanalysis.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/fluxanalysis.py#L226-L251
def max_min_l1(self, reaction, weights={}):
    """Maximize flux of reaction then minimize the L1 norm.

    During minimization the given reaction will be fixed at the maximum
    obtained from the first solution. If reaction is a dictionary object,
    each entry is interpreted as a weight on the objective for that
    reaction (non-existent reactions will have zero weight).
    """
    self.maximize(reaction)

    if isinstance(reaction, dict):
        reactions = list(reaction)
    else:
        reactions = [reaction]

    # Save flux values before modifying the LP problem
    fluxes = {r: self.get_flux(r) for r in reactions}

    # Add constraints on the maximized reactions
    for r in reactions:
        flux_var = self.get_flux_var(r)
        c, = self._prob.add_linear_constraints(flux_var == fluxes[r])
        self._temp_constr.append(c)

    self.minimize_l1(weights)
[ "def", "max_min_l1", "(", "self", ",", "reaction", ",", "weights", "=", "{", "}", ")", ":", "self", ".", "maximize", "(", "reaction", ")", "if", "isinstance", "(", "reaction", ",", "dict", ")", ":", "reactions", "=", "list", "(", "reaction", ")", "else", ":", "reactions", "=", "[", "reaction", "]", "# Save flux values before modifying the LP problem", "fluxes", "=", "{", "r", ":", "self", ".", "get_flux", "(", "r", ")", "for", "r", "in", "reactions", "}", "# Add constraints on the maximized reactions", "for", "r", "in", "reactions", ":", "flux_var", "=", "self", ".", "get_flux_var", "(", "r", ")", "c", ",", "=", "self", ".", "_prob", ".", "add_linear_constraints", "(", "flux_var", "==", "fluxes", "[", "r", "]", ")", "self", ".", "_temp_constr", ".", "append", "(", "c", ")", "self", ".", "minimize_l1", "(", "weights", ")" ]
Maximize flux of reaction then minimize the L1 norm.

During minimization the given reaction will be fixed at the maximum
obtained from the first solution. If reaction is a dictionary object,
each entry is interpreted as a weight on the objective for that
reaction (non-existent reactions will have zero weight).
[ "Maximize", "flux", "of", "reaction", "then", "minimize", "the", "L1", "norm", "." ]
python
train
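A hypothetical call sequence, assuming a flux balance problem object has already been constructed elsewhere (e.g. via psamm's FluxBalanceProblem); the reaction ids and weights are invented.

# 'p' is assumed to be an already-constructed psamm flux balance problem;
# the reaction ids below are invented.
p.max_min_l1('rxn_biomass')
print(p.get_flux('rxn_biomass'))  # flux at the fixed maximum

# Weighted variant: bias the L1 minimization against selected reactions.
p.max_min_l1({'rxn_biomass': 1}, weights={'rxn_transport': 10})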
rikrd/inspire
inspirespeech/common.py
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/common.py#L989-L1061
def recognise_model(feature_filename,
                    symbollist_filename,
                    model_directory,
                    recognition_filename,
                    pronunciation_dictionary_filename,
                    list_words_filename='',
                    cmllr_directory=None,
                    tokens_count=None,
                    hypotheses_count=1,
                    htk_trace=0):
    """
    Perform recognition using a model and assuming a single word language.

    If list_words_filename is '' then all the words in the dictionary
    are used as language words.
    """
    # Normalize UTF-8 to avoid Mac problems
    temp_dictionary_filename = utf8_normalize(pronunciation_dictionary_filename)

    # Create language word list
    if list_words_filename:
        list_words = parse_wordlist(list_words_filename)
    else:
        list_words = sorted(parse_dictionary(temp_dictionary_filename).keys())

    # Create language model
    temp_directory = config.project_path('tmp', create=True)
    grammar_filename = config.path(temp_directory, 'grammar_words')
    wdnet_filename = config.path(temp_directory, 'wdnet')

    logging.debug('Create language model')
    create_language_model(list_words, grammar_filename, wdnet_filename)

    # Handle the Adaptation parameters
    cmllr_arguments = ''
    if cmllr_directory:
        if not os.path.isdir(cmllr_directory):
            logging.error('CMLLR adapatation directory not found: {}'.format(cmllr_directory))

        cmllr_arguments = "-J {} mllr2 -h '*/*_s%.mfc' -k -J {}".format(
            os.path.abspath(config.path(cmllr_directory, 'xforms')),
            os.path.abspath(config.path(cmllr_directory, 'classes')))

    # Handle the N-Best parameters
    hypotheses_count = hypotheses_count or 1
    tokens_count = tokens_count or int(math.ceil(hypotheses_count / 5.0))

    if hypotheses_count == 1 and tokens_count == 1:
        nbest_arguments = ""
    else:
        nbest_arguments = "-n {tokens_count} {hypotheses_count} ".format(
            tokens_count=tokens_count,
            hypotheses_count=hypotheses_count)

    # Run the HTK command
    config.htk_command("HVite -A -l '*' -T {htk_trace} "
                       "-H {model_directory}/macros -H {model_directory}/hmmdefs "
                       "-i {recognition_filename} -S {feature_filename} "
                       "{cmllr_arguments} -w {wdnet_filename} "
                       "{nbest_arguments} "
                       "-p 0.0 -s 5.0 "
                       "{pronunciation_dictionary_filename} "
                       "{symbollist_filename}".format(htk_trace=htk_trace,
                                                      model_directory=model_directory,
                                                      recognition_filename=recognition_filename,
                                                      feature_filename=feature_filename,
                                                      symbollist_filename=symbollist_filename,
                                                      nbest_arguments=nbest_arguments,
                                                      pronunciation_dictionary_filename=temp_dictionary_filename,
                                                      wdnet_filename=wdnet_filename,
                                                      cmllr_arguments=cmllr_arguments))

    # Remove temporary files
    os.remove(temp_dictionary_filename)
    os.remove(wdnet_filename)
    os.remove(grammar_filename)
[ "def", "recognise_model", "(", "feature_filename", ",", "symbollist_filename", ",", "model_directory", ",", "recognition_filename", ",", "pronunciation_dictionary_filename", ",", "list_words_filename", "=", "''", ",", "cmllr_directory", "=", "None", ",", "tokens_count", "=", "None", ",", "hypotheses_count", "=", "1", ",", "htk_trace", "=", "0", ")", ":", "# Normalize UTF-8 to avoid Mac problems", "temp_dictionary_filename", "=", "utf8_normalize", "(", "pronunciation_dictionary_filename", ")", "# Create language word list", "if", "list_words_filename", ":", "list_words", "=", "parse_wordlist", "(", "list_words_filename", ")", "else", ":", "list_words", "=", "sorted", "(", "parse_dictionary", "(", "temp_dictionary_filename", ")", ".", "keys", "(", ")", ")", "# Create language model", "temp_directory", "=", "config", ".", "project_path", "(", "'tmp'", ",", "create", "=", "True", ")", "grammar_filename", "=", "config", ".", "path", "(", "temp_directory", ",", "'grammar_words'", ")", "wdnet_filename", "=", "config", ".", "path", "(", "temp_directory", ",", "'wdnet'", ")", "logging", ".", "debug", "(", "'Create language model'", ")", "create_language_model", "(", "list_words", ",", "grammar_filename", ",", "wdnet_filename", ")", "# Handle the Adaptation parameters", "cmllr_arguments", "=", "''", "if", "cmllr_directory", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "cmllr_directory", ")", ":", "logging", ".", "error", "(", "'CMLLR adapatation directory not found: {}'", ".", "format", "(", "cmllr_directory", ")", ")", "cmllr_arguments", "=", "\"-J {} mllr2 -h '*/*_s%.mfc' -k -J {}\"", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "config", ".", "path", "(", "cmllr_directory", ",", "'xforms'", ")", ")", ",", "os", ".", "path", ".", "abspath", "(", "config", ".", "path", "(", "cmllr_directory", ",", "'classes'", ")", ")", ")", "# Handle the N-Best parameters", "hypotheses_count", "=", "hypotheses_count", "or", "1", "tokens_count", "=", "tokens_count", "or", "int", "(", "math", ".", "ceil", "(", "hypotheses_count", "/", "5.0", ")", ")", "if", "hypotheses_count", "==", "1", "and", "tokens_count", "==", "1", ":", "nbest_arguments", "=", "\"\"", "else", ":", "nbest_arguments", "=", "\"-n {tokens_count} {hypotheses_count} \"", ".", "format", "(", "tokens_count", "=", "tokens_count", ",", "hypotheses_count", "=", "hypotheses_count", ")", "# Run the HTK command", "config", ".", "htk_command", "(", "\"HVite -A -l '*' -T {htk_trace} \"", "\"-H {model_directory}/macros -H {model_directory}/hmmdefs \"", "\"-i {recognition_filename} -S {feature_filename} \"", "\"{cmllr_arguments} -w {wdnet_filename} \"", "\"{nbest_arguments} \"", "\"-p 0.0 -s 5.0 \"", "\"{pronunciation_dictionary_filename} \"", "\"{symbollist_filename}\"", ".", "format", "(", "htk_trace", "=", "htk_trace", ",", "model_directory", "=", "model_directory", ",", "recognition_filename", "=", "recognition_filename", ",", "feature_filename", "=", "feature_filename", ",", "symbollist_filename", "=", "symbollist_filename", ",", "nbest_arguments", "=", "nbest_arguments", ",", "pronunciation_dictionary_filename", "=", "temp_dictionary_filename", ",", "wdnet_filename", "=", "wdnet_filename", ",", "cmllr_arguments", "=", "cmllr_arguments", ")", ")", "# Remove temporary files", "os", ".", "remove", "(", "temp_dictionary_filename", ")", "os", ".", "remove", "(", "wdnet_filename", ")", "os", ".", "remove", "(", "grammar_filename", ")" ]
Perform recognition using a model and assuming a single word language.

If list_words_filename is '' then all the words in the dictionary are
used as language words.
[ "Perform", "recognition", "using", "a", "model", "and", "assuming", "a", "single", "word", "language", "." ]
python
train
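A hypothetical invocation; every path below is invented, and the keyword names simply mirror the signature above.

# All paths are invented; keyword names mirror the signature above.
recognise_model(
    feature_filename='features/test.scp',
    symbollist_filename='models/tiedlist',
    model_directory='models/hmm_final',
    recognition_filename='results/recognised.mlf',
    pronunciation_dictionary_filename='lex/dict',
    hypotheses_count=5,   # tokens_count defaults to ceil(5 / 5.0) = 1
    htk_trace=1)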