Dataset columns (name, type, and length range or number of distinct values):

    repo              string    length 7 to 54
    path              string    length 4 to 192
    url               string    length 87 to 284
    code              string    length 78 to 104k
    code_tokens       sequence
    docstring         string    length 1 to 46.9k
    docstring_tokens  sequence
    language          string    1 distinct value
    partition         string    3 distinct values
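The schema above is the column layout of a CodeSearchNet-style code/docstring corpus: each row pairs a function's source (`code`) with its documentation (`docstring`), plus repository metadata and pre-tokenised variants. Below is a minimal sketch of how such a corpus might be loaded and filtered with the Hugging Face datasets library; the dataset identifier and the split name are hypothetical placeholders, not the actual source of these rows.

    from datasets import load_dataset

    # Minimal sketch, assuming the rows shown here live in a Hugging Face dataset
    # with the fields listed above. "user/code-docstring-corpus" and the split
    # name "train" are placeholders, not the real identifier of this dump.
    corpus = load_dataset("user/code-docstring-corpus", split="train")

    # Keep Python functions from the 'train' partition with short docstrings.
    subset = corpus.filter(
        lambda row: row["language"] == "python"
        and row["partition"] == "train"
        and len(row["docstring"]) < 200
    )

    # Inspect a few rows: each row is a plain dict keyed by the column names.
    for row in subset.select(range(3)):
        print(row["repo"], row["path"], row["url"], sep="\n")
        print(row["docstring"])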
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L168-L174
def reduce_path(path):
    """Reduce absolute path to relative (if shorter) for easier readability."""
    relative_path = os.path.relpath(path)
    if len(relative_path) < len(path):
        return relative_path
    else:
        return path
[ "def", "reduce_path", "(", "path", ")", ":", "relative_path", "=", "os", ".", "path", ".", "relpath", "(", "path", ")", "if", "len", "(", "relative_path", ")", "<", "len", "(", "path", ")", ":", "return", "relative_path", "else", ":", "return", "path" ]
Reduce absolute path to relative (if shorter) for easier readability.
[ "Reduce", "absolute", "path", "to", "relative", "(", "if", "shorter", ")", "for", "easier", "readability", "." ]
python
test
dmlc/xgboost
python-package/xgboost/core.py
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L179-L194
def ctypes2numpy(cptr, length, dtype):
    """Convert a ctypes pointer array to a numpy array.
    """
    NUMPY_TO_CTYPES_MAPPING = {
        np.float32: ctypes.c_float,
        np.uint32: ctypes.c_uint,
    }
    if dtype not in NUMPY_TO_CTYPES_MAPPING:
        raise RuntimeError('Supported types: {}'.format(NUMPY_TO_CTYPES_MAPPING.keys()))
    ctype = NUMPY_TO_CTYPES_MAPPING[dtype]
    if not isinstance(cptr, ctypes.POINTER(ctype)):
        raise RuntimeError('expected {} pointer'.format(ctype))
    res = np.zeros(length, dtype=dtype)
    if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):
        raise RuntimeError('memmove failed')
    return res
[ "def", "ctypes2numpy", "(", "cptr", ",", "length", ",", "dtype", ")", ":", "NUMPY_TO_CTYPES_MAPPING", "=", "{", "np", ".", "float32", ":", "ctypes", ".", "c_float", ",", "np", ".", "uint32", ":", "ctypes", ".", "c_uint", ",", "}", "if", "dtype", "not", "in", "NUMPY_TO_CTYPES_MAPPING", ":", "raise", "RuntimeError", "(", "'Supported types: {}'", ".", "format", "(", "NUMPY_TO_CTYPES_MAPPING", ".", "keys", "(", ")", ")", ")", "ctype", "=", "NUMPY_TO_CTYPES_MAPPING", "[", "dtype", "]", "if", "not", "isinstance", "(", "cptr", ",", "ctypes", ".", "POINTER", "(", "ctype", ")", ")", ":", "raise", "RuntimeError", "(", "'expected {} pointer'", ".", "format", "(", "ctype", ")", ")", "res", "=", "np", ".", "zeros", "(", "length", ",", "dtype", "=", "dtype", ")", "if", "not", "ctypes", ".", "memmove", "(", "res", ".", "ctypes", ".", "data", ",", "cptr", ",", "length", "*", "res", ".", "strides", "[", "0", "]", ")", ":", "raise", "RuntimeError", "(", "'memmove failed'", ")", "return", "res" ]
Convert a ctypes pointer array to a numpy array.
[ "Convert", "a", "ctypes", "pointer", "array", "to", "a", "numpy", "array", "." ]
python
train
dslackw/slpkg
slpkg/utils.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/utils.py#L50-L57
def remove_dbs(self, double):
    """Remove double item from list
    """
    one = []
    for dup in double:
        if dup not in one:
            one.append(dup)
    return one
[ "def", "remove_dbs", "(", "self", ",", "double", ")", ":", "one", "=", "[", "]", "for", "dup", "in", "double", ":", "if", "dup", "not", "in", "one", ":", "one", ".", "append", "(", "dup", ")", "return", "one" ]
Remove double item from list
[ "Remove", "double", "item", "from", "list" ]
python
train
tanghaibao/goatools
goatools/gosubdag/plot/go_name_shorten.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/plot/go_name_shorten.py#L26-L60
def get_short_plot_name(self, goobj):
    """Shorten some GO names so plots are smaller."""
    name = goobj.name
    if self._keep_this(name):
        return self.replace_greek(name)
    name = name.replace("cellular response to chemical stimulus", "cellular rsp. to chemical stim.")
    depth = goobj.depth
    if depth > 1:
        name = name.replace("regulation of ", "reg. of ")
        name = name.replace("positive reg", "+reg")
        name = name.replace("negative reg", "-reg")
        name = name.replace("involved in", "in")
    if depth > 2:
        name = name.replace("antigen processing and presentation", "a.p.p")
        name = name.replace("MHC class I", "MHC-I")
        if depth == 4:
            if goobj.id == "GO:0002460":
                before = " ".join([
                    "adaptive immune response based on somatic recombination of",
                    "immune receptors built from immunoglobulin superfamily domains"])
                name = name.replace(
                    before,
                    "rsp. based on somatic recombination of Ig immune receptors")
    if depth > 3:
        name = name.replace("signaling pathway", "sig. pw.")
        name = name.replace("response", "rsp.")
        name = name.replace("immunoglobulin superfamily domains", "Ig domains")
        name = name.replace("immunoglobulin", "Ig")
    if depth > 4:
        name = name.replace("production", "prod.")
    if depth == 6 or depth == 5:
        name = name.replace("tumor necrosis factor", "TNF")
    name = self.replace_greek(name)
    return name
[ "def", "get_short_plot_name", "(", "self", ",", "goobj", ")", ":", "name", "=", "goobj", ".", "name", "if", "self", ".", "_keep_this", "(", "name", ")", ":", "return", "self", ".", "replace_greek", "(", "name", ")", "name", "=", "name", ".", "replace", "(", "\"cellular response to chemical stimulus\"", ",", "\"cellular rsp. to chemical stim.\"", ")", "depth", "=", "goobj", ".", "depth", "if", "depth", ">", "1", ":", "name", "=", "name", ".", "replace", "(", "\"regulation of \"", ",", "\"reg. of \"", ")", "name", "=", "name", ".", "replace", "(", "\"positive reg\"", ",", "\"+reg\"", ")", "name", "=", "name", ".", "replace", "(", "\"negative reg\"", ",", "\"-reg\"", ")", "name", "=", "name", ".", "replace", "(", "\"involved in\"", ",", "\"in\"", ")", "if", "depth", ">", "2", ":", "name", "=", "name", ".", "replace", "(", "\"antigen processing and presentation\"", ",", "\"a.p.p\"", ")", "name", "=", "name", ".", "replace", "(", "\"MHC class I\"", ",", "\"MHC-I\"", ")", "if", "depth", "==", "4", ":", "if", "goobj", ".", "id", "==", "\"GO:0002460\"", ":", "before", "=", "\" \"", ".", "join", "(", "[", "\"adaptive immune response based on somatic recombination of\"", ",", "\"immune receptors built from immunoglobulin superfamily domains\"", "]", ")", "name", "=", "name", ".", "replace", "(", "before", ",", "\"rsp. based on somatic recombination of Ig immune receptors\"", ")", "if", "depth", ">", "3", ":", "name", "=", "name", ".", "replace", "(", "\"signaling pathway\"", ",", "\"sig. pw.\"", ")", "name", "=", "name", ".", "replace", "(", "\"response\"", ",", "\"rsp.\"", ")", "name", "=", "name", ".", "replace", "(", "\"immunoglobulin superfamily domains\"", ",", "\"Ig domains\"", ")", "name", "=", "name", ".", "replace", "(", "\"immunoglobulin\"", ",", "\"Ig\"", ")", "if", "depth", ">", "4", ":", "name", "=", "name", ".", "replace", "(", "\"production\"", ",", "\"prod.\"", ")", "if", "depth", "==", "6", "or", "depth", "==", "5", ":", "name", "=", "name", ".", "replace", "(", "\"tumor necrosis factor\"", ",", "\"TNF\"", ")", "name", "=", "self", ".", "replace_greek", "(", "name", ")", "return", "name" ]
Shorten some GO names so plots are smaller.
[ "Shorten", "some", "GO", "names", "so", "plots", "are", "smaller", "." ]
python
train
biosignalsnotebooks/biosignalsnotebooks
biosignalsnotebooks/biosignalsnotebooks/synchronisation.py
https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/biosignalsnotebooks/synchronisation.py#L128-L160
def generate_sync_txt_file(in_path, channels=("CH1", "CH1"), new_path='sync_file.txt'):
    """
    -----
    Brief
    -----
    This function allows to generate a text file with synchronised signals from the input file(s).

    -----------
    Description
    -----------
    OpenSignals files follow a specific structure that allows to analyse all files in the same way.
    Furthermore, it allows those files to be opened and analysed in the OpenSignals software without the
    need of programming.

    This functions takes one or two files, synchronises the signals in channels and generates a new file
    in the new path.

    ----------
    Parameters
    ----------
    in_path : str or list
        If the input is a string, it is assumed that the two signals are in the same file, else, if the
        input is a list, it is assumed that the two signals are in different file (the list should contain
        the paths to the two files).

    channels : list
        List with the strings identifying the channels of each signal. (default: ("CH1", "CH1"))

    new_path : str
        The path to create the new file. (default: 'sync_file.txt')
    """
    if type(in_path) is str:
        _create_txt_from_str(in_path, channels, new_path)
    elif type(in_path) is list:
        _create_txt_from_list(in_path, channels, new_path)
    else:
        raise TypeError('The path should be a list of str or a str.')
[ "def", "generate_sync_txt_file", "(", "in_path", ",", "channels", "=", "(", "\"CH1\"", ",", "\"CH1\"", ")", ",", "new_path", "=", "'sync_file.txt'", ")", ":", "if", "type", "(", "in_path", ")", "is", "str", ":", "_create_txt_from_str", "(", "in_path", ",", "channels", ",", "new_path", ")", "elif", "type", "(", "in_path", ")", "is", "list", ":", "_create_txt_from_list", "(", "in_path", ",", "channels", ",", "new_path", ")", "else", ":", "raise", "TypeError", "(", "'The path should be a list of str or a str.'", ")" ]
----- Brief ----- This function allows to generate a text file with synchronised signals from the input file(s). ----------- Description ----------- OpenSignals files follow a specific structure that allows to analyse all files in the same way. Furthermore, it allows those files to be opened and analysed in the OpenSignals software without the need of programming. This functions takes one or two files, synchronises the signals in channels and generates a new file in the new path. ---------- Parameters ---------- in_path : str or list If the input is a string, it is assumed that the two signals are in the same file, else, if the input is a list, it is assumed that the two signals are in different file (the list should contain the paths to the two files). channels : list List with the strings identifying the channels of each signal. (default: ("CH1", "CH1")) new_path : str The path to create the new file. (default: 'sync_file.txt')
[ "-----", "Brief", "-----", "This", "function", "allows", "to", "generate", "a", "text", "file", "with", "synchronised", "signals", "from", "the", "input", "file", "(", "s", ")", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/hub.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/hub.py#L1237-L1287
def get_results(self, client_id, msg):
    """Get the result of 1 or more messages."""
    content = msg['content']
    msg_ids = sorted(set(content['msg_ids']))
    statusonly = content.get('status_only', False)
    pending = []
    completed = []
    content = dict(status='ok')
    content['pending'] = pending
    content['completed'] = completed
    buffers = []
    if not statusonly:
        try:
            matches = self.db.find_records(dict(msg_id={'$in':msg_ids}))
            # turn match list into dict, for faster lookup
            records = {}
            for rec in matches:
                records[rec['msg_id']] = rec
        except Exception:
            content = error.wrap_exception()
            self.session.send(self.query, "result_reply", content=content,
                              parent=msg, ident=client_id)
            return
    else:
        records = {}
    for msg_id in msg_ids:
        if msg_id in self.pending:
            pending.append(msg_id)
        elif msg_id in self.all_completed:
            completed.append(msg_id)
            if not statusonly:
                c, bufs = self._extract_record(records[msg_id])
                content[msg_id] = c
                buffers.extend(bufs)
        elif msg_id in records:
            if rec['completed']:
                completed.append(msg_id)
                c, bufs = self._extract_record(records[msg_id])
                content[msg_id] = c
                buffers.extend(bufs)
            else:
                pending.append(msg_id)
        else:
            try:
                raise KeyError('No such message: ' + msg_id)
            except:
                content = error.wrap_exception()
            break
    self.session.send(self.query, "result_reply", content=content,
                      parent=msg, ident=client_id,
                      buffers=buffers)
[ "def", "get_results", "(", "self", ",", "client_id", ",", "msg", ")", ":", "content", "=", "msg", "[", "'content'", "]", "msg_ids", "=", "sorted", "(", "set", "(", "content", "[", "'msg_ids'", "]", ")", ")", "statusonly", "=", "content", ".", "get", "(", "'status_only'", ",", "False", ")", "pending", "=", "[", "]", "completed", "=", "[", "]", "content", "=", "dict", "(", "status", "=", "'ok'", ")", "content", "[", "'pending'", "]", "=", "pending", "content", "[", "'completed'", "]", "=", "completed", "buffers", "=", "[", "]", "if", "not", "statusonly", ":", "try", ":", "matches", "=", "self", ".", "db", ".", "find_records", "(", "dict", "(", "msg_id", "=", "{", "'$in'", ":", "msg_ids", "}", ")", ")", "# turn match list into dict, for faster lookup", "records", "=", "{", "}", "for", "rec", "in", "matches", ":", "records", "[", "rec", "[", "'msg_id'", "]", "]", "=", "rec", "except", "Exception", ":", "content", "=", "error", ".", "wrap_exception", "(", ")", "self", ".", "session", ".", "send", "(", "self", ".", "query", ",", "\"result_reply\"", ",", "content", "=", "content", ",", "parent", "=", "msg", ",", "ident", "=", "client_id", ")", "return", "else", ":", "records", "=", "{", "}", "for", "msg_id", "in", "msg_ids", ":", "if", "msg_id", "in", "self", ".", "pending", ":", "pending", ".", "append", "(", "msg_id", ")", "elif", "msg_id", "in", "self", ".", "all_completed", ":", "completed", ".", "append", "(", "msg_id", ")", "if", "not", "statusonly", ":", "c", ",", "bufs", "=", "self", ".", "_extract_record", "(", "records", "[", "msg_id", "]", ")", "content", "[", "msg_id", "]", "=", "c", "buffers", ".", "extend", "(", "bufs", ")", "elif", "msg_id", "in", "records", ":", "if", "rec", "[", "'completed'", "]", ":", "completed", ".", "append", "(", "msg_id", ")", "c", ",", "bufs", "=", "self", ".", "_extract_record", "(", "records", "[", "msg_id", "]", ")", "content", "[", "msg_id", "]", "=", "c", "buffers", ".", "extend", "(", "bufs", ")", "else", ":", "pending", ".", "append", "(", "msg_id", ")", "else", ":", "try", ":", "raise", "KeyError", "(", "'No such message: '", "+", "msg_id", ")", "except", ":", "content", "=", "error", ".", "wrap_exception", "(", ")", "break", "self", ".", "session", ".", "send", "(", "self", ".", "query", ",", "\"result_reply\"", ",", "content", "=", "content", ",", "parent", "=", "msg", ",", "ident", "=", "client_id", ",", "buffers", "=", "buffers", ")" ]
Get the result of 1 or more messages.
[ "Get", "the", "result", "of", "1", "or", "more", "messages", "." ]
python
test
saltstack/salt
salt/modules/mod_random.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mod_random.py#L197-L220
def seed(range=10, hash=None):
    '''
    Returns a random number within a range.
    Optional hash argument can be any hashable object.
    If hash is omitted or None, the id of the minion is used.

    .. versionadded: 2015.8.0

    hash: None
        Any hashable object.

    range: 10
        Any valid integer number

    CLI Example:

    .. code-block:: bash

        salt '*' random.seed 10 hash=None
    '''
    if hash is None:
        hash = __grains__['id']
    random.seed(hash)
    return random.randrange(range)
[ "def", "seed", "(", "range", "=", "10", ",", "hash", "=", "None", ")", ":", "if", "hash", "is", "None", ":", "hash", "=", "__grains__", "[", "'id'", "]", "random", ".", "seed", "(", "hash", ")", "return", "random", ".", "randrange", "(", "range", ")" ]
Returns a random number within a range. Optional hash argument can be any hashable object. If hash is omitted or None, the id of the minion is used. .. versionadded: 2015.8.0 hash: None Any hashable object. range: 10 Any valid integer number CLI Example: .. code-block:: bash salt '*' random.seed 10 hash=None
[ "Returns", "a", "random", "number", "within", "a", "range", ".", "Optional", "hash", "argument", "can", "be", "any", "hashable", "object", ".", "If", "hash", "is", "omitted", "or", "None", "the", "id", "of", "the", "minion", "is", "used", "." ]
python
train
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/stats_exporter/__init__.py#L258-L285
def get_metric_descriptor(self, oc_md):
    """Convert an OC metric descriptor to a SD metric descriptor."""
    try:
        metric_kind, value_type = OC_MD_TO_SD_TYPE[oc_md.type]
    except KeyError:
        raise TypeError("Unsupported metric type: {}".format(oc_md.type))

    if self.options.metric_prefix:
        display_name_prefix = self.options.metric_prefix
    else:
        display_name_prefix = DEFAULT_DISPLAY_NAME_PREFIX

    desc_labels = new_label_descriptors(
        self.options.default_monitoring_labels, oc_md.label_keys)
    descriptor = monitoring_v3.types.MetricDescriptor(labels=desc_labels)
    metric_type = self.get_metric_type(oc_md)
    descriptor.type = metric_type
    descriptor.metric_kind = metric_kind
    descriptor.value_type = value_type
    descriptor.description = oc_md.description
    descriptor.unit = oc_md.unit
    descriptor.name = ("projects/{}/metricDescriptors/{}"
                       .format(self.options.project_id, metric_type))
    descriptor.display_name = ("{}/{}"
                               .format(display_name_prefix, oc_md.name))

    return descriptor
[ "def", "get_metric_descriptor", "(", "self", ",", "oc_md", ")", ":", "try", ":", "metric_kind", ",", "value_type", "=", "OC_MD_TO_SD_TYPE", "[", "oc_md", ".", "type", "]", "except", "KeyError", ":", "raise", "TypeError", "(", "\"Unsupported metric type: {}\"", ".", "format", "(", "oc_md", ".", "type", ")", ")", "if", "self", ".", "options", ".", "metric_prefix", ":", "display_name_prefix", "=", "self", ".", "options", ".", "metric_prefix", "else", ":", "display_name_prefix", "=", "DEFAULT_DISPLAY_NAME_PREFIX", "desc_labels", "=", "new_label_descriptors", "(", "self", ".", "options", ".", "default_monitoring_labels", ",", "oc_md", ".", "label_keys", ")", "descriptor", "=", "monitoring_v3", ".", "types", ".", "MetricDescriptor", "(", "labels", "=", "desc_labels", ")", "metric_type", "=", "self", ".", "get_metric_type", "(", "oc_md", ")", "descriptor", ".", "type", "=", "metric_type", "descriptor", ".", "metric_kind", "=", "metric_kind", "descriptor", ".", "value_type", "=", "value_type", "descriptor", ".", "description", "=", "oc_md", ".", "description", "descriptor", ".", "unit", "=", "oc_md", ".", "unit", "descriptor", ".", "name", "=", "(", "\"projects/{}/metricDescriptors/{}\"", ".", "format", "(", "self", ".", "options", ".", "project_id", ",", "metric_type", ")", ")", "descriptor", ".", "display_name", "=", "(", "\"{}/{}\"", ".", "format", "(", "display_name_prefix", ",", "oc_md", ".", "name", ")", ")", "return", "descriptor" ]
Convert an OC metric descriptor to a SD metric descriptor.
[ "Convert", "an", "OC", "metric", "descriptor", "to", "a", "SD", "metric", "descriptor", "." ]
python
train
asyncee/django-easy-select2
docs/source/_ext/djangodocs.py
https://github.com/asyncee/django-easy-select2/blob/f81bbaa91d0266029be7ef6d075d85f13273e3a5/docs/source/_ext/djangodocs.py#L364-L403
def visit_console_html(self, node):
    """Generate HTML for the console directive."""
    if self.builder.name in ('djangohtml', 'json') and node['win_console_text']:
        # Put a mark on the document object signaling the fact the directive
        # has been used on it.
        self.document._console_directive_used_flag = True
        uid = node['uid']
        self.body.append('''\
<div class="console-block" id="console-block-%(id)s">
<input class="c-tab-unix" id="c-tab-%(id)s-unix" type="radio" name="console-%(id)s" checked>
<label for="c-tab-%(id)s-unix" title="Linux/macOS">&#xf17c/&#xf179</label>
<input class="c-tab-win" id="c-tab-%(id)s-win" type="radio" name="console-%(id)s">
<label for="c-tab-%(id)s-win" title="Windows">&#xf17a</label>
<section class="c-content-unix" id="c-content-%(id)s-unix">\n''' % {'id': uid})
        try:
            self.visit_literal_block(node)
        except nodes.SkipNode:
            pass
        self.body.append('</section>\n')

        self.body.append('<section class="c-content-win" id="c-content-%(id)s-win">\n' % {'id': uid})
        win_text = node['win_console_text']
        highlight_args = {'force': True}
        if 'linenos' in node:
            linenos = node['linenos']
        else:
            linenos = win_text.count('\n') >= self.highlightlinenothreshold - 1

        def warner(msg):
            self.builder.warn(msg, (self.builder.current_docname, node.line))

        highlighted = self.highlighter.highlight_block(
            win_text, 'doscon', warn=warner, linenos=linenos, **highlight_args
        )
        self.body.append(highlighted)
        self.body.append('</section>\n')
        self.body.append('</div>\n')
        raise nodes.SkipNode
    else:
        self.visit_literal_block(node)
[ "def", "visit_console_html", "(", "self", ",", "node", ")", ":", "if", "self", ".", "builder", ".", "name", "in", "(", "'djangohtml'", ",", "'json'", ")", "and", "node", "[", "'win_console_text'", "]", ":", "# Put a mark on the document object signaling the fact the directive", "# has been used on it.", "self", ".", "document", ".", "_console_directive_used_flag", "=", "True", "uid", "=", "node", "[", "'uid'", "]", "self", ".", "body", ".", "append", "(", "'''\\\n<div class=\"console-block\" id=\"console-block-%(id)s\">\n<input class=\"c-tab-unix\" id=\"c-tab-%(id)s-unix\" type=\"radio\" name=\"console-%(id)s\" checked>\n<label for=\"c-tab-%(id)s-unix\" title=\"Linux/macOS\">&#xf17c/&#xf179</label>\n<input class=\"c-tab-win\" id=\"c-tab-%(id)s-win\" type=\"radio\" name=\"console-%(id)s\">\n<label for=\"c-tab-%(id)s-win\" title=\"Windows\">&#xf17a</label>\n<section class=\"c-content-unix\" id=\"c-content-%(id)s-unix\">\\n'''", "%", "{", "'id'", ":", "uid", "}", ")", "try", ":", "self", ".", "visit_literal_block", "(", "node", ")", "except", "nodes", ".", "SkipNode", ":", "pass", "self", ".", "body", ".", "append", "(", "'</section>\\n'", ")", "self", ".", "body", ".", "append", "(", "'<section class=\"c-content-win\" id=\"c-content-%(id)s-win\">\\n'", "%", "{", "'id'", ":", "uid", "}", ")", "win_text", "=", "node", "[", "'win_console_text'", "]", "highlight_args", "=", "{", "'force'", ":", "True", "}", "if", "'linenos'", "in", "node", ":", "linenos", "=", "node", "[", "'linenos'", "]", "else", ":", "linenos", "=", "win_text", ".", "count", "(", "'\\n'", ")", ">=", "self", ".", "highlightlinenothreshold", "-", "1", "def", "warner", "(", "msg", ")", ":", "self", ".", "builder", ".", "warn", "(", "msg", ",", "(", "self", ".", "builder", ".", "current_docname", ",", "node", ".", "line", ")", ")", "highlighted", "=", "self", ".", "highlighter", ".", "highlight_block", "(", "win_text", ",", "'doscon'", ",", "warn", "=", "warner", ",", "linenos", "=", "linenos", ",", "*", "*", "highlight_args", ")", "self", ".", "body", ".", "append", "(", "highlighted", ")", "self", ".", "body", ".", "append", "(", "'</section>\\n'", ")", "self", ".", "body", ".", "append", "(", "'</div>\\n'", ")", "raise", "nodes", ".", "SkipNode", "else", ":", "self", ".", "visit_literal_block", "(", "node", ")" ]
Generate HTML for the console directive.
[ "Generate", "HTML", "for", "the", "console", "directive", "." ]
python
train
ttinies/sc2gameLobby
sc2gameLobby/debugCmds.py
https://github.com/ttinies/sc2gameLobby/blob/5352d51d53ddeb4858e92e682da89c4434123e52/sc2gameLobby/debugCmds.py#L28-L41
def modify(*units):
    """set the unit defined by in-game tag with desired properties
    NOTE: all units must be owned by the same player or the command fails."""
    ret = []
    for unit in units:
        # add one command for each attribute
        for attr, idx in [("energy", 1), ("life", 2), ("shields", 3)]:
            # see debug_pb2.UnitValue for enum declaration
            newValue = getattr(unit, attr)
            if not newValue:
                continue  # don't bother setting something that isn't necessary
            new = DebugCommand(unit_value=DebugSetUnitValue(
                value=newValue,
                unit_value=idx,
                unit_tag=unit.tag))
            ret.append(new)
    return ret
[ "def", "modify", "(", "*", "units", ")", ":", "ret", "=", "[", "]", "for", "unit", "in", "units", ":", "# add one command for each attribute", "for", "attr", ",", "idx", "in", "[", "(", "\"energy\"", ",", "1", ")", ",", "(", "\"life\"", ",", "2", ")", ",", "(", "\"shields\"", ",", "3", ")", "]", ":", "# see debug_pb2.UnitValue for enum declaration", "newValue", "=", "getattr", "(", "unit", ",", "attr", ")", "if", "not", "newValue", ":", "continue", "# don't bother setting something that isn't necessary", "new", "=", "DebugCommand", "(", "unit_value", "=", "DebugSetUnitValue", "(", "value", "=", "newValue", ",", "unit_value", "=", "idx", ",", "unit_tag", "=", "unit", ".", "tag", ")", ")", "ret", ".", "append", "(", "new", ")", "return", "ret" ]
set the unit defined by in-game tag with desired properties NOTE: all units must be owned by the same player or the command fails.
[ "set", "the", "unit", "defined", "by", "in", "-", "game", "tag", "with", "desired", "properties", "NOTE", ":", "all", "units", "must", "be", "owned", "by", "the", "same", "player", "or", "the", "command", "fails", "." ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/geometry/triangulation.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/geometry/triangulation.py#L537-L557
def _adjacent_tri(self, edge, i):
    """
    Given a triangle formed by edge and i, return the triangle that shares
    edge. *i* may be either a point or the entire triangle.
    """
    if not np.isscalar(i):
        i = [x for x in i if x not in edge][0]

    try:
        pt1 = self._edges_lookup[edge]
        pt2 = self._edges_lookup[(edge[1], edge[0])]
    except KeyError:
        return None

    if pt1 == i:
        return (edge[1], edge[0], pt2)
    elif pt2 == i:
        return (edge[1], edge[0], pt1)
    else:
        raise RuntimeError("Edge %s and point %d do not form a triangle "
                           "in this mesh." % (edge, i))
[ "def", "_adjacent_tri", "(", "self", ",", "edge", ",", "i", ")", ":", "if", "not", "np", ".", "isscalar", "(", "i", ")", ":", "i", "=", "[", "x", "for", "x", "in", "i", "if", "x", "not", "in", "edge", "]", "[", "0", "]", "try", ":", "pt1", "=", "self", ".", "_edges_lookup", "[", "edge", "]", "pt2", "=", "self", ".", "_edges_lookup", "[", "(", "edge", "[", "1", "]", ",", "edge", "[", "0", "]", ")", "]", "except", "KeyError", ":", "return", "None", "if", "pt1", "==", "i", ":", "return", "(", "edge", "[", "1", "]", ",", "edge", "[", "0", "]", ",", "pt2", ")", "elif", "pt2", "==", "i", ":", "return", "(", "edge", "[", "1", "]", ",", "edge", "[", "0", "]", ",", "pt1", ")", "else", ":", "raise", "RuntimeError", "(", "\"Edge %s and point %d do not form a triangle \"", "\"in this mesh.\"", "%", "(", "edge", ",", "i", ")", ")" ]
Given a triangle formed by edge and i, return the triangle that shares edge. *i* may be either a point or the entire triangle.
[ "Given", "a", "triangle", "formed", "by", "edge", "and", "i", "return", "the", "triangle", "that", "shares", "edge", ".", "*", "i", "*", "may", "be", "either", "a", "point", "or", "the", "entire", "triangle", "." ]
python
train
nerdvegas/rez
src/rez/vendor/pygraph/algorithms/critical.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/critical.py#L98-L163
def critical_path(graph):
    """
    Compute and return the critical path in an acyclic directed weighted graph.

    @attention: This function is only meaningful for directed weighted acyclic graphs

    @type graph: digraph
    @param graph: Digraph

    @rtype: List
    @return: List containing all the nodes in the path (or an empty array if the
        graph contains a cycle)
    """
    #if the graph contains a cycle we return an empty array
    if not len(find_cycle(graph)) == 0:
        return []

    #this empty dictionary will contain a tuple for every single node
    #the tuple contains the information about the most costly predecessor
    #of the given node and the cost of the path to this node
    #(predecessor, cost)
    node_tuples = {}

    topological_nodes = topological_sorting(graph)

    #all the tuples must be set to a default value for every node in the graph
    for node in topological_nodes:
        node_tuples.update( {node :(None, 0)} )

    #run trough all the nodes in a topological order
    for node in topological_nodes:
        predecessors = []
        #we must check all the predecessors
        for pre in graph.incidents(node):
            max_pre = node_tuples[pre][1]
            predecessors.append( (pre, graph.edge_weight( (pre, node) ) + max_pre ) )

        max = 0; max_tuple = (None, 0)
        for i in predecessors: #look for the most costly predecessor
            if i[1] >= max:
                max = i[1]
                max_tuple = i

        #assign the maximum value to the given node in the node_tuples dictionary
        node_tuples[node] = max_tuple

    #find the critical node
    max = 0; critical_node = None
    for k, v in list(node_tuples.items()):
        if v[1] >= max:
            max = v[1]
            critical_node = k

    path = []

    #find the critical path with backtracking trought the dictionary
    def mid_critical_path(end):
        if node_tuples[end][0] != None:
            path.append(end)
            mid_critical_path(node_tuples[end][0])
        else:
            path.append(end)

    #call the recursive function
    mid_critical_path(critical_node)

    path.reverse()
    return path
[ "def", "critical_path", "(", "graph", ")", ":", "#if the graph contains a cycle we return an empty array", "if", "not", "len", "(", "find_cycle", "(", "graph", ")", ")", "==", "0", ":", "return", "[", "]", "#this empty dictionary will contain a tuple for every single node", "#the tuple contains the information about the most costly predecessor ", "#of the given node and the cost of the path to this node", "#(predecessor, cost)", "node_tuples", "=", "{", "}", "topological_nodes", "=", "topological_sorting", "(", "graph", ")", "#all the tuples must be set to a default value for every node in the graph", "for", "node", "in", "topological_nodes", ":", "node_tuples", ".", "update", "(", "{", "node", ":", "(", "None", ",", "0", ")", "}", ")", "#run trough all the nodes in a topological order", "for", "node", "in", "topological_nodes", ":", "predecessors", "=", "[", "]", "#we must check all the predecessors", "for", "pre", "in", "graph", ".", "incidents", "(", "node", ")", ":", "max_pre", "=", "node_tuples", "[", "pre", "]", "[", "1", "]", "predecessors", ".", "append", "(", "(", "pre", ",", "graph", ".", "edge_weight", "(", "(", "pre", ",", "node", ")", ")", "+", "max_pre", ")", ")", "max", "=", "0", "max_tuple", "=", "(", "None", ",", "0", ")", "for", "i", "in", "predecessors", ":", "#look for the most costly predecessor", "if", "i", "[", "1", "]", ">=", "max", ":", "max", "=", "i", "[", "1", "]", "max_tuple", "=", "i", "#assign the maximum value to the given node in the node_tuples dictionary", "node_tuples", "[", "node", "]", "=", "max_tuple", "#find the critical node", "max", "=", "0", "critical_node", "=", "None", "for", "k", ",", "v", "in", "list", "(", "node_tuples", ".", "items", "(", ")", ")", ":", "if", "v", "[", "1", "]", ">=", "max", ":", "max", "=", "v", "[", "1", "]", "critical_node", "=", "k", "path", "=", "[", "]", "#find the critical path with backtracking trought the dictionary", "def", "mid_critical_path", "(", "end", ")", ":", "if", "node_tuples", "[", "end", "]", "[", "0", "]", "!=", "None", ":", "path", ".", "append", "(", "end", ")", "mid_critical_path", "(", "node_tuples", "[", "end", "]", "[", "0", "]", ")", "else", ":", "path", ".", "append", "(", "end", ")", "#call the recursive function", "mid_critical_path", "(", "critical_node", ")", "path", ".", "reverse", "(", ")", "return", "path" ]
Compute and return the critical path in an acyclic directed weighted graph. @attention: This function is only meaningful for directed weighted acyclic graphs @type graph: digraph @param graph: Digraph @rtype: List @return: List containing all the nodes in the path (or an empty array if the graph contains a cycle)
[ "Compute", "and", "return", "the", "critical", "path", "in", "an", "acyclic", "directed", "weighted", "graph", "." ]
python
train
awacha/sastool
sastool/utils2d/corrections.py
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/utils2d/corrections.py#L26-L39
def solidangle(twotheta, sampletodetectordistance, pixelsize=None):
    """Solid-angle correction for two-dimensional SAS images

    Inputs:
        twotheta: matrix of two-theta values
        sampletodetectordistance: sample-to-detector distance
        pixelsize: the pixel size in mm

    The output matrix is of the same shape as twotheta. The scattering intensity
    matrix should be multiplied by it.
    """
    if pixelsize is None:
        pixelsize = 1
    return sampletodetectordistance ** 2 / np.cos(twotheta) ** 3 / pixelsize ** 2
[ "def", "solidangle", "(", "twotheta", ",", "sampletodetectordistance", ",", "pixelsize", "=", "None", ")", ":", "if", "pixelsize", "is", "None", ":", "pixelsize", "=", "1", "return", "sampletodetectordistance", "**", "2", "/", "np", ".", "cos", "(", "twotheta", ")", "**", "3", "/", "pixelsize", "**", "2" ]
Solid-angle correction for two-dimensional SAS images Inputs: twotheta: matrix of two-theta values sampletodetectordistance: sample-to-detector distance pixelsize: the pixel size in mm The output matrix is of the same shape as twotheta. The scattering intensity matrix should be multiplied by it.
[ "Solid", "-", "angle", "correction", "for", "two", "-", "dimensional", "SAS", "images" ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_sentry/c7n_sentry/c7nsentry.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_sentry/c7n_sentry/c7nsentry.py#L97-L116
def process_log_event(event, context):
    """Lambda Entrypoint - Log Subscriber

    Format log events and relay to sentry (direct or sqs)
    """
    init()

    # Grab the actual error log payload
    serialized = event['awslogs'].pop('data')
    data = json.loads(zlib.decompress(
        base64.b64decode(serialized), 16 + zlib.MAX_WBITS))
    msg = get_sentry_message(config, data)
    if msg is None:
        return
    if config['sentry_dsn']:
        # Deliver directly to sentry
        send_sentry_message(config['sentry_dsn'], msg)
    elif config['sentry_sqs']:
        # Delivery indirectly via sqs
        sqs.send_message(
            QueueUrl=config['sentry_sqs'])
[ "def", "process_log_event", "(", "event", ",", "context", ")", ":", "init", "(", ")", "# Grab the actual error log payload", "serialized", "=", "event", "[", "'awslogs'", "]", ".", "pop", "(", "'data'", ")", "data", "=", "json", ".", "loads", "(", "zlib", ".", "decompress", "(", "base64", ".", "b64decode", "(", "serialized", ")", ",", "16", "+", "zlib", ".", "MAX_WBITS", ")", ")", "msg", "=", "get_sentry_message", "(", "config", ",", "data", ")", "if", "msg", "is", "None", ":", "return", "if", "config", "[", "'sentry_dsn'", "]", ":", "# Deliver directly to sentry", "send_sentry_message", "(", "config", "[", "'sentry_dsn'", "]", ",", "msg", ")", "elif", "config", "[", "'sentry_sqs'", "]", ":", "# Delivery indirectly via sqs", "sqs", ".", "send_message", "(", "QueueUrl", "=", "config", "[", "'sentry_sqs'", "]", ")" ]
Lambda Entrypoint - Log Subscriber Format log events and relay to sentry (direct or sqs)
[ "Lambda", "Entrypoint", "-", "Log", "Subscriber" ]
python
train
hearsaycorp/normalize
normalize/coll.py
https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/coll.py#L470-L498
def _make_generic(of, coll):
    """Used to make a new Collection type, without that type having to be
    defined explicitly.  Generates a new type name using the item type and a
    'suffix' Collection class property.

    args:

        ``of=``\ *Record type*
            The type of values of the collection

        ``coll=``\ *Collection sub-class*
            The container class.
    """
    assert(issubclass(coll, Collection))
    key = (coll.__name__, "%s.%s" % (of.__module__, of.__name__))
    if key in GENERIC_TYPES:
        if GENERIC_TYPES[key].itemtype != of:
            raise exc.PropertyNotUnique(key=key)
    else:
        # oh, we get to name it?  Goodie!
        generic_name = "%s%s" % (of.__name__, coll.suffix)
        GENERIC_TYPES[key] = type(
            generic_name, (coll, _Generic), dict(itemtype=of, generic_key=key)
        )
        mod = sys.modules[of.__module__]
        if not hasattr(mod, generic_name):
            setattr(mod, generic_name, GENERIC_TYPES[key])
    return GENERIC_TYPES[key]
[ "def", "_make_generic", "(", "of", ",", "coll", ")", ":", "assert", "(", "issubclass", "(", "coll", ",", "Collection", ")", ")", "key", "=", "(", "coll", ".", "__name__", ",", "\"%s.%s\"", "%", "(", "of", ".", "__module__", ",", "of", ".", "__name__", ")", ")", "if", "key", "in", "GENERIC_TYPES", ":", "if", "GENERIC_TYPES", "[", "key", "]", ".", "itemtype", "!=", "of", ":", "raise", "exc", ".", "PropertyNotUnique", "(", "key", "=", "key", ")", "else", ":", "# oh, we get to name it? Goodie!", "generic_name", "=", "\"%s%s\"", "%", "(", "of", ".", "__name__", ",", "coll", ".", "suffix", ")", "GENERIC_TYPES", "[", "key", "]", "=", "type", "(", "generic_name", ",", "(", "coll", ",", "_Generic", ")", ",", "dict", "(", "itemtype", "=", "of", ",", "generic_key", "=", "key", ")", ")", "mod", "=", "sys", ".", "modules", "[", "of", ".", "__module__", "]", "if", "not", "hasattr", "(", "mod", ",", "generic_name", ")", ":", "setattr", "(", "mod", ",", "generic_name", ",", "GENERIC_TYPES", "[", "key", "]", ")", "return", "GENERIC_TYPES", "[", "key", "]" ]
Used to make a new Collection type, without that type having to be defined explicitly. Generates a new type name using the item type and a 'suffix' Collection class property. args: ``of=``\ *Record type* The type of values of the collection ``coll=``\ *Collection sub-class* The container class.
[ "Used", "to", "make", "a", "new", "Collection", "type", "without", "that", "type", "having", "to", "be", "defined", "explicitly", ".", "Generates", "a", "new", "type", "name", "using", "the", "item", "type", "and", "a", "suffix", "Collection", "class", "property", "." ]
python
train
delph-in/pydelphin
delphin/itsdb.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L324-L331
def from_file(cls, source):
    """Instantiate Relations from a relations file."""
    if hasattr(source, 'read'):
        relations = cls.from_string(source.read())
    else:
        with open(source) as f:
            relations = cls.from_string(f.read())
    return relations
[ "def", "from_file", "(", "cls", ",", "source", ")", ":", "if", "hasattr", "(", "source", ",", "'read'", ")", ":", "relations", "=", "cls", ".", "from_string", "(", "source", ".", "read", "(", ")", ")", "else", ":", "with", "open", "(", "source", ")", "as", "f", ":", "relations", "=", "cls", ".", "from_string", "(", "f", ".", "read", "(", ")", ")", "return", "relations" ]
Instantiate Relations from a relations file.
[ "Instantiate", "Relations", "from", "a", "relations", "file", "." ]
python
train
crdoconnor/pathquery
hitch/key.py
https://github.com/crdoconnor/pathquery/blob/4905fef27fc666ea4511eb0eee5098f754bb52ed/hitch/key.py#L291-L302
def regression():
    """
    Run regression testing - lint and then run all tests.
    """
    # HACK: Start using hitchbuildpy to get around this.
    Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
    storybook = _storybook({}).only_uninherited()
    #storybook.with_params(**{"python version": "2.7.10"})\
        #.ordered_by_name().play()
    Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
    storybook.with_params(**{"python version": "3.5.0"}).ordered_by_name().play()
    lint()
[ "def", "regression", "(", ")", ":", "# HACK: Start using hitchbuildpy to get around this.", "Command", "(", "\"touch\"", ",", "DIR", ".", "project", ".", "joinpath", "(", "\"pathquery\"", ",", "\"__init__.py\"", ")", ".", "abspath", "(", ")", ")", ".", "run", "(", ")", "storybook", "=", "_storybook", "(", "{", "}", ")", ".", "only_uninherited", "(", ")", "#storybook.with_params(**{\"python version\": \"2.7.10\"})\\", "#.ordered_by_name().play()", "Command", "(", "\"touch\"", ",", "DIR", ".", "project", ".", "joinpath", "(", "\"pathquery\"", ",", "\"__init__.py\"", ")", ".", "abspath", "(", ")", ")", ".", "run", "(", ")", "storybook", ".", "with_params", "(", "*", "*", "{", "\"python version\"", ":", "\"3.5.0\"", "}", ")", ".", "ordered_by_name", "(", ")", ".", "play", "(", ")", "lint", "(", ")" ]
Run regression testing - lint and then run all tests.
[ "Run", "regression", "testing", "-", "lint", "and", "then", "run", "all", "tests", "." ]
python
train
saltstack/salt
salt/modules/netbox.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netbox.py#L259-L283
def create_device_role(role, color):
    '''
    .. versionadded:: 2019.2.0

    Create a device role

    role
        String of device role, e.g., ``router``

    CLI Example:

    .. code-block:: bash

        salt myminion netbox.create_device_role router
    '''
    nb_role = get_('dcim', 'device-roles', name=role)
    if nb_role:
        return False
    else:
        payload = {'name': role, 'slug': slugify(role), 'color': color}
        role = _add('dcim', 'device-roles', payload)
        if role:
            return {'dcim': {'device-roles': payload}}
        else:
            return False
[ "def", "create_device_role", "(", "role", ",", "color", ")", ":", "nb_role", "=", "get_", "(", "'dcim'", ",", "'device-roles'", ",", "name", "=", "role", ")", "if", "nb_role", ":", "return", "False", "else", ":", "payload", "=", "{", "'name'", ":", "role", ",", "'slug'", ":", "slugify", "(", "role", ")", ",", "'color'", ":", "color", "}", "role", "=", "_add", "(", "'dcim'", ",", "'device-roles'", ",", "payload", ")", "if", "role", ":", "return", "{", "'dcim'", ":", "{", "'device-roles'", ":", "payload", "}", "}", "else", ":", "return", "False" ]
.. versionadded:: 2019.2.0 Create a device role role String of device role, e.g., ``router`` CLI Example: .. code-block:: bash salt myminion netbox.create_device_role router
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
GibbsConsulting/django-plotly-dash
django_plotly_dash/dash_wrapper.py
https://github.com/GibbsConsulting/django-plotly-dash/blob/773ed081fc2ea3cc7607590322a14686a7a79bc5/django_plotly_dash/dash_wrapper.py#L433-L436
def _fix_callback_item(self, item):
    'Update component identifier'
    item.component_id = self._fix_id(item.component_id)
    return item
[ "def", "_fix_callback_item", "(", "self", ",", "item", ")", ":", "item", ".", "component_id", "=", "self", ".", "_fix_id", "(", "item", ".", "component_id", ")", "return", "item" ]
Update component identifier
[ "Update", "component", "identifier" ]
python
train
BD2KOnFHIR/fhirtordf
fhirtordf/fhir/picklejar.py
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/fhir/picklejar.py#L107-L121
def get(self, name: str, sig: Tuple) -> Optional[object]:
    """
    Return the object representing name if it is cached
    :param name: name of object
    :param sig: unique signature of object
    :return: object if exists and signature matches
    """
    if name not in self._cache:
        return None
    if self._cache[name].sig != sig:
        del self._cache[name]
        self._update()
        return None
    with open(self._cache[name].loc, 'rb') as f:
        return pickle.load(f)
[ "def", "get", "(", "self", ",", "name", ":", "str", ",", "sig", ":", "Tuple", ")", "->", "Optional", "[", "object", "]", ":", "if", "name", "not", "in", "self", ".", "_cache", ":", "return", "None", "if", "self", ".", "_cache", "[", "name", "]", ".", "sig", "!=", "sig", ":", "del", "self", ".", "_cache", "[", "name", "]", "self", ".", "_update", "(", ")", "return", "None", "with", "open", "(", "self", ".", "_cache", "[", "name", "]", ".", "loc", ",", "'rb'", ")", "as", "f", ":", "return", "pickle", ".", "load", "(", "f", ")" ]
Return the object representing name if it is cached :param name: name of object :param sig: unique signature of object :return: object if exists and signature matches
[ "Return", "the", "object", "representing", "name", "if", "it", "is", "cached", ":", "param", "name", ":", "name", "of", "object", ":", "param", "sig", ":", "unique", "signature", "of", "object", ":", "return", ":", "object", "if", "exists", "and", "signature", "matches" ]
python
train
deepmind/sonnet
sonnet/python/modules/conv.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/conv.py#L994-L1093
def _build(self, inputs):
    """Connects the _ConvNDTranspose module into the graph.

    If this is not the first time the module has been connected to the graph,
    the input Tensor provided here must have the same final N dimensions, in
    order for the existing variables to be the correct size for the
    multiplication. The batch size may differ for each connection.

    Args:
      inputs: A Tensor of shape `data_format` and of type `tf.float16`,
          `tf.bfloat16` or `tf.float32`.

    Returns:
      A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16`
          or `tf.float32`.

    Raises:
      ValueError: If connecting the module into the graph any time after the
          first time and the inferred size of the input does not match previous
          invocations.
      base.IncompatibleShapeError: If the input tensor has the wrong number of
          dimensions.
      base.UnderspecifiedError: If the channel dimension of `inputs` isn't
          defined.
      base.IncompatibleShapeError: If `output_shape` is an iterable and is not
          in the format `(out_height, out_width)`.
      TypeError: If input Tensor dtype is not compatible with either
          `tf.float16`, `tf.bfloat16` or `tf.float32`.
    """
    _verify_inputs(inputs, self._channel_index, self._data_format)
    self._input_shape = tuple(inputs.get_shape().as_list())
    self._input_channels = self._input_shape[self._channel_index]

    # First, figure out what the non-(N,C) dims will be.
    if self._use_default_output_shape:
      def _default_transpose_size_wrapper():
        if self._data_format.startswith("NC"):
          input_size = self._input_shape[2:]
          stride = self.stride[2:]
        else:  # self._data_format == N*WC
          input_size = self._input_shape[1:-1]
          stride = self.stride[1:-1]
        return _default_transpose_size(input_size,
                                       stride,
                                       kernel_shape=self._kernel_shape,
                                       padding=self._padding)
      self._output_shape = _default_transpose_size_wrapper

    if len(self.output_shape) != self._n:
      raise base.IncompatibleShapeError(
          "Output shape must have rank {}, but instead was {}".format(
              self._n, len(self.output_shape)))

    # Now, construct the size of the output, including the N + C dims.
    output_shape = self._infer_all_output_dims(inputs)

    self._w = self._construct_w(inputs)

    if self._n == 1:
      # Add a dimension for the height.
      if self._data_format == DATA_FORMAT_NWC:
        h_dim = 1
        two_dim_conv_data_format = DATA_FORMAT_NHWC
      else:  # self._data_format == DATA_FORMAT_NCW
        h_dim = 2
        two_dim_conv_data_format = DATA_FORMAT_NCHW
      inputs = tf.expand_dims(inputs, h_dim)
      two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:]
      outputs = tf.nn.conv2d_transpose(inputs,
                                       self._w,
                                       output_shape,
                                       strides=two_dim_conv_stride,
                                       padding=self._padding,
                                       data_format=two_dim_conv_data_format)
      # Remove the height dimension to return a 3D tensor.
      outputs = tf.squeeze(outputs, [h_dim])
    elif self._n == 2:
      outputs = tf.nn.conv2d_transpose(inputs,
                                       self._w,
                                       output_shape,
                                       strides=self._stride,
                                       padding=self._padding,
                                       data_format=self._data_format)
    else:
      outputs = tf.nn.conv3d_transpose(inputs,
                                       self._w,
                                       output_shape,
                                       strides=self._stride,
                                       padding=self._padding,
                                       data_format=self._data_format)

    if self._use_bias:
      self._b, outputs = _apply_bias(
          inputs, outputs, self._channel_index, self._data_format,
          self._output_channels, self._initializers, self._partitioners,
          self._regularizers)

    outputs = self._recover_shape_information(inputs, outputs)

    return outputs
[ "def", "_build", "(", "self", ",", "inputs", ")", ":", "_verify_inputs", "(", "inputs", ",", "self", ".", "_channel_index", ",", "self", ".", "_data_format", ")", "self", ".", "_input_shape", "=", "tuple", "(", "inputs", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", ")", "self", ".", "_input_channels", "=", "self", ".", "_input_shape", "[", "self", ".", "_channel_index", "]", "# First, figure out what the non-(N,C) dims will be.", "if", "self", ".", "_use_default_output_shape", ":", "def", "_default_transpose_size_wrapper", "(", ")", ":", "if", "self", ".", "_data_format", ".", "startswith", "(", "\"NC\"", ")", ":", "input_size", "=", "self", ".", "_input_shape", "[", "2", ":", "]", "stride", "=", "self", ".", "stride", "[", "2", ":", "]", "else", ":", "# self._data_format == N*WC", "input_size", "=", "self", ".", "_input_shape", "[", "1", ":", "-", "1", "]", "stride", "=", "self", ".", "stride", "[", "1", ":", "-", "1", "]", "return", "_default_transpose_size", "(", "input_size", ",", "stride", ",", "kernel_shape", "=", "self", ".", "_kernel_shape", ",", "padding", "=", "self", ".", "_padding", ")", "self", ".", "_output_shape", "=", "_default_transpose_size_wrapper", "if", "len", "(", "self", ".", "output_shape", ")", "!=", "self", ".", "_n", ":", "raise", "base", ".", "IncompatibleShapeError", "(", "\"Output shape must have rank {}, but instead was {}\"", ".", "format", "(", "self", ".", "_n", ",", "len", "(", "self", ".", "output_shape", ")", ")", ")", "# Now, construct the size of the output, including the N + C dims.", "output_shape", "=", "self", ".", "_infer_all_output_dims", "(", "inputs", ")", "self", ".", "_w", "=", "self", ".", "_construct_w", "(", "inputs", ")", "if", "self", ".", "_n", "==", "1", ":", "# Add a dimension for the height.", "if", "self", ".", "_data_format", "==", "DATA_FORMAT_NWC", ":", "h_dim", "=", "1", "two_dim_conv_data_format", "=", "DATA_FORMAT_NHWC", "else", ":", "# self._data_format == DATA_FORMAT_NCW", "h_dim", "=", "2", "two_dim_conv_data_format", "=", "DATA_FORMAT_NCHW", "inputs", "=", "tf", ".", "expand_dims", "(", "inputs", ",", "h_dim", ")", "two_dim_conv_stride", "=", "self", ".", "stride", "[", ":", "h_dim", "]", "+", "(", "1", ",", ")", "+", "self", ".", "stride", "[", "h_dim", ":", "]", "outputs", "=", "tf", ".", "nn", ".", "conv2d_transpose", "(", "inputs", ",", "self", ".", "_w", ",", "output_shape", ",", "strides", "=", "two_dim_conv_stride", ",", "padding", "=", "self", ".", "_padding", ",", "data_format", "=", "two_dim_conv_data_format", ")", "# Remove the height dimension to return a 3D tensor.", "outputs", "=", "tf", ".", "squeeze", "(", "outputs", ",", "[", "h_dim", "]", ")", "elif", "self", ".", "_n", "==", "2", ":", "outputs", "=", "tf", ".", "nn", ".", "conv2d_transpose", "(", "inputs", ",", "self", ".", "_w", ",", "output_shape", ",", "strides", "=", "self", ".", "_stride", ",", "padding", "=", "self", ".", "_padding", ",", "data_format", "=", "self", ".", "_data_format", ")", "else", ":", "outputs", "=", "tf", ".", "nn", ".", "conv3d_transpose", "(", "inputs", ",", "self", ".", "_w", ",", "output_shape", ",", "strides", "=", "self", ".", "_stride", ",", "padding", "=", "self", ".", "_padding", ",", "data_format", "=", "self", ".", "_data_format", ")", "if", "self", ".", "_use_bias", ":", "self", ".", "_b", ",", "outputs", "=", "_apply_bias", "(", "inputs", ",", "outputs", ",", "self", ".", "_channel_index", ",", "self", ".", "_data_format", ",", "self", ".", "_output_channels", ",", "self", ".", "_initializers", ",", 
"self", ".", "_partitioners", ",", "self", ".", "_regularizers", ")", "outputs", "=", "self", ".", "_recover_shape_information", "(", "inputs", ",", "outputs", ")", "return", "outputs" ]
Connects the _ConvNDTranspose module into the graph. If this is not the first time the module has been connected to the graph, the input Tensor provided here must have the same final N dimensions, in order for the existing variables to be the correct size for the multiplication. The batch size may differ for each connection. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Raises: ValueError: If connecting the module into the graph any time after the first time and the inferred size of the input does not match previous invocations. base.IncompatibleShapeError: If the input tensor has the wrong number of dimensions. base.UnderspecifiedError: If the channel dimension of `inputs` isn't defined. base.IncompatibleShapeError: If `output_shape` is an iterable and is not in the format `(out_height, out_width)`. TypeError: If input Tensor dtype is not compatible with either `tf.float16`, `tf.bfloat16` or `tf.float32`.
[ "Connects", "the", "_ConvNDTranspose", "module", "into", "the", "graph", "." ]
python
train
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/refactor.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/refactor.py#L303-L321
def refactor_dir(self, dir_name, write=False, doctests_only=False):
    """Descends down a directory and refactor every Python file found.

    Python files are assumed to have a .py extension.

    Files and subdirectories starting with '.' are skipped.
    """
    py_ext = os.extsep + "py"
    for dirpath, dirnames, filenames in os.walk(dir_name):
        self.log_debug("Descending into %s", dirpath)
        dirnames.sort()
        filenames.sort()
        for name in filenames:
            if (not name.startswith(".") and
                    os.path.splitext(name)[1] == py_ext):
                fullname = os.path.join(dirpath, name)
                self.refactor_file(fullname, write, doctests_only)
        # Modify dirnames in-place to remove subdirs with leading dots
        dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
[ "def", "refactor_dir", "(", "self", ",", "dir_name", ",", "write", "=", "False", ",", "doctests_only", "=", "False", ")", ":", "py_ext", "=", "os", ".", "extsep", "+", "\"py\"", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "dir_name", ")", ":", "self", ".", "log_debug", "(", "\"Descending into %s\"", ",", "dirpath", ")", "dirnames", ".", "sort", "(", ")", "filenames", ".", "sort", "(", ")", "for", "name", "in", "filenames", ":", "if", "(", "not", "name", ".", "startswith", "(", "\".\"", ")", "and", "os", ".", "path", ".", "splitext", "(", "name", ")", "[", "1", "]", "==", "py_ext", ")", ":", "fullname", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "name", ")", "self", ".", "refactor_file", "(", "fullname", ",", "write", ",", "doctests_only", ")", "# Modify dirnames in-place to remove subdirs with leading dots", "dirnames", "[", ":", "]", "=", "[", "dn", "for", "dn", "in", "dirnames", "if", "not", "dn", ".", "startswith", "(", "\".\"", ")", "]" ]
Descends down a directory and refactor every Python file found. Python files are assumed to have a .py extension. Files and subdirectories starting with '.' are skipped.
[ "Descends", "down", "a", "directory", "and", "refactor", "every", "Python", "file", "found", "." ]
python
train
sentinel-hub/eo-learn
mask/eolearn/mask/cloud_mask.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/mask/eolearn/mask/cloud_mask.py#L145-L169
def _downscaling(self, hr_array, meta_info, interp='linear', smooth=True):
    """
    Downscale existing array to resolution requested by cloud detector

    :param hr_array: High-resolution data array to be downscaled
    :param meta_info: Meta-info of eopatch
    :param interp: Interpolation method to be used in downscaling. Default is `'linear'`
    :param smooth: Apply Gaussian smoothing in spatial directions before downscaling. Sigma of kernel is
                   estimated by rescaling factor. Default is `True`
    :return: Down-scaled array
    """
    # Run cloud mask on full resolution
    if (self.cm_size_y is None) and (self.cm_size_x is None):
        return hr_array, None

    # Rescaling factor in spatial (height, width) dimensions
    rescale = self._get_rescale_factors(hr_array.shape[1:3], meta_info)

    if smooth:
        sigma = (0,) + tuple(int(1 / x) for x in rescale) + (0,)
        hr_array = scipy.ndimage.gaussian_filter(hr_array, sigma)

    lr_array = scipy.ndimage.interpolation.zoom(hr_array, (1.0,) + rescale + (1.0,),
                                                order=INTERP_METHODS.index(interp), mode='nearest')

    return lr_array, rescale
[ "def", "_downscaling", "(", "self", ",", "hr_array", ",", "meta_info", ",", "interp", "=", "'linear'", ",", "smooth", "=", "True", ")", ":", "# Run cloud mask on full resolution", "if", "(", "self", ".", "cm_size_y", "is", "None", ")", "and", "(", "self", ".", "cm_size_x", "is", "None", ")", ":", "return", "hr_array", ",", "None", "# Rescaling factor in spatial (height, width) dimensions", "rescale", "=", "self", ".", "_get_rescale_factors", "(", "hr_array", ".", "shape", "[", "1", ":", "3", "]", ",", "meta_info", ")", "if", "smooth", ":", "sigma", "=", "(", "0", ",", ")", "+", "tuple", "(", "int", "(", "1", "/", "x", ")", "for", "x", "in", "rescale", ")", "+", "(", "0", ",", ")", "hr_array", "=", "scipy", ".", "ndimage", ".", "gaussian_filter", "(", "hr_array", ",", "sigma", ")", "lr_array", "=", "scipy", ".", "ndimage", ".", "interpolation", ".", "zoom", "(", "hr_array", ",", "(", "1.0", ",", ")", "+", "rescale", "+", "(", "1.0", ",", ")", ",", "order", "=", "INTERP_METHODS", ".", "index", "(", "interp", ")", ",", "mode", "=", "'nearest'", ")", "return", "lr_array", ",", "rescale" ]
Downscale existing array to resolution requested by cloud detector :param hr_array: High-resolution data array to be downscaled :param meta_info: Meta-info of eopatch :param interp: Interpolation method to be used in downscaling. Default is `'linear'` :param smooth: Apply Gaussian smoothing in spatial directions before downscaling. Sigma of kernel is estimated by rescaling factor. Default is `True` :return: Down-scaled array
[ "Downscale", "existing", "array", "to", "resolution", "requested", "by", "cloud", "detector" ]
python
train
shichaoji/json2df
build/lib/json2df/base.py
https://github.com/shichaoji/json2df/blob/0463d6b494ab636b3c654616cb6be3b1e09e61a0/build/lib/json2df/base.py#L100-L105
def load_data(self, path, *args, **kwargs): """see print instance.doc, e.g. cat=LoadFile(kind='excel') read how to use cat.load_data, exec: print (cat.doc)""" self.df = self._load(path,*args, **kwargs) self.series = self.df.iloc[:,0] print ("Success! file length: " +str(self.df.shape[0]))
[ "def", "load_data", "(", "self", ",", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "df", "=", "self", ".", "_load", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "series", "=", "self", ".", "df", ".", "iloc", "[", ":", ",", "0", "]", "print", "(", "\"Success! file length: \"", "+", "str", "(", "self", ".", "df", ".", "shape", "[", "0", "]", ")", ")" ]
see print instance.doc, e.g. cat=LoadFile(kind='excel') read how to use cat.load_data, exec: print (cat.doc)
[ "see", "print", "instance", ".", "doc", "e", ".", "g", ".", "cat", "=", "LoadFile", "(", "kind", "=", "excel", ")", "read", "how", "to", "use", "cat", ".", "load_data", "exec", ":", "print", "(", "cat", ".", "doc", ")" ]
python
train
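A minimal usage sketch for the load_data entry above, following its own docstring; the import path and the sample filename are assumptions.

from json2df import LoadFile          # assumed import location

cat = LoadFile(kind='excel')          # pick the Excel reader
cat.load_data('contacts.xlsx')        # hypothetical file; prints "Success! file length: N"
first_column = cat.series             # first column of the loaded DataFrame (cat.df)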
bitcraze/crazyflie-lib-python
cflib/drivers/crazyradio.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/drivers/crazyradio.py#L191-L198
def set_address(self, address): """ Set the radio address to be used""" if len(address) != 5: raise Exception('Crazyradio: the radio address shall be 5' ' bytes long') if address != self.current_address: _send_vendor_setup(self.handle, SET_RADIO_ADDRESS, 0, 0, address) self.current_address = address
[ "def", "set_address", "(", "self", ",", "address", ")", ":", "if", "len", "(", "address", ")", "!=", "5", ":", "raise", "Exception", "(", "'Crazyradio: the radio address shall be 5'", "' bytes long'", ")", "if", "address", "!=", "self", ".", "current_address", ":", "_send_vendor_setup", "(", "self", ".", "handle", ",", "SET_RADIO_ADDRESS", ",", "0", ",", "0", ",", "address", ")", "self", ".", "current_address", "=", "address" ]
Set the radio address to be used
[ "Set", "the", "radio", "address", "to", "be", "used" ]
python
train
slightlynybbled/tk_tools
tk_tools/canvas.py
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L43-L54
def to_absolute(self, x, y): """ Converts coordinates provided with reference to the center \ of the canvas (0, 0) to absolute coordinates which are used \ by the canvas object in which (0, 0) is located in the top \ left of the object. :param x: x value in pixels :param y: x value in pixels :return: None """ return x + self.size/2, y + self.size/2
[ "def", "to_absolute", "(", "self", ",", "x", ",", "y", ")", ":", "return", "x", "+", "self", ".", "size", "/", "2", ",", "y", "+", "self", ".", "size", "/", "2" ]
Converts coordinates provided with reference to the center \ of the canvas (0, 0) to absolute coordinates which are used \ by the canvas object in which (0, 0) is located in the top \ left of the object. :param x: x value in pixels :param y: x value in pixels :return: None
[ "Converts", "coordinates", "provided", "with", "reference", "to", "the", "center", "\\", "of", "the", "canvas", "(", "0", "0", ")", "to", "absolute", "coordinates", "which", "are", "used", "\\", "by", "the", "canvas", "object", "in", "which", "(", "0", "0", ")", "is", "located", "in", "the", "top", "\\", "left", "of", "the", "object", "." ]
python
train
gwastro/pycbc
pycbc/io/record.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/record.py#L973-L992
def add_functions(self, names, functions): """Adds the given functions to the function library. Functions are added to this instance of the array; all copies of and slices of this array will also have the new functions included. Parameters ---------- names : (list of) string(s) Name or list of names of the functions. functions : (list of) function(s) The function(s) to call. """ if isinstance(names, string_types): names = [names] functions = [functions] if len(functions) != len(names): raise ValueError("number of provided names must be same as number " "of functions") self._functionlib.update(dict(zip(names, functions)))
[ "def", "add_functions", "(", "self", ",", "names", ",", "functions", ")", ":", "if", "isinstance", "(", "names", ",", "string_types", ")", ":", "names", "=", "[", "names", "]", "functions", "=", "[", "functions", "]", "if", "len", "(", "functions", ")", "!=", "len", "(", "names", ")", ":", "raise", "ValueError", "(", "\"number of provided names must be same as number \"", "\"of functions\"", ")", "self", ".", "_functionlib", ".", "update", "(", "dict", "(", "zip", "(", "names", ",", "functions", ")", ")", ")" ]
Adds the given functions to the function library. Functions are added to this instance of the array; all copies of and slices of this array will also have the new functions included. Parameters ---------- names : (list of) string(s) Name or list of names of the functions. functions : (list of) function(s) The function(s) to call.
[ "Adds", "the", "given", "functions", "to", "the", "function", "library", "." ]
python
train
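A hedged usage sketch for add_functions above: arr stands for an instance of the record-array class that defines it, and the field names are illustrative.

# register one derived function under a single name
arr.add_functions('mtotal', lambda self: self['mass1'] + self['mass2'])

# registering several at once needs name and function lists of equal length,
# otherwise a ValueError is raised
arr.add_functions(['m1', 'm2'],
                  [lambda self: self['mass1'], lambda self: self['mass2']])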
trevisanj/a99
a99/gui/a_WBase.py
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/gui/a_WBase.py#L38-L40
def add_log_error(self, x, flag_also_show=False, E=None): """Delegates to parent form""" self.parent_form.add_log_error(x, flag_also_show, E)
[ "def", "add_log_error", "(", "self", ",", "x", ",", "flag_also_show", "=", "False", ",", "E", "=", "None", ")", ":", "self", ".", "parent_form", ".", "add_log_error", "(", "x", ",", "flag_also_show", ",", "E", ")" ]
Delegates to parent form
[ "Delegates", "to", "parent", "form" ]
python
train
pri22296/beautifultable
beautifultable/beautifultable.py
https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L1049-L1110
def _get_horizontal_line(self, char, intersect_left, intersect_mid, intersect_right): """Get a horizontal line for the table. Internal method used to actually get all horizontal lines in the table. Column width should be set prior to calling this method. This method detects intersection and handles it according to the values of `intersect_*_*` attributes. Parameters ---------- char : str Character used to draw the line. Returns ------- str String which will be printed as the Top border of the table. """ width = self.get_table_width() try: line = list(char * (int(width/termwidth(char)) + 1))[:width] except ZeroDivisionError: line = [' '] * width if len(line) == 0: return '' # Only if Special Intersection is enabled and horizontal line is # visible if not char.isspace(): # If left border is enabled and it is visible visible_junc = not intersect_left.isspace() if termwidth(self.left_border_char) > 0: if not (self.left_border_char.isspace() and visible_junc): length = min(termwidth(self.left_border_char), termwidth(intersect_left)) for i in range(length): line[i] = intersect_left[i] visible_junc = not intersect_right.isspace() # If right border is enabled and it is visible if termwidth(self.right_border_char) > 0: if not (self.right_border_char.isspace() and visible_junc): length = min(termwidth(self.right_border_char), termwidth(intersect_right)) for i in range(length): line[-i-1] = intersect_right[-i-1] visible_junc = not intersect_mid.isspace() # If column separator is enabled and it is visible if termwidth(self.column_separator_char): if not (self.column_separator_char.isspace() and visible_junc): index = termwidth(self.left_border_char) for i in range(self._column_count-1): index += (self._column_widths[i]) length = min(termwidth(self.column_separator_char), termwidth(intersect_mid)) for i in range(length): line[index+i] = intersect_mid[i] index += termwidth(self.column_separator_char) return ''.join(line)
[ "def", "_get_horizontal_line", "(", "self", ",", "char", ",", "intersect_left", ",", "intersect_mid", ",", "intersect_right", ")", ":", "width", "=", "self", ".", "get_table_width", "(", ")", "try", ":", "line", "=", "list", "(", "char", "*", "(", "int", "(", "width", "/", "termwidth", "(", "char", ")", ")", "+", "1", ")", ")", "[", ":", "width", "]", "except", "ZeroDivisionError", ":", "line", "=", "[", "' '", "]", "*", "width", "if", "len", "(", "line", ")", "==", "0", ":", "return", "''", "# Only if Special Intersection is enabled and horizontal line is", "# visible", "if", "not", "char", ".", "isspace", "(", ")", ":", "# If left border is enabled and it is visible", "visible_junc", "=", "not", "intersect_left", ".", "isspace", "(", ")", "if", "termwidth", "(", "self", ".", "left_border_char", ")", ">", "0", ":", "if", "not", "(", "self", ".", "left_border_char", ".", "isspace", "(", ")", "and", "visible_junc", ")", ":", "length", "=", "min", "(", "termwidth", "(", "self", ".", "left_border_char", ")", ",", "termwidth", "(", "intersect_left", ")", ")", "for", "i", "in", "range", "(", "length", ")", ":", "line", "[", "i", "]", "=", "intersect_left", "[", "i", "]", "visible_junc", "=", "not", "intersect_right", ".", "isspace", "(", ")", "# If right border is enabled and it is visible", "if", "termwidth", "(", "self", ".", "right_border_char", ")", ">", "0", ":", "if", "not", "(", "self", ".", "right_border_char", ".", "isspace", "(", ")", "and", "visible_junc", ")", ":", "length", "=", "min", "(", "termwidth", "(", "self", ".", "right_border_char", ")", ",", "termwidth", "(", "intersect_right", ")", ")", "for", "i", "in", "range", "(", "length", ")", ":", "line", "[", "-", "i", "-", "1", "]", "=", "intersect_right", "[", "-", "i", "-", "1", "]", "visible_junc", "=", "not", "intersect_mid", ".", "isspace", "(", ")", "# If column separator is enabled and it is visible", "if", "termwidth", "(", "self", ".", "column_separator_char", ")", ":", "if", "not", "(", "self", ".", "column_separator_char", ".", "isspace", "(", ")", "and", "visible_junc", ")", ":", "index", "=", "termwidth", "(", "self", ".", "left_border_char", ")", "for", "i", "in", "range", "(", "self", ".", "_column_count", "-", "1", ")", ":", "index", "+=", "(", "self", ".", "_column_widths", "[", "i", "]", ")", "length", "=", "min", "(", "termwidth", "(", "self", ".", "column_separator_char", ")", ",", "termwidth", "(", "intersect_mid", ")", ")", "for", "i", "in", "range", "(", "length", ")", ":", "line", "[", "index", "+", "i", "]", "=", "intersect_mid", "[", "i", "]", "index", "+=", "termwidth", "(", "self", ".", "column_separator_char", ")", "return", "''", ".", "join", "(", "line", ")" ]
Get a horizontal line for the table. Internal method used to actually get all horizontal lines in the table. Column width should be set prior to calling this method. This method detects intersection and handles it according to the values of `intersect_*_*` attributes. Parameters ---------- char : str Character used to draw the line. Returns ------- str String which will be printed as the Top border of the table.
[ "Get", "a", "horizontal", "line", "for", "the", "table", "." ]
python
train
quandyfactory/dicttoxml
dicttoxml.py
https://github.com/quandyfactory/dicttoxml/blob/2016fe9817ad03b26aa5f1a475f5b79ad6757b96/dicttoxml.py#L342-L353
def convert_bool(key, val, attr_type, attr={}, cdata=False): """Converts a boolean into an XML element""" LOG.info('Inside convert_bool(): key="%s", val="%s", type(val) is: "%s"' % ( unicode_me(key), unicode_me(val), type(val).__name__) ) key, attr = make_valid_xml_name(key, attr) if attr_type: attr['type'] = get_xml_type(val) attrstring = make_attrstring(attr) return '<%s%s>%s</%s>' % (key, attrstring, unicode(val).lower(), key)
[ "def", "convert_bool", "(", "key", ",", "val", ",", "attr_type", ",", "attr", "=", "{", "}", ",", "cdata", "=", "False", ")", ":", "LOG", ".", "info", "(", "'Inside convert_bool(): key=\"%s\", val=\"%s\", type(val) is: \"%s\"'", "%", "(", "unicode_me", "(", "key", ")", ",", "unicode_me", "(", "val", ")", ",", "type", "(", "val", ")", ".", "__name__", ")", ")", "key", ",", "attr", "=", "make_valid_xml_name", "(", "key", ",", "attr", ")", "if", "attr_type", ":", "attr", "[", "'type'", "]", "=", "get_xml_type", "(", "val", ")", "attrstring", "=", "make_attrstring", "(", "attr", ")", "return", "'<%s%s>%s</%s>'", "%", "(", "key", ",", "attrstring", ",", "unicode", "(", "val", ")", ".", "lower", "(", ")", ",", "key", ")" ]
Converts a boolean into an XML element
[ "Converts", "a", "boolean", "into", "an", "XML", "element" ]
python
train
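A hedged illustration of convert_bool above; the exact attribute string depends on get_xml_type and make_attrstring, so the outputs shown are expectations rather than verified results.

convert_bool('enabled', True, attr_type=True)
# expected: '<enabled type="bool">true</enabled>'
convert_bool('enabled', False, attr_type=False)
# expected: '<enabled>false</enabled>'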
FNNDSC/pftree
pftree/pftree.py
https://github.com/FNNDSC/pftree/blob/b841e337c976bce151735f9d5dd95eded62aa094/pftree/pftree.py#L164-L182
def walklevel(path, depth = -1, **kwargs): """It works just like os.walk, but you can pass it a level parameter that indicates how deep the recursion will go. If depth is -1 (or less than 0), the full depth is walked. """ # if depth is negative, just walk if depth < 0: for root, dirs, files in os.walk(path, **kwargs): yield root, dirs, files # path.count works because is a file has a "/" it will show up in the list # as a ":" path = path.rstrip(os.path.sep) num_sep = path.count(os.path.sep) for root, dirs, files in os.walk(path, **kwargs): yield root, dirs, files num_sep_this = root.count(os.path.sep) if num_sep + depth <= num_sep_this: del dirs[:]
[ "def", "walklevel", "(", "path", ",", "depth", "=", "-", "1", ",", "*", "*", "kwargs", ")", ":", "# if depth is negative, just walk", "if", "depth", "<", "0", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ",", "*", "*", "kwargs", ")", ":", "yield", "root", ",", "dirs", ",", "files", "# path.count works because is a file has a \"/\" it will show up in the list", "# as a \":\"", "path", "=", "path", ".", "rstrip", "(", "os", ".", "path", ".", "sep", ")", "num_sep", "=", "path", ".", "count", "(", "os", ".", "path", ".", "sep", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "path", ",", "*", "*", "kwargs", ")", ":", "yield", "root", ",", "dirs", ",", "files", "num_sep_this", "=", "root", ".", "count", "(", "os", ".", "path", ".", "sep", ")", "if", "num_sep", "+", "depth", "<=", "num_sep_this", ":", "del", "dirs", "[", ":", "]" ]
It works just like os.walk, but you can pass it a level parameter that indicates how deep the recursion will go. If depth is -1 (or less than 0), the full depth is walked.
[ "It", "works", "just", "like", "os", ".", "walk", "but", "you", "can", "pass", "it", "a", "level", "parameter", "that", "indicates", "how", "deep", "the", "recursion", "will", "go", ".", "If", "depth", "is", "-", "1", "(", "or", "less", "than", "0", ")", "the", "full", "depth", "is", "walked", "." ]
python
train
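A short usage sketch for walklevel above; '/tmp/data' is a hypothetical directory.

# walk only one level below the starting directory
for root, dirs, files in walklevel('/tmp/data', depth=1):
    print(root, len(files))

# depth=-1 (the default) walks the full tree, like a plain os.walk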
Erotemic/utool
utool/util_dev.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1586-L1607
def _memory_profile(with_gc=False): """ Helper for memory debugging. Mostly just a namespace where I experiment with guppy and heapy. References: http://stackoverflow.com/questions/2629680/deciding-between-subprocess-multiprocessing-and-thread-in-python Reset Numpy Memory:: %reset out %reset array """ import utool as ut if with_gc: garbage_collect() import guppy hp = guppy.hpy() print('[hpy] Waiting for heap output...') heap_output = hp.heap() print(heap_output) print('[hpy] total heap size: ' + ut.byte_str2(heap_output.size)) ut.util_resources.memstats()
[ "def", "_memory_profile", "(", "with_gc", "=", "False", ")", ":", "import", "utool", "as", "ut", "if", "with_gc", ":", "garbage_collect", "(", ")", "import", "guppy", "hp", "=", "guppy", ".", "hpy", "(", ")", "print", "(", "'[hpy] Waiting for heap output...'", ")", "heap_output", "=", "hp", ".", "heap", "(", ")", "print", "(", "heap_output", ")", "print", "(", "'[hpy] total heap size: '", "+", "ut", ".", "byte_str2", "(", "heap_output", ".", "size", ")", ")", "ut", ".", "util_resources", ".", "memstats", "(", ")" ]
Helper for memory debugging. Mostly just a namespace where I experiment with guppy and heapy. References: http://stackoverflow.com/questions/2629680/deciding-between-subprocess-multiprocessing-and-thread-in-python Reset Numpy Memory:: %reset out %reset array
[ "Helper", "for", "memory", "debugging", ".", "Mostly", "just", "a", "namespace", "where", "I", "experiment", "with", "guppy", "and", "heapy", "." ]
python
train
bunq/sdk_python
bunq/sdk/security.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/security.py#L274-L287
def _get_header_correctly_cased(header_name): """ :type header_name: str :rtype: str """ header_name = header_name.capitalize() matches = re.findall(_REGEX_FOR_LOWERCASE_HEADERS, header_name) for match in matches: header_name = (re.sub(match, match.upper(), header_name)) return header_name
[ "def", "_get_header_correctly_cased", "(", "header_name", ")", ":", "header_name", "=", "header_name", ".", "capitalize", "(", ")", "matches", "=", "re", ".", "findall", "(", "_REGEX_FOR_LOWERCASE_HEADERS", ",", "header_name", ")", "for", "match", "in", "matches", ":", "header_name", "=", "(", "re", ".", "sub", "(", "match", ",", "match", ".", "upper", "(", ")", ",", "header_name", ")", ")", "return", "header_name" ]
:type header_name: str :rtype: str
[ ":", "type", "header_name", ":", "str", ":", "rtype", ":", "str" ]
python
train
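A hedged illustration of the header-casing helper above; the expected outputs assume _REGEX_FOR_LOWERCASE_HEADERS matches the lowercase letters that follow each dash.

_get_header_correctly_cased('x-bunq-client-signature')
# expected: 'X-Bunq-Client-Signature'
_get_header_correctly_cased('cache-control')
# expected: 'Cache-Control'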
Parquery/sphinx-icontract
sphinx_icontract/__init__.py
https://github.com/Parquery/sphinx-icontract/blob/92918f23a8ea1873112e9b7446c64cd6f12ee04b/sphinx_icontract/__init__.py#L449-L467
def _format_function_contracts(func: Callable, prefix: Optional[str] = None) -> List[str]: """ Format the preconditions and postconditions of a function given its checker decorator. :param func: function whose contracts we are describing :param prefix: prefix to be prepended to the contract directives such as ``get`` or ``set`` :return: list of lines """ checker = icontract._checkers.find_checker(func=func) if checker is None: return [] pps = _preconditions_snapshots_postconditions(checker=checker) pre_block = _format_preconditions(preconditions=pps.preconditions, prefix=prefix) old_block = _format_snapshots(snapshots=pps.snapshots, prefix=prefix) post_block = _format_postconditions(postconditions=pps.postconditions, prefix=prefix) return pre_block + old_block + post_block
[ "def", "_format_function_contracts", "(", "func", ":", "Callable", ",", "prefix", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "List", "[", "str", "]", ":", "checker", "=", "icontract", ".", "_checkers", ".", "find_checker", "(", "func", "=", "func", ")", "if", "checker", "is", "None", ":", "return", "[", "]", "pps", "=", "_preconditions_snapshots_postconditions", "(", "checker", "=", "checker", ")", "pre_block", "=", "_format_preconditions", "(", "preconditions", "=", "pps", ".", "preconditions", ",", "prefix", "=", "prefix", ")", "old_block", "=", "_format_snapshots", "(", "snapshots", "=", "pps", ".", "snapshots", ",", "prefix", "=", "prefix", ")", "post_block", "=", "_format_postconditions", "(", "postconditions", "=", "pps", ".", "postconditions", ",", "prefix", "=", "prefix", ")", "return", "pre_block", "+", "old_block", "+", "post_block" ]
Format the preconditions and postconditions of a function given its checker decorator. :param func: function whose contracts we are describing :param prefix: prefix to be prepended to the contract directives such as ``get`` or ``set`` :return: list of lines
[ "Format", "the", "preconditions", "and", "postconditions", "of", "a", "function", "given", "its", "checker", "decorator", "." ]
python
train
saltstack/salt
salt/states/saltmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/saltmod.py#L1006-L1079
def wheel(name, **kwargs): ''' Execute a wheel module on the master .. versionadded:: 2014.7.0 name The name of the function to run kwargs Any keyword arguments to pass to the wheel function asynchronous Run the salt command but don't wait for a reply. .. versionadded:: neon .. code-block:: yaml accept_minion_key: salt.wheel: - name: key.accept - match: frank ''' ret = {'name': name, 'result': False, 'changes': {}, 'comment': ''} try: jid = __orchestration_jid__ except NameError: log.debug( 'Unable to fire args event due to missing __orchestration_jid__' ) jid = None if __opts__.get('test', False): ret['result'] = None, ret['changes'] = {} ret['comment'] = "Wheel function '{0}' would be executed.".format(name) return ret out = __salt__['saltutil.wheel'](name, __orchestration_jid__=jid, __env__=__env__, **kwargs) if kwargs.get('asynchronous'): ret['__jid__'] = ret.get('jid') ret['changes'] = out if int(out.get('jid', 0)) > 0: ret['result'] = True ret['comment'] = 'wheel submitted successfully.' else: ret['result'] = False ret['comment'] = 'wheel failed to run.' return ret wheel_return = out.get('return') if isinstance(wheel_return, dict) and 'Error' in wheel_return: out['success'] = False success = out.get('success', True) ret = {'name': name, 'changes': {'return': wheel_return}, 'result': success} ret['comment'] = "Wheel function '{0}' {1}.".format( name, 'executed' if success else 'failed', ) ret['__orchestration__'] = True if 'jid' in out: ret['__jid__'] = out['jid'] return ret
[ "def", "wheel", "(", "name", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'changes'", ":", "{", "}", ",", "'comment'", ":", "''", "}", "try", ":", "jid", "=", "__orchestration_jid__", "except", "NameError", ":", "log", ".", "debug", "(", "'Unable to fire args event due to missing __orchestration_jid__'", ")", "jid", "=", "None", "if", "__opts__", ".", "get", "(", "'test'", ",", "False", ")", ":", "ret", "[", "'result'", "]", "=", "None", ",", "ret", "[", "'changes'", "]", "=", "{", "}", "ret", "[", "'comment'", "]", "=", "\"Wheel function '{0}' would be executed.\"", ".", "format", "(", "name", ")", "return", "ret", "out", "=", "__salt__", "[", "'saltutil.wheel'", "]", "(", "name", ",", "__orchestration_jid__", "=", "jid", ",", "__env__", "=", "__env__", ",", "*", "*", "kwargs", ")", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "ret", "[", "'__jid__'", "]", "=", "ret", ".", "get", "(", "'jid'", ")", "ret", "[", "'changes'", "]", "=", "out", "if", "int", "(", "out", ".", "get", "(", "'jid'", ",", "0", ")", ")", ">", "0", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'wheel submitted successfully.'", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'wheel failed to run.'", "return", "ret", "wheel_return", "=", "out", ".", "get", "(", "'return'", ")", "if", "isinstance", "(", "wheel_return", ",", "dict", ")", "and", "'Error'", "in", "wheel_return", ":", "out", "[", "'success'", "]", "=", "False", "success", "=", "out", ".", "get", "(", "'success'", ",", "True", ")", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "'return'", ":", "wheel_return", "}", ",", "'result'", ":", "success", "}", "ret", "[", "'comment'", "]", "=", "\"Wheel function '{0}' {1}.\"", ".", "format", "(", "name", ",", "'executed'", "if", "success", "else", "'failed'", ",", ")", "ret", "[", "'__orchestration__'", "]", "=", "True", "if", "'jid'", "in", "out", ":", "ret", "[", "'__jid__'", "]", "=", "out", "[", "'jid'", "]", "return", "ret" ]
Execute a wheel module on the master .. versionadded:: 2014.7.0 name The name of the function to run kwargs Any keyword arguments to pass to the wheel function asynchronous Run the salt command but don't wait for a reply. .. versionadded:: neon .. code-block:: yaml accept_minion_key: salt.wheel: - name: key.accept - match: frank
[ "Execute", "a", "wheel", "module", "on", "the", "master" ]
python
train
eaton-lab/toytree
toytree/etemini.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L1789-L1807
def get_topology_id(self, attr="name"): """ Returns the unique ID representing the topology of the current tree. Two trees with the same topology will produce the same id. If trees are unrooted, make sure that the root node is not binary or use the tree.unroot() function before generating the topology id. This is useful to detect the number of unique topologies over a bunch of trees, without requiring full distance methods. The id is, by default, calculated based on the terminal node's names. Any other node attribute could be used instead. """ edge_keys = [] for s1, s2 in self.get_edges(): k1 = sorted([getattr(e, attr) for e in s1]) k2 = sorted([getattr(e, attr) for e in s2]) edge_keys.append(sorted([k1, k2])) return md5(str(sorted(edge_keys)).encode('utf-8')).hexdigest()
[ "def", "get_topology_id", "(", "self", ",", "attr", "=", "\"name\"", ")", ":", "edge_keys", "=", "[", "]", "for", "s1", ",", "s2", "in", "self", ".", "get_edges", "(", ")", ":", "k1", "=", "sorted", "(", "[", "getattr", "(", "e", ",", "attr", ")", "for", "e", "in", "s1", "]", ")", "k2", "=", "sorted", "(", "[", "getattr", "(", "e", ",", "attr", ")", "for", "e", "in", "s2", "]", ")", "edge_keys", ".", "append", "(", "sorted", "(", "[", "k1", ",", "k2", "]", ")", ")", "return", "md5", "(", "str", "(", "sorted", "(", "edge_keys", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")" ]
Returns the unique ID representing the topology of the current tree. Two trees with the same topology will produce the same id. If trees are unrooted, make sure that the root node is not binary or use the tree.unroot() function before generating the topology id. This is useful to detect the number of unique topologies over a bunch of trees, without requiring full distance methods. The id is, by default, calculated based on the terminal node's names. Any other node attribute could be used instead.
[ "Returns", "the", "unique", "ID", "representing", "the", "topology", "of", "the", "current", "tree", ".", "Two", "trees", "with", "the", "same", "topology", "will", "produce", "the", "same", "id", ".", "If", "trees", "are", "unrooted", "make", "sure", "that", "the", "root", "node", "is", "not", "binary", "or", "use", "the", "tree", ".", "unroot", "()", "function", "before", "generating", "the", "topology", "id", "." ]
python
train
aequitas/python-rflink
rflink/parser.py
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/parser.py#L393-L427
def deserialize_packet_id(packet_id: str) -> dict: r"""Turn a packet id into individual packet components. >>> deserialize_packet_id('newkaku_000001_01') == { ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... } True >>> deserialize_packet_id('ikeakoppla_000080_0') == { ... 'protocol': 'ikea koppla', ... 'id': '000080', ... 'switch': '0', ... } True """ if packet_id == 'rflink': return {'protocol': UNKNOWN} protocol, *id_switch = packet_id.split(PACKET_ID_SEP) assert len(id_switch) < 3 packet_identifiers = { # lookup the reverse translation of the protocol in the translation # table, fallback to protocol. If this is a unserializable protocol # name, it has not been serialized before and is not in the # translate_protocols table this will result in an invalid command. 'protocol': protocol_translations.get(protocol, protocol), } if id_switch: packet_identifiers['id'] = id_switch[0] if len(id_switch) > 1: packet_identifiers['switch'] = id_switch[1] return packet_identifiers
[ "def", "deserialize_packet_id", "(", "packet_id", ":", "str", ")", "->", "dict", ":", "if", "packet_id", "==", "'rflink'", ":", "return", "{", "'protocol'", ":", "UNKNOWN", "}", "protocol", ",", "", "*", "id_switch", "=", "packet_id", ".", "split", "(", "PACKET_ID_SEP", ")", "assert", "len", "(", "id_switch", ")", "<", "3", "packet_identifiers", "=", "{", "# lookup the reverse translation of the protocol in the translation", "# table, fallback to protocol. If this is a unserializable protocol", "# name, it has not been serialized before and is not in the", "# translate_protocols table this will result in an invalid command.", "'protocol'", ":", "protocol_translations", ".", "get", "(", "protocol", ",", "protocol", ")", ",", "}", "if", "id_switch", ":", "packet_identifiers", "[", "'id'", "]", "=", "id_switch", "[", "0", "]", "if", "len", "(", "id_switch", ")", ">", "1", ":", "packet_identifiers", "[", "'switch'", "]", "=", "id_switch", "[", "1", "]", "return", "packet_identifiers" ]
r"""Turn a packet id into individual packet components. >>> deserialize_packet_id('newkaku_000001_01') == { ... 'protocol': 'newkaku', ... 'id': '000001', ... 'switch': '01', ... } True >>> deserialize_packet_id('ikeakoppla_000080_0') == { ... 'protocol': 'ikea koppla', ... 'id': '000080', ... 'switch': '0', ... } True
[ "r", "Turn", "a", "packet", "id", "into", "individual", "packet", "components", "." ]
python
train
Accelize/pycosio
pycosio/_core/functions_core.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/_core/functions_core.py#L53-L88
def equivalent_to(std_function): """ Decorates a cloud object compatible function to provides fall back to standard function if used on local files. Args: std_function (function): standard function to used with local files. Returns: function: new function """ def decorate(cos_function): """Decorator argument handler""" @wraps(cos_function) def decorated(path, *args, **kwargs): """Decorated function""" # Handles path-like objects path = fsdecode(path).replace('\\', '/') # Storage object: Handle with Cloud object storage # function if is_storage(path): with handle_os_exceptions(): return cos_function(path, *args, **kwargs) # Local file: Redirect to standard function return std_function(path, *args, **kwargs) return decorated return decorate
[ "def", "equivalent_to", "(", "std_function", ")", ":", "def", "decorate", "(", "cos_function", ")", ":", "\"\"\"Decorator argument handler\"\"\"", "@", "wraps", "(", "cos_function", ")", "def", "decorated", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Decorated function\"\"\"", "# Handles path-like objects", "path", "=", "fsdecode", "(", "path", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "# Storage object: Handle with Cloud object storage", "# function", "if", "is_storage", "(", "path", ")", ":", "with", "handle_os_exceptions", "(", ")", ":", "return", "cos_function", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Local file: Redirect to standard function", "return", "std_function", "(", "path", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "decorated", "return", "decorate" ]
Decorates a cloud object compatible function to provide fallback to the standard function if used on local files. Args: std_function (function): standard function to use with local files. Returns: function: new function
[ "Decorates", "a", "cloud", "object", "compatible", "function", "to", "provides", "fall", "back", "to", "standard", "function", "if", "used", "on", "local", "files", "." ]
python
train
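A sketch of how the equivalent_to decorator above is meant to be wired up; my_listdir and the storage URL are illustrative assumptions.

import os

@equivalent_to(os.listdir)
def my_listdir(path, *args, **kwargs):
    # cloud-storage-specific implementation would go here
    ...

my_listdir('/home/user')            # local path: falls back to os.listdir
my_listdir('s3://bucket/prefix')    # storage path: handled by the decorated function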
openstack/pyghmi
pyghmi/ipmi/private/util.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/util.py#L41-L52
def decode_wireformat_uuid(rawguid): """Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output. """ if isinstance(rawguid, list): rawguid = bytearray(rawguid) lebytes = struct.unpack_from('<IHH', buffer(rawguid[:8])) bebytes = struct.unpack_from('>HHI', buffer(rawguid[8:])) return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format( lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2])
[ "def", "decode_wireformat_uuid", "(", "rawguid", ")", ":", "if", "isinstance", "(", "rawguid", ",", "list", ")", ":", "rawguid", "=", "bytearray", "(", "rawguid", ")", "lebytes", "=", "struct", ".", "unpack_from", "(", "'<IHH'", ",", "buffer", "(", "rawguid", "[", ":", "8", "]", ")", ")", "bebytes", "=", "struct", ".", "unpack_from", "(", "'>HHI'", ",", "buffer", "(", "rawguid", "[", "8", ":", "]", ")", ")", "return", "'{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'", ".", "format", "(", "lebytes", "[", "0", "]", ",", "lebytes", "[", "1", "]", ",", "lebytes", "[", "2", "]", ",", "bebytes", "[", "0", "]", ",", "bebytes", "[", "1", "]", ",", "bebytes", "[", "2", "]", ")" ]
Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output.
[ "Decode", "a", "wire", "format", "UUID" ]
python
train
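A hedged example of decode_wireformat_uuid above with a made-up 16-byte GUID; the expected string follows the little-endian/big-endian split described in the docstring (the helper relies on the Python 2 buffer built-in, so the sketch assumes Python 2).

raw = [0x78, 0x56, 0x34, 0x12, 0x34, 0x12, 0x78, 0x56,
       0xAB, 0xCD, 0xEF, 0x01, 0x23, 0x45, 0x67, 0x89]
print(decode_wireformat_uuid(raw))
# expected: '12345678-1234-5678-ABCD-EF0123456789'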
ibis-project/ibis
ibis/impala/udf.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/impala/udf.py#L169-L220
def wrap_uda( hdfs_file, inputs, output, update_fn, init_fn=None, merge_fn=None, finalize_fn=None, serialize_fn=None, close_fn=None, name=None, ): """ Creates a callable aggregation function object. Must be created in Impala to be used Parameters ---------- hdfs_file: .so file that contains relevant UDA inputs: list of strings denoting ibis datatypes output: string denoting ibis datatype update_fn: string Library symbol name for update function init_fn: string, optional Library symbol name for initialization function merge_fn: string, optional Library symbol name for merge function finalize_fn: string, optional Library symbol name for finalize function serialize_fn : string, optional Library symbol name for serialize UDA API function. Not required for all UDAs; see documentation for more. close_fn : string, optional name: string, optional Used internally to track function Returns ------- container : UDA object """ func = ImpalaUDA( inputs, output, update_fn, init_fn, merge_fn, finalize_fn, serialize_fn=serialize_fn, name=name, lib_path=hdfs_file, ) return func
[ "def", "wrap_uda", "(", "hdfs_file", ",", "inputs", ",", "output", ",", "update_fn", ",", "init_fn", "=", "None", ",", "merge_fn", "=", "None", ",", "finalize_fn", "=", "None", ",", "serialize_fn", "=", "None", ",", "close_fn", "=", "None", ",", "name", "=", "None", ",", ")", ":", "func", "=", "ImpalaUDA", "(", "inputs", ",", "output", ",", "update_fn", ",", "init_fn", ",", "merge_fn", ",", "finalize_fn", ",", "serialize_fn", "=", "serialize_fn", ",", "name", "=", "name", ",", "lib_path", "=", "hdfs_file", ",", ")", "return", "func" ]
Creates a callable aggregation function object. Must be created in Impala to be used Parameters ---------- hdfs_file: .so file that contains relevant UDA inputs: list of strings denoting ibis datatypes output: string denoting ibis datatype update_fn: string Library symbol name for update function init_fn: string, optional Library symbol name for initialization function merge_fn: string, optional Library symbol name for merge function finalize_fn: string, optional Library symbol name for finalize function serialize_fn : string, optional Library symbol name for serialize UDA API function. Not required for all UDAs; see documentation for more. close_fn : string, optional name: string, optional Used internally to track function Returns ------- container : UDA object
[ "Creates", "a", "callable", "aggregation", "function", "object", ".", "Must", "be", "created", "in", "Impala", "to", "be", "used" ]
python
train
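A hedged usage sketch assembled from the wrap_uda parameter list above; the HDFS path and library symbol names are placeholders.

my_sum = wrap_uda(
    hdfs_file='/user/hive/udfs/libsumagg.so',
    inputs=['int32'],
    output='int64',
    update_fn='SumUpdate',
    init_fn='SumInit',
    merge_fn='SumMerge',
    finalize_fn='SumFinalize',
    name='my_sum',
)
# the returned UDA object still has to be created in Impala before it can be used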
deepmind/sonnet
sonnet/python/modules/conv.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/conv.py#L209-L233
def _padding_to_conv_op_padding(padding): """Whether to use SAME or VALID for the underlying convolution op. Args: padding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from `_fill_and_verify_padding`. Returns: One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the underlying convolution op. Raises: ValueError: If padding is not a tuple. """ if not isinstance(padding, tuple): raise ValueError("padding should be a tuple.") if all(p == SAME for p in padding): # If we want SAME padding for all dimensions then we can use SAME for the # conv and avoid doing any extra padding. return SAME else: # Otherwise we prefer to use VALID, since we can implement all the other # padding types just by adding some extra padding before doing a VALID conv. # (We could use SAME but then we'd also have to crop outputs in some cases). return VALID
[ "def", "_padding_to_conv_op_padding", "(", "padding", ")", ":", "if", "not", "isinstance", "(", "padding", ",", "tuple", ")", ":", "raise", "ValueError", "(", "\"padding should be a tuple.\"", ")", "if", "all", "(", "p", "==", "SAME", "for", "p", "in", "padding", ")", ":", "# If we want SAME padding for all dimensions then we can use SAME for the", "# conv and avoid doing any extra padding.", "return", "SAME", "else", ":", "# Otherwise we prefer to use VALID, since we can implement all the other", "# padding types just by adding some extra padding before doing a VALID conv.", "# (We could use SAME but then we'd also have to crop outputs in some cases).", "return", "VALID" ]
Whether to use SAME or VALID for the underlying convolution op. Args: padding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from `_fill_and_verify_padding`. Returns: One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the underlying convolution op. Raises: ValueError: If padding is not a tuple.
[ "Whether", "to", "use", "SAME", "or", "VALID", "for", "the", "underlying", "convolution", "op", "." ]
python
train
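An illustration of the padding decision above; SAME and VALID stand for the module's own padding constants.

_padding_to_conv_op_padding((SAME, SAME))    # -> SAME, no extra padding required
_padding_to_conv_op_padding((SAME, VALID))   # -> VALID, extra padding is added before the conv
_padding_to_conv_op_padding(SAME)            # raises ValueError: padding should be a tuple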
TheGhouls/oct
oct/core/turrets_manager.py
https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/core/turrets_manager.py#L68-L84
def add(self, turret_data, is_started=False): """Add a turret object to current turrets configuration :param dict turret_data: the data of the turret to add :param bool is_started: tell if test are already runing """ if turret_data.get('uuid') in self.turrets: return False turret = Turret(**turret_data) self.write(turret) self.turrets[turret.uuid] = turret if is_started: self.publish(self.START, turret.uuid) return True
[ "def", "add", "(", "self", ",", "turret_data", ",", "is_started", "=", "False", ")", ":", "if", "turret_data", ".", "get", "(", "'uuid'", ")", "in", "self", ".", "turrets", ":", "return", "False", "turret", "=", "Turret", "(", "*", "*", "turret_data", ")", "self", ".", "write", "(", "turret", ")", "self", ".", "turrets", "[", "turret", ".", "uuid", "]", "=", "turret", "if", "is_started", ":", "self", ".", "publish", "(", "self", ".", "START", ",", "turret", ".", "uuid", ")", "return", "True" ]
Add a turret object to current turrets configuration :param dict turret_data: the data of the turret to add :param bool is_started: tells whether tests are already running
[ "Add", "a", "turret", "object", "to", "current", "turrets", "configuration" ]
python
train
codelv/enaml-web
web/impl/lxml_toolkit_object.py
https://github.com/codelv/enaml-web/blob/88f1131a7b3ba9e83467b4f44bc3bab6f0de7559/web/impl/lxml_toolkit_object.py#L34-L42
def create_widget(self): """ Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute. """ self.widget = SubElement(self.parent_widget(), self.declaration.tag)
[ "def", "create_widget", "(", "self", ")", ":", "self", ".", "widget", "=", "SubElement", "(", "self", ".", "parent_widget", "(", ")", ",", "self", ".", "declaration", ".", "tag", ")" ]
Create the toolkit widget for the proxy object. This method is called during the top-down pass, just before the 'init_widget()' method is called. This method should create the toolkit widget and assign it to the 'widget' attribute.
[ "Create", "the", "toolkit", "widget", "for", "the", "proxy", "object", "." ]
python
test
unfoldingWord-dev/python-gogs-client
gogs_client/interface.py
https://github.com/unfoldingWord-dev/python-gogs-client/blob/b7f27f4995abf914c0db8a424760f5b27331939d/gogs_client/interface.py#L306-L317
def user_exists(self, username): """ Returns whether a user with username ``username`` exists. :param str username: username of user :return: whether a user with the specified username exists :rtype: bool :raises NetworkFailure: if there is an error communicating with the server :return: """ path = "/users/{}".format(username) return self._get(path).ok
[ "def", "user_exists", "(", "self", ",", "username", ")", ":", "path", "=", "\"/users/{}\"", ".", "format", "(", "username", ")", "return", "self", ".", "_get", "(", "path", ")", ".", "ok" ]
Returns whether a user with username ``username`` exists. :param str username: username of user :return: whether a user with the specified username exists :rtype: bool :raises NetworkFailure: if there is an error communicating with the server :return:
[ "Returns", "whether", "a", "user", "with", "username", "username", "exists", "." ]
python
train
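A hedged usage sketch for user_exists above; GogsApi is the assumed interface class exposing the method, and the server URL is a placeholder.

api = GogsApi('https://try.gogs.io')
if api.user_exists('frank'):
    print('user exists')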
darothen/xbpch
xbpch/util/cf.py
https://github.com/darothen/xbpch/blob/31972dd6fd5f3f7cecc3a46080ce4f43ca23fbe5/xbpch/util/cf.py#L123-L143
def get_valid_varname(varname): """ Replace characters (e.g., ':', '$', '=', '-') of a variable name, which may cause problems when using with (CF-)netCDF based packages. Parameters ---------- varname : string variable name. Notes ----- Characters replacement is based on the table stored in :attr:`VARNAME_MAP_CHAR`. """ vname = varname for s, r in VARNAME_MAP_CHAR: vname = vname.replace(s, r) return vname
[ "def", "get_valid_varname", "(", "varname", ")", ":", "vname", "=", "varname", "for", "s", ",", "r", "in", "VARNAME_MAP_CHAR", ":", "vname", "=", "vname", ".", "replace", "(", "s", ",", "r", ")", "return", "vname" ]
Replace characters (e.g., ':', '$', '=', '-') of a variable name, which may cause problems when used with (CF-)netCDF based packages. Parameters ---------- varname : string variable name. Notes ----- Character replacement is based on the table stored in :attr:`VARNAME_MAP_CHAR`.
[ "Replace", "characters", "(", "e", ".", "g", ".", ":", "$", "=", "-", ")", "of", "a", "variable", "name", "which", "may", "cause", "problems", "when", "using", "with", "(", "CF", "-", ")", "netCDF", "based", "packages", "." ]
python
train
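A brief usage sketch for get_valid_varname above; the diagnostic name is hypothetical and the exact substitutions depend on VARNAME_MAP_CHAR.

safe_name = get_valid_varname('IJ-AVG-$:NOx')
# characters such as ':', '$' and '-' are replaced according to VARNAME_MAP_CHAR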
numenta/htmresearch
htmresearch/frameworks/layers/laminar_network.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/laminar_network.py#L77-L86
def printNetwork(network): """ Given a network, print out regions sorted by phase """ print "The network has",len(network.regions.values()),"regions" for p in range(network.getMaxPhase()): print "=== Phase",p for region in network.regions.values(): if network.getPhases(region.name)[0] == p: print " ",region.name
[ "def", "printNetwork", "(", "network", ")", ":", "print", "\"The network has\"", ",", "len", "(", "network", ".", "regions", ".", "values", "(", ")", ")", ",", "\"regions\"", "for", "p", "in", "range", "(", "network", ".", "getMaxPhase", "(", ")", ")", ":", "print", "\"=== Phase\"", ",", "p", "for", "region", "in", "network", ".", "regions", ".", "values", "(", ")", ":", "if", "network", ".", "getPhases", "(", "region", ".", "name", ")", "[", "0", "]", "==", "p", ":", "print", "\" \"", ",", "region", ".", "name" ]
Given a network, print out regions sorted by phase
[ "Given", "a", "network", "print", "out", "regions", "sorted", "by", "phase" ]
python
train
gamechanger/confluent_schema_registry_client
confluent_schema_registry_client/__init__.py
https://github.com/gamechanger/confluent_schema_registry_client/blob/ac9196e366724eeb2f19f1a169fd2f9a0c8d68ae/confluent_schema_registry_client/__init__.py#L152-L158
def get_global_compatibility_level(self): """ Gets the global compatibility level. """ res = requests.get(self._url('/config'), headers=HEADERS) raise_if_failed(res) return res.json()['compatibility']
[ "def", "get_global_compatibility_level", "(", "self", ")", ":", "res", "=", "requests", ".", "get", "(", "self", ".", "_url", "(", "'/config'", ")", ",", "headers", "=", "HEADERS", ")", "raise_if_failed", "(", "res", ")", "return", "res", ".", "json", "(", ")", "[", "'compatibility'", "]" ]
Gets the global compatibility level.
[ "Gets", "the", "global", "compatibility", "level", "." ]
python
train
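A hedged usage sketch for get_global_compatibility_level above; the client class name and registry URL are assumptions.

client = SchemaRegistryClient('http://localhost:8081')
print(client.get_global_compatibility_level())
# e.g. 'BACKWARD', 'FORWARD', 'FULL' or 'NONE'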
gem/oq-engine
openquake/hazardlib/gsim/afshari_stewart_2016.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/afshari_stewart_2016.py#L104-L117
def get_distance_term(self, C, rrup): """ Returns the distance scaling term in equation 7 """ f_p = C["c1"] * rrup idx = np.logical_and(rrup > self.CONSTANTS["r1"], rrup <= self.CONSTANTS["r2"]) f_p[idx] = (C["c1"] * self.CONSTANTS["r1"]) +\ C["c2"] * (rrup[idx] - self.CONSTANTS["r1"]) idx = rrup > self.CONSTANTS["r2"] f_p[idx] = C["c1"] * self.CONSTANTS["r1"] +\ C["c2"] * (self.CONSTANTS["r2"] - self.CONSTANTS["r1"]) +\ C["c3"] * (rrup[idx] - self.CONSTANTS["r2"]) return f_p
[ "def", "get_distance_term", "(", "self", ",", "C", ",", "rrup", ")", ":", "f_p", "=", "C", "[", "\"c1\"", "]", "*", "rrup", "idx", "=", "np", ".", "logical_and", "(", "rrup", ">", "self", ".", "CONSTANTS", "[", "\"r1\"", "]", ",", "rrup", "<=", "self", ".", "CONSTANTS", "[", "\"r2\"", "]", ")", "f_p", "[", "idx", "]", "=", "(", "C", "[", "\"c1\"", "]", "*", "self", ".", "CONSTANTS", "[", "\"r1\"", "]", ")", "+", "C", "[", "\"c2\"", "]", "*", "(", "rrup", "[", "idx", "]", "-", "self", ".", "CONSTANTS", "[", "\"r1\"", "]", ")", "idx", "=", "rrup", ">", "self", ".", "CONSTANTS", "[", "\"r2\"", "]", "f_p", "[", "idx", "]", "=", "C", "[", "\"c1\"", "]", "*", "self", ".", "CONSTANTS", "[", "\"r1\"", "]", "+", "C", "[", "\"c2\"", "]", "*", "(", "self", ".", "CONSTANTS", "[", "\"r2\"", "]", "-", "self", ".", "CONSTANTS", "[", "\"r1\"", "]", ")", "+", "C", "[", "\"c3\"", "]", "*", "(", "rrup", "[", "idx", "]", "-", "self", ".", "CONSTANTS", "[", "\"r2\"", "]", ")", "return", "f_p" ]
Returns the distance scaling term in equation 7
[ "Returns", "the", "distance", "scaling", "term", "in", "equation", "7" ]
python
train
shmir/PyTrafficGenerator
trafficgenerator/tgn_object.py
https://github.com/shmir/PyTrafficGenerator/blob/382e5d549c83404af2a6571fe19c9e71df8bac14/trafficgenerator/tgn_object.py#L44-L57
def dumps(self, indent=1): """ Returns nested string representation of the dictionary (like json.dumps). :param indent: indentation level. """ str_keys_dict = OrderedDict({str(k): v for k, v in self.items()}) for k, v in str_keys_dict.items(): if isinstance(v, dict): str_keys_dict[k] = OrderedDict({str(k1): v1 for k1, v1 in v.items()}) for k1, v1 in str_keys_dict[k].items(): if isinstance(v1, dict): str_keys_dict[k][k1] = OrderedDict({str(k2): v2 for k2, v2 in v1.items()}) return json.dumps(str_keys_dict, indent=indent)
[ "def", "dumps", "(", "self", ",", "indent", "=", "1", ")", ":", "str_keys_dict", "=", "OrderedDict", "(", "{", "str", "(", "k", ")", ":", "v", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", "}", ")", "for", "k", ",", "v", "in", "str_keys_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "str_keys_dict", "[", "k", "]", "=", "OrderedDict", "(", "{", "str", "(", "k1", ")", ":", "v1", "for", "k1", ",", "v1", "in", "v", ".", "items", "(", ")", "}", ")", "for", "k1", ",", "v1", "in", "str_keys_dict", "[", "k", "]", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v1", ",", "dict", ")", ":", "str_keys_dict", "[", "k", "]", "[", "k1", "]", "=", "OrderedDict", "(", "{", "str", "(", "k2", ")", ":", "v2", "for", "k2", ",", "v2", "in", "v1", ".", "items", "(", ")", "}", ")", "return", "json", ".", "dumps", "(", "str_keys_dict", ",", "indent", "=", "indent", ")" ]
Returns nested string representation of the dictionary (like json.dumps). :param indent: indentation level.
[ "Returns", "nested", "string", "representation", "of", "the", "dictionary", "(", "like", "json", ".", "dumps", ")", "." ]
python
train
sassoo/goldman
goldman/utils/responder_helpers.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/utils/responder_helpers.py#L89-L96
def _from_rest_ignore(model, props): """ Purge fields that are completely unknown """ fields = model.all_fields for prop in props.keys(): if prop not in fields: del props[prop]
[ "def", "_from_rest_ignore", "(", "model", ",", "props", ")", ":", "fields", "=", "model", ".", "all_fields", "for", "prop", "in", "props", ".", "keys", "(", ")", ":", "if", "prop", "not", "in", "fields", ":", "del", "props", "[", "prop", "]" ]
Purge fields that are completely unknown
[ "Purge", "fields", "that", "are", "completely", "unknown" ]
python
train
zsiciarz/pyaavso
pyaavso/parsers/webobs.py
https://github.com/zsiciarz/pyaavso/blob/d3b9eb17bcecc6652841606802b5713fd6083cc1/pyaavso/parsers/webobs.py#L34-L56
def get_observations(self): """ Parses the HTML table into a list of dictionaries, each of which represents a single observation. """ if self.empty: return [] rows = list(self.tbody) observations = [] for row_observation, row_details in zip(rows[::2], rows[1::2]): data = {} cells = OBSERVATION_XPATH(row_observation) data['name'] = _clean_cell(cells[0]) data['date'] = _clean_cell(cells[1]) data['magnitude'] = _clean_cell(cells[3]) data['obscode'] = _clean_cell(cells[6]) cells = DETAILS_XPATH(row_details) data['comp1'] = _clean_cell(cells[0]) data['chart'] = _clean_cell(cells[3]).replace('None', '') data['comment_code'] = _clean_cell(cells[4]) data['notes'] = _clean_cell(cells[5]) observations.append(data) return observations
[ "def", "get_observations", "(", "self", ")", ":", "if", "self", ".", "empty", ":", "return", "[", "]", "rows", "=", "list", "(", "self", ".", "tbody", ")", "observations", "=", "[", "]", "for", "row_observation", ",", "row_details", "in", "zip", "(", "rows", "[", ":", ":", "2", "]", ",", "rows", "[", "1", ":", ":", "2", "]", ")", ":", "data", "=", "{", "}", "cells", "=", "OBSERVATION_XPATH", "(", "row_observation", ")", "data", "[", "'name'", "]", "=", "_clean_cell", "(", "cells", "[", "0", "]", ")", "data", "[", "'date'", "]", "=", "_clean_cell", "(", "cells", "[", "1", "]", ")", "data", "[", "'magnitude'", "]", "=", "_clean_cell", "(", "cells", "[", "3", "]", ")", "data", "[", "'obscode'", "]", "=", "_clean_cell", "(", "cells", "[", "6", "]", ")", "cells", "=", "DETAILS_XPATH", "(", "row_details", ")", "data", "[", "'comp1'", "]", "=", "_clean_cell", "(", "cells", "[", "0", "]", ")", "data", "[", "'chart'", "]", "=", "_clean_cell", "(", "cells", "[", "3", "]", ")", ".", "replace", "(", "'None'", ",", "''", ")", "data", "[", "'comment_code'", "]", "=", "_clean_cell", "(", "cells", "[", "4", "]", ")", "data", "[", "'notes'", "]", "=", "_clean_cell", "(", "cells", "[", "5", "]", ")", "observations", ".", "append", "(", "data", ")", "return", "observations" ]
Parses the HTML table into a list of dictionaries, each of which represents a single observation.
[ "Parses", "the", "HTML", "table", "into", "a", "list", "of", "dictionaries", "each", "of", "which", "represents", "a", "single", "observation", "." ]
python
valid
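A hedged usage sketch for get_observations above; WebObsResultsParser is the assumed parser class wrapping the method, and html holds a previously downloaded WebObs results page.

parser = WebObsResultsParser(html)
for obs in parser.get_observations():
    print(obs['name'], obs['date'], obs['magnitude'], obs['obscode'])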
SBRG/ssbio
ssbio/utils.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/utils.py#L988-L1013
def label_sequential_regions(inlist): """Input a list of labeled tuples and return a dictionary of sequentially labeled regions. Args: inlist (list): A list of tuples with the first number representing the index and the second the index label. Returns: dict: Dictionary of labeled regions. Examples: >>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')]) {'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]} """ import more_itertools as mit df = pd.DataFrame(inlist).set_index(0) labeled = {} for label in df[1].unique(): iterable = df[df[1] == label].index.tolist() labeled.update({'{}{}'.format(label, i + 1): items for i, items in enumerate([list(group) for group in mit.consecutive_groups(iterable)])}) return labeled
[ "def", "label_sequential_regions", "(", "inlist", ")", ":", "import", "more_itertools", "as", "mit", "df", "=", "pd", ".", "DataFrame", "(", "inlist", ")", ".", "set_index", "(", "0", ")", "labeled", "=", "{", "}", "for", "label", "in", "df", "[", "1", "]", ".", "unique", "(", ")", ":", "iterable", "=", "df", "[", "df", "[", "1", "]", "==", "label", "]", ".", "index", ".", "tolist", "(", ")", "labeled", ".", "update", "(", "{", "'{}{}'", ".", "format", "(", "label", ",", "i", "+", "1", ")", ":", "items", "for", "i", ",", "items", "in", "enumerate", "(", "[", "list", "(", "group", ")", "for", "group", "in", "mit", ".", "consecutive_groups", "(", "iterable", ")", "]", ")", "}", ")", "return", "labeled" ]
Input a list of labeled tuples and return a dictionary of sequentially labeled regions. Args: inlist (list): A list of tuples with the first number representing the index and the second the index label. Returns: dict: Dictionary of labeled regions. Examples: >>> label_sequential_regions([(1, 'O'), (2, 'O'), (3, 'O'), (4, 'M'), (5, 'M'), (6, 'I'), (7, 'M'), (8, 'O'), (9, 'O')]) {'O1': [1, 2, 3], 'M1': [4, 5], 'I1': [6], 'M2': [7], 'O2': [8, 9]}
[ "Input", "a", "list", "of", "labeled", "tuples", "and", "return", "a", "dictionary", "of", "sequentially", "labeled", "regions", "." ]
python
train
gem/oq-engine
openquake/commonlib/calc.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/calc.py#L208-L229
def make_hmap_array(pmap, imtls, poes, nsites): """ :returns: a compound array of hazard maps of shape nsites """ if isinstance(pmap, probability_map.ProbabilityMap): # this is here for compatibility with the # past, it could be removed in the future hmap = make_hmap(pmap, imtls, poes) pdic = general.DictArray({imt: poes for imt in imtls}) return convert_to_array(hmap, nsites, pdic) try: hcurves = pmap.value except AttributeError: hcurves = pmap dtlist = [('%s-%s' % (imt, poe), F32) for imt in imtls for poe in poes] array = numpy.zeros(len(pmap), dtlist) for imt, imls in imtls.items(): curves = hcurves[:, imtls(imt)] for poe in poes: array['%s-%s' % (imt, poe)] = compute_hazard_maps( curves, imls, poe).flat return array
[ "def", "make_hmap_array", "(", "pmap", ",", "imtls", ",", "poes", ",", "nsites", ")", ":", "if", "isinstance", "(", "pmap", ",", "probability_map", ".", "ProbabilityMap", ")", ":", "# this is here for compatibility with the", "# past, it could be removed in the future", "hmap", "=", "make_hmap", "(", "pmap", ",", "imtls", ",", "poes", ")", "pdic", "=", "general", ".", "DictArray", "(", "{", "imt", ":", "poes", "for", "imt", "in", "imtls", "}", ")", "return", "convert_to_array", "(", "hmap", ",", "nsites", ",", "pdic", ")", "try", ":", "hcurves", "=", "pmap", ".", "value", "except", "AttributeError", ":", "hcurves", "=", "pmap", "dtlist", "=", "[", "(", "'%s-%s'", "%", "(", "imt", ",", "poe", ")", ",", "F32", ")", "for", "imt", "in", "imtls", "for", "poe", "in", "poes", "]", "array", "=", "numpy", ".", "zeros", "(", "len", "(", "pmap", ")", ",", "dtlist", ")", "for", "imt", ",", "imls", "in", "imtls", ".", "items", "(", ")", ":", "curves", "=", "hcurves", "[", ":", ",", "imtls", "(", "imt", ")", "]", "for", "poe", "in", "poes", ":", "array", "[", "'%s-%s'", "%", "(", "imt", ",", "poe", ")", "]", "=", "compute_hazard_maps", "(", "curves", ",", "imls", ",", "poe", ")", ".", "flat", "return", "array" ]
:returns: a compound array of hazard maps of shape nsites
[ ":", "returns", ":", "a", "compound", "array", "of", "hazard", "maps", "of", "shape", "nsites" ]
python
train
priestc/moneywagon
moneywagon/services/exchange_services.py
https://github.com/priestc/moneywagon/blob/00518f1f557dcca8b3031f46d3564c2baa0227a3/moneywagon/services/exchange_services.py#L49-L65
def eight_decimal_places(amount, format="str"): """ >>> eight_decimal_places(3.12345678912345) "3.12345679" >>> eight_decimal_places("3.12345678912345") "3.12345679" >>> eight_decimal_places(3.12345678912345, format='float') 3.12345679 >>> eight_decimal_places("3.12345678912345", format='float') 3.12345679 """ if type(amount) == str: return amount if format == 'str': return "%.8f" % amount if format == 'float': return float("%.8f" % amount)
[ "def", "eight_decimal_places", "(", "amount", ",", "format", "=", "\"str\"", ")", ":", "if", "type", "(", "amount", ")", "==", "str", ":", "return", "amount", "if", "format", "==", "'str'", ":", "return", "\"%.8f\"", "%", "amount", "if", "format", "==", "'float'", ":", "return", "float", "(", "\"%.8f\"", "%", "amount", ")" ]
>>> eight_decimal_places(3.12345678912345) "3.12345679" >>> eight_decimal_places("3.12345678912345") "3.12345679" >>> eight_decimal_places(3.12345678912345, format='float') 3.12345679 >>> eight_decimal_places("3.12345678912345", format='float') 3.12345679
[ ">>>", "eight_decimal_places", "(", "3", ".", "12345678912345", ")", "3", ".", "12345679", ">>>", "eight_decimal_places", "(", "3", ".", "12345678912345", ")", "3", ".", "12345679", ">>>", "eight_decimal_places", "(", "3", ".", "12345678912345", "format", "=", "float", ")", "3", ".", "12345679", ">>>", "eight_decimal_places", "(", "3", ".", "12345678912345", "format", "=", "float", ")", "3", ".", "12345679" ]
python
train
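A minimal usage sketch for the eight_decimal_places record above; it needs no moneywagon import, since plain "%.8f" formatting is all the function relies on (values chosen to mirror the doctest):

amount = 3.12345678912345
print("%.8f" % amount)          # '3.12345679' -> what format="str" returns
print(float("%.8f" % amount))   # 3.12345679   -> what format="float" returns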
elliterate/capybara.py
capybara/queries/selector_query.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/queries/selector_query.py#L136-L149
def visible(self): """ str: The desired element visibility. """ if self.options["visible"] is not None: if self.options["visible"] is True: return "visible" elif self.options["visible"] is False: return "all" else: return self.options["visible"] else: if capybara.ignore_hidden_elements: return "visible" else: return "all"
[ "def", "visible", "(", "self", ")", ":", "if", "self", ".", "options", "[", "\"visible\"", "]", "is", "not", "None", ":", "if", "self", ".", "options", "[", "\"visible\"", "]", "is", "True", ":", "return", "\"visible\"", "elif", "self", ".", "options", "[", "\"visible\"", "]", "is", "False", ":", "return", "\"all\"", "else", ":", "return", "self", ".", "options", "[", "\"visible\"", "]", "else", ":", "if", "capybara", ".", "ignore_hidden_elements", ":", "return", "\"visible\"", "else", ":", "return", "\"all\"" ]
str: The desired element visibility.
[ "str", ":", "The", "desired", "element", "visibility", "." ]
python
test
peri-source/peri
peri/comp/ilms.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/ilms.py#L823-L837
def _barnes(self, pos): """Creates a barnes interpolant & calculates its values""" b_in = self.b_in dist = lambda x: np.sqrt(np.dot(x,x)) #we take a filter size as the max distance between the grids along #x or y: sz = self.npts[1] coeffs = self.get_values(self.barnes_params) b = BarnesInterpolationND( b_in, coeffs, filter_size=self.filtsize, damp=0.9, iterations=3, clip=self.local_updates, clipsize=self.barnes_clip_size, blocksize=100 # FIXME magic blocksize ) return b(pos)
[ "def", "_barnes", "(", "self", ",", "pos", ")", ":", "b_in", "=", "self", ".", "b_in", "dist", "=", "lambda", "x", ":", "np", ".", "sqrt", "(", "np", ".", "dot", "(", "x", ",", "x", ")", ")", "#we take a filter size as the max distance between the grids along", "#x or y:", "sz", "=", "self", ".", "npts", "[", "1", "]", "coeffs", "=", "self", ".", "get_values", "(", "self", ".", "barnes_params", ")", "b", "=", "BarnesInterpolationND", "(", "b_in", ",", "coeffs", ",", "filter_size", "=", "self", ".", "filtsize", ",", "damp", "=", "0.9", ",", "iterations", "=", "3", ",", "clip", "=", "self", ".", "local_updates", ",", "clipsize", "=", "self", ".", "barnes_clip_size", ",", "blocksize", "=", "100", "# FIXME magic blocksize", ")", "return", "b", "(", "pos", ")" ]
Creates a barnes interpolant & calculates its values
[ "Creates", "a", "barnes", "interpolant", "&", "calculates", "its", "values" ]
python
valid
xtuml/pyxtuml
bridgepoint/gen_sql_schema.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/gen_sql_schema.py#L38-L82
def main(): ''' Parse argv for options and arguments, and start schema generation. ''' parser = optparse.OptionParser(usage="%prog [options] <model_path> [another_model_path...]", version=xtuml.version.complete_string, formatter=optparse.TitledHelpFormatter()) parser.set_description(__doc__.strip()) parser.add_option("-c", "--component", dest="component", metavar="NAME", help="export sql schema for the component named NAME", action="store", default=None) parser.add_option("-d", "--derived-attributes", dest="derived", help="include derived attributes in the schema", action="store_true", default=False) parser.add_option("-o", "--output", dest='output', metavar="PATH", help="save sql schema to PATH (required)", action="store", default=None) parser.add_option("-v", "--verbosity", dest='verbosity', action="count", help="increase debug logging level", default=2) (opts, args) = parser.parse_args() if len(args) == 0 or opts.output is None: parser.print_help() sys.exit(1) levels = { 0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG, } logging.basicConfig(level=levels.get(opts.verbosity, logging.DEBUG)) loader = ooaofooa.Loader() for filename in args: loader.filename_input(filename) c = loader.build_component(opts.component, opts.derived) xtuml.persist_database(c, opts.output)
[ "def", "main", "(", ")", ":", "parser", "=", "optparse", ".", "OptionParser", "(", "usage", "=", "\"%prog [options] <model_path> [another_model_path...]\"", ",", "version", "=", "xtuml", ".", "version", ".", "complete_string", ",", "formatter", "=", "optparse", ".", "TitledHelpFormatter", "(", ")", ")", "parser", ".", "set_description", "(", "__doc__", ".", "strip", "(", ")", ")", "parser", ".", "add_option", "(", "\"-c\"", ",", "\"--component\"", ",", "dest", "=", "\"component\"", ",", "metavar", "=", "\"NAME\"", ",", "help", "=", "\"export sql schema for the component named NAME\"", ",", "action", "=", "\"store\"", ",", "default", "=", "None", ")", "parser", ".", "add_option", "(", "\"-d\"", ",", "\"--derived-attributes\"", ",", "dest", "=", "\"derived\"", ",", "help", "=", "\"include derived attributes in the schema\"", ",", "action", "=", "\"store_true\"", ",", "default", "=", "False", ")", "parser", ".", "add_option", "(", "\"-o\"", ",", "\"--output\"", ",", "dest", "=", "'output'", ",", "metavar", "=", "\"PATH\"", ",", "help", "=", "\"save sql schema to PATH (required)\"", ",", "action", "=", "\"store\"", ",", "default", "=", "None", ")", "parser", ".", "add_option", "(", "\"-v\"", ",", "\"--verbosity\"", ",", "dest", "=", "'verbosity'", ",", "action", "=", "\"count\"", ",", "help", "=", "\"increase debug logging level\"", ",", "default", "=", "2", ")", "(", "opts", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "if", "len", "(", "args", ")", "==", "0", "or", "opts", ".", "output", "is", "None", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "levels", "=", "{", "0", ":", "logging", ".", "ERROR", ",", "1", ":", "logging", ".", "WARNING", ",", "2", ":", "logging", ".", "INFO", ",", "3", ":", "logging", ".", "DEBUG", ",", "}", "logging", ".", "basicConfig", "(", "level", "=", "levels", ".", "get", "(", "opts", ".", "verbosity", ",", "logging", ".", "DEBUG", ")", ")", "loader", "=", "ooaofooa", ".", "Loader", "(", ")", "for", "filename", "in", "args", ":", "loader", ".", "filename_input", "(", "filename", ")", "c", "=", "loader", ".", "build_component", "(", "opts", ".", "component", ",", "opts", ".", "derived", ")", "xtuml", ".", "persist_database", "(", "c", ",", "opts", ".", "output", ")" ]
Parse argv for options and arguments, and start schema generation.
[ "Parse", "argv", "for", "options", "and", "arguments", "and", "start", "schema", "generation", "." ]
python
test
micha030201/aionationstates
aionationstates/nation_.py
https://github.com/micha030201/aionationstates/blob/dc86b86d994cbab830b69ab8023601c73e778b3a/aionationstates/nation_.py#L572-L582
async def deaths(self, root): """Causes of death in the nation, as percentages. Returns ------- an :class:`ApiQuery` of dict with keys of str and values of float """ return { elem.get('type'): float(elem.text) for elem in root.find('DEATHS') }
[ "async", "def", "deaths", "(", "self", ",", "root", ")", ":", "return", "{", "elem", ".", "get", "(", "'type'", ")", ":", "float", "(", "elem", ".", "text", ")", "for", "elem", "in", "root", ".", "find", "(", "'DEATHS'", ")", "}" ]
Causes of death in the nation, as percentages. Returns ------- an :class:`ApiQuery` of dict with keys of str and values of float
[ "Causes", "of", "death", "in", "the", "nation", "as", "percentages", "." ]
python
train
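A self-contained sketch of what the deaths coroutine above extracts, using a hand-made XML fragment (the element names mirror the NationStates API shape assumed by the record; the fragment itself is made up):

import xml.etree.ElementTree as ET

root = ET.fromstring(
    "<NATION><DEATHS>"
    "<CAUSE type='Old Age'>62.5</CAUSE>"
    "<CAUSE type='Heart Disease'>12.3</CAUSE>"
    "</DEATHS></NATION>"
)
deaths = {elem.get('type'): float(elem.text) for elem in root.find('DEATHS')}
print(deaths)  # {'Old Age': 62.5, 'Heart Disease': 12.3}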
kdeldycke/chessboard
chessboard/board.py
https://github.com/kdeldycke/chessboard/blob/ac7a14dc7b6905701e3f6d4e01e8fe1869241bed/chessboard/board.py#L202-L206
def get(self, x, y): """ Return piece placed at the provided coordinates. """ for piece in self.pieces: if (piece.x, piece.y) == (x, y): return piece
[ "def", "get", "(", "self", ",", "x", ",", "y", ")", ":", "for", "piece", "in", "self", ".", "pieces", ":", "if", "(", "piece", ".", "x", ",", "piece", ".", "y", ")", "==", "(", "x", ",", "y", ")", ":", "return", "piece" ]
Return piece placed at the provided coordinates.
[ "Return", "piece", "placed", "at", "the", "provided", "coordinates", "." ]
python
train
saltstack/salt
salt/states/mssql_user.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mssql_user.py#L88-L114
def absent(name, **kwargs): ''' Ensure that the named user is absent name The username of the user to remove ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if not __salt__['mssql.user_exists'](name): ret['comment'] = 'User {0} is not present'.format(name) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'User {0} is set to be removed'.format(name) return ret if __salt__['mssql.user_remove'](name, **kwargs): ret['comment'] = 'User {0} has been removed'.format(name) ret['changes'][name] = 'Absent' return ret # else: ret['result'] = False ret['comment'] = 'User {0} failed to be removed'.format(name) return ret
[ "def", "absent", "(", "name", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "if", "not", "__salt__", "[", "'mssql.user_exists'", "]", "(", "name", ")", ":", "ret", "[", "'comment'", "]", "=", "'User {0} is not present'", ".", "format", "(", "name", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'User {0} is set to be removed'", ".", "format", "(", "name", ")", "return", "ret", "if", "__salt__", "[", "'mssql.user_remove'", "]", "(", "name", ",", "*", "*", "kwargs", ")", ":", "ret", "[", "'comment'", "]", "=", "'User {0} has been removed'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "'Absent'", "return", "ret", "# else:", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'User {0} failed to be removed'", ".", "format", "(", "name", ")", "return", "ret" ]
Ensure that the named user is absent name The username of the user to remove
[ "Ensure", "that", "the", "named", "user", "is", "absent" ]
python
train
dwavesystems/dwave-cloud-client
dwave/cloud/coders.py
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/coders.py#L155-L169
def _decode_doubles(message): """Helper for decode_qp, decodes a double array. The double array is stored as little endian 64 bit doubles. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. Args: message: the double array Returns: decoded double array """ binary = base64.b64decode(message) return struct.unpack('<' + ('d' * (len(binary) // 8)), binary)
[ "def", "_decode_doubles", "(", "message", ")", ":", "binary", "=", "base64", ".", "b64decode", "(", "message", ")", "return", "struct", ".", "unpack", "(", "'<'", "+", "(", "'d'", "*", "(", "len", "(", "binary", ")", "//", "8", ")", ")", ",", "binary", ")" ]
Helper for decode_qp, decodes a double array. The double array is stored as little endian 64 bit doubles. The array has then been base64 encoded. Since we are decoding we do these steps in reverse. Args: message: the double array Returns: decoded double array
[ "Helper", "for", "decode_qp", "decodes", "a", "double", "array", "." ]
python
train
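A round-trip sketch of the encoding that _decode_doubles reverses: pack little-endian 64-bit doubles, base64-encode them, then decode exactly as the record does (example values are arbitrary):

import base64
import struct

values = (1.0, 2.5, -3.75)
encoded = base64.b64encode(struct.pack('<' + 'd' * len(values), *values))

binary = base64.b64decode(encoded)
decoded = struct.unpack('<' + ('d' * (len(binary) // 8)), binary)
assert decoded == values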
Azure/azure-kusto-python
azure-kusto-ingest/azure/kusto/ingest/_resource_manager.py
https://github.com/Azure/azure-kusto-python/blob/92466a2ae175d6353d1dee3496a02517b2a71a86/azure-kusto-ingest/azure/kusto/ingest/_resource_manager.py#L17-L20
def parse(cls, uri): """Parses uri into a ResourceUri object""" match = _URI_FORMAT.search(uri) return cls(match.group(1), match.group(2), match.group(3), match.group(4))
[ "def", "parse", "(", "cls", ",", "uri", ")", ":", "match", "=", "_URI_FORMAT", ".", "search", "(", "uri", ")", "return", "cls", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ",", "match", ".", "group", "(", "3", ")", ",", "match", ".", "group", "(", "4", ")", ")" ]
Parses uri into a ResourceUri object
[ "Parses", "uri", "into", "a", "ResourceUri", "object" ]
python
train
log2timeline/dfvfs
dfvfs/vfs/zip_file_entry.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/zip_file_entry.py#L197-L205
def name(self): """str: name of the file entry, without the full path.""" path = getattr(self.path_spec, 'location', None) if path is not None and not isinstance(path, py2to3.UNICODE_TYPE): try: path = path.decode(self._file_system.encoding) except UnicodeDecodeError: path = None return self._file_system.BasenamePath(path)
[ "def", "name", "(", "self", ")", ":", "path", "=", "getattr", "(", "self", ".", "path_spec", ",", "'location'", ",", "None", ")", "if", "path", "is", "not", "None", "and", "not", "isinstance", "(", "path", ",", "py2to3", ".", "UNICODE_TYPE", ")", ":", "try", ":", "path", "=", "path", ".", "decode", "(", "self", ".", "_file_system", ".", "encoding", ")", "except", "UnicodeDecodeError", ":", "path", "=", "None", "return", "self", ".", "_file_system", ".", "BasenamePath", "(", "path", ")" ]
str: name of the file entry, without the full path.
[ "str", ":", "name", "of", "the", "file", "entry", "without", "the", "full", "path", "." ]
python
train
PyGithub/PyGithub
github/Branch.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Branch.py#L328-L338
def get_team_push_restrictions(self): """ :calls: `GET /repos/:owner/:repo/branches/:branch/protection/restrictions/teams <https://developer.github.com/v3/repos/branches>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team` """ return github.PaginatedList.PaginatedList( github.Team.Team, self._requester, self.protection_url + "/restrictions/teams", None )
[ "def", "get_team_push_restrictions", "(", "self", ")", ":", "return", "github", ".", "PaginatedList", ".", "PaginatedList", "(", "github", ".", "Team", ".", "Team", ",", "self", ".", "_requester", ",", "self", ".", "protection_url", "+", "\"/restrictions/teams\"", ",", "None", ")" ]
:calls: `GET /repos/:owner/:repo/branches/:branch/protection/restrictions/teams <https://developer.github.com/v3/repos/branches>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
[ ":", "calls", ":", "GET", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "branches", "/", ":", "branch", "/", "protection", "/", "restrictions", "/", "teams", "<https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "repos", "/", "branches", ">", "_", ":", "rtype", ":", ":", "class", ":", "github", ".", "PaginatedList", ".", "PaginatedList", "of", ":", "class", ":", "github", ".", "Team", ".", "Team" ]
python
train
hubo1016/vlcp
vlcp/event/lock.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L65-L76
def trylock(self): "Try to acquire lock and return True; if cannot acquire the lock at this moment, return False." if self.locked: return True if self.lockroutine: return False waiter = self.scheduler.send(LockEvent(self.context, self.key, self)) if waiter: return False else: self.locked = True return True
[ "def", "trylock", "(", "self", ")", ":", "if", "self", ".", "locked", ":", "return", "True", "if", "self", ".", "lockroutine", ":", "return", "False", "waiter", "=", "self", ".", "scheduler", ".", "send", "(", "LockEvent", "(", "self", ".", "context", ",", "self", ".", "key", ",", "self", ")", ")", "if", "waiter", ":", "return", "False", "else", ":", "self", ".", "locked", "=", "True", "return", "True" ]
Try to acquire lock and return True; if cannot acquire the lock at this moment, return False.
[ "Try", "to", "acquire", "lock", "and", "return", "True", ";", "if", "cannot", "acquire", "the", "lock", "at", "this", "moment", "return", "False", "." ]
python
train
numenta/nupic
src/nupic/data/generators/data_generator.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L129-L138
def setFlag(self, index, flag): """Set flag for field at index. Flags are special characters such as 'S' for sequence or 'T' for timestamp. Parameters: -------------------------------------------------------------------- index: index of field whose flag is being set flag: special character """ assert len(self.fields)>index self.fields[index].flag=flag
[ "def", "setFlag", "(", "self", ",", "index", ",", "flag", ")", ":", "assert", "len", "(", "self", ".", "fields", ")", ">", "index", "self", ".", "fields", "[", "index", "]", ".", "flag", "=", "flag" ]
Set flag for field at index. Flags are special characters such as 'S' for sequence or 'T' for timestamp. Parameters: -------------------------------------------------------------------- index: index of field whose flag is being set flag: special character
[ "Set", "flag", "for", "field", "at", "index", ".", "Flags", "are", "special", "characters", "such", "as", "S", "for", "sequence", "or", "T", "for", "timestamp", ".", "Parameters", ":", "--------------------------------------------------------------------", "index", ":", "index", "of", "field", "whose", "flag", "is", "being", "set", "flag", ":", "special", "character" ]
python
valid
materialsproject/pymatgen
pymatgen/io/feff/inputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/feff/inputs.py#L307-L325
def _set_cluster(self): """ Compute and set the cluster of atoms as a Molecule object. The siteato coordinates are translated such that the absorbing atom(aka central atom) is at the origin. Returns: Molecule """ center = self.struct[self.center_index].coords sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius) symbols = [self.absorbing_atom] coords = [[0, 0, 0]] for i, site_dist in enumerate(sphere): site_symbol = re.sub(r"[^aA-zZ]+", "", site_dist[0].species_string) symbols.append(site_symbol) coords.append(site_dist[0].coords - center) return Molecule(symbols, coords)
[ "def", "_set_cluster", "(", "self", ")", ":", "center", "=", "self", ".", "struct", "[", "self", ".", "center_index", "]", ".", "coords", "sphere", "=", "self", ".", "struct", ".", "get_neighbors", "(", "self", ".", "struct", "[", "self", ".", "center_index", "]", ",", "self", ".", "radius", ")", "symbols", "=", "[", "self", ".", "absorbing_atom", "]", "coords", "=", "[", "[", "0", ",", "0", ",", "0", "]", "]", "for", "i", ",", "site_dist", "in", "enumerate", "(", "sphere", ")", ":", "site_symbol", "=", "re", ".", "sub", "(", "r\"[^aA-zZ]+\"", ",", "\"\"", ",", "site_dist", "[", "0", "]", ".", "species_string", ")", "symbols", ".", "append", "(", "site_symbol", ")", "coords", ".", "append", "(", "site_dist", "[", "0", "]", ".", "coords", "-", "center", ")", "return", "Molecule", "(", "symbols", ",", "coords", ")" ]
Compute and set the cluster of atoms as a Molecule object. The siteato coordinates are translated such that the absorbing atom(aka central atom) is at the origin. Returns: Molecule
[ "Compute", "and", "set", "the", "cluster", "of", "atoms", "as", "a", "Molecule", "object", ".", "The", "siteato", "coordinates", "are", "translated", "such", "that", "the", "absorbing", "atom", "(", "aka", "central", "atom", ")", "is", "at", "the", "origin", "." ]
python
train
brechtm/rinohtype
src/rinoh/backend/pdf/xobject/purepng.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/backend/pdf/xobject/purepng.py#L1604-L1609
def newarray(self, length, value=0): """Initialise empty row""" if self.bitdepth > 8: return array('H', [value] * length) else: return bytearray([value] * length)
[ "def", "newarray", "(", "self", ",", "length", ",", "value", "=", "0", ")", ":", "if", "self", ".", "bitdepth", ">", "8", ":", "return", "array", "(", "'H'", ",", "[", "value", "]", "*", "length", ")", "else", ":", "return", "bytearray", "(", "[", "value", "]", "*", "length", ")" ]
Initialise empty row
[ "Initialise", "empty", "row" ]
python
train
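A small sketch of the two row containers newarray chooses between, outside the PNG writer (length and value are arbitrary):

from array import array

length, value = 4, 0
row_16bit = array('H', [value] * length)   # used when bitdepth > 8 (16-bit samples)
row_8bit = bytearray([value] * length)     # used otherwise (8-bit samples)
print(row_16bit, row_8bit)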
thiagopbueno/pyrddl
pyrddl/parser.py
https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/parser.py#L318-L324
def p_type_def(self, p): '''type_def : IDENT COLON OBJECT SEMI | IDENT COLON LCURLY enum_list RCURLY SEMI''' if len(p) == 5: p[0] = (p[1], p[3]) elif len(p) == 7: p[0] = (p[1], p[4])
[ "def", "p_type_def", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "5", ":", "p", "[", "0", "]", "=", "(", "p", "[", "1", "]", ",", "p", "[", "3", "]", ")", "elif", "len", "(", "p", ")", "==", "7", ":", "p", "[", "0", "]", "=", "(", "p", "[", "1", "]", ",", "p", "[", "4", "]", ")" ]
type_def : IDENT COLON OBJECT SEMI | IDENT COLON LCURLY enum_list RCURLY SEMI
[ "type_def", ":", "IDENT", "COLON", "OBJECT", "SEMI", "|", "IDENT", "COLON", "LCURLY", "enum_list", "RCURLY", "SEMI" ]
python
train
markovmodel/msmtools
msmtools/estimation/api.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/estimation/api.py#L1003-L1073
def log_likelihood(C, T): r"""Log-likelihood of the count matrix given a transition matrix. Parameters ---------- C : (M, M) ndarray or scipy.sparse matrix Count matrix T : (M, M) ndarray orscipy.sparse matrix Transition matrix Returns ------- logL : float Log-likelihood of the count matrix Notes ----- The likelihood of a set of observed transition counts :math:`C=(c_{ij})` for a given matrix of transition counts :math:`T=(t_{ij})` is given by .. math:: L(C|P)=\prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right) The log-likelihood is given by .. math:: l(C|P)=\sum_{i,j=1}^{M}c_{ij} \log p_{ij}. The likelihood describes the probability of making an observation :math:`C` for a given model :math:`P`. Examples -------- >>> import numpy as np >>> from msmtools.estimation import log_likelihood >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> C = np.array([[58, 7, 0], [6, 0, 4], [0, 3, 21]]) >>> logL = log_likelihood(C, T) >>> logL # doctest: +ELLIPSIS -38.2808034725... >>> C = np.array([[58, 20, 0], [6, 0, 4], [0, 3, 21]]) >>> logL = log_likelihood(C, T) >>> logL # doctest: +ELLIPSIS -68.2144096814... References ---------- .. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D Chodera, C Schuette and F Noe. 2011. Markov models of molecular kinetics: Generation and validation. J Chem Phys 134: 174105 """ if issparse(C) and issparse(T): return sparse.likelihood.log_likelihood(C, T) else: # use the dense likelihood calculator for all other cases # if a mix of dense/sparse C/T matrices is used, then both # will be converted to ndarrays. if not isinstance(C, np.ndarray): C = np.array(C) if not isinstance(T, np.ndarray): T = np.array(T) # computation is still efficient, because we only use terms # for nonzero elements of T nz = np.nonzero(T) return np.dot(C[nz], np.log(T[nz]))
[ "def", "log_likelihood", "(", "C", ",", "T", ")", ":", "if", "issparse", "(", "C", ")", "and", "issparse", "(", "T", ")", ":", "return", "sparse", ".", "likelihood", ".", "log_likelihood", "(", "C", ",", "T", ")", "else", ":", "# use the dense likelihood calculator for all other cases", "# if a mix of dense/sparse C/T matrices is used, then both", "# will be converted to ndarrays.", "if", "not", "isinstance", "(", "C", ",", "np", ".", "ndarray", ")", ":", "C", "=", "np", ".", "array", "(", "C", ")", "if", "not", "isinstance", "(", "T", ",", "np", ".", "ndarray", ")", ":", "T", "=", "np", ".", "array", "(", "T", ")", "# computation is still efficient, because we only use terms", "# for nonzero elements of T", "nz", "=", "np", ".", "nonzero", "(", "T", ")", "return", "np", ".", "dot", "(", "C", "[", "nz", "]", ",", "np", ".", "log", "(", "T", "[", "nz", "]", ")", ")" ]
r"""Log-likelihood of the count matrix given a transition matrix. Parameters ---------- C : (M, M) ndarray or scipy.sparse matrix Count matrix T : (M, M) ndarray orscipy.sparse matrix Transition matrix Returns ------- logL : float Log-likelihood of the count matrix Notes ----- The likelihood of a set of observed transition counts :math:`C=(c_{ij})` for a given matrix of transition counts :math:`T=(t_{ij})` is given by .. math:: L(C|P)=\prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right) The log-likelihood is given by .. math:: l(C|P)=\sum_{i,j=1}^{M}c_{ij} \log p_{ij}. The likelihood describes the probability of making an observation :math:`C` for a given model :math:`P`. Examples -------- >>> import numpy as np >>> from msmtools.estimation import log_likelihood >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]]) >>> C = np.array([[58, 7, 0], [6, 0, 4], [0, 3, 21]]) >>> logL = log_likelihood(C, T) >>> logL # doctest: +ELLIPSIS -38.2808034725... >>> C = np.array([[58, 20, 0], [6, 0, 4], [0, 3, 21]]) >>> logL = log_likelihood(C, T) >>> logL # doctest: +ELLIPSIS -68.2144096814... References ---------- .. [1] Prinz, J H, H Wu, M Sarich, B Keller, M Senne, M Held, J D Chodera, C Schuette and F Noe. 2011. Markov models of molecular kinetics: Generation and validation. J Chem Phys 134: 174105
[ "r", "Log", "-", "likelihood", "of", "the", "count", "matrix", "given", "a", "transition", "matrix", "." ]
python
train
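A pure-numpy sketch of the dense branch of log_likelihood above, reproducing the sum of c_ij * log(p_ij) over the nonzero entries of T (small made-up matrices):

import numpy as np

T = np.array([[0.9, 0.1], [0.5, 0.5]])   # row-stochastic transition matrix
C = np.array([[10, 2], [3, 3]])          # observed transition counts
nz = np.nonzero(T)
logL = np.dot(C[nz], np.log(T[nz]))
print(logL)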
awacha/sastool
sastool/io/credo_cct/header.py
https://github.com/awacha/sastool/blob/deaddfa3002f3f6818697e36139633b7e30427a3/sastool/io/credo_cct/header.py#L124-L131
def beamcentery(self) -> ErrorValue: """Y (row) coordinate of the beam center, pixel units, 0-based.""" try: return ErrorValue(self._data['geometry']['beamposx'], self._data['geometry']['beamposx.err']) except KeyError: return ErrorValue(self._data['geometry']['beamposx'], 0.0)
[ "def", "beamcentery", "(", "self", ")", "->", "ErrorValue", ":", "try", ":", "return", "ErrorValue", "(", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposx'", "]", ",", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposx.err'", "]", ")", "except", "KeyError", ":", "return", "ErrorValue", "(", "self", ".", "_data", "[", "'geometry'", "]", "[", "'beamposx'", "]", ",", "0.0", ")" ]
Y (row) coordinate of the beam center, pixel units, 0-based.
[ "Y", "(", "row", ")", "coordinate", "of", "the", "beam", "center", "pixel", "units", "0", "-", "based", "." ]
python
train
PMEAL/OpenPNM
openpnm/core/Base.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/Base.py#L1589-L1631
def _parse_labels(self, labels, element): r""" This private method is used for converting \'labels\' to a proper format, including dealing with wildcards (\*). Parameters ---------- labels : string or list of strings The label or list of labels to be parsed. Note that the \* can be used as a wildcard. Returns ------- A list of label strings, with all wildcard matches included if applicable. """ if labels is None: raise Exception('Labels cannot be None') if type(labels) is str: labels = [labels] # Parse the labels list parsed_labels = [] for label in labels: # Remove element from label, if present if element in label: label = label.split('.')[-1] # Deal with wildcards if '*' in label: Ls = [L.split('.')[-1] for L in self.labels(element=element)] if label.startswith('*'): temp = [L for L in Ls if L.endswith(label.strip('*'))] if label.endswith('*'): temp = [L for L in Ls if L.startswith(label.strip('*'))] temp = [element+'.'+L for L in temp] elif element+'.'+label in self.keys(): temp = [element+'.'+label] else: temp = [element+'.'+label] parsed_labels.extend(temp) # Remove duplicates if any [parsed_labels.remove(L) for L in parsed_labels if parsed_labels.count(L) > 1] return parsed_labels
[ "def", "_parse_labels", "(", "self", ",", "labels", ",", "element", ")", ":", "if", "labels", "is", "None", ":", "raise", "Exception", "(", "'Labels cannot be None'", ")", "if", "type", "(", "labels", ")", "is", "str", ":", "labels", "=", "[", "labels", "]", "# Parse the labels list", "parsed_labels", "=", "[", "]", "for", "label", "in", "labels", ":", "# Remove element from label, if present", "if", "element", "in", "label", ":", "label", "=", "label", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "# Deal with wildcards", "if", "'*'", "in", "label", ":", "Ls", "=", "[", "L", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "for", "L", "in", "self", ".", "labels", "(", "element", "=", "element", ")", "]", "if", "label", ".", "startswith", "(", "'*'", ")", ":", "temp", "=", "[", "L", "for", "L", "in", "Ls", "if", "L", ".", "endswith", "(", "label", ".", "strip", "(", "'*'", ")", ")", "]", "if", "label", ".", "endswith", "(", "'*'", ")", ":", "temp", "=", "[", "L", "for", "L", "in", "Ls", "if", "L", ".", "startswith", "(", "label", ".", "strip", "(", "'*'", ")", ")", "]", "temp", "=", "[", "element", "+", "'.'", "+", "L", "for", "L", "in", "temp", "]", "elif", "element", "+", "'.'", "+", "label", "in", "self", ".", "keys", "(", ")", ":", "temp", "=", "[", "element", "+", "'.'", "+", "label", "]", "else", ":", "temp", "=", "[", "element", "+", "'.'", "+", "label", "]", "parsed_labels", ".", "extend", "(", "temp", ")", "# Remove duplicates if any", "[", "parsed_labels", ".", "remove", "(", "L", ")", "for", "L", "in", "parsed_labels", "if", "parsed_labels", ".", "count", "(", "L", ")", ">", "1", "]", "return", "parsed_labels" ]
r""" This private method is used for converting \'labels\' to a proper format, including dealing with wildcards (\*). Parameters ---------- labels : string or list of strings The label or list of labels to be parsed. Note that the \* can be used as a wildcard. Returns ------- A list of label strings, with all wildcard matches included if applicable.
[ "r", "This", "private", "method", "is", "used", "for", "converting", "\\", "labels", "\\", "to", "a", "proper", "format", "including", "dealing", "with", "wildcards", "(", "\\", "*", ")", "." ]
python
train
OpenHumans/open-humans-api
ohapi/utils_fs.py
https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/utils_fs.py#L509-L526
def read_id_list(filepath): """ Get project member id from a file. :param filepath: This field is the path of file to read. """ if not filepath: return None id_list = [] with open(filepath) as f: for line in f: line = line.rstrip() if not re.match('^[0-9]{8}$', line): raise('Each line in whitelist or blacklist is expected ' 'to contain an eight digit ID, and nothing else.') else: id_list.append(line) return id_list
[ "def", "read_id_list", "(", "filepath", ")", ":", "if", "not", "filepath", ":", "return", "None", "id_list", "=", "[", "]", "with", "open", "(", "filepath", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "rstrip", "(", ")", "if", "not", "re", ".", "match", "(", "'^[0-9]{8}$'", ",", "line", ")", ":", "raise", "(", "'Each line in whitelist or blacklist is expected '", "'to contain an eight digit ID, and nothing else.'", ")", "else", ":", "id_list", ".", "append", "(", "line", ")", "return", "id_list" ]
Get project member id from a file. :param filepath: This field is the path of file to read.
[ "Get", "project", "member", "id", "from", "a", "file", "." ]
python
train
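One caveat about the read_id_list record above: raise('...') raises a bare string, which is a TypeError under Python 3 rather than the intended error. A hedged corrected sketch (not the project's actual fix) raises an explicit exception type instead:

import re

def read_id_list_sketch(filepath):
    if not filepath:
        return None
    id_list = []
    with open(filepath) as f:
        for line in f:
            line = line.rstrip()
            if not re.match('^[0-9]{8}$', line):
                raise ValueError('Each line in whitelist or blacklist is expected '
                                 'to contain an eight digit ID, and nothing else.')
            id_list.append(line)
    return id_list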
dsoprea/NsqSpinner
nsq/master.py
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L239-L257
def set_servers(self, node_couplets): """Set the current collection of servers. The entries are 2-tuples of contexts and nodes. """ node_couplets_s = set(node_couplets) if node_couplets_s != self.__node_couplets_s: _logger.info("Servers have changed. NEW: %s REMOVED: %s", node_couplets_s - self.__node_couplets_s, self.__node_couplets_s - node_couplets_s) # Since no servers means no connection greenlets, and the discover # greenlet is technically scheduled and not running between # invocations, this should successfully terminate the process. if not node_couplets_s: raise EnvironmentError("No servers available.") self.__node_couplets_s = node_couplets_s
[ "def", "set_servers", "(", "self", ",", "node_couplets", ")", ":", "node_couplets_s", "=", "set", "(", "node_couplets", ")", "if", "node_couplets_s", "!=", "self", ".", "__node_couplets_s", ":", "_logger", ".", "info", "(", "\"Servers have changed. NEW: %s REMOVED: %s\"", ",", "node_couplets_s", "-", "self", ".", "__node_couplets_s", ",", "self", ".", "__node_couplets_s", "-", "node_couplets_s", ")", "# Since no servers means no connection greenlets, and the discover ", "# greenlet is technically scheduled and not running between ", "# invocations, this should successfully terminate the process.", "if", "not", "node_couplets_s", ":", "raise", "EnvironmentError", "(", "\"No servers available.\"", ")", "self", ".", "__node_couplets_s", "=", "node_couplets_s" ]
Set the current collection of servers. The entries are 2-tuples of contexts and nodes.
[ "Set", "the", "current", "collection", "of", "servers", ".", "The", "entries", "are", "2", "-", "tuples", "of", "contexts", "and", "nodes", "." ]
python
train
ouroboroscoding/format-oc-python
FormatOC/__init__.py
https://github.com/ouroboroscoding/format-oc-python/blob/c160b46fe4ff2c92333c776991c712de23991225/FormatOC/__init__.py#L2361-L2372
def viewkeys(self): """View Keys Returns a view associated with the parent's keys Returns: dict_view """ if hasattr(self._nodes, 'viewkeys'): return self._nodes.viewkeys() else: return self._nodes.keys()
[ "def", "viewkeys", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "_nodes", ",", "'viewkeys'", ")", ":", "return", "self", ".", "_nodes", ".", "viewkeys", "(", ")", "else", ":", "return", "self", ".", "_nodes", ".", "keys", "(", ")" ]
View Keys Returns a view associated with the parent's keys Returns: dict_view
[ "View", "Keys" ]
python
train
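The viewkeys wrapper above is a Python 2/3 compatibility shim; the same hasattr check works on any plain dict (illustrative values only):

d = {'a': 1, 'b': 2}
keys = d.viewkeys() if hasattr(d, 'viewkeys') else d.keys()  # Py2 view or Py3 view object
print(sorted(keys))  # ['a', 'b']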
inveniosoftware/invenio-oauthclient
invenio_oauthclient/contrib/cern.py
https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/contrib/cern.py#L300-L327
def account_info(remote, resp): """Retrieve remote account information used to find local user.""" resource = get_resource(remote) valid_identities = current_app.config.get( 'OAUTHCLIENT_CERN_ALLOWED_IDENTITY_CLASSES', OAUTHCLIENT_CERN_ALLOWED_IDENTITY_CLASSES ) identity_class = resource.get('IdentityClass', [None])[0] if identity_class is None or identity_class not in valid_identities: raise OAuthCERNRejectedAccountError( 'Identity class {0} is not one of [{1}]'.format( identity_class, ''.join(valid_identities)), remote, resp, ) email = resource['EmailAddress'][0] person_id = resource.get('PersonID', [None]) external_id = resource.get('uidNumber', person_id)[0] nice = resource['CommonName'][0] name = resource['DisplayName'][0] return dict( user=dict( email=email.lower(), profile=dict(username=nice, full_name=name), ), external_id=external_id, external_method='cern', active=True )
[ "def", "account_info", "(", "remote", ",", "resp", ")", ":", "resource", "=", "get_resource", "(", "remote", ")", "valid_identities", "=", "current_app", ".", "config", ".", "get", "(", "'OAUTHCLIENT_CERN_ALLOWED_IDENTITY_CLASSES'", ",", "OAUTHCLIENT_CERN_ALLOWED_IDENTITY_CLASSES", ")", "identity_class", "=", "resource", ".", "get", "(", "'IdentityClass'", ",", "[", "None", "]", ")", "[", "0", "]", "if", "identity_class", "is", "None", "or", "identity_class", "not", "in", "valid_identities", ":", "raise", "OAuthCERNRejectedAccountError", "(", "'Identity class {0} is not one of [{1}]'", ".", "format", "(", "identity_class", ",", "''", ".", "join", "(", "valid_identities", ")", ")", ",", "remote", ",", "resp", ",", ")", "email", "=", "resource", "[", "'EmailAddress'", "]", "[", "0", "]", "person_id", "=", "resource", ".", "get", "(", "'PersonID'", ",", "[", "None", "]", ")", "external_id", "=", "resource", ".", "get", "(", "'uidNumber'", ",", "person_id", ")", "[", "0", "]", "nice", "=", "resource", "[", "'CommonName'", "]", "[", "0", "]", "name", "=", "resource", "[", "'DisplayName'", "]", "[", "0", "]", "return", "dict", "(", "user", "=", "dict", "(", "email", "=", "email", ".", "lower", "(", ")", ",", "profile", "=", "dict", "(", "username", "=", "nice", ",", "full_name", "=", "name", ")", ",", ")", ",", "external_id", "=", "external_id", ",", "external_method", "=", "'cern'", ",", "active", "=", "True", ")" ]
Retrieve remote account information used to find local user.
[ "Retrieve", "remote", "account", "information", "used", "to", "find", "local", "user", "." ]
python
train
tijme/not-your-average-web-crawler
nyawc/helpers/URLHelper.py
https://github.com/tijme/not-your-average-web-crawler/blob/d77c14e1616c541bb3980f649a7e6f8ed02761fb/nyawc/helpers/URLHelper.py#L205-L219
def get_path(url): """Get the path (e.g /page/23) of the given URL. Args: url (str): The URL to get the path from. Returns: str: The path """ if url not in URLHelper.__cache: URLHelper.__cache[url] = urlparse(url) return URLHelper.__cache[url].path
[ "def", "get_path", "(", "url", ")", ":", "if", "url", "not", "in", "URLHelper", ".", "__cache", ":", "URLHelper", ".", "__cache", "[", "url", "]", "=", "urlparse", "(", "url", ")", "return", "URLHelper", ".", "__cache", "[", "url", "]", ".", "path" ]
Get the path (e.g /page/23) of the given URL. Args: url (str): The URL to get the path from. Returns: str: The path
[ "Get", "the", "path", "(", "e", ".", "g", "/", "page", "/", "23", ")", "of", "the", "given", "URL", "." ]
python
train
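A quick sketch of the path extraction that URLHelper.get_path caches, using the standard-library parser directly (example URL is made up):

from urllib.parse import urlparse

print(urlparse("https://example.com/page/23?order=asc").path)  # '/page/23'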
matrix-org/matrix-python-sdk
matrix_client/api.py
https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/api.py#L815-L849
def get_thumbnail(self, mxcurl, width, height, method='scale', allow_remote=True): """Download raw media thumbnail from provided mxc URL. Args: mxcurl (str): mxc media URL width (int): desired thumbnail width height (int): desired thumbnail height method (str): thumb creation method. Must be in ['scale', 'crop']. Default 'scale'. allow_remote (bool): indicates to the server that it should not attempt to fetch the media if it is deemed remote. Defaults to true if not provided. """ if method not in ['scale', 'crop']: raise ValueError( "Unsupported thumb method '%s'" % method ) query_params = { "width": width, "height": height, "method": method } if not allow_remote: query_params["allow_remote"] = False if mxcurl.startswith('mxc://'): return self._send( "GET", mxcurl[6:], query_params=query_params, api_path="/_matrix/media/r0/thumbnail/", return_json=False ) else: raise ValueError( "MXC URL '%s' did not begin with 'mxc://'" % mxcurl )
[ "def", "get_thumbnail", "(", "self", ",", "mxcurl", ",", "width", ",", "height", ",", "method", "=", "'scale'", ",", "allow_remote", "=", "True", ")", ":", "if", "method", "not", "in", "[", "'scale'", ",", "'crop'", "]", ":", "raise", "ValueError", "(", "\"Unsupported thumb method '%s'\"", "%", "method", ")", "query_params", "=", "{", "\"width\"", ":", "width", ",", "\"height\"", ":", "height", ",", "\"method\"", ":", "method", "}", "if", "not", "allow_remote", ":", "query_params", "[", "\"allow_remote\"", "]", "=", "False", "if", "mxcurl", ".", "startswith", "(", "'mxc://'", ")", ":", "return", "self", ".", "_send", "(", "\"GET\"", ",", "mxcurl", "[", "6", ":", "]", ",", "query_params", "=", "query_params", ",", "api_path", "=", "\"/_matrix/media/r0/thumbnail/\"", ",", "return_json", "=", "False", ")", "else", ":", "raise", "ValueError", "(", "\"MXC URL '%s' did not begin with 'mxc://'\"", "%", "mxcurl", ")" ]
Download raw media thumbnail from provided mxc URL. Args: mxcurl (str): mxc media URL width (int): desired thumbnail width height (int): desired thumbnail height method (str): thumb creation method. Must be in ['scale', 'crop']. Default 'scale'. allow_remote (bool): indicates to the server that it should not attempt to fetch the media if it is deemed remote. Defaults to true if not provided.
[ "Download", "raw", "media", "thumbnail", "from", "provided", "mxc", "URL", "." ]
python
train
twisted/mantissa
xmantissa/webtheme.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/webtheme.py#L78-L86
def getInstalledThemes(self, store): """ Collect themes from all offerings installed on this store, or (if called multiple times) return the previously collected list. """ if not store in self._getInstalledThemesCache: self._getInstalledThemesCache[store] = (self. _realGetInstalledThemes(store)) return self._getInstalledThemesCache[store]
[ "def", "getInstalledThemes", "(", "self", ",", "store", ")", ":", "if", "not", "store", "in", "self", ".", "_getInstalledThemesCache", ":", "self", ".", "_getInstalledThemesCache", "[", "store", "]", "=", "(", "self", ".", "_realGetInstalledThemes", "(", "store", ")", ")", "return", "self", ".", "_getInstalledThemesCache", "[", "store", "]" ]
Collect themes from all offerings installed on this store, or (if called multiple times) return the previously collected list.
[ "Collect", "themes", "from", "all", "offerings", "installed", "on", "this", "store", "or", "(", "if", "called", "multiple", "times", ")", "return", "the", "previously", "collected", "list", "." ]
python
train
Clinical-Genomics/scout
scout/parse/hpo.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/hpo.py#L149-L168
def parse_hpo_to_genes(hpo_lines): """Parse the map from hpo term to hgnc symbol Args: lines(iterable(str)): Yields: hpo_to_gene(dict): A dictionary with information on how a term map to a hgnc symbol """ for line in hpo_lines: if line.startswith('#') or len(line) < 1: continue line = line.rstrip().split('\t') hpo_id = line[0] hgnc_symbol = line[3] yield { 'hpo_id': hpo_id, 'hgnc_symbol': hgnc_symbol }
[ "def", "parse_hpo_to_genes", "(", "hpo_lines", ")", ":", "for", "line", "in", "hpo_lines", ":", "if", "line", ".", "startswith", "(", "'#'", ")", "or", "len", "(", "line", ")", "<", "1", ":", "continue", "line", "=", "line", ".", "rstrip", "(", ")", ".", "split", "(", "'\\t'", ")", "hpo_id", "=", "line", "[", "0", "]", "hgnc_symbol", "=", "line", "[", "3", "]", "yield", "{", "'hpo_id'", ":", "hpo_id", ",", "'hgnc_symbol'", ":", "hgnc_symbol", "}" ]
Parse the map from hpo term to hgnc symbol Args: lines(iterable(str)): Yields: hpo_to_gene(dict): A dictionary with information on how a term map to a hgnc symbol
[ "Parse", "the", "map", "from", "hpo", "term", "to", "hgnc", "symbol", "Args", ":", "lines", "(", "iterable", "(", "str", "))", ":", "Yields", ":", "hpo_to_gene", "(", "dict", ")", ":", "A", "dictionary", "with", "information", "on", "how", "a", "term", "map", "to", "a", "hgnc", "symbol" ]
python
test
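An inline re-implementation of the parse_hpo_to_genes generator above, runnable without scout; the sample lines only illustrate the assumed tab-separated layout (HPO id in column 0, HGNC symbol in column 3):

def parse_hpo_to_genes_sketch(hpo_lines):
    for line in hpo_lines:
        if line.startswith('#') or len(line) < 1:
            continue
        cols = line.rstrip().split('\t')
        yield {'hpo_id': cols[0], 'hgnc_symbol': cols[3]}

lines = [
    "#Format: HPO-id<tab>HPO label<tab>entrez-gene-id<tab>entrez-gene-symbol",
    "HP:0000003\tMulticystic kidney dysplasia\t5313\tPKHD1",
]
print(list(parse_hpo_to_genes_sketch(lines)))
# [{'hpo_id': 'HP:0000003', 'hgnc_symbol': 'PKHD1'}]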
CxAalto/gtfspy
gtfspy/gtfs.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/gtfs.py#L1440-L1482
def get_route_difference_with_other_db(self, other_gtfs, start_time, end_time, uniqueness_threshold=None, uniqueness_ratio=None): """ Compares the routes based on stops in the schedule with the routes in another db and returns the ones without match. Uniqueness thresholds or ratio can be used to allow small differences :param uniqueness_threshold: :param uniqueness_ratio: :return: """ from gtfspy.stats import frequencies_by_generated_route this_df = frequencies_by_generated_route(self, start_time, end_time) other_df = frequencies_by_generated_route(other_gtfs, start_time, end_time) this_routes = {x: set(x.split(',')) for x in this_df["route"]} other_routes = {x: set(x.split(',')) for x in other_df["route"]} # this_df["route_set"] = this_df.apply(lambda x: set(x.route.split(',')), axis=1) # other_df["route_set"] = other_df.apply(lambda x: set(x.route.split(',')), axis=1) this_uniques = list(this_routes.keys()) other_uniques = list(other_routes.keys()) print("initial routes A:", len(this_uniques)) print("initial routes B:", len(other_uniques)) for i_key, i in this_routes.items(): for j_key, j in other_routes.items(): union = i | j intersection = i & j symmetric_difference = i ^ j if uniqueness_ratio: if len(intersection) / len(union) >= uniqueness_ratio: try: this_uniques.remove(i_key) this_df = this_df[this_df["route"] != i_key] except ValueError: pass try: other_uniques.remove(j_key) other_df = other_df[other_df["route"] != j_key] except ValueError: pass print("unique routes A", len(this_df)) print("unique routes B", len(other_df)) return this_df, other_df
[ "def", "get_route_difference_with_other_db", "(", "self", ",", "other_gtfs", ",", "start_time", ",", "end_time", ",", "uniqueness_threshold", "=", "None", ",", "uniqueness_ratio", "=", "None", ")", ":", "from", "gtfspy", ".", "stats", "import", "frequencies_by_generated_route", "this_df", "=", "frequencies_by_generated_route", "(", "self", ",", "start_time", ",", "end_time", ")", "other_df", "=", "frequencies_by_generated_route", "(", "other_gtfs", ",", "start_time", ",", "end_time", ")", "this_routes", "=", "{", "x", ":", "set", "(", "x", ".", "split", "(", "','", ")", ")", "for", "x", "in", "this_df", "[", "\"route\"", "]", "}", "other_routes", "=", "{", "x", ":", "set", "(", "x", ".", "split", "(", "','", ")", ")", "for", "x", "in", "other_df", "[", "\"route\"", "]", "}", "# this_df[\"route_set\"] = this_df.apply(lambda x: set(x.route.split(',')), axis=1)", "# other_df[\"route_set\"] = other_df.apply(lambda x: set(x.route.split(',')), axis=1)", "this_uniques", "=", "list", "(", "this_routes", ".", "keys", "(", ")", ")", "other_uniques", "=", "list", "(", "other_routes", ".", "keys", "(", ")", ")", "print", "(", "\"initial routes A:\"", ",", "len", "(", "this_uniques", ")", ")", "print", "(", "\"initial routes B:\"", ",", "len", "(", "other_uniques", ")", ")", "for", "i_key", ",", "i", "in", "this_routes", ".", "items", "(", ")", ":", "for", "j_key", ",", "j", "in", "other_routes", ".", "items", "(", ")", ":", "union", "=", "i", "|", "j", "intersection", "=", "i", "&", "j", "symmetric_difference", "=", "i", "^", "j", "if", "uniqueness_ratio", ":", "if", "len", "(", "intersection", ")", "/", "len", "(", "union", ")", ">=", "uniqueness_ratio", ":", "try", ":", "this_uniques", ".", "remove", "(", "i_key", ")", "this_df", "=", "this_df", "[", "this_df", "[", "\"route\"", "]", "!=", "i_key", "]", "except", "ValueError", ":", "pass", "try", ":", "other_uniques", ".", "remove", "(", "j_key", ")", "other_df", "=", "other_df", "[", "other_df", "[", "\"route\"", "]", "!=", "j_key", "]", "except", "ValueError", ":", "pass", "print", "(", "\"unique routes A\"", ",", "len", "(", "this_df", ")", ")", "print", "(", "\"unique routes B\"", ",", "len", "(", "other_df", ")", ")", "return", "this_df", ",", "other_df" ]
Compares the routes based on stops in the schedule with the routes in another db and returns the ones without match. Uniqueness thresholds or ratio can be used to allow small differences :param uniqueness_threshold: :param uniqueness_ratio: :return:
[ "Compares", "the", "routes", "based", "on", "stops", "in", "the", "schedule", "with", "the", "routes", "in", "another", "db", "and", "returns", "the", "ones", "without", "match", ".", "Uniqueness", "thresholds", "or", "ratio", "can", "be", "used", "to", "allow", "small", "differences", ":", "param", "uniqueness_threshold", ":", ":", "param", "uniqueness_ratio", ":", ":", "return", ":" ]
python
valid
awslabs/sockeye
sockeye/lr_scheduler.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/lr_scheduler.py#L253-L299
def get_lr_scheduler(scheduler_type: str, updates_per_checkpoint: int, learning_rate_half_life: int, learning_rate_reduce_factor: float, learning_rate_reduce_num_not_improved: int, learning_rate_schedule: Optional[List[Tuple[float, int]]] = None, learning_rate_warmup: Optional[int] = 0) -> Optional[LearningRateScheduler]: """ Returns a learning rate scheduler. :param scheduler_type: Scheduler type. :param updates_per_checkpoint: Number of batches between checkpoints. :param learning_rate_half_life: Half life of the learning rate in number of checkpoints. :param learning_rate_reduce_factor: Factor to reduce learning rate with. :param learning_rate_reduce_num_not_improved: Number of checkpoints with no improvement after which learning rate is reduced. :param learning_rate_schedule: Optional fixed learning rate schedule. :param learning_rate_warmup: Number of batches that the learning rate is linearly increased. :raises: ValueError if unknown scheduler_type :return: Learning rate scheduler. """ check_condition(learning_rate_schedule is None or scheduler_type == C.LR_SCHEDULER_FIXED_STEP, "Learning rate schedule can only be used with '%s' learning rate scheduler." % C.LR_SCHEDULER_FIXED_STEP) if scheduler_type is None: return None if scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_SQRT_T: return LearningRateSchedulerInvSqrtT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup) elif scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_T: return LearningRateSchedulerInvT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup) elif scheduler_type == C.LR_SCHEDULER_FIXED_STEP: check_condition(learning_rate_schedule is not None, "learning_rate_schedule needed for %s scheduler" % C.LR_SCHEDULER_FIXED_STEP) return LearningRateSchedulerFixedStep(learning_rate_schedule, updates_per_checkpoint) elif scheduler_type == C.LR_SCHEDULER_PLATEAU_REDUCE: check_condition(learning_rate_reduce_factor is not None, "learning_rate_reduce_factor needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE) check_condition(learning_rate_reduce_num_not_improved is not None, "learning_rate_reduce_num_not_improved needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE) if learning_rate_reduce_factor >= 1.0: logger.warning("Not using %s learning rate scheduling: learning_rate_reduce_factor == 1.0" % C.LR_SCHEDULER_PLATEAU_REDUCE) return None return LearningRateSchedulerPlateauReduce(learning_rate_reduce_factor, learning_rate_reduce_num_not_improved, learning_rate_warmup) else: raise ValueError("Unknown learning rate scheduler type %s." % scheduler_type)
[ "def", "get_lr_scheduler", "(", "scheduler_type", ":", "str", ",", "updates_per_checkpoint", ":", "int", ",", "learning_rate_half_life", ":", "int", ",", "learning_rate_reduce_factor", ":", "float", ",", "learning_rate_reduce_num_not_improved", ":", "int", ",", "learning_rate_schedule", ":", "Optional", "[", "List", "[", "Tuple", "[", "float", ",", "int", "]", "]", "]", "=", "None", ",", "learning_rate_warmup", ":", "Optional", "[", "int", "]", "=", "0", ")", "->", "Optional", "[", "LearningRateScheduler", "]", ":", "check_condition", "(", "learning_rate_schedule", "is", "None", "or", "scheduler_type", "==", "C", ".", "LR_SCHEDULER_FIXED_STEP", ",", "\"Learning rate schedule can only be used with '%s' learning rate scheduler.\"", "%", "C", ".", "LR_SCHEDULER_FIXED_STEP", ")", "if", "scheduler_type", "is", "None", ":", "return", "None", "if", "scheduler_type", "==", "C", ".", "LR_SCHEDULER_FIXED_RATE_INV_SQRT_T", ":", "return", "LearningRateSchedulerInvSqrtT", "(", "updates_per_checkpoint", ",", "learning_rate_half_life", ",", "learning_rate_warmup", ")", "elif", "scheduler_type", "==", "C", ".", "LR_SCHEDULER_FIXED_RATE_INV_T", ":", "return", "LearningRateSchedulerInvT", "(", "updates_per_checkpoint", ",", "learning_rate_half_life", ",", "learning_rate_warmup", ")", "elif", "scheduler_type", "==", "C", ".", "LR_SCHEDULER_FIXED_STEP", ":", "check_condition", "(", "learning_rate_schedule", "is", "not", "None", ",", "\"learning_rate_schedule needed for %s scheduler\"", "%", "C", ".", "LR_SCHEDULER_FIXED_STEP", ")", "return", "LearningRateSchedulerFixedStep", "(", "learning_rate_schedule", ",", "updates_per_checkpoint", ")", "elif", "scheduler_type", "==", "C", ".", "LR_SCHEDULER_PLATEAU_REDUCE", ":", "check_condition", "(", "learning_rate_reduce_factor", "is", "not", "None", ",", "\"learning_rate_reduce_factor needed for %s scheduler\"", "%", "C", ".", "LR_SCHEDULER_PLATEAU_REDUCE", ")", "check_condition", "(", "learning_rate_reduce_num_not_improved", "is", "not", "None", ",", "\"learning_rate_reduce_num_not_improved needed for %s scheduler\"", "%", "C", ".", "LR_SCHEDULER_PLATEAU_REDUCE", ")", "if", "learning_rate_reduce_factor", ">=", "1.0", ":", "logger", ".", "warning", "(", "\"Not using %s learning rate scheduling: learning_rate_reduce_factor == 1.0\"", "%", "C", ".", "LR_SCHEDULER_PLATEAU_REDUCE", ")", "return", "None", "return", "LearningRateSchedulerPlateauReduce", "(", "learning_rate_reduce_factor", ",", "learning_rate_reduce_num_not_improved", ",", "learning_rate_warmup", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown learning rate scheduler type %s.\"", "%", "scheduler_type", ")" ]
Returns a learning rate scheduler. :param scheduler_type: Scheduler type. :param updates_per_checkpoint: Number of batches between checkpoints. :param learning_rate_half_life: Half life of the learning rate in number of checkpoints. :param learning_rate_reduce_factor: Factor to reduce learning rate with. :param learning_rate_reduce_num_not_improved: Number of checkpoints with no improvement after which learning rate is reduced. :param learning_rate_schedule: Optional fixed learning rate schedule. :param learning_rate_warmup: Number of batches that the learning rate is linearly increased. :raises: ValueError if unknown scheduler_type :return: Learning rate scheduler.
[ "Returns", "a", "learning", "rate", "scheduler", "." ]
python
train
jldantas/libmft
libmft/api.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L405-L427
def add_data_attribute(self, data_attr): '''Interprets a DATA attribute and add it to the datastream.''' if data_attr.header.attr_type_id is not AttrTypes.DATA: raise DataStreamError("Invalid attribute. A Datastream deals only with DATA attributes") if data_attr.header.attr_name != self.name: raise DataStreamError(f"Data from a different stream '{data_attr.header.attr_name}' cannot be add to this stream") if data_attr.header.non_resident: nonr_header = data_attr.header if self._data_runs is None: self._data_runs = [] if nonr_header.end_vcn > self.cluster_count: self.cluster_count = nonr_header.end_vcn if not nonr_header.start_vcn: #start_vcn == 0 self.size = nonr_header.curr_sstream self.alloc_size = nonr_header.alloc_sstream self._data_runs.append((nonr_header.start_vcn, nonr_header.data_runs)) self._data_runs_sorted = False else: #if it is resident self.size = self.alloc_size = data_attr.header.content_len self._pending_processing = None #respects mft_config["load_data"] self._content = data_attr.content.content
[ "def", "add_data_attribute", "(", "self", ",", "data_attr", ")", ":", "if", "data_attr", ".", "header", ".", "attr_type_id", "is", "not", "AttrTypes", ".", "DATA", ":", "raise", "DataStreamError", "(", "\"Invalid attribute. A Datastream deals only with DATA attributes\"", ")", "if", "data_attr", ".", "header", ".", "attr_name", "!=", "self", ".", "name", ":", "raise", "DataStreamError", "(", "f\"Data from a different stream '{data_attr.header.attr_name}' cannot be add to this stream\"", ")", "if", "data_attr", ".", "header", ".", "non_resident", ":", "nonr_header", "=", "data_attr", ".", "header", "if", "self", ".", "_data_runs", "is", "None", ":", "self", ".", "_data_runs", "=", "[", "]", "if", "nonr_header", ".", "end_vcn", ">", "self", ".", "cluster_count", ":", "self", ".", "cluster_count", "=", "nonr_header", ".", "end_vcn", "if", "not", "nonr_header", ".", "start_vcn", ":", "#start_vcn == 0", "self", ".", "size", "=", "nonr_header", ".", "curr_sstream", "self", ".", "alloc_size", "=", "nonr_header", ".", "alloc_sstream", "self", ".", "_data_runs", ".", "append", "(", "(", "nonr_header", ".", "start_vcn", ",", "nonr_header", ".", "data_runs", ")", ")", "self", ".", "_data_runs_sorted", "=", "False", "else", ":", "#if it is resident", "self", ".", "size", "=", "self", ".", "alloc_size", "=", "data_attr", ".", "header", ".", "content_len", "self", ".", "_pending_processing", "=", "None", "#respects mft_config[\"load_data\"]", "self", ".", "_content", "=", "data_attr", ".", "content", ".", "content" ]
Interprets a DATA attribute and add it to the datastream.
[ "Interprets", "a", "DATA", "attribute", "and", "add", "it", "to", "the", "datastream", "." ]
python
train
quantopian/pyfolio
pyfolio/timeseries.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/timeseries.py#L231-L258
def downside_risk(returns, required_return=0, period=DAILY): """ Determines the downside deviation below a threshold Parameters ---------- returns : pd.Series or pd.DataFrame Daily returns of the strategy, noncumulative. - See full explanation in :func:`~pyfolio.timeseries.cum_returns`. required_return: float / series minimum acceptable return period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Can be 'monthly', 'weekly', or 'daily'. - Defaults to 'daily'. Returns ------- depends on input type series ==> float DataFrame ==> np.array Annualized downside deviation """ return ep.downside_risk(returns, required_return=required_return, period=period)
[ "def", "downside_risk", "(", "returns", ",", "required_return", "=", "0", ",", "period", "=", "DAILY", ")", ":", "return", "ep", ".", "downside_risk", "(", "returns", ",", "required_return", "=", "required_return", ",", "period", "=", "period", ")" ]
Determines the downside deviation below a threshold Parameters ---------- returns : pd.Series or pd.DataFrame Daily returns of the strategy, noncumulative. - See full explanation in :func:`~pyfolio.timeseries.cum_returns`. required_return: float / series minimum acceptable return period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Can be 'monthly', 'weekly', or 'daily'. - Defaults to 'daily'. Returns ------- depends on input type series ==> float DataFrame ==> np.array Annualized downside deviation
[ "Determines", "the", "downside", "deviation", "below", "a", "threshold" ]
python
valid
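A minimal usage sketch for the downside_risk function above; the import path follows the record's path field and the returns series is made up, so treat this as an assumption rather than verified pyfolio documentation.

# Usage sketch (assumes pyfolio and pandas are installed).
import pandas as pd
from pyfolio import timeseries

daily_returns = pd.Series([0.010, -0.020, 0.003, -0.007, 0.012])
dd = timeseries.downside_risk(daily_returns, required_return=0.0)
print(dd)   # a single float: the annualized downside deviation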
Hackerfleet/hfos
modules/maps/hfos/map/TileTools.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/maps/hfos/map/TileTools.py#L40-L45
def getMapScale(self, latitude, level, dpi=96): ''' returns the map scale on the dpi of the screen ''' dpm = dpi / 0.0254 # convert to dots per meter return self.getGroundResolution(latitude, level) * dpm
[ "def", "getMapScale", "(", "self", ",", "latitude", ",", "level", ",", "dpi", "=", "96", ")", ":", "dpm", "=", "dpi", "/", "0.0254", "# convert to dots per meter", "return", "self", ".", "getGroundResolution", "(", "latitude", ",", "level", ")", "*", "dpm" ]
Returns the map scale for the given screen DPI.
[ "returns", "the", "map", "scale", "on", "the", "dpi", "of", "the", "screen" ]
python
train
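getMapScale relies on a getGroundResolution helper that is not part of the record. The self-contained sketch below reproduces the same DPI arithmetic using the standard Web-Mercator ground-resolution formula; the Earth radius and tile size are assumptions, not values taken from the repository.

import math

def ground_resolution(latitude, level, earth_radius=6378137, tile_size=256):
    # Assumed Web-Mercator formula: metres per pixel at this latitude and zoom level.
    return math.cos(math.radians(latitude)) * 2 * math.pi * earth_radius / (tile_size * 2 ** level)

def map_scale(latitude, level, dpi=96):
    dpm = dpi / 0.0254                       # dots per metre, exactly as in getMapScale
    return ground_resolution(latitude, level) * dpm

print(round(map_scale(0.0, 0)))              # roughly 591 million at the equator, zoom level 0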
googlesamples/assistant-sdk-python
google-assistant-sdk/googlesamples/assistant/grpc/devicetool.py
https://github.com/googlesamples/assistant-sdk-python/blob/84995692f35be8e085de8dfa7032039a13ae3fab/google-assistant-sdk/googlesamples/assistant/grpc/devicetool.py#L222-L260
def register_model(ctx, model, type, trait, manufacturer, product_name, description): """Registers a device model. Device model fields can only contain letters, numbers, and the following symbols: period (.), hyphen (-), underscore (_), space ( ) and plus (+). The first character of a field must be a letter or number. """ session, api_url, project_id = build_client_from_context(ctx) model_base_url = '/'.join([api_url, 'deviceModels']) model_url = '/'.join([model_base_url, model]) payload = { 'device_model_id': model, 'project_id': project_id, 'device_type': 'action.devices.types.' + type, } if trait: payload['traits'] = trait if manufacturer: payload.setdefault('manifest', {})['manufacturer'] = manufacturer if product_name: payload.setdefault('manifest', {})['productName'] = product_name if description: payload.setdefault('manifest', {})['deviceDescription'] = description logging.debug(json.dumps(payload)) r = session.get(model_url) logging.debug(r.text) if r.status_code == 200: click.echo('Updating existing device model: %s' % model) r = session.put(model_url, data=json.dumps(payload)) elif r.status_code in (400, 403, 404): click.echo('Creating new device model') r = session.post(model_base_url, data=json.dumps(payload)) else: raise failed_request_exception('Failed to check existing device model', r) if r.status_code != 200: raise failed_request_exception('Failed to register model', r) click.echo('Model %s successfully registered' % model)
[ "def", "register_model", "(", "ctx", ",", "model", ",", "type", ",", "trait", ",", "manufacturer", ",", "product_name", ",", "description", ")", ":", "session", ",", "api_url", ",", "project_id", "=", "build_client_from_context", "(", "ctx", ")", "model_base_url", "=", "'/'", ".", "join", "(", "[", "api_url", ",", "'deviceModels'", "]", ")", "model_url", "=", "'/'", ".", "join", "(", "[", "model_base_url", ",", "model", "]", ")", "payload", "=", "{", "'device_model_id'", ":", "model", ",", "'project_id'", ":", "project_id", ",", "'device_type'", ":", "'action.devices.types.'", "+", "type", ",", "}", "if", "trait", ":", "payload", "[", "'traits'", "]", "=", "trait", "if", "manufacturer", ":", "payload", ".", "setdefault", "(", "'manifest'", ",", "{", "}", ")", "[", "'manufacturer'", "]", "=", "manufacturer", "if", "product_name", ":", "payload", ".", "setdefault", "(", "'manifest'", ",", "{", "}", ")", "[", "'productName'", "]", "=", "product_name", "if", "description", ":", "payload", ".", "setdefault", "(", "'manifest'", ",", "{", "}", ")", "[", "'deviceDescription'", "]", "=", "description", "logging", ".", "debug", "(", "json", ".", "dumps", "(", "payload", ")", ")", "r", "=", "session", ".", "get", "(", "model_url", ")", "logging", ".", "debug", "(", "r", ".", "text", ")", "if", "r", ".", "status_code", "==", "200", ":", "click", ".", "echo", "(", "'Updating existing device model: %s'", "%", "model", ")", "r", "=", "session", ".", "put", "(", "model_url", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ")", "elif", "r", ".", "status_code", "in", "(", "400", ",", "403", ",", "404", ")", ":", "click", ".", "echo", "(", "'Creating new device model'", ")", "r", "=", "session", ".", "post", "(", "model_base_url", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ")", "else", ":", "raise", "failed_request_exception", "(", "'Failed to check existing device model'", ",", "r", ")", "if", "r", ".", "status_code", "!=", "200", ":", "raise", "failed_request_exception", "(", "'Failed to register model'", ",", "r", ")", "click", ".", "echo", "(", "'Model %s successfully registered'", "%", "model", ")" ]
Registers a device model. Device model fields can only contain letters, numbers, and the following symbols: period (.), hyphen (-), underscore (_), space ( ) and plus (+). The first character of a field must be a letter or number.
[ "Registers", "a", "device", "model", "." ]
python
train
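For orientation, the JSON body that register_model assembles before the POST or PUT call has the shape sketched below; only the keys are taken from the code above, and every concrete value is a made-up placeholder.

# Shape of the request payload built by register_model (illustrative values only).
payload = {
    "device_model_id": "my-device-model",
    "project_id": "my-gcp-project",
    "device_type": "action.devices.types.LIGHT",
    "traits": ["action.devices.traits.OnOff"],       # only present when a trait argument is given
    "manifest": {
        "manufacturer": "Example Corp",              # manufacturer argument
        "productName": "Example Light",              # product_name argument
        "deviceDescription": "A demo device model",  # description argument
    },
}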
serge-sans-paille/pythran
pythran/types/type_dependencies.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/types/type_dependencies.py#L435-L451
def visit_Name(self, node): """ Return dependencies for given variable. It have to be register first. """ if node.id in self.naming: return self.naming[node.id] elif node.id in self.global_declarations: return [frozenset([self.global_declarations[node.id]])] elif isinstance(node.ctx, ast.Param): deps = [frozenset()] self.naming[node.id] = deps return deps else: raise PythranInternalError("Variable '{}' use before assignment" "".format(node.id))
[ "def", "visit_Name", "(", "self", ",", "node", ")", ":", "if", "node", ".", "id", "in", "self", ".", "naming", ":", "return", "self", ".", "naming", "[", "node", ".", "id", "]", "elif", "node", ".", "id", "in", "self", ".", "global_declarations", ":", "return", "[", "frozenset", "(", "[", "self", ".", "global_declarations", "[", "node", ".", "id", "]", "]", ")", "]", "elif", "isinstance", "(", "node", ".", "ctx", ",", "ast", ".", "Param", ")", ":", "deps", "=", "[", "frozenset", "(", ")", "]", "self", ".", "naming", "[", "node", ".", "id", "]", "=", "deps", "return", "deps", "else", ":", "raise", "PythranInternalError", "(", "\"Variable '{}' use before assignment\"", "\"\"", ".", "format", "(", "node", ".", "id", ")", ")" ]
Return dependencies for the given variable. It has to be registered first.
[ "Return", "dependencies", "for", "given", "variable", "." ]
python
train
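visit_Name is invoked through the standard ast.NodeVisitor dispatch; the toy visitor below is unrelated to Pythran's actual class and only illustrates that dispatch pattern.

import ast

class NameCollector(ast.NodeVisitor):
    # Minimal stand-in: collects every Name node the traversal reaches.
    def __init__(self):
        self.names = []

    def visit_Name(self, node):
        self.names.append(node.id)

collector = NameCollector()
collector.visit(ast.parse("a = b + c"))
print(collector.names)   # ['a', 'b', 'c']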
flatangle/flatlib
flatlib/tools/chartdynamics.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/tools/chartdynamics.py#L143-L152
def isVOC(self, ID): """ Returns if a planet is Void of Course. A planet is not VOC if has any exact or applicative aspects ignoring the sign status (associate or dissociate). """ asps = self.aspectsByCat(ID, const.MAJOR_ASPECTS) applications = asps[const.APPLICATIVE] exacts = asps[const.EXACT] return len(applications) == 0 and len(exacts) == 0
[ "def", "isVOC", "(", "self", ",", "ID", ")", ":", "asps", "=", "self", ".", "aspectsByCat", "(", "ID", ",", "const", ".", "MAJOR_ASPECTS", ")", "applications", "=", "asps", "[", "const", ".", "APPLICATIVE", "]", "exacts", "=", "asps", "[", "const", ".", "EXACT", "]", "return", "len", "(", "applications", ")", "==", "0", "and", "len", "(", "exacts", ")", "==", "0" ]
Returns whether a planet is Void of Course. A planet is not VOC if it has any exact or applicative aspects, ignoring the sign status (associate or dissociate).
[ "Returns", "if", "a", "planet", "is", "Void", "of", "Course", ".", "A", "planet", "is", "not", "VOC", "if", "has", "any", "exact", "or", "applicative", "aspects", "ignoring", "the", "sign", "status", "(", "associate", "or", "dissociate", ")", "." ]
python
train
abhishek-ram/pyas2-lib
pyas2lib/cms.py
https://github.com/abhishek-ram/pyas2-lib/blob/6af6bc71fe8a8cfb3465dad82ecc50539e3fd551/pyas2lib/cms.py#L309-L371
def verify_message(data_to_verify, signature, verify_cert): """Function parses an ASN.1 encrypted message and extracts/decrypts the original message. :param data_to_verify: A byte string of the data to be verified against the signature. :param signature: A CMS ASN.1 byte string containing the signature. :param verify_cert: The certificate to be used for verifying the signature. :return: The digest algorithm that was used in the signature. """ cms_content = cms.ContentInfo.load(signature) digest_alg = None if cms_content['content_type'].native == 'signed_data': for signer in cms_content['content']['signer_infos']: signed_attributes = signer['signed_attrs'].copy() digest_alg = signer['digest_algorithm']['algorithm'].native if digest_alg not in DIGEST_ALGORITHMS: raise Exception('Unsupported Digest Algorithm') sig_alg = signer['signature_algorithm']['algorithm'].native sig = signer['signature'].native signed_data = data_to_verify if signed_attributes: attr_dict = {} for attr in signed_attributes.native: attr_dict[attr['type']] = attr['values'] message_digest = byte_cls() for d in attr_dict['message_digest']: message_digest += d digest_func = hashlib.new(digest_alg) digest_func.update(data_to_verify) calc_message_digest = digest_func.digest() if message_digest != calc_message_digest: raise IntegrityError('Failed to verify message signature: ' 'Message Digest does not match.') signed_data = signed_attributes.untag().dump() try: if sig_alg == 'rsassa_pkcs1v15': asymmetric.rsa_pkcs1v15_verify( verify_cert, sig, signed_data, digest_alg) elif sig_alg == 'rsassa_pss': asymmetric.rsa_pss_verify( verify_cert, sig, signed_data, digest_alg) else: raise AS2Exception('Unsupported Signature Algorithm') except Exception as e: raise IntegrityError( 'Failed to verify message signature: {}'.format(e)) return digest_alg
[ "def", "verify_message", "(", "data_to_verify", ",", "signature", ",", "verify_cert", ")", ":", "cms_content", "=", "cms", ".", "ContentInfo", ".", "load", "(", "signature", ")", "digest_alg", "=", "None", "if", "cms_content", "[", "'content_type'", "]", ".", "native", "==", "'signed_data'", ":", "for", "signer", "in", "cms_content", "[", "'content'", "]", "[", "'signer_infos'", "]", ":", "signed_attributes", "=", "signer", "[", "'signed_attrs'", "]", ".", "copy", "(", ")", "digest_alg", "=", "signer", "[", "'digest_algorithm'", "]", "[", "'algorithm'", "]", ".", "native", "if", "digest_alg", "not", "in", "DIGEST_ALGORITHMS", ":", "raise", "Exception", "(", "'Unsupported Digest Algorithm'", ")", "sig_alg", "=", "signer", "[", "'signature_algorithm'", "]", "[", "'algorithm'", "]", ".", "native", "sig", "=", "signer", "[", "'signature'", "]", ".", "native", "signed_data", "=", "data_to_verify", "if", "signed_attributes", ":", "attr_dict", "=", "{", "}", "for", "attr", "in", "signed_attributes", ".", "native", ":", "attr_dict", "[", "attr", "[", "'type'", "]", "]", "=", "attr", "[", "'values'", "]", "message_digest", "=", "byte_cls", "(", ")", "for", "d", "in", "attr_dict", "[", "'message_digest'", "]", ":", "message_digest", "+=", "d", "digest_func", "=", "hashlib", ".", "new", "(", "digest_alg", ")", "digest_func", ".", "update", "(", "data_to_verify", ")", "calc_message_digest", "=", "digest_func", ".", "digest", "(", ")", "if", "message_digest", "!=", "calc_message_digest", ":", "raise", "IntegrityError", "(", "'Failed to verify message signature: '", "'Message Digest does not match.'", ")", "signed_data", "=", "signed_attributes", ".", "untag", "(", ")", ".", "dump", "(", ")", "try", ":", "if", "sig_alg", "==", "'rsassa_pkcs1v15'", ":", "asymmetric", ".", "rsa_pkcs1v15_verify", "(", "verify_cert", ",", "sig", ",", "signed_data", ",", "digest_alg", ")", "elif", "sig_alg", "==", "'rsassa_pss'", ":", "asymmetric", ".", "rsa_pss_verify", "(", "verify_cert", ",", "sig", ",", "signed_data", ",", "digest_alg", ")", "else", ":", "raise", "AS2Exception", "(", "'Unsupported Signature Algorithm'", ")", "except", "Exception", "as", "e", ":", "raise", "IntegrityError", "(", "'Failed to verify message signature: {}'", ".", "format", "(", "e", ")", ")", "return", "digest_alg" ]
Function parses an ASN.1 encrypted message and extracts/decrypts the original message. :param data_to_verify: A byte string of the data to be verified against the signature. :param signature: A CMS ASN.1 byte string containing the signature. :param verify_cert: The certificate to be used for verifying the signature. :return: The digest algorithm that was used in the signature.
[ "Function", "parses", "an", "ASN", ".", "1", "encrypted", "message", "and", "extracts", "/", "decrypts", "the", "original", "message", "." ]
python
train
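The signed-attributes branch of verify_message hinges on recomputing the message digest before any signature check. The self-contained fragment below shows just that step with hashlib; the payload bytes and algorithm name are placeholders, whereas in the real function the algorithm is read from the signer info.

import hashlib

data_to_verify = b"example MIME payload"
digest_alg = "sha256"                                  # placeholder; extracted from signer_infos in the code above
calc_message_digest = hashlib.new(digest_alg, data_to_verify).digest()
# verify_message compares this value against the message_digest signed attribute and
# raises IntegrityError on a mismatch before attempting the RSA verification.
print(calc_message_digest.hex())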
pymc-devs/pymc
pymc/distributions.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1225-L1232
def rexponweib(alpha, k, loc=0, scale=1, size=None): """ Random exponentiated Weibull variates. """ q = np.random.uniform(size=size) r = flib.exponweib_ppf(q, alpha, k) return loc + r * scale
[ "def", "rexponweib", "(", "alpha", ",", "k", ",", "loc", "=", "0", ",", "scale", "=", "1", ",", "size", "=", "None", ")", ":", "q", "=", "np", ".", "random", ".", "uniform", "(", "size", "=", "size", ")", "r", "=", "flib", ".", "exponweib_ppf", "(", "q", ",", "alpha", ",", "k", ")", "return", "loc", "+", "r", "*", "scale" ]
Random exponentiated Weibull variates.
[ "Random", "exponentiated", "Weibull", "variates", "." ]
python
train
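As a cross-check outside PyMC, SciPy ships an exponentiated Weibull distribution; the parameter mapping used below (alpha to a, k to c) is an assumption and not stated anywhere in the record.

from scipy import stats

# Draw five exponentiated-Weibull variates with the same loc/scale convention as rexponweib.
samples = stats.exponweib.rvs(a=2.0, c=1.5, loc=0.0, scale=1.0, size=5)
print(samples)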
chaoss/grimoirelab-perceval
perceval/backends/core/mediawiki.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/mediawiki.py#L85-L105
def fetch(self, category=CATEGORY_PAGE, from_date=DEFAULT_DATETIME, reviews_api=False): """Fetch the pages from the backend url. The method retrieves, from a MediaWiki url, the wiki pages. :param category: the category of items to fetch :param from_date: obtain pages updated since this date :param reviews_api: use the reviews API available in MediaWiki >= 1.27 :returns: a generator of pages """ if from_date == DEFAULT_DATETIME: from_date = None else: from_date = datetime_to_utc(from_date) kwargs = {"from_date": from_date, "reviews_api": reviews_api} items = super().fetch(category, **kwargs) return items
[ "def", "fetch", "(", "self", ",", "category", "=", "CATEGORY_PAGE", ",", "from_date", "=", "DEFAULT_DATETIME", ",", "reviews_api", "=", "False", ")", ":", "if", "from_date", "==", "DEFAULT_DATETIME", ":", "from_date", "=", "None", "else", ":", "from_date", "=", "datetime_to_utc", "(", "from_date", ")", "kwargs", "=", "{", "\"from_date\"", ":", "from_date", ",", "\"reviews_api\"", ":", "reviews_api", "}", "items", "=", "super", "(", ")", ".", "fetch", "(", "category", ",", "*", "*", "kwargs", ")", "return", "items" ]
Fetch the pages from the backend url. The method retrieves, from a MediaWiki url, the wiki pages. :param category: the category of items to fetch :param from_date: obtain pages updated since this date :param reviews_api: use the reviews API available in MediaWiki >= 1.27 :returns: a generator of pages
[ "Fetch", "the", "pages", "from", "the", "backend", "url", "." ]
python
test
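A usage sketch for the fetch method above; the backend constructor signature and the layout of the yielded items are assumptions based on the record's path, not verified Perceval documentation.

from perceval.backends.core.mediawiki import MediaWiki

backend = MediaWiki('https://wiki.example.org')        # assumed: the wiki url as first positional argument
for item in backend.fetch():                           # defaults: CATEGORY_PAGE, DEFAULT_DATETIME, reviews_api=False
    print(item['data'].get('title'))                   # assumed item layout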
SBRG/ssbio
ssbio/protein/sequence/utils/blast.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/blast.py#L180-L237
def calculate_bbh(blast_results_1, blast_results_2, r_name=None, g_name=None, outdir=''): """Calculate the best bidirectional BLAST hits (BBH) and save a dataframe of results. Args: blast_results_1 (str): BLAST results for reference vs. other genome blast_results_2 (str): BLAST results for other vs. reference genome r_name: Name of reference genome g_name: Name of other genome outdir: Directory where BLAST results are stored. Returns: Path to Pandas DataFrame of the BBH results. """ # TODO: add force_rerun option cols = ['gene', 'subject', 'PID', 'alnLength', 'mismatchCount', 'gapOpenCount', 'queryStart', 'queryEnd', 'subjectStart', 'subjectEnd', 'eVal', 'bitScore'] if not r_name and not g_name: r_name = op.basename(blast_results_1).split('_vs_')[0] g_name = op.basename(blast_results_1).split('_vs_')[1].replace('_blast.out', '') r_name2 = op.basename(blast_results_2).split('_vs_')[1].replace('_blast.out', '') if r_name != r_name2: log.warning('{} != {}'.format(r_name, r_name2)) outfile = op.join(outdir, '{}_vs_{}_bbh.csv'.format(r_name, g_name)) if op.exists(outfile) and os.stat(outfile).st_size != 0: log.debug('{} vs {} BLAST BBHs already found at {}'.format(r_name, g_name, outfile)) return outfile bbh1 = pd.read_csv(blast_results_1, sep='\t', names=cols) bbh2 = pd.read_csv(blast_results_2, sep='\t', names=cols) out = pd.DataFrame() log.debug('Finding BBHs for {} vs. {}'.format(r_name, g_name)) for g in bbh1[pd.notnull(bbh1.gene)].gene.unique(): res = bbh1[bbh1.gene == g] if len(res) == 0: continue best_hit = res.ix[res.PID.idxmax()].copy() best_gene = best_hit.subject res2 = bbh2[bbh2.gene == best_gene] if len(res2) == 0: continue best_hit2 = res2.ix[res2.PID.idxmax()] best_gene2 = best_hit2.subject if g == best_gene2: best_hit['BBH'] = '<=>' else: best_hit['BBH'] = '->' out = pd.concat([out, pd.DataFrame(best_hit).transpose()]) out.to_csv(outfile) log.debug('{} vs {} BLAST BBHs saved at {}'.format(r_name, g_name, outfile)) return outfile
[ "def", "calculate_bbh", "(", "blast_results_1", ",", "blast_results_2", ",", "r_name", "=", "None", ",", "g_name", "=", "None", ",", "outdir", "=", "''", ")", ":", "# TODO: add force_rerun option", "cols", "=", "[", "'gene'", ",", "'subject'", ",", "'PID'", ",", "'alnLength'", ",", "'mismatchCount'", ",", "'gapOpenCount'", ",", "'queryStart'", ",", "'queryEnd'", ",", "'subjectStart'", ",", "'subjectEnd'", ",", "'eVal'", ",", "'bitScore'", "]", "if", "not", "r_name", "and", "not", "g_name", ":", "r_name", "=", "op", ".", "basename", "(", "blast_results_1", ")", ".", "split", "(", "'_vs_'", ")", "[", "0", "]", "g_name", "=", "op", ".", "basename", "(", "blast_results_1", ")", ".", "split", "(", "'_vs_'", ")", "[", "1", "]", ".", "replace", "(", "'_blast.out'", ",", "''", ")", "r_name2", "=", "op", ".", "basename", "(", "blast_results_2", ")", ".", "split", "(", "'_vs_'", ")", "[", "1", "]", ".", "replace", "(", "'_blast.out'", ",", "''", ")", "if", "r_name", "!=", "r_name2", ":", "log", ".", "warning", "(", "'{} != {}'", ".", "format", "(", "r_name", ",", "r_name2", ")", ")", "outfile", "=", "op", ".", "join", "(", "outdir", ",", "'{}_vs_{}_bbh.csv'", ".", "format", "(", "r_name", ",", "g_name", ")", ")", "if", "op", ".", "exists", "(", "outfile", ")", "and", "os", ".", "stat", "(", "outfile", ")", ".", "st_size", "!=", "0", ":", "log", ".", "debug", "(", "'{} vs {} BLAST BBHs already found at {}'", ".", "format", "(", "r_name", ",", "g_name", ",", "outfile", ")", ")", "return", "outfile", "bbh1", "=", "pd", ".", "read_csv", "(", "blast_results_1", ",", "sep", "=", "'\\t'", ",", "names", "=", "cols", ")", "bbh2", "=", "pd", ".", "read_csv", "(", "blast_results_2", ",", "sep", "=", "'\\t'", ",", "names", "=", "cols", ")", "out", "=", "pd", ".", "DataFrame", "(", ")", "log", ".", "debug", "(", "'Finding BBHs for {} vs. {}'", ".", "format", "(", "r_name", ",", "g_name", ")", ")", "for", "g", "in", "bbh1", "[", "pd", ".", "notnull", "(", "bbh1", ".", "gene", ")", "]", ".", "gene", ".", "unique", "(", ")", ":", "res", "=", "bbh1", "[", "bbh1", ".", "gene", "==", "g", "]", "if", "len", "(", "res", ")", "==", "0", ":", "continue", "best_hit", "=", "res", ".", "ix", "[", "res", ".", "PID", ".", "idxmax", "(", ")", "]", ".", "copy", "(", ")", "best_gene", "=", "best_hit", ".", "subject", "res2", "=", "bbh2", "[", "bbh2", ".", "gene", "==", "best_gene", "]", "if", "len", "(", "res2", ")", "==", "0", ":", "continue", "best_hit2", "=", "res2", ".", "ix", "[", "res2", ".", "PID", ".", "idxmax", "(", ")", "]", "best_gene2", "=", "best_hit2", ".", "subject", "if", "g", "==", "best_gene2", ":", "best_hit", "[", "'BBH'", "]", "=", "'<=>'", "else", ":", "best_hit", "[", "'BBH'", "]", "=", "'->'", "out", "=", "pd", ".", "concat", "(", "[", "out", ",", "pd", ".", "DataFrame", "(", "best_hit", ")", ".", "transpose", "(", ")", "]", ")", "out", ".", "to_csv", "(", "outfile", ")", "log", ".", "debug", "(", "'{} vs {} BLAST BBHs saved at {}'", ".", "format", "(", "r_name", ",", "g_name", ",", "outfile", ")", ")", "return", "outfile" ]
Calculate the best bidirectional BLAST hits (BBH) and save a dataframe of results. Args: blast_results_1 (str): BLAST results for reference vs. other genome blast_results_2 (str): BLAST results for other vs. reference genome r_name: Name of reference genome g_name: Name of other genome outdir: Directory where BLAST results are stored. Returns: Path to Pandas DataFrame of the BBH results.
[ "Calculate", "the", "best", "bidirectional", "BLAST", "hits", "(", "BBH", ")", "and", "save", "a", "dataframe", "of", "results", "." ]
python
train
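A hypothetical call for calculate_bbh; the file names follow the "<reference>_vs_<other>_blast.out" convention that the function itself parses when r_name and g_name are omitted, and the BLAST output files are assumed to exist already.

bbh_csv = calculate_bbh('ecoli_vs_salmonella_blast.out',
                        'salmonella_vs_ecoli_blast.out',
                        outdir='bbh_results')
print(bbh_csv)   # e.g. bbh_results/ecoli_vs_salmonella_bbh.csv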
boriel/zxbasic
arch/zx48k/backend/__16bit.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__16bit.py#L575-L588
def _geu16(ins): ''' Compares & pops top 2 operands out of the stack, and checks if the 1st operand >= 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 16 bit unsigned version ''' output = _16bit_oper(ins.quad[2], ins.quad[3]) output.append('or a') output.append('sbc hl, de') output.append('ccf') output.append('sbc a, a') output.append('push af') return output
[ "def", "_geu16", "(", "ins", ")", ":", "output", "=", "_16bit_oper", "(", "ins", ".", "quad", "[", "2", "]", ",", "ins", ".", "quad", "[", "3", "]", ")", "output", ".", "append", "(", "'or a'", ")", "output", ".", "append", "(", "'sbc hl, de'", ")", "output", ".", "append", "(", "'ccf'", ")", "output", ".", "append", "(", "'sbc a, a'", ")", "output", ".", "append", "(", "'push af'", ")", "return", "output" ]
Compares & pops top 2 operands out of the stack, and checks if the 1st operand >= 2nd operand (top of the stack). Pushes 0 if False, 1 if True. 16 bit unsigned version
[ "Compares", "&", "pops", "top", "2", "operands", "out", "of", "the", "stack", "and", "checks", "if", "the", "1st", "operand", ">", "=", "2nd", "operand", "(", "top", "of", "the", "stack", ")", ".", "Pushes", "0", "if", "False", "1", "if", "True", "." ]
python
train
autokey/autokey
lib/autokey/interface.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/interface.py#L353-L374
def __grabHotkeys(self): """ Run during startup to grab global and specific hotkeys in all open windows """ c = self.app.configManager hotkeys = c.hotKeys + c.hotKeyFolders # Grab global hotkeys in root window for item in c.globalHotkeys: if item.enabled: self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow) if self.__needsMutterWorkaround(item): self.__enqueue(self.__grabRecurse, item, self.rootWindow, False) # Grab hotkeys without a filter in root window for item in hotkeys: if item.get_applicable_regex() is None: self.__enqueue(self.__grabHotkey, item.hotKey, item.modifiers, self.rootWindow) if self.__needsMutterWorkaround(item): self.__enqueue(self.__grabRecurse, item, self.rootWindow, False) self.__enqueue(self.__recurseTree, self.rootWindow, hotkeys)
[ "def", "__grabHotkeys", "(", "self", ")", ":", "c", "=", "self", ".", "app", ".", "configManager", "hotkeys", "=", "c", ".", "hotKeys", "+", "c", ".", "hotKeyFolders", "# Grab global hotkeys in root window", "for", "item", "in", "c", ".", "globalHotkeys", ":", "if", "item", ".", "enabled", ":", "self", ".", "__enqueue", "(", "self", ".", "__grabHotkey", ",", "item", ".", "hotKey", ",", "item", ".", "modifiers", ",", "self", ".", "rootWindow", ")", "if", "self", ".", "__needsMutterWorkaround", "(", "item", ")", ":", "self", ".", "__enqueue", "(", "self", ".", "__grabRecurse", ",", "item", ",", "self", ".", "rootWindow", ",", "False", ")", "# Grab hotkeys without a filter in root window", "for", "item", "in", "hotkeys", ":", "if", "item", ".", "get_applicable_regex", "(", ")", "is", "None", ":", "self", ".", "__enqueue", "(", "self", ".", "__grabHotkey", ",", "item", ".", "hotKey", ",", "item", ".", "modifiers", ",", "self", ".", "rootWindow", ")", "if", "self", ".", "__needsMutterWorkaround", "(", "item", ")", ":", "self", ".", "__enqueue", "(", "self", ".", "__grabRecurse", ",", "item", ",", "self", ".", "rootWindow", ",", "False", ")", "self", ".", "__enqueue", "(", "self", ".", "__recurseTree", ",", "self", ".", "rootWindow", ",", "hotkeys", ")" ]
Run during startup to grab global and specific hotkeys in all open windows
[ "Run", "during", "startup", "to", "grab", "global", "and", "specific", "hotkeys", "in", "all", "open", "windows" ]
python
train
trendels/rhino
rhino/mapper.py
https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L675-L693
def wsgi(self, environ, start_response): """Implements the mapper's WSGI interface.""" request = Request(environ) ctx = Context(request) try: try: response = self(request, ctx) ctx._run_callbacks('finalize', (request, response)) response = response.conditional_to(request) except HTTPException as e: response = e.response except Exception: self.handle_error(request, ctx) response = InternalServerError().response response.add_callback(lambda: ctx._run_callbacks('close')) return response(environ, start_response) finally: ctx._run_callbacks('teardown', log_errors=True)
[ "def", "wsgi", "(", "self", ",", "environ", ",", "start_response", ")", ":", "request", "=", "Request", "(", "environ", ")", "ctx", "=", "Context", "(", "request", ")", "try", ":", "try", ":", "response", "=", "self", "(", "request", ",", "ctx", ")", "ctx", ".", "_run_callbacks", "(", "'finalize'", ",", "(", "request", ",", "response", ")", ")", "response", "=", "response", ".", "conditional_to", "(", "request", ")", "except", "HTTPException", "as", "e", ":", "response", "=", "e", ".", "response", "except", "Exception", ":", "self", ".", "handle_error", "(", "request", ",", "ctx", ")", "response", "=", "InternalServerError", "(", ")", ".", "response", "response", ".", "add_callback", "(", "lambda", ":", "ctx", ".", "_run_callbacks", "(", "'close'", ")", ")", "return", "response", "(", "environ", ",", "start_response", ")", "finally", ":", "ctx", ".", "_run_callbacks", "(", "'teardown'", ",", "log_errors", "=", "True", ")" ]
Implements the mapper's WSGI interface.
[ "Implements", "the", "mapper", "s", "WSGI", "interface", "." ]
python
train
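Because wsgi implements the standard WSGI calling convention, the mapper can be served directly by the standard library. In the sketch below the Mapper import path is inferred from the record's path field and the route configuration is omitted, so treat it as an assumption.

from wsgiref.simple_server import make_server
from rhino.mapper import Mapper      # import path assumed from the record's path field

app = Mapper()                       # routes would normally be added here
make_server('localhost', 8000, app.wsgi).serve_forever()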
bids-standard/pybids
bids/variables/kollekshuns.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/kollekshuns.py#L245-L288
def resample(self, sampling_rate=None, variables=None, force_dense=False, in_place=False, kind='linear'): ''' Resample all dense variables (and optionally, sparse ones) to the specified sampling rate. Args: sampling_rate (int, float): Target sampling rate (in Hz). If None, uses the instance sampling rate. variables (list): Optional list of Variables to resample. If None, all variables are resampled. force_dense (bool): if True, all sparse variables will be forced to dense. in_place (bool): When True, all variables are overwritten in-place. When False, returns resampled versions of all variables. kind (str): Argument to pass to scipy's interp1d; indicates the kind of interpolation approach to use. See interp1d docs for valid values. ''' # Store old sampling rate-based variables sampling_rate = sampling_rate or self.sampling_rate _variables = {} for name, var in self.variables.items(): if variables is not None and name not in variables: continue if isinstance(var, SparseRunVariable): if force_dense and is_numeric_dtype(var.values): _variables[name] = var.to_dense(sampling_rate) else: # None if in_place; no update needed _var = var.resample(sampling_rate, inplace=in_place, kind=kind) if not in_place: _variables[name] = _var if in_place: for k, v in _variables.items(): self.variables[k] = v self.sampling_rate = sampling_rate else: return _variables
[ "def", "resample", "(", "self", ",", "sampling_rate", "=", "None", ",", "variables", "=", "None", ",", "force_dense", "=", "False", ",", "in_place", "=", "False", ",", "kind", "=", "'linear'", ")", ":", "# Store old sampling rate-based variables", "sampling_rate", "=", "sampling_rate", "or", "self", ".", "sampling_rate", "_variables", "=", "{", "}", "for", "name", ",", "var", "in", "self", ".", "variables", ".", "items", "(", ")", ":", "if", "variables", "is", "not", "None", "and", "name", "not", "in", "variables", ":", "continue", "if", "isinstance", "(", "var", ",", "SparseRunVariable", ")", ":", "if", "force_dense", "and", "is_numeric_dtype", "(", "var", ".", "values", ")", ":", "_variables", "[", "name", "]", "=", "var", ".", "to_dense", "(", "sampling_rate", ")", "else", ":", "# None if in_place; no update needed", "_var", "=", "var", ".", "resample", "(", "sampling_rate", ",", "inplace", "=", "in_place", ",", "kind", "=", "kind", ")", "if", "not", "in_place", ":", "_variables", "[", "name", "]", "=", "_var", "if", "in_place", ":", "for", "k", ",", "v", "in", "_variables", ".", "items", "(", ")", ":", "self", ".", "variables", "[", "k", "]", "=", "v", "self", ".", "sampling_rate", "=", "sampling_rate", "else", ":", "return", "_variables" ]
Resample all dense variables (and optionally, sparse ones) to the specified sampling rate. Args: sampling_rate (int, float): Target sampling rate (in Hz). If None, uses the instance sampling rate. variables (list): Optional list of Variables to resample. If None, all variables are resampled. force_dense (bool): if True, all sparse variables will be forced to dense. in_place (bool): When True, all variables are overwritten in-place. When False, returns resampled versions of all variables. kind (str): Argument to pass to scipy's interp1d; indicates the kind of interpolation approach to use. See interp1d docs for valid values.
[ "Resample", "all", "dense", "variables", "(", "and", "optionally", "sparse", "ones", ")", "to", "the", "specified", "sampling", "rate", "." ]
python
train
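A sketch of how the in_place flag changes the behaviour of resample; `collection` is assumed to be an already-built run-level variable collection, whose construction is not shown.

# In place: numeric sparse variables are densified and overwritten at 10 Hz.
collection.resample(sampling_rate=10, force_dense=True, in_place=True, kind='linear')

# Not in place: the resampled variables are returned as a dict and the collection is untouched.
resampled = collection.resample(sampling_rate=10, force_dense=True, in_place=False)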
bcbio/bcbio-nextgen
bcbio/bam/fastq.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L101-L110
def rstrip_extra(fname): """Strip extraneous, non-discriminative filename info from the end of a file. """ to_strip = ("_R", ".R", "-R", "_", "fastq", ".", "-") while fname.endswith(to_strip): for x in to_strip: if fname.endswith(x): fname = fname[:len(fname) - len(x)] break return fname
[ "def", "rstrip_extra", "(", "fname", ")", ":", "to_strip", "=", "(", "\"_R\"", ",", "\".R\"", ",", "\"-R\"", ",", "\"_\"", ",", "\"fastq\"", ",", "\".\"", ",", "\"-\"", ")", "while", "fname", ".", "endswith", "(", "to_strip", ")", ":", "for", "x", "in", "to_strip", ":", "if", "fname", ".", "endswith", "(", "x", ")", ":", "fname", "=", "fname", "[", ":", "len", "(", "fname", ")", "-", "len", "(", "x", ")", "]", "break", "return", "fname" ]
Strip extraneous, non-discriminative filename info from the end of a file.
[ "Strip", "extraneous", "non", "-", "discriminative", "filename", "info", "from", "the", "end", "of", "a", "file", "." ]
python
train
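Worked examples of the stripping loop above, traced by hand; no assumptions beyond the function as written.

rstrip_extra("tumor_R.fastq")    # -> "tumor"     ("fastq", ".", then "_R" are stripped in turn)
rstrip_extra("normal-.fastq")    # -> "normal"    ("fastq", ".", then "-" are stripped in turn)
rstrip_extra("sample_1.fastq")   # -> "sample_1"  (a trailing read number blocks further stripping)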
TeamHG-Memex/eli5
eli5/sklearn/treeinspect.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/sklearn/treeinspect.py#L16-L31
def get_tree_info(decision_tree, feature_names=None, **export_graphviz_kwargs): # type: (...) -> TreeInfo """ Convert DecisionTreeClassifier or DecisionTreeRegressor to an inspectable object. """ return TreeInfo( criterion=decision_tree.criterion, tree=_get_root_node_info(decision_tree, feature_names), graphviz=tree2dot(decision_tree, feature_names=feature_names, **export_graphviz_kwargs), is_classification=isinstance(decision_tree, ClassifierMixin), )
[ "def", "get_tree_info", "(", "decision_tree", ",", "feature_names", "=", "None", ",", "*", "*", "export_graphviz_kwargs", ")", ":", "# type: (...) -> TreeInfo", "return", "TreeInfo", "(", "criterion", "=", "decision_tree", ".", "criterion", ",", "tree", "=", "_get_root_node_info", "(", "decision_tree", ",", "feature_names", ")", ",", "graphviz", "=", "tree2dot", "(", "decision_tree", ",", "feature_names", "=", "feature_names", ",", "*", "*", "export_graphviz_kwargs", ")", ",", "is_classification", "=", "isinstance", "(", "decision_tree", ",", "ClassifierMixin", ")", ",", ")" ]
Convert DecisionTreeClassifier or DecisionTreeRegressor to an inspectable object.
[ "Convert", "DecisionTreeClassifier", "or", "DecisionTreeRegressor", "to", "an", "inspectable", "object", "." ]
python
train
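A usage sketch for get_tree_info; it assumes scikit-learn and eli5 are installed, that the import path matches the record's path field, and that the returned TreeInfo object exposes its fields as attributes.

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from eli5.sklearn.treeinspect import get_tree_info

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=2).fit(iris.data, iris.target)
info = get_tree_info(clf, feature_names=list(iris.feature_names))
print(info.criterion, info.is_classification)   # e.g. "gini" True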
josiah-wolf-oberholtzer/uqbar
uqbar/sphinx/style.py
https://github.com/josiah-wolf-oberholtzer/uqbar/blob/eca7fefebbbee1e2ae13bf5d6baa838be66b1db6/uqbar/sphinx/style.py#L22-L38
def handle_class(signature_node, module, object_name, cache): """ Styles ``autoclass`` entries. Adds ``abstract`` prefix to abstract classes. """ class_ = getattr(module, object_name, None) if class_ is None: return if class_ not in cache: cache[class_] = {} attributes = inspect.classify_class_attrs(class_) for attribute in attributes: cache[class_][attribute.name] = attribute if inspect.isabstract(class_): emphasis = nodes.emphasis("abstract ", "abstract ", classes=["property"]) signature_node.insert(0, emphasis)
[ "def", "handle_class", "(", "signature_node", ",", "module", ",", "object_name", ",", "cache", ")", ":", "class_", "=", "getattr", "(", "module", ",", "object_name", ",", "None", ")", "if", "class_", "is", "None", ":", "return", "if", "class_", "not", "in", "cache", ":", "cache", "[", "class_", "]", "=", "{", "}", "attributes", "=", "inspect", ".", "classify_class_attrs", "(", "class_", ")", "for", "attribute", "in", "attributes", ":", "cache", "[", "class_", "]", "[", "attribute", ".", "name", "]", "=", "attribute", "if", "inspect", ".", "isabstract", "(", "class_", ")", ":", "emphasis", "=", "nodes", ".", "emphasis", "(", "\"abstract \"", ",", "\"abstract \"", ",", "classes", "=", "[", "\"property\"", "]", ")", "signature_node", ".", "insert", "(", "0", ",", "emphasis", ")" ]
Styles ``autoclass`` entries. Adds ``abstract`` prefix to abstract classes.
[ "Styles", "autoclass", "entries", "." ]
python
train
bwesterb/sarah
src/pack.py
https://github.com/bwesterb/sarah/blob/a9e46e875dfff1dc11255d714bb736e5eb697809/src/pack.py#L1-L11
def unpack_int(s): """ Reads a packed integer from string <s> """ ret = 0 i = 0 while True: b = ord(s[i]) ret |= (b & 127) << (i * 7) i += 1 if b & 128 == 0: break return ret
[ "def", "unpack_int", "(", "s", ")", ":", "ret", "=", "0", "i", "=", "0", "while", "True", ":", "b", "=", "ord", "(", "s", "[", "i", "]", ")", "ret", "|=", "(", "b", "&", "127", ")", "<<", "(", "i", "*", "7", ")", "i", "+=", "1", "if", "b", "&", "128", "==", "0", ":", "break", "return", "ret" ]
Reads a packed integer from string <s>
[ "Reads", "a", "packed", "integer", "from", "string", "<s", ">" ]
python
train
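A worked example of the little-endian base-128 ("varint") decoding performed by unpack_int; the input is a Python-2-style byte string, as the use of ord() implies.

# Bytes 0x96 0x01 decode as (0x96 & 0x7F) | (0x01 << 7) = 22 + 128 = 150.
print(unpack_int('\x96\x01'))   # -> 150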