Dataset columns (name, type, observed range):

  repo              string, length 7-55
  path              string, length 4-223
  url               string, length 87-315
  code              string, length 75-104k
  code_tokens       list
  docstring         string, length 1-46.9k
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
  avg_line_len      float64, 7.91-980
repo: facundobatista/yaswfp
path: yaswfp/swfparser.py
url: https://github.com/facundobatista/yaswfp/blob/2a2cc6ca4c0b4d52bd2e658fb5f80fdc0db4924c/yaswfp/swfparser.py#L1197-L1219
code:
def _get_struct_fillstyle(self, shape_number):
    """Get the values for the FILLSTYLE record."""
    obj = _make_object("FillStyle")
    obj.FillStyleType = style_type = unpack_ui8(self._src)

    if style_type == 0x00:
        if shape_number <= 2:
            obj.Color = self._get_struct_rgb()
        else:
            obj.Color = self._get_struct_rgba()

    if style_type in (0x10, 0x12, 0x13):
        obj.GradientMatrix = self._get_struct_matrix()

    if style_type in (0x10, 0x12):
        obj.Gradient = self._get_struct_gradient(shape_number)

    if style_type == 0x13:
        obj.Gradient = self._get_struct_focalgradient(shape_number)

    if style_type in (0x40, 0x41, 0x42, 0x43):
        obj.BitmapId = unpack_ui16(self._src)
        obj.BitmapMatrix = self._get_struct_matrix()

    return obj
language: python | partition: train | avg_line_len: 37.478261
repo: raiden-network/raiden
path: raiden/tasks.py
url: https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/tasks.py#L171-L182
code:
def register_callback(self, callback):
    """ Register a new callback.

    Note:
        The callback will be executed in the AlarmTask context and for
        this reason it should not block, otherwise we can miss block
        changes.
    """
    if not callable(callback):
        raise ValueError('callback is not a callable')

    self.callbacks.append(callback)
language: python | partition: train | avg_line_len: 32.916667
repo: quantopian/pyfolio
path: pyfolio/bayesian.py
url: https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/bayesian.py#L456-L484
code:
def compute_consistency_score(returns_test, preds):
    """
    Compute Bayesian consistency score.

    Parameters
    ----------
    returns_test : pd.Series
        Observed cumulative returns.
    preds : numpy.array
        Multiple (simulated) cumulative returns.

    Returns
    -------
    Consistency score
        Score from 100 (returns_test perfectly on the median line of the
        Bayesian cone spanned by preds) to 0 (returns_test completely
        outside of Bayesian cone.)
    """
    returns_test_cum = cum_returns(returns_test, starting_value=1.)
    cum_preds = np.cumprod(preds + 1, 1)

    q = [sp.stats.percentileofscore(cum_preds[:, i],
                                    returns_test_cum.iloc[i],
                                    kind='weak')
         for i in range(len(returns_test_cum))]
    # normalize to be from 100 (perfect median line) to 0 (completely outside
    # of cone)
    return 100 - np.abs(50 - np.mean(q)) / .5
language: python | partition: valid | avg_line_len: 32.275862
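A minimal, self-contained sketch of the scoring idea above, on synthetic data; cum_returns is approximated here by a plain cumulative product, and all inputs are invented for illustration.

import numpy as np
import scipy.stats as stats

rng = np.random.RandomState(0)
preds = rng.normal(0.001, 0.01, size=(500, 250))   # 500 simulated return paths
returns_test = rng.normal(0.001, 0.01, size=250)   # one observed return path

returns_test_cum = np.cumprod(returns_test + 1)    # stand-in for cum_returns
cum_preds = np.cumprod(preds + 1, axis=1)

# Daily percentile rank of the observed path within the simulated cone.
q = [stats.percentileofscore(cum_preds[:, i], returns_test_cum[i], kind='weak')
     for i in range(len(returns_test_cum))]

# 100 = on the cone's median line, 0 = entirely outside the cone.
print(100 - np.abs(50 - np.mean(q)) / .5)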
repo: hobson/aima
path: aima/nlp.py
url: https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/nlp.py#L148-L158
code:
def add_edge(self, edge):
    "Add edge to chart, and see if it extends or predicts another edge."
    start, end, lhs, found, expects = edge
    if edge not in self.chart[end]:
        self.chart[end].append(edge)
        if self.trace:
            print '%10s: added %s' % (caller(2), edge)
        if not expects:
            self.extender(edge)
        else:
            self.predictor(edge)
language: python | partition: valid | avg_line_len: 38.636364
repo: all-umass/graphs
path: graphs/mixins/viz.py
url: https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/mixins/viz.py#L144-L183
code:
def _parse_fmt(fmt, color_key='colors', ls_key='linestyles',
               marker_key='marker'):
    '''Modified from matplotlib's _process_plot_format function.'''
    try:  # Is fmt just a colorspec?
        color = mcolors.colorConverter.to_rgb(fmt)
    except ValueError:
        pass  # No, not just a color.
    else:
        # Either a color or a numeric marker style
        if fmt not in mlines.lineMarkers:
            return {color_key: color}
    result = dict()
    # handle the multi char special cases and strip them from the string
    if fmt.find('--') >= 0:
        result[ls_key] = '--'
        fmt = fmt.replace('--', '')
    if fmt.find('-.') >= 0:
        result[ls_key] = '-.'
        fmt = fmt.replace('-.', '')
    if fmt.find(' ') >= 0:
        result[ls_key] = 'None'
        fmt = fmt.replace(' ', '')
    for c in list(fmt):
        if c in mlines.lineStyles:
            if ls_key in result:
                raise ValueError('Illegal format string; two linestyle symbols')
            result[ls_key] = c
        elif c in mlines.lineMarkers:
            if marker_key in result:
                raise ValueError('Illegal format string; two marker symbols')
            result[marker_key] = c
        elif c in mcolors.colorConverter.colors:
            if color_key in result:
                raise ValueError('Illegal format string; two color symbols')
            result[color_key] = c
        else:
            raise ValueError('Unrecognized character %c in format string' % c)
    return result
language: python | partition: train | avg_line_len: 33.525
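A hedged usage sketch for _parse_fmt above, assuming the function and matplotlib are importable; mlines and mcolors are the module aliases its body expects to find in scope.

import matplotlib.lines as mlines
import matplotlib.colors as mcolors

print(_parse_fmt('r--o'))
# {'linestyles': '--', 'colors': 'r', 'marker': 'o'}
print(_parse_fmt('g'))
# {'colors': (0.0, 0.5, 0.0)}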
repo: cloud9ers/gurumate
path: environment/lib/python2.7/site-packages/IPython/nbformat/current.py
url: https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/current.py#L139-L162
code:
def writes(nb, format, **kwargs):
    """Write a notebook to a string in a given format in the current nbformat version.

    This function always writes the notebook in the current nbformat version.

    Parameters
    ----------
    nb : NotebookNode
        The notebook to write.
    format : (u'json', u'ipynb', u'py')
        The format to write the notebook in.

    Returns
    -------
    s : unicode
        The notebook string.
    """
    format = unicode(format)
    if format == u'json' or format == u'ipynb':
        return writes_json(nb, **kwargs)
    elif format == u'py':
        return writes_py(nb, **kwargs)
    else:
        raise NBFormatError('Unsupported format: %s' % format)
language: python | partition: test | avg_line_len: 28.291667
repo: vertexproject/synapse
path: synapse/lib/trigger.py
url: https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/trigger.py#L168-L197
code:
def migrate_v0_rules(self):
    '''
    Remove any v0 (i.e. pre-010) rules from storage and replace them with v1 rules.

    Notes:
        v0 had two differences: user was a username, replaced with the iden
        of the user as the 'iden' field.  Also, 'iden' was stored as binary;
        now it is stored as a hex string.
    '''
    for iden, valu in self.core.slab.scanByFull(db=self.trigdb):
        ruledict = s_msgpack.un(valu)
        ver = ruledict.get('ver')
        if ver != 0:
            continue

        user = ruledict.pop('user')
        if user is None:
            logger.warning('Username missing in stored trigger rule %r', iden)
            continue

        # In v0, stored user was username, in >0 user is useriden
        user = self.core.auth.getUserByName(user).iden
        if user is None:
            logger.warning('Unrecognized username in stored trigger rule %r', iden)
            continue

        ruledict['ver'] = 1
        ruledict['useriden'] = user
        newiden = s_common.ehex(iden)
        self.core.slab.pop(iden, db=self.trigdb)
        self.core.slab.put(newiden.encode(), s_msgpack.en(ruledict), db=self.trigdb)
language: python | partition: train | avg_line_len: 40.433333
repo: SavinaRoja/OpenAccess_EPUB
path: src/openaccess_epub/publisher/__init__.py
url: https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/__init__.py#L196-L224
code:
def make_document(self, titlestring):
    """
    This method may be used to create a new document for writing as xml
    to the OPS subdirectory of the ePub structure.
    """
    #root = etree.XML('''<?xml version="1.0"?>\
    #<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.1//EN' 'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'>\
    #<html xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml" xmlns:ops="http://www.idpf.org/2007/ops">\
    #</html>''')
    root = etree.XML('''<?xml version="1.0"?>\
<!DOCTYPE html>\
<html xmlns="http://www.w3.org/1999/xhtml">\
</html>''')
    document = etree.ElementTree(root)
    html = document.getroot()
    head = etree.SubElement(html, 'head')
    etree.SubElement(html, 'body')
    title = etree.SubElement(head, 'title')
    title.text = titlestring
    #The href for the css stylesheet is a standin, can be overwritten
    etree.SubElement(head, 'link', {'href': 'css/default.css',
                                    'rel': 'stylesheet',
                                    'type': 'text/css'})
    return document
language: python | partition: train | avg_line_len: 38.448276
repo: digidotcom/python-wvalib
path: wva/cli.py
url: https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/cli.py#L202-L217
code:
def delete(ctx, uri):
    """DELETE the specified URI

    Example:

    \b
    $ wva get files/userfs/WEB/python
    {'file_list': ['files/userfs/WEB/python/.ssh',
                   'files/userfs/WEB/python/README.md']}
    $ wva delete files/userfs/WEB/python/README.md
    ''
    $ wva get files/userfs/WEB/python
    {'file_list': ['files/userfs/WEB/python/.ssh']}
    """
    http_client = get_wva(ctx).get_http_client()
    cli_pprint(http_client.delete(uri))
language: python | partition: train | avg_line_len: 27.75
repo: AkihikoITOH/capybara
path: capybara/virtualenv/lib/python2.7/site-packages/wheel/tool/__init__.py
url: https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/wheel/tool/__init__.py#L94-L108
code:
def unsign(wheelfile):
    """
    Remove RECORD.jws from a wheel by truncating the zip file.

    RECORD.jws must be at the end of the archive. The zip file must be an
    ordinary archive, with the compressed files and the directory in the same
    order, and without any non-zip content after the truncation point.
    """
    import wheel.install
    vzf = wheel.install.VerifyingZipFile(wheelfile, "a")
    info = vzf.infolist()
    if not (len(info) and info[-1].filename.endswith('/RECORD.jws')):
        raise WheelError("RECORD.jws not found at end of archive.")
    vzf.pop()
    vzf.close()
language: python | partition: test | avg_line_len: 39.533333
repo: ShadowBlip/Neteria
path: neteria/core.py
url: https://github.com/ShadowBlip/Neteria/blob/1a8c976eb2beeca0a5a272a34ac58b2c114495a4/neteria/core.py#L44-L76
code:
def serialize_data(data, compression=False, encryption=False, public_key=None):
    """Serializes normal Python datatypes into plaintext using json.

    You may also choose to enable compression and encryption when serializing
    data to send over the network. Enabling one or both of these options will
    incur additional overhead.

    Args:
      data (dict): The data to convert into plain text using json.
      compression (boolean): True or False value on whether or not to compress
        the serialized data.
      encryption (rsa.encryption): An encryption instance used to encrypt the
        message if encryption is desired.
      public_key (str): The public key to use to encrypt if encryption is
        enabled.

    Returns:
      The string message serialized using json.
    """
    message = json.dumps(data)

    if compression:
        message = zlib.compress(message)
        message = binascii.b2a_base64(message)

    if encryption and public_key:
        message = encryption.encrypt(message, public_key)

    encoded_message = str.encode(message)

    return encoded_message
language: python | partition: train | avg_line_len: 32.636364
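The function above targets Python 2 string semantics (str.encode on an already-serialized message). A Python 3 sketch of the same json -> zlib -> base64 pipeline, with bytes handled explicitly:

import binascii
import json
import zlib

def serialize_data_py3(data, compression=False):
    # json -> bytes, then optionally deflate and base64-encode for transport
    message = json.dumps(data).encode('utf-8')
    if compression:
        message = binascii.b2a_base64(zlib.compress(message))
    return message

print(serialize_data_py3({'event': 'KEYDOWN', 'key': 'K_a'}, compression=True))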
repo: RI-imaging/qpformat
path: examples/convert_txt2tif.py
url: https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/examples/convert_txt2tif.py#L27-L42
code:
def get_paths(folder, ignore_endswith=ignore_endswith):
    '''Return hologram file paths

    Parameters
    ----------
    folder: str or pathlib.Path
        Path to search folder
    ignore_endswith: list
        List of filename ending strings indicating which
        files should be ignored.
    '''
    folder = pathlib.Path(folder).resolve()
    files = folder.rglob("*")
    for ie in ignore_endswith:
        files = [ff for ff in files if not ff.name.endswith(ie)]
    return sorted(files)
language: python | partition: train | avg_line_len: 30.375
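A self-contained usage sketch of the same pathlib pattern, on a throwaway directory (file names are invented):

import pathlib
import tempfile

root = pathlib.Path(tempfile.mkdtemp())
(root / 'a.h5').touch()
(root / 'a.h5.bak').touch()

ignore_endswith = ['.bak']
files = sorted(ff for ff in root.rglob('*')
               if not any(ff.name.endswith(ie) for ie in ignore_endswith))
print(files)  # only a.h5 survives the filter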
repo: jic-dtool/dtoolcore
path: dtoolcore/compare.py
url: https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/compare.py#L4-L25
code:
def diff_identifiers(a, b):
    """Return list of tuples where identifiers in datasets differ.

    Tuple structure:

    (identifier, present in a, present in b)

    :param a: first :class:`dtoolcore.DataSet`
    :param b: second :class:`dtoolcore.DataSet`
    :returns: list of tuples where identifiers in datasets differ
    """
    a_ids = set(a.identifiers)
    b_ids = set(b.identifiers)

    difference = []

    for i in a_ids.difference(b_ids):
        difference.append((i, True, False))

    for i in b_ids.difference(a_ids):
        difference.append((i, False, True))

    return difference
language: python | partition: train | avg_line_len: 26.454545
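A hedged usage sketch: any objects exposing an identifiers attribute are enough to exercise diff_identifiers above, so stand-in namespaces replace real DataSet instances here.

from types import SimpleNamespace

a = SimpleNamespace(identifiers=['id1', 'id2'])
b = SimpleNamespace(identifiers=['id2', 'id3'])

print(diff_identifiers(a, b))
# [('id1', True, False), ('id3', False, True)]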
repo: annoviko/pyclustering
path: pyclustering/cluster/clique.py
url: https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/clique.py#L704-L730
code:
def __create_grid(self):
    """!
    @brief Creates CLIQUE grid that consists of CLIQUE blocks for clustering process.

    """
    data_sizes, min_corner, max_corner = self.__get_data_size_derscription()
    dimension = len(self.__data[0])

    cell_sizes = [dimension_length / self.__amount_intervals
                  for dimension_length in data_sizes]

    self.__cells = [clique_block()
                    for _ in range(pow(self.__amount_intervals, dimension))]
    iterator = coordinate_iterator(dimension, self.__amount_intervals)

    point_availability = [True] * len(self.__data)
    self.__cell_map = {}
    for index_cell in range(len(self.__cells)):
        logical_location = iterator.get_coordinate()
        iterator.increment()

        self.__cells[index_cell].logical_location = logical_location[:]

        cur_max_corner, cur_min_corner = self.__get_spatial_location(
            logical_location, min_corner, max_corner, cell_sizes)
        self.__cells[index_cell].spatial_location = spatial_block(
            cur_max_corner, cur_min_corner)

        self.__cells[index_cell].capture_points(self.__data, point_availability)

        self.__cell_map[self.__location_to_key(logical_location)] = self.__cells[index_cell]
language: python | partition: valid | avg_line_len: 46.407407
repo: benknight/hue-python-rgb-converter
path: rgbxy/__init__.py
url: https://github.com/benknight/hue-python-rgb-converter/blob/76dd70eac7a56a1260fd94a52cca3991cd57dff0/rgbxy/__init__.py#L228-L233
code:
def hex_to_xy(self, h):
    """Converts hexadecimal colors represented as a String to approximate CIE
    1931 x and y coordinates.
    """
    rgb = self.color.hex_to_rgb(h)
    return self.rgb_to_xy(rgb[0], rgb[1], rgb[2])
language: python | partition: train | avg_line_len: 39.833333
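A standalone sketch of the hex -> RGB step this conversion relies on; the xy projection itself needs the library's gamut math, so only the parsing half is shown, and the helper name is hypothetical.

def hex_to_rgb(h):
    # strip an optional leading '#', then read three two-digit hex channels
    h = h.lstrip('#')
    return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))

print(hex_to_rgb('ff0000'))  # (255, 0, 0)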
repo: istommao/django-simditor
path: simditor/image_processing.py
url: https://github.com/istommao/django-simditor/blob/1d9fe00481f463c67f88d73ec6593a721f5fb469/simditor/image_processing.py#L7-L15
code:
def get_backend():
    """Get backend."""
    backend = getattr(settings, 'SIMDITOR_IMAGE_BACKEND', None)

    if backend == 'pillow':
        from simditor.image import pillow_backend as backend
    else:
        from simditor.image import dummy_backend as backend
    return backend
language: python | partition: train | avg_line_len: 30.666667
repo: googleapis/google-cloud-python
path: api_core/google/api_core/protobuf_helpers.py
url: https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/protobuf_helpers.py#L150-L198
code:
def get(msg_or_dict, key, default=_SENTINEL):
    """Retrieve a key's value from a protobuf Message or dictionary.

    Args:
        msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
            object.
        key (str): The key to retrieve from the object.
        default (Any): If the key is not present on the object, and a default
            is set, returns that default instead. A type-appropriate falsy
            default is generally recommended, as protobuf messages almost
            always have default values for unset values and it is not always
            possible to tell the difference between a falsy value and an
            unset one. If no default is set then :class:`KeyError` will be
            raised if the key is not present in the object.

    Returns:
        Any: The return value from the underlying Message or dict.

    Raises:
        KeyError: If the key is not found. Note that, for unset values,
            messages and dictionaries may not have consistent behavior.
        TypeError: If ``msg_or_dict`` is not a Message or Mapping.
    """
    # We may need to get a nested key. Resolve this.
    key, subkey = _resolve_subkeys(key)

    # Attempt to get the value from the two types of objects we know about.
    # If we get something else, complain.
    if isinstance(msg_or_dict, message.Message):
        answer = getattr(msg_or_dict, key, default)
    elif isinstance(msg_or_dict, collections_abc.Mapping):
        answer = msg_or_dict.get(key, default)
    else:
        raise TypeError(
            "get() expected a dict or protobuf message, got {!r}.".format(
                type(msg_or_dict)
            )
        )

    # If the object we got back is our sentinel, raise KeyError; this is
    # a "not found" case.
    if answer is _SENTINEL:
        raise KeyError(key)

    # If a subkey exists, call this method recursively against the answer.
    if subkey is not None and answer is not default:
        return get(answer, subkey, default=default)

    return answer
language: python | partition: train | avg_line_len: 40.755102
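A sketch of nested lookup with the helper above, assuming it is in scope: plain dicts exercise the Mapping branch, and dotted keys recurse through _resolve_subkeys.

msg = {'a': {'b': 1}}

print(get(msg, 'a.b'))             # 1
print(get(msg, 'a.c', default=0))  # 0 (default propagates to the subkey)
try:
    get(msg, 'missing')
except KeyError as exc:
    print('not found:', exc)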
repo: twisted/axiom
path: axiom/item.py
url: https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/item.py#L1094-L1126
code:
def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
    """
    Generate a dummy subclass of Item that will have the given attributes,
    and the base Item methods, but no methods of its own.  This is for use
    with upgrading.

    @param typeName: a string, the Axiom TypeName to have attributes for.
    @param schemaVersion: an int, the (old) version of the schema this is a
        proxy for.
    @param attributes: a dict mapping {columnName: attr instance} describing
        the schema of C{typeName} at C{schemaVersion}.
    @param dummyBases: a sequence of 4-tuples of (baseTypeName,
        baseSchemaVersion, baseAttributes, baseBases) representing the dummy
        bases of this legacy class.
    """
    if (typeName, schemaVersion) in _legacyTypes:
        return _legacyTypes[typeName, schemaVersion]
    if dummyBases:
        realBases = [declareLegacyItem(*A) for A in dummyBases]
    else:
        realBases = (Item,)
    attributes = attributes.copy()
    attributes['__module__'] = 'item_dummy'
    attributes['__legacy__'] = True
    attributes['typeName'] = typeName
    attributes['schemaVersion'] = schemaVersion
    result = type(str('DummyItem<%s,%d>' % (typeName, schemaVersion)),
                  realBases, attributes)
    assert result is not None, 'wtf, %r' % (type,)
    _legacyTypes[(typeName, schemaVersion)] = result
    return result
language: python | partition: train | avg_line_len: 41.484848
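The core mechanism above is the three-argument type() call; a standalone sketch of building a class with a computed name and injected attributes:

# dynamically build a class: type(name, bases, attributes)
attributes = {'typeName': 'old_item', 'schemaVersion': 2, '__legacy__': True}
Dummy = type(str('DummyItem<%s,%d>' % ('old_item', 2)), (object,), attributes)

d = Dummy()
print(type(d).__name__, d.schemaVersion)  # DummyItem<old_item,2> 2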
repo: SHTOOLS/SHTOOLS
path: pyshtools/shclasses/shmagcoeffs.py
url: https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shmagcoeffs.py#L611-L643
code:
def set_coeffs(self, values, ls, ms):
    """
    Set spherical harmonic coefficients in-place to specified values.

    Usage
    -----
    x.set_coeffs(values, ls, ms)

    Parameters
    ----------
    values : float (list)
        The value(s) of the spherical harmonic coefficient(s).
    ls : int (list)
        The degree(s) of the coefficient(s) that should be set.
    ms : int (list)
        The order(s) of the coefficient(s) that should be set. Positive
        and negative values correspond to the cosine and sine
        components, respectively.

    Examples
    --------
    x.set_coeffs(10., 1, 1)                 # x.coeffs[0, 1, 1] = 10.
    x.set_coeffs(5., 1, -1)                 # x.coeffs[1, 1, 1] = 5.
    x.set_coeffs([1., 2], [1, 2], [0, -2])  # x.coeffs[0, 1, 0] = 1.
                                            # x.coeffs[1, 2, 2] = 2.
    """
    # Ensure that the type is correct
    values = _np.array(values)
    ls = _np.array(ls)
    ms = _np.array(ms)

    mneg_mask = (ms < 0).astype(_np.int)
    self.coeffs[mneg_mask, ls, _np.abs(ms)] = values
language: python | partition: train | avg_line_len: 35.484848
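A standalone numpy sketch of the indexing trick above: plane 0 holds the cosine (m >= 0) terms, plane 1 the sine (m < 0) terms, and the sign mask selects between them in one fancy-indexed assignment.

import numpy as np

coeffs = np.zeros((2, 3, 3))
values = np.array([10., 5.])
ls = np.array([1, 1])
ms = np.array([1, -1])

mneg_mask = (ms < 0).astype(int)  # 0 for cosine terms, 1 for sine terms
coeffs[mneg_mask, ls, np.abs(ms)] = values
print(coeffs[0, 1, 1], coeffs[1, 1, 1])  # 10.0 5.0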
repo: classner/pymp
path: pymp/__init__.py
url: https://github.com/classner/pymp/blob/9895ec2ec01ad2778a400449cbfa17f162491180/pymp/__init__.py#L230-L252
code:
def iterate(self, iterable, element_timeout=None):
    """
    Iterate over an iterable.

    The iterator is executed in the host thread. The threads dynamically
    grab the elements. The iterator elements must hence be picklable to
    be transferred through the queue.

    If there is only one thread, no special operations are performed.
    Otherwise, effectively n-1 threads are used to process the iterable
    elements, and the host thread is used to provide them. You can
    specify a timeout for the clients to adhere.
    """
    self._assert_active()
    with self._queuelock:
        # Get this loop id.
        self._thread_loop_ids[self._thread_num] += 1
        loop_id = self._thread_loop_ids[self._thread_num]
        # Iterate.
        return _IterableQueueIterator(
            self._iter_queue, loop_id, self, iterable, element_timeout
        )
language: python | partition: train | avg_line_len: 40.173913
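A hedged usage sketch following the pymp README pattern (requires the pymp package and a fork-capable platform); the elements are handed out dynamically across the workers, as the docstring above describes.

import pymp

with pymp.Parallel(2) as p:
    for item in p.iterate(['a', 'b', 'c', 'd']):
        print(p.thread_num, item)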
repo: merll/docker-map
path: dockermap/map/runner/base.py
url: https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/runner/base.py#L64-L78
code:
def remove_network(self, action, n_name, **kwargs):
    """
    Removes a network.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param n_name: Network name or id.
    :type n_name: unicode | str
    :param kwargs: Additional keyword arguments.
    :type kwargs: dict
    """
    c_kwargs = self.get_network_remove_kwargs(action, n_name, **kwargs)
    res = action.client.remove_network(**c_kwargs)
    del self._policy.network_names[action.client_name][n_name]
    return res
language: python | partition: train | avg_line_len: 37.733333
repo: hazelcast/hazelcast-python-client
path: hazelcast/serialization/data.py
url: https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/serialization/data.py#L56-L67
code:
def get_partition_hash(self):
    """
    Returns partition hash calculated for serialized object.
    Partition hash is used to determine partition of a Data and is
    calculated using
    * PartitioningStrategy during serialization.
    * If partition hash is not set then hash_code() is used.

    :return: partition hash
    """
    if self.has_partition_hash():
        return unpack_from(FMT_BE_INT, self._buffer, PARTITION_HASH_OFFSET)[0]
    return self.hash_code()
language: python | partition: train | avg_line_len: 43.25
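A standalone sketch of the header read above: a big-endian int32 unpacked from a fixed offset in a byte buffer. The offset and layout here are illustrative, not Hazelcast's actual wire format.

from struct import unpack_from

FMT_BE_INT = '>i'          # big-endian signed 32-bit integer
PARTITION_HASH_OFFSET = 8  # illustrative offset

buf = bytes(8) + (12345).to_bytes(4, 'big')
print(unpack_from(FMT_BE_INT, buf, PARTITION_HASH_OFFSET)[0])  # 12345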
repo: pyviz/holoviews
path: holoviews/plotting/util.py
url: https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/util.py#L977-L982
code:
def traverse_setter(obj, attribute, value):
    """
    Traverses the object and sets the supplied attribute on the
    object. Supports Dimensioned and DimensionedPlot types.
    """
    obj.traverse(lambda x: setattr(x, attribute, value))
language: python | partition: train | avg_line_len: 39.166667
repo: f3at/feat
path: src/feat/agencies/net/ssh.py
url: https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agencies/net/ssh.py#L128-L138
code:
def restart_agent(self, agent_id, **kwargs):
    '''tells the host agent running in this agency to restart the agent.'''
    host_medium = self.get_medium('host_agent')
    agent = host_medium.get_agent()
    d = host_medium.get_document(agent_id)
    # This is done like this on purpose, we want to ensure that document
    # exists before passing it to the agent (even though he would handle
    # this himself).
    d.addCallback(
        lambda desc: agent.start_agent(desc.doc_id, **kwargs))
    return d
language: python | partition: train | avg_line_len: 49
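A standalone sketch of the Deferred-chaining pattern above: Twisted's addCallback fires only after the preceding lookup has produced a result.

from twisted.internet import defer

d = defer.succeed({'doc_id': 'agent-1'})  # stands in for get_document()
d.addCallback(lambda desc: 'started %s' % desc['doc_id'])
d.addCallback(print)  # started agent-1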
repo: sibirrer/lenstronomy
path: lenstronomy/Util/kernel_util.py
url: https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/kernel_util.py#L196-L205
code:
def cut_psf(psf_data, psf_size):
    """
    cut the psf properly

    :param psf_data: image of PSF
    :param psf_size: size of psf
    :return: re-sized and re-normalized PSF
    """
    kernel = image_util.cut_edges(psf_data, psf_size)
    kernel = kernel_norm(kernel)
    return kernel
language: python | partition: train | avg_line_len: 28
repo: konstantinstadler/pymrio
path: pymrio/core/mriosystem.py
url: https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1239-L1252
code:
def get_rows(self):
    """ Returns the name of the rows of the extension"""
    possible_dataframes = ['F', 'FY', 'M', 'S',
                           'D_cba', 'D_pba', 'D_imp', 'D_exp',
                           'D_cba_reg', 'D_pba_reg',
                           'D_imp_reg', 'D_exp_reg',
                           'D_cba_cap', 'D_pba_cap',
                           'D_imp_cap', 'D_exp_cap', ]
    for df in possible_dataframes:
        if (df in self.__dict__) and (getattr(self, df) is not None):
            return getattr(self, df).index.get_values()
    else:
        # for/else: only reached when no dataframe returned above
        logging.warn("No attributes available to get row names")
        return None
language: python | partition: train | avg_line_len: 49.785714
repo: niklasb/webkit-server
path: webkit_server.py
url: https://github.com/niklasb/webkit-server/blob/c9e3a8394b8c51000c35f8a56fb770580562b544/webkit_server.py#L262-L272
code:
def headers(self):
    """ Returns a list of the last HTTP response headers.
    Header keys are normalized to capitalized form, as in `User-Agent`.
    """
    headers = self.conn.issue_command("Headers")
    res = []
    for header in headers.split("\r"):
        key, value = header.split(": ", 1)
        for line in value.split("\n"):
            res.append((_normalize_header(key), line))
    return res
language: python | partition: train | avg_line_len: 35.545455
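A standalone sketch of the parsing above, fed a literal response in the same wire format ("\r" between headers); _normalize_header is a hypothetical stand-in for the library's helper.

def _normalize_header(key):
    # capitalize each dash-separated part, e.g. user-agent -> User-Agent
    return '-'.join(part.capitalize() for part in key.split('-'))

raw = 'content-type: text/html\ruser-agent: test'
res = []
for header in raw.split('\r'):
    key, value = header.split(': ', 1)
    for line in value.split('\n'):
        res.append((_normalize_header(key), line))
print(res)  # [('Content-Type', 'text/html'), ('User-Agent', 'test')]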
repo: eqcorrscan/EQcorrscan
path: eqcorrscan/utils/plotting.py
url: https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L1156-L1231
code:
def noise_plot(signal, noise, normalise=False, **kwargs):
    """
    Plot signal and noise fourier transforms and the difference.

    :type signal: `obspy.core.stream.Stream`
    :param signal: Stream of "signal" window
    :type noise: `obspy.core.stream.Stream`
    :param noise: Stream of the "noise" window.
    :type normalise: bool
    :param normalise: Whether to normalise the data before plotting or not.

    :return: `matplotlib.pyplot.Figure`
    """
    import matplotlib.pyplot as plt

    # Work out how many traces we can plot
    n_traces = 0
    for tr in signal:
        try:
            noise.select(id=tr.id)[0]
        except IndexError:  # pragma: no cover
            continue
        n_traces += 1

    fig, axes = plt.subplots(n_traces, 2, sharex=True)
    if len(signal) > 1:
        axes = axes.ravel()
    i = 0
    lines = []
    labels = []
    for tr in signal:
        try:
            noise_tr = noise.select(id=tr.id)[0]
        except IndexError:  # pragma: no cover
            continue
        ax1 = axes[i]
        ax2 = axes[i + 1]
        fft_len = fftpack.next_fast_len(
            max(noise_tr.stats.npts, tr.stats.npts))
        if not normalise:
            signal_fft = fftpack.rfft(tr.data, fft_len)
            noise_fft = fftpack.rfft(noise_tr.data, fft_len)
        else:
            signal_fft = fftpack.rfft(tr.data / max(tr.data), fft_len)
            noise_fft = fftpack.rfft(
                noise_tr.data / max(noise_tr.data), fft_len)
        frequencies = np.linspace(0, 1 / (2 * tr.stats.delta), fft_len // 2)
        noise_line, = ax1.semilogy(
            frequencies, 2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2]),
            'k', label="noise")
        signal_line, = ax1.semilogy(
            frequencies, 2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2]),
            'r', label="signal")
        if "signal" not in labels:
            labels.append("signal")
            lines.append(signal_line)
        if "noise" not in labels:
            labels.append("noise")
            lines.append(noise_line)
        ax1.set_ylabel(tr.id, rotation=0, horizontalalignment='right')
        ax2.plot(
            frequencies,
            (2.0 / fft_len * np.abs(signal_fft[0: fft_len // 2])) -
            (2.0 / fft_len * np.abs(noise_fft[0: fft_len // 2])), 'k')
        ax2.yaxis.tick_right()
        ax2.set_ylim(bottom=0)
        i += 2
    axes[-1].set_xlabel("Frequency (Hz)")
    axes[-2].set_xlabel("Frequency (Hz)")
    axes[0].set_title("Spectra")
    axes[1].set_title("Signal - noise")
    plt.figlegend(lines, labels, 'upper left')
    plt.tight_layout()
    plt.subplots_adjust(hspace=0)
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
[ "def", "noise_plot", "(", "signal", ",", "noise", ",", "normalise", "=", "False", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "# Work out how many traces we can plot", "n_traces", "=", "0", "for", "tr", "in", "signal", ":", "try", ":", "noise", ".", "select", "(", "id", "=", "tr", ".", "id", ")", "[", "0", "]", "except", "IndexError", ":", "# pragma: no cover", "continue", "n_traces", "+=", "1", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "n_traces", ",", "2", ",", "sharex", "=", "True", ")", "if", "len", "(", "signal", ")", ">", "1", ":", "axes", "=", "axes", ".", "ravel", "(", ")", "i", "=", "0", "lines", "=", "[", "]", "labels", "=", "[", "]", "for", "tr", "in", "signal", ":", "try", ":", "noise_tr", "=", "noise", ".", "select", "(", "id", "=", "tr", ".", "id", ")", "[", "0", "]", "except", "IndexError", ":", "# pragma: no cover", "continue", "ax1", "=", "axes", "[", "i", "]", "ax2", "=", "axes", "[", "i", "+", "1", "]", "fft_len", "=", "fftpack", ".", "next_fast_len", "(", "max", "(", "noise_tr", ".", "stats", ".", "npts", ",", "tr", ".", "stats", ".", "npts", ")", ")", "if", "not", "normalise", ":", "signal_fft", "=", "fftpack", ".", "rfft", "(", "tr", ".", "data", ",", "fft_len", ")", "noise_fft", "=", "fftpack", ".", "rfft", "(", "noise_tr", ".", "data", ",", "fft_len", ")", "else", ":", "signal_fft", "=", "fftpack", ".", "rfft", "(", "tr", ".", "data", "/", "max", "(", "tr", ".", "data", ")", ",", "fft_len", ")", "noise_fft", "=", "fftpack", ".", "rfft", "(", "noise_tr", ".", "data", "/", "max", "(", "noise_tr", ".", "data", ")", ",", "fft_len", ")", "frequencies", "=", "np", ".", "linspace", "(", "0", ",", "1", "/", "(", "2", "*", "tr", ".", "stats", ".", "delta", ")", ",", "fft_len", "//", "2", ")", "noise_line", ",", "=", "ax1", ".", "semilogy", "(", "frequencies", ",", "2.0", "/", "fft_len", "*", "np", ".", "abs", "(", "noise_fft", "[", "0", ":", "fft_len", "//", "2", "]", ")", ",", "'k'", ",", "label", "=", "\"noise\"", ")", "signal_line", ",", "=", "ax1", ".", "semilogy", "(", "frequencies", ",", "2.0", "/", "fft_len", "*", "np", ".", "abs", "(", "signal_fft", "[", "0", ":", "fft_len", "//", "2", "]", ")", ",", "'r'", ",", "label", "=", "\"signal\"", ")", "if", "\"signal\"", "not", "in", "labels", ":", "labels", ".", "append", "(", "\"signal\"", ")", "lines", ".", "append", "(", "signal_line", ")", "if", "\"noise\"", "not", "in", "labels", ":", "labels", ".", "append", "(", "\"noise\"", ")", "lines", ".", "append", "(", "noise_line", ")", "ax1", ".", "set_ylabel", "(", "tr", ".", "id", ",", "rotation", "=", "0", ",", "horizontalalignment", "=", "'right'", ")", "ax2", ".", "plot", "(", "frequencies", ",", "(", "2.0", "/", "fft_len", "*", "np", ".", "abs", "(", "signal_fft", "[", "0", ":", "fft_len", "//", "2", "]", ")", ")", "-", "(", "2.0", "/", "fft_len", "*", "np", ".", "abs", "(", "noise_fft", "[", "0", ":", "fft_len", "//", "2", "]", ")", ")", ",", "'k'", ")", "ax2", ".", "yaxis", ".", "tick_right", "(", ")", "ax2", ".", "set_ylim", "(", "bottom", "=", "0", ")", "i", "+=", "2", "axes", "[", "-", "1", "]", ".", "set_xlabel", "(", "\"Frequency (Hz)\"", ")", "axes", "[", "-", "2", "]", ".", "set_xlabel", "(", "\"Frequency (Hz)\"", ")", "axes", "[", "0", "]", ".", "set_title", "(", "\"Spectra\"", ")", "axes", "[", "1", "]", ".", "set_title", "(", "\"Signal - noise\"", ")", "plt", ".", "figlegend", "(", "lines", ",", "labels", ",", "'upper left'", ")", "plt", ".", "tight_layout", "(", ")", "plt", ".", "subplots_adjust", 
"(", "hspace", "=", "0", ")", "fig", "=", "_finalise_figure", "(", "fig", "=", "fig", ",", "*", "*", "kwargs", ")", "# pragma: no cover", "return", "fig" ]
Plot signal and noise Fourier transforms and the difference. :type signal: `obspy.core.stream.Stream` :param signal: Stream of the "signal" window. :type noise: `obspy.core.stream.Stream` :param noise: Stream of the "noise" window. :type normalise: bool :param normalise: Whether to normalise the data before plotting or not. :return: `matplotlib.pyplot.Figure`
[ "Plot", "signal", "and", "noise", "fourier", "transforms", "and", "the", "difference", "." ]
python
train
35.210526
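A usage sketch for the record above, with `noise_plot` imported from its host package and obspy installed. The five-second windows are arbitrary, and passing `show=True` assumes the `_finalise_figure` helper that receives the `**kwargs` understands it:

```python
from obspy import read  # obspy ships a small three-trace example stream

st = read()
t0 = st[0].stats.starttime
noise = st.copy().trim(t0, t0 + 5)        # first 5 s taken as "noise"
signal = st.copy().trim(t0 + 5, t0 + 10)  # next 5 s taken as "signal"

fig = noise_plot(signal, noise, normalise=True, show=True)
```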
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L6001-L6010
def getAnalogActionData(self, action, unActionDataSize, ulRestrictToDevice): """ Reads the state of an analog action given its handle. This will return VRInputError_WrongType if the type of action is something other than analog """ fn = self.function_table.getAnalogActionData pActionData = InputAnalogActionData_t() result = fn(action, byref(pActionData), unActionDataSize, ulRestrictToDevice) return result, pActionData
[ "def", "getAnalogActionData", "(", "self", ",", "action", ",", "unActionDataSize", ",", "ulRestrictToDevice", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getAnalogActionData", "pActionData", "=", "InputAnalogActionData_t", "(", ")", "result", "=", "fn", "(", "action", ",", "byref", "(", "pActionData", ")", ",", "unActionDataSize", ",", "ulRestrictToDevice", ")", "return", "result", ",", "pActionData" ]
Reads the state of an analog action given its handle. This will return VRInputError_WrongType if the type of action is something other than analog
[ "Reads", "the", "state", "of", "an", "analog", "action", "given", "its", "handle", ".", "This", "will", "return", "VRInputError_WrongType", "if", "the", "type", "of", "action", "is", "something", "other", "than", "analog" ]
python
train
47.7
marrow/package
marrow/package/loader.py
https://github.com/marrow/package/blob/133d4bf67cc857d1b2423695938a00ff2dfa8af2/marrow/package/loader.py#L8-L54
def traverse(obj, target:str, default=nodefault, executable:bool=False, separator:str='.', protect:bool=True): """Traverse down an object, using getattr or getitem. If ``executable`` is ``True`` any executable function encountered will be, with no arguments. Traversal will continue on the result of that call. You can change the separator as desired, i.e. to a '/'. By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve, raising a LookupError. Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup very flexible. """ # TODO: Support numerical slicing, i.e. ``1:4``, or even just ``:-1`` and things. assert check_argument_types() value = obj remainder = target if not target: return obj while separator: name, separator, remainder = remainder.partition(separator) numeric = name.lstrip('-').isdigit() try: if numeric or (protect and name.startswith('_')): raise AttributeError() value = getattr(value, name) if executable and callable(value): value = value() except AttributeError: try: value = value[int(name) if numeric else name] except (KeyError, TypeError): if default is nodefault: raise LookupError("Could not resolve '" + target + "' on: " + repr(obj)) return default return value
[ "def", "traverse", "(", "obj", ",", "target", ":", "str", ",", "default", "=", "nodefault", ",", "executable", ":", "bool", "=", "False", ",", "separator", ":", "str", "=", "'.'", ",", "protect", ":", "bool", "=", "True", ")", ":", "# TODO: Support numerical slicing, i.e. ``1:4``, or even just ``:-1`` and things.", "assert", "check_argument_types", "(", ")", "value", "=", "obj", "remainder", "=", "target", "if", "not", "target", ":", "return", "obj", "while", "separator", ":", "name", ",", "separator", ",", "remainder", "=", "remainder", ".", "partition", "(", "separator", ")", "numeric", "=", "name", ".", "lstrip", "(", "'-'", ")", ".", "isdigit", "(", ")", "try", ":", "if", "numeric", "or", "(", "protect", "and", "name", ".", "startswith", "(", "'_'", ")", ")", ":", "raise", "AttributeError", "(", ")", "value", "=", "getattr", "(", "value", ",", "name", ")", "if", "executable", "and", "callable", "(", "value", ")", ":", "value", "=", "value", "(", ")", "except", "AttributeError", ":", "try", ":", "value", "=", "value", "[", "int", "(", "name", ")", "if", "numeric", "else", "name", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "if", "default", "is", "nodefault", ":", "raise", "LookupError", "(", "\"Could not resolve '\"", "+", "target", "+", "\"' on: \"", "+", "repr", "(", "obj", ")", ")", "return", "default", "return", "value" ]
Traverse down an object, using getattr or getitem. If ``executable`` is ``True`` any executable function encountered will be called, with no arguments. Traversal will continue on the result of that call. You can change the separator as desired, i.e. to a '/'. By default attributes (but not array elements) prefixed with an underscore are taboo. They will not resolve, raising a LookupError. Certain allowances are made: if a 'path segment' is numerical, it's treated as an array index. If attribute lookup fails, it will re-try on that object using array notation and continue from there. This makes lookup very flexible.
[ "Traverse", "down", "an", "object", "using", "getattr", "or", "getitem", ".", "If", "executable", "is", "True", "any", "executable", "function", "encountered", "will", "be", "with", "no", "arguments", ".", "Traversal", "will", "continue", "on", "the", "result", "of", "that", "call", ".", "You", "can", "change", "the", "separator", "as", "desired", "i", ".", "e", ".", "to", "a", "/", ".", "By", "default", "attributes", "(", "but", "not", "array", "elements", ")", "prefixed", "with", "an", "underscore", "are", "taboo", ".", "They", "will", "not", "resolve", "raising", "a", "LookupError", ".", "Certain", "allowances", "are", "made", ":", "if", "a", "path", "segment", "is", "numerical", "it", "s", "treated", "as", "an", "array", "index", ".", "If", "attribute", "lookup", "fails", "it", "will", "re", "-", "try", "on", "that", "object", "using", "array", "notation", "and", "continue", "from", "there", ".", "This", "makes", "lookup", "very", "flexible", "." ]
python
test
31.212766
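A hedged usage sketch for `traverse`, assuming marrow.package (and its typeguard dependency) is installed; `Ctx` and the container are throwaway fixtures. It exercises attribute access, the getitem fallback, numeric (including negative) indices, and the underscore protection:

```python
from marrow.package.loader import traverse

class Ctx:
    items = ["a", "b", "c"]
    _secret = "hidden"

obj = {"ctx": Ctx(), "nums": [10, 20, 30]}

print(traverse(obj, "ctx.items.1"))                # 'b'  (attribute, then index)
print(traverse(obj, "nums.-1"))                    # 30   (negative index counts as numeric)
print(traverse(obj, "ctx._secret", default=None))  # None (underscore segment is protected)
```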
StackStorm/pybind
pybind/slxos/v17s_1_02/overlay/access_list/type/vxlan/standard/seq/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/overlay/access_list/type/vxlan/standard/seq/__init__.py#L173-L194
def _set_dst_vtep_ip(self, v, load=False): """ Setter method for dst_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip (inet:ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_dst_vtep_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dst_vtep_ip() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip-host", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: A.B.C.D', u'alt-name': u'dst-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """dst_vtep_ip must be of a type compatible with inet:ipv4-address""", 'defined-type': "inet:ipv4-address", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip-host", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: A.B.C.D', u'alt-name': u'dst-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)""", }) self.__dst_vtep_ip = t if hasattr(self, '_set'): self._set()
[ "def", "_set_dst_vtep_ip", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_dict", "=", "{", "'pattern'", ":", "u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\\\p{N}\\\\p{L}]+)?'", "}", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"dst-vtep-ip\"", ",", "rest_name", "=", "\"dst-vtep-ip-host\"", ",", "parent", "=", "self", ",", "choice", "=", "(", "u'choice-dst-vtep-ip'", ",", "u'case-dst-vtep-ip'", ")", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'dst vtep ip address: A.B.C.D'", ",", "u'alt-name'", ":", "u'dst-vtep-ip-host'", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'cli-suppress-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-vxlan-visibility'", ",", "defining_module", "=", "'brocade-vxlan-visibility'", ",", "yang_type", "=", "'inet:ipv4-address'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"dst_vtep_ip must be of a type compatible with inet:ipv4-address\"\"\"", ",", "'defined-type'", ":", "\"inet:ipv4-address\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\\\p{N}\\\\p{L}]+)?'}), is_leaf=True, yang_name=\"dst-vtep-ip\", rest_name=\"dst-vtep-ip-host\", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: A.B.C.D', u'alt-name': u'dst-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__dst_vtep_ip", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for dst_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip (inet:ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_dst_vtep_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dst_vtep_ip() directly.
[ "Setter", "method", "for", "dst_vtep_ip", "mapped", "from", "YANG", "variable", "/", "overlay", "/", "access_list", "/", "type", "/", "vxlan", "/", "standard", "/", "seq", "/", "dst_vtep_ip", "(", "inet", ":", "ipv4", "-", "address", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_dst_vtep_ip", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_dst_vtep_ip", "()", "directly", "." ]
python
train
105.863636
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L983-L987
def fix_e502(self, result): """Remove extraneous escape of newline.""" (line_index, _, target) = get_index_offset_contents(result, self.source) self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
[ "def", "fix_e502", "(", "self", ",", "result", ")", ":", "(", "line_index", ",", "_", ",", "target", ")", "=", "get_index_offset_contents", "(", "result", ",", "self", ".", "source", ")", "self", ".", "source", "[", "line_index", "]", "=", "target", ".", "rstrip", "(", "'\\n\\r \\t\\\\'", ")", "+", "'\\n'" ]
Remove extraneous escape of newline.
[ "Remove", "extraneous", "escape", "of", "newline", "." ]
python
train
56.6
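A self-contained sketch of the same transformation outside autopep8's fixer machinery: pycodestyle's E502 flags a backslash before a newline inside brackets, where continuation is already implicit, so the fix is simply stripping the trailing escape:

```python
def remove_newline_escape(line):
    """Drop a redundant trailing backslash before the newline (E502)."""
    return line.rstrip('\n\r \t\\') + '\n'

assert remove_newline_escape('x = (1 +  \\\n') == 'x = (1 +\n'
```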
xolox/python-vcs-repo-mgr
vcs_repo_mgr/backends/git.py
https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/backends/git.py#L242-L252
def find_revision_number(self, revision=None): """Find the local revision number of the given revision.""" # Make sure the local repository exists. self.create() # Try to find the revision number of the specified revision. revision = self.expand_branch_name(revision) output = self.context.capture('git', 'rev-list', revision, '--count') if not (output and output.isdigit()): msg = "Failed to find local revision number! ('git rev-list --count' gave unexpected output)" raise ValueError(msg) return int(output)
[ "def", "find_revision_number", "(", "self", ",", "revision", "=", "None", ")", ":", "# Make sure the local repository exists.", "self", ".", "create", "(", ")", "# Try to find the revision number of the specified revision.", "revision", "=", "self", ".", "expand_branch_name", "(", "revision", ")", "output", "=", "self", ".", "context", ".", "capture", "(", "'git'", ",", "'rev-list'", ",", "revision", ",", "'--count'", ")", "if", "not", "(", "output", "and", "output", ".", "isdigit", "(", ")", ")", ":", "msg", "=", "\"Failed to find local revision number! ('git rev-list --count' gave unexpected output)\"", "raise", "ValueError", "(", "msg", ")", "return", "int", "(", "output", ")" ]
Find the local revision number of the given revision.
[ "Find", "the", "local", "revision", "number", "of", "the", "given", "revision", "." ]
python
train
53.454545
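The method boils down to a single git invocation. A standalone equivalent, with hypothetical names, using subprocess in place of the repository's `context.capture` wrapper:

```python
import subprocess

def commit_count(revision="HEAD", cwd="."):
    """Count commits reachable from `revision`, per 'git rev-list --count'."""
    output = subprocess.check_output(
        ["git", "rev-list", revision, "--count"], cwd=cwd, text=True,
    ).strip()
    if not output.isdigit():
        raise ValueError("unexpected 'git rev-list --count' output: %r" % output)
    return int(output)
```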
closeio/tasktiger
tasktiger/__init__.py
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/__init__.py#L242-L297
def task(self, _fn=None, queue=None, hard_timeout=None, unique=None, lock=None, lock_key=None, retry=None, retry_on=None, retry_method=None, schedule=None, batch=False, max_queue_size=None): """ Function decorator that defines the behavior of the function when it is used as a task. To use the default behavior, tasks don't need to be decorated. See README.rst for an explanation of the options. """ def _delay(func): def _delay_inner(*args, **kwargs): return self.delay(func, args=args, kwargs=kwargs) return _delay_inner # Periodic tasks are unique. if schedule is not None: unique = True def _wrap(func): if hard_timeout is not None: func._task_hard_timeout = hard_timeout if queue is not None: func._task_queue = queue if unique is not None: func._task_unique = unique if lock is not None: func._task_lock = lock if lock_key is not None: func._task_lock_key = lock_key if retry is not None: func._task_retry = retry if retry_on is not None: func._task_retry_on = retry_on if retry_method is not None: func._task_retry_method = retry_method if batch is not None: func._task_batch = batch if schedule is not None: func._task_schedule = schedule if max_queue_size is not None: func._task_max_queue_size = max_queue_size func.delay = _delay(func) if schedule is not None: serialized_func = serialize_func_name(func) assert serialized_func not in self.periodic_task_funcs, \ "attempted duplicate registration of periodic task" self.periodic_task_funcs[serialized_func] = func return func return _wrap if _fn is None else _wrap(_fn)
[ "def", "task", "(", "self", ",", "_fn", "=", "None", ",", "queue", "=", "None", ",", "hard_timeout", "=", "None", ",", "unique", "=", "None", ",", "lock", "=", "None", ",", "lock_key", "=", "None", ",", "retry", "=", "None", ",", "retry_on", "=", "None", ",", "retry_method", "=", "None", ",", "schedule", "=", "None", ",", "batch", "=", "False", ",", "max_queue_size", "=", "None", ")", ":", "def", "_delay", "(", "func", ")", ":", "def", "_delay_inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "delay", "(", "func", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "return", "_delay_inner", "# Periodic tasks are unique.", "if", "schedule", "is", "not", "None", ":", "unique", "=", "True", "def", "_wrap", "(", "func", ")", ":", "if", "hard_timeout", "is", "not", "None", ":", "func", ".", "_task_hard_timeout", "=", "hard_timeout", "if", "queue", "is", "not", "None", ":", "func", ".", "_task_queue", "=", "queue", "if", "unique", "is", "not", "None", ":", "func", ".", "_task_unique", "=", "unique", "if", "lock", "is", "not", "None", ":", "func", ".", "_task_lock", "=", "lock", "if", "lock_key", "is", "not", "None", ":", "func", ".", "_task_lock_key", "=", "lock_key", "if", "retry", "is", "not", "None", ":", "func", ".", "_task_retry", "=", "retry", "if", "retry_on", "is", "not", "None", ":", "func", ".", "_task_retry_on", "=", "retry_on", "if", "retry_method", "is", "not", "None", ":", "func", ".", "_task_retry_method", "=", "retry_method", "if", "batch", "is", "not", "None", ":", "func", ".", "_task_batch", "=", "batch", "if", "schedule", "is", "not", "None", ":", "func", ".", "_task_schedule", "=", "schedule", "if", "max_queue_size", "is", "not", "None", ":", "func", ".", "_task_max_queue_size", "=", "max_queue_size", "func", ".", "delay", "=", "_delay", "(", "func", ")", "if", "schedule", "is", "not", "None", ":", "serialized_func", "=", "serialize_func_name", "(", "func", ")", "assert", "serialized_func", "not", "in", "self", ".", "periodic_task_funcs", ",", "\"attempted duplicate registration of periodic task\"", "self", ".", "periodic_task_funcs", "[", "serialized_func", "]", "=", "func", "return", "func", "return", "_wrap", "if", "_fn", "is", "None", "else", "_wrap", "(", "_fn", ")" ]
Function decorator that defines the behavior of the function when it is used as a task. To use the default behavior, tasks don't need to be decorated. See README.rst for an explanation of the options.
[ "Function", "decorator", "that", "defines", "the", "behavior", "of", "the", "function", "when", "it", "is", "used", "as", "a", "task", ".", "To", "use", "the", "default", "behavior", "tasks", "don", "t", "need", "to", "be", "decorated", "." ]
python
train
37.035714
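A minimal usage sketch for the decorator, following tasktiger's documented setup; the queue name, timeout, and email address are illustrative:

```python
from redis import Redis
from tasktiger import TaskTiger

tiger = TaskTiger(connection=Redis())

@tiger.task(queue="emails", unique=True, hard_timeout=60)
def send_email(address):
    print("sending to", address)

# The decorator attaches .delay(), so this enqueues instead of calling:
send_email.delay("user@example.com")
```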
tkem/cachetools
cachetools/ttl.py
https://github.com/tkem/cachetools/blob/1b67cddadccb89993e9d2567bac22e57e2b2b373/cachetools/ttl.py#L165-L178
def expire(self, time=None): """Remove expired items from the cache.""" if time is None: time = self.__timer() root = self.__root curr = root.next links = self.__links cache_delitem = Cache.__delitem__ while curr is not root and curr.expire < time: cache_delitem(self, curr.key) del links[curr.key] next = curr.next curr.unlink() curr = next
[ "def", "expire", "(", "self", ",", "time", "=", "None", ")", ":", "if", "time", "is", "None", ":", "time", "=", "self", ".", "__timer", "(", ")", "root", "=", "self", ".", "__root", "curr", "=", "root", ".", "next", "links", "=", "self", ".", "__links", "cache_delitem", "=", "Cache", ".", "__delitem__", "while", "curr", "is", "not", "root", "and", "curr", ".", "expire", "<", "time", ":", "cache_delitem", "(", "self", ",", "curr", ".", "key", ")", "del", "links", "[", "curr", ".", "key", "]", "next", "=", "curr", ".", "next", "curr", ".", "unlink", "(", ")", "curr", "=", "next" ]
Remove expired items from the cache.
[ "Remove", "expired", "items", "from", "the", "cache", "." ]
python
train
32.571429
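`expire()` is also callable directly on a `TTLCache`; a quick demonstration with a deliberately short TTL:

```python
import time
from cachetools import TTLCache

cache = TTLCache(maxsize=128, ttl=0.1)
cache["answer"] = 42
time.sleep(0.2)
cache.expire()            # unlinks every item whose expiry time has passed
print("answer" in cache)  # False
```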
OCHA-DAP/hdx-python-utilities
src/hdx/utilities/loader.py
https://github.com/OCHA-DAP/hdx-python-utilities/blob/9c89e0aa5afac2c002b02a2d8f0e5b91eeb3d2a3/src/hdx/utilities/loader.py#L79-L93
def load_yaml(path): # type: (str) -> OrderedDict """Load YAML file into an ordered dictionary Args: path (str): Path to YAML file Returns: OrderedDict: Ordered dictionary containing loaded YAML file """ with open(path, 'rt') as f: yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader) if not yamldict: raise (LoadError('YAML file: %s is empty!' % path)) return yamldict
[ "def", "load_yaml", "(", "path", ")", ":", "# type: (str) -> OrderedDict", "with", "open", "(", "path", ",", "'rt'", ")", "as", "f", ":", "yamldict", "=", "yaml", ".", "load", "(", "f", ".", "read", "(", ")", ",", "Loader", "=", "yamlloader", ".", "ordereddict", ".", "CSafeLoader", ")", "if", "not", "yamldict", ":", "raise", "(", "LoadError", "(", "'YAML file: %s is empty!'", "%", "path", ")", ")", "return", "yamldict" ]
Load YAML file into an ordered dictionary Args: path (str): Path to YAML file Returns: OrderedDict: Ordered dictionary containing loaded YAML file
[ "Load", "YAML", "file", "into", "an", "ordered", "dictionary" ]
python
train
29.466667
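What the ordered loader buys here is key order. A small check, assuming the yamlloader package (and PyYAML's C extension, for the CSafeLoader variant) is installed:

```python
import yaml
import yamlloader  # assumption: the yamlloader package is available
from collections import OrderedDict

data = yaml.load("b: 2\na: 1\n", Loader=yamlloader.ordereddict.CSafeLoader)
assert isinstance(data, OrderedDict)
assert list(data) == ["b", "a"]  # document order preserved
```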
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L838-L841
def update_privilege(self, obj, target): '''Get privileges from metadata of the source in s3, and apply them to target''' if 'privilege' in obj['Metadata']: os.chmod(target, int(obj['Metadata']['privilege'], 8))
[ "def", "update_privilege", "(", "self", ",", "obj", ",", "target", ")", ":", "if", "'privilege'", "in", "obj", "[", "'Metadata'", "]", ":", "os", ".", "chmod", "(", "target", ",", "int", "(", "obj", "[", "'Metadata'", "]", "[", "'privilege'", "]", ",", "8", ")", ")" ]
Get privileges from metadata of the source in s3, and apply them to target
[ "Get", "privileges", "from", "metadata", "of", "the", "source", "in", "s3", "and", "apply", "them", "to", "target" ]
python
test
55.5
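The 'privilege' metadata value is evidently an octal mode string; a self-contained sketch of the round-trip that `update_privilege` completes on the download side:

```python
import os
import stat
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    path = f.name
os.chmod(path, 0o640)

stored = format(stat.S_IMODE(os.stat(path).st_mode), "o")  # '640' -- the uploaded metadata
os.chmod(path, int(stored, 8))                             # the restore step shown above
assert stat.S_IMODE(os.stat(path).st_mode) == 0o640
os.unlink(path)
```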
IdentityPython/pysaml2
src/saml2/__init__.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/__init__.py#L210-L244
def find_children(self, tag=None, namespace=None): """Searches child nodes for objects with the desired tag/namespace. Returns a list of extension elements within this object whose tag and/or namespace match those passed in. To find all children in a particular namespace, specify the namespace but not the tag name. If you specify only the tag, the result list may contain extension elements in multiple namespaces. :param tag: str (optional) The desired tag :param namespace: str (optional) The desired namespace :return: A list of elements whose tag and/or namespace match the parameters values """ results = [] if tag and namespace: for element in self.children: if element.tag == tag and element.namespace == namespace: results.append(element) elif tag and not namespace: for element in self.children: if element.tag == tag: results.append(element) elif namespace and not tag: for element in self.children: if element.namespace == namespace: results.append(element) else: for element in self.children: results.append(element) return results
[ "def", "find_children", "(", "self", ",", "tag", "=", "None", ",", "namespace", "=", "None", ")", ":", "results", "=", "[", "]", "if", "tag", "and", "namespace", ":", "for", "element", "in", "self", ".", "children", ":", "if", "element", ".", "tag", "==", "tag", "and", "element", ".", "namespace", "==", "namespace", ":", "results", ".", "append", "(", "element", ")", "elif", "tag", "and", "not", "namespace", ":", "for", "element", "in", "self", ".", "children", ":", "if", "element", ".", "tag", "==", "tag", ":", "results", ".", "append", "(", "element", ")", "elif", "namespace", "and", "not", "tag", ":", "for", "element", "in", "self", ".", "children", ":", "if", "element", ".", "namespace", "==", "namespace", ":", "results", ".", "append", "(", "element", ")", "else", ":", "for", "element", "in", "self", ".", "children", ":", "results", ".", "append", "(", "element", ")", "return", "results" ]
Searches child nodes for objects with the desired tag/namespace. Returns a list of extension elements within this object whose tag and/or namespace match those passed in. To find all children in a particular namespace, specify the namespace but not the tag name. If you specify only the tag, the result list may contain extension elements in multiple namespaces. :param tag: str (optional) The desired tag :param namespace: str (optional) The desired namespace :return: A list of elements whose tag and/or namespace match the parameters values
[ "Searches", "child", "nodes", "for", "objects", "with", "the", "desired", "tag", "/", "namespace", "." ]
python
train
37.971429
zulily/pudl
pudl/ad_object.py
https://github.com/zulily/pudl/blob/761eec76841964780e759e6bf6d5f06a54844a80/pudl/ad_object.py#L49-L66
def samaccountname(self, base_dn, distinguished_name): """Retrieve the sAMAccountName for a specific DistinguishedName :param str base_dn: The base DN to search within :param list distinguished_name: The base DN to search within :param list attributes: Object attributes to populate, defaults to all :return: A populated ADUser object :rtype: ADUser """ mappings = self.samaccountnames(base_dn, [distinguished_name]) try: # Usually we will find a match, but perhaps not always return mappings[distinguished_name] except KeyError: logging.info("%s - unable to retrieve object from AD by DistinguishedName", distinguished_name)
[ "def", "samaccountname", "(", "self", ",", "base_dn", ",", "distinguished_name", ")", ":", "mappings", "=", "self", ".", "samaccountnames", "(", "base_dn", ",", "[", "distinguished_name", "]", ")", "try", ":", "# Usually we will find a match, but perhaps not always", "return", "mappings", "[", "distinguished_name", "]", "except", "KeyError", ":", "logging", ".", "info", "(", "\"%s - unable to retrieve object from AD by DistinguishedName\"", ",", "distinguished_name", ")" ]
Retrieve the sAMAccountName for a specific DistinguishedName :param str base_dn: The base DN to search within :param str distinguished_name: The DistinguishedName of the object to look up :return: The matching sAMAccountName, or None if the object cannot be retrieved :rtype: str
[ "Retrieve", "the", "sAMAccountName", "for", "a", "specific", "DistinguishedName" ]
python
train
41.777778
saltstack/salt
salt/cloud/clouds/softlayer_hw.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/softlayer_hw.py#L117-L144
def avail_locations(call=None): ''' List all available locations ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_locations function must be called with ' '-f or --function, or with the --list-locations option' ) ret = {} conn = get_conn(service='SoftLayer_Product_Package') locations = conn.getLocations(id=50) for location in locations: ret[location['id']] = { 'id': location['id'], 'name': location['name'], 'location': location['longName'], } available = conn.getAvailableLocations(id=50) for location in available: if location.get('isAvailable', 0) is 0: continue ret[location['locationId']]['available'] = True return ret
[ "def", "avail_locations", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The avail_locations function must be called with '", "'-f or --function, or with the --list-locations option'", ")", "ret", "=", "{", "}", "conn", "=", "get_conn", "(", "service", "=", "'SoftLayer_Product_Package'", ")", "locations", "=", "conn", ".", "getLocations", "(", "id", "=", "50", ")", "for", "location", "in", "locations", ":", "ret", "[", "location", "[", "'id'", "]", "]", "=", "{", "'id'", ":", "location", "[", "'id'", "]", ",", "'name'", ":", "location", "[", "'name'", "]", ",", "'location'", ":", "location", "[", "'longName'", "]", ",", "}", "available", "=", "conn", ".", "getAvailableLocations", "(", "id", "=", "50", ")", "for", "location", "in", "available", ":", "if", "location", ".", "get", "(", "'isAvailable'", ",", "0", ")", "is", "0", ":", "continue", "ret", "[", "location", "[", "'locationId'", "]", "]", "[", "'available'", "]", "=", "True", "return", "ret" ]
List all available locations
[ "List", "all", "available", "locations" ]
python
train
27.928571
maas/python-libmaas
maas/client/flesh/machines.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/flesh/machines.py#L29-L33
def validate_file(parser, arg): """Validates that `arg` is a valid file.""" if not os.path.isfile(arg): parser.error("%s is not a file." % arg) return arg
[ "def", "validate_file", "(", "parser", ",", "arg", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "arg", ")", ":", "parser", ".", "error", "(", "\"%s is not a file.\"", "%", "arg", ")", "return", "arg" ]
Validates that `arg` is a valid file.
[ "Validates", "that", "arg", "is", "a", "valid", "file", "." ]
python
train
34
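Because the parser is the first argument, the helper slots into argparse's `type=` hook via functools.partial; the argument name below is hypothetical:

```python
import argparse
from functools import partial

parser = argparse.ArgumentParser()
parser.add_argument("config", type=partial(validate_file, parser))
# parser.parse_args(["settings.yaml"]) exits with an error unless the file exists
```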
ebroecker/canmatrix
src/canmatrix/join.py
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/join.py#L22-L28
def ids_sharing_same_pgn(id_x, pgn_x, id_y, pgn_y): # type: (typing.Sequence[canmatrix.ArbitrationId], typing.Sequence[int], typing.Sequence[canmatrix.ArbitrationId], typing.Sequence[int]) -> typing.Iterable[typing.Tuple[canmatrix.ArbitrationId, canmatrix.ArbitrationId]] """Yield arbitration ids which has the same pgn.""" for id_a, pgn_a in zip(id_x, pgn_x): for id_b, pgn_b in zip(id_y, pgn_y): if pgn_a == pgn_b: yield (id_a, id_b)
[ "def", "ids_sharing_same_pgn", "(", "id_x", ",", "pgn_x", ",", "id_y", ",", "pgn_y", ")", ":", "# type: (typing.Sequence[canmatrix.ArbitrationId], typing.Sequence[int], typing.Sequence[canmatrix.ArbitrationId], typing.Sequence[int]) -> typing.Iterable[typing.Tuple[canmatrix.ArbitrationId, canmatrix.ArbitrationId]]", "for", "id_a", ",", "pgn_a", "in", "zip", "(", "id_x", ",", "pgn_x", ")", ":", "for", "id_b", ",", "pgn_b", "in", "zip", "(", "id_y", ",", "pgn_y", ")", ":", "if", "pgn_a", "==", "pgn_b", ":", "yield", "(", "id_a", ",", "id_b", ")" ]
Yield arbitration ids that share the same pgn.
[ "Yield", "arbitration", "ids", "which", "has", "the", "same", "pgn", "." ]
python
train
68.142857
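The generator only zips and compares, so plain values can stand in for `canmatrix.ArbitrationId` when trying it out:

```python
ids_a, pgns_a = ["0x100", "0x200", "0x300"], [60928, 61184, 65280]
ids_b, pgns_b = ["0x900", "0x800"], [65280, 60928]

print(list(ids_sharing_same_pgn(ids_a, pgns_a, ids_b, pgns_b)))
# [('0x100', '0x800'), ('0x300', '0x900')]
```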
poppy-project/pypot
pypot/dynamixel/io/io.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/io/io.py#L30-L32
def set_wheel_mode(self, ids): """ Sets the specified motors to wheel mode. """ self.set_control_mode(dict(zip(ids, itertools.repeat('wheel'))))
[ "def", "set_wheel_mode", "(", "self", ",", "ids", ")", ":", "self", ".", "set_control_mode", "(", "dict", "(", "zip", "(", "ids", ",", "itertools", ".", "repeat", "(", "'wheel'", ")", ")", ")", ")" ]
Sets the specified motors to wheel mode.
[ "Sets", "the", "specified", "motors", "to", "wheel", "mode", "." ]
python
train
52.666667
pantsbuild/pants
src/python/pants/pantsd/pailgun_server.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/pantsd/pailgun_server.py#L137-L140
def server_bind(self): """Override of TCPServer.server_bind() that tracks bind-time assigned random ports.""" TCPServer.server_bind(self) _, self.server_port = self.socket.getsockname()[:2]
[ "def", "server_bind", "(", "self", ")", ":", "TCPServer", ".", "server_bind", "(", "self", ")", "_", ",", "self", ".", "server_port", "=", "self", ".", "socket", ".", "getsockname", "(", ")", "[", ":", "2", "]" ]
Override of TCPServer.server_bind() that tracks bind-time assigned random ports.
[ "Override", "of", "TCPServer", ".", "server_bind", "()", "that", "tracks", "bind", "-", "time", "assigned", "random", "ports", "." ]
python
train
49.5
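The point of the override: binding to port 0 lets the OS assign a free port, and this records it for later lookup. A runnable miniature using the stdlib server the pailgun class extends:

```python
from socketserver import TCPServer, BaseRequestHandler

class PortTrackingServer(TCPServer):
    def server_bind(self):
        TCPServer.server_bind(self)
        _, self.server_port = self.socket.getsockname()[:2]

server = PortTrackingServer(("127.0.0.1", 0), BaseRequestHandler)  # port 0: OS picks
print(server.server_port)  # the bind-time assigned port, now queryable
server.server_close()
```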
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L2618-L2667
def find_repo_by_path(i): """ Input: { path - path to repo } Output: { return - return code = 0, if successful 16, if repo not found (may be warning) > 0, if error (error) - error text if return > 0 repo_uoa - repo UOA repo_uid - repo UID repo_alias - repo alias } """ p=i['path'] if p!='': p=os.path.normpath(p) found=False if p==work['dir_default_repo']: uoa=cfg['repo_name_default'] uid=cfg['repo_uid_default'] alias=uoa found=True elif p==work['dir_local_repo']: uoa=cfg['repo_name_local'] uid=cfg['repo_uid_local'] alias=uoa found=True else: r=reload_repo_cache({}) # Ignore errors if r['return']>0: return r for q in cache_repo_info: qq=cache_repo_info[q] if p==qq['dict'].get('path',''): uoa=qq['data_uoa'] uid=qq['data_uid'] alias=uid if not is_uid(uoa): alias=uoa found=True break if not found: return {'return':16, 'error': 'repository not found in this path'} return {'return':0, 'repo_uoa': uoa, 'repo_uid': uid, 'repo_alias':alias}
[ "def", "find_repo_by_path", "(", "i", ")", ":", "p", "=", "i", "[", "'path'", "]", "if", "p", "!=", "''", ":", "p", "=", "os", ".", "path", ".", "normpath", "(", "p", ")", "found", "=", "False", "if", "p", "==", "work", "[", "'dir_default_repo'", "]", ":", "uoa", "=", "cfg", "[", "'repo_name_default'", "]", "uid", "=", "cfg", "[", "'repo_uid_default'", "]", "alias", "=", "uoa", "found", "=", "True", "elif", "p", "==", "work", "[", "'dir_local_repo'", "]", ":", "uoa", "=", "cfg", "[", "'repo_name_local'", "]", "uid", "=", "cfg", "[", "'repo_uid_local'", "]", "alias", "=", "uoa", "found", "=", "True", "else", ":", "r", "=", "reload_repo_cache", "(", "{", "}", ")", "# Ignore errors", "if", "r", "[", "'return'", "]", ">", "0", ":", "return", "r", "for", "q", "in", "cache_repo_info", ":", "qq", "=", "cache_repo_info", "[", "q", "]", "if", "p", "==", "qq", "[", "'dict'", "]", ".", "get", "(", "'path'", ",", "''", ")", ":", "uoa", "=", "qq", "[", "'data_uoa'", "]", "uid", "=", "qq", "[", "'data_uid'", "]", "alias", "=", "uid", "if", "not", "is_uid", "(", "uoa", ")", ":", "alias", "=", "uoa", "found", "=", "True", "break", "if", "not", "found", ":", "return", "{", "'return'", ":", "16", ",", "'error'", ":", "'repository not found in this path'", "}", "return", "{", "'return'", ":", "0", ",", "'repo_uoa'", ":", "uoa", ",", "'repo_uid'", ":", "uid", ",", "'repo_alias'", ":", "alias", "}" ]
Input: { path - path to repo } Output: { return - return code = 0, if successful 16, if repo not found (may be warning) > 0, if error (error) - error text if return > 0 repo_uoa - repo UOA repo_uid - repo UID repo_alias - repo alias }
[ "Input", ":", "{", "path", "-", "path", "to", "repo", "}" ]
python
train
27.12
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py#L71-L77
def tokenize_annotated(doc, annotation): """Tokenize a document and add an annotation attribute to each token """ tokens = tokenize(doc, include_hrefs=False) for tok in tokens: tok.annotation = annotation return tokens
[ "def", "tokenize_annotated", "(", "doc", ",", "annotation", ")", ":", "tokens", "=", "tokenize", "(", "doc", ",", "include_hrefs", "=", "False", ")", "for", "tok", "in", "tokens", ":", "tok", ".", "annotation", "=", "annotation", "return", "tokens" ]
Tokenize a document and add an annotation attribute to each token
[ "Tokenize", "a", "document", "and", "add", "an", "annotation", "attribute", "to", "each", "token" ]
python
test
34.571429
HPENetworking/PYHPEIMC
pyhpeimc/plat/icc.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/icc.py#L77-L130
def create_cfg_segment(filename, filecontent, description, auth, url): """ Takes a str into var filecontent which represents the entire content of a configuration segment, or partial configuration file. Takes a str into var description which represents the description of the configuration segment :param filename: str containing the name of the configuration segment. :param filecontent: str containing the entire contents of the configuration segment :param description: str contrianing the description of the configuration segment :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: If successful, Boolena of type True :rtype: Boolean >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.icc import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> filecontent = 'sample file content' >>> create_new_file = create_cfg_segment('CW7SNMP.cfg', filecontent, 'My New Template', auth.creds, auth.url) >>> template_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url) >>> assert type(template_id) is str >>> """ payload = {"confFileName": filename, "confFileType": "2", "cfgFileParent": "-1", "confFileDesc": description, "content": filecontent} f_url = url + "/imcrs/icc/confFile" response = requests.post(f_url, data=(json.dumps(payload)), auth=auth, headers=HEADERS) try: if response.status_code == 201: print("Template successfully created") return response.status_code elif response.status_code is not 201: return response.status_code except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + " create_cfg_segment: An Error has occured"
[ "def", "create_cfg_segment", "(", "filename", ",", "filecontent", ",", "description", ",", "auth", ",", "url", ")", ":", "payload", "=", "{", "\"confFileName\"", ":", "filename", ",", "\"confFileType\"", ":", "\"2\"", ",", "\"cfgFileParent\"", ":", "\"-1\"", ",", "\"confFileDesc\"", ":", "description", ",", "\"content\"", ":", "filecontent", "}", "f_url", "=", "url", "+", "\"/imcrs/icc/confFile\"", "response", "=", "requests", ".", "post", "(", "f_url", ",", "data", "=", "(", "json", ".", "dumps", "(", "payload", ")", ")", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ")", "try", ":", "if", "response", ".", "status_code", "==", "201", ":", "print", "(", "\"Template successfully created\"", ")", "return", "response", ".", "status_code", "elif", "response", ".", "status_code", "is", "not", "201", ":", "return", "response", ".", "status_code", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "error", ":", "return", "\"Error:\\n\"", "+", "str", "(", "error", ")", "+", "\" create_cfg_segment: An Error has occured\"" ]
Takes a str into var filecontent which represents the entire content of a configuration segment, or partial configuration file, and a str into var description which represents the description of the configuration segment. :param filename: str containing the name of the configuration segment. :param filecontent: str containing the entire contents of the configuration segment :param description: str containing the description of the configuration segment :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: If successful, the HTTP status code 201 :rtype: int >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.icc import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> filecontent = 'sample file content' >>> create_new_file = create_cfg_segment('CW7SNMP.cfg', filecontent, 'My New Template', auth.creds, auth.url) >>> template_id = get_template_id('CW7SNMP.cfg', auth.creds, auth.url) >>> assert type(template_id) is str >>>
[ "Takes", "a", "str", "into", "var", "filecontent", "which", "represents", "the", "entire", "content", "of", "a", "configuration", "segment", "or", "partial", "configuration", "file", ".", "Takes", "a", "str", "into", "var", "description", "which", "represents", "the", "description", "of", "the", "configuration", "segment", ":", "param", "filename", ":", "str", "containing", "the", "name", "of", "the", "configuration", "segment", "." ]
python
train
38.833333
rpcope1/PythonConfluenceAPI
PythonConfluenceAPI/api.py
https://github.com/rpcope1/PythonConfluenceAPI/blob/b7f0ca2a390f964715fdf3a60b5b0c5ef7116d40/PythonConfluenceAPI/api.py#L823-L837
def create_new_label_by_content_id(self, content_id, label_names, callback=None): """ Adds a list of labels to the specified content. :param content_id (string): A string containing the id of the labels content container. :param label_names (list): A list of labels (strings) to apply to the content. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/label endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially. """ assert isinstance(label_names, list) assert all(isinstance(ln, dict) and set(ln.keys()) == {"prefix", "name"} for ln in label_names) return self._service_post_request("rest/api/content/{id}/label".format(id=content_id), data=json.dumps(label_names), headers={"Content-Type": "application/json"}, callback=callback)
[ "def", "create_new_label_by_content_id", "(", "self", ",", "content_id", ",", "label_names", ",", "callback", "=", "None", ")", ":", "assert", "isinstance", "(", "label_names", ",", "list", ")", "assert", "all", "(", "isinstance", "(", "ln", ",", "dict", ")", "and", "set", "(", "ln", ".", "keys", "(", ")", ")", "==", "{", "\"prefix\"", ",", "\"name\"", "}", "for", "ln", "in", "label_names", ")", "return", "self", ".", "_service_post_request", "(", "\"rest/api/content/{id}/label\"", ".", "format", "(", "id", "=", "content_id", ")", ",", "data", "=", "json", ".", "dumps", "(", "label_names", ")", ",", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/json\"", "}", ",", "callback", "=", "callback", ")" ]
Adds a list of labels to the specified content. :param content_id (string): A string containing the id of the labels content container. :param label_names (list): A list of labels (strings) to apply to the content. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the content/{id}/label endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
[ "Adds", "a", "list", "of", "labels", "to", "the", "specified", "content", ".", ":", "param", "content_id", "(", "string", ")", ":", "A", "string", "containing", "the", "id", "of", "the", "labels", "content", "container", ".", ":", "param", "label_names", "(", "list", ")", ":", "A", "list", "of", "labels", "(", "strings", ")", "to", "apply", "to", "the", "content", ".", ":", "param", "callback", ":", "OPTIONAL", ":", "The", "callback", "to", "execute", "on", "the", "resulting", "data", "before", "the", "method", "returns", ".", "Default", ":", "None", "(", "no", "callback", "raw", "data", "returned", ")", ".", ":", "return", ":", "The", "JSON", "data", "returned", "from", "the", "content", "/", "{", "id", "}", "/", "label", "endpoint", "or", "the", "results", "of", "the", "callback", ".", "Will", "raise", "requests", ".", "HTTPError", "on", "bad", "input", "potentially", "." ]
python
train
74.466667
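The assertions pin down the payload shape: a list of dicts with exactly the keys prefix and name. A hedged call sketch, where the `api` client instance, content id, and the usual Confluence 'global' prefix are illustrative:

```python
labels = [
    {"prefix": "global", "name": "release-notes"},
    {"prefix": "global", "name": "reviewed"},
]
api.create_new_label_by_content_id("12345", labels)  # POSTs to rest/api/content/12345/label
```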
Terrance/SkPy
skpy/chat.py
https://github.com/Terrance/SkPy/blob/0f9489c94e8ec4d3effab4314497428872a80ad1/skpy/chat.py#L462-L477
def urlToIds(url): """ Resolve a ``join.skype.com`` URL and returns various identifiers for the group conversation. Args: url (str): public join URL, or identifier from it Returns: dict: related conversation's identifiers -- keys: ``id``, ``long``, ``blob`` """ urlId = url.split("/")[-1] convUrl = "https://join.skype.com/api/v2/conversation/" json = SkypeConnection.externalCall("POST", convUrl, json={"shortId": urlId, "type": "wl"}).json() return {"id": json.get("Resource"), "long": json.get("Id"), "blob": json.get("ChatBlob")}
[ "def", "urlToIds", "(", "url", ")", ":", "urlId", "=", "url", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "convUrl", "=", "\"https://join.skype.com/api/v2/conversation/\"", "json", "=", "SkypeConnection", ".", "externalCall", "(", "\"POST\"", ",", "convUrl", ",", "json", "=", "{", "\"shortId\"", ":", "urlId", ",", "\"type\"", ":", "\"wl\"", "}", ")", ".", "json", "(", ")", "return", "{", "\"id\"", ":", "json", ".", "get", "(", "\"Resource\"", ")", ",", "\"long\"", ":", "json", ".", "get", "(", "\"Id\"", ")", ",", "\"blob\"", ":", "json", ".", "get", "(", "\"ChatBlob\"", ")", "}" ]
Resolve a ``join.skype.com`` URL and return the various identifiers for the group conversation. Args: url (str): public join URL, or identifier from it Returns: dict: related conversation's identifiers -- keys: ``id``, ``long``, ``blob``
[ "Resolve", "a", "join", ".", "skype", ".", "com", "URL", "and", "returns", "various", "identifiers", "for", "the", "group", "conversation", "." ]
python
test
40.5
cytoscape/py2cytoscape
py2cytoscape/cyrest/apply.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/apply.py#L58-L71
def copyCurrentLayout(self, sourceViewSUID, targetViewSUID, body, verbose=None): """ Copy one network view layout onto another, setting the node location and view scale to match. This makes visually comparing networks simple. :param sourceViewSUID: Source network view SUID (or "current") :param targetViewSUID: Target network view SUID (or "current") :param body: Clone the specified network view layout onto another network view -- Not required, can be None :param verbose: print more :returns: 200: successful operation; 404: Network View does not exist """ response=api(url=self.___url+'apply/layouts/copycat/'+str(sourceViewSUID)+'/'+str(targetViewSUID)+'', method="PUT", body=body, verbose=verbose) return response
[ "def", "copyCurrentLayout", "(", "self", ",", "sourceViewSUID", ",", "targetViewSUID", ",", "body", ",", "verbose", "=", "None", ")", ":", "response", "=", "api", "(", "url", "=", "self", ".", "___url", "+", "'apply/layouts/copycat/'", "+", "str", "(", "sourceViewSUID", ")", "+", "'/'", "+", "str", "(", "targetViewSUID", ")", "+", "''", ",", "method", "=", "\"PUT\"", ",", "body", "=", "body", ",", "verbose", "=", "verbose", ")", "return", "response" ]
Copy one network view layout onto another, setting the node location and view scale to match. This makes visually comparing networks simple. :param sourceViewSUID: Source network view SUID (or "current") :param targetViewSUID: Target network view SUID (or "current") :param body: Clone the specified network view layout onto another network view -- Not required, can be None :param verbose: print more :returns: 200: successful operation; 404: Network View does not exist
[ "Copy", "one", "network", "view", "layout", "onto", "another", "setting", "the", "node", "location", "and", "view", "scale", "to", "match", ".", "This", "makes", "visually", "comparing", "networks", "simple", "." ]
python
train
56.428571
scottgigante/tasklogger
tasklogger/api.py
https://github.com/scottgigante/tasklogger/blob/06a263715d2db0653615c17b2df14b8272967b8d/tasklogger/api.py#L131-L149
def log_error(msg, logger="TaskLogger"): """Log an ERROR message Convenience function to log a message to the default Logger Parameters ---------- msg : str Message to be logged logger : str, optional (default: "TaskLogger") Unique name of the logger to retrieve Returns ------- logger : TaskLogger """ tasklogger = get_tasklogger(logger) tasklogger.error(msg) return tasklogger
[ "def", "log_error", "(", "msg", ",", "logger", "=", "\"TaskLogger\"", ")", ":", "tasklogger", "=", "get_tasklogger", "(", "logger", ")", "tasklogger", ".", "error", "(", "msg", ")", "return", "tasklogger" ]
Log an ERROR message Convenience function to log a message to the default Logger Parameters ---------- msg : str Message to be logged logger : str, optional (default: "TaskLogger") Unique name of the logger to retrieve Returns ------- logger : TaskLogger
[ "Log", "an", "ERROR", "message" ]
python
train
22.684211
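Usage is a one-liner, assuming the api module's convenience functions are re-exported at the package top level, as the path suggests:

```python
import tasklogger

tasklogger.log_error("model failed to converge")  # logs via the default "TaskLogger"
```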
underworldcode/stripy
stripy-src/stripy/cartesian.py
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L1120-L1128
def remove_duplicates(vector_tuple): """ Remove duplicates rows from N equally-sized arrays """ array = np.column_stack(vector_tuple) a = np.ascontiguousarray(array) unique_a = np.unique(a.view([('', a.dtype)]*a.shape[1])) b = unique_a.view(a.dtype).reshape((unique_a.shape[0], a.shape[1])) return list(b.T)
[ "def", "remove_duplicates", "(", "vector_tuple", ")", ":", "array", "=", "np", ".", "column_stack", "(", "vector_tuple", ")", "a", "=", "np", ".", "ascontiguousarray", "(", "array", ")", "unique_a", "=", "np", ".", "unique", "(", "a", ".", "view", "(", "[", "(", "''", ",", "a", ".", "dtype", ")", "]", "*", "a", ".", "shape", "[", "1", "]", ")", ")", "b", "=", "unique_a", ".", "view", "(", "a", ".", "dtype", ")", ".", "reshape", "(", "(", "unique_a", ".", "shape", "[", "0", "]", ",", "a", ".", "shape", "[", "1", "]", ")", ")", "return", "list", "(", "b", ".", "T", ")" ]
Remove duplicate rows from N equally-sized arrays
[ "Remove", "duplicates", "rows", "from", "N", "equally", "-", "sized", "arrays" ]
python
train
36.777778
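A quick demonstration; note that on NumPy >= 1.13 the structured-view trick is equivalent to `np.unique(..., axis=0)`:

```python
import numpy as np

x = np.array([0.0, 1.0, 0.0, 2.0])
y = np.array([0.0, 1.0, 0.0, 3.0])

ux, uy = remove_duplicates((x, y))  # the duplicate row (0, 0) collapses
print(ux, uy)                       # [0. 1. 2.] [0. 1. 3.]

# Modern equivalent:
assert (np.unique(np.column_stack((x, y)), axis=0).T == [ux, uy]).all()
```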
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L4841-L4877
def update_storage_policy(policy, policy_dict, service_instance=None): ''' Updates a storage policy. Supported capability types: scalar, set, range. policy Name of the policy to update. policy_dict Dictionary containing the changes to apply to the policy. (example in salt.states.pbm) service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.update_storage_policy policy='policy name' policy_dict="$policy_dict" ''' log.trace('updating storage policy, dict = %s', policy_dict) profile_manager = salt.utils.pbm.get_profile_manager(service_instance) policies = salt.utils.pbm.get_storage_policies(profile_manager, [policy]) if not policies: raise VMwareObjectRetrievalError('Policy \'{0}\' was not found' ''.format(policy)) policy_ref = policies[0] policy_update_spec = pbm.profile.CapabilityBasedProfileUpdateSpec() log.trace('Setting policy values in policy_update_spec') for prop in ['description', 'constraints']: setattr(policy_update_spec, prop, getattr(policy_ref, prop)) _apply_policy_config(policy_update_spec, policy_dict) salt.utils.pbm.update_storage_policy(profile_manager, policy_ref, policy_update_spec) return {'update_storage_policy': True}
[ "def", "update_storage_policy", "(", "policy", ",", "policy_dict", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'updating storage policy, dict = %s'", ",", "policy_dict", ")", "profile_manager", "=", "salt", ".", "utils", ".", "pbm", ".", "get_profile_manager", "(", "service_instance", ")", "policies", "=", "salt", ".", "utils", ".", "pbm", ".", "get_storage_policies", "(", "profile_manager", ",", "[", "policy", "]", ")", "if", "not", "policies", ":", "raise", "VMwareObjectRetrievalError", "(", "'Policy \\'{0}\\' was not found'", "''", ".", "format", "(", "policy", ")", ")", "policy_ref", "=", "policies", "[", "0", "]", "policy_update_spec", "=", "pbm", ".", "profile", ".", "CapabilityBasedProfileUpdateSpec", "(", ")", "log", ".", "trace", "(", "'Setting policy values in policy_update_spec'", ")", "for", "prop", "in", "[", "'description'", ",", "'constraints'", "]", ":", "setattr", "(", "policy_update_spec", ",", "prop", ",", "getattr", "(", "policy_ref", ",", "prop", ")", ")", "_apply_policy_config", "(", "policy_update_spec", ",", "policy_dict", ")", "salt", ".", "utils", ".", "pbm", ".", "update_storage_policy", "(", "profile_manager", ",", "policy_ref", ",", "policy_update_spec", ")", "return", "{", "'update_storage_policy'", ":", "True", "}" ]
Updates a storage policy. Supported capability types: scalar, set, range. policy Name of the policy to update. policy_dict Dictionary containing the changes to apply to the policy. (example in salt.states.pbm) service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.update_storage_policy policy='policy name' policy_dict="$policy_dict"
[ "Updates", "a", "storage", "policy", "." ]
python
train
38.621622
seperman/deepdiff
deepdiff/deephash.py
https://github.com/seperman/deepdiff/blob/a66879190fadc671632f154c1fcb82f5c3cef800/deepdiff/deephash.py#L282-L343
def _hash(self, obj, parent, parents_ids=EMPTY_FROZENSET): """The main diff method""" try: result = self[obj] except (TypeError, KeyError): pass else: return result result = not_hashed if self._skip_this(obj, parent): return elif obj is None: result = 'NONE' elif isinstance(obj, strings): result = prepare_string_for_hashing( obj, ignore_string_type_changes=self.ignore_string_type_changes, ignore_string_case=self.ignore_string_case) elif isinstance(obj, numbers): result = self._prep_number(obj) elif isinstance(obj, MutableMapping): result = self._prep_dict(obj=obj, parent=parent, parents_ids=parents_ids) elif isinstance(obj, tuple): result = self._prep_tuple(obj=obj, parent=parent, parents_ids=parents_ids) elif isinstance(obj, Iterable): result = self._prep_iterable(obj=obj, parent=parent, parents_ids=parents_ids) else: result = self._prep_obj(obj=obj, parent=parent, parents_ids=parents_ids) if result is not_hashed: # pragma: no cover self[UNPROCESSED].append(obj) elif result is unprocessed: pass elif self.apply_hash: if isinstance(obj, strings): result_cleaned = result else: result_cleaned = prepare_string_for_hashing( result, ignore_string_type_changes=self.ignore_string_type_changes, ignore_string_case=self.ignore_string_case) result = self.hasher(result_cleaned) # It is important to keep the hash of all objects. # The hashes will be later used for comparing the objects. try: self[obj] = result except TypeError: obj_id = get_id(obj) self[obj_id] = result return result
[ "def", "_hash", "(", "self", ",", "obj", ",", "parent", ",", "parents_ids", "=", "EMPTY_FROZENSET", ")", ":", "try", ":", "result", "=", "self", "[", "obj", "]", "except", "(", "TypeError", ",", "KeyError", ")", ":", "pass", "else", ":", "return", "result", "result", "=", "not_hashed", "if", "self", ".", "_skip_this", "(", "obj", ",", "parent", ")", ":", "return", "elif", "obj", "is", "None", ":", "result", "=", "'NONE'", "elif", "isinstance", "(", "obj", ",", "strings", ")", ":", "result", "=", "prepare_string_for_hashing", "(", "obj", ",", "ignore_string_type_changes", "=", "self", ".", "ignore_string_type_changes", ",", "ignore_string_case", "=", "self", ".", "ignore_string_case", ")", "elif", "isinstance", "(", "obj", ",", "numbers", ")", ":", "result", "=", "self", ".", "_prep_number", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "MutableMapping", ")", ":", "result", "=", "self", ".", "_prep_dict", "(", "obj", "=", "obj", ",", "parent", "=", "parent", ",", "parents_ids", "=", "parents_ids", ")", "elif", "isinstance", "(", "obj", ",", "tuple", ")", ":", "result", "=", "self", ".", "_prep_tuple", "(", "obj", "=", "obj", ",", "parent", "=", "parent", ",", "parents_ids", "=", "parents_ids", ")", "elif", "isinstance", "(", "obj", ",", "Iterable", ")", ":", "result", "=", "self", ".", "_prep_iterable", "(", "obj", "=", "obj", ",", "parent", "=", "parent", ",", "parents_ids", "=", "parents_ids", ")", "else", ":", "result", "=", "self", ".", "_prep_obj", "(", "obj", "=", "obj", ",", "parent", "=", "parent", ",", "parents_ids", "=", "parents_ids", ")", "if", "result", "is", "not_hashed", ":", "# pragma: no cover", "self", "[", "UNPROCESSED", "]", ".", "append", "(", "obj", ")", "elif", "result", "is", "unprocessed", ":", "pass", "elif", "self", ".", "apply_hash", ":", "if", "isinstance", "(", "obj", ",", "strings", ")", ":", "result_cleaned", "=", "result", "else", ":", "result_cleaned", "=", "prepare_string_for_hashing", "(", "result", ",", "ignore_string_type_changes", "=", "self", ".", "ignore_string_type_changes", ",", "ignore_string_case", "=", "self", ".", "ignore_string_case", ")", "result", "=", "self", ".", "hasher", "(", "result_cleaned", ")", "# It is important to keep the hash of all objects.", "# The hashes will be later used for comparing the objects.", "try", ":", "self", "[", "obj", "]", "=", "result", "except", "TypeError", ":", "obj_id", "=", "get_id", "(", "obj", ")", "self", "[", "obj_id", "]", "=", "result", "return", "result" ]
The main hash method
[ "The", "main", "diff", "method" ]
python
train
31.435484
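The `_hash` method above dispatches on the object's type (None, strings, numbers, mappings, tuples, other iterables, arbitrary objects) and memoizes results keyed by the object itself, falling back to the object's id when it is unhashable. A minimal usage sketch through deepdiff's public DeepHash entry point -- the public wrapper is an assumption here, not part of this record:

from deepdiff import DeepHash

obj = {'a': [1, 2, 3], 'b': 'text'}
hashes = DeepHash(obj)       # recursively hashes every node of obj
print(hashes[obj])           # hash of the whole dict
print(hashes[obj['a']])      # the nested list was hashed along the way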
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L6558-L6575
def _format_firewall_stdout(cmd_ret):
    '''
    Helper function to format the stdout from the get_firewall_status function.

    cmd_ret
        The return dictionary that comes from a cmd.run_all call.
    '''
    ret_dict = {'success': True,
                'rulesets': {}}
    for line in cmd_ret['stdout'].splitlines():
        if line.startswith('Name'):
            continue
        if line.startswith('---'):
            continue
        ruleset_status = line.split()
        ret_dict['rulesets'][ruleset_status[0]] = ruleset_status[1].lower() == 'true'

    return ret_dict
[ "def", "_format_firewall_stdout", "(", "cmd_ret", ")", ":", "ret_dict", "=", "{", "'success'", ":", "True", ",", "'rulesets'", ":", "{", "}", "}", "for", "line", "in", "cmd_ret", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "'Name'", ")", ":", "continue", "if", "line", ".", "startswith", "(", "'---'", ")", ":", "continue", "ruleset_status", "=", "line", ".", "split", "(", ")", "ret_dict", "[", "'rulesets'", "]", "[", "ruleset_status", "[", "0", "]", "]", "=", "bool", "(", "ruleset_status", "[", "1", "]", ")", "return", "ret_dict" ]
Helper function to format the stdout from the get_firewall_status function. cmd_ret The return dictionary that comes from a cmd.run_all call.
[ "Helper", "function", "to", "format", "the", "stdout", "from", "the", "get_firewall_status", "function", "." ]
python
train
30.777778
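The parser skips the `Name` header row and the `---` separator row, then maps each ruleset name to its enabled flag; because `bool()` of any non-empty string is always True, the enabled column has to be string-compared against 'true'. A self-contained sketch -- the sample stdout shape is illustrative, not taken from this record:

cmd_ret = {'retcode': 0, 'stdout': ('Name                 Enabled\n'
                                    '-------------------  -------\n'
                                    'sshServer            true\n'
                                    'vMotion              false\n')}

rulesets = {}
for line in cmd_ret['stdout'].splitlines():
    if line.startswith('Name') or line.startswith('---'):
        continue                       # skip the header and separator rows
    name, enabled = line.split()
    rulesets[name] = enabled.lower() == 'true'

print(rulesets)   # {'sshServer': True, 'vMotion': False}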
jumpscale7/python-consistent-toml
contoml/file/file.py
https://github.com/jumpscale7/python-consistent-toml/blob/a0149c65313ccb8170aa99a0cc498e76231292b9/contoml/file/file.py#L53-L71
def _array_setitem_with_key_seq(self, array_name, index, key_seq, value):
        """
        Sets the array value in the TOML file located by the given key sequence.

        Example:
        self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value')
        is equivalent to doing
        self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value'
        """
        table = self.array(array_name)[index]
        key_so_far = tuple()
        for key in key_seq[:-1]:
            key_so_far += (key,)
            new_table = self._array_make_sure_table_exists(array_name, index, key_so_far)
            if new_table is not None:
                table = new_table
            else:
                table = table[key]
        table[key_seq[-1]] = value
[ "def", "_array_setitem_with_key_seq", "(", "self", ",", "array_name", ",", "index", ",", "key_seq", ",", "value", ")", ":", "table", "=", "self", ".", "array", "(", "array_name", ")", "[", "index", "]", "key_so_far", "=", "tuple", "(", ")", "for", "key", "in", "key_seq", "[", ":", "-", "1", "]", ":", "key_so_far", "+=", "(", "key", ",", ")", "new_table", "=", "self", ".", "_array_make_sure_table_exists", "(", "array_name", ",", "index", ",", "key_so_far", ")", "if", "new_table", "is", "not", "None", ":", "table", "=", "new_table", "else", ":", "table", "=", "table", "[", "key", "]", "table", "[", "key_seq", "[", "-", "1", "]", "]", "=", "value" ]
Sets the array value in the TOML file located by the given key sequence.

        Example:
        self._array_setitem(array_name, index, ('key1', 'key2', 'key3'), 'text_value')
        is equivalent to doing
        self.array(array_name)[index]['key1']['key2']['key3'] = 'text_value'
[ "Sets", "a", "the", "array", "value", "in", "the", "TOML", "file", "located", "by", "the", "given", "key", "sequence", "." ]
python
train
40.315789
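The method above walks a key sequence and materializes intermediate tables on demand before assigning to the last key. The same walk over plain dicts, as a minimal sketch:

def set_by_path(root, key_seq, value):
    table = root
    for key in key_seq[:-1]:
        table = table.setdefault(key, {})   # create intermediate tables on demand
    table[key_seq[-1]] = value

doc = {}
set_by_path(doc, ('key1', 'key2', 'key3'), 'text_value')
print(doc)   # {'key1': {'key2': {'key3': 'text_value'}}}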
apple/turicreate
src/unity/python/turicreate/util/__init__.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/util/__init__.py#L249-L260
def get_turicreate_object_type(url): ''' Given url where a Turi Create object is persisted, return the Turi Create object type: 'model', 'graph', 'sframe', or 'sarray' ''' from .._connect import main as _glconnect ret = _glconnect.get_unity().get_turicreate_object_type(_make_internal_url(url)) # to be consistent, we use sgraph instead of graph here if ret == 'graph': ret = 'sgraph' return ret
[ "def", "get_turicreate_object_type", "(", "url", ")", ":", "from", ".", ".", "_connect", "import", "main", "as", "_glconnect", "ret", "=", "_glconnect", ".", "get_unity", "(", ")", ".", "get_turicreate_object_type", "(", "_make_internal_url", "(", "url", ")", ")", "# to be consistent, we use sgraph instead of graph here", "if", "ret", "==", "'graph'", ":", "ret", "=", "'sgraph'", "return", "ret" ]
Given url where a Turi Create object is persisted, return the Turi Create object type: 'model', 'graph', 'sframe', or 'sarray'
[ "Given", "url", "where", "a", "Turi", "Create", "object", "is", "persisted", "return", "the", "Turi", "Create", "object", "type", ":", "model", "graph", "sframe", "or", "sarray" ]
python
train
35.75
bjoernricks/python-quilt
quilt/pop.py
https://github.com/bjoernricks/python-quilt/blob/fae88237f601848cc34d073584d9dcb409f01777/quilt/pop.py#L63-L74
def unapply_patch(self, patch_name, force=False): """ Unapply patches up to patch_name. patch_name will end up as top patch """ self._check(force) patches = self.db.patches_after(Patch(patch_name)) for patch in reversed(patches): self._unapply_patch(patch) self.db.save() self.unapplied(self.db.top_patch())
[ "def", "unapply_patch", "(", "self", ",", "patch_name", ",", "force", "=", "False", ")", ":", "self", ".", "_check", "(", "force", ")", "patches", "=", "self", ".", "db", ".", "patches_after", "(", "Patch", "(", "patch_name", ")", ")", "for", "patch", "in", "reversed", "(", "patches", ")", ":", "self", ".", "_unapply_patch", "(", "patch", ")", "self", ".", "db", ".", "save", "(", ")", "self", ".", "unapplied", "(", "self", ".", "db", ".", "top_patch", "(", ")", ")" ]
Unapply patches up to patch_name. patch_name will end up as top patch
[ "Unapply", "patches", "up", "to", "patch_name", ".", "patch_name", "will", "end", "up", "as", "top", "patch" ]
python
test
30.916667
doconix/django-mako-plus
django_mako_plus/management/commands/dmp_makemessages.py
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/management/commands/dmp_makemessages.py#L70-L100
def compile_mako_files(self, app_config): '''Compiles the Mako templates within the apps of this system''' # go through the files in the templates, scripts, and styles directories for subdir_name in self.SEARCH_DIRS: subdir = subdir_name.format( app_path=app_config.path, app_name=app_config.name, ) def recurse_path(path): self.message('searching for Mako templates in {}'.format(path), 1) if os.path.exists(path): for filename in os.listdir(path): filepath = os.path.join(path, filename) _, ext = os.path.splitext(filename) if filename.startswith('__'): # __dmpcache__, __pycache__ continue elif os.path.isdir(filepath): recurse_path(filepath) elif ext.lower() in ( '.htm', '.html', '.mako' ): # create the template object, which creates the compiled .py file self.message('compiling {}'.format(filepath), 2) try: get_template_for_path(filepath) except TemplateSyntaxError: if not self.options.get('ignore_template_errors'): raise recurse_path(subdir)
[ "def", "compile_mako_files", "(", "self", ",", "app_config", ")", ":", "# go through the files in the templates, scripts, and styles directories", "for", "subdir_name", "in", "self", ".", "SEARCH_DIRS", ":", "subdir", "=", "subdir_name", ".", "format", "(", "app_path", "=", "app_config", ".", "path", ",", "app_name", "=", "app_config", ".", "name", ",", ")", "def", "recurse_path", "(", "path", ")", ":", "self", ".", "message", "(", "'searching for Mako templates in {}'", ".", "format", "(", "path", ")", ",", "1", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "for", "filename", "in", "os", ".", "listdir", "(", "path", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "filename", ".", "startswith", "(", "'__'", ")", ":", "# __dmpcache__, __pycache__", "continue", "elif", "os", ".", "path", ".", "isdir", "(", "filepath", ")", ":", "recurse_path", "(", "filepath", ")", "elif", "ext", ".", "lower", "(", ")", "in", "(", "'.htm'", ",", "'.html'", ",", "'.mako'", ")", ":", "# create the template object, which creates the compiled .py file", "self", ".", "message", "(", "'compiling {}'", ".", "format", "(", "filepath", ")", ",", "2", ")", "try", ":", "get_template_for_path", "(", "filepath", ")", "except", "TemplateSyntaxError", ":", "if", "not", "self", ".", "options", ".", "get", "(", "'ignore_template_errors'", ")", ":", "raise", "recurse_path", "(", "subdir", ")" ]
Compiles the Mako templates within the apps of this system
[ "Compiles", "the", "Mako", "templates", "within", "the", "apps", "of", "this", "system" ]
python
train
47.483871
xiaocong/uiautomator
uiautomator/__init__.py
https://github.com/xiaocong/uiautomator/blob/9a0c892ffd056713f91aa2153d1533c5b0553a1c/uiautomator/__init__.py#L650-L658
def orientation(self, value): '''setter of orientation property.''' for values in self.__orientation: if value in values: # can not set upside-down until api level 18. self.server.jsonrpc.setOrientation(values[1]) break else: raise ValueError("Invalid orientation.")
[ "def", "orientation", "(", "self", ",", "value", ")", ":", "for", "values", "in", "self", ".", "__orientation", ":", "if", "value", "in", "values", ":", "# can not set upside-down until api level 18.", "self", ".", "server", ".", "jsonrpc", ".", "setOrientation", "(", "values", "[", "1", "]", ")", "break", "else", ":", "raise", "ValueError", "(", "\"Invalid orientation.\"", ")" ]
setter of orientation property.
[ "setter", "of", "orientation", "property", "." ]
python
train
39.333333
Vital-Fernandez/dazer
bin/lib/Astro_Libraries/f2n.py
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Astro_Libraries/f2n.py#L744-L765
def drawcircle(self, x, y, r = 10, colour = None, label = None):
        """
        Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image !
        You give these x and y in the usual ds9 pixels, (0,0) is bottom left.
        I will convert this into the right PIL coordinates.
        """
        self.checkforpilimage()
        colour = self.defaultcolour(colour)
        self.changecolourmode(colour)
        self.makedraw()

        (pilx, pily) = self.pilcoords((x,y))
        pilr = self.pilscale(r)

        self.draw.ellipse([(pilx-pilr+1, pily-pilr+1), (pilx+pilr+1, pily+pilr+1)], outline = colour)

        if label != None:
            # Then we write it :
            self.loadlabelfont()
            textwidth = self.draw.textsize(label, font = self.labelfont)[0]
            self.draw.text((pilx - float(textwidth)/2.0 + 2, pily + pilr + 4), label, fill = colour, font = self.labelfont)
[ "def", "drawcircle", "(", "self", ",", "x", ",", "y", ",", "r", "=", "10", ",", "colour", "=", "None", ",", "label", "=", "None", ")", ":", "self", ".", "checkforpilimage", "(", ")", "colour", "=", "self", ".", "defaultcolour", "(", "colour", ")", "self", ".", "changecolourmode", "(", "colour", ")", "self", ".", "makedraw", "(", ")", "(", "pilx", ",", "pily", ")", "=", "self", ".", "pilcoords", "(", "(", "x", ",", "y", ")", ")", "pilr", "=", "self", ".", "pilscale", "(", "r", ")", "self", ".", "draw", ".", "ellipse", "(", "[", "(", "pilx", "-", "pilr", "+", "1", ",", "pily", "-", "pilr", "+", "1", ")", ",", "(", "pilx", "+", "pilr", "+", "1", ",", "pily", "+", "pilr", "+", "1", ")", "]", ",", "outline", "=", "colour", ")", "if", "label", "!=", "None", ":", "# The we write it :", "self", ".", "loadlabelfont", "(", ")", "textwidth", "=", "self", ".", "draw", ".", "textsize", "(", "label", ",", "font", "=", "self", ".", "labelfont", ")", "[", "0", "]", "self", ".", "draw", ".", "text", "(", "(", "pilx", "-", "float", "(", "textwidth", ")", "/", "2.0", "+", "2", ",", "pily", "+", "pilr", "+", "4", ")", ",", "label", ",", "fill", "=", "colour", ",", "font", "=", "self", ".", "labelfont", ")" ]
Draws a circle centered on (x, y) with radius r. All these are in the coordinates of your initial image !
        You give these x and y in the usual ds9 pixels, (0,0) is bottom left.
        I will convert this into the right PIL coordinates.
[ "Draws", "a", "circle", "centered", "on", "(", "x", "y", ")", "with", "radius", "r", ".", "All", "these", "are", "in", "the", "coordinates", "of", "your", "initial", "image", "!", "You", "give", "these", "x", "and", "y", "in", "the", "usual", "ds9", "pixels", "(", "0", "0", ")", "is", "bottom", "left", ".", "I", "will", "convert", "this", "into", "the", "right", "PIL", "coordiates", "." ]
python
train
43.727273
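`drawcircle` relies on two conversions: ds9-style coordinates (origin at the bottom left) are flipped into PIL coordinates (origin at the top left), and the radius is scaled before `ImageDraw.ellipse` receives its bounding box. A standalone Pillow sketch of the same drawing step -- image size and colours are arbitrary:

from PIL import Image, ImageDraw

w, h, r = 200, 100, 10
x, y = 50, 20                 # ds9-style position: origin at the bottom left
pilx, pily = x, h - y         # flip the y axis for PIL's top-left origin

im = Image.new('RGB', (w, h), 'black')
draw = ImageDraw.Draw(im)
draw.ellipse([(pilx - r, pily - r), (pilx + r, pily + r)], outline='red')
im.save('circle.png')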
ronaldguillen/wave
wave/renderers.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/renderers.py#L607-L665
def get_context(self, data, accepted_media_type, renderer_context): """ Returns the context used to render. """ view = renderer_context['view'] request = renderer_context['request'] response = renderer_context['response'] renderer = self.get_default_renderer(view) raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request) raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request) raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request) raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form response_headers = OrderedDict(sorted(response.items())) renderer_content_type = '' if renderer: renderer_content_type = '%s' % renderer.media_type if renderer.charset: renderer_content_type += ' ;%s' % renderer.charset response_headers['Content-Type'] = renderer_content_type if getattr(view, 'paginator', None) and view.paginator.display_page_controls: paginator = view.paginator else: paginator = None context = { 'content': self.get_content(renderer, data, accepted_media_type, renderer_context), 'view': view, 'request': request, 'response': response, 'description': self.get_description(view, response.status_code), 'name': self.get_name(view), 'version': VERSION, 'paginator': paginator, 'breadcrumblist': self.get_breadcrumbs(request), 'allowed_methods': view.allowed_methods, 'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes], 'response_headers': response_headers, 'put_form': self.get_rendered_html_form(data, view, 'PUT', request), 'post_form': self.get_rendered_html_form(data, view, 'POST', request), 'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request), 'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request), 'filter_form': self.get_filter_form(data, view, request), 'raw_data_put_form': raw_data_put_form, 'raw_data_post_form': raw_data_post_form, 'raw_data_patch_form': raw_data_patch_form, 'raw_data_put_or_patch_form': raw_data_put_or_patch_form, 'display_edit_forms': bool(response.status_code != 403), 'api_settings': api_settings } return context
[ "def", "get_context", "(", "self", ",", "data", ",", "accepted_media_type", ",", "renderer_context", ")", ":", "view", "=", "renderer_context", "[", "'view'", "]", "request", "=", "renderer_context", "[", "'request'", "]", "response", "=", "renderer_context", "[", "'response'", "]", "renderer", "=", "self", ".", "get_default_renderer", "(", "view", ")", "raw_data_post_form", "=", "self", ".", "get_raw_data_form", "(", "data", ",", "view", ",", "'POST'", ",", "request", ")", "raw_data_put_form", "=", "self", ".", "get_raw_data_form", "(", "data", ",", "view", ",", "'PUT'", ",", "request", ")", "raw_data_patch_form", "=", "self", ".", "get_raw_data_form", "(", "data", ",", "view", ",", "'PATCH'", ",", "request", ")", "raw_data_put_or_patch_form", "=", "raw_data_put_form", "or", "raw_data_patch_form", "response_headers", "=", "OrderedDict", "(", "sorted", "(", "response", ".", "items", "(", ")", ")", ")", "renderer_content_type", "=", "''", "if", "renderer", ":", "renderer_content_type", "=", "'%s'", "%", "renderer", ".", "media_type", "if", "renderer", ".", "charset", ":", "renderer_content_type", "+=", "' ;%s'", "%", "renderer", ".", "charset", "response_headers", "[", "'Content-Type'", "]", "=", "renderer_content_type", "if", "getattr", "(", "view", ",", "'paginator'", ",", "None", ")", "and", "view", ".", "paginator", ".", "display_page_controls", ":", "paginator", "=", "view", ".", "paginator", "else", ":", "paginator", "=", "None", "context", "=", "{", "'content'", ":", "self", ".", "get_content", "(", "renderer", ",", "data", ",", "accepted_media_type", ",", "renderer_context", ")", ",", "'view'", ":", "view", ",", "'request'", ":", "request", ",", "'response'", ":", "response", ",", "'description'", ":", "self", ".", "get_description", "(", "view", ",", "response", ".", "status_code", ")", ",", "'name'", ":", "self", ".", "get_name", "(", "view", ")", ",", "'version'", ":", "VERSION", ",", "'paginator'", ":", "paginator", ",", "'breadcrumblist'", ":", "self", ".", "get_breadcrumbs", "(", "request", ")", ",", "'allowed_methods'", ":", "view", ".", "allowed_methods", ",", "'available_formats'", ":", "[", "renderer_cls", ".", "format", "for", "renderer_cls", "in", "view", ".", "renderer_classes", "]", ",", "'response_headers'", ":", "response_headers", ",", "'put_form'", ":", "self", ".", "get_rendered_html_form", "(", "data", ",", "view", ",", "'PUT'", ",", "request", ")", ",", "'post_form'", ":", "self", ".", "get_rendered_html_form", "(", "data", ",", "view", ",", "'POST'", ",", "request", ")", ",", "'delete_form'", ":", "self", ".", "get_rendered_html_form", "(", "data", ",", "view", ",", "'DELETE'", ",", "request", ")", ",", "'options_form'", ":", "self", ".", "get_rendered_html_form", "(", "data", ",", "view", ",", "'OPTIONS'", ",", "request", ")", ",", "'filter_form'", ":", "self", ".", "get_filter_form", "(", "data", ",", "view", ",", "request", ")", ",", "'raw_data_put_form'", ":", "raw_data_put_form", ",", "'raw_data_post_form'", ":", "raw_data_post_form", ",", "'raw_data_patch_form'", ":", "raw_data_patch_form", ",", "'raw_data_put_or_patch_form'", ":", "raw_data_put_or_patch_form", ",", "'display_edit_forms'", ":", "bool", "(", "response", ".", "status_code", "!=", "403", ")", ",", "'api_settings'", ":", "api_settings", "}", "return", "context" ]
Returns the context used to render.
[ "Returns", "the", "context", "used", "to", "render", "." ]
python
train
43.237288
twilio/twilio-python
twilio/rest/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/__init__.py#L250-L260
def lookups(self): """ Access the Lookups Twilio Domain :returns: Lookups Twilio Domain :rtype: twilio.rest.lookups.Lookups """ if self._lookups is None: from twilio.rest.lookups import Lookups self._lookups = Lookups(self) return self._lookups
[ "def", "lookups", "(", "self", ")", ":", "if", "self", ".", "_lookups", "is", "None", ":", "from", "twilio", ".", "rest", ".", "lookups", "import", "Lookups", "self", ".", "_lookups", "=", "Lookups", "(", "self", ")", "return", "self", ".", "_lookups" ]
Access the Lookups Twilio Domain :returns: Lookups Twilio Domain :rtype: twilio.rest.lookups.Lookups
[ "Access", "the", "Lookups", "Twilio", "Domain" ]
python
train
28.636364
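The `lookups` property is the usual lazily initialized sub-client: the import is deferred until first access and the constructed instance is cached. The pattern in isolation, with a hypothetical stand-in class instead of the real domain object:

class LookupsStandIn:
    """Hypothetical stand-in for an expensive sub-client."""
    def __init__(self, parent):
        print('constructed once')
        self.parent = parent

class Client:
    def __init__(self):
        self._lookups = None

    @property
    def lookups(self):
        if self._lookups is None:          # only the first access pays the cost
            self._lookups = LookupsStandIn(self)
        return self._lookups

client = Client()
assert client.lookups is client.lookups    # 'constructed once' prints a single time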
abseil/abseil-py
absl/flags/_validators.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/flags/_validators.py#L196-L223
def register_validator(flag_name, checker, message='Flag validation failed', flag_values=_flagvalues.FLAGS): """Adds a constraint, which will be enforced during program execution. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_name: str, name of the flag to be checked. checker: callable, a function to validate the flag. input - A single positional argument: The value of the corresponding flag (string, boolean, etc. This value will be passed to checker by the library). output - bool, True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise flags.ValidationError(desired_error_message). message: str, error text to be shown to the user if checker returns False. If checker raises flags.ValidationError, message from the raised error will be shown. flag_values: flags.FlagValues, optional FlagValues instance to validate against. Raises: AttributeError: Raised when flag_name is not registered as a valid flag name. """ v = SingleFlagValidator(flag_name, checker, message) _add_validator(flag_values, v)
[ "def", "register_validator", "(", "flag_name", ",", "checker", ",", "message", "=", "'Flag validation failed'", ",", "flag_values", "=", "_flagvalues", ".", "FLAGS", ")", ":", "v", "=", "SingleFlagValidator", "(", "flag_name", ",", "checker", ",", "message", ")", "_add_validator", "(", "flag_values", ",", "v", ")" ]
Adds a constraint, which will be enforced during program execution. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_name: str, name of the flag to be checked. checker: callable, a function to validate the flag. input - A single positional argument: The value of the corresponding flag (string, boolean, etc. This value will be passed to checker by the library). output - bool, True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise flags.ValidationError(desired_error_message). message: str, error text to be shown to the user if checker returns False. If checker raises flags.ValidationError, message from the raised error will be shown. flag_values: flags.FlagValues, optional FlagValues instance to validate against. Raises: AttributeError: Raised when flag_name is not registered as a valid flag name.
[ "Adds", "a", "constraint", "which", "will", "be", "enforced", "during", "program", "execution", "." ]
python
train
47.178571
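Typical usage of `register_validator`, assuming the standard `absl` package layout; the flag name and message below are illustrative:

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_integer('retries', 3, 'How many times to retry.')

# Checked when flags are parsed and again on every later assignment.
flags.register_validator('retries',
                         lambda value: value >= 0,
                         message='--retries must be non-negative')

def main(argv):
    print(FLAGS.retries)

if __name__ == '__main__':
    app.run(main)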
hollenstein/maspy
maspy/featuregrouping.py
https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/featuregrouping.py#L356-L386
def calcDistMatchArr(matchArr, tKey, mKey):
    """Calculate the euclidean distance of all array positions in "matchArr".

    :param matchArr: a dictionary of ``numpy.arrays`` containing at least two
        entries that are treated as cartesian coordinates.
    :param tKey: #TODO: docstring
    :param mKey: #TODO: docstring

    :returns: #TODO: docstring

        {'eucDist': numpy.array([eucDistance, eucDistance, ...]),
         'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
         }
    """
    #Calculate a sorted list of all euclidean feature distances
    matchArrSize = listvalues(matchArr)[0].size

    distInfo = {'posPairs': list(), 'eucDist': list()}
    _matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1)
    for pos1 in range(matchArrSize-1):
        for pos2 in range(pos1+1, matchArrSize):
            distInfo['posPairs'].append((pos1, pos2))
    distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
    distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)

    distSort = numpy.argsort(distInfo['eucDist'])
    for key in list(viewkeys(distInfo)):
        distInfo[key] = distInfo[key][distSort]

    return distInfo
[ "def", "calcDistMatchArr", "(", "matchArr", ",", "tKey", ",", "mKey", ")", ":", "#Calculate all sorted list of all eucledian feature distances", "matchArrSize", "=", "listvalues", "(", "matchArr", ")", "[", "0", "]", ".", "size", "distInfo", "=", "{", "'posPairs'", ":", "list", "(", ")", ",", "'eucDist'", ":", "list", "(", ")", "}", "_matrix", "=", "numpy", ".", "swapaxes", "(", "numpy", ".", "array", "(", "[", "matchArr", "[", "tKey", "]", ",", "matchArr", "[", "mKey", "]", "]", ")", ",", "0", ",", "1", ")", "for", "pos1", "in", "range", "(", "matchArrSize", "-", "1", ")", ":", "for", "pos2", "in", "range", "(", "pos1", "+", "1", ",", "matchArrSize", ")", ":", "distInfo", "[", "'posPairs'", "]", ".", "append", "(", "(", "pos1", ",", "pos2", ")", ")", "distInfo", "[", "'posPairs'", "]", "=", "numpy", ".", "array", "(", "distInfo", "[", "'posPairs'", "]", ")", "distInfo", "[", "'eucDist'", "]", "=", "scipy", ".", "spatial", ".", "distance", ".", "pdist", "(", "_matrix", ")", "distSort", "=", "numpy", ".", "argsort", "(", "distInfo", "[", "'eucDist'", "]", ")", "for", "key", "in", "list", "(", "viewkeys", "(", "distInfo", ")", ")", ":", "distInfo", "[", "key", "]", "=", "distInfo", "[", "key", "]", "[", "distSort", "]", "return", "distInfo" ]
Calculate the euclidean distance of all array positions in "matchArr". :param matchArr: a dictionary of ``numpy.arrays`` containing at least two entries that are treated as cartesian coordinates. :param tKey: #TODO: docstring :param mKey: #TODO: docstring :returns: #TODO: docstring {'eucDist': numpy.array([eucDistance, eucDistance, ...]), 'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...]) }
[ "Calculate", "the", "euclidean", "distance", "of", "all", "array", "positions", "in", "matchArr", "." ]
python
train
37.935484
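`scipy.spatial.distance.pdist` returns a condensed vector whose entries follow the pair order (0,1), (0,2), ..., (1,2), ... -- exactly the order the nested loop above builds `posPairs` in, which is why the two arrays can be sorted by the same index. A minimal sketch of the pairing and sorting:

import numpy as np
import scipy.spatial.distance

points = np.array([[0.0, 0.0],
                   [3.0, 4.0],
                   [0.0, 1.0]])
euc_dist = scipy.spatial.distance.pdist(points)   # [5.0, 1.0, ~4.24] for the pairs below
pos_pairs = np.array([(0, 1), (0, 2), (1, 2)])
order = np.argsort(euc_dist)
print(pos_pairs[order])   # closest pair first: [[0 2] [1 2] [0 1]]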
pytroll/satpy
satpy/readers/nwcsaf_nc.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/nwcsaf_nc.py#L129-L187
def scale_dataset(self, dsid, variable, info): """Scale the data set, applying the attributes from the netCDF file""" variable = remove_empties(variable) scale = variable.attrs.get('scale_factor', np.array(1)) offset = variable.attrs.get('add_offset', np.array(0)) if np.issubdtype((scale + offset).dtype, np.floating) or np.issubdtype(variable.dtype, np.floating): if '_FillValue' in variable.attrs: variable = variable.where( variable != variable.attrs['_FillValue']) variable.attrs['_FillValue'] = np.nan if 'valid_range' in variable.attrs: variable = variable.where( variable <= variable.attrs['valid_range'][1]) variable = variable.where( variable >= variable.attrs['valid_range'][0]) if 'valid_max' in variable.attrs: variable = variable.where( variable <= variable.attrs['valid_max']) if 'valid_min' in variable.attrs: variable = variable.where( variable >= variable.attrs['valid_min']) attrs = variable.attrs variable = variable * scale + offset variable.attrs = attrs variable.attrs.update({'platform_name': self.platform_name, 'sensor': self.sensor}) variable.attrs.setdefault('units', '1') ancillary_names = variable.attrs.get('ancillary_variables', '') try: variable.attrs['ancillary_variables'] = ancillary_names.split() except AttributeError: pass if 'palette_meanings' in variable.attrs: variable.attrs['palette_meanings'] = [int(val) for val in variable.attrs['palette_meanings'].split()] if variable.attrs['palette_meanings'][0] == 1: variable.attrs['palette_meanings'] = [0] + variable.attrs['palette_meanings'] variable = xr.DataArray(da.vstack((np.array(variable.attrs['fill_value_color']), variable.data)), coords=variable.coords, dims=variable.dims, attrs=variable.attrs) val, idx = np.unique(variable.attrs['palette_meanings'], return_index=True) variable.attrs['palette_meanings'] = val variable = variable[idx] if 'standard_name' in info: variable.attrs.setdefault('standard_name', info['standard_name']) if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti': # pps 2014 valid range and palette don't match variable.attrs['valid_range'] = (0., 9000.) if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti_pal': # pps 2014 palette has the nodata color (black) first variable = variable[1:, :] return variable
[ "def", "scale_dataset", "(", "self", ",", "dsid", ",", "variable", ",", "info", ")", ":", "variable", "=", "remove_empties", "(", "variable", ")", "scale", "=", "variable", ".", "attrs", ".", "get", "(", "'scale_factor'", ",", "np", ".", "array", "(", "1", ")", ")", "offset", "=", "variable", ".", "attrs", ".", "get", "(", "'add_offset'", ",", "np", ".", "array", "(", "0", ")", ")", "if", "np", ".", "issubdtype", "(", "(", "scale", "+", "offset", ")", ".", "dtype", ",", "np", ".", "floating", ")", "or", "np", ".", "issubdtype", "(", "variable", ".", "dtype", ",", "np", ".", "floating", ")", ":", "if", "'_FillValue'", "in", "variable", ".", "attrs", ":", "variable", "=", "variable", ".", "where", "(", "variable", "!=", "variable", ".", "attrs", "[", "'_FillValue'", "]", ")", "variable", ".", "attrs", "[", "'_FillValue'", "]", "=", "np", ".", "nan", "if", "'valid_range'", "in", "variable", ".", "attrs", ":", "variable", "=", "variable", ".", "where", "(", "variable", "<=", "variable", ".", "attrs", "[", "'valid_range'", "]", "[", "1", "]", ")", "variable", "=", "variable", ".", "where", "(", "variable", ">=", "variable", ".", "attrs", "[", "'valid_range'", "]", "[", "0", "]", ")", "if", "'valid_max'", "in", "variable", ".", "attrs", ":", "variable", "=", "variable", ".", "where", "(", "variable", "<=", "variable", ".", "attrs", "[", "'valid_max'", "]", ")", "if", "'valid_min'", "in", "variable", ".", "attrs", ":", "variable", "=", "variable", ".", "where", "(", "variable", ">=", "variable", ".", "attrs", "[", "'valid_min'", "]", ")", "attrs", "=", "variable", ".", "attrs", "variable", "=", "variable", "*", "scale", "+", "offset", "variable", ".", "attrs", "=", "attrs", "variable", ".", "attrs", ".", "update", "(", "{", "'platform_name'", ":", "self", ".", "platform_name", ",", "'sensor'", ":", "self", ".", "sensor", "}", ")", "variable", ".", "attrs", ".", "setdefault", "(", "'units'", ",", "'1'", ")", "ancillary_names", "=", "variable", ".", "attrs", ".", "get", "(", "'ancillary_variables'", ",", "''", ")", "try", ":", "variable", ".", "attrs", "[", "'ancillary_variables'", "]", "=", "ancillary_names", ".", "split", "(", ")", "except", "AttributeError", ":", "pass", "if", "'palette_meanings'", "in", "variable", ".", "attrs", ":", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "=", "[", "int", "(", "val", ")", "for", "val", "in", "variable", ".", "attrs", "[", "'palette_meanings'", "]", ".", "split", "(", ")", "]", "if", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "[", "0", "]", "==", "1", ":", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "=", "[", "0", "]", "+", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "variable", "=", "xr", ".", "DataArray", "(", "da", ".", "vstack", "(", "(", "np", ".", "array", "(", "variable", ".", "attrs", "[", "'fill_value_color'", "]", ")", ",", "variable", ".", "data", ")", ")", ",", "coords", "=", "variable", ".", "coords", ",", "dims", "=", "variable", ".", "dims", ",", "attrs", "=", "variable", ".", "attrs", ")", "val", ",", "idx", "=", "np", ".", "unique", "(", "variable", ".", "attrs", "[", "'palette_meanings'", "]", ",", "return_index", "=", "True", ")", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "=", "val", "variable", "=", "variable", "[", "idx", "]", "if", "'standard_name'", "in", "info", ":", "variable", ".", "attrs", ".", "setdefault", "(", "'standard_name'", ",", "info", "[", "'standard_name'", "]", ")", "if", "self", ".", "sw_version", "==", "'NWC/PPS version v2014'", "and", "dsid", 
".", "name", "==", "'ctth_alti'", ":", "# pps 2014 valid range and palette don't match", "variable", ".", "attrs", "[", "'valid_range'", "]", "=", "(", "0.", ",", "9000.", ")", "if", "self", ".", "sw_version", "==", "'NWC/PPS version v2014'", "and", "dsid", ".", "name", "==", "'ctth_alti_pal'", ":", "# pps 2014 palette has the nodata color (black) first", "variable", "=", "variable", "[", "1", ":", ",", ":", "]", "return", "variable" ]
Scale the data set, applying the attributes from the netCDF file
[ "Scale", "the", "data", "set", "applying", "the", "attributes", "from", "the", "netCDF", "file" ]
python
train
49.288136
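The core of `scale_dataset` is the CF-style decode step: mask the fill value, apply scale/offset, and restore the attributes (xarray arithmetic drops them by default). A minimal sketch with made-up numbers:

import numpy as np
import xarray as xr

raw = xr.DataArray(np.array([0, 5000, 65535], dtype=np.uint16),
                   attrs={'scale_factor': 0.01,
                          'add_offset': 200.0,
                          '_FillValue': 65535})

scale = raw.attrs.get('scale_factor', np.array(1))
offset = raw.attrs.get('add_offset', np.array(0))
masked = raw.where(raw != raw.attrs['_FillValue'])   # fill value becomes NaN
attrs = raw.attrs
physical = masked * scale + offset                   # arithmetic drops attrs...
physical.attrs = attrs                               # ...so they are restored
print(physical.values)                               # [200.  250.   nan]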
secdev/scapy
scapy/plist.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/plist.py#L379-L461
def afterglow(self, src=None, event=None, dst=None, **kargs): """Experimental clone attempt of http://sourceforge.net/projects/afterglow each datum is reduced as src -> event -> dst and the data are graphed. by default we have IP.src -> IP.dport -> IP.dst""" if src is None: src = lambda x: x['IP'].src if event is None: event = lambda x: x['IP'].dport if dst is None: dst = lambda x: x['IP'].dst sl = {} el = {} dl = {} for i in self.res: try: s, e, d = src(i), event(i), dst(i) if s in sl: n, lst = sl[s] n += 1 if e not in lst: lst.append(e) sl[s] = (n, lst) else: sl[s] = (1, [e]) if e in el: n, lst = el[e] n += 1 if d not in lst: lst.append(d) el[e] = (n, lst) else: el[e] = (1, [d]) dl[d] = dl.get(d, 0) + 1 except Exception: continue import math def normalize(n): return 2 + math.log(n) / 4.0 def minmax(x): m, M = reduce(lambda a, b: (min(a[0], b[0]), max(a[1], b[1])), ((a, a) for a in x)) if m == M: m = 0 if M == 0: M = 1 return m, M mins, maxs = minmax(x for x, _ in six.itervalues(sl)) mine, maxe = minmax(x for x, _ in six.itervalues(el)) mind, maxd = minmax(six.itervalues(dl)) gr = 'digraph "afterglow" {\n\tedge [len=2.5];\n' gr += "# src nodes\n" for s in sl: n, _ = sl[s] n = 1 + float(n - mins) / (maxs - mins) gr += '"src.%s" [label = "%s", shape=box, fillcolor="#FF0000", style=filled, fixedsize=1, height=%.2f,width=%.2f];\n' % (repr(s), repr(s), n, n) # noqa: E501 gr += "# event nodes\n" for e in el: n, _ = el[e] n = n = 1 + float(n - mine) / (maxe - mine) gr += '"evt.%s" [label = "%s", shape=circle, fillcolor="#00FFFF", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(e), repr(e), n, n) # noqa: E501 for d in dl: n = dl[d] n = n = 1 + float(n - mind) / (maxd - mind) gr += '"dst.%s" [label = "%s", shape=triangle, fillcolor="#0000ff", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(d), repr(d), n, n) # noqa: E501 gr += "###\n" for s in sl: n, lst = sl[s] for e in lst: gr += ' "src.%s" -> "evt.%s";\n' % (repr(s), repr(e)) for e in el: n, lst = el[e] for d in lst: gr += ' "evt.%s" -> "dst.%s";\n' % (repr(e), repr(d)) gr += "}" return do_graph(gr, **kargs)
[ "def", "afterglow", "(", "self", ",", "src", "=", "None", ",", "event", "=", "None", ",", "dst", "=", "None", ",", "*", "*", "kargs", ")", ":", "if", "src", "is", "None", ":", "src", "=", "lambda", "x", ":", "x", "[", "'IP'", "]", ".", "src", "if", "event", "is", "None", ":", "event", "=", "lambda", "x", ":", "x", "[", "'IP'", "]", ".", "dport", "if", "dst", "is", "None", ":", "dst", "=", "lambda", "x", ":", "x", "[", "'IP'", "]", ".", "dst", "sl", "=", "{", "}", "el", "=", "{", "}", "dl", "=", "{", "}", "for", "i", "in", "self", ".", "res", ":", "try", ":", "s", ",", "e", ",", "d", "=", "src", "(", "i", ")", ",", "event", "(", "i", ")", ",", "dst", "(", "i", ")", "if", "s", "in", "sl", ":", "n", ",", "lst", "=", "sl", "[", "s", "]", "n", "+=", "1", "if", "e", "not", "in", "lst", ":", "lst", ".", "append", "(", "e", ")", "sl", "[", "s", "]", "=", "(", "n", ",", "lst", ")", "else", ":", "sl", "[", "s", "]", "=", "(", "1", ",", "[", "e", "]", ")", "if", "e", "in", "el", ":", "n", ",", "lst", "=", "el", "[", "e", "]", "n", "+=", "1", "if", "d", "not", "in", "lst", ":", "lst", ".", "append", "(", "d", ")", "el", "[", "e", "]", "=", "(", "n", ",", "lst", ")", "else", ":", "el", "[", "e", "]", "=", "(", "1", ",", "[", "d", "]", ")", "dl", "[", "d", "]", "=", "dl", ".", "get", "(", "d", ",", "0", ")", "+", "1", "except", "Exception", ":", "continue", "import", "math", "def", "normalize", "(", "n", ")", ":", "return", "2", "+", "math", ".", "log", "(", "n", ")", "/", "4.0", "def", "minmax", "(", "x", ")", ":", "m", ",", "M", "=", "reduce", "(", "lambda", "a", ",", "b", ":", "(", "min", "(", "a", "[", "0", "]", ",", "b", "[", "0", "]", ")", ",", "max", "(", "a", "[", "1", "]", ",", "b", "[", "1", "]", ")", ")", ",", "(", "(", "a", ",", "a", ")", "for", "a", "in", "x", ")", ")", "if", "m", "==", "M", ":", "m", "=", "0", "if", "M", "==", "0", ":", "M", "=", "1", "return", "m", ",", "M", "mins", ",", "maxs", "=", "minmax", "(", "x", "for", "x", ",", "_", "in", "six", ".", "itervalues", "(", "sl", ")", ")", "mine", ",", "maxe", "=", "minmax", "(", "x", "for", "x", ",", "_", "in", "six", ".", "itervalues", "(", "el", ")", ")", "mind", ",", "maxd", "=", "minmax", "(", "six", ".", "itervalues", "(", "dl", ")", ")", "gr", "=", "'digraph \"afterglow\" {\\n\\tedge [len=2.5];\\n'", "gr", "+=", "\"# src nodes\\n\"", "for", "s", "in", "sl", ":", "n", ",", "_", "=", "sl", "[", "s", "]", "n", "=", "1", "+", "float", "(", "n", "-", "mins", ")", "/", "(", "maxs", "-", "mins", ")", "gr", "+=", "'\"src.%s\" [label = \"%s\", shape=box, fillcolor=\"#FF0000\", style=filled, fixedsize=1, height=%.2f,width=%.2f];\\n'", "%", "(", "repr", "(", "s", ")", ",", "repr", "(", "s", ")", ",", "n", ",", "n", ")", "# noqa: E501", "gr", "+=", "\"# event nodes\\n\"", "for", "e", "in", "el", ":", "n", ",", "_", "=", "el", "[", "e", "]", "n", "=", "n", "=", "1", "+", "float", "(", "n", "-", "mine", ")", "/", "(", "maxe", "-", "mine", ")", "gr", "+=", "'\"evt.%s\" [label = \"%s\", shape=circle, fillcolor=\"#00FFFF\", style=filled, fixedsize=1, height=%.2f, width=%.2f];\\n'", "%", "(", "repr", "(", "e", ")", ",", "repr", "(", "e", ")", ",", "n", ",", "n", ")", "# noqa: E501", "for", "d", "in", "dl", ":", "n", "=", "dl", "[", "d", "]", "n", "=", "n", "=", "1", "+", "float", "(", "n", "-", "mind", ")", "/", "(", "maxd", "-", "mind", ")", "gr", "+=", "'\"dst.%s\" [label = \"%s\", shape=triangle, fillcolor=\"#0000ff\", style=filled, fixedsize=1, height=%.2f, width=%.2f];\\n'", "%", "(", "repr", "(", "d", ")", ",", "repr", "(", "d", ")", 
",", "n", ",", "n", ")", "# noqa: E501", "gr", "+=", "\"###\\n\"", "for", "s", "in", "sl", ":", "n", ",", "lst", "=", "sl", "[", "s", "]", "for", "e", "in", "lst", ":", "gr", "+=", "' \"src.%s\" -> \"evt.%s\";\\n'", "%", "(", "repr", "(", "s", ")", ",", "repr", "(", "e", ")", ")", "for", "e", "in", "el", ":", "n", ",", "lst", "=", "el", "[", "e", "]", "for", "d", "in", "lst", ":", "gr", "+=", "' \"evt.%s\" -> \"dst.%s\";\\n'", "%", "(", "repr", "(", "e", ")", ",", "repr", "(", "d", ")", ")", "gr", "+=", "\"}\"", "return", "do_graph", "(", "gr", ",", "*", "*", "kargs", ")" ]
Experimental clone attempt of http://sourceforge.net/projects/afterglow each datum is reduced as src -> event -> dst and the data are graphed. by default we have IP.src -> IP.dport -> IP.dst
[ "Experimental", "clone", "attempt", "of", "http", ":", "//", "sourceforge", ".", "net", "/", "projects", "/", "afterglow", "each", "datum", "is", "reduced", "as", "src", "-", ">", "event", "-", ">", "dst", "and", "the", "data", "are", "graphed", ".", "by", "default", "we", "have", "IP", ".", "src", "-", ">", "IP", ".", "dport", "-", ">", "IP", ".", "dst" ]
python
train
36.048193
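Node sizes in the generated dot graph come from min-max normalizing each node's count into the range [1, 2]; the inner `minmax` helper guards against a zero division when all counts are equal. The arithmetic in isolation:

def minmax(values):
    m, M = min(values), max(values)
    if m == M:
        m = 0          # avoid dividing by zero when all counts match
    if M == 0:
        M = 1
    return m, M

counts = [1, 4, 7]
lo, hi = minmax(counts)
sizes = [1 + float(n - lo) / (hi - lo) for n in counts]
print(sizes)   # [1.0, 1.5, 2.0]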
bitesofcode/projex
projex/hooks.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/hooks.py#L67-L85
def displayhook(value): """ Runs all of the registered display hook methods with the given value. Look at the sys.displayhook documentation for more information. :param value | <variant> """ global _displayhooks new_hooks = [] for hook_ref in _displayhooks: hook = hook_ref() if hook: hook(value) new_hooks.append(hook_ref) _displayhooks = new_hooks sys.__displayhook__(value)
[ "def", "displayhook", "(", "value", ")", ":", "global", "_displayhooks", "new_hooks", "=", "[", "]", "for", "hook_ref", "in", "_displayhooks", ":", "hook", "=", "hook_ref", "(", ")", "if", "hook", ":", "hook", "(", "value", ")", "new_hooks", ".", "append", "(", "hook_ref", ")", "_displayhooks", "=", "new_hooks", "sys", ".", "__displayhook__", "(", "value", ")" ]
Runs all of the registered display hook methods with the given value. Look at the sys.displayhook documentation for more information. :param value | <variant>
[ "Runs", "all", "of", "the", "registered", "display", "hook", "methods", "with", "the", "given", "value", ".", "Look", "at", "the", "sys", ".", "displayhook", "documentation", "for", "more", "information", ".", ":", "param", "value", "|", "<variant", ">" ]
python
train
23.736842
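Because `_displayhooks` stores weak references, a registered hook vanishes once its owner is garbage collected, and dead references are pruned on the next call. The same pattern as a self-contained script:

import sys
import weakref

_hooks = []

def add_hook(func):
    _hooks.append(weakref.ref(func))

def displayhook(value):
    global _hooks
    alive = []
    for ref in _hooks:
        hook = ref()
        if hook is not None:       # skip and drop collected hooks
            hook(value)
            alive.append(ref)
    _hooks = alive
    sys.__displayhook__(value)     # preserve the default behaviour

def echo(value):
    print('hook saw:', value)

add_hook(echo)
displayhook(42)   # prints 'hook saw: 42', then the default '42'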
FlorianRhiem/pyGLFW
glfw/__init__.py
https://github.com/FlorianRhiem/pyGLFW/blob/87767dfbe15ba15d2a8338cdfddf6afc6a25dff5/glfw/__init__.py#L1629-L1639
def get_joystick_buttons(joy): """ Returns the state of all buttons of the specified joystick. Wrapper for: const unsigned char* glfwGetJoystickButtons(int joy, int* count); """ count_value = ctypes.c_int(0) count = ctypes.pointer(count_value) result = _glfw.glfwGetJoystickButtons(joy, count) return result, count_value.value
[ "def", "get_joystick_buttons", "(", "joy", ")", ":", "count_value", "=", "ctypes", ".", "c_int", "(", "0", ")", "count", "=", "ctypes", ".", "pointer", "(", "count_value", ")", "result", "=", "_glfw", ".", "glfwGetJoystickButtons", "(", "joy", ",", "count", ")", "return", "result", ",", "count_value", ".", "value" ]
Returns the state of all buttons of the specified joystick. Wrapper for: const unsigned char* glfwGetJoystickButtons(int joy, int* count);
[ "Returns", "the", "state", "of", "all", "buttons", "of", "the", "specified", "joystick", "." ]
python
train
32.454545
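`glfwGetJoystickButtons` fills an `int*` out-parameter with the element count; the ctypes side of that contract, isolated from GLFW (the simulated write below stands in for the real C call):

import ctypes

count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)

# A real call such as _glfw.glfwGetJoystickButtons(joy, count) writes the
# button count through the pointer; simulate that write here:
count.contents.value = 14

print(count_value.value)   # 14 -- the Python int mirrors what C wrote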
airspeed-velocity/asv
asv/plugins/regressions.py
https://github.com/airspeed-velocity/asv/blob/d23bb8b74e8adacbfa3cf5724bda55fb39d56ba6/asv/plugins/regressions.py#L271-L314
def _get_start_revision(self, graph, benchmark, entry_name): """ Compute the first revision allowed by asv.conf.json. Revisions correspond to linearized commit history and the regression detection runs on this order --- the starting commit thus corresponds to a specific starting revision. """ start_revision = min(six.itervalues(self.revisions)) if graph.params.get('branch'): branch_suffix = '@' + graph.params.get('branch') else: branch_suffix = '' for regex, start_commit in six.iteritems(self.conf.regressions_first_commits): if re.match(regex, entry_name + branch_suffix): if start_commit is None: # Disable regression detection completely return None if self.conf.branches == [None]: key = (start_commit, None) else: key = (start_commit, graph.params.get('branch')) if key not in self._start_revisions: spec = self.repo.get_new_range_spec(*key) start_hash = self.repo.get_hash_from_name(start_commit) for commit in [start_hash] + self.repo.get_hashes_from_range(spec): rev = self.revisions.get(commit) if rev is not None: self._start_revisions[key] = rev break else: # Commit not found in the branch --- warn and ignore. log.warning(("Commit {0} specified in `regressions_first_commits` " "not found in branch").format(start_commit)) self._start_revisions[key] = -1 start_revision = max(start_revision, self._start_revisions[key] + 1) return start_revision
[ "def", "_get_start_revision", "(", "self", ",", "graph", ",", "benchmark", ",", "entry_name", ")", ":", "start_revision", "=", "min", "(", "six", ".", "itervalues", "(", "self", ".", "revisions", ")", ")", "if", "graph", ".", "params", ".", "get", "(", "'branch'", ")", ":", "branch_suffix", "=", "'@'", "+", "graph", ".", "params", ".", "get", "(", "'branch'", ")", "else", ":", "branch_suffix", "=", "''", "for", "regex", ",", "start_commit", "in", "six", ".", "iteritems", "(", "self", ".", "conf", ".", "regressions_first_commits", ")", ":", "if", "re", ".", "match", "(", "regex", ",", "entry_name", "+", "branch_suffix", ")", ":", "if", "start_commit", "is", "None", ":", "# Disable regression detection completely", "return", "None", "if", "self", ".", "conf", ".", "branches", "==", "[", "None", "]", ":", "key", "=", "(", "start_commit", ",", "None", ")", "else", ":", "key", "=", "(", "start_commit", ",", "graph", ".", "params", ".", "get", "(", "'branch'", ")", ")", "if", "key", "not", "in", "self", ".", "_start_revisions", ":", "spec", "=", "self", ".", "repo", ".", "get_new_range_spec", "(", "*", "key", ")", "start_hash", "=", "self", ".", "repo", ".", "get_hash_from_name", "(", "start_commit", ")", "for", "commit", "in", "[", "start_hash", "]", "+", "self", ".", "repo", ".", "get_hashes_from_range", "(", "spec", ")", ":", "rev", "=", "self", ".", "revisions", ".", "get", "(", "commit", ")", "if", "rev", "is", "not", "None", ":", "self", ".", "_start_revisions", "[", "key", "]", "=", "rev", "break", "else", ":", "# Commit not found in the branch --- warn and ignore.", "log", ".", "warning", "(", "(", "\"Commit {0} specified in `regressions_first_commits` \"", "\"not found in branch\"", ")", ".", "format", "(", "start_commit", ")", ")", "self", ".", "_start_revisions", "[", "key", "]", "=", "-", "1", "start_revision", "=", "max", "(", "start_revision", ",", "self", ".", "_start_revisions", "[", "key", "]", "+", "1", ")", "return", "start_revision" ]
Compute the first revision allowed by asv.conf.json. Revisions correspond to linearized commit history and the regression detection runs on this order --- the starting commit thus corresponds to a specific starting revision.
[ "Compute", "the", "first", "revision", "allowed", "by", "asv", ".", "conf", ".", "json", "." ]
python
train
43.227273
tcalmant/ipopo
pelix/ldapfilter.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ldapfilter.py#L342-L370
def unescape_LDAP(ldap_string):
    # type: (str) -> str
    # pylint: disable=C0103
    """
    Unescapes an LDAP string

    :param ldap_string: The string to unescape
    :return: The unprotected string
    """
    if ldap_string is None:
        return None

    if ESCAPE_CHARACTER not in ldap_string:
        # No need to loop
        return ldap_string

    escaped = False
    result = ""

    for character in ldap_string:
        if not escaped and character == ESCAPE_CHARACTER:
            # Escape character found
            escaped = True
        else:
            # Copy the character
            escaped = False
            result += character

    return result
[ "def", "unescape_LDAP", "(", "ldap_string", ")", ":", "# type: (str) -> str", "# pylint: disable=C0103", "if", "ldap_string", "is", "None", ":", "return", "None", "if", "ESCAPE_CHARACTER", "not", "in", "ldap_string", ":", "# No need to loop", "return", "ldap_string", "escaped", "=", "False", "result", "=", "\"\"", "for", "character", "in", "ldap_string", ":", "if", "not", "escaped", "and", "character", "==", "ESCAPE_CHARACTER", ":", "# Escape character found", "escaped", "=", "True", "else", ":", "# Copy the character", "escaped", "=", "False", "result", "+=", "character", "return", "result" ]
Unescapes an LDAP string

    :param ldap_string: The string to unescape
    :return: The unprotected string
[ "Unespaces", "an", "LDAP", "string" ]
python
train
22.448276
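Worked examples of the unescaping loop: a backslash flips the `escaped` flag and is itself dropped, while an escaped backslash survives as a single literal one. A standalone copy for illustration:

ESCAPE_CHARACTER = '\\'

def unescape(ldap_string):
    escaped = False
    result = ''
    for character in ldap_string:
        if not escaped and character == ESCAPE_CHARACTER:
            escaped = True              # drop the escape, remember it
        else:
            escaped = False
            result += character
    return result

assert unescape(r'a\*b') == 'a*b'       # protected wildcard is unprotected
assert unescape('a\\\\b') == 'a\\b'     # two backslashes collapse to one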
kennethreitz/maya
maya/core.py
https://github.com/kennethreitz/maya/blob/774b141d91a83a5d77cb5351db3d02bf50564b21/maya/core.py#L238-L259
def datetime(self, to_timezone=None, naive=False): """Returns a timezone-aware datetime... Defaulting to UTC (as it should). Keyword Arguments: to_timezone {str} -- timezone to convert to (default: None/UTC) naive {bool} -- if True, the tzinfo is simply dropped (default: False) """ if to_timezone: dt = self.datetime().astimezone(pytz.timezone(to_timezone)) else: dt = Datetime.utcfromtimestamp(self._epoch) dt.replace(tzinfo=self._tz) # Strip the timezone info if requested to do so. if naive: return dt.replace(tzinfo=None) else: if dt.tzinfo is None: dt = dt.replace(tzinfo=self._tz) return dt
[ "def", "datetime", "(", "self", ",", "to_timezone", "=", "None", ",", "naive", "=", "False", ")", ":", "if", "to_timezone", ":", "dt", "=", "self", ".", "datetime", "(", ")", ".", "astimezone", "(", "pytz", ".", "timezone", "(", "to_timezone", ")", ")", "else", ":", "dt", "=", "Datetime", ".", "utcfromtimestamp", "(", "self", ".", "_epoch", ")", "dt", ".", "replace", "(", "tzinfo", "=", "self", ".", "_tz", ")", "# Strip the timezone info if requested to do so.", "if", "naive", ":", "return", "dt", ".", "replace", "(", "tzinfo", "=", "None", ")", "else", ":", "if", "dt", ".", "tzinfo", "is", "None", ":", "dt", "=", "dt", ".", "replace", "(", "tzinfo", "=", "self", ".", "_tz", ")", "return", "dt" ]
Returns a timezone-aware datetime... Defaulting to UTC (as it should). Keyword Arguments: to_timezone {str} -- timezone to convert to (default: None/UTC) naive {bool} -- if True, the tzinfo is simply dropped (default: False)
[ "Returns", "a", "timezone", "-", "aware", "datetime", "...", "Defaulting", "to", "UTC", "(", "as", "it", "should", ")", "." ]
python
train
35.727273
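Typical calls into the `datetime` accessor, assuming the package's `maya.now()` constructor; the timezone name below is illustrative:

import maya

dt = maya.now()
print(dt.datetime())                          # tz-aware, UTC by default
print(dt.datetime(to_timezone='US/Eastern'))  # converted via pytz
print(dt.datetime(naive=True))                # tzinfo stripped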
etcher-be/elib_miz
elib_miz/mission.py
https://github.com/etcher-be/elib_miz/blob/f28db58fadb2cd9341e0ae4d65101c0cc7d8f3d7/elib_miz/mission.py#L578-L595
def get_country_by_name(self, country_name) -> 'Country': """ Gets a country in this coalition by its name Args: country_name: country name Returns: Country """ VALID_STR.validate(country_name, 'get_country_by_name', exc=ValueError) if country_name not in self._countries_by_name.keys(): for country in self.countries: if country.country_name == country_name: return country raise ValueError(country_name) else: return self._countries_by_name[country_name]
[ "def", "get_country_by_name", "(", "self", ",", "country_name", ")", "->", "'Country'", ":", "VALID_STR", ".", "validate", "(", "country_name", ",", "'get_country_by_name'", ",", "exc", "=", "ValueError", ")", "if", "country_name", "not", "in", "self", ".", "_countries_by_name", ".", "keys", "(", ")", ":", "for", "country", "in", "self", ".", "countries", ":", "if", "country", ".", "country_name", "==", "country_name", ":", "return", "country", "raise", "ValueError", "(", "country_name", ")", "else", ":", "return", "self", ".", "_countries_by_name", "[", "country_name", "]" ]
Gets a country in this coalition by its name Args: country_name: country name Returns: Country
[ "Gets", "a", "country", "in", "this", "coalition", "by", "its", "name" ]
python
train
32.777778
pycontribs/pyrax
pyrax/cloudnetworks.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudnetworks.py#L202-L216
def get_server_networks(self, network, public=False, private=False, key=None): """ Creates the dict of network UUIDs required by Cloud Servers when creating a new server with isolated networks. By default, the UUID values are returned with the key of "net-id", which is what novaclient expects. Other tools may require different values, such as 'uuid'. If that is the case, pass the desired key as the 'key' parameter. By default only this network is included. If you wish to create a server that has either the public (internet) or private (ServiceNet) networks, you have to pass those parameters in with values of True. """ return _get_server_networks(network, public=public, private=private, key=key)
[ "def", "get_server_networks", "(", "self", ",", "network", ",", "public", "=", "False", ",", "private", "=", "False", ",", "key", "=", "None", ")", ":", "return", "_get_server_networks", "(", "network", ",", "public", "=", "public", ",", "private", "=", "private", ",", "key", "=", "key", ")" ]
Creates the dict of network UUIDs required by Cloud Servers when creating a new server with isolated networks. By default, the UUID values are returned with the key of "net-id", which is what novaclient expects. Other tools may require different values, such as 'uuid'. If that is the case, pass the desired key as the 'key' parameter. By default only this network is included. If you wish to create a server that has either the public (internet) or private (ServiceNet) networks, you have to pass those parameters in with values of True.
[ "Creates", "the", "dict", "of", "network", "UUIDs", "required", "by", "Cloud", "Servers", "when", "creating", "a", "new", "server", "with", "isolated", "networks", ".", "By", "default", "the", "UUID", "values", "are", "returned", "with", "the", "key", "of", "net", "-", "id", "which", "is", "what", "novaclient", "expects", ".", "Other", "tools", "may", "require", "different", "values", "such", "as", "uuid", ".", "If", "that", "is", "the", "case", "pass", "the", "desired", "key", "as", "the", "key", "parameter", "." ]
python
train
53.733333
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L688-L712
def headerData(self, section, orientation, role): """Get the information to put in the header.""" if role == Qt.TextAlignmentRole: if orientation == Qt.Horizontal: return Qt.AlignCenter | Qt.AlignBottom else: return Qt.AlignRight | Qt.AlignVCenter if role != Qt.DisplayRole and role != Qt.ToolTipRole: return None if self.axis == 1 and self._shape[1] <= 1: return None orient_axis = 0 if orientation == Qt.Horizontal else 1 if self.model.header_shape[orient_axis] > 1: header = section else: header = self.model.header(self.axis, section) # Don't perform any conversion on strings # because it leads to differences between # the data present in the dataframe and # what is shown by Spyder if not is_type_text_string(header): header = to_text_string(header) return header
[ "def", "headerData", "(", "self", ",", "section", ",", "orientation", ",", "role", ")", ":", "if", "role", "==", "Qt", ".", "TextAlignmentRole", ":", "if", "orientation", "==", "Qt", ".", "Horizontal", ":", "return", "Qt", ".", "AlignCenter", "|", "Qt", ".", "AlignBottom", "else", ":", "return", "Qt", ".", "AlignRight", "|", "Qt", ".", "AlignVCenter", "if", "role", "!=", "Qt", ".", "DisplayRole", "and", "role", "!=", "Qt", ".", "ToolTipRole", ":", "return", "None", "if", "self", ".", "axis", "==", "1", "and", "self", ".", "_shape", "[", "1", "]", "<=", "1", ":", "return", "None", "orient_axis", "=", "0", "if", "orientation", "==", "Qt", ".", "Horizontal", "else", "1", "if", "self", ".", "model", ".", "header_shape", "[", "orient_axis", "]", ">", "1", ":", "header", "=", "section", "else", ":", "header", "=", "self", ".", "model", ".", "header", "(", "self", ".", "axis", ",", "section", ")", "# Don't perform any conversion on strings\r", "# because it leads to differences between\r", "# the data present in the dataframe and\r", "# what is shown by Spyder\r", "if", "not", "is_type_text_string", "(", "header", ")", ":", "header", "=", "to_text_string", "(", "header", ")", "return", "header" ]
Get the information to put in the header.
[ "Get", "the", "information", "to", "put", "in", "the", "header", "." ]
python
train
40.64
SCIP-Interfaces/PySCIPOpt
examples/finished/read_tsplib.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/finished/read_tsplib.py#L14-L24
def distL2(x1,y1,x2,y2): """Compute the L2-norm (Euclidean) distance between two points. The distance is rounded to the closest integer, for compatibility with the TSPLIB convention. The two points are located on coordinates (x1,y1) and (x2,y2), sent as parameters""" xdiff = x2 - x1 ydiff = y2 - y1 return int(math.sqrt(xdiff*xdiff + ydiff*ydiff) + .5)
[ "def", "distL2", "(", "x1", ",", "y1", ",", "x2", ",", "y2", ")", ":", "xdiff", "=", "x2", "-", "x1", "ydiff", "=", "y2", "-", "y1", "return", "int", "(", "math", ".", "sqrt", "(", "xdiff", "*", "xdiff", "+", "ydiff", "*", "ydiff", ")", "+", ".5", ")" ]
Compute the L2-norm (Euclidean) distance between two points. The distance is rounded to the closest integer, for compatibility with the TSPLIB convention. The two points are located on coordinates (x1,y1) and (x2,y2), sent as parameters
[ "Compute", "the", "L2", "-", "norm", "(", "Euclidean", ")", "distance", "between", "two", "points", "." ]
python
train
34.272727
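Worked values for the TSPLIB rounding convention -- adding .5 before truncating with int() rounds the distance to the nearest integer:

import math

def distL2(x1, y1, x2, y2):
    xdiff = x2 - x1
    ydiff = y2 - y1
    return int(math.sqrt(xdiff * xdiff + ydiff * ydiff) + .5)

assert distL2(0, 0, 3, 4) == 5    # exact 3-4-5 triangle
assert distL2(0, 0, 1, 1) == 1    # sqrt(2) ~ 1.414 rounds down
assert distL2(0, 0, 1, 2) == 2    # sqrt(5) ~ 2.236 rounds down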
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L6709-L6722
def str_display_width(s): ''' from elist.utils import * str_display_width('a') str_display_width('去') ''' s= str(s) width = 0 len = s.__len__() for i in range(0,len): sublen = s[i].encode().__len__() sublen = int(sublen/2 + 1/2) width = width + sublen return(width)
[ "def", "str_display_width", "(", "s", ")", ":", "s", "=", "str", "(", "s", ")", "width", "=", "0", "len", "=", "s", ".", "__len__", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", ")", ":", "sublen", "=", "s", "[", "i", "]", ".", "encode", "(", ")", ".", "__len__", "(", ")", "sublen", "=", "int", "(", "sublen", "/", "2", "+", "1", "/", "2", ")", "width", "=", "width", "+", "sublen", "return", "(", "width", ")" ]
from elist.utils import * str_display_width('a') str_display_width('去')
[ "from", "elist", ".", "utils", "import", "*", "str_display_width", "(", "a", ")", "str_display_width", "(", "去", ")" ]
python
valid
23.428571
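The width heuristic maps each character's UTF-8 byte length to terminal columns: int(n/2 + 1/2) gives 1 column for 1-byte ASCII and 2 columns for 3-byte CJK characters. A worked example (Python 3 true division assumed):

s = 'a去'
width = 0
for ch in s:
    nbytes = len(ch.encode())           # 'a' -> 1 byte, '去' -> 3 bytes in UTF-8
    width += int(nbytes / 2 + 1 / 2)    # 1 -> 1 column, 3 -> 2 columns
print(width)   # 3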
mangalam-research/selenic
selenic/util.py
https://github.com/mangalam-research/selenic/blob/2284c68e15fa3d34b88aa2eec1a2e8ecd37f44ad/selenic/util.py#L167-L178
def command_x(self, x, to=None): """ Sends a character to the currently active element with Command pressed. This method takes care of pressing and releasing Command. """ if to is None: ActionChains(self.driver) \ .send_keys([Keys.COMMAND, x, Keys.COMMAND]) \ .perform() else: self.send_keys(to, [Keys.COMMAND, x, Keys.COMMAND])
[ "def", "command_x", "(", "self", ",", "x", ",", "to", "=", "None", ")", ":", "if", "to", "is", "None", ":", "ActionChains", "(", "self", ".", "driver", ")", ".", "send_keys", "(", "[", "Keys", ".", "COMMAND", ",", "x", ",", "Keys", ".", "COMMAND", "]", ")", ".", "perform", "(", ")", "else", ":", "self", ".", "send_keys", "(", "to", ",", "[", "Keys", ".", "COMMAND", ",", "x", ",", "Keys", ".", "COMMAND", "]", ")" ]
Sends a character to the currently active element with Command pressed. This method takes care of pressing and releasing Command.
[ "Sends", "a", "character", "to", "the", "currently", "active", "element", "with", "Command", "pressed", ".", "This", "method", "takes", "care", "of", "pressing", "and", "releasing", "Command", "." ]
python
train
35.75
cgoldberg/sauceclient
sauceclient.py
https://github.com/cgoldberg/sauceclient/blob/aa27b7da8eb2e483adc2754c694fe5082e1fa8f7/sauceclient.py#L114-L119
def get_concurrency(self): """Check account concurrency limits.""" method = 'GET' endpoint = '/rest/v1.1/users/{}/concurrency'.format( self.client.sauce_username) return self.client.request(method, endpoint)
[ "def", "get_concurrency", "(", "self", ")", ":", "method", "=", "'GET'", "endpoint", "=", "'/rest/v1.1/users/{}/concurrency'", ".", "format", "(", "self", ".", "client", ".", "sauce_username", ")", "return", "self", ".", "client", ".", "request", "(", "method", ",", "endpoint", ")" ]
Check account concurrency limits.
[ "Check", "account", "concurrency", "limits", "." ]
python
train
41
DBuildService/dockerfile-parse
dockerfile_parse/parser.py
https://github.com/DBuildService/dockerfile-parse/blob/3d7b514d8b8eded1b33529cf0f6a0770a573aee0/dockerfile_parse/parser.py#L271-L282
def parent_images(self):
        """
        :return: list of parent images -- one image for each stage's FROM instruction
        """
        parents = []
        for instr in self.structure:
            if instr['instruction'] != 'FROM':
                continue

            image, _ = image_from(instr['value'])
            if image is not None:
                parents.append(image)
        return parents
[ "def", "parent_images", "(", "self", ")", ":", "parents", "=", "[", "]", "for", "instr", "in", "self", ".", "structure", ":", "if", "instr", "[", "'instruction'", "]", "!=", "'FROM'", ":", "continue", "image", ",", "_", "=", "image_from", "(", "instr", "[", "'value'", "]", ")", "if", "image", "is", "not", "None", ":", "parents", ".", "append", "(", "image", ")", "return", "parents" ]
:return: list of parent images -- one image for each stage's FROM instruction
[ ":", "return", ":", "list", "of", "parent", "images", "--", "one", "image", "for", "each", "stage", "s", "FROM", "instruction" ]
python
train
33.166667
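A usage sketch for the parent_images property above, assuming the dockerfile-parse package is installed; the fileobj keyword (used here to avoid touching a real Dockerfile on disk) and the Dockerfile content are assumptions made for the demo.

import io
from dockerfile_parse import DockerfileParser

dfp = DockerfileParser(fileobj=io.BytesIO())
dfp.content = ('FROM python:3 AS build\n'
               'RUN make\n'
               'FROM alpine:3.9\n'
               'COPY --from=build /out /out\n')
print(dfp.parent_images)  # expected: ['python:3', 'alpine:3.9']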
twisted/mantissa
xmantissa/scrolltable.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/scrolltable.py#L734-L754
def constructRows(self, items): """ Build row objects that are serializable using Athena for sending to the client. @param items: an iterable of objects compatible with my columns' C{extractValue} methods. @return: a list of dictionaries, where each dictionary has a string key for each column name in my list of columns. """ rows = [] for item in items: row = dict((colname, col.extractValue(self, item)) for (colname, col) in self.columns.iteritems()) link = self.linkToItem(item) if link is not None: row[u'__id__'] = link rows.append(row) return rows
[ "def", "constructRows", "(", "self", ",", "items", ")", ":", "rows", "=", "[", "]", "for", "item", "in", "items", ":", "row", "=", "dict", "(", "(", "colname", ",", "col", ".", "extractValue", "(", "self", ",", "item", ")", ")", "for", "(", "colname", ",", "col", ")", "in", "self", ".", "columns", ".", "iteritems", "(", ")", ")", "link", "=", "self", ".", "linkToItem", "(", "item", ")", "if", "link", "is", "not", "None", ":", "row", "[", "u'__id__'", "]", "=", "link", "rows", ".", "append", "(", "row", ")", "return", "rows" ]
Build row objects that are serializable using Athena for sending to the client. @param items: an iterable of objects compatible with my columns' C{extractValue} methods. @return: a list of dictionaries, where each dictionary has a string key for each column name in my list of columns.
[ "Build", "row", "objects", "that", "are", "serializable", "using", "Athena", "for", "sending", "to", "the", "client", "." ]
python
train
33.904762
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L406-L452
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4, start_index=0): """Gets a bunch of sinusoids of different frequencies. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: length: scalar, length of timing signal sequence. channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor of timing signals [1, length, channels] """ position = tf.to_float(tf.range(length) + start_index) num_timescales = channels // 2 log_timescale_increment = ( math.log(float(max_timescale) / float(min_timescale)) / tf.maximum(tf.to_float(num_timescales) - 1, 1)) inv_timescales = min_timescale * tf.exp( tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]]) signal = tf.reshape(signal, [1, length, channels]) return signal
[ "def", "get_timing_signal_1d", "(", "length", ",", "channels", ",", "min_timescale", "=", "1.0", ",", "max_timescale", "=", "1.0e4", ",", "start_index", "=", "0", ")", ":", "position", "=", "tf", ".", "to_float", "(", "tf", ".", "range", "(", "length", ")", "+", "start_index", ")", "num_timescales", "=", "channels", "//", "2", "log_timescale_increment", "=", "(", "math", ".", "log", "(", "float", "(", "max_timescale", ")", "/", "float", "(", "min_timescale", ")", ")", "/", "tf", ".", "maximum", "(", "tf", ".", "to_float", "(", "num_timescales", ")", "-", "1", ",", "1", ")", ")", "inv_timescales", "=", "min_timescale", "*", "tf", ".", "exp", "(", "tf", ".", "to_float", "(", "tf", ".", "range", "(", "num_timescales", ")", ")", "*", "-", "log_timescale_increment", ")", "scaled_time", "=", "tf", ".", "expand_dims", "(", "position", ",", "1", ")", "*", "tf", ".", "expand_dims", "(", "inv_timescales", ",", "0", ")", "signal", "=", "tf", ".", "concat", "(", "[", "tf", ".", "sin", "(", "scaled_time", ")", ",", "tf", ".", "cos", "(", "scaled_time", ")", "]", ",", "axis", "=", "1", ")", "signal", "=", "tf", ".", "pad", "(", "signal", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "tf", ".", "mod", "(", "channels", ",", "2", ")", "]", "]", ")", "signal", "=", "tf", ".", "reshape", "(", "signal", ",", "[", "1", ",", "length", ",", "channels", "]", ")", "return", "signal" ]
Gets a bunch of sinusoids of different frequencies. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: length: scalar, length of timing signal sequence. channels: scalar, size of timing embeddings to create. The number of different timescales is equal to channels / 2. min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor of timing signals [1, length, channels]
[ "Gets", "a", "bunch", "of", "sinusoids", "of", "different", "frequencies", "." ]
python
train
42.212766
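For reference, a library-free NumPy sketch of the same computation as get_timing_signal_1d: geometric timescales, sin/cos concatenation, and zero-padding for odd channel counts. It mirrors the TF code above but is an illustration, not the tensor2tensor implementation.

import math
import numpy as np

def timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = np.arange(length, dtype=np.float64)
    num_timescales = channels // 2
    log_inc = math.log(max_timescale / min_timescale) / max(num_timescales - 1, 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_inc)
    scaled = position[:, None] * inv_timescales[None, :]   # [length, num_timescales]
    signal = np.concatenate([np.sin(scaled), np.cos(scaled)], axis=1)
    signal = np.pad(signal, [(0, 0), (0, channels % 2)])   # pad if channels is odd
    return signal.reshape(1, length, channels)

print(timing_signal_1d(10, 8).shape)  # (1, 10, 8)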
robotools/fontParts
Lib/fontParts/base/normalizers.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/normalizers.py#L927-L937
def normalizeGlyphNote(value): """ Normalizes Glyph Note. * **value** must be a :ref:`type-string`. * Returned value is an unencoded ``unicode`` string """ if not isinstance(value, basestring): raise TypeError("Note must be a string, not %s." % type(value).__name__) return unicode(value)
[ "def", "normalizeGlyphNote", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"Note must be a string, not %s.\"", "%", "type", "(", "value", ")", ".", "__name__", ")", "return", "unicode", "(", "value", ")" ]
Normalizes Glyph Note. * **value** must be a :ref:`type-string`. * Returned value is an unencoded ``unicode`` string
[ "Normalizes", "Glyph", "Note", "." ]
python
train
30.818182
richardkiss/pycoin
pycoin/crack/ecdsa.py
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/crack/ecdsa.py#L2-L7
def crack_secret_exponent_from_k(generator, signed_value, sig, k): """ Given a signature of a signed_value and a known k, return the secret exponent. """ r, s = sig return ((s * k - signed_value) * generator.inverse(r)) % generator.order()
[ "def", "crack_secret_exponent_from_k", "(", "generator", ",", "signed_value", ",", "sig", ",", "k", ")", ":", "r", ",", "s", "=", "sig", "return", "(", "(", "s", "*", "k", "-", "signed_value", ")", "*", "generator", ".", "inverse", "(", "r", ")", ")", "%", "generator", ".", "order", "(", ")" ]
Given a signature of a signed_value and a known k, return the secret exponent.
[ "Given", "a", "signature", "of", "a", "signed_value", "and", "a", "known", "k", "return", "the", "secret", "exponent", "." ]
python
train
42.333333
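A worked numeric check of the algebra behind crack_secret_exponent_from_k: an ECDSA signature satisfies s = k^-1 * (z + r*d) mod n, so a leaked nonce k yields d = (s*k - z) * r^-1 mod n. The tiny modulus and values are invented; pow(x, -1, n) needs Python 3.8+.

n = 101                      # stand-in for the group order
d = 37                       # "secret" exponent to recover
k, r, z = 19, 53, 88         # leaked nonce, signature r, message hash
s = pow(k, -1, n) * (z + r * d) % n          # build a consistent signature s
recovered = (s * k - z) * pow(r, -1, n) % n  # the formula from the record
assert recovered == d
print(recovered)             # 37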
QualiSystems/vCenterShell
package/cloudshell/cp/vcenter/common/utilites/common_utils.py
https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/utilites/common_utils.py#L11-L22
def back_slash_to_front_converter(string):
    """
    Replace all backslashes in the string with forward slashes
    :param string: single string to modify
    :type string: str
    """
    try:
        if not string or not isinstance(string, str):
            return string
        return string.replace('\\', '/')
    except Exception:
        return string
[ "def", "back_slash_to_front_converter", "(", "string", ")", ":", "try", ":", "if", "not", "string", "or", "not", "isinstance", "(", "string", ",", "str", ")", ":", "return", "string", "return", "string", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "except", "Exception", ":", "return", "string" ]
Replace all backslashes in the string with forward slashes
:param string: single string to modify
:type string: str
[ "Replace", "all", "backslashes", "in", "the", "string", "with", "forward", "slashes", ":", "param", "string", ":", "single", "string", "to", "modify", ":", "type", "string", ":", "str" ]
python
train
26.833333
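A minimal behavioural check of the converter above; the inputs are invented, and the function is restated inline so the snippet runs standalone.

def to_forward_slashes(string):
    if not string or not isinstance(string, str):
        return string
    return string.replace('\\', '/')

print(to_forward_slashes('datastore\\vm\\disk.vmdk'))  # datastore/vm/disk.vmdk
print(to_forward_slashes(None))                        # None: non-strings pass through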
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/utils.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/utils.py#L258-L299
def urlize(text, trim_url_limit=None, nofollow=False): """Converts any URLs in text into clickable links. Works on http://, https:// and www. links. Links can have trailing punctuation (periods, commas, close-parens) and leading punctuation (opening parens) and it'll still do the right thing. If trim_url_limit is not None, the URLs in link text will be limited to trim_url_limit characters. If nofollow is True, the URLs in link text will get a rel="nofollow" attribute. """ trim_url = lambda x, limit=trim_url_limit: limit is not None \ and (x[:limit] + (len(x) >=limit and '...' or '')) or x words = _word_split_re.split(unicode(escape(text))) nofollow_attr = nofollow and ' rel="nofollow"' or '' for i, word in enumerate(words): match = _punctuation_re.match(word) if match: lead, middle, trail = match.groups() if middle.startswith('www.') or ( '@' not in middle and not middle.startswith('http://') and len(middle) > 0 and middle[0] in _letters + _digits and ( middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com') )): middle = '<a href="http://%s"%s>%s</a>' % (middle, nofollow_attr, trim_url(middle)) if middle.startswith('http://') or \ middle.startswith('https://'): middle = '<a href="%s"%s>%s</a>' % (middle, nofollow_attr, trim_url(middle)) if '@' in middle and not middle.startswith('www.') and \ not ':' in middle and _simple_email_re.match(middle): middle = '<a href="mailto:%s">%s</a>' % (middle, middle) if lead + middle + trail != word: words[i] = lead + middle + trail return u''.join(words)
[ "def", "urlize", "(", "text", ",", "trim_url_limit", "=", "None", ",", "nofollow", "=", "False", ")", ":", "trim_url", "=", "lambda", "x", ",", "limit", "=", "trim_url_limit", ":", "limit", "is", "not", "None", "and", "(", "x", "[", ":", "limit", "]", "+", "(", "len", "(", "x", ")", ">=", "limit", "and", "'...'", "or", "''", ")", ")", "or", "x", "words", "=", "_word_split_re", ".", "split", "(", "unicode", "(", "escape", "(", "text", ")", ")", ")", "nofollow_attr", "=", "nofollow", "and", "' rel=\"nofollow\"'", "or", "''", "for", "i", ",", "word", "in", "enumerate", "(", "words", ")", ":", "match", "=", "_punctuation_re", ".", "match", "(", "word", ")", "if", "match", ":", "lead", ",", "middle", ",", "trail", "=", "match", ".", "groups", "(", ")", "if", "middle", ".", "startswith", "(", "'www.'", ")", "or", "(", "'@'", "not", "in", "middle", "and", "not", "middle", ".", "startswith", "(", "'http://'", ")", "and", "len", "(", "middle", ")", ">", "0", "and", "middle", "[", "0", "]", "in", "_letters", "+", "_digits", "and", "(", "middle", ".", "endswith", "(", "'.org'", ")", "or", "middle", ".", "endswith", "(", "'.net'", ")", "or", "middle", ".", "endswith", "(", "'.com'", ")", ")", ")", ":", "middle", "=", "'<a href=\"http://%s\"%s>%s</a>'", "%", "(", "middle", ",", "nofollow_attr", ",", "trim_url", "(", "middle", ")", ")", "if", "middle", ".", "startswith", "(", "'http://'", ")", "or", "middle", ".", "startswith", "(", "'https://'", ")", ":", "middle", "=", "'<a href=\"%s\"%s>%s</a>'", "%", "(", "middle", ",", "nofollow_attr", ",", "trim_url", "(", "middle", ")", ")", "if", "'@'", "in", "middle", "and", "not", "middle", ".", "startswith", "(", "'www.'", ")", "and", "not", "':'", "in", "middle", "and", "_simple_email_re", ".", "match", "(", "middle", ")", ":", "middle", "=", "'<a href=\"mailto:%s\">%s</a>'", "%", "(", "middle", ",", "middle", ")", "if", "lead", "+", "middle", "+", "trail", "!=", "word", ":", "words", "[", "i", "]", "=", "lead", "+", "middle", "+", "trail", "return", "u''", ".", "join", "(", "words", ")" ]
Converts any URLs in text into clickable links. Works on http://, https:// and www. links. Links can have trailing punctuation (periods, commas, close-parens) and leading punctuation (opening parens) and it'll still do the right thing. If trim_url_limit is not None, the URLs in link text will be limited to trim_url_limit characters. If nofollow is True, the URLs in link text will get a rel="nofollow" attribute.
[ "Converts", "any", "URLs", "in", "text", "into", "clickable", "links", ".", "Works", "on", "http", ":", "//", "https", ":", "//", "and", "www", ".", "links", ".", "Links", "can", "have", "trailing", "punctuation", "(", "periods", "commas", "close", "-", "parens", ")", "and", "leading", "punctuation", "(", "opening", "parens", ")", "and", "it", "ll", "still", "do", "the", "right", "thing", "." ]
python
train
46.5
ynop/audiomate
audiomate/annotations/relabeling.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/annotations/relabeling.py#L13-L68
def relabel(label_list, projections):
    """
    Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
    Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.

    Each entry in the dictionary of projections represents a single projection that
    maps a combination of labels (key) to a single new label (value).
    The combination of labels to be mapped is a tuple of naturally sorted labels
    that apply to one or more segments simultaneously.
    By defining a special wildcard projection using `('**',)`
    it is not required to specify a projection for every single combination of labels.

    This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException`
    if a projection for one or more combinations of labels is not defined.

    Args:
        label_list (audiomate.annotations.LabelList): The label list to relabel
        projections (dict): A dictionary that maps tuples of label combinations to string
                            labels.

    Returns:
        audiomate.annotations.LabelList: New label list with remapped labels

    Raises:
        UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.

    Example:
        >>> projections = {
        ...     ('a',): 'a',
        ...     ('b',): 'b',
        ...     ('c',): 'c',
        ...     ('a', 'b',): 'a_b',
        ...     ('a', 'b', 'c',): 'a_b_c',
        ...     ('**',): 'b_c',
        ... }
        >>> label_list = annotations.LabelList(labels=[
        ...     annotations.Label('a', 3.2, 4.5),
        ...     annotations.Label('b', 4.0, 4.9),
        ...     annotations.Label('c', 4.2, 5.1)
        ... ])
        >>> ll = relabel(label_list, projections)
        >>> [l.value for l in ll]
        ['a', 'a_b', 'a_b_c', 'b_c', 'c']
    """
    unmapped_combinations = find_missing_projections(label_list, projections)

    if len(unmapped_combinations) > 0:
        raise UnmappedLabelsException('Unmapped combinations: {}'.format(unmapped_combinations))

    new_labels = []

    for labeled_segment in label_list.ranges():
        combination = tuple(sorted([label.value for label in labeled_segment[2]]))
        label_mapping = projections[combination] if combination in projections else projections[WILDCARD_COMBINATION]

        if label_mapping == '':
            continue

        new_labels.append(annotations.Label(label_mapping, labeled_segment[0], labeled_segment[1]))

    return annotations.LabelList(idx=label_list.idx, labels=new_labels)
[ "def", "relabel", "(", "label_list", ",", "projections", ")", ":", "unmapped_combinations", "=", "find_missing_projections", "(", "label_list", ",", "projections", ")", "if", "len", "(", "unmapped_combinations", ")", ">", "0", ":", "raise", "UnmappedLabelsException", "(", "'Unmapped combinations: {}'", ".", "format", "(", "unmapped_combinations", ")", ")", "new_labels", "=", "[", "]", "for", "labeled_segment", "in", "label_list", ".", "ranges", "(", ")", ":", "combination", "=", "tuple", "(", "sorted", "(", "[", "label", ".", "value", "for", "label", "in", "labeled_segment", "[", "2", "]", "]", ")", ")", "label_mapping", "=", "projections", "[", "combination", "]", "if", "combination", "in", "projections", "else", "projections", "[", "WILDCARD_COMBINATION", "]", "if", "label_mapping", "==", "''", ":", "continue", "new_labels", ".", "append", "(", "annotations", ".", "Label", "(", "label_mapping", ",", "labeled_segment", "[", "0", "]", ",", "labeled_segment", "[", "1", "]", ")", ")", "return", "annotations", ".", "LabelList", "(", "idx", "=", "label_list", ".", "idx", ",", "labels", "=", "new_labels", ")" ]
Relabel an entire :py:class:`~audiomate.annotations.LabelList` using user-defined projections.
Labels can be renamed, removed or overlapping labels can be flattened to a single label per segment.

Each entry in the dictionary of projections represents a single projection that
maps a combination of labels (key) to a single new label (value).
The combination of labels to be mapped is a tuple of naturally sorted labels
that apply to one or more segments simultaneously.
By defining a special wildcard projection using `('**',)`
it is not required to specify a projection for every single combination of labels.

This method raises a :py:class:`~audiomate.corpus.utils.labellist.UnmappedLabelsException`
if a projection for one or more combinations of labels is not defined.

Args:
    label_list (audiomate.annotations.LabelList): The label list to relabel
    projections (dict): A dictionary that maps tuples of label combinations to string
                        labels.

Returns:
    audiomate.annotations.LabelList: New label list with remapped labels

Raises:
    UnmappedLabelsException: If a projection for one or more combinations of labels is not defined.

Example:
    >>> projections = {
    ...     ('a',): 'a',
    ...     ('b',): 'b',
    ...     ('c',): 'c',
    ...     ('a', 'b',): 'a_b',
    ...     ('a', 'b', 'c',): 'a_b_c',
    ...     ('**',): 'b_c',
    ... }
    >>> label_list = annotations.LabelList(labels=[
    ...     annotations.Label('a', 3.2, 4.5),
    ...     annotations.Label('b', 4.0, 4.9),
    ...     annotations.Label('c', 4.2, 5.1)
    ... ])
    >>> ll = relabel(label_list, projections)
    >>> [l.value for l in ll]
    ['a', 'a_b', 'a_b_c', 'b_c', 'c']
[ "Relabel", "an", "entire", ":", "py", ":", "class", ":", "~audiomate", ".", "annotations", ".", "LabelList", "using", "user", "-", "defined", "projections", ".", "Labels", "can", "be", "renamed", "removed", "or", "overlapping", "labels", "can", "be", "flattened", "to", "a", "single", "label", "per", "segment", "." ]
python
train
45.375
pkkid/python-plexapi
tools/plex-backupwatched.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/tools/plex-backupwatched.py#L13-L23
def _find_server(account, servername=None):
    """ Find and return a PlexServer object. """
    servers = [s for s in account.resources() if 'server' in s.provides]
    # If servername specified find and return it
    if servername is not None:
        for server in servers:
            if server.name == servername:
                return server.connect()
        raise SystemExit('Unknown server name: %s' % servername)
    # If servername not specified; allow user to choose
    return utils.choose('Choose a Server', servers, 'name').connect()
[ "def", "_find_server", "(", "account", ",", "servername", "=", "None", ")", ":", "servers", "=", "[", "s", "for", "s", "in", "account", ".", "resources", "(", ")", "if", "'server'", "in", "s", ".", "provides", "]", "# If servername specified find and return it", "if", "servername", "is", "not", "None", ":", "for", "server", "in", "servers", ":", "if", "server", ".", "name", "==", "servername", ":", "return", "server", ".", "connect", "(", ")", "raise", "SystemExit", "(", "'Unknown server name: %s'", "%", "servername", ")", "# If servername not specified; allow user to choose", "return", "utils", ".", "choose", "(", "'Choose a Server'", ",", "servers", ",", "'name'", ")", ".", "connect", "(", ")" ]
Find and return a PlexServer object.
[ "Find", "and", "return", "a", "PlexServer", "object", "." ]
python
train
49.909091
kamikaze/webdav
src/webdav/client.py
https://github.com/kamikaze/webdav/blob/6facff7224023d3e28c8e1592f3c58401c91a0e6/src/webdav/client.py#L457-L469
def upload(self, remote_path, local_path, progress=None):
        """Uploads resource to remote path on WebDAV server.
        In case resource is directory it will upload all nested files and directories.
        More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PUT

        :param remote_path: the path for uploading resources on WebDAV server. Can be file and directory.
        :param local_path: the path to local resource for uploading.
        :param progress: Progress function. Not supported now.
        """
        if os.path.isdir(local_path):
            self.upload_directory(local_path=local_path, remote_path=remote_path, progress=progress)
        else:
            self.upload_file(local_path=local_path, remote_path=remote_path)
[ "def", "upload", "(", "self", ",", "remote_path", ",", "local_path", ",", "progress", "=", "None", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "local_path", ")", ":", "self", ".", "upload_directory", "(", "local_path", "=", "local_path", ",", "remote_path", "=", "remote_path", ",", "progress", "=", "progress", ")", "else", ":", "self", ".", "upload_file", "(", "local_path", "=", "local_path", ",", "remote_path", "=", "remote_path", ")" ]
Uploads resource to remote path on WebDAV server.
In case resource is directory it will upload all nested files and directories.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PUT

:param remote_path: the path for uploading resources on WebDAV server. Can be file and directory.
:param local_path: the path to local resource for uploading.
:param progress: Progress function. Not supported now.
[ "Uploads", "resource", "to", "remote", "path", "on", "WebDAV", "server", ".", "In", "case", "resource", "is", "directory", "it", "will", "upload", "all", "nested", "files", "and", "directories", ".", "More", "information", "can", "be", "found", "at", "http", ":", "//", "webdav", ".", "org", "/", "specs", "/", "rfc4918", ".", "html#METHOD_PUT" ]
python
train
59.076923
kislyuk/aegea
aegea/packages/github3/repos/repo.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/repo.py#L1232-L1253
def iter_contributor_statistics(self, number=-1, etag=None): """Iterate over the contributors list. See also: http://developer.github.com/v3/repos/statistics/ :param int number: (optional), number of weeks to return. Default -1 will return all of the weeks. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ContributorStats <github3.repos.stats.ContributorStats>` .. note:: All statistics methods may return a 202. On those occasions, you will not receive any objects. You should store your iterator and check the new ``last_status`` attribute. If it is a 202 you should wait before re-requesting. .. versionadded:: 0.7 """ url = self._build_url('stats', 'contributors', base_url=self._api) return self._iter(int(number), url, ContributorStats, etag=etag)
[ "def", "iter_contributor_statistics", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'stats'", ",", "'contributors'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "ContributorStats", ",", "etag", "=", "etag", ")" ]
Iterate over the contributors list. See also: http://developer.github.com/v3/repos/statistics/ :param int number: (optional), number of weeks to return. Default -1 will return all of the weeks. :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`ContributorStats <github3.repos.stats.ContributorStats>` .. note:: All statistics methods may return a 202. On those occasions, you will not receive any objects. You should store your iterator and check the new ``last_status`` attribute. If it is a 202 you should wait before re-requesting. .. versionadded:: 0.7
[ "Iterate", "over", "the", "contributors", "list", "." ]
python
train
44.090909
PMBio/limix-backup
limix/varDecomp/varianceDecomposition.py
https://github.com/PMBio/limix-backup/blob/1e201fdb5c694d0d5506f207f3de65d8ef66146c/limix/varDecomp/varianceDecomposition.py#L731-L774
def _getH2singleTrait(self, K, verbose=None):
        """
        Internal function for parameter initialization:
        estimate variance components and fixed effect using a linear mixed model
        with an intercept and 2 random effects (one is noise)
        Args:
            K: covariance matrix of the non-noise random effect term
        """
        verbose = dlimix.getVerbose(verbose)
        # Fit single trait model
        varg = sp.zeros(self.P)
        varn = sp.zeros(self.P)
        fixed = sp.zeros((1,self.P))
        for p in range(self.P):
            y = self.Y[:,p:p+1]
            # check if some null value
            I = sp.isnan(y[:,0])
            if I.sum()>0:
                y = y[~I,:]
                _K = K[~I,:][:,~I]
            else:
                _K = copy.copy(K)
            lmm = dlimix.CLMM()
            lmm.setK(_K)
            lmm.setSNPs(sp.ones((y.shape[0],1)))
            lmm.setPheno(y)
            lmm.setCovs(sp.zeros((y.shape[0],1)))
            lmm.setVarcompApprox0(-20, 20, 1000)
            lmm.process()
            delta = sp.exp(lmm.getLdelta0()[0,0])
            Vtot = sp.exp(lmm.getLSigma()[0,0])
            varg[p] = Vtot
            varn[p] = delta*Vtot
            fixed[:,p] = lmm.getBetaSNP()
            if verbose: print(p)
        sth = {}
        sth['varg'] = varg
        sth['varn'] = varn
        sth['fixed'] = fixed
        return sth
[ "def", "_getH2singleTrait", "(", "self", ",", "K", ",", "verbose", "=", "None", ")", ":", "verbose", "=", "dlimix", ".", "getVerbose", "(", "verbose", ")", "# Fit single trait model", "varg", "=", "sp", ".", "zeros", "(", "self", ".", "P", ")", "varn", "=", "sp", ".", "zeros", "(", "self", ".", "P", ")", "fixed", "=", "sp", ".", "zeros", "(", "(", "1", ",", "self", ".", "P", ")", ")", "for", "p", "in", "range", "(", "self", ".", "P", ")", ":", "y", "=", "self", ".", "Y", "[", ":", ",", "p", ":", "p", "+", "1", "]", "# check if some null value", "I", "=", "sp", ".", "isnan", "(", "y", "[", ":", ",", "0", "]", ")", "if", "I", ".", "sum", "(", ")", ">", "0", ":", "y", "=", "y", "[", "~", "I", ",", ":", "]", "_K", "=", "K", "[", "~", "I", ",", ":", "]", "[", ":", ",", "~", "I", "]", "else", ":", "_K", "=", "copy", ".", "copy", "(", "K", ")", "lmm", "=", "dlimix", ".", "CLMM", "(", ")", "lmm", ".", "setK", "(", "_K", ")", "lmm", ".", "setSNPs", "(", "sp", ".", "ones", "(", "(", "y", ".", "shape", "[", "0", "]", ",", "1", ")", ")", ")", "lmm", ".", "setPheno", "(", "y", ")", "lmm", ".", "setCovs", "(", "sp", ".", "zeros", "(", "(", "y", ".", "shape", "[", "0", "]", ",", "1", ")", ")", ")", "lmm", ".", "setVarcompApprox0", "(", "-", "20", ",", "20", ",", "1000", ")", "lmm", ".", "process", "(", ")", "delta", "=", "sp", ".", "exp", "(", "lmm", ".", "getLdelta0", "(", ")", "[", "0", ",", "0", "]", ")", "Vtot", "=", "sp", ".", "exp", "(", "lmm", ".", "getLSigma", "(", ")", "[", "0", ",", "0", "]", ")", "varg", "[", "p", "]", "=", "Vtot", "varn", "[", "p", "]", "=", "delta", "*", "Vtot", "fixed", "[", ":", ",", "p", "]", "=", "lmm", ".", "getBetaSNP", "(", ")", "if", "verbose", ":", "print", "(", "p", ")", "sth", "=", "{", "}", "sth", "[", "'varg'", "]", "=", "varg", "sth", "[", "'varn'", "]", "=", "varn", "sth", "[", "'fixed'", "]", "=", "fixed", "return", "sth" ]
Internal function for parameter initialization:
estimate variance components and fixed effect using a linear mixed model
with an intercept and 2 random effects (one is noise)
Args:
    K: covariance matrix of the non-noise random effect term
[ "Internal", "function", "for", "parameter", "initialization", ":", "estimate", "variance", "components", "and", "fixed", "effect", "using", "a", "linear", "mixed", "model", "with", "an", "intercept", "and", "2", "random", "effects", "(", "one", "is", "noise", ")", "Args", ":", "K", ":", "covariance", "matrix", "of", "the", "non", "-", "noise", "random", "effect", "term" ]
python
train
31.5
dragnet-org/dragnet
dragnet/features/weninger.py
https://github.com/dragnet-org/dragnet/blob/532c9d9f28e5b1b57f3cabc708218d3863a16322/dragnet/features/weninger.py#L92-L107
def transform(self, blocks, y=None): """ Computes the content to tag ratio per block, smooths the values, then predicts content (1) or not-content (0) using a fit k-means cluster model. Args: blocks (List[Block]): as output by :class:`Blockifier.blockify` y (None): This isn't used, it's only here for API consistency. Returns: :class:`np.ndarray`: 2D array of shape (len(feature_mat), 1), where values are either 0 or 1, corresponding to the kmeans prediction of content (1) or not-content (0). """ preds = (self.kmeans.predict(make_weninger_features(blocks)) > 0).astype(int) return np.reshape(preds, (-1, 1))
[ "def", "transform", "(", "self", ",", "blocks", ",", "y", "=", "None", ")", ":", "preds", "=", "(", "self", ".", "kmeans", ".", "predict", "(", "make_weninger_features", "(", "blocks", ")", ")", ">", "0", ")", ".", "astype", "(", "int", ")", "return", "np", ".", "reshape", "(", "preds", ",", "(", "-", "1", ",", "1", ")", ")" ]
Computes the content to tag ratio per block, smooths the values, then predicts content (1) or not-content (0) using a fit k-means cluster model. Args: blocks (List[Block]): as output by :class:`Blockifier.blockify` y (None): This isn't used, it's only here for API consistency. Returns: :class:`np.ndarray`: 2D array of shape (len(feature_mat), 1), where values are either 0 or 1, corresponding to the kmeans prediction of content (1) or not-content (0).
[ "Computes", "the", "content", "to", "tag", "ratio", "per", "block", "smooths", "the", "values", "then", "predicts", "content", "(", "1", ")", "or", "not", "-", "content", "(", "0", ")", "using", "a", "fit", "k", "-", "means", "cluster", "model", "." ]
python
train
45.625
saltstack/salt
salt/modules/jenkinsmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/jenkinsmod.py#L170-L191
def job_exists(name=None):
    '''
    Check whether the job exists in configured Jenkins jobs.

    :param name: The name of the job to check if it exists.

    :return: True if job exists, False if job does not exist.

    CLI Example:

    .. code-block:: bash

        salt '*' jenkins.job_exists jobname

    '''
    if not name:
        raise SaltInvocationError('Required parameter \'name\' is missing')

    server = _connect()
    if server.job_exists(name):
        return True
    else:
        return False
[ "def", "job_exists", "(", "name", "=", "None", ")", ":", "if", "not", "name", ":", "raise", "SaltInvocationError", "(", "'Required parameter \\'name\\' is missing'", ")", "server", "=", "_connect", "(", ")", "if", "server", ".", "job_exists", "(", "name", ")", ":", "return", "True", "else", ":", "return", "False" ]
Check whether the job exists in configured Jenkins jobs.

:param name: The name of the job to check if it exists.

:return: True if job exists, False if job does not exist.

CLI Example:

.. code-block:: bash

    salt '*' jenkins.job_exists jobname
[ "Check", "whether", "the", "job", "exists", "in", "configured", "Jenkins", "jobs", "." ]
python
train
22.545455
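A hedged sketch of what _connect() ultimately wraps, namely the python-jenkins client's own job_exists call; the URL and credentials are placeholders.

import jenkins

server = jenkins.Jenkins('http://jenkins.example:8080',
                         username='admin', password='secret')
print(server.job_exists('jobname'))  # True or False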
apache/airflow
airflow/hooks/S3_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L384-L422
def load_bytes(self, bytes_data, key, bucket_name=None, replace=False, encrypt=False): """ Loads bytes to S3 This is provided as a convenience to drop a string in S3. It uses the boto infrastructure to ship a file to s3. :param bytes_data: bytes to set as content for the key. :type bytes_data: bytes :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which to store the file :type bucket_name: str :param replace: A flag to decide whether or not to overwrite the key if it already exists :type replace: bool :param encrypt: If True, the file will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type encrypt: bool """ if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) if not replace and self.check_for_key(key, bucket_name): raise ValueError("The key {key} already exists.".format(key=key)) extra_args = {} if encrypt: extra_args['ServerSideEncryption'] = "AES256" filelike_buffer = BytesIO(bytes_data) client = self.get_conn() client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args)
[ "def", "load_bytes", "(", "self", ",", "bytes_data", ",", "key", ",", "bucket_name", "=", "None", ",", "replace", "=", "False", ",", "encrypt", "=", "False", ")", ":", "if", "not", "bucket_name", ":", "(", "bucket_name", ",", "key", ")", "=", "self", ".", "parse_s3_url", "(", "key", ")", "if", "not", "replace", "and", "self", ".", "check_for_key", "(", "key", ",", "bucket_name", ")", ":", "raise", "ValueError", "(", "\"The key {key} already exists.\"", ".", "format", "(", "key", "=", "key", ")", ")", "extra_args", "=", "{", "}", "if", "encrypt", ":", "extra_args", "[", "'ServerSideEncryption'", "]", "=", "\"AES256\"", "filelike_buffer", "=", "BytesIO", "(", "bytes_data", ")", "client", "=", "self", ".", "get_conn", "(", ")", "client", ".", "upload_fileobj", "(", "filelike_buffer", ",", "bucket_name", ",", "key", ",", "ExtraArgs", "=", "extra_args", ")" ]
Loads bytes to S3 This is provided as a convenience to drop a string in S3. It uses the boto infrastructure to ship a file to s3. :param bytes_data: bytes to set as content for the key. :type bytes_data: bytes :param key: S3 key that will point to the file :type key: str :param bucket_name: Name of the bucket in which to store the file :type bucket_name: str :param replace: A flag to decide whether or not to overwrite the key if it already exists :type replace: bool :param encrypt: If True, the file will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :type encrypt: bool
[ "Loads", "bytes", "to", "S3" ]
python
test
36.538462
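A standalone boto3 sketch of the pattern load_bytes uses: wrap the bytes in a BytesIO and ship it with upload_fileobj, optionally requesting server-side encryption. The bucket and key names are placeholders, and credentials are assumed to come from the environment.

from io import BytesIO

import boto3

client = boto3.client('s3')
client.upload_fileobj(
    BytesIO(b'hello from airflow'),
    'my-example-bucket',   # hypothetical bucket
    'path/to/key',         # hypothetical key
    ExtraArgs={'ServerSideEncryption': 'AES256'},
)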
maas/python-libmaas
maas/client/bones/helpers.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/bones/helpers.py#L90-L127
async def connect(url, *, apikey=None, insecure=False):
    """Connect to a remote MAAS instance with `apikey`.

    Returns a new :class:`Profile` which has NOT been saved. To connect AND
    save a new profile::

        profile = connect(url, apikey=apikey)
        profile = profile.replace(name="mad-hatter")

        with profiles.ProfileStore.open() as config:
            config.save(profile)
            # Optionally, set it as the default.
            config.default = profile.name

    """
    url = api_url(url)
    url = urlparse(url)
    if url.username is not None:
        raise ConnectError(
            "Cannot provide user-name explicitly in URL (%r) when connecting; "
            "use login instead." % url.username)
    if url.password is not None:
        raise ConnectError(
            "Cannot provide password explicitly in URL (%r) when connecting; "
            "use login instead." % url.password)

    if apikey is None:
        credentials = None  # Anonymous access.
    else:
        credentials = Credentials.parse(apikey)

    description = await fetch_api_description(url, insecure)

    # Return a new (unsaved) profile.
    return Profile(
        name=url.netloc, url=url.geturl(),
        credentials=credentials, description=description)
[ "async", "def", "connect", "(", "url", ",", "*", ",", "apikey", "=", "None", ",", "insecure", "=", "False", ")", ":", "url", "=", "api_url", "(", "url", ")", "url", "=", "urlparse", "(", "url", ")", "if", "url", ".", "username", "is", "not", "None", ":", "raise", "ConnectError", "(", "\"Cannot provide user-name explicitly in URL (%r) when connecting; \"", "\"use login instead.\"", "%", "url", ".", "username", ")", "if", "url", ".", "password", "is", "not", "None", ":", "raise", "ConnectError", "(", "\"Cannot provide password explicitly in URL (%r) when connecting; \"", "\"use login instead.\"", "%", "url", ".", "password", ")", "if", "apikey", "is", "None", ":", "credentials", "=", "None", "# Anonymous access.", "else", ":", "credentials", "=", "Credentials", ".", "parse", "(", "apikey", ")", "description", "=", "await", "fetch_api_description", "(", "url", ",", "insecure", ")", "# Return a new (unsaved) profile.", "return", "Profile", "(", "name", "=", "url", ".", "netloc", ",", "url", "=", "url", ".", "geturl", "(", ")", ",", "credentials", "=", "credentials", ",", "description", "=", "description", ")" ]
Connect to a remote MAAS instance with `apikey`. Returns a new :class:`Profile` which has NOT been saved. To connect AND save a new profile:: profile = connect(url, apikey=apikey) profile = profile.replace(name="mad-hatter") with profiles.ProfileStore.open() as config: config.save(profile) # Optionally, set it as the default. config.default = profile.name
[ "Connect", "to", "a", "remote", "MAAS", "instance", "with", "apikey", "." ]
python
train
32.684211
numenta/htmresearch
htmresearch/algorithms/location_modules.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/location_modules.py#L219-L240
def _sensoryComputeInferenceMode(self, anchorInput): """ Infer the location from sensory input. Activate any cells with enough active synapses to this sensory input. Deactivate all other cells. @param anchorInput (numpy array) A sensory input. This will often come from a feature-location pair layer. """ if len(anchorInput) == 0: return overlaps = self.connections.computeActivity(anchorInput, self.connectedPermanence) activeSegments = np.where(overlaps >= self.activationThreshold)[0] sensorySupportedCells = np.unique( self.connections.mapSegmentsToCells(activeSegments)) self.bumpPhases = self.cellPhases[:,sensorySupportedCells] self._computeActiveCells() self.activeSegments = activeSegments self.sensoryAssociatedCells = sensorySupportedCells
[ "def", "_sensoryComputeInferenceMode", "(", "self", ",", "anchorInput", ")", ":", "if", "len", "(", "anchorInput", ")", "==", "0", ":", "return", "overlaps", "=", "self", ".", "connections", ".", "computeActivity", "(", "anchorInput", ",", "self", ".", "connectedPermanence", ")", "activeSegments", "=", "np", ".", "where", "(", "overlaps", ">=", "self", ".", "activationThreshold", ")", "[", "0", "]", "sensorySupportedCells", "=", "np", ".", "unique", "(", "self", ".", "connections", ".", "mapSegmentsToCells", "(", "activeSegments", ")", ")", "self", ".", "bumpPhases", "=", "self", ".", "cellPhases", "[", ":", ",", "sensorySupportedCells", "]", "self", ".", "_computeActiveCells", "(", ")", "self", ".", "activeSegments", "=", "activeSegments", "self", ".", "sensoryAssociatedCells", "=", "sensorySupportedCells" ]
Infer the location from sensory input. Activate any cells with enough active synapses to this sensory input. Deactivate all other cells. @param anchorInput (numpy array) A sensory input. This will often come from a feature-location pair layer.
[ "Infer", "the", "location", "from", "sensory", "input", ".", "Activate", "any", "cells", "with", "enough", "active", "synapses", "to", "this", "sensory", "input", ".", "Deactivate", "all", "other", "cells", "." ]
python
train
38.590909
annoviko/pyclustering
pyclustering/cluster/dbscan.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/dbscan.py#L196-L228
def __expand_cluster(self, index_point):
        """!
        @brief Expands cluster from specified point in the input data space.

        @param[in] index_point (uint): Index of a point from the data.

        @return (list) Return list of indexes that belong to the same cluster as the specified point, or None if nothing has been expanded.

        """

        cluster = None
        self.__visited[index_point] = True
        neighbors = self.__neighbor_searcher(index_point)

        if len(neighbors) >= self.__neighbors:
            cluster = [index_point]

            self.__belong[index_point] = True

            for i in neighbors:
                if self.__visited[i] is False:
                    self.__visited[i] = True

                    next_neighbors = self.__neighbor_searcher(i)

                    if len(next_neighbors) >= self.__neighbors:
                        neighbors += [k for k in next_neighbors if ( (k in neighbors) == False) and k != index_point]

                if self.__belong[i] is False:
                    cluster.append(i)
                    self.__belong[i] = True

        return cluster
[ "def", "__expand_cluster", "(", "self", ",", "index_point", ")", ":", "cluster", "=", "None", "self", ".", "__visited", "[", "index_point", "]", "=", "True", "neighbors", "=", "self", ".", "__neighbor_searcher", "(", "index_point", ")", "if", "len", "(", "neighbors", ")", ">=", "self", ".", "__neighbors", ":", "cluster", "=", "[", "index_point", "]", "self", ".", "__belong", "[", "index_point", "]", "=", "True", "for", "i", "in", "neighbors", ":", "if", "self", ".", "__visited", "[", "i", "]", "is", "False", ":", "self", ".", "__visited", "[", "i", "]", "=", "True", "next_neighbors", "=", "self", ".", "__neighbor_searcher", "(", "i", ")", "if", "len", "(", "next_neighbors", ")", ">=", "self", ".", "__neighbors", ":", "neighbors", "+=", "[", "k", "for", "k", "in", "next_neighbors", "if", "(", "(", "k", "in", "neighbors", ")", "==", "False", ")", "and", "k", "!=", "index_point", "]", "if", "self", ".", "__belong", "[", "i", "]", "is", "False", ":", "cluster", ".", "append", "(", "i", ")", "self", ".", "__belong", "[", "i", "]", "=", "True", "return", "cluster" ]
!
@brief Expands cluster from specified point in the input data space.

@param[in] index_point (uint): Index of a point from the data.

@return (list) Return list of indexes that belong to the same cluster as the specified point, or None if nothing has been expanded.
[ "!" ]
python
valid
38.909091
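A hedged usage sketch of the public DBSCAN interface that drives __expand_cluster above; the data, eps and neighbour threshold are invented, so treat the printed clusters as indicative.

from pyclustering.cluster.dbscan import dbscan

data = [[0.0, 0.0], [0.1, 0.1], [0.2, 0.0],
        [5.0, 5.0], [5.1, 4.9],
        [9.0, 0.0]]
instance = dbscan(data, 0.5, 1)   # eps=0.5, at least 1 neighbour
instance.process()
print(instance.get_clusters())    # e.g. [[0, 1, 2], [3, 4]]
print(instance.get_noise())       # e.g. [5]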
ChrisCummins/labm8
db.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/db.py#L305-L324
def create_table(self, name, schema): """ Create a new table. If the table already exists, nothing happens. Example: >>> db.create_table("foo", (("id", "integer primary key"), ("value", "text"))) Arguments: name (str): The name of the table to create. schema (sequence of tuples): A list of (name, type) tuples representing each of the columns. """ columns = [" ".join(column) for column in schema] self.execute("CREATE TABLE IF NOT EXISTS {name} ({columns})" .format(name=name, columns=",".join(columns)))
[ "def", "create_table", "(", "self", ",", "name", ",", "schema", ")", ":", "columns", "=", "[", "\" \"", ".", "join", "(", "column", ")", "for", "column", "in", "schema", "]", "self", ".", "execute", "(", "\"CREATE TABLE IF NOT EXISTS {name} ({columns})\"", ".", "format", "(", "name", "=", "name", ",", "columns", "=", "\",\"", ".", "join", "(", "columns", ")", ")", ")" ]
Create a new table. If the table already exists, nothing happens. Example: >>> db.create_table("foo", (("id", "integer primary key"), ("value", "text"))) Arguments: name (str): The name of the table to create. schema (sequence of tuples): A list of (name, type) tuples representing each of the columns.
[ "Create", "a", "new", "table", "." ]
python
train
33.2
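A standalone sqlite3 illustration of the statement create_table builds: the (name, type) pairs are joined into a CREATE TABLE IF NOT EXISTS statement exactly as above. The table and column names are invented.

import sqlite3

schema = (("id", "integer primary key"), ("value", "text"))
columns = ",".join(" ".join(column) for column in schema)

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE IF NOT EXISTS foo ({columns})".format(columns=columns))
conn.execute("INSERT INTO foo (value) VALUES (?)", ("bar",))
print(conn.execute("SELECT * FROM foo").fetchall())  # [(1, 'bar')]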
tanghaibao/goatools
goatools/obo_parser.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/obo_parser.py#L635-L651
def update_association(self, association):
        """Add the GO parents of a gene's associated GO IDs to the gene's association."""
        bad_goids = set()
        # Loop through all sets of GO IDs for all genes
        for goids in association.values():
            parents = set()
            # Iterate thru each GO ID in the current gene's association
            for goid in goids:
                try:
                    parents.update(self[goid].get_all_parents())
                except Exception:
                    bad_goids.add(goid.strip())
            # Add the GO parents of all GO IDs in the current gene's association
            goids.update(parents)
        if bad_goids:
            sys.stdout.write("{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\n".format(
                N=len(bad_goids), GOs=" ".join(bad_goids)))
[ "def", "update_association", "(", "self", ",", "association", ")", ":", "bad_goids", "=", "set", "(", ")", "# Loop through all sets of GO IDs for all genes", "for", "goids", "in", "association", ".", "values", "(", ")", ":", "parents", "=", "set", "(", ")", "# Iterate thru each GO ID in the current gene's association", "for", "goid", "in", "goids", ":", "try", ":", "parents", ".", "update", "(", "self", "[", "goid", "]", ".", "get_all_parents", "(", ")", ")", "except", "Exception", ":", "bad_goids", ".", "add", "(", "goid", ".", "strip", "(", ")", ")", "# Add the GO parents of all GO IDs in the current gene's association", "goids", ".", "update", "(", "parents", ")", "if", "bad_goids", ":", "sys", ".", "stdout", ".", "write", "(", "\"{N} GO IDs in assc. are not found in the GO-DAG: {GOs}\\n\"", ".", "format", "(", "N", "=", "len", "(", "bad_goids", ")", ",", "GOs", "=", "\" \"", ".", "join", "(", "bad_goids", ")", ")", ")" ]
Add the GO parents of a gene's associated GO IDs to the gene's association.
[ "Add", "the", "GO", "parents", "of", "a", "gene", "s", "associated", "GO", "IDs", "to", "the", "gene", "s", "association", "." ]
python
train
48.411765
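A toy illustration of the parent-expansion step in update_association, with plain dicts standing in for the GODag; the mini ontology below is invented, and each entry already holds the full ancestor set the way get_all_parents would return it.

parents = {'GO:3': {'GO:2', 'GO:1'}, 'GO:2': {'GO:1'}, 'GO:1': set()}
association = {'geneA': {'GO:3'}, 'geneB': {'GO:2'}}

for goids in association.values():
    acc = set()
    for goid in goids:
        acc.update(parents[goid])   # gather all ancestors of each GO ID
    goids.update(acc)

print(sorted(association['geneA']))  # ['GO:1', 'GO:2', 'GO:3']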
notanumber/xapian-haystack
xapian_backend.py
https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L817-L852
def parse_query(self, query_string): """ Given a `query_string`, will attempt to return a xapian.Query Required arguments: ``query_string`` -- A query string to parse Returns a xapian.Query """ if query_string == '*': return xapian.Query('') # Match everything elif query_string == '': return xapian.Query() # Match nothing qp = xapian.QueryParser() qp.set_database(self._database()) qp.set_stemmer(xapian.Stem(self.language)) qp.set_stemming_strategy(self.stemming_strategy) qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR]) qp.add_boolean_prefix(DJANGO_CT, TERM_PREFIXES[DJANGO_CT]) for field_dict in self.schema: # since 'django_ct' has a boolean_prefix, # we ignore it here. if field_dict['field_name'] == DJANGO_CT: continue qp.add_prefix( field_dict['field_name'], TERM_PREFIXES['field'] + field_dict['field_name'].upper() ) vrp = XHValueRangeProcessor(self) qp.add_valuerangeprocessor(vrp) return qp.parse_query(query_string, self.flags)
[ "def", "parse_query", "(", "self", ",", "query_string", ")", ":", "if", "query_string", "==", "'*'", ":", "return", "xapian", ".", "Query", "(", "''", ")", "# Match everything", "elif", "query_string", "==", "''", ":", "return", "xapian", ".", "Query", "(", ")", "# Match nothing", "qp", "=", "xapian", ".", "QueryParser", "(", ")", "qp", ".", "set_database", "(", "self", ".", "_database", "(", ")", ")", "qp", ".", "set_stemmer", "(", "xapian", ".", "Stem", "(", "self", ".", "language", ")", ")", "qp", ".", "set_stemming_strategy", "(", "self", ".", "stemming_strategy", ")", "qp", ".", "set_default_op", "(", "XAPIAN_OPTS", "[", "DEFAULT_OPERATOR", "]", ")", "qp", ".", "add_boolean_prefix", "(", "DJANGO_CT", ",", "TERM_PREFIXES", "[", "DJANGO_CT", "]", ")", "for", "field_dict", "in", "self", ".", "schema", ":", "# since 'django_ct' has a boolean_prefix,", "# we ignore it here.", "if", "field_dict", "[", "'field_name'", "]", "==", "DJANGO_CT", ":", "continue", "qp", ".", "add_prefix", "(", "field_dict", "[", "'field_name'", "]", ",", "TERM_PREFIXES", "[", "'field'", "]", "+", "field_dict", "[", "'field_name'", "]", ".", "upper", "(", ")", ")", "vrp", "=", "XHValueRangeProcessor", "(", "self", ")", "qp", ".", "add_valuerangeprocessor", "(", "vrp", ")", "return", "qp", ".", "parse_query", "(", "query_string", ",", "self", ".", "flags", ")" ]
Given a `query_string`, will attempt to return a xapian.Query Required arguments: ``query_string`` -- A query string to parse Returns a xapian.Query
[ "Given", "a", "query_string", "will", "attempt", "to", "return", "a", "xapian", ".", "Query" ]
python
train
33.25