Dataset schema:

| Column | Type | Range / values |
|---|---|---|
| repo | string | 7–55 chars |
| path | string | 4–223 chars |
| url | string | 87–315 chars |
| code | string | 75–104k chars |
| code_tokens | list | — |
| docstring | string | 1–46.9k chars |
| docstring_tokens | list | — |
| language | string (class) | 1 value (python) |
| partition | string (class) | 3 values (train, valid, test) |
| avg_line_len | float64 | 7.91–980 |
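As a minimal sketch of how a dump with this schema could be loaded and queried — the file name `code_docstring_corpus.jsonl` is hypothetical, and only the column names and ranges documented above are assumed:

```python
import pandas as pd

# Load the hypothetical JSONL export; each line is one record carrying the
# columns listed in the schema above (repo, path, url, code, docstring, ...).
df = pd.read_json("code_docstring_corpus.jsonl", lines=True)

# Keep only the training split and sanity-check the documented range.
train = df[df["partition"] == "train"]
assert df["avg_line_len"].between(7.91, 980).all()

# Example query: the record with the longest docstring in the training split.
longest = train.loc[train["docstring"].str.len().idxmax()]
print(longest["repo"], longest["url"])
```

---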
repo: ChargePoint/pydnp3
path: examples/master.py
url: https://github.com/ChargePoint/pydnp3/blob/5bcd8240d1fc0aa1579e71f2efcab63b4c61c547/examples/master.py#L247-L256

```python
def collection_callback(result=None):
    """
    :type result: opendnp3.CommandPointResult
    """
    print("Header: {0} | Index: {1} | State: {2} | Status: {3}".format(
        result.headerIndex,
        result.index,
        opendnp3.CommandPointStateToString(result.state),
        opendnp3.CommandStatusToString(result.status)
    ))
```
[ "def", "collection_callback", "(", "result", "=", "None", ")", ":", "print", "(", "\"Header: {0} | Index: {1} | State: {2} | Status: {3}\"", ".", "format", "(", "result", ".", "headerIndex", ",", "result", ".", "index", ",", "opendnp3", ".", "CommandPointStateToString", "(", "result", ".", "state", ")", ",", "opendnp3", ".", "CommandStatusToString", "(", "result", ".", "status", ")", ")", ")" ]
:type result: opendnp3.CommandPointResult
[ ":", "type", "result", ":", "opendnp3", ".", "CommandPointResult" ]
language: python | partition: valid | avg_line_len: 33.3

---
repo: rigetti/pyquil
path: pyquil/pyqvm.py
url: https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/pyqvm.py#L319-L333

```python
def find_label(self, label: Label):
    """
    Helper function that iterates over the program and looks for a
    JumpTarget that has a Label matching the input label.

    :param label: Label object to search for in program
    :return: Program index where ``label`` is found
    """
    for index, action in enumerate(self.program):
        if isinstance(action, JumpTarget):
            if label == action.label:
                return index
    raise RuntimeError("Improper program - Jump Target not found in the "
                       "input program!")
```
[ "def", "find_label", "(", "self", ",", "label", ":", "Label", ")", ":", "for", "index", ",", "action", "in", "enumerate", "(", "self", ".", "program", ")", ":", "if", "isinstance", "(", "action", ",", "JumpTarget", ")", ":", "if", "label", "==", "action", ".", "label", ":", "return", "index", "raise", "RuntimeError", "(", "\"Improper program - Jump Target not found in the \"", "\"input program!\"", ")" ]
Helper function that iterates over the program and looks for a JumpTarget that has a Label matching the input label. :param label: Label object to search for in program :return: Program index where ``label`` is found
[ "Helper", "function", "that", "iterates", "over", "the", "program", "and", "looks", "for", "a", "JumpTarget", "that", "has", "a", "Label", "matching", "the", "input", "label", "." ]
language: python | partition: train | avg_line_len: 39.666667

---
repo: dourvaris/nano-python
path: src/nano/rpc.py
url: https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L2335-L2361

```python
def wallet_contains(self, wallet, account):
    """
    Check whether **wallet** contains **account**

    :param wallet: Wallet to check contains **account**
    :type wallet: str

    :param account: Account to check exists in **wallet**
    :type account: str

    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.wallet_contains(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
    ...     account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000"
    ... )
    True
    """
    wallet = self._process_value(wallet, 'wallet')
    account = self._process_value(account, 'account')
    payload = {"wallet": wallet, "account": account}
    resp = self.call('wallet_contains', payload)
    return resp['exists'] == '1'
```
[ "def", "wallet_contains", "(", "self", ",", "wallet", ",", "account", ")", ":", "wallet", "=", "self", ".", "_process_value", "(", "wallet", ",", "'wallet'", ")", "account", "=", "self", ".", "_process_value", "(", "account", ",", "'account'", ")", "payload", "=", "{", "\"wallet\"", ":", "wallet", ",", "\"account\"", ":", "account", "}", "resp", "=", "self", ".", "call", "(", "'wallet_contains'", ",", "payload", ")", "return", "resp", "[", "'exists'", "]", "==", "'1'" ]
Check whether **wallet** contains **account** :param wallet: Wallet to check contains **account** :type wallet: str :param account: Account to check exists in **wallet** :type account: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.wallet_contains( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000" ... ) True
[ "Check", "whether", "**", "wallet", "**", "contains", "**", "account", "**" ]
language: python | partition: train | avg_line_len: 30.703704

---
repo: googleapis/google-cloud-python
path: logging/google/cloud/logging/entries.py
url: https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/entries.py#L287-L292

```python
def to_api_repr(self):
    """API repr (JSON format) for entry."""
    info = super(TextEntry, self).to_api_repr()
    info["textPayload"] = self.payload
    return info
```
[ "def", "to_api_repr", "(", "self", ")", ":", "info", "=", "super", "(", "TextEntry", ",", "self", ")", ".", "to_api_repr", "(", ")", "info", "[", "\"textPayload\"", "]", "=", "self", ".", "payload", "return", "info" ]
API repr (JSON format) for entry.
[ "API", "repr", "(", "JSON", "format", ")", "for", "entry", "." ]
language: python | partition: train | avg_line_len: 31.5

---
repo: DataDog/integrations-core
path: network/datadog_checks/network/network.py
url: https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/network/datadog_checks/network/network.py#L733-L742

```python
def _check_psutil(self, instance):
    """
    Gather metrics about connections states and interfaces counters
    using psutil facilities
    """
    custom_tags = instance.get('tags', [])
    if self._collect_cx_state:
        self._cx_state_psutil(tags=custom_tags)
    self._cx_counters_psutil(tags=custom_tags)
```
[ "def", "_check_psutil", "(", "self", ",", "instance", ")", ":", "custom_tags", "=", "instance", ".", "get", "(", "'tags'", ",", "[", "]", ")", "if", "self", ".", "_collect_cx_state", ":", "self", ".", "_cx_state_psutil", "(", "tags", "=", "custom_tags", ")", "self", ".", "_cx_counters_psutil", "(", "tags", "=", "custom_tags", ")" ]
Gather metrics about connections states and interfaces counters using psutil facilities
[ "Gather", "metrics", "about", "connections", "states", "and", "interfaces", "counters", "using", "psutil", "facilities" ]
language: python | partition: train | avg_line_len: 33.9

---
repo: h2oai/h2o-3
path: h2o-bindings/bin/pymagic.py
url: https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-bindings/bin/pymagic.py#L24-L32

```python
def locate_files(root_dir):
    """Find all python files in the given directory and all subfolders."""
    all_files = []
    root_dir = os.path.abspath(root_dir)
    for dir_name, subdirs, files in os.walk(root_dir):
        for f in files:
            if f.endswith(".py"):
                all_files.append(os.path.join(dir_name, f))
    return all_files
```
[ "def", "locate_files", "(", "root_dir", ")", ":", "all_files", "=", "[", "]", "root_dir", "=", "os", ".", "path", ".", "abspath", "(", "root_dir", ")", "for", "dir_name", ",", "subdirs", ",", "files", "in", "os", ".", "walk", "(", "root_dir", ")", ":", "for", "f", "in", "files", ":", "if", "f", ".", "endswith", "(", "\".py\"", ")", ":", "all_files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "dir_name", ",", "f", ")", ")", "return", "all_files" ]
Find all python files in the given directory and all subfolders.
[ "Find", "all", "python", "files", "in", "the", "given", "directory", "and", "all", "subfolders", "." ]
language: python | partition: test | avg_line_len: 38.666667

---
repo: mayfield/shellish
path: shellish/rendering/html.py
url: https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/rendering/html.py#L223-L226

```python
def htmlprint(*values, plain=None, **options):
    """ Convert HTML to VTML and then print it.
    Follows same semantics as vtmlprint. """
    print(*[htmlrender(x, plain=plain) for x in values], **options)
```
[ "def", "htmlprint", "(", "*", "values", ",", "plain", "=", "None", ",", "*", "*", "options", ")", ":", "print", "(", "*", "[", "htmlrender", "(", "x", ",", "plain", "=", "plain", ")", "for", "x", "in", "values", "]", ",", "*", "*", "options", ")" ]
Convert HTML to VTML and then print it. Follows same semantics as vtmlprint.
[ "Convert", "HTML", "to", "VTML", "and", "then", "print", "it", ".", "Follows", "same", "semantics", "as", "vtmlprint", "." ]
language: python | partition: train | avg_line_len: 51

---
repo: hearsaycorp/normalize
path: normalize/selector.py
url: https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/selector.py#L837-L862

```python
def delete(self, obj, force=False):
    """Deletes all of the fields at the specified locations.

    args:

        ``obj=``\ *OBJECT*
            the object to remove the fields from

        ``force=``\ *BOOL*
            if True, missing attributes do not raise errors.  Otherwise,
            the first failure raises an exception without making any
            changes to ``obj``.
    """
    # TODO: this could be a whole lot more efficient!
    if not force:
        for fs in self:
            try:
                fs.get(obj)
            except FieldSelectorException:
                raise
    for fs in self:
        try:
            fs.delete(obj)
        except FieldSelectorException:
            pass
```
[ "def", "delete", "(", "self", ",", "obj", ",", "force", "=", "False", ")", ":", "# TODO: this could be a whole lot more efficient!", "if", "not", "force", ":", "for", "fs", "in", "self", ":", "try", ":", "fs", ".", "get", "(", "obj", ")", "except", "FieldSelectorException", ":", "raise", "for", "fs", "in", "self", ":", "try", ":", "fs", ".", "delete", "(", "obj", ")", "except", "FieldSelectorException", ":", "pass" ]
Deletes all of the fields at the specified locations. args: ``obj=``\ *OBJECT* the object to remove the fields from ``force=``\ *BOOL* if True, missing attributes do not raise errors. Otherwise, the first failure raises an exception without making any changes to ``obj``.
[ "Deletes", "all", "of", "the", "fields", "at", "the", "specified", "locations", "." ]
language: python | partition: train | avg_line_len: 29.846154

---
repo: mohamedattahri/PyXMLi
path: pyxmli/__init__.py
url: https://github.com/mohamedattahri/PyXMLi/blob/a81a245be822d62f1a20c734ca14b42c786ae81e/pyxmli/__init__.py#L1289-L1298

```python
def compute_discounts(self, precision=None):
    '''
    Returns the total amount of discounts for this line with a specific
    number of decimals.
    @param precision:int number of decimal places
    @return: Decimal
    '''
    gross = self.compute_gross(precision)
    return min(gross, sum([d.compute(gross, precision)
                           for d in self.__discounts]))
```
[ "def", "compute_discounts", "(", "self", ",", "precision", "=", "None", ")", ":", "gross", "=", "self", ".", "compute_gross", "(", "precision", ")", "return", "min", "(", "gross", ",", "sum", "(", "[", "d", ".", "compute", "(", "gross", ",", "precision", ")", "for", "d", "in", "self", ".", "__discounts", "]", ")", ")" ]
Returns the total amount of discounts for this line with a specific number of decimals. @param precision:int number of decimal places @return: Decimal
[ "Returns", "the", "total", "amount", "of", "discounts", "for", "this", "line", "with", "a", "specific", "number", "of", "decimals", "." ]
language: python | partition: train | avg_line_len: 39.5

---
repo: openbermuda/ripl
path: ripl/md2py.py
url: https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/md2py.py#L57-L116

```python
def generate_records(self, infile):
    """ Process a file of rest and yield dictionaries """
    state = 0
    record = {}
    for item in self.generate_lines(infile):
        line = item['line']
        heading = item['heading']

        # any Markdown heading is just a caption, no image
        if heading:
            record['heading'] = True
            record['caption'] = line[1:].strip()
            state = 'caption'
            continue

        if not line[0].isspace():
            # at a potential image
            if state == 'caption':
                yield record
                record = {}
                state = 0

        if state == 'caption':
            record['caption'] += '\n' + line[:-1]
            continue

        fields = line.split(',')

        # nothing there, carry on
        if not fields:
            continue

        image = fields[0].strip()
        if not image:
            continue

        record['image'] = image

        try:
            time = float(fields[1])
        except:
            time = 0
        record['time'] = time

        try:
            caption = fields[2].strip()
        except:
            caption = None

        if caption:
            record['caption'] = caption

        # yield it if we have anything
        if record:
            yield record
            record = {}
```
[ "def", "generate_records", "(", "self", ",", "infile", ")", ":", "state", "=", "0", "record", "=", "{", "}", "for", "item", "in", "self", ".", "generate_lines", "(", "infile", ")", ":", "line", "=", "item", "[", "'line'", "]", "heading", "=", "item", "[", "'heading'", "]", "# any Markdown heading is just a caption, no image", "if", "heading", ":", "record", "[", "'heading'", "]", "=", "True", "record", "[", "'caption'", "]", "=", "line", "[", "1", ":", "]", ".", "strip", "(", ")", "state", "=", "'caption'", "continue", "if", "not", "line", "[", "0", "]", ".", "isspace", "(", ")", ":", "# at a potential image", "if", "state", "==", "'caption'", ":", "yield", "record", "record", "=", "{", "}", "state", "=", "0", "if", "state", "==", "'caption'", ":", "record", "[", "'caption'", "]", "+=", "'\\n'", "+", "line", "[", ":", "-", "1", "]", "continue", "fields", "=", "line", ".", "split", "(", "','", ")", "# nothing there, carry on", "if", "not", "fields", ":", "continue", "image", "=", "fields", "[", "0", "]", ".", "strip", "(", ")", "if", "not", "image", ":", "continue", "record", "[", "'image'", "]", "=", "image", "try", ":", "time", "=", "float", "(", "fields", "[", "1", "]", ")", "except", ":", "time", "=", "0", "record", "[", "'time'", "]", "=", "time", "try", ":", "caption", "=", "fields", "[", "2", "]", ".", "strip", "(", ")", "except", ":", "caption", "=", "None", "if", "caption", ":", "record", "[", "'caption'", "]", "=", "caption", "# yield it if we have anything", "if", "record", ":", "yield", "record", "record", "=", "{", "}" ]
Process a file of rest and yield dictionaries
[ "Process", "a", "file", "of", "rest", "and", "yield", "dictionaries" ]
language: python | partition: train | avg_line_len: 24.133333

---
repo: secdev/scapy
path: scapy/packet.py
url: https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/packet.py#L1761-L1849

```python
def ls(obj=None, case_sensitive=False, verbose=False):
    """List available layers, or infos on a given layer class or name.
    params:
     - obj: Packet / packet name to use
     - case_sensitive: if obj is a string, is it case sensitive?
     - verbose
    """
    is_string = isinstance(obj, six.string_types)

    if obj is None or is_string:
        tip = False
        if obj is None:
            tip = True
            all_layers = sorted(conf.layers, key=lambda x: x.__name__)
        else:
            pattern = re.compile(obj, 0 if case_sensitive else re.I)
            # We first order by accuracy, then length
            if case_sensitive:
                sorter = lambda x: (x.__name__.index(obj), len(x.__name__))
            else:
                obj = obj.lower()
                sorter = lambda x: (x.__name__.lower().index(obj),
                                    len(x.__name__))
            all_layers = sorted((layer for layer in conf.layers
                                 if (isinstance(layer.__name__, str) and
                                     pattern.search(layer.__name__)) or
                                 (isinstance(layer.name, str) and
                                  pattern.search(layer.name))),
                                key=sorter)
        for layer in all_layers:
            print("%-10s : %s" % (layer.__name__, layer._name))
        if tip and conf.interactive:
            print("\nTIP: You may use explore() to navigate through all "
                  "layers using a clear GUI")
    else:
        is_pkt = isinstance(obj, Packet)
        if issubtype(obj, Packet) or is_pkt:
            for f in obj.fields_desc:
                cur_fld = f
                attrs = []
                long_attrs = []
                while isinstance(cur_fld, (Emph, ConditionalField)):
                    if isinstance(cur_fld, ConditionalField):
                        attrs.append(cur_fld.__class__.__name__[:4])
                    cur_fld = cur_fld.fld
                if verbose and isinstance(cur_fld, EnumField) \
                   and hasattr(cur_fld, "i2s"):
                    if len(cur_fld.i2s) < 50:
                        long_attrs.extend(
                            "%s: %d" % (strval, numval)
                            for numval, strval in
                            sorted(six.iteritems(cur_fld.i2s))
                        )
                elif isinstance(cur_fld, MultiEnumField):
                    fld_depend = cur_fld.depends_on(obj.__class__
                                                    if is_pkt else obj)
                    attrs.append("Depends on %s" % fld_depend.name)
                    if verbose:
                        cur_i2s = cur_fld.i2s_multi.get(
                            cur_fld.depends_on(obj if is_pkt else obj()), {}
                        )
                        if len(cur_i2s) < 50:
                            long_attrs.extend(
                                "%s: %d" % (strval, numval)
                                for numval, strval in
                                sorted(six.iteritems(cur_i2s))
                            )
                elif verbose and isinstance(cur_fld, FlagsField):
                    names = cur_fld.names
                    long_attrs.append(", ".join(names))
                class_name = "%s (%s)" % (
                    cur_fld.__class__.__name__,
                    ", ".join(attrs)) if attrs else cur_fld.__class__.__name__
                if isinstance(cur_fld, BitField):
                    class_name += " (%d bit%s)" % (cur_fld.size,
                                                   "s" if cur_fld.size > 1
                                                   else "")
                print("%-10s : %-35s =" % (f.name, class_name), end=' ')
                if is_pkt:
                    print("%-15r" % (getattr(obj, f.name),), end=' ')
                print("(%r)" % (f.default,))
                for attr in long_attrs:
                    print("%-15s%s" % ("", attr))
            if is_pkt and not isinstance(obj.payload, NoPayload):
                print("--")
                ls(obj.payload)
        else:
            print("Not a packet class or name. Type 'ls()' to list packet classes.")
```
[ "def", "ls", "(", "obj", "=", "None", ",", "case_sensitive", "=", "False", ",", "verbose", "=", "False", ")", ":", "is_string", "=", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", "if", "obj", "is", "None", "or", "is_string", ":", "tip", "=", "False", "if", "obj", "is", "None", ":", "tip", "=", "True", "all_layers", "=", "sorted", "(", "conf", ".", "layers", ",", "key", "=", "lambda", "x", ":", "x", ".", "__name__", ")", "else", ":", "pattern", "=", "re", ".", "compile", "(", "obj", ",", "0", "if", "case_sensitive", "else", "re", ".", "I", ")", "# We first order by accuracy, then length", "if", "case_sensitive", ":", "sorter", "=", "lambda", "x", ":", "(", "x", ".", "__name__", ".", "index", "(", "obj", ")", ",", "len", "(", "x", ".", "__name__", ")", ")", "else", ":", "obj", "=", "obj", ".", "lower", "(", ")", "sorter", "=", "lambda", "x", ":", "(", "x", ".", "__name__", ".", "lower", "(", ")", ".", "index", "(", "obj", ")", ",", "len", "(", "x", ".", "__name__", ")", ")", "all_layers", "=", "sorted", "(", "(", "layer", "for", "layer", "in", "conf", ".", "layers", "if", "(", "isinstance", "(", "layer", ".", "__name__", ",", "str", ")", "and", "pattern", ".", "search", "(", "layer", ".", "__name__", ")", ")", "or", "(", "isinstance", "(", "layer", ".", "name", ",", "str", ")", "and", "pattern", ".", "search", "(", "layer", ".", "name", ")", ")", ")", ",", "key", "=", "sorter", ")", "for", "layer", "in", "all_layers", ":", "print", "(", "\"%-10s : %s\"", "%", "(", "layer", ".", "__name__", ",", "layer", ".", "_name", ")", ")", "if", "tip", "and", "conf", ".", "interactive", ":", "print", "(", "\"\\nTIP: You may use explore() to navigate through all \"", "\"layers using a clear GUI\"", ")", "else", ":", "is_pkt", "=", "isinstance", "(", "obj", ",", "Packet", ")", "if", "issubtype", "(", "obj", ",", "Packet", ")", "or", "is_pkt", ":", "for", "f", "in", "obj", ".", "fields_desc", ":", "cur_fld", "=", "f", "attrs", "=", "[", "]", "long_attrs", "=", "[", "]", "while", "isinstance", "(", "cur_fld", ",", "(", "Emph", ",", "ConditionalField", ")", ")", ":", "if", "isinstance", "(", "cur_fld", ",", "ConditionalField", ")", ":", "attrs", ".", "append", "(", "cur_fld", ".", "__class__", ".", "__name__", "[", ":", "4", "]", ")", "cur_fld", "=", "cur_fld", ".", "fld", "if", "verbose", "and", "isinstance", "(", "cur_fld", ",", "EnumField", ")", "and", "hasattr", "(", "cur_fld", ",", "\"i2s\"", ")", ":", "if", "len", "(", "cur_fld", ".", "i2s", ")", "<", "50", ":", "long_attrs", ".", "extend", "(", "\"%s: %d\"", "%", "(", "strval", ",", "numval", ")", "for", "numval", ",", "strval", "in", "sorted", "(", "six", ".", "iteritems", "(", "cur_fld", ".", "i2s", ")", ")", ")", "elif", "isinstance", "(", "cur_fld", ",", "MultiEnumField", ")", ":", "fld_depend", "=", "cur_fld", ".", "depends_on", "(", "obj", ".", "__class__", "if", "is_pkt", "else", "obj", ")", "attrs", ".", "append", "(", "\"Depends on %s\"", "%", "fld_depend", ".", "name", ")", "if", "verbose", ":", "cur_i2s", "=", "cur_fld", ".", "i2s_multi", ".", "get", "(", "cur_fld", ".", "depends_on", "(", "obj", "if", "is_pkt", "else", "obj", "(", ")", ")", ",", "{", "}", ")", "if", "len", "(", "cur_i2s", ")", "<", "50", ":", "long_attrs", ".", "extend", "(", "\"%s: %d\"", "%", "(", "strval", ",", "numval", ")", "for", "numval", ",", "strval", "in", "sorted", "(", "six", ".", "iteritems", "(", "cur_i2s", ")", ")", ")", "elif", "verbose", "and", "isinstance", "(", "cur_fld", ",", "FlagsField", ")", ":", "names", "=", "cur_fld", ".", 
"names", "long_attrs", ".", "append", "(", "\", \"", ".", "join", "(", "names", ")", ")", "class_name", "=", "\"%s (%s)\"", "%", "(", "cur_fld", ".", "__class__", ".", "__name__", ",", "\", \"", ".", "join", "(", "attrs", ")", ")", "if", "attrs", "else", "cur_fld", ".", "__class__", ".", "__name__", "if", "isinstance", "(", "cur_fld", ",", "BitField", ")", ":", "class_name", "+=", "\" (%d bit%s)\"", "%", "(", "cur_fld", ".", "size", ",", "\"s\"", "if", "cur_fld", ".", "size", ">", "1", "else", "\"\"", ")", "print", "(", "\"%-10s : %-35s =\"", "%", "(", "f", ".", "name", ",", "class_name", ")", ",", "end", "=", "' '", ")", "if", "is_pkt", ":", "print", "(", "\"%-15r\"", "%", "(", "getattr", "(", "obj", ",", "f", ".", "name", ")", ",", ")", ",", "end", "=", "' '", ")", "print", "(", "\"(%r)\"", "%", "(", "f", ".", "default", ",", ")", ")", "for", "attr", "in", "long_attrs", ":", "print", "(", "\"%-15s%s\"", "%", "(", "\"\"", ",", "attr", ")", ")", "if", "is_pkt", "and", "not", "isinstance", "(", "obj", ".", "payload", ",", "NoPayload", ")", ":", "print", "(", "\"--\"", ")", "ls", "(", "obj", ".", "payload", ")", "else", ":", "print", "(", "\"Not a packet class or name. Type 'ls()' to list packet classes.\"", ")" ]
List available layers, or infos on a given layer class or name. params: - obj: Packet / packet name to use - case_sensitive: if obj is a string, is it case sensitive? - verbose
[ "List", "available", "layers", "or", "infos", "on", "a", "given", "layer", "class", "or", "name", ".", "params", ":", "-", "obj", ":", "Packet", "/", "packet", "name", "to", "use", "-", "case_sensitive", ":", "if", "obj", "is", "a", "string", "is", "it", "case", "sensitive?", "-", "verbose" ]
language: python | partition: train | avg_line_len: 47.033708

---
repo: kowalpy/Robot-Framework-FTP-Library
path: FtpLibrary.py
url: https://github.com/kowalpy/Robot-Framework-FTP-Library/blob/90794be0a12af489ac98e8ae3b4ff450c83e2f3d/FtpLibrary.py#L119-L160

```python
def ftp_connect(self, host, user='anonymous', password='anonymous@', port=21,
                timeout=30, connId='default'):
    """
    Constructs FTP object, opens a connection and login.
    Call this function before any other (otherwise raises exception).
    Returns server output.

    Parameters:
    - host - server host address
    - user(optional) - FTP user name. If not given, 'anonymous' is used.
    - password(optional) - FTP password. If not given, 'anonymous@' is used.
    - port(optional) - TCP port. By default 21.
    - timeout(optional) - timeout in seconds. By default 30.
    - connId(optional) - connection identifier. By default equals 'default'

    Examples:
    | ftp connect | 192.168.1.10 | mylogin | mypassword |  |  |
    | ftp connect | 192.168.1.10 |  |  |  |  |
    | ftp connect | 192.168.1.10 | mylogin | mypassword | connId=secondConn |  |
    | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | 20 |
    | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 |  |
    | ftp connect | 192.168.1.10 | mylogin | mypassword | timeout=20 |  |
    | ftp connect | 192.168.1.10 | port=29 | timeout=20 |  |  |
    """
    if connId in self.ftpList:
        errMsg = "Connection with ID %s already exist. It should be deleted before this step." % connId
        raise FtpLibraryError(errMsg)
    else:
        newFtp = None
        outputMsg = ""
        try:
            timeout = int(timeout)
            port = int(port)
            newFtp = ftplib.FTP()
            outputMsg += newFtp.connect(host, port, timeout)
            outputMsg += newFtp.login(user, password)
        except socket.error as se:
            raise FtpLibraryError('Socket error exception occured.')
        except ftplib.all_errors as e:
            raise FtpLibraryError(str(e))
        except Exception as e:
            raise FtpLibraryError(str(e))
        if self.printOutput:
            logger.info(outputMsg)
        self.__addNewConnection(newFtp, connId)
```
[ "def", "ftp_connect", "(", "self", ",", "host", ",", "user", "=", "'anonymous'", ",", "password", "=", "'anonymous@'", ",", "port", "=", "21", ",", "timeout", "=", "30", ",", "connId", "=", "'default'", ")", ":", "if", "connId", "in", "self", ".", "ftpList", ":", "errMsg", "=", "\"Connection with ID %s already exist. It should be deleted before this step.\"", "%", "connId", "raise", "FtpLibraryError", "(", "errMsg", ")", "else", ":", "newFtp", "=", "None", "outputMsg", "=", "\"\"", "try", ":", "timeout", "=", "int", "(", "timeout", ")", "port", "=", "int", "(", "port", ")", "newFtp", "=", "ftplib", ".", "FTP", "(", ")", "outputMsg", "+=", "newFtp", ".", "connect", "(", "host", ",", "port", ",", "timeout", ")", "outputMsg", "+=", "newFtp", ".", "login", "(", "user", ",", "password", ")", "except", "socket", ".", "error", "as", "se", ":", "raise", "FtpLibraryError", "(", "'Socket error exception occured.'", ")", "except", "ftplib", ".", "all_errors", "as", "e", ":", "raise", "FtpLibraryError", "(", "str", "(", "e", ")", ")", "except", "Exception", "as", "e", ":", "raise", "FtpLibraryError", "(", "str", "(", "e", ")", ")", "if", "self", ".", "printOutput", ":", "logger", ".", "info", "(", "outputMsg", ")", "self", ".", "__addNewConnection", "(", "newFtp", ",", "connId", ")" ]
Constructs FTP object, opens a connection and login. Call this function before any other (otherwise raises exception). Returns server output. Parameters: - host - server host address - user(optional) - FTP user name. If not given, 'anonymous' is used. - password(optional) - FTP password. If not given, 'anonymous@' is used. - port(optional) - TCP port. By default 21. - timeout(optional) - timeout in seconds. By default 30. - connId(optional) - connection identifier. By default equals 'default' Examples: | ftp connect | 192.168.1.10 | mylogin | mypassword | | | | ftp connect | 192.168.1.10 | | | | | | ftp connect | 192.168.1.10 | mylogin | mypassword | connId=secondConn | | | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | 20 | | ftp connect | 192.168.1.10 | mylogin | mypassword | 29 | | | ftp connect | 192.168.1.10 | mylogin | mypassword | timeout=20 | | | ftp connect | 192.168.1.10 | port=29 | timeout=20 | | |
[ "Constructs", "FTP", "object", "opens", "a", "connection", "and", "login", ".", "Call", "this", "function", "before", "any", "other", "(", "otherwise", "raises", "exception", ")", ".", "Returns", "server", "output", ".", "Parameters", ":", "-", "host", "-", "server", "host", "address", "-", "user", "(", "optional", ")", "-", "FTP", "user", "name", ".", "If", "not", "given", "anonymous", "is", "used", ".", "-", "password", "(", "optional", ")", "-", "FTP", "password", ".", "If", "not", "given", "anonymous" ]
language: python | partition: train | avg_line_len: 50.238095

---
repo: svasilev94/GraphLibrary
path: graphlibrary/first_search.py
url: https://github.com/svasilev94/GraphLibrary/blob/bf979a80bdea17eeb25955f0c119ca8f711ef62b/graphlibrary/first_search.py#L54-L92

```python
def DFS(G):
    """
    Algorithm for depth-first searching the vertices of a graph.
    """
    if not G.vertices:
        raise GraphInsertError("This graph have no vertices.")
    color = {}
    pred = {}
    reach = {}
    finish = {}

    def DFSvisit(G, current, time):
        color[current] = 'grey'
        time += 1
        reach[current] = time
        for vertex in G.vertices[current]:
            if color[vertex] == 'white':
                pred[vertex] = current
                time = DFSvisit(G, vertex, time)
        color[current] = 'black'
        time += 1
        finish[current] = time
        return time

    for vertex in G.vertices:
        color[vertex] = 'white'
        pred[vertex] = None
        reach[vertex] = 0
        finish[vertex] = 0
    time = 0
    for vertex in G.vertices:
        if color[vertex] == 'white':
            time = DFSvisit(G, vertex, time)
    # Dictionary for vertex data after DFS
    # -> vertex_data = {vertex: (predecessor, reach, finish), }
    vertex_data = {}
    for vertex in G.vertices:
        vertex_data[vertex] = (pred[vertex], reach[vertex], finish[vertex])
    return vertex_data
```
[ "def", "DFS", "(", "G", ")", ":", "if", "not", "G", ".", "vertices", ":", "raise", "GraphInsertError", "(", "\"This graph have no vertices.\"", ")", "color", "=", "{", "}", "pred", "=", "{", "}", "reach", "=", "{", "}", "finish", "=", "{", "}", "def", "DFSvisit", "(", "G", ",", "current", ",", "time", ")", ":", "color", "[", "current", "]", "=", "'grey'", "time", "+=", "1", "reach", "[", "current", "]", "=", "time", "for", "vertex", "in", "G", ".", "vertices", "[", "current", "]", ":", "if", "color", "[", "vertex", "]", "==", "'white'", ":", "pred", "[", "vertex", "]", "=", "current", "time", "=", "DFSvisit", "(", "G", ",", "vertex", ",", "time", ")", "color", "[", "current", "]", "=", "'black'", "time", "+=", "1", "finish", "[", "current", "]", "=", "time", "return", "time", "for", "vertex", "in", "G", ".", "vertices", ":", "color", "[", "vertex", "]", "=", "'white'", "pred", "[", "vertex", "]", "=", "None", "reach", "[", "vertex", "]", "=", "0", "finish", "[", "vertex", "]", "=", "0", "time", "=", "0", "for", "vertex", "in", "G", ".", "vertices", ":", "if", "color", "[", "vertex", "]", "==", "'white'", ":", "time", "=", "DFSvisit", "(", "G", ",", "vertex", ",", "time", ")", "# Dictionary for vertex data after DFS\r", "# -> vertex_data = {vertex: (predecessor, reach, finish), }\r", "vertex_data", "=", "{", "}", "for", "vertex", "in", "G", ".", "vertices", ":", "vertex_data", "[", "vertex", "]", "=", "(", "pred", "[", "vertex", "]", ",", "reach", "[", "vertex", "]", ",", "finish", "[", "vertex", "]", ")", "return", "vertex_data" ]
Algorithm for depth-first searching the vertices of a graph.
[ "Algorithm", "for", "depth", "-", "first", "searching", "the", "vertices", "of", "a", "graph", "." ]
language: python | partition: train | avg_line_len: 29.615385

---
repo: belbio/bel
path: bel/nanopub/nanopubs.py
url: https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/nanopub/nanopubs.py#L108-L138

```python
def bel_edges(
    self,
    nanopub: Mapping[str, Any],
    namespace_targets: Mapping[str, List[str]] = {},
    rules: List[str] = [],
    orthologize_target: str = None,
) -> List[Mapping[str, Any]]:
    """Create BEL Edges from BEL nanopub

    Args:
        nanopub (Mapping[str, Any]): bel nanopub
        namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize
        rules (List[str]): which computed edge rules to process, default is all,
            look at BEL Specification yaml file for computed edge signature keys,
            e.g. degradation, if any rule in list is 'skip', then skip computing
            edges just return primary_edge
        orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for
            mouse, default option does not orthologize

    Returns:
        List[Mapping[str, Any]]: edge list with edge attributes (e.g. context)
    """
    edges = bel.edge.edges.create_edges(
        nanopub,
        self.endpoint,
        namespace_targets=namespace_targets,
        rules=rules,
        orthologize_target=orthologize_target,
    )
    return edges
```
[ "def", "bel_edges", "(", "self", ",", "nanopub", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "namespace_targets", ":", "Mapping", "[", "str", ",", "List", "[", "str", "]", "]", "=", "{", "}", ",", "rules", ":", "List", "[", "str", "]", "=", "[", "]", ",", "orthologize_target", ":", "str", "=", "None", ",", ")", "->", "List", "[", "Mapping", "[", "str", ",", "Any", "]", "]", ":", "edges", "=", "bel", ".", "edge", ".", "edges", ".", "create_edges", "(", "nanopub", ",", "self", ".", "endpoint", ",", "namespace_targets", "=", "namespace_targets", ",", "rules", "=", "rules", ",", "orthologize_target", "=", "orthologize_target", ",", ")", "return", "edges" ]
Create BEL Edges from BEL nanopub Args: nanopub (Mapping[str, Any]): bel nanopub namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize rules (List[str]): which computed edge rules to process, default is all, look at BEL Specification yaml file for computed edge signature keys, e.g. degradation, if any rule in list is 'skip', then skip computing edges just return primary_edge orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize Returns: List[Mapping[str, Any]]: edge list with edge attributes (e.g. context)
[ "Create", "BEL", "Edges", "from", "BEL", "nanopub" ]
language: python | partition: train | avg_line_len: 38.419355

---
repo: loli/medpy
path: medpy/metric/histogram.py
url: https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L139-L190

```python
def chebyshev(h1, h2):  # 12 us @array, 36 us @list \w 100 bins
    r"""
    Chebyshev distance.

    Also Tchebychev distance, Maximum or :math:`L_{\infty}` metric; equal to
    Minowski distance with :math:`p=+\infty`. For the case of
    :math:`p=-\infty`, use `chebyshev_neg`.

    The Chebyshev distance between two histograms :math:`H` and :math:`H'` of
    size :math:`m` is defined as:

    .. math::

        d_{\infty}(H, H') = \max_{m=1}^M|H_m-H'_m|

    *Attributes:*

    - semimetric (triangle equation satisfied?)

    *Attributes for normalized histograms:*

    - :math:`d(H, H')\in[0, 1]`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-normalized histograms:*

    - :math:`d(H, H')\in[0, \infty)`
    - :math:`d(H, H) = 0`
    - :math:`d(H, H') = d(H', H)`

    *Attributes for not-equal histograms:*

    - not applicable

    Parameters
    ----------
    h1 : sequence
        The first histogram.
    h2 : sequence
        The second histogram.

    Returns
    -------
    chebyshev : float
        Chebyshev distance.

    See also
    --------
    minowski, chebyshev_neg
    """
    h1, h2 = __prepare_histogram(h1, h2)
    return max(scipy.absolute(h1 - h2))
```
[ "def", "chebyshev", "(", "h1", ",", "h2", ")", ":", "# 12 us @array, 36 us @list \\w 100 bins", "h1", ",", "h2", "=", "__prepare_histogram", "(", "h1", ",", "h2", ")", "return", "max", "(", "scipy", ".", "absolute", "(", "h1", "-", "h2", ")", ")" ]
r""" Chebyshev distance. Also Tchebychev distance, Maximum or :math:`L_{\infty}` metric; equal to Minowski distance with :math:`p=+\infty`. For the case of :math:`p=-\infty`, use `chebyshev_neg`. The Chebyshev distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_{\infty}(H, H') = \max_{m=1}^M|H_m-H'_m| *Attributes:* - semimetric (triangle equation satisfied?) *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - :math:`d(H, H')\in[0, \infty)` - :math:`d(H, H) = 0` - :math:`d(H, H') = d(H', H)` *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram. h2 : sequence The second histogram. Returns ------- chebyshev : float Chebyshev distance. See also -------- minowski, chebyshev_neg
[ "r", "Chebyshev", "distance", ".", "Also", "Tchebychev", "distance", "Maximum", "or", ":", "math", ":", "L_", "{", "\\", "infty", "}", "metric", ";", "equal", "to", "Minowski", "distance", "with", ":", "math", ":", "p", "=", "+", "\\", "infty", ".", "For", "the", "case", "of", ":", "math", ":", "p", "=", "-", "\\", "infty", "use", "chebyshev_neg", ".", "The", "Chebyshev", "distance", "between", "two", "histograms", ":", "math", ":", "H", "and", ":", "math", ":", "H", "of", "size", ":", "math", ":", "m", "is", "defined", "as", ":", "..", "math", "::", "d_", "{", "\\", "infty", "}", "(", "H", "H", ")", "=", "\\", "max_", "{", "m", "=", "1", "}", "^M|H_m", "-", "H", "_m|", "*", "Attributes", ":", "*", "-", "semimetric", "(", "triangle", "equation", "satisfied?", ")", "*", "Attributes", "for", "normalized", "histograms", ":", "*", "-", ":", "math", ":", "d", "(", "H", "H", ")", "\\", "in", "[", "0", "1", "]", "-", ":", "math", ":", "d", "(", "H", "H", ")", "=", "0", "-", ":", "math", ":", "d", "(", "H", "H", ")", "=", "d", "(", "H", "H", ")", "*", "Attributes", "for", "not", "-", "normalized", "histograms", ":", "*", "-", ":", "math", ":", "d", "(", "H", "H", ")", "\\", "in", "[", "0", "\\", "infty", ")", "-", ":", "math", ":", "d", "(", "H", "H", ")", "=", "0", "-", ":", "math", ":", "d", "(", "H", "H", ")", "=", "d", "(", "H", "H", ")", "*", "Attributes", "for", "not", "-", "equal", "histograms", ":", "*", "-", "not", "applicable", "Parameters", "----------", "h1", ":", "sequence", "The", "first", "histogram", ".", "h2", ":", "sequence", "The", "second", "histogram", ".", "Returns", "-------", "chebyshev", ":", "float", "Chebyshev", "distance", ".", "See", "also", "--------", "minowski", "chebyshev_neg" ]
language: python | partition: train | avg_line_len: 23.615385

---
repo: vtkiorg/vtki
path: vtki/plotting.py
url: https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L1546-L1557

```python
def _update_axes_color(self, color):
    """Internal helper to set the axes label color"""
    prop_x = self.axes_actor.GetXAxisCaptionActor2D().GetCaptionTextProperty()
    prop_y = self.axes_actor.GetYAxisCaptionActor2D().GetCaptionTextProperty()
    prop_z = self.axes_actor.GetZAxisCaptionActor2D().GetCaptionTextProperty()
    if color is None:
        color = rcParams['font']['color']
    color = parse_color(color)
    for prop in [prop_x, prop_y, prop_z]:
        prop.SetColor(color[0], color[1], color[2])
        prop.SetShadow(False)
    return
```
[ "def", "_update_axes_color", "(", "self", ",", "color", ")", ":", "prop_x", "=", "self", ".", "axes_actor", ".", "GetXAxisCaptionActor2D", "(", ")", ".", "GetCaptionTextProperty", "(", ")", "prop_y", "=", "self", ".", "axes_actor", ".", "GetYAxisCaptionActor2D", "(", ")", ".", "GetCaptionTextProperty", "(", ")", "prop_z", "=", "self", ".", "axes_actor", ".", "GetZAxisCaptionActor2D", "(", ")", ".", "GetCaptionTextProperty", "(", ")", "if", "color", "is", "None", ":", "color", "=", "rcParams", "[", "'font'", "]", "[", "'color'", "]", "color", "=", "parse_color", "(", "color", ")", "for", "prop", "in", "[", "prop_x", ",", "prop_y", ",", "prop_z", "]", ":", "prop", ".", "SetColor", "(", "color", "[", "0", "]", ",", "color", "[", "1", "]", ",", "color", "[", "2", "]", ")", "prop", ".", "SetShadow", "(", "False", ")", "return" ]
Internal helper to set the axes label color
[ "Internal", "helper", "to", "set", "the", "axes", "label", "color" ]
language: python | partition: train | avg_line_len: 49.166667

---
repo: JustinLovinger/optimal
path: optimal/common.py
url: https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/common.py#L42-L46

```python
def make_population(population_size, solution_generator, *args, **kwargs):
    """Make a population with the supplied generator."""
    return [
        solution_generator(*args, **kwargs) for _ in range(population_size)
    ]
```
[ "def", "make_population", "(", "population_size", ",", "solution_generator", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "[", "solution_generator", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "_", "in", "range", "(", "population_size", ")", "]" ]
Make a population with the supplied generator.
[ "Make", "a", "population", "with", "the", "supplied", "generator", "." ]
language: python | partition: train | avg_line_len: 44.4

---
repo: fhcrc/taxtastic
path: taxtastic/taxtable.py
url: https://github.com/fhcrc/taxtastic/blob/4e874b7f2cc146178828bfba386314f8c342722b/taxtastic/taxtable.py#L130-L141

```python
def depth_first_iter(self, self_first=True):
    """
    Iterate over nodes below this node, optionally yielding children
    before self.
    """
    if self_first:
        yield self
    for child in list(self.children):
        for i in child.depth_first_iter(self_first):
            yield i
    if not self_first:
        yield self
```
[ "def", "depth_first_iter", "(", "self", ",", "self_first", "=", "True", ")", ":", "if", "self_first", ":", "yield", "self", "for", "child", "in", "list", "(", "self", ".", "children", ")", ":", "for", "i", "in", "child", ".", "depth_first_iter", "(", "self_first", ")", ":", "yield", "i", "if", "not", "self_first", ":", "yield", "self" ]
Iterate over nodes below this node, optionally yielding children before self.
[ "Iterate", "over", "nodes", "below", "this", "node", "optionally", "yielding", "children", "before", "self", "." ]
language: python | partition: train | avg_line_len: 30.833333

---
repo: gem/oq-engine
path: openquake/hazardlib/gsim/sharma_2009.py
url: https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/sharma_2009.py#L167-L174

```python
def get_site_type_dummy_variables(self, sites):
    """
    Binary rock/soil classification dummy variable based on sites.vs30.
    "``S`` is 1 for a rock site and 0 otherwise" (p. 1201).
    """
    is_rock = np.array(sites.vs30 > self.NEHRP_BC_BOUNDARY)
    return is_rock
```
[ "def", "get_site_type_dummy_variables", "(", "self", ",", "sites", ")", ":", "is_rock", "=", "np", ".", "array", "(", "sites", ".", "vs30", ">", "self", ".", "NEHRP_BC_BOUNDARY", ")", "return", "is_rock" ]
Binary rock/soil classification dummy variable based on sites.vs30. "``S`` is 1 for a rock site and 0 otherwise" (p. 1201).
[ "Binary", "rock", "/", "soil", "classification", "dummy", "variable", "based", "on", "sites", ".", "vs30", "." ]
language: python | partition: train | avg_line_len: 36.5

---
repo: senaite/senaite.core
path: bika/lims/content/analysisprofile.py
url: https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisprofile.py#L212-L235

```python
def remove_service(self, service):
    """Removes the service passed in from the services offered by the
    current Profile. If the Analysis Service passed in is not assigned to
    this Analysis Profile, returns False.
    :param service: the service to be removed from this Analysis Profile
    :type service: AnalysisService
    :return: True if the AnalysisService has been removed successfully
    """
    obj = api.get_object(service)
    uid = api.get_uid(obj)

    # Remove the service from the referenced services
    services = self.getService()
    num_services = len(services)
    services.remove(obj)
    self.setService(services)
    removed = len(services) < num_services

    # Remove the service from the settings map
    settings = self.getAnalysisServicesSettings()
    settings = [item for item in settings if item.get('uid', '') != uid]
    self.setAnalysisServicesSettings(settings)

    return removed
```
[ "def", "remove_service", "(", "self", ",", "service", ")", ":", "obj", "=", "api", ".", "get_object", "(", "service", ")", "uid", "=", "api", ".", "get_uid", "(", "obj", ")", "# Remove the service from the referenced services", "services", "=", "self", ".", "getService", "(", ")", "num_services", "=", "len", "(", "services", ")", "services", ".", "remove", "(", "obj", ")", "self", ".", "setService", "(", "services", ")", "removed", "=", "len", "(", "services", ")", "<", "num_services", "# Remove the service from the settings map", "settings", "=", "self", ".", "getAnalysisServicesSettings", "(", ")", "settings", "=", "[", "item", "for", "item", "in", "settings", "if", "item", ".", "get", "(", "'uid'", ",", "''", ")", "!=", "uid", "]", "self", ".", "setAnalysisServicesSettings", "(", "settings", ")", "return", "removed" ]
Removes the service passed in from the services offered by the current Profile. If the Analysis Service passed in is not assigned to this Analysis Profile, returns False. :param service: the service to be removed from this Analysis Profile :type service: AnalysisService :return: True if the AnalysisService has been removed successfully
[ "Removes", "the", "service", "passed", "in", "from", "the", "services", "offered", "by", "the", "current", "Profile", ".", "If", "the", "Analysis", "Service", "passed", "in", "is", "not", "assigned", "to", "this", "Analysis", "Profile", "returns", "False", ".", ":", "param", "service", ":", "the", "service", "to", "be", "removed", "from", "this", "Analysis", "Profile", ":", "type", "service", ":", "AnalysisService", ":", "return", ":", "True", "if", "the", "AnalysisService", "has", "been", "removed", "successfully" ]
language: python | partition: train | avg_line_len: 40.916667

---
repo: gabstopper/smc-python
path: smc/base/model.py
url: https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/base/model.py#L506-L520

```python
def get(cls, name, raise_exc=True):
    """
    Get the element by name. Does an exact match by element type.

    :param str name: name of element
    :param bool raise_exc: optionally disable exception.
    :raises ElementNotFound: if element does not exist
    :rtype: Element
    """
    element = cls.objects.filter(name, exact_match=True).first() if \
        name is not None else None
    if not element and raise_exc:
        raise ElementNotFound('Cannot find specified element: %s, type: '
                              '%s' % (name, cls.__name__))
    return element
```
[ "def", "get", "(", "cls", ",", "name", ",", "raise_exc", "=", "True", ")", ":", "element", "=", "cls", ".", "objects", ".", "filter", "(", "name", ",", "exact_match", "=", "True", ")", ".", "first", "(", ")", "if", "name", "is", "not", "None", "else", "None", "if", "not", "element", "and", "raise_exc", ":", "raise", "ElementNotFound", "(", "'Cannot find specified element: %s, type: '", "'%s'", "%", "(", "name", ",", "cls", ".", "__name__", ")", ")", "return", "element" ]
Get the element by name. Does an exact match by element type. :param str name: name of element :param bool raise_exc: optionally disable exception. :raises ElementNotFound: if element does not exist :rtype: Element
[ "Get", "the", "element", "by", "name", ".", "Does", "an", "exact", "match", "by", "element", "type", ".", ":", "param", "str", "name", ":", "name", "of", "element", ":", "param", "bool", "raise_exc", ":", "optionally", "disable", "exception", ".", ":", "raises", "ElementNotFound", ":", "if", "element", "does", "not", "exist", ":", "rtype", ":", "Element" ]
language: python | partition: train | avg_line_len: 40.466667

---
repo: Rockhopper-Technologies/enlighten
path: enlighten/_counter.py
url: https://github.com/Rockhopper-Technologies/enlighten/blob/857855f940e6c1bb84d0be849b999a18fff5bf5a/enlighten/_counter.py#L509-L518

```python
def clear(self, flush=True):
    """
    Args:
        flush(bool): Flush stream after clearing progress bar (Default:True)

    Clear progress bar
    """
    if self.enabled:
        self.manager.write(flush=flush, position=self.position)
```
[ "def", "clear", "(", "self", ",", "flush", "=", "True", ")", ":", "if", "self", ".", "enabled", ":", "self", ".", "manager", ".", "write", "(", "flush", "=", "flush", ",", "position", "=", "self", ".", "position", ")" ]
Args: flush(bool): Flush stream after clearing progress bar (Default:True) Clear progress bar
[ "Args", ":", "flush", "(", "bool", ")", ":", "Flush", "stream", "after", "clearing", "progress", "bar", "(", "Default", ":", "True", ")" ]
language: python | partition: train | avg_line_len: 26

---
repo: AoiKuiyuyou/AoikLiveReload
path: tools/waf/aoikwafutil.py
url: https://github.com/AoiKuiyuyou/AoikLiveReload/blob/0d5adb12118a33749e6690a8165fdb769cff7d5c/tools/waf/aoikwafutil.py#L1373-L1485

```python
def build_ctx(pythonpath=None):
    """
    Decorator that makes decorated function use BuildContext instead of \
        Context instance. BuildContext instance has more methods.

    :param pythonpath: Path or list of paths to add to environment variable
        PYTHONPATH. Each path can be absolute path, or relative path relative
        to top directory.

        Notice if this decorator is used without arguments, argument
        `pythonpath` is the decorated function.

    :return: Two situations:

        - If decorator arguments are given, return no-argument decorator.

        - If decorator arguments are not given, return wrapper function.
    """
    # If argument `pythonpath` is string
    if isinstance(pythonpath, str):
        # Create paths list containing the string
        path_s = [pythonpath]

    # If argument `pythonpath` is list
    elif isinstance(pythonpath, list):
        # Use the list as paths list
        path_s = pythonpath

    # If argument `pythonpath` is not string or list,
    # it means the decorator is used without arguments.
    else:
        # Set paths list be None
        path_s = None

    # Create no-argument decorator
    def _noarg_decorator(func):
        """
        No-argument decorator.

        :param func: Decorated function.

        :return: Wrapper function.
        """
        # Create BuildContext subclass
        class _BuildContext(BuildContext):
            # Set command name for the context class
            cmd = func.__name__

            # Set function name for the context class
            fun = func.__name__

        # Create wrapper function
        @wraps(func)
        def _new_func(ctx, *args, **kwargs):
            """
            Wrapper function.

            :param ctx: BuildContext object.

            :param \\*args: Other arguments passed to decorated function.

            :param \\*\\*kwargs: Other keyword arguments passed to decorated
                function.

            :return: Decorated function's call result.
            """
            # If paths list is not empty
            if path_s:
                # For each path
                for path in path_s:
                    # If the path is absolute path
                    if os.path.isabs(path):
                        # Use the path as absolute path
                        abs_path = path

                    # If the path is not absolute path,
                    # it means relative path relative to top directory.
                    else:
                        # Create path node
                        path_node = create_node(ctx, path)

                        # Get absolute path
                        abs_path = path_node.abspath()

                    # Add the absolute path to environment variable PYTHONPATH
                    add_pythonpath(abs_path)

            # Call the decorated function
            result = func(ctx, *args, **kwargs)

            # Return the call result
            return result

        # Store the created context class with the wrapper function
        _new_func._context_class = _BuildContext  # pylint: disable=W0212

        # Return the wrapper function
        return _new_func

    # If decorator arguments are given
    if path_s is not None:
        # Return no-argument decorator
        return _noarg_decorator

    # If decorator arguments are not given
    else:
        # Argument `pythonpath` is the decorated function
        _func = pythonpath

        # Call the no-argument decorator to create wrapper function
        wrapper_func = _noarg_decorator(_func)

        # Return the wrapper function
        return wrapper_func
```
[ "def", "build_ctx", "(", "pythonpath", "=", "None", ")", ":", "# If argument `pythonpath` is string", "if", "isinstance", "(", "pythonpath", ",", "str", ")", ":", "# Create paths list containing the string", "path_s", "=", "[", "pythonpath", "]", "# If argument `pythonpath` is list", "elif", "isinstance", "(", "pythonpath", ",", "list", ")", ":", "# Use the list as paths list", "path_s", "=", "pythonpath", "# If argument `pythonpath` is not string or list,", "# it means the decorator is used without arguments.", "else", ":", "# Set paths list be None", "path_s", "=", "None", "# Create no-argument decorator", "def", "_noarg_decorator", "(", "func", ")", ":", "\"\"\"\n No-argument decorator.\n\n :param func: Decorated function.\n\n :return: Wrapper function.\n \"\"\"", "# Create BuildContext subclass", "class", "_BuildContext", "(", "BuildContext", ")", ":", "# Set command name for the context class", "cmd", "=", "func", ".", "__name__", "# Set function name for the context class", "fun", "=", "func", ".", "__name__", "# Create wrapper function", "@", "wraps", "(", "func", ")", "def", "_new_func", "(", "ctx", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Wrapper function.\n\n :param ctx: BuildContext object.\n\n :param \\\\*args: Other arguments passed to decorated function.\n\n :param \\\\*\\\\*kwargs: Other keyword arguments passed to decorated\n function.\n\n :return: Decorated function's call result.\n \"\"\"", "# If paths list is not empty", "if", "path_s", ":", "# For each path", "for", "path", "in", "path_s", ":", "# If the path is absolute path", "if", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "# Use the path as absolute path", "abs_path", "=", "path", "# If the path is not absolute path,", "# it means relative path relative to top directory.", "else", ":", "# Create path node", "path_node", "=", "create_node", "(", "ctx", ",", "path", ")", "# Get absolute path", "abs_path", "=", "path_node", ".", "abspath", "(", ")", "# Add the absolute path to environment variable PYTHONPATH", "add_pythonpath", "(", "abs_path", ")", "# Call the decorated function", "result", "=", "func", "(", "ctx", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Return the call result", "return", "result", "# Store the created context class with the wrapper function", "_new_func", ".", "_context_class", "=", "_BuildContext", "# pylint: disable=W0212", "# Return the wrapper function", "return", "_new_func", "# If decorator arguments are given", "if", "path_s", "is", "not", "None", ":", "# Return no-argument decorator", "return", "_noarg_decorator", "# If decorator arguments are not given", "else", ":", "# Argument `pythonpath` is the decorated function", "_func", "=", "pythonpath", "# Call the no-argument decorator to create wrapper function", "wrapper_func", "=", "_noarg_decorator", "(", "_func", ")", "# Return the wrapper function", "return", "wrapper_func" ]
Decorator that makes decorated function use BuildContext instead of \ Context instance. BuildContext instance has more methods. :param pythonpath: Path or list of paths to add to environment variable PYTHONPATH. Each path can be absolute path, or relative path relative to top directory. Notice if this decorator is used without arguments, argument `pythonpath` is the decorated function. :return: Two situations: - If decorator arguments are given, return no-argument decorator. - If decorator arguments are not given, return wrapper function.
[ "Decorator", "that", "makes", "decorated", "function", "use", "BuildContext", "instead", "of", "\\", "Context", "instance", ".", "BuildContext", "instance", "has", "more", "methods", "." ]
language: python | partition: train | avg_line_len: 31.389381

---
repo: saltstack/salt
path: salt/utils/stringutils.py
url: https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/stringutils.py#L540-L572

```python
def get_context(template, line, num_lines=5, marker=None):
    '''
    Returns debugging context around a line in a given string

    Returns:: string
    '''
    template_lines = template.splitlines()
    num_template_lines = len(template_lines)

    # In test mode, a single line template would return a crazy line number like,
    # 357. Do this sanity check and if the given line is obviously wrong, just
    # return the entire template
    if line > num_template_lines:
        return template

    context_start = max(0, line - num_lines - 1)  # subt 1 for 0-based indexing
    context_end = min(num_template_lines, line + num_lines)
    error_line_in_context = line - context_start - 1  # subtr 1 for 0-based idx

    buf = []
    if context_start > 0:
        buf.append('[...]')
        error_line_in_context += 1

    buf.extend(template_lines[context_start:context_end])

    if context_end < num_template_lines:
        buf.append('[...]')

    if marker:
        buf[error_line_in_context] += marker

    return '---\n{0}\n---'.format('\n'.join(buf))
```
[ "def", "get_context", "(", "template", ",", "line", ",", "num_lines", "=", "5", ",", "marker", "=", "None", ")", ":", "template_lines", "=", "template", ".", "splitlines", "(", ")", "num_template_lines", "=", "len", "(", "template_lines", ")", "# In test mode, a single line template would return a crazy line number like,", "# 357. Do this sanity check and if the given line is obviously wrong, just", "# return the entire template", "if", "line", ">", "num_template_lines", ":", "return", "template", "context_start", "=", "max", "(", "0", ",", "line", "-", "num_lines", "-", "1", ")", "# subt 1 for 0-based indexing", "context_end", "=", "min", "(", "num_template_lines", ",", "line", "+", "num_lines", ")", "error_line_in_context", "=", "line", "-", "context_start", "-", "1", "# subtr 1 for 0-based idx", "buf", "=", "[", "]", "if", "context_start", ">", "0", ":", "buf", ".", "append", "(", "'[...]'", ")", "error_line_in_context", "+=", "1", "buf", ".", "extend", "(", "template_lines", "[", "context_start", ":", "context_end", "]", ")", "if", "context_end", "<", "num_template_lines", ":", "buf", ".", "append", "(", "'[...]'", ")", "if", "marker", ":", "buf", "[", "error_line_in_context", "]", "+=", "marker", "return", "'---\\n{0}\\n---'", ".", "format", "(", "'\\n'", ".", "join", "(", "buf", ")", ")" ]
Returns debugging context around a line in a given string Returns:: string
[ "Returns", "debugging", "context", "around", "a", "line", "in", "a", "given", "string" ]
language: python | partition: train | avg_line_len: 31.272727

---
repo: aws/aws-encryption-sdk-python
path: src/aws_encryption_sdk/streaming_client.py
url: https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/streaming_client.py#L494-L527

```python
def _prep_non_framed(self):
    """Prepare the opening data for a non-framed message."""
    try:
        plaintext_length = self.stream_length
        self.__unframed_plaintext_cache = self.source_stream
    except NotSupportedError:
        # We need to know the plaintext length before we can start processing the data.
        # If we cannot seek on the source then we need to read the entire source into memory.
        self.__unframed_plaintext_cache = io.BytesIO()
        self.__unframed_plaintext_cache.write(self.source_stream.read())
        plaintext_length = self.__unframed_plaintext_cache.tell()
        self.__unframed_plaintext_cache.seek(0)

    aad_content_string = aws_encryption_sdk.internal.utils.get_aad_content_string(
        content_type=self.content_type, is_final_frame=True
    )
    associated_data = assemble_content_aad(
        message_id=self._header.message_id,
        aad_content_string=aad_content_string,
        seq_num=1,
        length=plaintext_length,
    )
    self.encryptor = Encryptor(
        algorithm=self._encryption_materials.algorithm,
        key=self._derived_data_key,
        associated_data=associated_data,
        iv=non_framed_body_iv(self._encryption_materials.algorithm),
    )
    self.output_buffer += serialize_non_framed_open(
        algorithm=self._encryption_materials.algorithm,
        iv=self.encryptor.iv,
        plaintext_length=plaintext_length,
        signer=self.signer,
    )
```
[ "def", "_prep_non_framed", "(", "self", ")", ":", "try", ":", "plaintext_length", "=", "self", ".", "stream_length", "self", ".", "__unframed_plaintext_cache", "=", "self", ".", "source_stream", "except", "NotSupportedError", ":", "# We need to know the plaintext length before we can start processing the data.", "# If we cannot seek on the source then we need to read the entire source into memory.", "self", ".", "__unframed_plaintext_cache", "=", "io", ".", "BytesIO", "(", ")", "self", ".", "__unframed_plaintext_cache", ".", "write", "(", "self", ".", "source_stream", ".", "read", "(", ")", ")", "plaintext_length", "=", "self", ".", "__unframed_plaintext_cache", ".", "tell", "(", ")", "self", ".", "__unframed_plaintext_cache", ".", "seek", "(", "0", ")", "aad_content_string", "=", "aws_encryption_sdk", ".", "internal", ".", "utils", ".", "get_aad_content_string", "(", "content_type", "=", "self", ".", "content_type", ",", "is_final_frame", "=", "True", ")", "associated_data", "=", "assemble_content_aad", "(", "message_id", "=", "self", ".", "_header", ".", "message_id", ",", "aad_content_string", "=", "aad_content_string", ",", "seq_num", "=", "1", ",", "length", "=", "plaintext_length", ",", ")", "self", ".", "encryptor", "=", "Encryptor", "(", "algorithm", "=", "self", ".", "_encryption_materials", ".", "algorithm", ",", "key", "=", "self", ".", "_derived_data_key", ",", "associated_data", "=", "associated_data", ",", "iv", "=", "non_framed_body_iv", "(", "self", ".", "_encryption_materials", ".", "algorithm", ")", ",", ")", "self", ".", "output_buffer", "+=", "serialize_non_framed_open", "(", "algorithm", "=", "self", ".", "_encryption_materials", ".", "algorithm", ",", "iv", "=", "self", ".", "encryptor", ".", "iv", ",", "plaintext_length", "=", "plaintext_length", ",", "signer", "=", "self", ".", "signer", ",", ")" ]
Prepare the opening data for a non-framed message.
[ "Prepare", "the", "opening", "data", "for", "a", "non", "-", "framed", "message", "." ]
python
train
45.647059
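A minimal standalone sketch of the length-discovery pattern used by _prep_non_framed above: when the source cannot report its length, buffer it into io.BytesIO first. The helper name is illustrative, not AWS SDK API.

import io

def stream_and_length(source):
    """Return (stream, byte_length); buffer in memory if source is not seekable."""
    try:
        source.seek(0, io.SEEK_END)        # raises on non-seekable sources
        length = source.tell()
        source.seek(0)
        return source, length
    except (OSError, io.UnsupportedOperation):
        cache = io.BytesIO(source.read())  # fall back: read everything into memory
        return cache, cache.getbuffer().nbytes

print(stream_and_length(io.BytesIO(b"hello"))[1])  # 5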
happyleavesaoc/python-firetv
firetv/__main__.py
https://github.com/happyleavesaoc/python-firetv/blob/3dd953376c0d5af502e775ae14ed0afe03224781/firetv/__main__.py#L117-L125
def list_devices(): """ List devices via HTTP GET. """ output = {} for device_id, device in devices.items(): output[device_id] = { 'host': device.host, 'state': device.state } return jsonify(devices=output)
[ "def", "list_devices", "(", ")", ":", "output", "=", "{", "}", "for", "device_id", ",", "device", "in", "devices", ".", "items", "(", ")", ":", "output", "[", "device_id", "]", "=", "{", "'host'", ":", "device", ".", "host", ",", "'state'", ":", "device", ".", "state", "}", "return", "jsonify", "(", "devices", "=", "output", ")" ]
List devices via HTTP GET.
[ "List", "devices", "via", "HTTP", "GET", "." ]
python
train
28.222222
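A runnable sketch of the same endpoint pattern; the route path and the devices registry are assumptions for illustration, not the module's actual wiring.

from flask import Flask, jsonify

app = Flask(__name__)
devices = {}  # hypothetical registry: device_id -> object with .host and .state

@app.route('/devices', methods=['GET'])
def list_devices():
    output = {dev_id: {'host': dev.host, 'state': dev.state}
              for dev_id, dev in devices.items()}
    return jsonify(devices=output)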
coursera-dl/coursera-dl
coursera/api.py
https://github.com/coursera-dl/coursera-dl/blob/9b434bcf3c4011bf3181429fe674633ae5fb7d4d/coursera/api.py#L1526-L1561
def _extract_links_from_asset_tags_in_text(self, text): """ Scan the text and extract asset tags and links to corresponding files. @param text: Page text. @type text: str @return: @see CourseraOnDemand._extract_links_from_text """ # Extract asset tags from instructions text asset_tags_map = self._extract_asset_tags(text) ids = list(iterkeys(asset_tags_map)) if not ids: return {} # asset tags contain asset names and ids. We need to make another # HTTP request to get asset URL. asset_urls = self._extract_asset_urls(ids) supplement_links = {} # Build supplement links, providing nice titles along the way for asset in asset_urls: title = clean_filename( asset_tags_map[asset['id']]['name'], self._unrestricted_filenames) extension = clean_filename( asset_tags_map[asset['id']]['extension'].strip(), self._unrestricted_filenames) url = asset['url'].strip() if extension not in supplement_links: supplement_links[extension] = [] supplement_links[extension].append((url, title)) return supplement_links
[ "def", "_extract_links_from_asset_tags_in_text", "(", "self", ",", "text", ")", ":", "# Extract asset tags from instructions text", "asset_tags_map", "=", "self", ".", "_extract_asset_tags", "(", "text", ")", "ids", "=", "list", "(", "iterkeys", "(", "asset_tags_map", ")", ")", "if", "not", "ids", ":", "return", "{", "}", "# asset tags contain asset names and ids. We need to make another", "# HTTP request to get asset URL.", "asset_urls", "=", "self", ".", "_extract_asset_urls", "(", "ids", ")", "supplement_links", "=", "{", "}", "# Build supplement links, providing nice titles along the way", "for", "asset", "in", "asset_urls", ":", "title", "=", "clean_filename", "(", "asset_tags_map", "[", "asset", "[", "'id'", "]", "]", "[", "'name'", "]", ",", "self", ".", "_unrestricted_filenames", ")", "extension", "=", "clean_filename", "(", "asset_tags_map", "[", "asset", "[", "'id'", "]", "]", "[", "'extension'", "]", ".", "strip", "(", ")", ",", "self", ".", "_unrestricted_filenames", ")", "url", "=", "asset", "[", "'url'", "]", ".", "strip", "(", ")", "if", "extension", "not", "in", "supplement_links", ":", "supplement_links", "[", "extension", "]", "=", "[", "]", "supplement_links", "[", "extension", "]", ".", "append", "(", "(", "url", ",", "title", ")", ")", "return", "supplement_links" ]
Scan the text and extract asset tags and links to corresponding files. @param text: Page text. @type text: str @return: @see CourseraOnDemand._extract_links_from_text
[ "Scan", "the", "text", "and", "extract", "asset", "tags", "and", "links", "to", "corresponding", "files", "." ]
python
train
35.25
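The grouping idiom at the heart of the function above, shown standalone with made-up asset data (using dict.setdefault instead of the explicit membership test).

assets = [
    {'extension': 'pdf', 'url': 'http://example.com/a.pdf', 'title': 'slides'},
    {'extension': 'csv', 'url': 'http://example.com/b.csv', 'title': 'data'},
    {'extension': 'pdf', 'url': 'http://example.com/c.pdf', 'title': 'notes'},
]

supplement_links = {}
for asset in assets:
    supplement_links.setdefault(asset['extension'], []).append(
        (asset['url'], asset['title']))

print(supplement_links)
# {'pdf': [('http://example.com/a.pdf', 'slides'),
#          ('http://example.com/c.pdf', 'notes')],
#  'csv': [('http://example.com/b.csv', 'data')]}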
theno/fabsetup
fabsetup/fabfile/setup/service/__init__.py
https://github.com/theno/fabsetup/blob/ced728abff93551ba5677e63bc1bdc0ef5ca5777/fabsetup/fabfile/setup/service/__init__.py#L237-L268
def lms(): '''Install and start a Logitech Media Server (lms). More infos: * http://wiki.slimdevices.com/index.php/Logitech_Media_Server * http://wiki.slimdevices.com/index.php/DebianPackage * http://www.mysqueezebox.com/download * XSqueeze on Kodi: * http://kodi.wiki/view/Add-on:XSqueeze * http://forum.kodi.tv/showthread.php?tid=122199 ''' # cf. http://wiki.slimdevices.com/index.php/DebianPackage#installing_7.9.0 cmds = '''\ url="http://www.mysqueezebox.com/update/?version=7.9.0&revision=1&geturl=1&os=deb" latest_lms=$(wget -q -O - "$url") mkdir -p ~/.logitech_media_server_sources cd ~/.logitech_media_server_sources wget $latest_lms lms_deb=${latest_lms##*/} sudo dpkg -i $lms_deb ''' run(cmds) run('sudo usermod -aG audio squeezeboxserver') with warn_only(): run('sudo addgroup lms') run('sudo usermod -aG lms squeezeboxserver') username = env.user run(flo('sudo usermod -aG audio {username}')) print('\n Set correct folder permissions manually, eg:') print(' > ' + cyan(flo('chown -R {username}.lms <path/to/your/media>'))) hostname = env.host print(flo('\n lms frontend available at http://{hostname}:9000'))
[ "def", "lms", "(", ")", ":", "# cf. http://wiki.slimdevices.com/index.php/DebianPackage#installing_7.9.0", "cmds", "=", "'''\\\nurl=\"http://www.mysqueezebox.com/update/?version=7.9.0&revision=1&geturl=1&os=deb\"\nlatest_lms=$(wget -q -O - \"$url\")\nmkdir -p ~/.logitech_media_server_sources\ncd ~/.logitech_media_server_sources\nwget $latest_lms\nlms_deb=${latest_lms##*/}\nsudo dpkg -i $lms_deb\n'''", "run", "(", "cmds", ")", "run", "(", "'sudo usermod -aG audio squeezeboxserver'", ")", "with", "warn_only", "(", ")", ":", "run", "(", "'sudo addgroup lms'", ")", "run", "(", "'sudo usermod -aG lms squeezeboxserver'", ")", "username", "=", "env", ".", "user", "run", "(", "flo", "(", "'sudo usermod -aG audio {username}'", ")", ")", "print", "(", "'\\n Set correct folder permissions manually, eg:'", ")", "print", "(", "' > '", "+", "cyan", "(", "flo", "(", "'chown -R {username}.lms <path/to/your/media>'", ")", ")", ")", "hostname", "=", "env", ".", "host", "print", "(", "flo", "(", "'\\n lms frontend available at http://{hostname}:9000'", ")", ")" ]
Install and start a Logitech Media Server (lms).

More info:
* http://wiki.slimdevices.com/index.php/Logitech_Media_Server
* http://wiki.slimdevices.com/index.php/DebianPackage
* http://www.mysqueezebox.com/download
* XSqueeze on Kodi:
  * http://kodi.wiki/view/Add-on:XSqueeze
  * http://forum.kodi.tv/showthread.php?tid=122199
[ "Install", "and", "start", "a", "Logitech", "Media", "Server", "(", "lms", ")", "." ]
python
train
37.65625
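A hedged Python sketch of the URL-resolution step in the shell recipe above, assuming the mysqueezebox.com update endpoint still answers with the .deb URL as plain text.

from urllib.request import urlopen

url = ("http://www.mysqueezebox.com/update/"
       "?version=7.9.0&revision=1&geturl=1&os=deb")
latest_lms = urlopen(url).read().decode().strip()
lms_deb = latest_lms.rsplit('/', 1)[-1]  # same as ${latest_lms##*/}
print(latest_lms, '->', lms_deb)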
guma44/GEOparse
GEOparse/GEOTypes.py
https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L897-L964
def download_SRA(self, email, directory='series', filterby=None, nproc=1, **kwargs): """Download SRA files for each GSM in series. .. warning:: Do not use parallel option (nproc > 1) in the interactive shell. For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_ on SO. Args: email (:obj:`str`): E-mail that will be provided to the Entrez. directory (:obj:`str`, optional): Directory to save the data (defaults to the 'series' which saves the data to the directory with the name of the series + '_SRA' ending). Defaults to "series". filterby (:obj:`str`, optional): Filter GSM objects, argument is a function that operates on GSM object and return bool eg. lambda x: "brain" not in x.name. Defaults to None. nproc (:obj:`int`, optional): Number of processes for SRA download (default is 1, no parallelization). **kwargs: Any arbitrary argument passed to GSM.download_SRA method. See the documentation for more details. Returns: :obj:`dict`: A dictionary containing output of ``GSM.download_SRA`` method where each GSM accession ID is the key for the output. """ if directory == 'series': dirpath = os.path.abspath(self.get_accession() + "_SRA") utils.mkdir_p(dirpath) else: dirpath = os.path.abspath(directory) utils.mkdir_p(dirpath) if filterby is not None: gsms_to_use = [gsm for gsm in self.gsms.values() if filterby(gsm)] else: gsms_to_use = self.gsms.values() if nproc == 1: # No need to parallelize, running ordinary download in loop downloaded_paths = dict() for gsm in gsms_to_use: logger.info( "Downloading SRA files for %s series\n" % gsm.name) downloaded_paths[gsm.name] = gsm.download_SRA( email=email, directory=dirpath, **kwargs) elif nproc > 1: # Parallelization enabled downloaders = list() # Collecting params for Pool.map in a loop for gsm in gsms_to_use: downloaders.append([ gsm, email, dirpath, kwargs]) p = Pool(nproc) results = p.map(_sra_download_worker, downloaders) downloaded_paths = dict(results) else: raise ValueError("Nproc should be non-negative: %s" % str(nproc)) return downloaded_paths
[ "def", "download_SRA", "(", "self", ",", "email", ",", "directory", "=", "'series'", ",", "filterby", "=", "None", ",", "nproc", "=", "1", ",", "*", "*", "kwargs", ")", ":", "if", "directory", "==", "'series'", ":", "dirpath", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "get_accession", "(", ")", "+", "\"_SRA\"", ")", "utils", ".", "mkdir_p", "(", "dirpath", ")", "else", ":", "dirpath", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "utils", ".", "mkdir_p", "(", "dirpath", ")", "if", "filterby", "is", "not", "None", ":", "gsms_to_use", "=", "[", "gsm", "for", "gsm", "in", "self", ".", "gsms", ".", "values", "(", ")", "if", "filterby", "(", "gsm", ")", "]", "else", ":", "gsms_to_use", "=", "self", ".", "gsms", ".", "values", "(", ")", "if", "nproc", "==", "1", ":", "# No need to parallelize, running ordinary download in loop", "downloaded_paths", "=", "dict", "(", ")", "for", "gsm", "in", "gsms_to_use", ":", "logger", ".", "info", "(", "\"Downloading SRA files for %s series\\n\"", "%", "gsm", ".", "name", ")", "downloaded_paths", "[", "gsm", ".", "name", "]", "=", "gsm", ".", "download_SRA", "(", "email", "=", "email", ",", "directory", "=", "dirpath", ",", "*", "*", "kwargs", ")", "elif", "nproc", ">", "1", ":", "# Parallelization enabled", "downloaders", "=", "list", "(", ")", "# Collecting params for Pool.map in a loop", "for", "gsm", "in", "gsms_to_use", ":", "downloaders", ".", "append", "(", "[", "gsm", ",", "email", ",", "dirpath", ",", "kwargs", "]", ")", "p", "=", "Pool", "(", "nproc", ")", "results", "=", "p", ".", "map", "(", "_sra_download_worker", ",", "downloaders", ")", "downloaded_paths", "=", "dict", "(", "results", ")", "else", ":", "raise", "ValueError", "(", "\"Nproc should be non-negative: %s\"", "%", "str", "(", "nproc", ")", ")", "return", "downloaded_paths" ]
Download SRA files for each GSM in series.

.. warning::

    Do not use parallel option (nproc > 1) in the interactive shell.
    For more details see `this issue
    <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
    on SO.

Args:
    email (:obj:`str`): E-mail that will be provided to the Entrez.
    directory (:obj:`str`, optional): Directory to save the data
        (defaults to 'series', which saves the data to the directory
        with the name of the series + '_SRA' ending).
        Defaults to "series".
    filterby (:obj:`callable`, optional): Filter GSM objects; the argument
        is a function that operates on a GSM object and returns bool,
        eg. lambda x: "brain" not in x.name. Defaults to None.
    nproc (:obj:`int`, optional): Number of processes for SRA download
        (default is 1, no parallelization).
    **kwargs: Any arbitrary argument passed to GSM.download_SRA
        method. See the documentation for more details.

Returns:
    :obj:`dict`: A dictionary containing output of ``GSM.download_SRA``
    method where each GSM accession ID is the key for the output.
[ "Download", "SRA", "files", "for", "each", "GSM", "in", "series", "." ]
python
train
42.132353
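A minimal sketch of the parallel branch above: collect a parameter list, then Pool.map over a module-level worker. The dummy worker stands in for GSM.download_SRA; the __main__ guard reflects the docstring's warning about interactive shells.

from multiprocessing import Pool

def _download_worker(args):
    name, directory = args
    # a real worker would fetch the SRA files here
    return name, '%s/%s.sra' % (directory, name)

if __name__ == '__main__':
    jobs = [('GSM100001', '/tmp'), ('GSM100002', '/tmp')]
    with Pool(2) as pool:
        downloaded_paths = dict(pool.map(_download_worker, jobs))
    print(downloaded_paths)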
SiLab-Bonn/pyBAR
pybar/analysis/analysis.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis.py#L102-L147
def analyze_event_rate(scan_base, combine_n_readouts=1000, time_line_absolute=True, output_pdf=None, output_file=None): ''' Determines the number of events as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The number of events is taken from the meta data info and stored into a pdf file. Parameters ---------- scan_base: list of str scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ] combine_n_readouts: int the number of read outs to combine (e.g. 1000) time_line_absolute: bool if true the analysis uses absolute time stamps output_pdf: PdfPages PdfPages file object, if none the plot is printed to screen ''' time_stamp = [] rate = [] start_time_set = False for data_file in scan_base: with tb.open_file(data_file + '_interpreted.h5', mode="r") as in_file_h5: meta_data_array = in_file_h5.root.meta_data[:] parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts]))) if time_line_absolute: time_stamp.extend(parameter_ranges[:-1, 0]) else: if not start_time_set: start_time = parameter_ranges[0, 0] start_time_set = True time_stamp.extend((parameter_ranges[:-1, 0] - start_time) / 60.0) rate.extend((parameter_ranges[:-1, 3] - parameter_ranges[:-1, 2]) / (parameter_ranges[:-1, 1] - parameter_ranges[:-1, 0])) # d#Events / dt if time_line_absolute: plotting.plot_scatter_time(time_stamp, rate, title='Event rate [Hz]', marker_style='o', filename=output_pdf) else: plotting.plot_scatter(time_stamp, rate, title='Events per time', x_label='Progressed time [min.]', y_label='Events rate [Hz]', marker_style='o', filename=output_pdf) if output_file: with tb.open_file(output_file, mode="a") as out_file_h5: rec_array = np.array(zip(time_stamp, rate), dtype=[('time_stamp', float), ('rate', float)]).view(np.recarray) try: rate_table = out_file_h5.create_table(out_file_h5.root, name='Eventrate', description=rec_array, title='Event rate', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) rate_table[:] = rec_array except tb.exceptions.NodeError: logging.warning(output_file + ' has already a Eventrate note, do not overwrite existing.') return time_stamp, rate
[ "def", "analyze_event_rate", "(", "scan_base", ",", "combine_n_readouts", "=", "1000", ",", "time_line_absolute", "=", "True", ",", "output_pdf", "=", "None", ",", "output_file", "=", "None", ")", ":", "time_stamp", "=", "[", "]", "rate", "=", "[", "]", "start_time_set", "=", "False", "for", "data_file", "in", "scan_base", ":", "with", "tb", ".", "open_file", "(", "data_file", "+", "'_interpreted.h5'", ",", "mode", "=", "\"r\"", ")", "as", "in_file_h5", ":", "meta_data_array", "=", "in_file_h5", ".", "root", ".", "meta_data", "[", ":", "]", "parameter_ranges", "=", "np", ".", "column_stack", "(", "(", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_array", "[", "'timestamp_start'", "]", "[", ":", ":", "combine_n_readouts", "]", ")", ",", "analysis_utils", ".", "get_ranges_from_array", "(", "meta_data_array", "[", "'event_number'", "]", "[", ":", ":", "combine_n_readouts", "]", ")", ")", ")", "if", "time_line_absolute", ":", "time_stamp", ".", "extend", "(", "parameter_ranges", "[", ":", "-", "1", ",", "0", "]", ")", "else", ":", "if", "not", "start_time_set", ":", "start_time", "=", "parameter_ranges", "[", "0", ",", "0", "]", "start_time_set", "=", "True", "time_stamp", ".", "extend", "(", "(", "parameter_ranges", "[", ":", "-", "1", ",", "0", "]", "-", "start_time", ")", "/", "60.0", ")", "rate", ".", "extend", "(", "(", "parameter_ranges", "[", ":", "-", "1", ",", "3", "]", "-", "parameter_ranges", "[", ":", "-", "1", ",", "2", "]", ")", "/", "(", "parameter_ranges", "[", ":", "-", "1", ",", "1", "]", "-", "parameter_ranges", "[", ":", "-", "1", ",", "0", "]", ")", ")", "# d#Events / dt", "if", "time_line_absolute", ":", "plotting", ".", "plot_scatter_time", "(", "time_stamp", ",", "rate", ",", "title", "=", "'Event rate [Hz]'", ",", "marker_style", "=", "'o'", ",", "filename", "=", "output_pdf", ")", "else", ":", "plotting", ".", "plot_scatter", "(", "time_stamp", ",", "rate", ",", "title", "=", "'Events per time'", ",", "x_label", "=", "'Progressed time [min.]'", ",", "y_label", "=", "'Events rate [Hz]'", ",", "marker_style", "=", "'o'", ",", "filename", "=", "output_pdf", ")", "if", "output_file", ":", "with", "tb", ".", "open_file", "(", "output_file", ",", "mode", "=", "\"a\"", ")", "as", "out_file_h5", ":", "rec_array", "=", "np", ".", "array", "(", "zip", "(", "time_stamp", ",", "rate", ")", ",", "dtype", "=", "[", "(", "'time_stamp'", ",", "float", ")", ",", "(", "'rate'", ",", "float", ")", "]", ")", ".", "view", "(", "np", ".", "recarray", ")", "try", ":", "rate_table", "=", "out_file_h5", ".", "create_table", "(", "out_file_h5", ".", "root", ",", "name", "=", "'Eventrate'", ",", "description", "=", "rec_array", ",", "title", "=", "'Event rate'", ",", "filters", "=", "tb", ".", "Filters", "(", "complib", "=", "'blosc'", ",", "complevel", "=", "5", ",", "fletcher32", "=", "False", ")", ")", "rate_table", "[", ":", "]", "=", "rec_array", "except", "tb", ".", "exceptions", ".", "NodeError", ":", "logging", ".", "warning", "(", "output_file", "+", "' has already a Eventrate note, do not overwrite existing.'", ")", "return", "time_stamp", ",", "rate" ]
Determines the number of events as a function of time. Therefore the data
of a fixed number of readouts are combined ('combine_n_readouts'). The number
of events is taken from the meta data info and stored into a pdf file.

Parameters
----------
scan_base: list of str
    scan base names (e.g. ['//data//SCC_50_fei4_self_trigger_scan_390'])
combine_n_readouts: int
    the number of readouts to combine (e.g. 1000)
time_line_absolute: bool
    if True, the analysis uses absolute time stamps
output_pdf: PdfPages
    PdfPages file object; if None, the plot is printed to screen
[ "Determines", "the", "number", "of", "events", "as", "a", "function", "of", "time", ".", "Therefore", "the", "data", "of", "a", "fixed", "number", "of", "read", "outs", "are", "combined", "(", "combine_n_readouts", ")", ".", "The", "number", "of", "events", "is", "taken", "from", "the", "meta", "data", "info", "and", "stored", "into", "a", "pdf", "file", "." ]
python
train
57.586957
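The core rate computation above, reduced to a toy example: event rate is the difference in cumulative event numbers over the difference in timestamps (np.diff stands in for the ranges helper; numbers are invented).

import numpy as np

timestamps = np.array([0.0, 10.0, 20.0, 30.0])  # readout start times (s)
event_numbers = np.array([0, 500, 1500, 1800])  # cumulative event counts

rate = np.diff(event_numbers) / np.diff(timestamps)  # d(events)/dt per interval
print(rate)  # [ 50. 100.  30.]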
gem/oq-engine
openquake/risklib/scientific.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/scientific.py#L935-L977
def classical_damage( fragility_functions, hazard_imls, hazard_poes, investigation_time, risk_investigation_time): """ :param fragility_functions: a list of fragility functions for each damage state :param hazard_imls: Intensity Measure Levels :param hazard_poes: hazard curve :param investigation_time: hazard investigation time :param risk_investigation_time: risk investigation time :returns: an array of M probabilities of occurrence where M is the numbers of damage states. """ spi = fragility_functions.steps_per_interval if spi and spi > 1: # interpolate imls = numpy.array(fragility_functions.interp_imls) min_val, max_val = hazard_imls[0], hazard_imls[-1] assert min_val > 0, hazard_imls # sanity check numpy.putmask(imls, imls < min_val, min_val) numpy.putmask(imls, imls > max_val, max_val) poes = interpolate.interp1d(hazard_imls, hazard_poes)(imls) else: imls = (hazard_imls if fragility_functions.format == 'continuous' else fragility_functions.imls) poes = numpy.array(hazard_poes) afe = annual_frequency_of_exceedence(poes, investigation_time) annual_frequency_of_occurrence = pairwise_diff( pairwise_mean([afe[0]] + list(afe) + [afe[-1]])) poes_per_damage_state = [] for ff in fragility_functions: frequency_of_exceedence_per_damage_state = numpy.dot( annual_frequency_of_occurrence, list(map(ff, imls))) poe_per_damage_state = 1. - numpy.exp( - frequency_of_exceedence_per_damage_state * risk_investigation_time) poes_per_damage_state.append(poe_per_damage_state) poos = pairwise_diff([1] + poes_per_damage_state + [0]) return poos
[ "def", "classical_damage", "(", "fragility_functions", ",", "hazard_imls", ",", "hazard_poes", ",", "investigation_time", ",", "risk_investigation_time", ")", ":", "spi", "=", "fragility_functions", ".", "steps_per_interval", "if", "spi", "and", "spi", ">", "1", ":", "# interpolate", "imls", "=", "numpy", ".", "array", "(", "fragility_functions", ".", "interp_imls", ")", "min_val", ",", "max_val", "=", "hazard_imls", "[", "0", "]", ",", "hazard_imls", "[", "-", "1", "]", "assert", "min_val", ">", "0", ",", "hazard_imls", "# sanity check", "numpy", ".", "putmask", "(", "imls", ",", "imls", "<", "min_val", ",", "min_val", ")", "numpy", ".", "putmask", "(", "imls", ",", "imls", ">", "max_val", ",", "max_val", ")", "poes", "=", "interpolate", ".", "interp1d", "(", "hazard_imls", ",", "hazard_poes", ")", "(", "imls", ")", "else", ":", "imls", "=", "(", "hazard_imls", "if", "fragility_functions", ".", "format", "==", "'continuous'", "else", "fragility_functions", ".", "imls", ")", "poes", "=", "numpy", ".", "array", "(", "hazard_poes", ")", "afe", "=", "annual_frequency_of_exceedence", "(", "poes", ",", "investigation_time", ")", "annual_frequency_of_occurrence", "=", "pairwise_diff", "(", "pairwise_mean", "(", "[", "afe", "[", "0", "]", "]", "+", "list", "(", "afe", ")", "+", "[", "afe", "[", "-", "1", "]", "]", ")", ")", "poes_per_damage_state", "=", "[", "]", "for", "ff", "in", "fragility_functions", ":", "frequency_of_exceedence_per_damage_state", "=", "numpy", ".", "dot", "(", "annual_frequency_of_occurrence", ",", "list", "(", "map", "(", "ff", ",", "imls", ")", ")", ")", "poe_per_damage_state", "=", "1.", "-", "numpy", ".", "exp", "(", "-", "frequency_of_exceedence_per_damage_state", "*", "risk_investigation_time", ")", "poes_per_damage_state", ".", "append", "(", "poe_per_damage_state", ")", "poos", "=", "pairwise_diff", "(", "[", "1", "]", "+", "poes_per_damage_state", "+", "[", "0", "]", ")", "return", "poos" ]
:param fragility_functions:
    a list of fragility functions for each damage state
:param hazard_imls:
    Intensity Measure Levels
:param hazard_poes:
    hazard curve
:param investigation_time:
    hazard investigation time
:param risk_investigation_time:
    risk investigation time
:returns:
    an array of M probabilities of occurrence where M is the number
    of damage states
[ ":", "param", "fragility_functions", ":", "a", "list", "of", "fragility", "functions", "for", "each", "damage", "state", ":", "param", "hazard_imls", ":", "Intensity", "Measure", "Levels", ":", "param", "hazard_poes", ":", "hazard", "curve", ":", "param", "investigation_time", ":", "hazard", "investigation", "time", ":", "param", "risk_investigation_time", ":", "risk", "investigation", "time", ":", "returns", ":", "an", "array", "of", "M", "probabilities", "of", "occurrence", "where", "M", "is", "the", "numbers", "of", "damage", "states", "." ]
python
train
41.790698
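A toy illustration of the final step above: pairwise differences over [1] + exceedance probabilities + [0] turn per-damage-state exceedance probabilities into occupancy probabilities. Numbers are invented; see scientific.py for the engine's actual helpers.

def pairwise_diff(values):
    """Difference between consecutive elements, as used above."""
    return [x - y for x, y in zip(values, values[1:])]

# Hypothetical P(exceeding slight / moderate / severe damage):
poes_per_damage_state = [0.8, 0.5, 0.1]
poos = pairwise_diff([1] + poes_per_damage_state + [0])
print(poos)  # ~[0.2, 0.3, 0.4, 0.1] -> no damage, slight, moderate, severe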
pydata/xarray
xarray/core/computation.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/computation.py#L104-L117
def to_gufunc_string(self): """Create an equivalent signature string for a NumPy gufunc. Unlike __str__, handles dimensions that don't map to Python identifiers. """ all_dims = self.all_core_dims dims_map = dict(zip(sorted(all_dims), range(len(all_dims)))) input_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims] for core_dims in self.input_core_dims] output_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims] for core_dims in self.output_core_dims] alt_signature = type(self)(input_core_dims, output_core_dims) return str(alt_signature)
[ "def", "to_gufunc_string", "(", "self", ")", ":", "all_dims", "=", "self", ".", "all_core_dims", "dims_map", "=", "dict", "(", "zip", "(", "sorted", "(", "all_dims", ")", ",", "range", "(", "len", "(", "all_dims", ")", ")", ")", ")", "input_core_dims", "=", "[", "[", "'dim%d'", "%", "dims_map", "[", "dim", "]", "for", "dim", "in", "core_dims", "]", "for", "core_dims", "in", "self", ".", "input_core_dims", "]", "output_core_dims", "=", "[", "[", "'dim%d'", "%", "dims_map", "[", "dim", "]", "for", "dim", "in", "core_dims", "]", "for", "core_dims", "in", "self", ".", "output_core_dims", "]", "alt_signature", "=", "type", "(", "self", ")", "(", "input_core_dims", ",", "output_core_dims", ")", "return", "str", "(", "alt_signature", ")" ]
Create an equivalent signature string for a NumPy gufunc. Unlike __str__, handles dimensions that don't map to Python identifiers.
[ "Create", "an", "equivalent", "signature", "string", "for", "a", "NumPy", "gufunc", "." ]
python
train
48.5
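The renaming trick above in isolation: sort all core dimension names and map each to a gufunc-safe identifier dim0, dim1, ... The example dimension names are arbitrary.

all_dims = {'x 1', 'time', 'height'}
dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))
# sorted order: 'height' -> 0, 'time' -> 1, 'x 1' -> 2
renamed = [['dim%d' % dims_map[d] for d in core]
           for core in (['time', 'x 1'], ['height'])]
print(renamed)  # [['dim1', 'dim2'], ['dim0']]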
saltstack/salt
salt/modules/aws_sqs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L217-L264
def delete_queue(name, region, opts=None, user=None): ''' Deletes a queue in the region. name Name of the SQS queue to deletes region Name of the region to delete the queue from opts : None Any additional options to add to the command line user : None Run hg as a user other than what the minion runs as CLI Example: salt '*' aws_sqs.delete_queue <sqs queue> <region> ''' queues = list_queues(region, opts, user) url_map = _parse_queue_list(queues) log.debug('map %s', url_map) if name in url_map: delete = {'queue-url': url_map[name]} rtn = _run_aws( 'delete-queue', region=region, opts=opts, user=user, **delete) success = True err = '' out = '{0} deleted'.format(name) else: out = '' err = "Delete failed" success = False ret = { 'retcode': 0 if success else 1, 'stdout': out, 'stderr': err, } return ret
[ "def", "delete_queue", "(", "name", ",", "region", ",", "opts", "=", "None", ",", "user", "=", "None", ")", ":", "queues", "=", "list_queues", "(", "region", ",", "opts", ",", "user", ")", "url_map", "=", "_parse_queue_list", "(", "queues", ")", "log", ".", "debug", "(", "'map %s'", ",", "url_map", ")", "if", "name", "in", "url_map", ":", "delete", "=", "{", "'queue-url'", ":", "url_map", "[", "name", "]", "}", "rtn", "=", "_run_aws", "(", "'delete-queue'", ",", "region", "=", "region", ",", "opts", "=", "opts", ",", "user", "=", "user", ",", "*", "*", "delete", ")", "success", "=", "True", "err", "=", "''", "out", "=", "'{0} deleted'", ".", "format", "(", "name", ")", "else", ":", "out", "=", "''", "err", "=", "\"Delete failed\"", "success", "=", "False", "ret", "=", "{", "'retcode'", ":", "0", "if", "success", "else", "1", ",", "'stdout'", ":", "out", ",", "'stderr'", ":", "err", ",", "}", "return", "ret" ]
Deletes a queue in the region.

name
    Name of the SQS queue to delete

region
    Name of the region to delete the queue from

opts : None
    Any additional options to add to the command line

user : None
    Run the command as a user other than what the minion runs as

CLI Example:

    salt '*' aws_sqs.delete_queue <sqs queue> <region>
[ "Deletes", "a", "queue", "in", "the", "region", "." ]
python
train
21.291667
danilobellini/audiolazy
examples/pi.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/examples/pi.py#L24-L38
def mgl_seq(x): """ Sequence whose sum is the Madhava-Gregory-Leibniz series. [x, -x^3/3, x^5/5, -x^7/7, x^9/9, -x^11/11, ...] Returns ------- An endless sequence that has the property ``atan(x) = sum(mgl_seq(x))``. Usually you would use the ``atan()`` function, not this one. """ odd_numbers = thub(count(start=1, step=2), 2) return Stream(1, -1) * x ** odd_numbers / odd_numbers
[ "def", "mgl_seq", "(", "x", ")", ":", "odd_numbers", "=", "thub", "(", "count", "(", "start", "=", "1", ",", "step", "=", "2", ")", ",", "2", ")", "return", "Stream", "(", "1", ",", "-", "1", ")", "*", "x", "**", "odd_numbers", "/", "odd_numbers" ]
Sequence whose sum is the Madhava-Gregory-Leibniz series. [x, -x^3/3, x^5/5, -x^7/7, x^9/9, -x^11/11, ...] Returns ------- An endless sequence that has the property ``atan(x) = sum(mgl_seq(x))``. Usually you would use the ``atan()`` function, not this one.
[ "Sequence", "whose", "sum", "is", "the", "Madhava", "-", "Gregory", "-", "Leibniz", "series", "." ]
python
train
26.666667
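The same series in plain Python, without audiolazy's lazy streams: atan(x) = x - x^3/3 + x^5/5 - ..., and pi = 4*atan(1), though convergence at x = 1 is slow.

def mgl_terms(x, n):
    """First n terms of the Madhava-Gregory-Leibniz series for atan(x)."""
    sign = 1
    for k in range(1, 2 * n, 2):   # odd denominators 1, 3, 5, ...
        yield sign * x ** k / k
        sign = -sign

print(4 * sum(mgl_terms(1.0, 100000)))  # ~3.14158, slowly approaching pi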
telefonicaid/fiware-sdc
python-sdcclient/utils/rest_client_utils.py
https://github.com/telefonicaid/fiware-sdc/blob/d2d5f87fc574caf6bcc49594bbcb31f620ba8c51/python-sdcclient/utils/rest_client_utils.py#L116-L127
def launch_request(self, uri_pattern, body, method, headers=None, parameters=None, **kwargs): """ Launch HTTP request to the API with given arguments :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax) :param body: Raw Body content (string) (Plain/XML/JSON to be sent) :param method: HTTP ver to be used in the request [GET | POST | PUT | DELETE | UPDATE ] :param headers: HTTP header (dict) :param parameters: Query parameters for the URL. i.e. {'key1': 'value1', 'key2': 'value2'} :param **kwargs: URL parameters (without url_root) to fill the patters :returns: REST API response ('Requests' response) """ return self._call_api(uri_pattern, method, body, headers, parameters, **kwargs)
[ "def", "launch_request", "(", "self", ",", "uri_pattern", ",", "body", ",", "method", ",", "headers", "=", "None", ",", "parameters", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_call_api", "(", "uri_pattern", ",", "method", ",", "body", ",", "headers", ",", "parameters", ",", "*", "*", "kwargs", ")" ]
Launch HTTP request to the API with given arguments
:param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax)
:param body: Raw Body content (string) (Plain/XML/JSON to be sent)
:param method: HTTP verb to be used in the request [GET | POST | PUT | DELETE | UPDATE ]
:param headers: HTTP header (dict)
:param parameters: Query parameters for the URL. i.e. {'key1': 'value1', 'key2': 'value2'}
:param **kwargs: URL parameters (without url_root) to fill the pattern
:returns: REST API response ('Requests' response)
[ "Launch", "HTTP", "request", "to", "the", "API", "with", "given", "arguments", ":", "param", "uri_pattern", ":", "string", "pattern", "of", "the", "full", "API", "url", "with", "keyword", "arguments", "(", "format", "string", "syntax", ")", ":", "param", "body", ":", "Raw", "Body", "content", "(", "string", ")", "(", "Plain", "/", "XML", "/", "JSON", "to", "be", "sent", ")", ":", "param", "method", ":", "HTTP", "ver", "to", "be", "used", "in", "the", "request", "[", "GET", "|", "POST", "|", "PUT", "|", "DELETE", "|", "UPDATE", "]", ":", "param", "headers", ":", "HTTP", "header", "(", "dict", ")", ":", "param", "parameters", ":", "Query", "parameters", "for", "the", "URL", ".", "i", ".", "e", ".", "{", "key1", ":", "value1", "key2", ":", "value2", "}", ":", "param", "**", "kwargs", ":", "URL", "parameters", "(", "without", "url_root", ")", "to", "fill", "the", "patters", ":", "returns", ":", "REST", "API", "response", "(", "Requests", "response", ")" ]
python
train
67.75
TUT-ARG/sed_eval
evaluators/sound_event_eval.py
https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/evaluators/sound_event_eval.py#L94-L144
def main(argv): """Main """ parameters = process_arguments(argv) file_list = sed_eval.io.load_file_pair_list(parameters['file_list']) path = os.path.dirname(parameters['file_list']) data = [] all_data = dcase_util.containers.MetaDataContainer() for file_pair in file_list: reference_event_list = sed_eval.io.load_event_list( os.path.abspath(os.path.join(path, file_pair['reference_file'])) ) estimated_event_list = sed_eval.io.load_event_list( os.path.abspath(os.path.join(path, file_pair['estimated_file'])) ) data.append({ 'reference_event_list': reference_event_list, 'estimated_event_list': estimated_event_list }) all_data += reference_event_list event_labels = all_data.unique_event_labels segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(event_labels) event_based_metrics = sed_eval.sound_event.EventBasedMetrics(event_labels) for file_pair in data: segment_based_metrics.evaluate( file_pair['reference_event_list'], file_pair['estimated_event_list'] ) event_based_metrics.evaluate( file_pair['reference_event_list'], file_pair['estimated_event_list'] ) if parameters['output_file']: results = dcase_util.containers.DictContainer({ 'segment_based_metrics': segment_based_metrics.results(), 'event_based_metrics': event_based_metrics.results() }).save(parameters['output_file']) else: print(segment_based_metrics) print(event_based_metrics)
[ "def", "main", "(", "argv", ")", ":", "parameters", "=", "process_arguments", "(", "argv", ")", "file_list", "=", "sed_eval", ".", "io", ".", "load_file_pair_list", "(", "parameters", "[", "'file_list'", "]", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "parameters", "[", "'file_list'", "]", ")", "data", "=", "[", "]", "all_data", "=", "dcase_util", ".", "containers", ".", "MetaDataContainer", "(", ")", "for", "file_pair", "in", "file_list", ":", "reference_event_list", "=", "sed_eval", ".", "io", ".", "load_event_list", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "file_pair", "[", "'reference_file'", "]", ")", ")", ")", "estimated_event_list", "=", "sed_eval", ".", "io", ".", "load_event_list", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "file_pair", "[", "'estimated_file'", "]", ")", ")", ")", "data", ".", "append", "(", "{", "'reference_event_list'", ":", "reference_event_list", ",", "'estimated_event_list'", ":", "estimated_event_list", "}", ")", "all_data", "+=", "reference_event_list", "event_labels", "=", "all_data", ".", "unique_event_labels", "segment_based_metrics", "=", "sed_eval", ".", "sound_event", ".", "SegmentBasedMetrics", "(", "event_labels", ")", "event_based_metrics", "=", "sed_eval", ".", "sound_event", ".", "EventBasedMetrics", "(", "event_labels", ")", "for", "file_pair", "in", "data", ":", "segment_based_metrics", ".", "evaluate", "(", "file_pair", "[", "'reference_event_list'", "]", ",", "file_pair", "[", "'estimated_event_list'", "]", ")", "event_based_metrics", ".", "evaluate", "(", "file_pair", "[", "'reference_event_list'", "]", ",", "file_pair", "[", "'estimated_event_list'", "]", ")", "if", "parameters", "[", "'output_file'", "]", ":", "results", "=", "dcase_util", ".", "containers", ".", "DictContainer", "(", "{", "'segment_based_metrics'", ":", "segment_based_metrics", ".", "results", "(", ")", ",", "'event_based_metrics'", ":", "event_based_metrics", ".", "results", "(", ")", "}", ")", ".", "save", "(", "parameters", "[", "'output_file'", "]", ")", "else", ":", "print", "(", "segment_based_metrics", ")", "print", "(", "event_based_metrics", ")" ]
Main
[ "Main" ]
python
train
31.745098
collectiveacuity/labPack
labpack/platforms/heroku.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/heroku.py#L187-L237
def access(self, app_subdomain): ''' a method to validate user can access app ''' title = '%s.access' % self.__class__.__name__ # validate input input_fields = { 'app_subdomain': app_subdomain } for key, value in input_fields.items(): object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # verbosity self.printer('Checking access to "%s" subdomain ... ' % app_subdomain, flush=True) # confirm existence of subdomain for app in self.apps: if app['name'] == app_subdomain: self.subdomain = app_subdomain break # refresh app list and search again if not self.subdomain: import json response = self._handle_command('heroku apps --json', handle_error=True) self.apps = json.loads(response) for app in self.apps: if app['name'] == app_subdomain: self.subdomain = app_subdomain break # check reason for failure if not self.subdomain: sys_command = 'heroku ps -a %s' % app_subdomain heroku_response = self._handle_command(sys_command, handle_error=True) if heroku_response.find('find that app') > -1: self.printer('ERROR') raise Exception('%s does not exist. Try: heroku create -a %s' % (app_subdomain, app_subdomain)) elif heroku_response.find('have access to the app') > -1: self.printer('ERROR') raise Exception('%s belongs to another account.' % app_subdomain) else: self.printer('ERROR') raise Exception('Some unknown issue prevents you from accessing %s' % app_subdomain) self.printer('done.') return self
[ "def", "access", "(", "self", ",", "app_subdomain", ")", ":", "title", "=", "'%s.access'", "%", "self", ".", "__class__", ".", "__name__", "# validate input\r", "input_fields", "=", "{", "'app_subdomain'", ":", "app_subdomain", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# verbosity\r", "self", ".", "printer", "(", "'Checking access to \"%s\" subdomain ... '", "%", "app_subdomain", ",", "flush", "=", "True", ")", "# confirm existence of subdomain\r", "for", "app", "in", "self", ".", "apps", ":", "if", "app", "[", "'name'", "]", "==", "app_subdomain", ":", "self", ".", "subdomain", "=", "app_subdomain", "break", "# refresh app list and search again\r", "if", "not", "self", ".", "subdomain", ":", "import", "json", "response", "=", "self", ".", "_handle_command", "(", "'heroku apps --json'", ",", "handle_error", "=", "True", ")", "self", ".", "apps", "=", "json", ".", "loads", "(", "response", ")", "for", "app", "in", "self", ".", "apps", ":", "if", "app", "[", "'name'", "]", "==", "app_subdomain", ":", "self", ".", "subdomain", "=", "app_subdomain", "break", "# check reason for failure\r", "if", "not", "self", ".", "subdomain", ":", "sys_command", "=", "'heroku ps -a %s'", "%", "app_subdomain", "heroku_response", "=", "self", ".", "_handle_command", "(", "sys_command", ",", "handle_error", "=", "True", ")", "if", "heroku_response", ".", "find", "(", "'find that app'", ")", ">", "-", "1", ":", "self", ".", "printer", "(", "'ERROR'", ")", "raise", "Exception", "(", "'%s does not exist. Try: heroku create -a %s'", "%", "(", "app_subdomain", ",", "app_subdomain", ")", ")", "elif", "heroku_response", ".", "find", "(", "'have access to the app'", ")", ">", "-", "1", ":", "self", ".", "printer", "(", "'ERROR'", ")", "raise", "Exception", "(", "'%s belongs to another account.'", "%", "app_subdomain", ")", "else", ":", "self", ".", "printer", "(", "'ERROR'", ")", "raise", "Exception", "(", "'Some unknown issue prevents you from accessing %s'", "%", "app_subdomain", ")", "self", ".", "printer", "(", "'done.'", ")", "return", "self" ]
a method to validate that the user can access the app
[ "a", "method", "to", "validate", "user", "can", "access", "app" ]
python
train
38.137255
pyusb/pyusb
usb/legacy.py
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/legacy.py#L154-L164
def interruptWrite(self, endpoint, buffer, timeout = 100): r"""Perform a interrupt write request to the endpoint specified. Arguments: endpoint: endpoint number. buffer: sequence data buffer to write. This parameter can be any sequence type. timeout: operation timeout in milliseconds. (default: 100) Returns the number of bytes written. """ return self.dev.write(endpoint, buffer, timeout)
[ "def", "interruptWrite", "(", "self", ",", "endpoint", ",", "buffer", ",", "timeout", "=", "100", ")", ":", "return", "self", ".", "dev", ".", "write", "(", "endpoint", ",", "buffer", ",", "timeout", ")" ]
Perform an interrupt write request to the endpoint specified.

            Arguments:
                endpoint: endpoint number.
                buffer: sequence data buffer to write.
                        This parameter can be any sequence type.
                timeout: operation timeout in milliseconds. (default: 100)

            Returns the number of bytes written.
[ "r", "Perform", "a", "interrupt", "write", "request", "to", "the", "endpoint", "specified", "." ]
python
train
45.545455
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L7278-L7293
def flattened(self): """return flattened data ``(x, f)`` such that for the sweep through coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])`` """ flatx = {} flatf = {} for i in self.res: if isinstance(i, int): flatx[i] = [] flatf[i] = [] for x in sorted(self.res[i]): for d in sorted(self.res[i][x]): flatx[i].append(x) flatf[i].append(d) return flatx, flatf
[ "def", "flattened", "(", "self", ")", ":", "flatx", "=", "{", "}", "flatf", "=", "{", "}", "for", "i", "in", "self", ".", "res", ":", "if", "isinstance", "(", "i", ",", "int", ")", ":", "flatx", "[", "i", "]", "=", "[", "]", "flatf", "[", "i", "]", "=", "[", "]", "for", "x", "in", "sorted", "(", "self", ".", "res", "[", "i", "]", ")", ":", "for", "d", "in", "sorted", "(", "self", ".", "res", "[", "i", "]", "[", "x", "]", ")", ":", "flatx", "[", "i", "]", ".", "append", "(", "x", ")", "flatf", "[", "i", "]", ".", "append", "(", "d", ")", "return", "flatx", ",", "flatf" ]
return flattened data ``(x, f)`` such that for the sweep through coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])``
[ "return", "flattened", "data", "(", "x", "f", ")", "such", "that", "for", "the", "sweep", "through", "coordinate", "i", "we", "have", "for", "data", "point", "j", "that", "f", "[", "i", "]", "[", "j", "]", "==", "func", "(", "x", "[", "i", "]", "[", "j", "]", ")" ]
python
train
34.6875
marcomusy/vtkplotter
vtkplotter/utils.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/utils.py#L229-L233
def cart2pol(x, y): """Cartesian to Polar coordinates conversion.""" theta = np.arctan2(y, x) rho = np.hypot(x, y) return theta, rho
[ "def", "cart2pol", "(", "x", ",", "y", ")", ":", "theta", "=", "np", ".", "arctan2", "(", "y", ",", "x", ")", "rho", "=", "np", ".", "hypot", "(", "x", ",", "y", ")", "return", "theta", ",", "rho" ]
Cartesian to Polar coordinates conversion.
[ "Cartesian", "to", "Polar", "coordinates", "conversion", "." ]
python
train
28.8
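A quick usage check, with an inverse added for illustration (the inverse shown here is a sketch, not necessarily vtkplotter's own).

import numpy as np

def cart2pol(x, y):
    return np.arctan2(y, x), np.hypot(x, y)

def pol2cart(theta, rho):
    return rho * np.cos(theta), rho * np.sin(theta)

theta, rho = cart2pol(3.0, 4.0)
print(theta, rho)            # 0.927..., 5.0
print(pol2cart(theta, rho))  # (3.000..., 4.000...)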
spyder-ide/spyder
spyder/app/restart.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/restart.py#L106-L109
def _show_message(self, text): """Show message on splash screen.""" self.splash.showMessage(text, Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute, QColor(Qt.white))
[ "def", "_show_message", "(", "self", ",", "text", ")", ":", "self", ".", "splash", ".", "showMessage", "(", "text", ",", "Qt", ".", "AlignBottom", "|", "Qt", ".", "AlignCenter", "|", "Qt", ".", "AlignAbsolute", ",", "QColor", "(", "Qt", ".", "white", ")", ")" ]
Show message on splash screen.
[ "Show", "message", "on", "splash", "screen", "." ]
python
train
53.75
mcs07/ChemDataExtractor
chemdataextractor/nlp/tag.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tag.py#L153-L168
def update(self, truth, guess, features): """Update the feature weights.""" def upd_feat(c, f, w, v): param = (f, c) self._totals[param] += (self.i - self._tstamps[param]) * w self._tstamps[param] = self.i self.weights[f][c] = w + v self.i += 1 if truth == guess: return None for f in features: weights = self.weights.setdefault(f, {}) upd_feat(truth, f, weights.get(truth, 0.0), 1.0) upd_feat(guess, f, weights.get(guess, 0.0), -1.0) return None
[ "def", "update", "(", "self", ",", "truth", ",", "guess", ",", "features", ")", ":", "def", "upd_feat", "(", "c", ",", "f", ",", "w", ",", "v", ")", ":", "param", "=", "(", "f", ",", "c", ")", "self", ".", "_totals", "[", "param", "]", "+=", "(", "self", ".", "i", "-", "self", ".", "_tstamps", "[", "param", "]", ")", "*", "w", "self", ".", "_tstamps", "[", "param", "]", "=", "self", ".", "i", "self", ".", "weights", "[", "f", "]", "[", "c", "]", "=", "w", "+", "v", "self", ".", "i", "+=", "1", "if", "truth", "==", "guess", ":", "return", "None", "for", "f", "in", "features", ":", "weights", "=", "self", ".", "weights", ".", "setdefault", "(", "f", ",", "{", "}", ")", "upd_feat", "(", "truth", ",", "f", ",", "weights", ".", "get", "(", "truth", ",", "0.0", ")", ",", "1.0", ")", "upd_feat", "(", "guess", ",", "f", ",", "weights", ".", "get", "(", "guess", ",", "0.0", ")", ",", "-", "1.0", ")", "return", "None" ]
Update the feature weights.
[ "Update", "the", "feature", "weights", "." ]
python
train
36
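A toy run of the perceptron update above, with the averaging bookkeeping (_totals/_tstamps) omitted: per active feature, the weight of the true class goes up by 1 and the guessed class down by 1. Data are invented.

weights = {'suffix=ing': {'VBG': 0.5, 'NN': 0.2}}

def update(truth, guess, features, weights):
    if truth == guess:
        return
    for f in features:
        w = weights.setdefault(f, {})
        w[truth] = w.get(truth, 0.0) + 1.0
        w[guess] = w.get(guess, 0.0) - 1.0

update('VBG', 'NN', ['suffix=ing'], weights)
print(weights)  # {'suffix=ing': {'VBG': 1.5, 'NN': -0.8}}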
outini/python-pylls
pylls/cachet.py
https://github.com/outini/python-pylls/blob/f9fa220594bc1974469097d9bad690a42d0d0f0f/pylls/cachet.py#L479-L493
def create(self, email, verify=None, components=None): """Create a new subscriber :param str email: Email address to subscribe :param bool verify: Whether to send verification email :param list components: Components ID list, defaults to all :return: Created subscriber data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#subscribers """ data = ApiParams() data['email'] = email data['verify'] = verify data['components'] = components return self._post('subscribers', data=data)['data']
[ "def", "create", "(", "self", ",", "email", ",", "verify", "=", "None", ",", "components", "=", "None", ")", ":", "data", "=", "ApiParams", "(", ")", "data", "[", "'email'", "]", "=", "email", "data", "[", "'verify'", "]", "=", "verify", "data", "[", "'components'", "]", "=", "components", "return", "self", ".", "_post", "(", "'subscribers'", ",", "data", "=", "data", ")", "[", "'data'", "]" ]
Create a new subscriber :param str email: Email address to subscribe :param bool verify: Whether to send verification email :param list components: Components ID list, defaults to all :return: Created subscriber data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#subscribers
[ "Create", "a", "new", "subscriber" ]
python
train
39.133333
KoffeinFlummi/Chronyk
chronyk/chronyk.py
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L112-L128
def guesstype(timestr): """Tries to guess whether a string represents a time or a time delta and returns the appropriate object. :param timestr (required) The string to be analyzed """ timestr_full = " {} ".format(timestr) if timestr_full.find(" in ") != -1 or timestr_full.find(" ago ") != -1: return Chronyk(timestr) comps = ["second", "minute", "hour", "day", "week", "month", "year"] for comp in comps: if timestr_full.find(comp) != -1: return ChronykDelta(timestr) return Chronyk(timestr)
[ "def", "guesstype", "(", "timestr", ")", ":", "timestr_full", "=", "\" {} \"", ".", "format", "(", "timestr", ")", "if", "timestr_full", ".", "find", "(", "\" in \"", ")", "!=", "-", "1", "or", "timestr_full", ".", "find", "(", "\" ago \"", ")", "!=", "-", "1", ":", "return", "Chronyk", "(", "timestr", ")", "comps", "=", "[", "\"second\"", ",", "\"minute\"", ",", "\"hour\"", ",", "\"day\"", ",", "\"week\"", ",", "\"month\"", ",", "\"year\"", "]", "for", "comp", "in", "comps", ":", "if", "timestr_full", ".", "find", "(", "comp", ")", "!=", "-", "1", ":", "return", "ChronykDelta", "(", "timestr", ")", "return", "Chronyk", "(", "timestr", ")" ]
Tries to guess whether a string represents a time or a time delta and returns the appropriate object. :param timestr (required) The string to be analyzed
[ "Tries", "to", "guess", "whether", "a", "string", "represents", "a", "time", "or", "a", "time", "delta", "and", "returns", "the", "appropriate", "object", "." ]
python
train
32.470588
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fw_mgr.py#L589-L606
def _fw_delete(self, drvr_name, data): """Firewall Delete routine. This function calls routines to remove FW from fabric and device. It also updates its local cache. """ fw_id = data.get('firewall_id') tenant_id = self.tenant_db.get_fw_tenant(fw_id) if tenant_id not in self.fwid_attr: LOG.error("Invalid tenant id for FW delete %s", tenant_id) return tenant_obj = self.fwid_attr[tenant_id] ret = self._check_delete_fw(tenant_id, drvr_name) if ret: tenant_obj.delete_fw(fw_id) self.tenant_db.del_fw_tenant(fw_id)
[ "def", "_fw_delete", "(", "self", ",", "drvr_name", ",", "data", ")", ":", "fw_id", "=", "data", ".", "get", "(", "'firewall_id'", ")", "tenant_id", "=", "self", ".", "tenant_db", ".", "get_fw_tenant", "(", "fw_id", ")", "if", "tenant_id", "not", "in", "self", ".", "fwid_attr", ":", "LOG", ".", "error", "(", "\"Invalid tenant id for FW delete %s\"", ",", "tenant_id", ")", "return", "tenant_obj", "=", "self", ".", "fwid_attr", "[", "tenant_id", "]", "ret", "=", "self", ".", "_check_delete_fw", "(", "tenant_id", ",", "drvr_name", ")", "if", "ret", ":", "tenant_obj", ".", "delete_fw", "(", "fw_id", ")", "self", ".", "tenant_db", ".", "del_fw_tenant", "(", "fw_id", ")" ]
Firewall Delete routine. This function calls routines to remove FW from fabric and device. It also updates its local cache.
[ "Firewall", "Delete", "routine", "." ]
python
train
34.777778
brainiak/brainiak
brainiak/reprsimil/brsa.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L796-L853
def transform(self, X, y=None, scan_onsets=None): """ Use the model to estimate the time course of response to each condition (ts), and the time course unrelated to task (ts0) which is spread across the brain. This is equivalent to "decoding" the design matrix and nuisance regressors from a new dataset different from the training dataset on which fit() was applied. An AR(1) smooth prior is imposed on the decoded ts and ts0 with the AR(1) parameters learnt from the corresponding time courses in the training data. Notice: if you set the rank to be lower than the number of experimental conditions (number of columns in the design matrix), the recovered task-related activity will have collinearity (the recovered time courses of some conditions can be linearly explained by the recovered time courses of other conditions). Parameters ---------- X : numpy arrays, shape=[time_points, voxels] fMRI data of new data of the same subject. The voxels should match those used in the fit() function. If data are z-scored (recommended) when fitting the model, data should be z-scored as well when calling transform() y : not used (as it is unsupervised learning) scan_onsets : numpy array, shape=[number of runs]. A list of indices corresponding to the onsets of scans in the data X. If not provided, data will be assumed to be acquired in a continuous scan. Returns ------- ts : numpy arrays, shape = [time_points, condition] The estimated response to the task conditions which have the response amplitudes estimated during the fit step. ts0: numpy array, shape = [time_points, n_nureg] The estimated time course spread across the brain, with the loading weights estimated during the fit step. """ assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \ 'The shape of X is not consistent with the shape of data '\ 'used in the fitting step. They should have the same number '\ 'of voxels' assert scan_onsets is None or (scan_onsets.ndim == 1 and 0 in scan_onsets), \ 'scan_onsets should either be None or an array of indices '\ 'If it is given, it should include at least 0' if scan_onsets is None: scan_onsets = np.array([0], dtype=int) else: scan_onsets = np.int32(scan_onsets) ts, ts0, log_p = self._transform( Y=X, scan_onsets=scan_onsets, beta=self.beta_, beta0=self.beta0_, rho_e=self.rho_, sigma_e=self.sigma_, rho_X=self._rho_design_, sigma2_X=self._sigma2_design_, rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_) return ts, ts0
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ",", "scan_onsets", "=", "None", ")", ":", "assert", "X", ".", "ndim", "==", "2", "and", "X", ".", "shape", "[", "1", "]", "==", "self", ".", "beta_", ".", "shape", "[", "1", "]", ",", "'The shape of X is not consistent with the shape of data '", "'used in the fitting step. They should have the same number '", "'of voxels'", "assert", "scan_onsets", "is", "None", "or", "(", "scan_onsets", ".", "ndim", "==", "1", "and", "0", "in", "scan_onsets", ")", ",", "'scan_onsets should either be None or an array of indices '", "'If it is given, it should include at least 0'", "if", "scan_onsets", "is", "None", ":", "scan_onsets", "=", "np", ".", "array", "(", "[", "0", "]", ",", "dtype", "=", "int", ")", "else", ":", "scan_onsets", "=", "np", ".", "int32", "(", "scan_onsets", ")", "ts", ",", "ts0", ",", "log_p", "=", "self", ".", "_transform", "(", "Y", "=", "X", ",", "scan_onsets", "=", "scan_onsets", ",", "beta", "=", "self", ".", "beta_", ",", "beta0", "=", "self", ".", "beta0_", ",", "rho_e", "=", "self", ".", "rho_", ",", "sigma_e", "=", "self", ".", "sigma_", ",", "rho_X", "=", "self", ".", "_rho_design_", ",", "sigma2_X", "=", "self", ".", "_sigma2_design_", ",", "rho_X0", "=", "self", ".", "_rho_X0_", ",", "sigma2_X0", "=", "self", ".", "_sigma2_X0_", ")", "return", "ts", ",", "ts0" ]
Use the model to estimate the time course of response to each condition (ts), and the time course unrelated to task (ts0) which is spread across the brain. This is equivalent to "decoding" the design matrix and nuisance regressors from a new dataset different from the training dataset on which fit() was applied. An AR(1) smooth prior is imposed on the decoded ts and ts0 with the AR(1) parameters learnt from the corresponding time courses in the training data. Notice: if you set the rank to be lower than the number of experimental conditions (number of columns in the design matrix), the recovered task-related activity will have collinearity (the recovered time courses of some conditions can be linearly explained by the recovered time courses of other conditions). Parameters ---------- X : numpy arrays, shape=[time_points, voxels] fMRI data of new data of the same subject. The voxels should match those used in the fit() function. If data are z-scored (recommended) when fitting the model, data should be z-scored as well when calling transform() y : not used (as it is unsupervised learning) scan_onsets : numpy array, shape=[number of runs]. A list of indices corresponding to the onsets of scans in the data X. If not provided, data will be assumed to be acquired in a continuous scan. Returns ------- ts : numpy arrays, shape = [time_points, condition] The estimated response to the task conditions which have the response amplitudes estimated during the fit step. ts0: numpy array, shape = [time_points, n_nureg] The estimated time course spread across the brain, with the loading weights estimated during the fit step.
[ "Use", "the", "model", "to", "estimate", "the", "time", "course", "of", "response", "to", "each", "condition", "(", "ts", ")", "and", "the", "time", "course", "unrelated", "to", "task", "(", "ts0", ")", "which", "is", "spread", "across", "the", "brain", ".", "This", "is", "equivalent", "to", "decoding", "the", "design", "matrix", "and", "nuisance", "regressors", "from", "a", "new", "dataset", "different", "from", "the", "training", "dataset", "on", "which", "fit", "()", "was", "applied", ".", "An", "AR", "(", "1", ")", "smooth", "prior", "is", "imposed", "on", "the", "decoded", "ts", "and", "ts0", "with", "the", "AR", "(", "1", ")", "parameters", "learnt", "from", "the", "corresponding", "time", "courses", "in", "the", "training", "data", ".", "Notice", ":", "if", "you", "set", "the", "rank", "to", "be", "lower", "than", "the", "number", "of", "experimental", "conditions", "(", "number", "of", "columns", "in", "the", "design", "matrix", ")", "the", "recovered", "task", "-", "related", "activity", "will", "have", "collinearity", "(", "the", "recovered", "time", "courses", "of", "some", "conditions", "can", "be", "linearly", "explained", "by", "the", "recovered", "time", "courses", "of", "other", "conditions", ")", "." ]
python
train
51.568966
alefnula/tea
tea/logger/win_handlers.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/logger/win_handlers.py#L42-L55
def emit(self, record): """Emit a record. Output the record to the file, catering for rollover as described in doRollover(). """ try: if self.shouldRollover(record): self.doRollover() FileHandler.emit(self, record) except (KeyboardInterrupt, SystemExit): raise except Exception: self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "try", ":", "if", "self", ".", "shouldRollover", "(", "record", ")", ":", "self", ".", "doRollover", "(", ")", "FileHandler", ".", "emit", "(", "self", ",", "record", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "raise", "except", "Exception", ":", "self", ".", "handleError", "(", "record", ")" ]
Emit a record. Output the record to the file, catering for rollover as described in doRollover().
[ "Emit", "a", "record", "." ]
python
train
29.357143
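The emit() above is the standard rollover pattern: check shouldRollover() per record, rotate if needed, then delegate to the base handler. A minimal sketch using the stdlib handler that implements the same protocol; the file name and limits here are arbitrary.

import logging
from logging.handlers import RotatingFileHandler

handler = RotatingFileHandler("app.log", maxBytes=1_000_000, backupCount=3)
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Each call to emit() first checks shouldRollover(record) and rotates if needed.
logger.info("this record may trigger a rollover before being written")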
quantumlib/Cirq
cirq/value/value_equality.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/value/value_equality.py#L135-L223
def value_equality(cls: type = None, *, unhashable: bool = False, distinct_child_types: bool = False, manual_cls: bool = False, approximate: bool = False ) -> Union[Callable[[type], type], type]: """Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method. _value_equality_values_ is a method that the decorated class must implement. _value_equality_approximate_values_ is a method that the decorated class might implement if special support for approximate equality is required. This is only used when approximate argument is set. When approximate argument is set and _value_equality_approximate_values_ is not defined, _value_equality_values_ values are used for approximate equality. For example, this can be used to compare periodic values like angles: the angle value can be wrapped with `PeriodicValue`. When returned as part of approximate values a special normalization will be done automatically to guarantee correctness. Note that the type of the decorated value is included as part of the value equality values. This is so that completely separate classes with identical equality values (e.g. a Point2D and a Vector2D) don't compare as equal. Further note that this means that child types of the decorated type will be considered equal to each other, though this behavior can be changed via the 'distinct_child_types` argument. The type logic is implemented behind the scenes by a `_value_equality_values_cls_` method added to the class. Args: cls: The type to decorate. Automatically passed in by python when using the @cirq.value_equality decorator notation on a class. unhashable: When set, the __hash__ method will be set to None instead of to a hash of the equality class and equality values. Useful for mutable types such as dictionaries. distinct_child_types: When set, classes that inherit from the decorated class will not be considered equal to it. Also, different child classes will not be considered equal to each other. Useful for when the decorated class is an abstract class or trait that is helping to define equality for many conceptually distinct concrete classes. manual_cls: When set, the method '_value_equality_values_cls_' must be implemented. This allows a new class to compare as equal to another existing class that is also using value equality, by having the new class return the existing class' type. Incompatible with `distinct_child_types`. approximate: When set, the decorated class will be enhanced with `_approx_eq_` implementation and thus start to support the `SupportsApproximateEquality` protocol. """ # If keyword arguments were specified, python invokes the decorator method # without a `cls` argument, then passes `cls` into the result. 
if cls is None: return lambda deferred_cls: value_equality(deferred_cls, unhashable=unhashable, manual_cls=manual_cls, distinct_child_types= distinct_child_types, approximate=approximate) if distinct_child_types and manual_cls: raise ValueError("'distinct_child_types' is " "incompatible with 'manual_cls'") values_getter = getattr(cls, '_value_equality_values_', None) if values_getter is None: raise TypeError('The @cirq.value_equality decorator requires a ' '_value_equality_values_ method to be defined.') if distinct_child_types: setattr(cls, '_value_equality_values_cls_', lambda self: type(self)) elif manual_cls: cls_getter = getattr(cls, '_value_equality_values_cls_', None) if cls_getter is None: raise TypeError('The @cirq.value_equality decorator requires a ' '_value_equality_values_cls_ method to be defined ' 'when "manual_cls" is set.') else: setattr(cls, '_value_equality_values_cls_', lambda self: cls) setattr(cls, '__hash__', None if unhashable else _value_equality_hash) setattr(cls, '__eq__', _value_equality_eq) setattr(cls, '__ne__', _value_equality_ne) if approximate: if not hasattr(cls, '_value_equality_approximate_values_'): setattr(cls, '_value_equality_approximate_values_', values_getter) setattr(cls, '_approx_eq_', _value_equality_approx_eq) return cls
[ "def", "value_equality", "(", "cls", ":", "type", "=", "None", ",", "*", ",", "unhashable", ":", "bool", "=", "False", ",", "distinct_child_types", ":", "bool", "=", "False", ",", "manual_cls", ":", "bool", "=", "False", ",", "approximate", ":", "bool", "=", "False", ")", "->", "Union", "[", "Callable", "[", "[", "type", "]", ",", "type", "]", ",", "type", "]", ":", "# If keyword arguments were specified, python invokes the decorator method", "# without a `cls` argument, then passes `cls` into the result.", "if", "cls", "is", "None", ":", "return", "lambda", "deferred_cls", ":", "value_equality", "(", "deferred_cls", ",", "unhashable", "=", "unhashable", ",", "manual_cls", "=", "manual_cls", ",", "distinct_child_types", "=", "distinct_child_types", ",", "approximate", "=", "approximate", ")", "if", "distinct_child_types", "and", "manual_cls", ":", "raise", "ValueError", "(", "\"'distinct_child_types' is \"", "\"incompatible with 'manual_cls\"", ")", "values_getter", "=", "getattr", "(", "cls", ",", "'_value_equality_values_'", ",", "None", ")", "if", "values_getter", "is", "None", ":", "raise", "TypeError", "(", "'The @cirq.value_equality decorator requires a '", "'_value_equality_values_ method to be defined.'", ")", "if", "distinct_child_types", ":", "setattr", "(", "cls", ",", "'_value_equality_values_cls_'", ",", "lambda", "self", ":", "type", "(", "self", ")", ")", "elif", "manual_cls", ":", "cls_getter", "=", "getattr", "(", "cls", ",", "'_value_equality_values_cls_'", ",", "None", ")", "if", "cls_getter", "is", "None", ":", "raise", "TypeError", "(", "'The @cirq.value_equality decorator requires a '", "'_value_equality_values_cls_ method to be defined '", "'when \"manual_cls\" is set.'", ")", "else", ":", "setattr", "(", "cls", ",", "'_value_equality_values_cls_'", ",", "lambda", "self", ":", "cls", ")", "setattr", "(", "cls", ",", "'__hash__'", ",", "None", "if", "unhashable", "else", "_value_equality_hash", ")", "setattr", "(", "cls", ",", "'__eq__'", ",", "_value_equality_eq", ")", "setattr", "(", "cls", ",", "'__ne__'", ",", "_value_equality_ne", ")", "if", "approximate", ":", "if", "not", "hasattr", "(", "cls", ",", "'_value_equality_approximate_values_'", ")", ":", "setattr", "(", "cls", ",", "'_value_equality_approximate_values_'", ",", "values_getter", ")", "setattr", "(", "cls", ",", "'_approx_eq_'", ",", "_value_equality_approx_eq", ")", "return", "cls" ]
Implements __eq__/__ne__/__hash__ via a _value_equality_values_ method. _value_equality_values_ is a method that the decorated class must implement. _value_equality_approximate_values_ is a method that the decorated class might implement if special support for approximate equality is required. This is only used when approximate argument is set. When approximate argument is set and _value_equality_approximate_values_ is not defined, _value_equality_values_ values are used for approximate equality. For example, this can be used to compare periodic values like angles: the angle value can be wrapped with `PeriodicValue`. When returned as part of approximate values a special normalization will be done automatically to guarantee correctness. Note that the type of the decorated value is included as part of the value equality values. This is so that completely separate classes with identical equality values (e.g. a Point2D and a Vector2D) don't compare as equal. Further note that this means that child types of the decorated type will be considered equal to each other, though this behavior can be changed via the 'distinct_child_types` argument. The type logic is implemented behind the scenes by a `_value_equality_values_cls_` method added to the class. Args: cls: The type to decorate. Automatically passed in by python when using the @cirq.value_equality decorator notation on a class. unhashable: When set, the __hash__ method will be set to None instead of to a hash of the equality class and equality values. Useful for mutable types such as dictionaries. distinct_child_types: When set, classes that inherit from the decorated class will not be considered equal to it. Also, different child classes will not be considered equal to each other. Useful for when the decorated class is an abstract class or trait that is helping to define equality for many conceptually distinct concrete classes. manual_cls: When set, the method '_value_equality_values_cls_' must be implemented. This allows a new class to compare as equal to another existing class that is also using value equality, by having the new class return the existing class' type. Incompatible with `distinct_child_types`. approximate: When set, the decorated class will be enhanced with `_approx_eq_` implementation and thus start to support the `SupportsApproximateEquality` protocol.
[ "Implements", "__eq__", "/", "__ne__", "/", "__hash__", "via", "a", "_value_equality_values_", "method", "." ]
python
train
54.044944
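A short usage example for the decorator above; the Point2D class is invented for illustration, while the decorator and the _value_equality_values_ contract are exactly as documented.

import cirq

@cirq.value_equality
class Point2D:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def _value_equality_values_(self):
        # __eq__/__ne__/__hash__ are derived from this tuple (plus the type).
        return self.x, self.y

assert Point2D(1, 2) == Point2D(1, 2)
assert Point2D(1, 2) != Point2D(2, 1)
assert hash(Point2D(1, 2)) == hash(Point2D(1, 2))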
pytorch/vision
torchvision/models/alexnet.py
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/models/alexnet.py#L51-L61
def alexnet(pretrained=False, **kwargs): r"""AlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = AlexNet(**kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['alexnet'])) return model
[ "def", "alexnet", "(", "pretrained", "=", "False", ",", "*", "*", "kwargs", ")", ":", "model", "=", "AlexNet", "(", "*", "*", "kwargs", ")", "if", "pretrained", ":", "model", ".", "load_state_dict", "(", "model_zoo", ".", "load_url", "(", "model_urls", "[", "'alexnet'", "]", ")", ")", "return", "model" ]
r"""AlexNet model architecture from the `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
[ "r", "AlexNet", "model", "architecture", "from", "the", "One", "weird", "trick", "...", "<https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1404", ".", "5997", ">", "_", "paper", "." ]
python
test
34.272727
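A usage example for the factory above, matching the pretrained flag shown in its signature; the random tensor only demonstrates the expected input shape.

import torch
from torchvision.models import alexnet

model = alexnet(pretrained=True)  # loads ImageNet weights from model_zoo
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # one fake RGB image
print(logits.shape)  # torch.Size([1, 1000])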
nickw444/flask-ldap3-login
flask_ldap3_login/__init__.py
https://github.com/nickw444/flask-ldap3-login/blob/3cf0faff52d0e04d4813119a2ba36d706e6fb31f/flask_ldap3_login/__init__.py#L705-L736
def connection(self): """ Convenience property for externally accessing an authenticated connection to the server. This connection is automatically handled by the appcontext, so you do not have to perform an unbind. Returns: ldap3.Connection: A bound ldap3.Connection Raises: ldap3.core.exceptions.LDAPException: Since this method is performing a bind on behalf of the caller. You should handle this case occurring, such as invalid service credentials. """ ctx = stack.top if ctx is None: raise Exception("Working outside of the Flask application " "context. If you wish to make a connection outside of a flask" " application context, please handle your connections " "and use manager.make_connection()") if hasattr(ctx, 'ldap3_manager_main_connection'): return ctx.ldap3_manager_main_connection else: connection = self._make_connection( bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'), contextualise=False ) connection.bind() if ctx is not None: ctx.ldap3_manager_main_connection = connection return connection
[ "def", "connection", "(", "self", ")", ":", "ctx", "=", "stack", ".", "top", "if", "ctx", "is", "None", ":", "raise", "Exception", "(", "\"Working outside of the Flask application \"", "\"context. If you wish to make a connection outside of a flask\"", "\" application context, please handle your connections \"", "\"and use manager.make_connection()\"", ")", "if", "hasattr", "(", "ctx", ",", "'ldap3_manager_main_connection'", ")", ":", "return", "ctx", ".", "ldap3_manager_main_connection", "else", ":", "connection", "=", "self", ".", "_make_connection", "(", "bind_user", "=", "self", ".", "config", ".", "get", "(", "'LDAP_BIND_USER_DN'", ")", ",", "bind_password", "=", "self", ".", "config", ".", "get", "(", "'LDAP_BIND_USER_PASSWORD'", ")", ",", "contextualise", "=", "False", ")", "connection", ".", "bind", "(", ")", "if", "ctx", "is", "not", "None", ":", "ctx", ".", "ldap3_manager_main_connection", "=", "connection", "return", "connection" ]
Convenience property for externally accessing an authenticated connection to the server. This connection is automatically handled by the appcontext, so you do not have to perform an unbind. Returns: ldap3.Connection: A bound ldap3.Connection Raises: ldap3.core.exceptions.LDAPException: Since this method is performing a bind on behalf of the caller. You should handle this case occurring, such as invalid service credentials.
[ "Convenience", "property", "for", "externally", "accessing", "an", "authenticated", "connection", "to", "the", "server", ".", "This", "connection", "is", "automatically", "handled", "by", "the", "appcontext", "so", "you", "do", "not", "have", "to", "perform", "an", "unbind", "." ]
python
test
44.1875
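A hedged sketch of reading the property above inside a Flask application context; the host and credentials are placeholders, and an actual bind will of course fail without a reachable LDAP server.

from flask import Flask
from flask_ldap3_login import LDAP3LoginManager

app = Flask(__name__)
app.config["LDAP_HOST"] = "ldap.example.com"                  # placeholder
app.config["LDAP_BIND_USER_DN"] = "cn=svc,dc=example,dc=com"  # placeholder
app.config["LDAP_BIND_USER_PASSWORD"] = "secret"              # placeholder

manager = LDAP3LoginManager(app)

with app.app_context():
    conn = manager.connection  # bound ldap3.Connection, reused per context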
tjcsl/cslbot
cslbot/commands/wtf.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/wtf.py#L24-L34
def cmd(send, msg, _): """Tells you what acronyms mean. Syntax: {command} <term> """ try: answer = subprocess.check_output(['wtf', msg], stderr=subprocess.STDOUT) send(answer.decode().strip().replace('\n', ' or ').replace('fuck', 'fsck')) except subprocess.CalledProcessError as ex: send(ex.output.decode().rstrip().splitlines()[0])
[ "def", "cmd", "(", "send", ",", "msg", ",", "_", ")", ":", "try", ":", "answer", "=", "subprocess", ".", "check_output", "(", "[", "'wtf'", ",", "msg", "]", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "send", "(", "answer", ".", "decode", "(", ")", ".", "strip", "(", ")", ".", "replace", "(", "'\\n'", ",", "' or '", ")", ".", "replace", "(", "'fuck'", ",", "'fsck'", ")", ")", "except", "subprocess", ".", "CalledProcessError", "as", "ex", ":", "send", "(", "ex", ".", "output", ".", "decode", "(", ")", ".", "rstrip", "(", ")", ".", "splitlines", "(", ")", "[", "0", "]", ")" ]
Tells you what acronyms mean. Syntax: {command} <term>
[ "Tells", "you", "what", "acronyms", "mean", "." ]
python
train
33.454545
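The same subprocess pattern as above, outside the bot framework: merge stderr into stdout so a failing command's message can be relayed from the CalledProcessError.

import subprocess

try:
    out = subprocess.check_output(["wtf", "ASAP"], stderr=subprocess.STDOUT)
    print(out.decode().strip())
except subprocess.CalledProcessError as exc:
    # On failure, the merged output is available on the exception.
    print(exc.output.decode().rstrip().splitlines()[0])
except FileNotFoundError:
    print("the 'wtf' utility (bsdgames) is not installed")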
NLeSC/noodles
noodles/lib/thread_pool.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/lib/thread_pool.py#L35-L71
def thread_pool(*workers, results=None, end_of_queue=EndOfQueue): """Returns a |pull| object, call it ``r``, starting a thread for each given worker. Each thread pulls from the source that ``r`` is connected to, and the returned results are pushed to a |Queue|. ``r`` yields from the other end of the same |Queue|. The target function for each thread is |patch|, which can be stopped by exhausting the source. If all threads have ended, the result queue receives end-of-queue. :param results: If results should go somewhere else than a newly constructed |Queue|, a different |Connection| object can be given. :type results: |Connection| :param end_of_queue: end-of-queue signal object passed on to the creation of the |Queue| object. :rtype: |pull| """ if results is None: results = Queue(end_of_queue=end_of_queue) count = thread_counter(results.close) @pull def thread_pool_results(source): for worker in workers: t = threading.Thread( target=count(patch), args=(pull(source) >> worker, results.sink), daemon=True) t.start() yield from results.source() return thread_pool_results
[ "def", "thread_pool", "(", "*", "workers", ",", "results", "=", "None", ",", "end_of_queue", "=", "EndOfQueue", ")", ":", "if", "results", "is", "None", ":", "results", "=", "Queue", "(", "end_of_queue", "=", "end_of_queue", ")", "count", "=", "thread_counter", "(", "results", ".", "close", ")", "@", "pull", "def", "thread_pool_results", "(", "source", ")", ":", "for", "worker", "in", "workers", ":", "t", "=", "threading", ".", "Thread", "(", "target", "=", "count", "(", "patch", ")", ",", "args", "=", "(", "pull", "(", "source", ")", ">>", "worker", ",", "results", ".", "sink", ")", ",", "daemon", "=", "True", ")", "t", ".", "start", "(", ")", "yield", "from", "results", ".", "source", "(", ")", "return", "thread_pool_results" ]
Returns a |pull| object, call it ``r``, starting a thread for each given worker. Each thread pulls from the source that ``r`` is connected to, and the returned results are pushed to a |Queue|. ``r`` yields from the other end of the same |Queue|. The target function for each thread is |patch|, which can be stopped by exhausting the source. If all threads have ended, the result queue receives end-of-queue. :param results: If results should go somewhere else than a newly constructed |Queue|, a different |Connection| object can be given. :type results: |Connection| :param end_of_queue: end-of-queue signal object passed on to the creation of the |Queue| object. :rtype: |pull|
[ "Returns", "a", "|pull|", "object", "call", "it", "r", "starting", "a", "thread", "for", "each", "given", "worker", ".", "Each", "thread", "pulls", "from", "the", "source", "that", "r", "is", "connected", "to", "and", "the", "returned", "results", "are", "pushed", "to", "a", "|Queue|", ".", "r", "yields", "from", "the", "other", "end", "of", "the", "same", "|Queue|", "." ]
python
train
33.405405
wummel/linkchecker
linkcheck/logger/__init__.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/__init__.py#L230-L245
def start_fileoutput (self): """Start output to configured file.""" path = os.path.dirname(self.filename) try: if path and not os.path.isdir(path): os.makedirs(path) self.fd = self.create_fd() self.close_fd = True except IOError: msg = sys.exc_info()[1] log.warn(LOG_CHECK, "Could not open file %r for writing: %s\n" "Disabling log output of %s", self.filename, msg, self) self.fd = dummy.Dummy() self.is_active = False self.filename = None
[ "def", "start_fileoutput", "(", "self", ")", ":", "path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "filename", ")", "try", ":", "if", "path", "and", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "self", ".", "fd", "=", "self", ".", "create_fd", "(", ")", "self", ".", "close_fd", "=", "True", "except", "IOError", ":", "msg", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "log", ".", "warn", "(", "LOG_CHECK", ",", "\"Could not open file %r for writing: %s\\n\"", "\"Disabling log output of %s\"", ",", "self", ".", "filename", ",", "msg", ",", "self", ")", "self", ".", "fd", "=", "dummy", ".", "Dummy", "(", ")", "self", ".", "is_active", "=", "False", "self", ".", "filename", "=", "None" ]
Start output to configured file.
[ "Start", "output", "to", "configured", "file", "." ]
python
train
37.3125
MonashBI/arcana
arcana/data/file_format.py
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/data/file_format.py#L304-L315
def set_converter(self, file_format, converter): """ Register a Converter and the FileFormat that it is able to convert from Parameters ---------- file_format : FileFormat The file format that can be converted into this format converter : Converter The converter to register """ self._converters[file_format.name] = (file_format, converter)
[ "def", "set_converter", "(", "self", ",", "file_format", ",", "converter", ")", ":", "self", ".", "_converters", "[", "file_format", ".", "name", "]", "=", "(", "file_format", ",", "converter", ")" ]
Register a Converter and the FileFormat that it is able to convert from Parameters ---------- file_format : FileFormat The file format that can be converted into this format converter : Converter The converter to register
[ "Register", "a", "Converter", "and", "the", "FileFormat", "that", "it", "is", "able", "to", "convert", "from" ]
python
train
34.833333
romanz/trezor-agent
libagent/gpg/agent.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/agent.py#L216-L219
def set_hash(self, algo, digest): """Set algorithm ID and hexadecimal digest for next operation.""" self.algo = algo self.digest = digest
[ "def", "set_hash", "(", "self", ",", "algo", ",", "digest", ")", ":", "self", ".", "algo", "=", "algo", "self", ".", "digest", "=", "digest" ]
Set algorithm ID and hexadecimal digest for next operation.
[ "Set", "algorithm", "ID", "and", "hexadecimal", "digest", "for", "next", "operation", "." ]
python
train
39.5
hsolbrig/PyShEx
pyshex/shape_expressions_language/p5_context.py
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/shape_expressions_language/p5_context.py#L356-L378
def start_evaluating(self, n: Node, s: ShExJ.shapeExpr) -> Optional[bool]: """Indicate that we are beginning to evaluate n according to shape expression s. If we are already in the process of evaluating (n,s), as indicated by self.evaluating, we return our current guess as to the result. :param n: Node to be evaluated :param s: expression for node evaluation :return: Assumed evaluation result. If None, evaluation must be performed """ if not s.id: s.id = str(BNode()) # Random permanent id key = (n, s.id) # We only evaluate a node once if key in self.known_results: return self.known_results[key] if key not in self.evaluating: self.evaluating.add(key) return None elif key not in self.assumptions: self.assumptions[key] = True return self.assumptions[key]
[ "def", "start_evaluating", "(", "self", ",", "n", ":", "Node", ",", "s", ":", "ShExJ", ".", "shapeExpr", ")", "->", "Optional", "[", "bool", "]", ":", "if", "not", "s", ".", "id", ":", "s", ".", "id", "=", "str", "(", "BNode", "(", ")", ")", "# Random permanant id", "key", "=", "(", "n", ",", "s", ".", "id", ")", "# We only evaluate a node once", "if", "key", "in", "self", ".", "known_results", ":", "return", "self", ".", "known_results", "[", "key", "]", "if", "key", "not", "in", "self", ".", "evaluating", ":", "self", ".", "evaluating", ".", "add", "(", "key", ")", "return", "None", "elif", "key", "not", "in", "self", ".", "assumptions", ":", "self", ".", "assumptions", "[", "key", "]", "=", "True", "return", "self", ".", "assumptions", "[", "key", "]" ]
Indicate that we are beginning to evaluate n according to shape expression s. If we are already in the process of evaluating (n,s), as indicated by self.evaluating, we return our current guess as to the result. :param n: Node to be evaluated :param s: expression for node evaluation :return: Assumed evaluation result. If None, evaluation must be performed
[ "Indicate", "that", "we", "are", "beginning", "to", "evaluate", "n", "according", "to", "shape", "expression", "s", ".", "If", "we", "are", "already", "in", "the", "process", "of", "evaluating", "(", "n", "s", ")", "as", "indicated", "self", ".", "evaluating", "we", "return", "our", "current", "guess", "as", "to", "the", "result", "." ]
python
train
40.347826
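The method above is the classic coinductive trick for recursive shape references: on re-entry for the same (node, shape) pair, assume success and let the outer evaluation refine it. A generic, self-contained sketch of that pattern (names are illustrative, not PyShEx API):

def evaluate(key, evaluating, known, compute):
    if key in known:           # already settled earlier
        return known[key]
    if key in evaluating:      # cycle detected: provisionally assume success
        return True
    evaluating.add(key)
    result = compute(key)      # may recurse back into evaluate()
    evaluating.discard(key)
    known[key] = result
    return result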
quantmind/pulsar
pulsar/apps/http/plugins.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/http/plugins.py#L230-L245
def on_headers(self, response, exc=None): '''Websocket upgrade as ``on_headers`` event.''' if response.status_code == 101: connection = response.connection request = response.request handler = request.websocket_handler if not handler: handler = WS() parser = request.client.frame_parser(kind=1) consumer = partial(WebSocketClient.create, response, handler, parser) connection.upgrade(consumer) response.event('post_request').fire() websocket = connection.current_consumer() response.request_again = lambda r: websocket
[ "def", "on_headers", "(", "self", ",", "response", ",", "exc", "=", "None", ")", ":", "if", "response", ".", "status_code", "==", "101", ":", "connection", "=", "response", ".", "connection", "request", "=", "response", ".", "request", "handler", "=", "request", ".", "websocket_handler", "if", "not", "handler", ":", "handler", "=", "WS", "(", ")", "parser", "=", "request", ".", "client", ".", "frame_parser", "(", "kind", "=", "1", ")", "consumer", "=", "partial", "(", "WebSocketClient", ".", "create", ",", "response", ",", "handler", ",", "parser", ")", "connection", ".", "upgrade", "(", "consumer", ")", "response", ".", "event", "(", "'post_request'", ")", ".", "fire", "(", ")", "websocket", "=", "connection", ".", "current_consumer", "(", ")", "response", ".", "request_again", "=", "lambda", "r", ":", "websocket" ]
Websocket upgrade as ``on_headers`` event.
[ "Websocket", "upgrade", "as", "on_headers", "event", "." ]
python
train
42.9375
Dentosal/python-sc2
sc2/position.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/position.py#L267-L272
def center(a: Union[Set["Point2"], List["Point2"]]) -> "Point2": """ Returns the central point for points in list """ s = Point2((0, 0)) for p in a: s += p return s / len(a)
[ "def", "center", "(", "a", ":", "Union", "[", "Set", "[", "\"Point2\"", "]", ",", "List", "[", "\"Point2\"", "]", "]", ")", "->", "\"Point2\"", ":", "s", "=", "Point2", "(", "(", "0", ",", "0", ")", ")", "for", "p", "in", "a", ":", "s", "+=", "p", "return", "s", "/", "len", "(", "a", ")" ]
Returns the central point for points in list
[ "Returns", "the", "central", "point", "for", "points", "in", "list" ]
python
train
35.333333
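A quick example of the static helper above; (0+4+2)/3 and (0+2+4)/3 both equal 2.

from sc2.position import Point2

pts = [Point2((0, 0)), Point2((4, 2)), Point2((2, 4))]
print(Point2.center(pts))  # the centroid, Point2((2, 2))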
Neurita/boyle
boyle/nifti/check.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L172-L217
def have_same_affine(one_img, another_img, only_check_3d=False): """ Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError """ img1 = check_img(one_img) img2 = check_img(another_img) ndim1 = len(img1.shape) ndim2 = len(img2.shape) if ndim1 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img1), ndim1)) if ndim2 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img2), ndim2)) affine1 = img1.get_affine() affine2 = img2.get_affine() if only_check_3d: affine1 = affine1[:3, :3] affine2 = affine2[:3, :3] try: return np.allclose(affine1, affine2) except ValueError: return False except: raise
[ "def", "have_same_affine", "(", "one_img", ",", "another_img", ",", "only_check_3d", "=", "False", ")", ":", "img1", "=", "check_img", "(", "one_img", ")", "img2", "=", "check_img", "(", "another_img", ")", "ndim1", "=", "len", "(", "img1", ".", "shape", ")", "ndim2", "=", "len", "(", "img2", ".", "shape", ")", "if", "ndim1", "<", "3", ":", "raise", "ValueError", "(", "'Image {} has only {} dimensions, at least 3 dimensions is expected.'", ".", "format", "(", "repr_imgs", "(", "img1", ")", ",", "ndim1", ")", ")", "if", "ndim2", "<", "3", ":", "raise", "ValueError", "(", "'Image {} has only {} dimensions, at least 3 dimensions is expected.'", ".", "format", "(", "repr_imgs", "(", "img2", ")", ",", "ndim1", ")", ")", "affine1", "=", "img1", ".", "get_affine", "(", ")", "affine2", "=", "img2", ".", "get_affine", "(", ")", "if", "only_check_3d", ":", "affine1", "=", "affine1", "[", ":", "3", ",", ":", "3", "]", "affine2", "=", "affine2", "[", ":", "3", ",", ":", "3", "]", "try", ":", "return", "np", ".", "allclose", "(", "affine1", ",", "affine2", ")", "except", "ValueError", ":", "return", "False", "except", ":", "raise" ]
Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError
[ "Return", "True", "if", "the", "affine", "matrix", "of", "one_img", "is", "close", "to", "the", "affine", "matrix", "of", "another_img", ".", "False", "otherwise", "." ]
python
valid
24.76087
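A small usage sketch for the check above, building two in-memory NIfTI images with identical (identity) affines; this assumes check_img() accepts nibabel image objects directly, as its name suggests.

import numpy as np
import nibabel as nib
from boyle.nifti.check import have_same_affine

img1 = nib.Nifti1Image(np.zeros((4, 4, 4)), affine=np.eye(4))
img2 = nib.Nifti1Image(np.ones((4, 4, 4)), affine=np.eye(4))
print(have_same_affine(img1, img2))  # True: both use the identity affine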
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L3042-L3057
def dskobj(dsk): """ Find the set of body ID codes of all objects for which topographic data are provided in a specified DSK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskobj_c.html :param dsk: Name of DSK file. :type dsk: str :return: Set of ID codes of objects in DSK file. :rtype: spiceypy.utils.support_types.SpiceCell """ dsk = stypes.stringToCharP(dsk) bodids = stypes.SPICEINT_CELL(10000) libspice.dskobj_c(dsk, ctypes.byref(bodids)) return bodids
[ "def", "dskobj", "(", "dsk", ")", ":", "dsk", "=", "stypes", ".", "stringToCharP", "(", "dsk", ")", "bodids", "=", "stypes", ".", "SPICEINT_CELL", "(", "10000", ")", "libspice", ".", "dskobj_c", "(", "dsk", ",", "ctypes", ".", "byref", "(", "bodids", ")", ")", "return", "bodids" ]
Find the set of body ID codes of all objects for which topographic data are provided in a specified DSK file. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskobj_c.html :param dsk: Name of DSK file. :type dsk: str :return: Set of ID codes of objects in DSK file. :rtype: spiceypy.utils.support_types.SpiceCell
[ "Find", "the", "set", "of", "body", "ID", "codes", "of", "all", "objects", "for", "which", "topographic", "data", "are", "provided", "in", "a", "specified", "DSK", "file", ".", "https", ":", "//", "naif", ".", "jpl", ".", "nasa", ".", "gov", "/", "pub", "/", "naif", "/", "toolkit_docs", "/", "C", "/", "cspice", "/", "dskobj_c", ".", "html", ":", "param", "dsk", ":", "Name", "of", "DSK", "file", ".", ":", "type", "dsk", ":", "str", ":", "return", ":", "Set", "of", "ID", "codes", "of", "objects", "in", "DSK", "file", ".", ":", "rtype", ":", "spiceypy", ".", "utils", ".", "support_types", ".", "SpiceCell" ]
python
train
32.75
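A usage sketch for the wrapper above; the DSK file name is a placeholder for any shape file you have locally, and card() (a real SpiceyPy call) reports how many IDs the returned cell holds.

import spiceypy

body_ids = spiceypy.dskobj("phobos.bds")  # placeholder local DSK file
print(spiceypy.card(body_ids))            # number of bodies with shape data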
UDST/pandana
pandana/loaders/pandash5.py
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/pandash5.py#L30-L53
def network_to_pandas_hdf5(network, filename, rm_nodes=None): """ Save a Network's data to a Pandas HDFStore. Parameters ---------- network : pandana.Network filename : str rm_nodes : array_like A list, array, Index, or Series of node IDs that should *not* be saved as part of the Network. """ if rm_nodes is not None: nodes, edges = remove_nodes(network, rm_nodes) else: nodes, edges = network.nodes_df, network.edges_df with pd.HDFStore(filename, mode='w') as store: store['nodes'] = nodes store['edges'] = edges store['two_way'] = pd.Series([network._twoway]) store['impedance_names'] = pd.Series(network.impedance_names)
[ "def", "network_to_pandas_hdf5", "(", "network", ",", "filename", ",", "rm_nodes", "=", "None", ")", ":", "if", "rm_nodes", "is", "not", "None", ":", "nodes", ",", "edges", "=", "remove_nodes", "(", "network", ",", "rm_nodes", ")", "else", ":", "nodes", ",", "edges", "=", "network", ".", "nodes_df", ",", "network", ".", "edges_df", "with", "pd", ".", "HDFStore", "(", "filename", ",", "mode", "=", "'w'", ")", "as", "store", ":", "store", "[", "'nodes'", "]", "=", "nodes", "store", "[", "'edges'", "]", "=", "edges", "store", "[", "'two_way'", "]", "=", "pd", ".", "Series", "(", "[", "network", ".", "_twoway", "]", ")", "store", "[", "'impedance_names'", "]", "=", "pd", ".", "Series", "(", "network", ".", "impedance_names", ")" ]
Save a Network's data to a Pandas HDFStore. Parameters ---------- network : pandana.Network filename : str rm_nodes : array_like A list, array, Index, or Series of node IDs that should *not* be saved as part of the Network.
[ "Save", "a", "Network", "s", "data", "to", "a", "Pandas", "HDFStore", "." ]
python
test
29.708333
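A short sketch of saving and inspecting a network with the function above; `net` is assumed to be an existing pandana.Network, so the code is wrapped in a function rather than constructing one.

import pandas as pd
from pandana.loaders.pandash5 import network_to_pandas_hdf5

def save_and_peek(net, path="network.h5"):
    network_to_pandas_hdf5(net, path)  # writes nodes, edges, and metadata
    with pd.HDFStore(path, mode="r") as store:
        print(store["nodes"].head())
        print(store["impedance_names"])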
saltstack/salt
salt/fileserver/svnfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/svnfs.py#L667-L718
def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' saltenv = load['saltenv'] if saltenv == 'base': saltenv = 'trunk' ret = {} relpath = fnd['rel'] path = fnd['path'] # If the file doesn't exist, we can't get a hash if not path or not os.path.isfile(path): return ret # Set the hash_type as it is determined by config ret['hash_type'] = __opts__['hash_type'] # Check if the hash is cached # Cache file's contents should be "hash:mtime" cache_path = os.path.join(__opts__['cachedir'], 'svnfs', 'hash', saltenv, '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) # If we have a cache, serve that if the mtime hasn't changed if os.path.exists(cache_path): with salt.utils.files.fopen(cache_path, 'r') as fp_: hsum, mtime = fp_.read().split(':') if os.path.getmtime(path) == float(mtime): # check if mtime changed ret['hsum'] = hsum return ret # if we don't have a cache entry-- lets make one ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) cache_dir = os.path.dirname(cache_path) # make cache directory if it doesn't exist if not os.path.exists(cache_dir): os.makedirs(cache_dir) # save the cache object "hash:mtime" with salt.utils.files.fopen(cache_path, 'w') as fp_: fp_.write('{0}:{1}'.format(ret['hsum'], os.path.getmtime(path))) return ret
[ "def", "file_hash", "(", "load", ",", "fnd", ")", ":", "if", "'env'", "in", "load", ":", "# \"env\" is not supported; Use \"saltenv\".", "load", ".", "pop", "(", "'env'", ")", "if", "not", "all", "(", "x", "in", "load", "for", "x", "in", "(", "'path'", ",", "'saltenv'", ")", ")", ":", "return", "''", "saltenv", "=", "load", "[", "'saltenv'", "]", "if", "saltenv", "==", "'base'", ":", "saltenv", "=", "'trunk'", "ret", "=", "{", "}", "relpath", "=", "fnd", "[", "'rel'", "]", "path", "=", "fnd", "[", "'path'", "]", "# If the file doesn't exist, we can't get a hash", "if", "not", "path", "or", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "ret", "# Set the hash_type as it is determined by config", "ret", "[", "'hash_type'", "]", "=", "__opts__", "[", "'hash_type'", "]", "# Check if the hash is cached", "# Cache file's contents should be \"hash:mtime\"", "cache_path", "=", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'cachedir'", "]", ",", "'svnfs'", ",", "'hash'", ",", "saltenv", ",", "'{0}.hash.{1}'", ".", "format", "(", "relpath", ",", "__opts__", "[", "'hash_type'", "]", ")", ")", "# If we have a cache, serve that if the mtime hasn't changed", "if", "os", ".", "path", ".", "exists", "(", "cache_path", ")", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "cache_path", ",", "'rb'", ")", "as", "fp_", ":", "hsum", ",", "mtime", "=", "fp_", ".", "read", "(", ")", ".", "split", "(", "':'", ")", "if", "os", ".", "path", ".", "getmtime", "(", "path", ")", "==", "mtime", ":", "# check if mtime changed", "ret", "[", "'hsum'", "]", "=", "hsum", "return", "ret", "# if we don't have a cache entry-- lets make one", "ret", "[", "'hsum'", "]", "=", "salt", ".", "utils", ".", "hashutils", ".", "get_hash", "(", "path", ",", "__opts__", "[", "'hash_type'", "]", ")", "cache_dir", "=", "os", ".", "path", ".", "dirname", "(", "cache_path", ")", "# make cache directory if it doesn't exist", "if", "not", "os", ".", "path", ".", "exists", "(", "cache_dir", ")", ":", "os", ".", "makedirs", "(", "cache_dir", ")", "# save the cache object \"hash:mtime\"", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "cache_path", ",", "'w'", ")", "as", "fp_", ":", "fp_", ".", "write", "(", "'{0}:{1}'", ".", "format", "(", "ret", "[", "'hsum'", "]", ",", "os", ".", "path", ".", "getmtime", "(", "path", ")", ")", ")", "return", "ret" ]
Return a file hash, the hash type is set in the master config file
[ "Return", "a", "file", "hash", "the", "hash", "type", "is", "set", "in", "the", "master", "config", "file" ]
python
train
35.038462
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L466-L577
def _mof_escaped(strvalue): # Note: This is a raw docstring because it shows many backslashes, and # that avoids having to double them. r""" Return a MOF-escaped string from the input string. Parameters: strvalue (:term:`unicode string`): The string value. Must not be `None`. Special characters must not be backslash-escaped. Details on backslash-escaping: `DSP0004` defines that the character repertoire for MOF string constants is the entire repertoire for the CIM string datatype. That is, the entire Unicode character repertoire except for U+0000. The only character for which `DSP0004` requires the use of a MOF escape sequence in a MOF string constant, is the double quote (because a MOF string constant is enclosed in double quotes). `DSP0004` defines MOF escape sequences for several more characters, but it does not require their use in MOF. For example, it is valid for a MOF string constant to contain the (unescaped) characters U+000D (newline) or U+0009 (horizontal tab), and others. Processing the MOF escape sequences as unescaped characters may not be supported by MOF-related tools, and therefore this function plays it safe and uses the MOF escape sequences defined in `DSP0004` as much as possible. The following table shows the MOF escape sequences defined in `DSP0004` and whether they are used (i.e. generated) by this function: ========== ==== =========================================================== MOF escape Used Character sequence ========== ==== =========================================================== \b yes U+0008: Backspace \t yes U+0009: Horizontal tab \n yes U+000A: Line feed \f yes U+000C: Form feed \r yes U+000D: Carriage return \" yes U+0022: Double quote (") (required to be used) \' yes U+0027: Single quote (') \\ yes U+005C: Backslash (\) \x<hex> (1) U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) \X<hex> no U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) ========== ==== =========================================================== (1) Yes, for all other characters in the so called "control range" U+0001..U+001F. """ escaped_str = strvalue # Escape backslash (\) escaped_str = escaped_str.replace('\\', '\\\\') # Escape \b, \t, \n, \f, \r # Note, the Python escape sequences happen to be the same as in MOF escaped_str = escaped_str.\ replace('\b', '\\b').\ replace('\t', '\\t').\ replace('\n', '\\n').\ replace('\f', '\\f').\ replace('\r', '\\r') # Escape remaining control characters (U+0001...U+001F), skipping # U+0008, U+0009, U+000A, U+000C, U+000D that are already handled. # We hard code it to be faster, plus we can easily skip already handled # chars. 
# The generic code would be (not skipping already handled chars): # for cp in range(1, 32): # c = six.unichr(cp) # esc = '\\x{0:04X}'.format(cp) # escaped_str = escaped_str.replace(c, esc) escaped_str = escaped_str.\ replace(u'\u0001', '\\x0001').\ replace(u'\u0002', '\\x0002').\ replace(u'\u0003', '\\x0003').\ replace(u'\u0004', '\\x0004').\ replace(u'\u0005', '\\x0005').\ replace(u'\u0006', '\\x0006').\ replace(u'\u0007', '\\x0007').\ replace(u'\u000B', '\\x000B').\ replace(u'\u000E', '\\x000E').\ replace(u'\u000F', '\\x000F').\ replace(u'\u0010', '\\x0010').\ replace(u'\u0011', '\\x0011').\ replace(u'\u0012', '\\x0012').\ replace(u'\u0013', '\\x0013').\ replace(u'\u0014', '\\x0014').\ replace(u'\u0015', '\\x0015').\ replace(u'\u0016', '\\x0016').\ replace(u'\u0017', '\\x0017').\ replace(u'\u0018', '\\x0018').\ replace(u'\u0019', '\\x0019').\ replace(u'\u001A', '\\x001A').\ replace(u'\u001B', '\\x001B').\ replace(u'\u001C', '\\x001C').\ replace(u'\u001D', '\\x001D').\ replace(u'\u001E', '\\x001E').\ replace(u'\u001F', '\\x001F') # Escape single and double quote escaped_str = escaped_str.replace('"', '\\"') escaped_str = escaped_str.replace("'", "\\'") return escaped_str
[ "def", "_mof_escaped", "(", "strvalue", ")", ":", "# Note: This is a raw docstring because it shows many backslashes, and", "# that avoids having to double them.", "escaped_str", "=", "strvalue", "# Escape backslash (\\)", "escaped_str", "=", "escaped_str", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "# Escape \\b, \\t, \\n, \\f, \\r", "# Note, the Python escape sequences happen to be the same as in MOF", "escaped_str", "=", "escaped_str", ".", "replace", "(", "'\\b'", ",", "'\\\\b'", ")", ".", "replace", "(", "'\\t'", ",", "'\\\\t'", ")", ".", "replace", "(", "'\\n'", ",", "'\\\\n'", ")", ".", "replace", "(", "'\\f'", ",", "'\\\\f'", ")", ".", "replace", "(", "'\\r'", ",", "'\\\\r'", ")", "# Escape remaining control characters (U+0001...U+001F), skipping", "# U+0008, U+0009, U+000A, U+000C, U+000D that are already handled.", "# We hard code it to be faster, plus we can easily skip already handled", "# chars.", "# The generic code would be (not skipping already handled chars):", "# for cp in range(1, 32):", "# c = six.unichr(cp)", "# esc = '\\\\x{0:04X}'.format(cp)", "# escaped_str = escaped_str.replace(c, esc)", "escaped_str", "=", "escaped_str", ".", "replace", "(", "u'\\u0001'", ",", "'\\\\x0001'", ")", ".", "replace", "(", "u'\\u0002'", ",", "'\\\\x0002'", ")", ".", "replace", "(", "u'\\u0003'", ",", "'\\\\x0003'", ")", ".", "replace", "(", "u'\\u0004'", ",", "'\\\\x0004'", ")", ".", "replace", "(", "u'\\u0005'", ",", "'\\\\x0005'", ")", ".", "replace", "(", "u'\\u0006'", ",", "'\\\\x0006'", ")", ".", "replace", "(", "u'\\u0007'", ",", "'\\\\x0007'", ")", ".", "replace", "(", "u'\\u000B'", ",", "'\\\\x000B'", ")", ".", "replace", "(", "u'\\u000E'", ",", "'\\\\x000E'", ")", ".", "replace", "(", "u'\\u000F'", ",", "'\\\\x000F'", ")", ".", "replace", "(", "u'\\u0010'", ",", "'\\\\x0010'", ")", ".", "replace", "(", "u'\\u0011'", ",", "'\\\\x0011'", ")", ".", "replace", "(", "u'\\u0012'", ",", "'\\\\x0012'", ")", ".", "replace", "(", "u'\\u0013'", ",", "'\\\\x0013'", ")", ".", "replace", "(", "u'\\u0014'", ",", "'\\\\x0014'", ")", ".", "replace", "(", "u'\\u0015'", ",", "'\\\\x0015'", ")", ".", "replace", "(", "u'\\u0016'", ",", "'\\\\x0016'", ")", ".", "replace", "(", "u'\\u0017'", ",", "'\\\\x0017'", ")", ".", "replace", "(", "u'\\u0018'", ",", "'\\\\x0018'", ")", ".", "replace", "(", "u'\\u0019'", ",", "'\\\\x0019'", ")", ".", "replace", "(", "u'\\u001A'", ",", "'\\\\x001A'", ")", ".", "replace", "(", "u'\\u001B'", ",", "'\\\\x001B'", ")", ".", "replace", "(", "u'\\u001C'", ",", "'\\\\x001C'", ")", ".", "replace", "(", "u'\\u001D'", ",", "'\\\\x001D'", ")", ".", "replace", "(", "u'\\u001E'", ",", "'\\\\x001E'", ")", ".", "replace", "(", "u'\\u001F'", ",", "'\\\\x001F'", ")", "# Escape single and double quote", "escaped_str", "=", "escaped_str", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", "escaped_str", "=", "escaped_str", ".", "replace", "(", "\"'\"", ",", "\"\\\\'\"", ")", "return", "escaped_str" ]
r""" Return a MOF-escaped string from the input string. Parameters: strvalue (:term:`unicode string`): The string value. Must not be `None`. Special characters must not be backslash-escaped. Details on backslash-escaping: `DSP0004` defines that the character repertoire for MOF string constants is the entire repertoire for the CIM string datatype. That is, the entire Unicode character repertoire except for U+0000. The only character for which `DSP0004` requires the use of a MOF escape sequence in a MOF string constant, is the double quote (because a MOF string constant is enclosed in double quotes). `DSP0004` defines MOF escape sequences for several more characters, but it does not require their use in MOF. For example, it is valid for a MOF string constant to contain the (unescaped) characters U+000D (newline) or U+0009 (horizontal tab), and others. Processing the MOF escape sequences as unescaped characters may not be supported by MOF-related tools, and therefore this function plays it safe and uses the MOF escape sequences defined in `DSP0004` as much as possible. The following table shows the MOF escape sequences defined in `DSP0004` and whether they are used (i.e. generated) by this function: ========== ==== =========================================================== MOF escape Used Character sequence ========== ==== =========================================================== \b yes U+0008: Backspace \t yes U+0009: Horizontal tab \n yes U+000A: Line feed \f yes U+000C: Form feed \r yes U+000D: Carriage return \" yes U+0022: Double quote (") (required to be used) \' yes U+0027: Single quote (') \\ yes U+005C: Backslash (\) \x<hex> (1) U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) \X<hex> no U+<hex>: Any UCS-2 character, where <hex> is one to four hex digits, representing its UCS code position (this form is limited to the UCS-2 character repertoire) ========== ==== =========================================================== (1) Yes, for all other characters in the so called "control range" U+0001..U+001F.
[ "r", "Return", "a", "MOF", "-", "escaped", "string", "from", "the", "input", "string", "." ]
python
train
41.392857
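A self-contained demonstration of the escaping rules above; since the helper is private, this re-implements just the subset needed for the example rather than importing it from pywbem.

def mof_escape_demo(s):
    # Backslash must be escaped first, exactly as in the function above.
    for raw, esc in [("\\", "\\\\"), ("\t", "\\t"), ("\n", "\\n"),
                     ('"', '\\"'), ("'", "\\'")]:
        s = s.replace(raw, esc)
    return s

print(mof_escape_demo('say "hi"\n'))  # prints: say \"hi\"\n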
ihmeuw/vivarium
src/vivarium/framework/population.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/population.py#L99-L150
def update(self, pop: Union[pd.DataFrame, pd.Series]): """Update the simulation's state to match ``pop`` Parameters ---------- pop : The data which should be copied into the simulation's state. If ``pop`` is a DataFrame only those columns included in the view's columns will be used. If ``pop`` is a Series it must have a name that matches one of the view's columns unless the view only has one column in which case the Series will be assumed to refer to that regardless of its name. """ if not pop.empty: if isinstance(pop, pd.Series): if pop.name in self._columns: affected_columns = [pop.name] elif len(self._columns) == 1: affected_columns = self._columns else: raise PopulationError('Cannot update with a Series unless the series name equals a column ' 'name or there is only a single column in the view') else: affected_columns = set(pop.columns) affected_columns = set(affected_columns).intersection(self._columns) state_table = self.manager.get_population(True) if not self.manager.growing: affected_columns = set(affected_columns).intersection(state_table.columns) for c in affected_columns: if c in state_table: v = state_table[c].values if isinstance(pop, pd.Series): v2 = pop.values else: v2 = pop[c].values v[pop.index] = v2 if v.dtype != v2.dtype: # This happens when the population is being grown because extending # the index forces columns that don't have a natural null type # to become 'object' if not self.manager.growing: raise PopulationError('Component corrupting population table. ' 'Old column type: {} New column type: {}'.format(v.dtype, v2.dtype)) v = v.astype(v2.dtype) else: if isinstance(pop, pd.Series): v = pop.values else: v = pop[c].values self.manager._population[c] = v
[ "def", "update", "(", "self", ",", "pop", ":", "Union", "[", "pd", ".", "DataFrame", ",", "pd", ".", "Series", "]", ")", ":", "if", "not", "pop", ".", "empty", ":", "if", "isinstance", "(", "pop", ",", "pd", ".", "Series", ")", ":", "if", "pop", ".", "name", "in", "self", ".", "_columns", ":", "affected_columns", "=", "[", "pop", ".", "name", "]", "elif", "len", "(", "self", ".", "_columns", ")", "==", "1", ":", "affected_columns", "=", "self", ".", "_columns", "else", ":", "raise", "PopulationError", "(", "'Cannot update with a Series unless the series name equals a column '", "'name or there is only a single column in the view'", ")", "else", ":", "affected_columns", "=", "set", "(", "pop", ".", "columns", ")", "affected_columns", "=", "set", "(", "affected_columns", ")", ".", "intersection", "(", "self", ".", "_columns", ")", "state_table", "=", "self", ".", "manager", ".", "get_population", "(", "True", ")", "if", "not", "self", ".", "manager", ".", "growing", ":", "affected_columns", "=", "set", "(", "affected_columns", ")", ".", "intersection", "(", "state_table", ".", "columns", ")", "for", "c", "in", "affected_columns", ":", "if", "c", "in", "state_table", ":", "v", "=", "state_table", "[", "c", "]", ".", "values", "if", "isinstance", "(", "pop", ",", "pd", ".", "Series", ")", ":", "v2", "=", "pop", ".", "values", "else", ":", "v2", "=", "pop", "[", "c", "]", ".", "values", "v", "[", "pop", ".", "index", "]", "=", "v2", "if", "v", ".", "dtype", "!=", "v2", ".", "dtype", ":", "# This happens when the population is being grown because extending", "# the index forces columns that don't have a natural null type", "# to become 'object'", "if", "not", "self", ".", "manager", ".", "growing", ":", "raise", "PopulationError", "(", "'Component corrupting population table. '", "'Old column type: {} New column type: {}'", ".", "format", "(", "v", ".", "dtype", ",", "v2", ".", "dtype", ")", ")", "v", "=", "v", ".", "astype", "(", "v2", ".", "dtype", ")", "else", ":", "if", "isinstance", "(", "pop", ",", "pd", ".", "Series", ")", ":", "v", "=", "pop", ".", "values", "else", ":", "v", "=", "pop", "[", "c", "]", ".", "values", "self", ".", "manager", ".", "_population", "[", "c", "]", "=", "v" ]
Update the simulation's state to match ``pop`` Parameters ---------- pop : The data which should be copied into the simulation's state. If ``pop`` is a DataFrame only those columns included in the view's columns will be used. If ``pop`` is a Series it must have a name that matches one of the view's columns unless the view only has one column in which case the Series will be assumed to refer to that regardless of its name.
[ "Update", "the", "simulation", "s", "state", "to", "match", "pop" ]
python
train
48.076923
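A hedged sketch of how this update() is typically driven from a vivarium component's time-step listener; the column name and event wiring are illustrative.

def on_time_step(event, population_view):
    # population_view is assumed to have been created with
    # builder.population.get_view(['alive']) during setup.
    pop = population_view.get(event.index)
    pop["alive"] = "dead"                 # illustrative bulk change
    population_view.update(pop["alive"])  # Series name matches the column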
numenta/htmresearch
htmresearch/frameworks/layers/l2_l4_network_creation.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/l2_l4_network_creation.py#L185-L293
def createL4L2Column(network, networkConfig, suffix=""): """ Create a single column containing one L4 and one L2. networkConfig is a dict that must contain the following keys (additional keys ok): { "enableFeedback": True, "externalInputSize": 1024, "sensorInputSize": 1024, "L4RegionType": "py.ApicalTMPairRegion", "L4Params": { <constructor parameters for the L4 region> }, "L2Params": { <constructor parameters for ColumnPoolerRegion> }, "lateralSPParams": { <constructor parameters for optional SPRegion> }, "feedForwardSPParams": { <constructor parameters for optional SPRegion> } } Region names are externalInput, sensorInput, L4Column, and ColumnPoolerRegion. Each name has an optional string suffix appended to it. Configuration options: "lateralSPParams" and "feedForwardSPParams" are optional. If included, appropriate spatial pooler regions will be added to the network. If externalInputSize is 0, the externalInput sensor (and SP if appropriate) will NOT be created. In this case it is expected that L4 is a sequence memory region (e.g. ApicalTMSequenceRegion) """ externalInputName = "externalInput" + suffix sensorInputName = "sensorInput" + suffix L4ColumnName = "L4Column" + suffix L2ColumnName = "L2Column" + suffix L4Params = copy.deepcopy(networkConfig["L4Params"]) L4Params["basalInputWidth"] = networkConfig["externalInputSize"] L4Params["apicalInputWidth"] = networkConfig["L2Params"]["cellCount"] if networkConfig["externalInputSize"] > 0: network.addRegion( externalInputName, "py.RawSensor", json.dumps({"outputWidth": networkConfig["externalInputSize"]})) network.addRegion( sensorInputName, "py.RawSensor", json.dumps({"outputWidth": networkConfig["sensorInputSize"]})) # Fixup network to include SP, if defined in networkConfig if networkConfig["externalInputSize"] > 0: _addLateralSPRegion(network, networkConfig, suffix) _addFeedForwardSPRegion(network, networkConfig, suffix) network.addRegion( L4ColumnName, networkConfig["L4RegionType"], json.dumps(L4Params)) network.addRegion( L2ColumnName, "py.ColumnPoolerRegion", json.dumps(networkConfig["L2Params"])) # Set phases appropriately so regions are executed in the proper sequence # This is required when we create multiple columns - the order of execution # is not the same as the order of region creation.
if networkConfig["externalInputSize"] > 0: network.setPhases(externalInputName,[0]) network.setPhases(sensorInputName,[0]) _setLateralSPPhases(network, networkConfig) _setFeedForwardSPPhases(network, networkConfig) # L4 and L2 regions always have phases 2 and 3, respectively network.setPhases(L4ColumnName,[2]) network.setPhases(L2ColumnName,[3]) # Link SP region(s), if applicable if networkConfig["externalInputSize"] > 0: _linkLateralSPRegion(network, networkConfig, externalInputName, L4ColumnName) _linkFeedForwardSPRegion(network, networkConfig, sensorInputName, L4ColumnName) # Link L4 to L2 network.link(L4ColumnName, L2ColumnName, "UniformLink", "", srcOutput="activeCells", destInput="feedforwardInput") network.link(L4ColumnName, L2ColumnName, "UniformLink", "", srcOutput="predictedActiveCells", destInput="feedforwardGrowthCandidates") # Link L2 feedback to L4 if networkConfig.get("enableFeedback", True): network.link(L2ColumnName, L4ColumnName, "UniformLink", "", srcOutput="feedForwardOutput", destInput="apicalInput", propagationDelay=1) # Link reset output to L2 and L4 network.link(sensorInputName, L2ColumnName, "UniformLink", "", srcOutput="resetOut", destInput="resetIn") network.link(sensorInputName, L4ColumnName, "UniformLink", "", srcOutput="resetOut", destInput="resetIn") enableProfiling(network) return network
[ "def", "createL4L2Column", "(", "network", ",", "networkConfig", ",", "suffix", "=", "\"\"", ")", ":", "externalInputName", "=", "\"externalInput\"", "+", "suffix", "sensorInputName", "=", "\"sensorInput\"", "+", "suffix", "L4ColumnName", "=", "\"L4Column\"", "+", "suffix", "L2ColumnName", "=", "\"L2Column\"", "+", "suffix", "L4Params", "=", "copy", ".", "deepcopy", "(", "networkConfig", "[", "\"L4Params\"", "]", ")", "L4Params", "[", "\"basalInputWidth\"", "]", "=", "networkConfig", "[", "\"externalInputSize\"", "]", "L4Params", "[", "\"apicalInputWidth\"", "]", "=", "networkConfig", "[", "\"L2Params\"", "]", "[", "\"cellCount\"", "]", "if", "networkConfig", "[", "\"externalInputSize\"", "]", ">", "0", ":", "network", ".", "addRegion", "(", "externalInputName", ",", "\"py.RawSensor\"", ",", "json", ".", "dumps", "(", "{", "\"outputWidth\"", ":", "networkConfig", "[", "\"externalInputSize\"", "]", "}", ")", ")", "network", ".", "addRegion", "(", "sensorInputName", ",", "\"py.RawSensor\"", ",", "json", ".", "dumps", "(", "{", "\"outputWidth\"", ":", "networkConfig", "[", "\"sensorInputSize\"", "]", "}", ")", ")", "# Fixup network to include SP, if defined in networkConfig", "if", "networkConfig", "[", "\"externalInputSize\"", "]", ">", "0", ":", "_addLateralSPRegion", "(", "network", ",", "networkConfig", ",", "suffix", ")", "_addFeedForwardSPRegion", "(", "network", ",", "networkConfig", ",", "suffix", ")", "network", ".", "addRegion", "(", "L4ColumnName", ",", "networkConfig", "[", "\"L4RegionType\"", "]", ",", "json", ".", "dumps", "(", "L4Params", ")", ")", "network", ".", "addRegion", "(", "L2ColumnName", ",", "\"py.ColumnPoolerRegion\"", ",", "json", ".", "dumps", "(", "networkConfig", "[", "\"L2Params\"", "]", ")", ")", "# Set phases appropriately so regions are executed in the proper sequence", "# This is required when we create multiple columns - the order of execution", "# is not the same as the order of region creation.", "if", "networkConfig", "[", "\"externalInputSize\"", "]", ">", "0", ":", "network", ".", "setPhases", "(", "externalInputName", ",", "[", "0", "]", ")", "network", ".", "setPhases", "(", "sensorInputName", ",", "[", "0", "]", ")", "_setLateralSPPhases", "(", "network", ",", "networkConfig", ")", "_setFeedForwardSPPhases", "(", "network", ",", "networkConfig", ")", "# L4 and L2 regions always have phases 2 and 3, respectively", "network", ".", "setPhases", "(", "L4ColumnName", ",", "[", "2", "]", ")", "network", ".", "setPhases", "(", "L2ColumnName", ",", "[", "3", "]", ")", "# Link SP region(s), if applicable", "if", "networkConfig", "[", "\"externalInputSize\"", "]", ">", "0", ":", "_linkLateralSPRegion", "(", "network", ",", "networkConfig", ",", "externalInputName", ",", "L4ColumnName", ")", "_linkFeedForwardSPRegion", "(", "network", ",", "networkConfig", ",", "sensorInputName", ",", "L4ColumnName", ")", "# Link L4 to L2", "network", ".", "link", "(", "L4ColumnName", ",", "L2ColumnName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"activeCells\"", ",", "destInput", "=", "\"feedforwardInput\"", ")", "network", ".", "link", "(", "L4ColumnName", ",", "L2ColumnName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"predictedActiveCells\"", ",", "destInput", "=", "\"feedforwardGrowthCandidates\"", ")", "# Link L2 feedback to L4", "if", "networkConfig", ".", "get", "(", "\"enableFeedback\"", ",", "True", ")", ":", "network", ".", "link", "(", "L2ColumnName", ",", "L4ColumnName", ",", "\"UniformLink\"", ",", "\"\"", ",", 
"srcOutput", "=", "\"feedForwardOutput\"", ",", "destInput", "=", "\"apicalInput\"", ",", "propagationDelay", "=", "1", ")", "# Link reset output to L2 and L4", "network", ".", "link", "(", "sensorInputName", ",", "L2ColumnName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"resetOut\"", ",", "destInput", "=", "\"resetIn\"", ")", "network", ".", "link", "(", "sensorInputName", ",", "L4ColumnName", ",", "\"UniformLink\"", ",", "\"\"", ",", "srcOutput", "=", "\"resetOut\"", ",", "destInput", "=", "\"resetIn\"", ")", "enableProfiling", "(", "network", ")", "return", "network" ]
Create a single column containing one L4 and one L2.

networkConfig is a dict that must contain the following keys (additional
keys ok):

  {
    "enableFeedback": True,
    "externalInputSize": 1024,
    "sensorInputSize": 1024,
    "L4RegionType": "py.ApicalTMPairRegion",
    "L4Params": {
      <constructor parameters for the L4 region>
    },
    "L2Params": {
      <constructor parameters for ColumnPoolerRegion>
    },
    "lateralSPParams": {
      <constructor parameters for optional SPRegion>
    },
    "feedForwardSPParams": {
      <constructor parameters for optional SPRegion>
    }
  }

Region names are externalInput, sensorInput, L4Column, and L2Column.
Each name has an optional string suffix appended to it.

Configuration options:

  "lateralSPParams" and "feedForwardSPParams" are optional. If included,
  appropriate spatial pooler regions will be added to the network.

  If externalInputSize is 0, the externalInput sensor (and SP if appropriate)
  will NOT be created. In this case it is expected that L4 is a sequence
  memory region (e.g. ApicalTMSequenceRegion).
[ "Create", "a", "a", "single", "column", "containing", "one", "L4", "and", "one", "L2", "." ]
python
train
36.073394
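A minimal sketch of how the createL4L2Column factory above is meant to be driven, assuming NuPIC's network engine is available. The parameter dicts below are placeholders (real L4Params/L2Params carry many model-specific keys); only L2Params["cellCount"], which the factory reads to size the apical input, is filled in.

from nupic.engine import Network  # assumption: NuPIC network engine installed

networkConfig = {
    "enableFeedback": True,
    "externalInputSize": 1024,
    "sensorInputSize": 1024,
    "L4RegionType": "py.ApicalTMPairRegion",
    "L4Params": {},                    # placeholder: L4 constructor parameters
    "L2Params": {"cellCount": 4096},   # placeholder: ColumnPoolerRegion parameters
}

# Build one L4/L2 column; the suffix distinguishes columns in multi-column nets.
net = createL4L2Column(Network(), networkConfig, suffix="_0")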
gatkin/declxml
declxml.py
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L287-L311
def parse_from_string( root_processor, # type: RootProcessor xml_string # type: Text ): # type: (...) -> Any """ Parse the XML string using the processor starting from the root of the document. :param xml_string: XML string to parse. See also :func:`declxml.parse_from_file` """ if not _is_valid_root_processor(root_processor): raise InvalidRootProcessor('Invalid root processor') parseable_xml_string = xml_string # type: Union[Text, bytes] if _PY2 and isinstance(xml_string, Text): parseable_xml_string = xml_string.encode('utf-8') root = ET.fromstring(parseable_xml_string) _xml_namespace_strip(root) state = _ProcessorState() state.push_location(root_processor.element_path) return root_processor.parse_at_root(root, state)
[ "def", "parse_from_string", "(", "root_processor", ",", "# type: RootProcessor", "xml_string", "# type: Text", ")", ":", "# type: (...) -> Any", "if", "not", "_is_valid_root_processor", "(", "root_processor", ")", ":", "raise", "InvalidRootProcessor", "(", "'Invalid root processor'", ")", "parseable_xml_string", "=", "xml_string", "# type: Union[Text, bytes]", "if", "_PY2", "and", "isinstance", "(", "xml_string", ",", "Text", ")", ":", "parseable_xml_string", "=", "xml_string", ".", "encode", "(", "'utf-8'", ")", "root", "=", "ET", ".", "fromstring", "(", "parseable_xml_string", ")", "_xml_namespace_strip", "(", "root", ")", "state", "=", "_ProcessorState", "(", ")", "state", ".", "push_location", "(", "root_processor", ".", "element_path", ")", "return", "root_processor", ".", "parse_at_root", "(", "root", ",", "state", ")" ]
Parse the XML string using the processor starting from the root of the document. :param xml_string: XML string to parse. See also :func:`declxml.parse_from_file`
[ "Parse", "the", "XML", "string", "using", "the", "processor", "starting", "from", "the", "root", "of", "the", "document", "." ]
python
train
31.96
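A short usage sketch for parse_from_string, using the processor factories (xml.dictionary, xml.string, xml.integer) documented for declxml; the XML payload is an illustrative example.

import declxml as xml

book_processor = xml.dictionary('book', [
    xml.string('title'),
    xml.integer('year'),
])

book = xml.parse_from_string(book_processor, """
<book>
    <title>I, Robot</title>
    <year>1950</year>
</book>
""")
# book == {'title': 'I, Robot', 'year': 1950}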
f213/rumetr-client
rumetr/roometr.py
https://github.com/f213/rumetr-client/blob/5180152bcb2eed8246b88035db7c0bb1fe603166/rumetr/roometr.py#L95-L102
def post(self, url: str, data: str, expected_status_code=201): """ Do a POST request """ r = requests.post(self._format_url(url), json=data, headers=self.headers, timeout=TIMEOUT) self._check_response(r, expected_status_code) return r.json()
[ "def", "post", "(", "self", ",", "url", ":", "str", ",", "data", ":", "str", ",", "expected_status_code", "=", "201", ")", ":", "r", "=", "requests", ".", "post", "(", "self", ".", "_format_url", "(", "url", ")", ",", "json", "=", "data", ",", "headers", "=", "self", ".", "headers", ",", "timeout", "=", "TIMEOUT", ")", "self", ".", "_check_response", "(", "r", ",", "expected_status_code", ")", "return", "r", ".", "json", "(", ")" ]
Do a POST request
[ "Do", "a", "POST", "request" ]
python
train
35.375
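A hedged usage sketch for the wrapper above; `client` stands for an already-configured Rumetr API instance, and the endpoint and payload are invented for illustration.

# Hypothetical endpoint and payload; a 201 response is expected by default.
response = client.post('developers/acme/complexes/', data={'name': 'Acme Towers'})
print(response)  # parsed JSON body of the created resource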
horazont/aioxmpp
aioxmpp/stream.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stream.py#L1738-L1789
def unregister_presence_callback(self, type_, from_): """ Unregister a callback previously registered with :meth:`register_presence_callback`. :param type_: Presence type to listen for. :type type_: :class:`~.PresenceType` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None`. :raises KeyError: if no callback is currently registered for the given ``(type_, from_)`` pair :raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to the `from_` arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering a wildcard match with `from_` set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.PresenceType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead. """ type_ = self._coerce_enum(type_, structs.PresenceType) warnings.warn( "unregister_presence_callback is deprecated; use " "aioxmpp.dispatcher.SimplePresenceDispatcher or " "aioxmpp.PresenceClient instead", DeprecationWarning, stacklevel=2 ) self._xxx_presence_dispatcher.unregister_callback( type_, from_, )
[ "def", "unregister_presence_callback", "(", "self", ",", "type_", ",", "from_", ")", ":", "type_", "=", "self", ".", "_coerce_enum", "(", "type_", ",", "structs", ".", "PresenceType", ")", "warnings", ".", "warn", "(", "\"unregister_presence_callback is deprecated; use \"", "\"aioxmpp.dispatcher.SimplePresenceDispatcher or \"", "\"aioxmpp.PresenceClient instead\"", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "self", ".", "_xxx_presence_dispatcher", ".", "unregister_callback", "(", "type_", ",", "from_", ",", ")" ]
Unregister a callback previously registered with :meth:`register_presence_callback`. :param type_: Presence type to listen for. :type type_: :class:`~.PresenceType` :param from_: Sender JID to listen for, or :data:`None` for a wildcard match. :type from_: :class:`~aioxmpp.JID` or :data:`None`. :raises KeyError: if no callback is currently registered for the given ``(type_, from_)`` pair :raises ValueError: if `type_` is not a valid :class:`~.PresenceType` (and cannot be cast to a :class:`~.PresenceType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to the `from_` arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering a wildcard match with `from_` set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.PresenceType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated. It is recommended to use :class:`aioxmpp.PresenceClient` instead.
[ "Unregister", "a", "callback", "previously", "registered", "with", ":", "meth", ":", "register_presence_callback", "." ]
python
train
39.961538
webrecorder/pywb
pywb/apps/frontendapp.py
https://github.com/webrecorder/pywb/blob/77f8bb647639dd66f6b92b7a9174c28810e4b1d9/pywb/apps/frontendapp.py#L517-L526
def create_app(cls, port): """Create a new instance of FrontEndApp that listens on port with a hostname of 0.0.0.0 :param int port: The port FrontEndApp is to listen on :return: A new instance of FrontEndApp wrapped in GeventServer :rtype: GeventServer """ app = FrontEndApp() app_server = GeventServer(app, port=port, hostname='0.0.0.0') return app_server
[ "def", "create_app", "(", "cls", ",", "port", ")", ":", "app", "=", "FrontEndApp", "(", ")", "app_server", "=", "GeventServer", "(", "app", ",", "port", "=", "port", ",", "hostname", "=", "'0.0.0.0'", ")", "return", "app_server" ]
Create a new instance of FrontEndApp that listens on port with a hostname of 0.0.0.0 :param int port: The port FrontEndApp is to listen on :return: A new instance of FrontEndApp wrapped in GeventServer :rtype: GeventServer
[ "Create", "a", "new", "instance", "of", "FrontEndApp", "that", "listens", "on", "port", "with", "a", "hostname", "of", "0", ".", "0", ".", "0", ".", "0" ]
python
train
41.2
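A minimal sketch of the classmethod in use; note that create_app returns the GeventServer wrapper, not the bare WSGI app.

from pywb.apps.frontendapp import FrontEndApp

# Serve pywb on 0.0.0.0:8080; the return value wraps FrontEndApp in GeventServer.
server = FrontEndApp.create_app(8080)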
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/nbformat/v1/nbbase.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/nbformat/v1/nbbase.py#L55-L61
def new_text_cell(text=None): """Create a new text cell.""" cell = NotebookNode() if text is not None: cell.text = unicode(text) cell.cell_type = u'text' return cell
[ "def", "new_text_cell", "(", "text", "=", "None", ")", ":", "cell", "=", "NotebookNode", "(", ")", "if", "text", "is", "not", "None", ":", "cell", ".", "text", "=", "unicode", "(", "text", ")", "cell", ".", "cell_type", "=", "u'text'", "return", "cell" ]
Create a new text cell.
[ "Create", "a", "new", "text", "cell", "." ]
python
test
26.714286
SignalN/language
language/ngrams.py
https://github.com/SignalN/language/blob/5c50c78f65bcc2c999b44d530e7412185248352d/language/ngrams.py#L131-L145
def __similarity(s1, s2, ngrams_fn, n=3):
    """ The fraction of n-grams matching between two sequences

    Args:
        s1: a string
        s2: another string
        ngrams_fn: a function that returns the n-grams of a string
        n: an int for the n in n-gram

    Returns:
        float: the fraction of n-grams matching
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / (len(ngrams1) + len(ngrams2))
[ "def", "__similarity", "(", "s1", ",", "s2", ",", "ngrams_fn", ",", "n", "=", "3", ")", ":", "ngrams1", ",", "ngrams2", "=", "set", "(", "ngrams_fn", "(", "s1", ",", "n", "=", "n", ")", ")", ",", "set", "(", "ngrams_fn", "(", "s2", ",", "n", "=", "n", ")", ")", "matches", "=", "ngrams1", ".", "intersection", "(", "ngrams2", ")", "return", "2", "*", "len", "(", "matches", ")", "/", "(", "len", "(", "ngrams1", ")", "+", "len", "(", "ngrams2", ")", ")" ]
The fraction of n-grams matching between two sequences

Args:
    s1: a string
    s2: another string
    ngrams_fn: a function that returns the n-grams of a string
    n: an int for the n in n-gram

Returns:
    float: the fraction of n-grams matching
[ "The", "fraction", "of", "n", "-", "grams", "matching", "between", "two", "sequences" ]
python
train
31
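Since __similarity is name-mangled, here is a standalone re-implementation (Python 3 division assumed) with a worked example; char_ngrams is a stand-in for the module's n-gram helper.

def char_ngrams(s, n=3):
    # Stand-in for the module's n-gram function: all character n-grams of s.
    return [s[i:i + n] for i in range(len(s) - n + 1)]

ngrams1 = set(char_ngrams("color"))   # {'col', 'olo', 'lor'}
ngrams2 = set(char_ngrams("colour"))  # {'col', 'olo', 'lou', 'our'}
matches = ngrams1 & ngrams2           # {'col', 'olo'}
score = 2 * len(matches) / (len(ngrams1) + len(ngrams2))
print(score)  # 2*2 / (3+4) = 0.5714...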
zyga/morris
morris/__init__.py
https://github.com/zyga/morris/blob/7cd6da662c8c95b93b5fb8bb25eae8686becf31a/morris/__init__.py#L694-L706
def fire(self, args, kwargs): """ Fire this signal with the specified arguments and keyword arguments. Typically this is used by using :meth:`__call__()` on this object which is more natural as it does all the argument packing/unpacking transparently. """ for info in self._listeners[:]: if info.pass_signal: info.listener(*args, signal=self, **kwargs) else: info.listener(*args, **kwargs)
[ "def", "fire", "(", "self", ",", "args", ",", "kwargs", ")", ":", "for", "info", "in", "self", ".", "_listeners", "[", ":", "]", ":", "if", "info", ".", "pass_signal", ":", "info", ".", "listener", "(", "*", "args", ",", "signal", "=", "self", ",", "*", "*", "kwargs", ")", "else", ":", "info", ".", "listener", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Fire this signal with the specified arguments and keyword arguments. Typically this is used by using :meth:`__call__()` on this object which is more natural as it does all the argument packing/unpacking transparently.
[ "Fire", "this", "signal", "with", "the", "specified", "arguments", "and", "keyword", "arguments", "." ]
python
train
37.692308
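A hedged sketch of the signal in use; it assumes morris's documented decorator-style API (signals are typically fired through __call__, which delegates to fire() as the docstring above notes), and the class and handler names are made up.

from morris import signal  # assumption: decorator-style API as documented

class Downloader:
    @signal
    def on_progress(self, pct):
        """Fired as a download advances."""

def report(pct):
    print("progress:", pct)

dl = Downloader()
dl.on_progress.connect(report)  # register a listener
dl.on_progress(42)              # __call__ packs the args and delegates to fire()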
angr/angr
angr/storage/paged_memory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/storage/paged_memory.py#L738-L749
def store_memory_object(self, mo, overwrite=True):
    """
    This function optimizes a large store by storing a single reference to the :class:`SimMemoryObject`
    instead of one for each byte.

    :param mo: the memory object to store
    :param overwrite: whether an existing object covering the same bytes may be overwritten
    """

    for p in self._containing_pages_mo(mo):
        self._apply_object_to_page(p, mo, overwrite=overwrite)

    self._update_range_mappings(mo.base, mo.object, mo.length)
[ "def", "store_memory_object", "(", "self", ",", "mo", ",", "overwrite", "=", "True", ")", ":", "for", "p", "in", "self", ".", "_containing_pages_mo", "(", "mo", ")", ":", "self", ".", "_apply_object_to_page", "(", "p", ",", "mo", ",", "overwrite", "=", "overwrite", ")", "self", ".", "_update_range_mappings", "(", "mo", ".", "base", ",", "mo", ".", "object", ",", "mo", ".", "length", ")" ]
This function optimizes a large store by storing a single reference to the :class:`SimMemoryObject` instead of one for each byte.

:param mo: the memory object to store
:param overwrite: whether an existing object covering the same bytes may be overwritten
[ "This", "function", "optimizes", "a", "large", "store", "by", "storing", "a", "single", "reference", "to", "the", ":", "class", ":", "SimMemoryObject", "instead", "of", "one", "for", "each", "byte", "." ]
python
train
37.583333
snare/voltron
voltron/core.py
https://github.com/snare/voltron/blob/4ee3cbe6f7c1e38303f5dc6114c48b60217253c3/voltron/core.py#L174-L191
def stop(self): """ Stop the server. """ log.debug("Stopping listeners") self.queue_lock.acquire() for s in self.listeners: log.debug("Stopping {}".format(s)) s.shutdown() s.socket.close() self.cancel_queue() for t in self.threads: t.join() self.listeners = [] self.threads = [] self.is_running = False self.queue_lock.release() log.debug("Listeners stopped and threads joined")
[ "def", "stop", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Stopping listeners\"", ")", "self", ".", "queue_lock", ".", "acquire", "(", ")", "for", "s", "in", "self", ".", "listeners", ":", "log", ".", "debug", "(", "\"Stopping {}\"", ".", "format", "(", "s", ")", ")", "s", ".", "shutdown", "(", ")", "s", ".", "socket", ".", "close", "(", ")", "self", ".", "cancel_queue", "(", ")", "for", "t", "in", "self", ".", "threads", ":", "t", ".", "join", "(", ")", "self", ".", "listeners", "=", "[", "]", "self", ".", "threads", "=", "[", "]", "self", ".", "is_running", "=", "False", "self", ".", "queue_lock", ".", "release", "(", ")", "log", ".", "debug", "(", "\"Listeners stopped and threads joined\"", ")" ]
Stop the server.
[ "Stop", "the", "server", "." ]
python
train
28.5
SHTOOLS/SHTOOLS
pyshtools/shclasses/shtensor.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shtensor.py#L1394-L1450
def plot_eigh2(self, colorbar=True, cb_orientation='vertical',
               cb_label=None, ax=None, show=True, fname=None,
               **kwargs):
    """
    Plot the second eigenvalue of the horizontal tensor.

    Usage
    -----
    x.plot_eigh2([tick_interval, xlabel, ylabel, ax, colorbar,
                  cb_orientation, cb_label, show, fname])

    Parameters
    ----------
    tick_interval : list or tuple, optional, default = [30, 30]
        Intervals to use when plotting the x and y ticks. If set to None,
        ticks will not be plotted.
    xlabel : str, optional, default = 'longitude'
        Label for the longitude axis.
    ylabel : str, optional, default = 'latitude'
        Label for the latitude axis.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    colorbar : bool, optional, default = True
        If True, plot a colorbar.
    cb_orientation : str, optional, default = 'vertical'
        Orientation of the colorbar: either 'vertical' or 'horizontal'.
    cb_label : str, optional, default = '$\lambda_{h2}$, Eotvos$^{-1}$'
        Text label for the colorbar.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, and if ax is not specified, save the image to the
        specified file.
    kwargs : optional
        Keyword arguments that will be sent to the SHGrid.plot()
        and plt.imshow() methods.
    """
    if cb_label is None:
        cb_label = self._eigh2_label

    if self.eigh2 is None:
        self.compute_eigh()

    if ax is None:
        fig, axes = self.eigh2.plot(colorbar=colorbar,
                                    cb_orientation=cb_orientation,
                                    cb_label=cb_label, show=False,
                                    **kwargs)
        if show:
            fig.show()

        if fname is not None:
            fig.savefig(fname)
        return fig, axes

    else:
        self.eigh2.plot(colorbar=colorbar, cb_orientation=cb_orientation,
                        cb_label=cb_label, ax=ax, **kwargs)
[ "def", "plot_eigh2", "(", "self", ",", "colorbar", "=", "True", ",", "cb_orientation", "=", "'vertical'", ",", "cb_label", "=", "None", ",", "ax", "=", "None", ",", "show", "=", "True", ",", "fname", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "cb_label", "is", "None", ":", "cb_label", "=", "self", ".", "_eigh2_label", "if", "self", ".", "eigh2", "is", "None", ":", "self", ".", "compute_eigh", "(", ")", "if", "ax", "is", "None", ":", "fig", ",", "axes", "=", "self", ".", "eigh2", ".", "plot", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "cb_label", "=", "cb_label", ",", "show", "=", "False", ",", "*", "*", "kwargs", ")", "if", "show", ":", "fig", ".", "show", "(", ")", "if", "fname", "is", "not", "None", ":", "fig", ".", "savefig", "(", "fname", ")", "return", "fig", ",", "axes", "else", ":", "self", ".", "eigh2", ".", "plot", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "cb_label", "=", "cb_label", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")" ]
Plot the second eigenvalue of the horizontal tensor.

Usage
-----
x.plot_eigh2([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation,
              cb_label, show, fname])

Parameters
----------
tick_interval : list or tuple, optional, default = [30, 30]
    Intervals to use when plotting the x and y ticks. If set to None,
    ticks will not be plotted.
xlabel : str, optional, default = 'longitude'
    Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
    Label for the latitude axis.
ax : matplotlib axes object, optional, default = None
    A single matplotlib axes object where the plot will appear.
colorbar : bool, optional, default = True
    If True, plot a colorbar.
cb_orientation : str, optional, default = 'vertical'
    Orientation of the colorbar: either 'vertical' or 'horizontal'.
cb_label : str, optional, default = '$\lambda_{h2}$, Eotvos$^{-1}$'
    Text label for the colorbar.
show : bool, optional, default = True
    If True, plot the image to the screen.
fname : str, optional, default = None
    If present, and if ax is not specified, save the image to the
    specified file.
kwargs : optional
    Keyword arguments that will be sent to the SHGrid.plot()
    and plt.imshow() methods.
[ "Plot", "the", "second", "eigenvalue", "of", "the", "horizontal", "tensor", "." ]
python
train
40.526316
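A hedged usage sketch: `tensor` is assumed to be a pyshtools tensor instance on which compute_eigh()/eigh2 are defined, and the output filename is illustrative.

# Render the second horizontal eigenvalue without opening a window, then save.
fig, axes = tensor.plot_eigh2(colorbar=True, cb_orientation='horizontal',
                              show=False, fname='eigh2.png')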
lowandrew/OLCTools
sipprCommon/objectprep.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/sipprCommon/objectprep.py#L11-L33
def objectprep(self):
    """
    Creates fastq files from an in-progress Illumina MiSeq run or creates an
    object and moves files appropriately
    """
    # Create .fastq files if necessary. Otherwise create the metadata object
    if self.bcltofastq:
        if self.customsamplesheet:
            assert os.path.isfile(self.customsamplesheet), 'Cannot find custom sample sheet as specified {}' \
                .format(self.customsamplesheet)
        # Create the FASTQ files
        self.samples = fastqCreator.CreateFastq(self)
        # Create a dictionary of the object
        samples_dict = vars(self.samples)
        # Extract the required information from the dictionary
        self.index = samples_dict['index']
        self.index_length = samples_dict['indexlength']
        self.forward = samples_dict['forwardlength']
        self.reverse = samples_dict['reverselength']
        self.forwardlength = samples_dict['forward']
        self.reverselength = samples_dict['reverse']
        self.header = samples_dict['header']
    else:
        self.samples = createObject.ObjectCreation(self)
[ "def", "objectprep", "(", "self", ")", ":", "# Create .fastq files if necessary. Otherwise create the metadata object", "if", "self", ".", "bcltofastq", ":", "if", "self", ".", "customsamplesheet", ":", "assert", "os", ".", "path", ".", "isfile", "(", "self", ".", "customsamplesheet", ")", ",", "'Cannot find custom sample sheet as specified {}'", ".", "format", "(", "self", ".", "customsamplesheet", ")", "# Create the FASTQ files", "self", ".", "samples", "=", "fastqCreator", ".", "CreateFastq", "(", "self", ")", "# Create a dictionary of the object", "samples_dict", "=", "vars", "(", "self", ".", "samples", ")", "# Extract the required information from the dictionary", "self", ".", "index", "=", "samples_dict", "[", "'index'", "]", "self", ".", "index_length", "=", "samples_dict", "[", "'indexlength'", "]", "self", ".", "forward", "=", "samples_dict", "[", "'forwardlength'", "]", "self", ".", "reverse", "=", "samples_dict", "[", "'reverselength'", "]", "self", ".", "forwardlength", "=", "samples_dict", "[", "'forward'", "]", "self", ".", "reverselength", "=", "samples_dict", "[", "'reverse'", "]", "self", ".", "header", "=", "samples_dict", "[", "'header'", "]", "else", ":", "self", ".", "samples", "=", "createObject", ".", "ObjectCreation", "(", "self", ")" ]
Creates fastq files from an in-progress Illumina MiSeq run or creates an object
and moves files appropriately
[ "Creates", "fastq", "files", "from", "an", "in", "-", "progress", "Illumina", "MiSeq", "run", "or", "create", "an", "object", "and", "moves", "files", "appropriately" ]
python
train
50.869565
datakortet/dkfileutils
tasks.py
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/tasks.py#L127-L143
def watch(ctx): """Automatically run build whenever a relevant file changes. """ watcher = Watcher(ctx) watcher.watch_directory( path='{pkg.source_less}', ext='.less', action=lambda e: build(ctx, less=True) ) watcher.watch_directory( path='{pkg.source_js}', ext='.jsx', action=lambda e: build(ctx, js=True) ) watcher.watch_directory( path='{pkg.docs}', ext='.rst', action=lambda e: build(ctx, docs=True) ) watcher.start()
[ "def", "watch", "(", "ctx", ")", ":", "watcher", "=", "Watcher", "(", "ctx", ")", "watcher", ".", "watch_directory", "(", "path", "=", "'{pkg.source_less}'", ",", "ext", "=", "'.less'", ",", "action", "=", "lambda", "e", ":", "build", "(", "ctx", ",", "less", "=", "True", ")", ")", "watcher", ".", "watch_directory", "(", "path", "=", "'{pkg.source_js}'", ",", "ext", "=", "'.jsx'", ",", "action", "=", "lambda", "e", ":", "build", "(", "ctx", ",", "js", "=", "True", ")", ")", "watcher", ".", "watch_directory", "(", "path", "=", "'{pkg.docs}'", ",", "ext", "=", "'.rst'", ",", "action", "=", "lambda", "e", ":", "build", "(", "ctx", ",", "docs", "=", "True", ")", ")", "watcher", ".", "start", "(", ")" ]
Automatically run build whenever a relevant file changes.
[ "Automatically", "run", "build", "whenever", "a", "relevant", "file", "changes", "." ]
python
train
29
tylerbutler/engineer
engineer/devtools/theme_tools.py
https://github.com/tylerbutler/engineer/blob/8884f587297f37646c40e5553174852b444a4024/engineer/devtools/theme_tools.py#L47-L65
def list_theme(): """List all available Engineer themes.""" from engineer.themes import ThemeManager themes = ThemeManager.themes() col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2) for t in themes.itervalues()])) themes = ThemeManager.themes_by_finder() for finder in sorted(themes.iterkeys()): if len(themes[finder]) > 0: puts("%s: " % finder) for theme in sorted(themes[finder], key=lambda _: _.id): with indent(4): puts( columns( [colored.cyan("%s:" % theme.id), col1], [colored.white(theme.root_path, bold=True), col2] ) )
[ "def", "list_theme", "(", ")", ":", "from", "engineer", ".", "themes", "import", "ThemeManager", "themes", "=", "ThemeManager", ".", "themes", "(", ")", "col1", ",", "col2", "=", "map", "(", "max", ",", "zip", "(", "*", "[", "(", "len", "(", "t", ".", "id", ")", "+", "2", ",", "len", "(", "t", ".", "root_path", ")", "+", "2", ")", "for", "t", "in", "themes", ".", "itervalues", "(", ")", "]", ")", ")", "themes", "=", "ThemeManager", ".", "themes_by_finder", "(", ")", "for", "finder", "in", "sorted", "(", "themes", ".", "iterkeys", "(", ")", ")", ":", "if", "len", "(", "themes", "[", "finder", "]", ")", ">", "0", ":", "puts", "(", "\"%s: \"", "%", "finder", ")", "for", "theme", "in", "sorted", "(", "themes", "[", "finder", "]", ",", "key", "=", "lambda", "_", ":", "_", ".", "id", ")", ":", "with", "indent", "(", "4", ")", ":", "puts", "(", "columns", "(", "[", "colored", ".", "cyan", "(", "\"%s:\"", "%", "theme", ".", "id", ")", ",", "col1", "]", ",", "[", "colored", ".", "white", "(", "theme", ".", "root_path", ",", "bold", "=", "True", ")", ",", "col2", "]", ")", ")" ]
List all available Engineer themes.
[ "List", "all", "available", "Engineer", "themes", "." ]
python
train
39.105263
chrisrink10/basilisp
src/basilisp/lang/reader.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/reader.py#L736-L760
def _expand_syntax_quote( ctx: ReaderContext, form: IterableLispForm ) -> Iterable[LispForm]: """Expand syntax quoted forms to handle unquoting and unquote-splicing. The unquoted form (unquote x) becomes: (list x) The unquote-spliced form (unquote-splicing x) becomes x All other forms are recursively processed as by _process_syntax_quoted_form and are returned as: (list form)""" expanded = [] for elem in form: if _is_unquote(elem): expanded.append(llist.l(_LIST, elem[1])) elif _is_unquote_splicing(elem): expanded.append(elem[1]) else: expanded.append(llist.l(_LIST, _process_syntax_quoted_form(ctx, elem))) return expanded
[ "def", "_expand_syntax_quote", "(", "ctx", ":", "ReaderContext", ",", "form", ":", "IterableLispForm", ")", "->", "Iterable", "[", "LispForm", "]", ":", "expanded", "=", "[", "]", "for", "elem", "in", "form", ":", "if", "_is_unquote", "(", "elem", ")", ":", "expanded", ".", "append", "(", "llist", ".", "l", "(", "_LIST", ",", "elem", "[", "1", "]", ")", ")", "elif", "_is_unquote_splicing", "(", "elem", ")", ":", "expanded", ".", "append", "(", "elem", "[", "1", "]", ")", "else", ":", "expanded", ".", "append", "(", "llist", ".", "l", "(", "_LIST", ",", "_process_syntax_quoted_form", "(", "ctx", ",", "elem", ")", ")", ")", "return", "expanded" ]
Expand syntax quoted forms to handle unquoting and unquote-splicing. The unquoted form (unquote x) becomes: (list x) The unquote-spliced form (unquote-splicing x) becomes x All other forms are recursively processed as by _process_syntax_quoted_form and are returned as: (list form)
[ "Expand", "syntax", "quoted", "forms", "to", "handle", "unquoting", "and", "unquote", "-", "splicing", "." ]
python
test
29.16
peterpakos/ppipa
ppipa/freeipaserver.py
https://github.com/peterpakos/ppipa/blob/cc7565fe9afa079437cccfaf8d6b8c3da3f080ee/ppipa/freeipaserver.py#L88-L95
def _search(self, base, fltr, attrs=None, scope=ldap.SCOPE_SUBTREE): """Perform LDAP search""" try: results = self._conn.search_s(base, scope, fltr, attrs) except Exception as e: log.exception(self._get_ldap_msg(e)) results = False return results
[ "def", "_search", "(", "self", ",", "base", ",", "fltr", ",", "attrs", "=", "None", ",", "scope", "=", "ldap", ".", "SCOPE_SUBTREE", ")", ":", "try", ":", "results", "=", "self", ".", "_conn", ".", "search_s", "(", "base", ",", "scope", ",", "fltr", ",", "attrs", ")", "except", "Exception", "as", "e", ":", "log", ".", "exception", "(", "self", ".", "_get_ldap_msg", "(", "e", ")", ")", "results", "=", "False", "return", "results" ]
Perform LDAP search
[ "Perform", "LDAP", "search" ]
python
train
38.375
eternnoir/pyTelegramBotAPI
telebot/__init__.py
https://github.com/eternnoir/pyTelegramBotAPI/blob/47b53b88123097f1b9562a6cd5d4e080b86185d1/telebot/__init__.py#L838-L847
def kick_chat_member(self, chat_id, user_id, until_date=None): """ Use this method to kick a user from a group or a supergroup. :param chat_id: Int or string : Unique identifier for the target group or username of the target supergroup :param user_id: Int : Unique identifier of the target user :param until_date: Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever :return: types.Message """ return apihelper.kick_chat_member(self.token, chat_id, user_id, until_date)
[ "def", "kick_chat_member", "(", "self", ",", "chat_id", ",", "user_id", ",", "until_date", "=", "None", ")", ":", "return", "apihelper", ".", "kick_chat_member", "(", "self", ".", "token", ",", "chat_id", ",", "user_id", ",", "until_date", ")" ]
Use this method to kick a user from a group or a supergroup. :param chat_id: Int or string : Unique identifier for the target group or username of the target supergroup :param user_id: Int : Unique identifier of the target user :param until_date: Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever :return: types.Message
[ "Use", "this", "method", "to", "kick", "a", "user", "from", "a", "group", "or", "a", "supergroup", ".", ":", "param", "chat_id", ":", "Int", "or", "string", ":", "Unique", "identifier", "for", "the", "target", "group", "or", "username", "of", "the", "target", "supergroup", ":", "param", "user_id", ":", "Int", ":", "Unique", "identifier", "of", "the", "target", "user", ":", "param", "until_date", ":", "Date", "when", "the", "user", "will", "be", "unbanned", "unix", "time", ".", "If", "user", "is", "banned", "for", "more", "than", "366", "days", "or", "less", "than", "30", "seconds", "from", "the", "current", "time", "they", "are", "considered", "to", "be", "banned", "forever", ":", "return", ":", "types", ".", "Message" ]
python
train
66.2
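A short usage sketch; the bot token and chat/user ids are placeholders.

import time
import telebot

bot = telebot.TeleBot('BOT_TOKEN')                        # placeholder token
bot.kick_chat_member(chat_id=-1001234567890,              # placeholder supergroup id
                     user_id=123456789,                   # placeholder user id
                     until_date=int(time.time()) + 3600)  # ban for one hour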
learningequality/ricecooker
ricecooker/utils/downloader.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/downloader.py#L129-L253
def download_static_assets(doc, destination, base_url,
        request_fn=make_request, url_blacklist=[], js_middleware=None,
        css_middleware=None, derive_filename=_derive_filename):
    """
    Download all static assets referenced from an HTML page.
    The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
    audio clips.

    Args:
        doc: The HTML page source as a string or BeautifulSoup instance.
        destination: The folder to download the static assets to!
        base_url: The base URL where assets will be downloaded from.
        request_fn: The function to be called to make requests, passed to
            ricecooker.utils.html.download_file(). Pass in a custom one for
            custom caching logic.
        url_blacklist: A list of keywords of files to not include in downloading.
            Will do substring matching, so e.g. 'acorn.js' will match
            '/some/path/to/acorn.js'.
        js_middleware: If specified, JS content will be passed into this callback
            which is expected to return JS content with any modifications.
        css_middleware: If specified, CSS content will be passed into this callback
            which is expected to return CSS content with any modifications.

    Return the modified page HTML with links rewritten to the locations of the
    downloaded static files, as a BeautifulSoup object. (Call str() on it to
    extract the raw HTML.)
    """
    if not isinstance(doc, BeautifulSoup):
        doc = BeautifulSoup(doc, "html.parser")

    # Helper function to download all assets for a given CSS selector.
    def download_assets(selector, attr, url_middleware=None,
            content_middleware=None, node_filter=None):
        nodes = doc.select(selector)

        for i, node in enumerate(nodes):

            if node_filter:
                if not node_filter(node):
                    src = node[attr]
                    node[attr] = ''
                    print('        Skipping node with src ', src)
                    continue

            if node[attr].startswith('data:'):
                continue

            url = urljoin(base_url, node[attr])

            if _is_blacklisted(url, url_blacklist):
                print('        Skipping downloading blacklisted url', url)
                node[attr] = ""
                continue

            if url_middleware:
                url = url_middleware(url)

            filename = derive_filename(url)
            node[attr] = filename

            print("        Downloading", url, "to filename", filename)
            download_file(url, destination, request_fn=request_fn,
                    filename=filename, middleware_callbacks=content_middleware)

    def js_content_middleware(content, url, **kwargs):
        if js_middleware:
            content = js_middleware(content, url, **kwargs)
        # Polyfill localStorage and document.cookie as iframes can't access
        # them
        return (content
            .replace("localStorage", "_localStorage")
            .replace('document.cookie.split', '"".split')
            .replace('document.cookie', 'window._document_cookie'))

    def css_node_filter(node):
        return "stylesheet" in node["rel"]

    def css_content_middleware(content, url, **kwargs):
        if css_middleware:
            content = css_middleware(content, url, **kwargs)

        file_dir = os.path.dirname(urlparse(url).path)

        # Download linked fonts and images
        def repl(match):
            src = match.group(1)
            if src.startswith('//localhost'):
                return 'url()'
            # Don't download data: files
            if src.startswith('data:'):
                return match.group(0)
            src_url = urljoin(base_url, os.path.join(file_dir, src))
            if _is_blacklisted(src_url, url_blacklist):
                print('    Skipping downloading blacklisted url', src_url)
                return 'url()'
            derived_filename = derive_filename(src_url)
            download_file(src_url, destination, request_fn=request_fn,
                    filename=derived_filename)
            return 'url("%s")' % derived_filename

        return _CSS_URL_RE.sub(repl, content)

    # Download all linked static assets.
download_assets("img[src]", "src") # Images download_assets("link[href]", "href", content_middleware=css_content_middleware, node_filter=css_node_filter) # CSS download_assets("script[src]", "src", content_middleware=js_content_middleware) # JS download_assets("source[src]", "src") # Potentially audio download_assets("source[srcset]", "srcset") # Potentially audio # ... and also run the middleware on CSS/JS embedded in the page source to # get linked files. for node in doc.select('style'): node.string = css_content_middleware(node.get_text(), url='') for node in doc.select('script'): if not node.attrs.get('src'): node.string = js_content_middleware(node.get_text(), url='') return doc
[ "def", "download_static_assets", "(", "doc", ",", "destination", ",", "base_url", ",", "request_fn", "=", "make_request", ",", "url_blacklist", "=", "[", "]", ",", "js_middleware", "=", "None", ",", "css_middleware", "=", "None", ",", "derive_filename", "=", "_derive_filename", ")", ":", "if", "not", "isinstance", "(", "doc", ",", "BeautifulSoup", ")", ":", "doc", "=", "BeautifulSoup", "(", "doc", ",", "\"html.parser\"", ")", "# Helper function to download all assets for a given CSS selector.", "def", "download_assets", "(", "selector", ",", "attr", ",", "url_middleware", "=", "None", ",", "content_middleware", "=", "None", ",", "node_filter", "=", "None", ")", ":", "nodes", "=", "doc", ".", "select", "(", "selector", ")", "for", "i", ",", "node", "in", "enumerate", "(", "nodes", ")", ":", "if", "node_filter", ":", "if", "not", "node_filter", "(", "node", ")", ":", "src", "=", "node", "[", "attr", "]", "node", "[", "attr", "]", "=", "''", "print", "(", "' Skipping node with src '", ",", "src", ")", "continue", "if", "node", "[", "attr", "]", ".", "startswith", "(", "'data:'", ")", ":", "continue", "url", "=", "urljoin", "(", "base_url", ",", "node", "[", "attr", "]", ")", "if", "_is_blacklisted", "(", "url", ",", "url_blacklist", ")", ":", "print", "(", "' Skipping downloading blacklisted url'", ",", "url", ")", "node", "[", "attr", "]", "=", "\"\"", "continue", "if", "url_middleware", ":", "url", "=", "url_middleware", "(", "url", ")", "filename", "=", "derive_filename", "(", "url", ")", "node", "[", "attr", "]", "=", "filename", "print", "(", "\" Downloading\"", ",", "url", ",", "\"to filename\"", ",", "filename", ")", "download_file", "(", "url", ",", "destination", ",", "request_fn", "=", "request_fn", ",", "filename", "=", "filename", ",", "middleware_callbacks", "=", "content_middleware", ")", "def", "js_content_middleware", "(", "content", ",", "url", ",", "*", "*", "kwargs", ")", ":", "if", "js_middleware", ":", "content", "=", "js_middleware", "(", "content", ",", "url", ",", "*", "*", "kwargs", ")", "# Polyfill localStorage and document.cookie as iframes can't access", "# them", "return", "(", "content", ".", "replace", "(", "\"localStorage\"", ",", "\"_localStorage\"", ")", ".", "replace", "(", "'document.cookie.split'", ",", "'\"\".split'", ")", ".", "replace", "(", "'document.cookie'", ",", "'window._document_cookie'", ")", ")", "def", "css_node_filter", "(", "node", ")", ":", "return", "\"stylesheet\"", "in", "node", "[", "\"rel\"", "]", "def", "css_content_middleware", "(", "content", ",", "url", ",", "*", "*", "kwargs", ")", ":", "if", "css_middleware", ":", "content", "=", "css_middleware", "(", "content", ",", "url", ",", "*", "*", "kwargs", ")", "file_dir", "=", "os", ".", "path", ".", "dirname", "(", "urlparse", "(", "url", ")", ".", "path", ")", "# Download linked fonts and images", "def", "repl", "(", "match", ")", ":", "src", "=", "match", ".", "group", "(", "1", ")", "if", "src", ".", "startswith", "(", "'//localhost'", ")", ":", "return", "'url()'", "# Don't download data: files", "if", "src", ".", "startswith", "(", "'data:'", ")", ":", "return", "match", ".", "group", "(", "0", ")", "src_url", "=", "urljoin", "(", "base_url", ",", "os", ".", "path", ".", "join", "(", "file_dir", ",", "src", ")", ")", "if", "_is_blacklisted", "(", "src_url", ",", "url_blacklist", ")", ":", "print", "(", "' Skipping downloading blacklisted url'", ",", "src_url", ")", "return", "'url()'", "derived_filename", "=", "derive_filename", "(", "src_url", ")", "download_file", "(", 
"src_url", ",", "destination", ",", "request_fn", "=", "request_fn", ",", "filename", "=", "derived_filename", ")", "return", "'url(\"%s\")'", "%", "derived_filename", "return", "_CSS_URL_RE", ".", "sub", "(", "repl", ",", "content", ")", "# Download all linked static assets.", "download_assets", "(", "\"img[src]\"", ",", "\"src\"", ")", "# Images", "download_assets", "(", "\"link[href]\"", ",", "\"href\"", ",", "content_middleware", "=", "css_content_middleware", ",", "node_filter", "=", "css_node_filter", ")", "# CSS", "download_assets", "(", "\"script[src]\"", ",", "\"src\"", ",", "content_middleware", "=", "js_content_middleware", ")", "# JS", "download_assets", "(", "\"source[src]\"", ",", "\"src\"", ")", "# Potentially audio", "download_assets", "(", "\"source[srcset]\"", ",", "\"srcset\"", ")", "# Potentially audio", "# ... and also run the middleware on CSS/JS embedded in the page source to", "# get linked files.", "for", "node", "in", "doc", ".", "select", "(", "'style'", ")", ":", "node", ".", "string", "=", "css_content_middleware", "(", "node", ".", "get_text", "(", ")", ",", "url", "=", "''", ")", "for", "node", "in", "doc", ".", "select", "(", "'script'", ")", ":", "if", "not", "node", ".", "attrs", ".", "get", "(", "'src'", ")", ":", "node", ".", "string", "=", "js_content_middleware", "(", "node", ".", "get_text", "(", ")", ",", "url", "=", "''", ")", "return", "doc" ]
Download all static assets referenced from an HTML page.
The goal is to easily create HTML5 apps! Downloads JS, CSS, images, and
audio clips.

Args:
    doc: The HTML page source as a string or BeautifulSoup instance.
    destination: The folder to download the static assets to!
    base_url: The base URL where assets will be downloaded from.
    request_fn: The function to be called to make requests, passed to
        ricecooker.utils.html.download_file(). Pass in a custom one for
        custom caching logic.
    url_blacklist: A list of keywords of files to not include in downloading.
        Will do substring matching, so e.g. 'acorn.js' will match
        '/some/path/to/acorn.js'.
    js_middleware: If specified, JS content will be passed into this callback
        which is expected to return JS content with any modifications.
    css_middleware: If specified, CSS content will be passed into this callback
        which is expected to return CSS content with any modifications.

Return the modified page HTML with links rewritten to the locations of the
downloaded static files, as a BeautifulSoup object. (Call str() on it to
extract the raw HTML.)
[ "Download", "all", "static", "assets", "referenced", "from", "an", "HTML", "page", ".", "The", "goal", "is", "to", "easily", "create", "HTML5", "apps!", "Downloads", "JS", "CSS", "images", "and", "audio", "clips", "." ]
python
train
39.776
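A hedged end-to-end sketch of the downloader; the page URL and output folder are invented, and requests is only used here to fetch the page source.

import requests

url = 'https://example.com/app/index.html'  # made-up page
html = requests.get(url).text

doc = download_static_assets(html, destination='static', base_url=url)

with open('static/index.html', 'w') as f:
    f.write(str(doc))  # doc is a BeautifulSoup object with rewritten links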
wmayner/pyphi
pyphi/examples.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/examples.py#L24-L99
def basic_network(cm=False): """A 3-node network of logic gates. Diagram:: +~~~~~~~~+ +~~~~>| A |<~~~~+ | | (OR) +~~~+ | | +~~~~~~~~+ | | | | | | v | +~+~~~~~~+ +~~~~~+~+ | B |<~~~~~~+ C | | (COPY) +~~~~~~>| (XOR) | +~~~~~~~~+ +~~~~~~~+ TPM: +----------------+---------------+ | Previous state | Current state | +----------------+---------------+ | A, B, C | A, B, C | +================+===============+ | 0, 0, 0 | 0, 0, 0 | +----------------+---------------+ | 1, 0, 0 | 0, 0, 1 | +----------------+---------------+ | 0, 1, 0 | 1, 0, 1 | +----------------+---------------+ | 1, 1, 0 | 1, 0, 0 | +----------------+---------------+ | 0, 0, 1 | 1, 1, 0 | +----------------+---------------+ | 1, 0, 1 | 1, 1, 1 | +----------------+---------------+ | 0, 1, 1 | 1, 1, 1 | +----------------+---------------+ | 1, 1, 1 | 1, 1, 0 | +----------------+---------------+ Connectivity matrix: +---+---+---+---+ | . | A | B | C | +---+---+---+---+ | A | 0 | 0 | 1 | +---+---+---+---+ | B | 1 | 0 | 1 | +---+---+---+---+ | C | 1 | 1 | 0 | +---+---+---+---+ .. note:: |CM[i][j] = 1| means that there is a directed edge |(i,j)| from node |i| to node |j| and |CM[i][j] = 0| means there is no edge from |i| to |j|. """ tpm = np.array([ [0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1], [1, 1, 1], [1, 1, 0] ]) if cm is False: cm = np.array([ [0, 0, 1], [1, 0, 1], [1, 1, 0] ]) else: cm = None return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])
[ "def", "basic_network", "(", "cm", "=", "False", ")", ":", "tpm", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", ",", "0", "]", ",", "[", "0", ",", "0", ",", "1", "]", ",", "[", "1", ",", "0", ",", "1", "]", ",", "[", "1", ",", "0", ",", "0", "]", ",", "[", "1", ",", "1", ",", "0", "]", ",", "[", "1", ",", "1", ",", "1", "]", ",", "[", "1", ",", "1", ",", "1", "]", ",", "[", "1", ",", "1", ",", "0", "]", "]", ")", "if", "cm", "is", "False", ":", "cm", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", ",", "1", "]", ",", "[", "1", ",", "0", ",", "1", "]", ",", "[", "1", ",", "1", ",", "0", "]", "]", ")", "else", ":", "cm", "=", "None", "return", "Network", "(", "tpm", ",", "cm", "=", "cm", ",", "node_labels", "=", "LABELS", "[", ":", "tpm", ".", "shape", "[", "1", "]", "]", ")" ]
A 3-node network of logic gates. Diagram:: +~~~~~~~~+ +~~~~>| A |<~~~~+ | | (OR) +~~~+ | | +~~~~~~~~+ | | | | | | v | +~+~~~~~~+ +~~~~~+~+ | B |<~~~~~~+ C | | (COPY) +~~~~~~>| (XOR) | +~~~~~~~~+ +~~~~~~~+ TPM: +----------------+---------------+ | Previous state | Current state | +----------------+---------------+ | A, B, C | A, B, C | +================+===============+ | 0, 0, 0 | 0, 0, 0 | +----------------+---------------+ | 1, 0, 0 | 0, 0, 1 | +----------------+---------------+ | 0, 1, 0 | 1, 0, 1 | +----------------+---------------+ | 1, 1, 0 | 1, 0, 0 | +----------------+---------------+ | 0, 0, 1 | 1, 1, 0 | +----------------+---------------+ | 1, 0, 1 | 1, 1, 1 | +----------------+---------------+ | 0, 1, 1 | 1, 1, 1 | +----------------+---------------+ | 1, 1, 1 | 1, 1, 0 | +----------------+---------------+ Connectivity matrix: +---+---+---+---+ | . | A | B | C | +---+---+---+---+ | A | 0 | 0 | 1 | +---+---+---+---+ | B | 1 | 0 | 1 | +---+---+---+---+ | C | 1 | 1 | 0 | +---+---+---+---+ .. note:: |CM[i][j] = 1| means that there is a directed edge |(i,j)| from node |i| to node |j| and |CM[i][j] = 0| means there is no edge from |i| to |j|.
[ "A", "3", "-", "node", "network", "of", "logic", "gates", "." ]
python
train
26.092105
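A sketch of the example network feeding a Φ computation, following pyphi's documented workflow; the state tuple is one of the eight rows of the TPM above.

import pyphi

network = pyphi.examples.basic_network()
state = (1, 0, 0)
subsystem = pyphi.Subsystem(network, state, range(network.size))
phi = pyphi.compute.phi(subsystem)  # system integrated information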
uber/rides-python-sdk
uber_rides/auth.py
https://github.com/uber/rides-python-sdk/blob/76ecd75ab5235d792ec1010e36eca679ba285127/uber_rides/auth.py#L423-L444
def get_session(self): """Create Session to store credentials. Returns (Session) A Session object with OAuth 2.0 credentials. """ response = _request_access_token( grant_type=auth.CLIENT_CREDENTIALS_GRANT, client_id=self.client_id, client_secret=self.client_secret, scopes=self.scopes, ) oauth2credential = OAuth2Credential.make_from_response( response=response, grant_type=auth.CLIENT_CREDENTIALS_GRANT, client_id=self.client_id, client_secret=self.client_secret, ) return Session(oauth2credential=oauth2credential)
[ "def", "get_session", "(", "self", ")", ":", "response", "=", "_request_access_token", "(", "grant_type", "=", "auth", ".", "CLIENT_CREDENTIALS_GRANT", ",", "client_id", "=", "self", ".", "client_id", ",", "client_secret", "=", "self", ".", "client_secret", ",", "scopes", "=", "self", ".", "scopes", ",", ")", "oauth2credential", "=", "OAuth2Credential", ".", "make_from_response", "(", "response", "=", "response", ",", "grant_type", "=", "auth", ".", "CLIENT_CREDENTIALS_GRANT", ",", "client_id", "=", "self", ".", "client_id", ",", "client_secret", "=", "self", ".", "client_secret", ",", ")", "return", "Session", "(", "oauth2credential", "=", "oauth2credential", ")" ]
Create Session to store credentials. Returns (Session) A Session object with OAuth 2.0 credentials.
[ "Create", "Session", "to", "store", "credentials", "." ]
python
train
31.181818
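A hedged sketch of the client-credentials flow this method belongs to; the grant class name follows the SDK's auth module, and the credentials and scopes are placeholders.

from uber_rides.auth import ClientCredentialGrant
from uber_rides.client import UberRidesClient

auth_flow = ClientCredentialGrant('CLIENT_ID', ['profile'], 'CLIENT_SECRET')
session = auth_flow.get_session()  # the method shown above
client = UberRidesClient(session)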
doloopwhile/pyjq
pyjq.py
https://github.com/doloopwhile/pyjq/blob/003144e636af20e20862d4a191f05ec9ed9017b7/pyjq.py#L69-L74
def one(script, value=None, vars={}, url=None, opener=default_opener, library_paths=[]):
    """
    Transform object by jq script, returning the first result.
    Raise ValueError unless the results include exactly one element.
    """
    return compile(script, vars, library_paths).one(_get_value(value, url, opener))
[ "def", "one", "(", "script", ",", "value", "=", "None", ",", "vars", "=", "{", "}", ",", "url", "=", "None", ",", "opener", "=", "default_opener", ",", "library_paths", "=", "[", "]", ")", ":", "return", "compile", "(", "script", ",", "vars", ",", "library_paths", ")", ".", "one", "(", "_get_value", "(", "value", ",", "url", ",", "opener", ")", ")" ]
Transform object by jq script, returning the first result.
Raise ValueError unless the results include exactly one element.
[ "Transform", "object", "by", "jq", "script", "returning", "the", "first", "result", ".", "Raise", "ValueError", "unless", "results", "does", "not", "include", "exactly", "one", "element", "." ]
python
train
53.333333
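A short usage sketch contrasting a single-result script with one that raises.

import pyjq

data = {"users": [{"name": "Ada"}, {"name": "Grace"}]}
pyjq.one('.users[0].name', data)  # -> 'Ada'
pyjq.one('.users[].name', data)   # raises ValueError: two results, not one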
synw/dataswim
dataswim/data/clean.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/clean.py#L88-L103
def fill_nan(self, val: str, *cols):
    """
    Fill NaN values with new values in the main dataframe

    :param val: new value
    :type val: str
    :param \*cols: names of the columns
    :type \*cols: str, at least one

    :example: ``ds.fill_nan("new value", "mycol1", "mycol2")``
    """
    df = self._fill_nan(val, *cols)
    if df is not None:
        self.df = df
    else:
        self.err("Can not fill nan values")
[ "def", "fill_nan", "(", "self", ",", "val", ":", "str", ",", "*", "cols", ")", ":", "df", "=", "self", ".", "_fill_nan", "(", "val", ",", "*", "cols", ")", "if", "df", "is", "not", "None", ":", "self", ".", "df", "=", "df", "else", ":", "self", ".", "err", "(", "\"Can not fill nan values\"", ")" ]
Fill NaN values with new values in the main dataframe

:param val: new value
:type val: str
:param \*cols: names of the columns
:type \*cols: str, at least one

:example: ``ds.fill_nan("new value", "mycol1", "mycol2")``
[ "Fill", "NaN", "values", "with", "new", "values", "in", "the", "main", "dataframe" ]
python
train
29.125
sassoo/goldman
goldman/resources/oauth_ropc.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/oauth_ropc.py#L52-L97
def on_post(self, req, resp): """ Validate the access token request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder. """ grant_type = req.get_param('grant_type') password = req.get_param('password') username = req.get_param('username') # errors or not, disable client caching along the way # per the spec resp.disable_caching() if not grant_type or not password or not username: resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'invalid_request', 'error_description': 'A grant_type, username, & password ' 'parameters are all required when ' 'requesting an OAuth access_token', 'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2', }) elif grant_type != 'password': resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'unsupported_grant_type', 'error_description': 'The grant_type parameter MUST be set ' 'to "password" not "%s"' % grant_type, 'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2', }) else: try: token = self.auth_creds(username, password) resp.serialize({ 'access_token': token, 'token_type': 'Bearer', }) except AuthRejected as exc: resp.status = falcon.HTTP_401 resp.set_header('WWW-Authenticate', self._realm) resp.serialize({ 'error': 'invalid_client', 'error_description': exc.detail, })
[ "def", "on_post", "(", "self", ",", "req", ",", "resp", ")", ":", "grant_type", "=", "req", ".", "get_param", "(", "'grant_type'", ")", "password", "=", "req", ".", "get_param", "(", "'password'", ")", "username", "=", "req", ".", "get_param", "(", "'username'", ")", "# errors or not, disable client caching along the way", "# per the spec", "resp", ".", "disable_caching", "(", ")", "if", "not", "grant_type", "or", "not", "password", "or", "not", "username", ":", "resp", ".", "status", "=", "falcon", ".", "HTTP_400", "resp", ".", "serialize", "(", "{", "'error'", ":", "'invalid_request'", ",", "'error_description'", ":", "'A grant_type, username, & password '", "'parameters are all required when '", "'requesting an OAuth access_token'", ",", "'error_uri'", ":", "'tools.ietf.org/html/rfc6749#section-4.3.2'", ",", "}", ")", "elif", "grant_type", "!=", "'password'", ":", "resp", ".", "status", "=", "falcon", ".", "HTTP_400", "resp", ".", "serialize", "(", "{", "'error'", ":", "'unsupported_grant_type'", ",", "'error_description'", ":", "'The grant_type parameter MUST be set '", "'to \"password\" not \"%s\"'", "%", "grant_type", ",", "'error_uri'", ":", "'tools.ietf.org/html/rfc6749#section-4.3.2'", ",", "}", ")", "else", ":", "try", ":", "token", "=", "self", ".", "auth_creds", "(", "username", ",", "password", ")", "resp", ".", "serialize", "(", "{", "'access_token'", ":", "token", ",", "'token_type'", ":", "'Bearer'", ",", "}", ")", "except", "AuthRejected", "as", "exc", ":", "resp", ".", "status", "=", "falcon", ".", "HTTP_401", "resp", ".", "set_header", "(", "'WWW-Authenticate'", ",", "self", ".", "_realm", ")", "resp", ".", "serialize", "(", "{", "'error'", ":", "'invalid_client'", ",", "'error_description'", ":", "exc", ".", "detail", ",", "}", ")" ]
Validate the access token request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder.
[ "Validate", "the", "access", "token", "request", "for", "spec", "compliance" ]
python
train
40.282609
gem/oq-engine
openquake/commonlib/shapefileparser.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/shapefileparser.py#L428-L456
def extract_source_planes_strikes_dips(src):
    """
    Extract strike and dip angles for source defined by multiple planes.
    """
    if "characteristicFaultSource" not in src.tag:
        strikes = dict([(key, None) for key, _ in PLANES_STRIKES_PARAM])
        dips = dict([(key, None) for key, _ in PLANES_DIPS_PARAM])
        return strikes, dips
    tags = get_taglist(src)
    surface_set = src.nodes[tags.index("surface")]
    strikes = []
    dips = []
    num_planes = 0
    for surface in surface_set:
        if "planarSurface" in surface.tag:
            strikes.append(float(surface.attrib["strike"]))
            dips.append(float(surface.attrib["dip"]))
            num_planes += 1
    if num_planes > MAX_PLANES:
        raise ValueError("Number of planes in sources %s exceeds maximum "
                         "of %s" % (str(num_planes), str(MAX_PLANES)))
    if num_planes:
        strikes = expand_src_param(strikes, PLANES_STRIKES_PARAM)
        dips = expand_src_param(dips, PLANES_DIPS_PARAM)
    else:
        strikes = dict([(key, None) for key, _ in PLANES_STRIKES_PARAM])
        dips = dict([(key, None) for key, _ in PLANES_DIPS_PARAM])
    return strikes, dips
[ "def", "extract_source_planes_strikes_dips", "(", "src", ")", ":", "if", "\"characteristicFaultSource\"", "not", "in", "src", ".", "tag", ":", "strikes", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "key", ",", "_", "in", "PLANES_STRIKES_PARAM", "]", ")", "dips", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "key", ",", "_", "in", "PLANES_DIPS_PARAM", "]", ")", "return", "strikes", ",", "dips", "tags", "=", "get_taglist", "(", "src", ")", "surface_set", "=", "src", ".", "nodes", "[", "tags", ".", "index", "(", "\"surface\"", ")", "]", "strikes", "=", "[", "]", "dips", "=", "[", "]", "num_planes", "=", "0", "for", "surface", "in", "surface_set", ":", "if", "\"planarSurface\"", "in", "surface", ".", "tag", ":", "strikes", ".", "append", "(", "float", "(", "surface", ".", "attrib", "[", "\"strike\"", "]", ")", ")", "dips", ".", "append", "(", "float", "(", "surface", ".", "attrib", "[", "\"dip\"", "]", ")", ")", "num_planes", "+=", "1", "if", "num_planes", ">", "MAX_PLANES", ":", "raise", "ValueError", "(", "\"Number of planes in sourcs %s exceededs maximum \"", "\"of %s\"", "%", "(", "str", "(", "num_planes", ")", ",", "str", "(", "MAX_PLANES", ")", ")", ")", "if", "num_planes", ":", "strikes", "=", "expand_src_param", "(", "strikes", ",", "PLANES_STRIKES_PARAM", ")", "dips", "=", "expand_src_param", "(", "dips", ",", "PLANES_DIPS_PARAM", ")", "else", ":", "strikes", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "key", ",", "_", "in", "PLANES_STRIKES_PARAM", "]", ")", "dips", "=", "dict", "(", "[", "(", "key", ",", "None", ")", "for", "key", ",", "_", "in", "PLANES_DIPS_PARAM", "]", ")", "return", "strikes", ",", "dips" ]
Extract strike and dip angles for source defined by multiple planes.
[ "Extract", "strike", "and", "dip", "angles", "for", "source", "defined", "by", "multiple", "planes", "." ]
python
train
40.275862
genialis/resolwe
resolwe/elastic/lookup.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/elastic/lookup.py#L134-L142
def get_lookup(self, operator): """Look up a lookup. :param operator: Name of the lookup operator """ try: return self._lookups[operator] except KeyError: raise NotImplementedError("Lookup operator '{}' is not supported".format(operator))
[ "def", "get_lookup", "(", "self", ",", "operator", ")", ":", "try", ":", "return", "self", ".", "_lookups", "[", "operator", "]", "except", "KeyError", ":", "raise", "NotImplementedError", "(", "\"Lookup operator '{}' is not supported\"", ".", "format", "(", "operator", ")", ")" ]
Look up a lookup. :param operator: Name of the lookup operator
[ "Look", "up", "a", "lookup", "." ]
python
train
32.777778
greenbender/pynntp
nntp/nntp.py
https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/nntp.py#L1110-L1128
def xzhdr(self, header, msgid_range=None):
    """XZHDR command.

    Args:
        msgid_range: A message-id as a string, or an article number
            as an integer, or a tuple specifying a range of article
            numbers in the form (first, [last]) - if last is omitted
            then all articles after first are included. A msgid_range
            of None (the default) uses the current article.
    """
    args = header
    if msgid_range is not None:
        args += " " + utils.unparse_msgid_range(msgid_range)

    code, message = self.command("XZHDR", args)
    if code != 221:
        raise NNTPReplyError(code, message)

    return self.info(code, message, compressed=True)
[ "def", "xzhdr", "(", "self", ",", "header", ",", "msgid_range", "=", "None", ")", ":", "args", "=", "header", "if", "msgid_range", "is", "not", "None", ":", "args", "+=", "\" \"", "+", "utils", ".", "unparse_msgid_range", "(", "msgid_range", ")", "code", ",", "message", "=", "self", ".", "command", "(", "\"XZHDR\"", ",", "args", ")", "if", "code", "!=", "221", ":", "raise", "NNTPReplyError", "(", "code", ",", "message", ")", "return", "self", ".", "info", "(", "code", ",", "message", ",", "compressed", "=", "True", ")" ]
XZHDR command.

Args:
    msgid_range: A message-id as a string, or an article number as an
        integer, or a tuple specifying a range of article numbers in
        the form (first, [last]) - if last is omitted then all articles
        after first are included. A msgid_range of None (the default)
        uses the current article.
[ "XZHDR", "command", "." ]
python
test
39
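How a msgid_range might be rendered into the command arguments; this helper mirrors the semantics documented in the docstring rather than pynntp's real utils.unparse_msgid_range.

def unparse_msgid_range(msgid_range):
    # A message-id string or a single article number passes through as-is.
    if isinstance(msgid_range, str):
        return msgid_range
    if isinstance(msgid_range, int):
        return str(msgid_range)
    # Otherwise a (first, [last]) tuple; omitting last leaves the range open.
    first, *rest = msgid_range
    return "%d-%d" % (first, rest[0]) if rest else "%d-" % first

print(unparse_msgid_range((100, 200)))  # '100-200'
print(unparse_msgid_range((100,)))      # '100-'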
gmr/queries
queries/pool.py
https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/pool.py#L524-L535
def get(cls, pid, session): """Get an idle, unused connection from the pool. Once a connection has been retrieved, it will be marked as in-use until it is freed. :param str pid: The pool ID :param queries.Session session: The session to assign to the connection :rtype: psycopg2.extensions.connection """ with cls._lock: cls._ensure_pool_exists(pid) return cls._pools[pid].get(session)
[ "def", "get", "(", "cls", ",", "pid", ",", "session", ")", ":", "with", "cls", ".", "_lock", ":", "cls", ".", "_ensure_pool_exists", "(", "pid", ")", "return", "cls", ".", "_pools", "[", "pid", "]", ".", "get", "(", "session", ")" ]
Get an idle, unused connection from the pool. Once a connection has been retrieved, it will be marked as in-use until it is freed. :param str pid: The pool ID :param queries.Session session: The session to assign to the connection :rtype: psycopg2.extensions.connection
[ "Get", "an", "idle", "unused", "connection", "from", "the", "pool", ".", "Once", "a", "connection", "has", "been", "retrieved", "it", "will", "be", "marked", "as", "in", "-", "use", "until", "it", "is", "freed", "." ]
python
train
38
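A toy version of the checkout flow: a class-level pool registry guarded by one lock, with _ensure_pool_exists inlined as a membership check. The Pool internals here are stand-ins, not the queries library's real classes.

import threading

class Pool:
    def __init__(self):
        self.idle = ['conn-a', 'conn-b']
        self.busy = {}

    def get(self, session):
        connection = self.idle.pop()         # take an idle connection
        self.busy[id(session)] = connection  # mark it in-use for this session
        return connection

class PoolManager:
    _lock = threading.Lock()
    _pools = {}

    @classmethod
    def get(cls, pid, session):
        with cls._lock:
            if pid not in cls._pools:  # assumed _ensure_pool_exists behavior
                raise KeyError('pool %s does not exist' % pid)
            return cls._pools[pid].get(session)

PoolManager._pools['main'] = Pool()
print(PoolManager.get('main', object()))  # 'conn-b'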
ajenhl/tacl
tacl/data_store.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/data_store.py#L101-L114
def _add_temporary_results(self, results, label): """Adds `results` to a temporary table with `label`. :param results: results file :type results: `File` :param label: label to be associated with results :type label: `str` """ NGRAM, SIZE, NAME, SIGLUM, COUNT, LABEL = constants.QUERY_FIELDNAMES reader = csv.DictReader(results) data = [(row[NGRAM], row[SIZE], row[NAME], row[SIGLUM], row[COUNT], label) for row in reader] self._conn.executemany(constants.INSERT_TEMPORARY_RESULTS_SQL, data)
[ "def", "_add_temporary_results", "(", "self", ",", "results", ",", "label", ")", ":", "NGRAM", ",", "SIZE", ",", "NAME", ",", "SIGLUM", ",", "COUNT", ",", "LABEL", "=", "constants", ".", "QUERY_FIELDNAMES", "reader", "=", "csv", ".", "DictReader", "(", "results", ")", "data", "=", "[", "(", "row", "[", "NGRAM", "]", ",", "row", "[", "SIZE", "]", ",", "row", "[", "NAME", "]", ",", "row", "[", "SIGLUM", "]", ",", "row", "[", "COUNT", "]", ",", "label", ")", "for", "row", "in", "reader", "]", "self", ".", "_conn", ".", "executemany", "(", "constants", ".", "INSERT_TEMPORARY_RESULTS_SQL", ",", "data", ")" ]
Adds `results` to a temporary table with `label`. :param results: results file :type results: `File` :param label: label to be associated with results :type label: `str`
[ "Adds", "results", "to", "a", "temporary", "table", "with", "label", "." ]
python
train
41.214286
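The DictReader-to-executemany flow above, shown end to end with sqlite3 and an in-memory table; the header names and schema are illustrative, not tacl's real QUERY_FIELDNAMES or SQL.

import csv
import io
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE temp_results '
             '(ngram TEXT, size INT, name TEXT, siglum TEXT, count INT, label TEXT)')
results = io.StringIO('ngram,size,name,siglum,count,label\nab cd,2,T0001,base,3,x\n')
reader = csv.DictReader(results)
# Re-label every row, exactly as _add_temporary_results does.
data = [(row['ngram'], row['size'], row['name'], row['siglum'], row['count'], 'A')
        for row in reader]
conn.executemany('INSERT INTO temp_results VALUES (?, ?, ?, ?, ?, ?)', data)
print(conn.execute('SELECT * FROM temp_results').fetchall())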
petl-developers/petl
petl/transform/intervals.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/intervals.py#L14-L38
def tupletree(table, start='start', stop='stop', value=None): """ Construct an interval tree for the given table, where each node in the tree is a row of the table. """ import intervaltree tree = intervaltree.IntervalTree() it = iter(table) hdr = next(it) flds = list(map(text_type, hdr)) assert start in flds, 'start field not recognised' assert stop in flds, 'stop field not recognised' getstart = itemgetter(flds.index(start)) getstop = itemgetter(flds.index(stop)) if value is None: getvalue = tuple else: valueindices = asindices(hdr, value) assert len(valueindices) > 0, 'invalid value field specification' getvalue = itemgetter(*valueindices) for row in it: tree.addi(getstart(row), getstop(row), getvalue(row)) return tree
[ "def", "tupletree", "(", "table", ",", "start", "=", "'start'", ",", "stop", "=", "'stop'", ",", "value", "=", "None", ")", ":", "import", "intervaltree", "tree", "=", "intervaltree", ".", "IntervalTree", "(", ")", "it", "=", "iter", "(", "table", ")", "hdr", "=", "next", "(", "it", ")", "flds", "=", "list", "(", "map", "(", "text_type", ",", "hdr", ")", ")", "assert", "start", "in", "flds", ",", "'start field not recognised'", "assert", "stop", "in", "flds", ",", "'stop field not recognised'", "getstart", "=", "itemgetter", "(", "flds", ".", "index", "(", "start", ")", ")", "getstop", "=", "itemgetter", "(", "flds", ".", "index", "(", "stop", ")", ")", "if", "value", "is", "None", ":", "getvalue", "=", "tuple", "else", ":", "valueindices", "=", "asindices", "(", "hdr", ",", "value", ")", "assert", "len", "(", "valueindices", ")", ">", "0", ",", "'invalid value field specification'", "getvalue", "=", "itemgetter", "(", "*", "valueindices", ")", "for", "row", "in", "it", ":", "tree", ".", "addi", "(", "getstart", "(", "row", ")", ",", "getstop", "(", "row", ")", ",", "getvalue", "(", "row", ")", ")", "return", "tree" ]
Construct an interval tree for the given table, where each node in the tree is a row of the table.
[ "Construct", "an", "interval", "tree", "for", "the", "given", "table", "where", "each", "node", "in", "the", "tree", "is", "a", "row", "of", "the", "table", "." ]
python
train
32.64
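A possible call against tupletree; it needs the third-party intervaltree package that the function itself imports.

table = [('start', 'stop', 'gene'),
         (100, 200, 'a'),
         (150, 300, 'b')]
tree = tupletree(table, value='gene')
for iv in sorted(tree[160]):  # point query: intervals overlapping position 160
    print(iv.begin, iv.end, iv.data)
# 100 200 a
# 150 300 b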
itamarst/crochet
crochet/_shutdown.py
https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_shutdown.py#L40-L44
def register(self, f, *args, **kwargs): """ Register a function and arguments to be called later. """ self._functions.append(lambda: f(*args, **kwargs))
[ "def", "register", "(", "self", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_functions", ".", "append", "(", "lambda", ":", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Register a function and arguments to be called later.
[ "Register", "a", "function", "and", "arguments", "to", "be", "called", "later", "." ]
python
train
36
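The closure above is the whole trick: f and its arguments are frozen at registration time and invoked later. A standalone version, where run_all is an invented name rather than crochet's real API:

class DeferredCalls:
    def __init__(self):
        self._functions = []

    def register(self, f, *args, **kwargs):
        # The lambda captures f, args and kwargs until run_all fires.
        self._functions.append(lambda: f(*args, **kwargs))

    def run_all(self):
        for function in self._functions:
            function()

calls = DeferredCalls()
calls.register(print, 'shutting', 'down', sep=' ')
calls.run_all()  # prints: shutting down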
dadadel/pyment
pyment/docstring.py
https://github.com/dadadel/pyment/blob/3d1bdf87d083ff56230bd0bf7c5252e20552b7b6/pyment/docstring.py#L362-L371
def get_list_key(self, data, key, header_lines=2):
        """Get the list of elements for a key.
        Each element is a tuple (key=None, description, type=None).
        Note that the tuple's elements can differ depending on the key.

        :param data: the data to process
        :param key: the key

        """
        return super(NumpydocTools, self).get_list_key(data, key, header_lines=header_lines)
[ "def", "get_list_key", "(", "self", ",", "data", ",", "key", ",", "header_lines", "=", "2", ")", ":", "return", "super", "(", "NumpydocTools", ",", "self", ")", ".", "get_list_key", "(", "data", ",", "key", ",", "header_lines", "=", "header_lines", ")" ]
Get the list of elements for a key.
Each element is a tuple (key=None, description, type=None).
Note that the tuple's elements can differ depending on the key.

:param data: the data to process
:param key: the key
[ "Get", "the", "list", "of", "elements", "for", "a", "key", "." ]
python
train
39.9
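The shape the docstring describes, with made-up data rather than pyment output:

params = [
    (None, 'the raw section text', None),            # description only
    ('timeout', 'seconds before giving up', 'int'),  # key, description and type
]
for key, description, typ in params:
    print(key, '|', description, '|', typ)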
dnanexus/dx-toolkit
src/python/dxpy/bindings/__init__.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/__init__.py#L596-L622
def clone(self, project, folder="/", **kwargs): ''' :param project: Destination project ID :type project: string :param folder: Folder route to which to move the object :type folder: string :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object :returns: An object handler for the new cloned object :rtype: :class:`DXDataObject` Clones the associated remote object to *folder* in *project* and returns an object handler for the new object in the destination project. ''' if self._proj is None: raise DXError("Clone called when a project ID was not associated with this object handler") dxpy.api.project_clone(self._proj, {"objects": [self._dxid], "project": project, "destination": folder}, **kwargs) cloned_copy = copy.copy(self) cloned_copy.set_ids(cloned_copy.get_id(), project) return cloned_copy
[ "def", "clone", "(", "self", ",", "project", ",", "folder", "=", "\"/\"", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_proj", "is", "None", ":", "raise", "DXError", "(", "\"Clone called when a project ID was not associated with this object handler\"", ")", "dxpy", ".", "api", ".", "project_clone", "(", "self", ".", "_proj", ",", "{", "\"objects\"", ":", "[", "self", ".", "_dxid", "]", ",", "\"project\"", ":", "project", ",", "\"destination\"", ":", "folder", "}", ",", "*", "*", "kwargs", ")", "cloned_copy", "=", "copy", ".", "copy", "(", "self", ")", "cloned_copy", ".", "set_ids", "(", "cloned_copy", ".", "get_id", "(", ")", ",", "project", ")", "return", "cloned_copy" ]
:param project: Destination project ID :type project: string :param folder: Folder route to which to move the object :type folder: string :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated with the object :returns: An object handler for the new cloned object :rtype: :class:`DXDataObject` Clones the associated remote object to *folder* in *project* and returns an object handler for the new object in the destination project.
[ ":", "param", "project", ":", "Destination", "project", "ID", ":", "type", "project", ":", "string", ":", "param", "folder", ":", "Folder", "route", "to", "which", "to", "move", "the", "object", ":", "type", "folder", ":", "string", ":", "raises", ":", ":", "exc", ":", "~dxpy", ".", "exceptions", ".", "DXError", "if", "no", "project", "is", "associated", "with", "the", "object", ":", "returns", ":", "An", "object", "handler", "for", "the", "new", "cloned", "object", ":", "rtype", ":", ":", "class", ":", "DXDataObject" ]
python
train
40.074074
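The handler-cloning idiom at the end of clone(), isolated: shallow-copy the handler, then repoint the copy at the destination project. The Handler class and IDs below are placeholders, not a live dxpy call.

import copy

class Handler:
    def __init__(self, dxid, proj):
        self._dxid, self._proj = dxid, proj

    def get_id(self):
        return self._dxid

    def set_ids(self, dxid, proj):
        self._dxid, self._proj = dxid, proj

original = Handler('record-xxxx', 'project-aaaa')
cloned_copy = copy.copy(original)
cloned_copy.set_ids(cloned_copy.get_id(), 'project-bbbb')
print(original._proj, cloned_copy._proj)  # project-aaaa project-bbbb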
push-things/django-th
django_th/publish.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/django_th/publish.py#L26-L35
def update_trigger(self, service):
        """
        update the date when the trigger occurs
        :param service: service object to update
        """
        now = arrow.utcnow().to(settings.TIME_ZONE).format('YYYY-MM-DD HH:mm:ssZZ')
        TriggerService.objects.filter(id=service.id).update(date_triggered=now,
                                                            consumer_failed=0,
                                                            provider_failed=0,
                                                            )
[ "def", "update_trigger", "(", "self", ",", "service", ")", ":", "now", "=", "arrow", ".", "utcnow", "(", ")", ".", "to", "(", "settings", ".", "TIME_ZONE", ")", ".", "format", "(", "'YYYY-MM-DD HH:mm:ssZZ'", ")", "TriggerService", ".", "objects", ".", "filter", "(", "id", "=", "service", ".", "id", ")", ".", "update", "(", "date_triggered", "=", "now", ",", "consumer_failed", "=", "0", ",", "provider_failed", "=", "0", ",", ")" ]
update the date when the trigger occurs
:param service: service object to update
[ "update", "the", "date", "when", "the", "trigger", "occurs", ":", "param", "service", ":", "service", "object", "to", "update" ]
python
train
53.8
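What the timestamp looks like in isolation; this needs the arrow package, and 'Europe/Paris' merely stands in for settings.TIME_ZONE.

import arrow

now = arrow.utcnow().to('Europe/Paris').format('YYYY-MM-DD HH:mm:ssZZ')
print(now)  # e.g. 2019-06-01 14:34:56+02:00
# The queryset call then persists it in a single UPDATE:
# TriggerService.objects.filter(id=service.id).update(date_triggered=now, ...)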
HewlettPackard/python-hpOneView
hpOneView/resources/networking/fabrics.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/networking/fabrics.py#L107-L121
def get_reserved_vlan_range(self, id_or_uri): """ Gets the reserved vlan ID range for the fabric. Note: This method is only available on HPE Synergy. Args: id_or_uri: ID or URI of fabric. Returns: dict: vlan-pool """ uri = self._client.build_uri(id_or_uri) + "/reserved-vlan-range" return self._client.get(uri)
[ "def", "get_reserved_vlan_range", "(", "self", ",", "id_or_uri", ")", ":", "uri", "=", "self", ".", "_client", ".", "build_uri", "(", "id_or_uri", ")", "+", "\"/reserved-vlan-range\"", "return", "self", ".", "_client", ".", "get", "(", "uri", ")" ]
Gets the reserved vlan ID range for the fabric. Note: This method is only available on HPE Synergy. Args: id_or_uri: ID or URI of fabric. Returns: dict: vlan-pool
[ "Gets", "the", "reserved", "vlan", "ID", "range", "for", "the", "fabric", "." ]
python
train
26.6
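The build-URI-then-GET pattern with the REST client faked out; the base path and the returned vlan-pool fields are assumptions, not OneView's documented payload.

class FakeClient:
    def build_uri(self, id_or_uri):
        if id_or_uri.startswith('/rest/'):
            return id_or_uri                 # already a URI
        return '/rest/fabrics/' + id_or_uri  # assumed base path

    def get(self, uri):
        return {'uri': uri, 'type': 'vlan-pool', 'start': 3967, 'length': 128}

client = FakeClient()
print(client.get(client.build_uri('fab-1') + '/reserved-vlan-range'))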
aws/aws-encryption-sdk-python
src/aws_encryption_sdk/internal/formatting/encryption_context.py
https://github.com/aws/aws-encryption-sdk-python/blob/d182155d5fb1ef176d9e7d0647679737d5146495/src/aws_encryption_sdk/internal/formatting/encryption_context.py#L29-L47
def assemble_content_aad(message_id, aad_content_string, seq_num, length): """Assembles the Body AAD string for a message body structure. :param message_id: Message ID :type message_id: str :param aad_content_string: ContentAADString object for frame type :type aad_content_string: aws_encryption_sdk.identifiers.ContentAADString :param seq_num: Sequence number of frame :type seq_num: int :param length: Content Length :type length: int :returns: Properly formatted AAD bytes for message body structure. :rtype: bytes :raises SerializationError: if aad_content_string is not known """ if not isinstance(aad_content_string, aws_encryption_sdk.identifiers.ContentAADString): raise SerializationError("Unknown aad_content_string") fmt = ">16s{}sIQ".format(len(aad_content_string.value)) return struct.pack(fmt, message_id, aad_content_string.value, seq_num, length)
[ "def", "assemble_content_aad", "(", "message_id", ",", "aad_content_string", ",", "seq_num", ",", "length", ")", ":", "if", "not", "isinstance", "(", "aad_content_string", ",", "aws_encryption_sdk", ".", "identifiers", ".", "ContentAADString", ")", ":", "raise", "SerializationError", "(", "\"Unknown aad_content_string\"", ")", "fmt", "=", "\">16s{}sIQ\"", ".", "format", "(", "len", "(", "aad_content_string", ".", "value", ")", ")", "return", "struct", ".", "pack", "(", "fmt", ",", "message_id", ",", "aad_content_string", ".", "value", ",", "seq_num", ",", "length", ")" ]
Assembles the Body AAD string for a message body structure. :param message_id: Message ID :type message_id: str :param aad_content_string: ContentAADString object for frame type :type aad_content_string: aws_encryption_sdk.identifiers.ContentAADString :param seq_num: Sequence number of frame :type seq_num: int :param length: Content Length :type length: int :returns: Properly formatted AAD bytes for message body structure. :rtype: bytes :raises SerializationError: if aad_content_string is not known
[ "Assembles", "the", "Body", "AAD", "string", "for", "a", "message", "body", "structure", "." ]
python
train
48.315789
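The same struct layout made concrete: a 16-byte message id, the content string, a 4-byte sequence number and an 8-byte length, all big-endian. The content-string bytes below are a placeholder, not necessarily the SDK's actual constant.

import struct

message_id = b'\x00' * 16
aad_value = b'AWSKMSEncryptionClient Frame'  # placeholder value
fmt = '>16s{}sIQ'.format(len(aad_value))
packed = struct.pack(fmt, message_id, aad_value, 1, 4096)
print(len(packed))  # 16 + 28 + 4 + 8 = 56 bytes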
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L332-L338
def pickle_loads(cls, s): """Reconstruct the flow from a string.""" strio = StringIO() strio.write(s) strio.seek(0) flow = pmg_pickle_load(strio) return flow
[ "def", "pickle_loads", "(", "cls", ",", "s", ")", ":", "strio", "=", "StringIO", "(", ")", "strio", ".", "write", "(", "s", ")", "strio", ".", "seek", "(", "0", ")", "flow", "=", "pmg_pickle_load", "(", "strio", ")", "return", "flow" ]
Reconstruct the flow from a string.
[ "Reconstruct", "the", "flow", "from", "a", "string", "." ]
python
train
28.428571
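The write-seek-load round trip in miniature; plain pickle with BytesIO stands in for pymatgen's pmg_pickle helpers, which is why bytes replace the StringIO used above.

import io
import pickle

payload = pickle.dumps({'tasks': 3})
buf = io.BytesIO()
buf.write(payload)
buf.seek(0)              # rewind before loading, just like pickle_loads
print(pickle.load(buf))  # {'tasks': 3}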