Column             Type      Value summary
repo               string    lengths 7 to 55
path               string    lengths 4 to 223
url                string    lengths 87 to 315
code               string    lengths 75 to 104k
code_tokens        list
docstring          string    lengths 1 to 46.9k
docstring_tokens   list
language           string    1 distinct value
partition          string    3 distinct values
avg_line_len       float64   7.91 to 980
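Each record below pairs a Python function with its docstring and bookkeeping fields (repository, file path, source URL, token lists, language, train/valid/test partition, average line length). As a minimal sketch only -- the dump does not state how the records are stored, so the JSON Lines layout and the "functions.jsonl" file name below are assumptions -- rows with this schema could be read and filtered like this:

import json

# Hypothetical file name; the actual dataset location is not given in this dump.
PATH = "functions.jsonl"

with open(PATH, "r", encoding="utf-8") as handle:
    for line in handle:
        record = json.loads(line)  # one dict per row with the columns listed above
        # Keep short test-partition functions, e.g. for a quick evaluation set.
        if record["partition"] == "test" and record["avg_line_len"] < 40:
            print(record["repo"], record["path"])
            print(record["docstring"].split("\n")[0])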
kgiusti/pyngus
examples/rpc-client.py
https://github.com/kgiusti/pyngus/blob/5392392046989f1bb84ba938c30e4d48311075f1/examples/rpc-client.py#L245-L258
def _send_request(self):
    """Send a message containing the RPC method call
    """
    msg = Message()
    msg.subject = "An RPC call!"
    msg.address = self._to
    msg.reply_to = self._reply_to
    msg.body = self._method
    msg.correlation_id = 5  # whatever...
    print("sending RPC call request: %s" % str(self._method))
    # @todo send timeout self._sender.send(msg, self, None, time.time() +
    # 10)
    self._sender.send(msg, self)
[ "def", "_send_request", "(", "self", ")", ":", "msg", "=", "Message", "(", ")", "msg", ".", "subject", "=", "\"An RPC call!\"", "msg", ".", "address", "=", "self", ".", "_to", "msg", ".", "reply_to", "=", "self", ".", "_reply_to", "msg", ".", "body", "=", "self", ".", "_method", "msg", ".", "correlation_id", "=", "5", "# whatever...", "print", "(", "\"sending RPC call request: %s\"", "%", "str", "(", "self", ".", "_method", ")", ")", "# @todo send timeout self._sender.send(msg, self, None, time.time() +", "# 10)", "self", ".", "_sender", ".", "send", "(", "msg", ",", "self", ")" ]
Send a message containing the RPC method call
[ "Send", "a", "message", "containing", "the", "RPC", "method", "call" ]
python
test
34.571429
deepmipt/DeepPavlov
deeppavlov/core/layers/tf_layers.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/layers/tf_layers.py#L74-L113
def dense_convolutional_network(units: tf.Tensor, n_hidden_list: List, filter_width=3,
                                use_dilation=False, use_batch_norm=False, training_ph=None):
    """ Densely connected convolutional layers. Based on the paper:
    [Gao 17] https://arxiv.org/abs/1608.06993

    Args:
        units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
        n_hidden_list: list with number of hidden units at the ouput of each layer
        filter_width: width of the kernel in tokens
        use_batch_norm: whether to use batch normalization between layers
        use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ...
        training_ph: boolean placeholder determining whether is training phase now or not.
            It is used only for batch normalization to determine whether to use
            current batch average (std) or memory stored average (std)
    Returns:
        units: tensor at the output of the last convolutional layer
            with dimensionality [None, n_tokens, n_hidden_list[-1]]
    """
    units_list = [units]
    for n_layer, n_filters in enumerate(n_hidden_list):
        total_units = tf.concat(units_list, axis=-1)
        if use_dilation:
            dilation_rate = 2 ** n_layer
        else:
            dilation_rate = 1
        units = tf.layers.conv1d(total_units,
                                 n_filters,
                                 filter_width,
                                 dilation_rate=dilation_rate,
                                 padding='same',
                                 kernel_initializer=INITIALIZER())
        if use_batch_norm:
            units = tf.layers.batch_normalization(units, training=training_ph)
        units = tf.nn.relu(units)
        units_list.append(units)
    return units
[ "def", "dense_convolutional_network", "(", "units", ":", "tf", ".", "Tensor", ",", "n_hidden_list", ":", "List", ",", "filter_width", "=", "3", ",", "use_dilation", "=", "False", ",", "use_batch_norm", "=", "False", ",", "training_ph", "=", "None", ")", ":", "units_list", "=", "[", "units", "]", "for", "n_layer", ",", "n_filters", "in", "enumerate", "(", "n_hidden_list", ")", ":", "total_units", "=", "tf", ".", "concat", "(", "units_list", ",", "axis", "=", "-", "1", ")", "if", "use_dilation", ":", "dilation_rate", "=", "2", "**", "n_layer", "else", ":", "dilation_rate", "=", "1", "units", "=", "tf", ".", "layers", ".", "conv1d", "(", "total_units", ",", "n_filters", ",", "filter_width", ",", "dilation_rate", "=", "dilation_rate", ",", "padding", "=", "'same'", ",", "kernel_initializer", "=", "INITIALIZER", "(", ")", ")", "if", "use_batch_norm", ":", "units", "=", "tf", ".", "layers", ".", "batch_normalization", "(", "units", ",", "training", "=", "training_ph", ")", "units", "=", "tf", ".", "nn", ".", "relu", "(", "units", ")", "units_list", ".", "append", "(", "units", ")", "return", "units" ]
Densely connected convolutional layers. Based on the paper: [Gao 17] https://arxiv.org/abs/1608.06993 Args: units: a tensorflow tensor with dimensionality [None, n_tokens, n_features] n_hidden_list: list with number of hidden units at the ouput of each layer filter_width: width of the kernel in tokens use_batch_norm: whether to use batch normalization between layers use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. ] for layers 1, 2, 3, 4 ... training_ph: boolean placeholder determining whether is training phase now or not. It is used only for batch normalization to determine whether to use current batch average (std) or memory stored average (std) Returns: units: tensor at the output of the last convolutional layer with dimensionality [None, n_tokens, n_hidden_list[-1]]
[ "Densely", "connected", "convolutional", "layers", ".", "Based", "on", "the", "paper", ":", "[", "Gao", "17", "]", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1608", ".", "06993" ]
python
test
49.65
arkottke/pysra
pysra/variation.py
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/variation.py#L594-L612
def calc_std_mod_reduc(mod_reduc):
    """Calculate the standard deviation as a function of G/G_max.

    Equation 7.29 from Darendeli (2001).

    Parameters
    ----------
    mod_reduc : array_like
        Modulus reduction values.

    Returns
    -------
    std : :class:`numpy.ndarray`
        Standard deviation.
    """
    mod_reduc = np.asarray(mod_reduc).astype(float)
    std = (np.exp(-4.23) + np.sqrt(0.25 / np.exp(3.62) -
                                   (mod_reduc - 0.5) ** 2 / np.exp(3.62)))
    return std
[ "def", "calc_std_mod_reduc", "(", "mod_reduc", ")", ":", "mod_reduc", "=", "np", ".", "asarray", "(", "mod_reduc", ")", ".", "astype", "(", "float", ")", "std", "=", "(", "np", ".", "exp", "(", "-", "4.23", ")", "+", "np", ".", "sqrt", "(", "0.25", "/", "np", ".", "exp", "(", "3.62", ")", "-", "(", "mod_reduc", "-", "0.5", ")", "**", "2", "/", "np", ".", "exp", "(", "3.62", ")", ")", ")", "return", "std" ]
Calculate the standard deviation as a function of G/G_max. Equation 7.29 from Darendeli (2001). Parameters ---------- mod_reduc : array_like Modulus reduction values. Returns ------- std : :class:`numpy.ndarray` Standard deviation.
[ "Calculate", "the", "standard", "deviation", "as", "a", "function", "of", "G", "/", "G_max", "." ]
python
train
29.947368
inveniosoftware/invenio-search
invenio_search/ext.py
https://github.com/inveniosoftware/invenio-search/blob/19c073d608d4c811f1c5aecb6622402d39715228/invenio_search/ext.py#L256-L283
def create(self, ignore=None):
    """Yield tuple with created index name and responses from a client."""
    ignore = ignore or []

    def _create(tree_or_filename, alias=None):
        """Create indices and aliases by walking DFS."""
        # Iterate over aliases:
        for name, value in tree_or_filename.items():
            if isinstance(value, dict):
                for result in _create(value, alias=name):
                    yield result
            else:
                with open(value, 'r') as body:
                    yield name, self.client.indices.create(
                        index=name,
                        body=json.load(body),
                        ignore=ignore,
                    )

        if alias:
            yield alias, self.client.indices.put_alias(
                index=list(_get_indices(tree_or_filename)),
                name=alias,
                ignore=ignore,
            )

    for result in _create(self.active_aliases):
        yield result
[ "def", "create", "(", "self", ",", "ignore", "=", "None", ")", ":", "ignore", "=", "ignore", "or", "[", "]", "def", "_create", "(", "tree_or_filename", ",", "alias", "=", "None", ")", ":", "\"\"\"Create indices and aliases by walking DFS.\"\"\"", "# Iterate over aliases:", "for", "name", ",", "value", "in", "tree_or_filename", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "result", "in", "_create", "(", "value", ",", "alias", "=", "name", ")", ":", "yield", "result", "else", ":", "with", "open", "(", "value", ",", "'r'", ")", "as", "body", ":", "yield", "name", ",", "self", ".", "client", ".", "indices", ".", "create", "(", "index", "=", "name", ",", "body", "=", "json", ".", "load", "(", "body", ")", ",", "ignore", "=", "ignore", ",", ")", "if", "alias", ":", "yield", "alias", ",", "self", ".", "client", ".", "indices", ".", "put_alias", "(", "index", "=", "list", "(", "_get_indices", "(", "tree_or_filename", ")", ")", ",", "name", "=", "alias", ",", "ignore", "=", "ignore", ",", ")", "for", "result", "in", "_create", "(", "self", ".", "active_aliases", ")", ":", "yield", "result" ]
Yield tuple with created index name and responses from a client.
[ "Yield", "tuple", "with", "created", "index", "name", "and", "responses", "from", "a", "client", "." ]
python
train
38.107143
autokey/autokey
lib/autokey/interface.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/interface.py#L630-L635
def _restore_clipboard_text(self, backup: str):
    """Restore the clipboard content."""
    # Pasting takes some time, so wait a bit before restoring the content. Otherwise the restore is done before
    # the pasting happens, causing the backup to be pasted instead of the desired clipboard content.
    time.sleep(0.2)
    self.clipboard.text = backup if backup is not None else ""
[ "def", "_restore_clipboard_text", "(", "self", ",", "backup", ":", "str", ")", ":", "# Pasting takes some time, so wait a bit before restoring the content. Otherwise the restore is done before", "# the pasting happens, causing the backup to be pasted instead of the desired clipboard content.", "time", ".", "sleep", "(", "0.2", ")", "self", ".", "clipboard", ".", "text", "=", "backup", "if", "backup", "is", "not", "None", "else", "\"\"" ]
Restore the clipboard content.
[ "Restore", "the", "clipboard", "content", "." ]
python
train
66.5
mitsei/dlkit
dlkit/json_/grading/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/managers.py#L746-L761
def get_grade_entry_admin_session(self):
    """Gets the ``OsidSession`` associated with the grade entry administration service.

    return: (osid.grading.GradeEntryAdminSession) - a
            ``GradeEntryAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_grade_entry_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_admin()`` is ``true``.*

    """
    if not self.supports_grade_entry_admin():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.GradeEntryAdminSession(runtime=self._runtime)
[ "def", "get_grade_entry_admin_session", "(", "self", ")", ":", "if", "not", "self", ".", "supports_grade_entry_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "GradeEntryAdminSession", "(", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the ``OsidSession`` associated with the grade entry administration service. return: (osid.grading.GradeEntryAdminSession) - a ``GradeEntryAdminSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_grade_entry_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_grade_entry_admin()`` is ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "grade", "entry", "administration", "service", "." ]
python
train
44.1875
royi1000/py-libhdate
hdate/converters.py
https://github.com/royi1000/py-libhdate/blob/12af759fb69f1d6403abed3762beaf5ace16a34b/hdate/converters.py#L125-L149
def jdn_to_gdate(jdn):
    """
    Convert from the Julian day to the Gregorian day.

    Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
    Return: day, month, year
    """
    # pylint: disable=invalid-name
    # The algorithm is a verbatim copy from Peter Meyer's article
    # No explanation in the article is given for the variables
    # Hence the exceptions for pylint and for flake8 (E741)
    l = jdn + 68569  # noqa: E741
    n = (4 * l) // 146097
    l = l - (146097 * n + 3) // 4  # noqa: E741
    i = (4000 * (l + 1)) // 1461001  # that's 1,461,001
    l = l - (1461 * i) // 4 + 31  # noqa: E741
    j = (80 * l) // 2447
    day = l - (2447 * j) // 80
    l = j // 11  # noqa: E741
    month = j + 2 - (12 * l)
    year = 100 * (n - 49) + i + l  # that's a lower-case L
    return datetime.date(year, month, day)
[ "def", "jdn_to_gdate", "(", "jdn", ")", ":", "# pylint: disable=invalid-name", "# The algorithm is a verbatim copy from Peter Meyer's article", "# No explanation in the article is given for the variables", "# Hence the exceptions for pylint and for flake8 (E741)", "l", "=", "jdn", "+", "68569", "# noqa: E741", "n", "=", "(", "4", "*", "l", ")", "//", "146097", "l", "=", "l", "-", "(", "146097", "*", "n", "+", "3", ")", "//", "4", "# noqa: E741", "i", "=", "(", "4000", "*", "(", "l", "+", "1", ")", ")", "//", "1461001", "# that's 1,461,001", "l", "=", "l", "-", "(", "1461", "*", "i", ")", "//", "4", "+", "31", "# noqa: E741", "j", "=", "(", "80", "*", "l", ")", "//", "2447", "day", "=", "l", "-", "(", "2447", "*", "j", ")", "//", "80", "l", "=", "j", "//", "11", "# noqa: E741", "month", "=", "j", "+", "2", "-", "(", "12", "*", "l", ")", "year", "=", "100", "*", "(", "n", "-", "49", ")", "+", "i", "+", "l", "# that's a lower-case L", "return", "datetime", ".", "date", "(", "year", ",", "month", ",", "day", ")" ]
Convert from the Julian day to the Gregorian day. Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer. Return: day, month, year
[ "Convert", "from", "the", "Julian", "day", "to", "the", "Gregorian", "day", "." ]
python
train
32.92
tanghaibao/jcvi
jcvi/compara/fractionation.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/fractionation.py#L222-L285
def merge(args): """ %prog merge protein-quartets registry LOST Merge protein quartets table with dna quartets registry. This is specific to the napus project. """ from jcvi.formats.base import DictFile p = OptionParser(merge.__doc__) opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) quartets, registry, lost = args qq = DictFile(registry, keypos=1, valuepos=3) lost = DictFile(lost, keypos=1, valuepos=0, delimiter='|') qq.update(lost) fp = open(quartets) cases = { "AN,CN": 4, "BO,AN,CN": 8, "BO,CN": 2, "BR,AN": 1, "BR,AN,CN": 6, "BR,BO": 3, "BR,BO,AN": 5, "BR,BO,AN,CN": 9, "BR,BO,CN": 7, } ip = { "syntenic_model": "Syntenic_model_excluded_by_OMG", "complete": "Predictable", "partial": "Truncated", "pseudogene": "Pseudogene", "random": "Match_random", "real_ns": "Transposed", "gmap_fail": "GMAP_fail", "AN LOST": "AN_LOST", "CN LOST": "CN_LOST", "BR LOST": "BR_LOST", "BO LOST": "BO_LOST", "outside": "Outside_synteny_blocks", "[NF]": "Not_found", } for row in fp: atoms = row.strip().split("\t") genes = atoms[:4] tag = atoms[4] a, b, c, d = [qq.get(x, ".").rsplit("-", 1)[-1] for x in genes] qqs = [c, d, a, b] for i, q in enumerate(qqs): if atoms[i] != '.': qqs[i] = "syntenic_model" # Make comment comment = "Case{0}".format(cases[tag]) dots = sum([1 for x in genes if x == '.']) if dots == 1: idx = genes.index(".") status = qqs[idx] status = ip[status] comment += "-" + status print(row.strip() + "\t" + "\t".join(qqs + [comment]))
[ "def", "merge", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "base", "import", "DictFile", "p", "=", "OptionParser", "(", "merge", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "3", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "quartets", ",", "registry", ",", "lost", "=", "args", "qq", "=", "DictFile", "(", "registry", ",", "keypos", "=", "1", ",", "valuepos", "=", "3", ")", "lost", "=", "DictFile", "(", "lost", ",", "keypos", "=", "1", ",", "valuepos", "=", "0", ",", "delimiter", "=", "'|'", ")", "qq", ".", "update", "(", "lost", ")", "fp", "=", "open", "(", "quartets", ")", "cases", "=", "{", "\"AN,CN\"", ":", "4", ",", "\"BO,AN,CN\"", ":", "8", ",", "\"BO,CN\"", ":", "2", ",", "\"BR,AN\"", ":", "1", ",", "\"BR,AN,CN\"", ":", "6", ",", "\"BR,BO\"", ":", "3", ",", "\"BR,BO,AN\"", ":", "5", ",", "\"BR,BO,AN,CN\"", ":", "9", ",", "\"BR,BO,CN\"", ":", "7", ",", "}", "ip", "=", "{", "\"syntenic_model\"", ":", "\"Syntenic_model_excluded_by_OMG\"", ",", "\"complete\"", ":", "\"Predictable\"", ",", "\"partial\"", ":", "\"Truncated\"", ",", "\"pseudogene\"", ":", "\"Pseudogene\"", ",", "\"random\"", ":", "\"Match_random\"", ",", "\"real_ns\"", ":", "\"Transposed\"", ",", "\"gmap_fail\"", ":", "\"GMAP_fail\"", ",", "\"AN LOST\"", ":", "\"AN_LOST\"", ",", "\"CN LOST\"", ":", "\"CN_LOST\"", ",", "\"BR LOST\"", ":", "\"BR_LOST\"", ",", "\"BO LOST\"", ":", "\"BO_LOST\"", ",", "\"outside\"", ":", "\"Outside_synteny_blocks\"", ",", "\"[NF]\"", ":", "\"Not_found\"", ",", "}", "for", "row", "in", "fp", ":", "atoms", "=", "row", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "genes", "=", "atoms", "[", ":", "4", "]", "tag", "=", "atoms", "[", "4", "]", "a", ",", "b", ",", "c", ",", "d", "=", "[", "qq", ".", "get", "(", "x", ",", "\".\"", ")", ".", "rsplit", "(", "\"-\"", ",", "1", ")", "[", "-", "1", "]", "for", "x", "in", "genes", "]", "qqs", "=", "[", "c", ",", "d", ",", "a", ",", "b", "]", "for", "i", ",", "q", "in", "enumerate", "(", "qqs", ")", ":", "if", "atoms", "[", "i", "]", "!=", "'.'", ":", "qqs", "[", "i", "]", "=", "\"syntenic_model\"", "# Make comment", "comment", "=", "\"Case{0}\"", ".", "format", "(", "cases", "[", "tag", "]", ")", "dots", "=", "sum", "(", "[", "1", "for", "x", "in", "genes", "if", "x", "==", "'.'", "]", ")", "if", "dots", "==", "1", ":", "idx", "=", "genes", ".", "index", "(", "\".\"", ")", "status", "=", "qqs", "[", "idx", "]", "status", "=", "ip", "[", "status", "]", "comment", "+=", "\"-\"", "+", "status", "print", "(", "row", ".", "strip", "(", ")", "+", "\"\\t\"", "+", "\"\\t\"", ".", "join", "(", "qqs", "+", "[", "comment", "]", ")", ")" ]
%prog merge protein-quartets registry LOST Merge protein quartets table with dna quartets registry. This is specific to the napus project.
[ "%prog", "merge", "protein", "-", "quartets", "registry", "LOST" ]
python
train
28.765625
aluzzardi/wssh
wssh/server.py
https://github.com/aluzzardi/wssh/blob/4ccde12af67a0d7a121294ec6c4a5ec3c17de425/wssh/server.py#L169-L180
def shell(self, term='xterm'):
    """
    Start an interactive shell session

    This method invokes a shell on the remote SSH server and proxies
    traffic to/from both peers.

    You must connect to a SSH server using ssh_connect()
    prior to starting the session.
    """
    channel = self._ssh.invoke_shell(term)
    self._bridge(channel)
    channel.close()
[ "def", "shell", "(", "self", ",", "term", "=", "'xterm'", ")", ":", "channel", "=", "self", ".", "_ssh", ".", "invoke_shell", "(", "term", ")", "self", ".", "_bridge", "(", "channel", ")", "channel", ".", "close", "(", ")" ]
Start an interactive shell session This method invokes a shell on the remote SSH server and proxies traffic to/from both peers. You must connect to a SSH server using ssh_connect() prior to starting the session.
[ "Start", "an", "interactive", "shell", "session" ]
python
train
32.5
pecan/pecan
pecan/routing.py
https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/routing.py#L119-L170
def lookup_controller(obj, remainder, request=None): ''' Traverses the requested url path and returns the appropriate controller object, including default routes. Handles common errors gracefully. ''' if request is None: warnings.warn( ( "The function signature for %s.lookup_controller is changing " "in the next version of pecan.\nPlease update to: " "`lookup_controller(self, obj, remainder, request)`." % ( __name__, ) ), DeprecationWarning ) notfound_handlers = [] while True: try: obj, remainder = find_object(obj, remainder, notfound_handlers, request) handle_security(obj) return obj, remainder except (exc.HTTPNotFound, exc.HTTPMethodNotAllowed, PecanNotFound) as e: if isinstance(e, PecanNotFound): e = exc.HTTPNotFound() while notfound_handlers: name, obj, remainder = notfound_handlers.pop() if name == '_default': # Notfound handler is, in fact, a controller, so stop # traversal return obj, remainder else: # Notfound handler is an internal redirect, so continue # traversal result = handle_lookup_traversal(obj, remainder) if result: # If no arguments are passed to the _lookup, yet the # argspec requires at least one, raise a 404 if ( remainder == [''] and len(obj._pecan['argspec'].args) > 1 ): raise e obj_, remainder_ = result return lookup_controller(obj_, remainder_, request) else: raise e
[ "def", "lookup_controller", "(", "obj", ",", "remainder", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "warnings", ".", "warn", "(", "(", "\"The function signature for %s.lookup_controller is changing \"", "\"in the next version of pecan.\\nPlease update to: \"", "\"`lookup_controller(self, obj, remainder, request)`.\"", "%", "(", "__name__", ",", ")", ")", ",", "DeprecationWarning", ")", "notfound_handlers", "=", "[", "]", "while", "True", ":", "try", ":", "obj", ",", "remainder", "=", "find_object", "(", "obj", ",", "remainder", ",", "notfound_handlers", ",", "request", ")", "handle_security", "(", "obj", ")", "return", "obj", ",", "remainder", "except", "(", "exc", ".", "HTTPNotFound", ",", "exc", ".", "HTTPMethodNotAllowed", ",", "PecanNotFound", ")", "as", "e", ":", "if", "isinstance", "(", "e", ",", "PecanNotFound", ")", ":", "e", "=", "exc", ".", "HTTPNotFound", "(", ")", "while", "notfound_handlers", ":", "name", ",", "obj", ",", "remainder", "=", "notfound_handlers", ".", "pop", "(", ")", "if", "name", "==", "'_default'", ":", "# Notfound handler is, in fact, a controller, so stop", "# traversal", "return", "obj", ",", "remainder", "else", ":", "# Notfound handler is an internal redirect, so continue", "# traversal", "result", "=", "handle_lookup_traversal", "(", "obj", ",", "remainder", ")", "if", "result", ":", "# If no arguments are passed to the _lookup, yet the", "# argspec requires at least one, raise a 404", "if", "(", "remainder", "==", "[", "''", "]", "and", "len", "(", "obj", ".", "_pecan", "[", "'argspec'", "]", ".", "args", ")", ">", "1", ")", ":", "raise", "e", "obj_", ",", "remainder_", "=", "result", "return", "lookup_controller", "(", "obj_", ",", "remainder_", ",", "request", ")", "else", ":", "raise", "e" ]
Traverses the requested url path and returns the appropriate controller object, including default routes. Handles common errors gracefully.
[ "Traverses", "the", "requested", "url", "path", "and", "returns", "the", "appropriate", "controller", "object", "including", "default", "routes", "." ]
python
train
39
mozilla-iot/webthing-python
webthing/thing.py
https://github.com/mozilla-iot/webthing-python/blob/65d467c89ed79d0bbc42b8b3c8f9e5a320edd237/webthing/thing.py#L298-L305
def add_event(self, event):
    """
    Add a new event and notify subscribers.

    event -- the event that occurred
    """
    self.events.append(event)
    self.event_notify(event)
[ "def", "add_event", "(", "self", ",", "event", ")", ":", "self", ".", "events", ".", "append", "(", "event", ")", "self", ".", "event_notify", "(", "event", ")" ]
Add a new event and notify subscribers. event -- the event that occurred
[ "Add", "a", "new", "event", "and", "notify", "subscribers", "." ]
python
test
25.125
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py#L372-L386
def __xinclude_lxml(target, source, env):
    """ Resolving XIncludes, using the lxml module.
    """
    from lxml import etree

    doc = etree.parse(str(source[0]))
    doc.xinclude()
    try:
        doc.write(str(target[0]), xml_declaration=True,
                  encoding="UTF-8", pretty_print=True)
    except:
        pass

    return None
[ "def", "__xinclude_lxml", "(", "target", ",", "source", ",", "env", ")", ":", "from", "lxml", "import", "etree", "doc", "=", "etree", ".", "parse", "(", "str", "(", "source", "[", "0", "]", ")", ")", "doc", ".", "xinclude", "(", ")", "try", ":", "doc", ".", "write", "(", "str", "(", "target", "[", "0", "]", ")", ",", "xml_declaration", "=", "True", ",", "encoding", "=", "\"UTF-8\"", ",", "pretty_print", "=", "True", ")", "except", ":", "pass", "return", "None" ]
Resolving XIncludes, using the lxml module.
[ "Resolving", "XIncludes", "using", "the", "lxml", "module", "." ]
python
train
22.866667
CGATOxford/UMI-tools
umi_tools/network.py
https://github.com/CGATOxford/UMI-tools/blob/c4b5d84aac391d59916d294f8f4f8f5378abcfbe/umi_tools/network.py#L433-L463
def _get_adj_list_directional(self, umis, counts):
    ''' identify all umis within the hamming distance threshold
    and where the counts of the first umi is > (2 * second umi counts)-1'''

    adj_list = {umi: [] for umi in umis}
    if self.fuzzy_match:
        for umi1 in umis:
            # we need a second regex for some insertions,
            # e.g UMI1 = "ATCG", UMI2 = "ATTC"
            comp_regex_err = regex.compile("(%s){e<=1}" % str(umi1))
            comp_regex_del = regex.compile("(%s){i<=1}" % str(umi1)[::-1])
            for umi2 in umis:
                if umi1 == umi2:
                    continue
                if counts[umi1] >= (counts[umi2]*self.dir_threshold):
                    if (max(len(umi1), len(umi2)) -
                            min(len(umi1), len(umi2))) > 1:
                        continue
                    if (comp_regex_err.match(str(umi2)) or
                            comp_regex_del.match(str(umi2))):
                        adj_list[umi1].append(umi2)
    else:
        for umi1, umi2 in itertools.combinations(umis, 2):
            if edit_distance(umi1, umi2) <= 1:
                if counts[umi1] >= (counts[umi2]*2)-1:
                    adj_list[umi1].append(umi2)
                if counts[umi2] >= (counts[umi1]*2)-1:
                    adj_list[umi2].append(umi1)

    return adj_list
[ "def", "_get_adj_list_directional", "(", "self", ",", "umis", ",", "counts", ")", ":", "adj_list", "=", "{", "umi", ":", "[", "]", "for", "umi", "in", "umis", "}", "if", "self", ".", "fuzzy_match", ":", "for", "umi1", "in", "umis", ":", "# we need a second regex for some insertions,", "# e.g UMI1 = \"ATCG\", UMI2 = \"ATTC\"", "comp_regex_err", "=", "regex", ".", "compile", "(", "\"(%s){e<=1}\"", "%", "str", "(", "umi1", ")", ")", "comp_regex_del", "=", "regex", ".", "compile", "(", "\"(%s){i<=1}\"", "%", "str", "(", "umi1", ")", "[", ":", ":", "-", "1", "]", ")", "for", "umi2", "in", "umis", ":", "if", "umi1", "==", "umi2", ":", "continue", "if", "counts", "[", "umi1", "]", ">=", "(", "counts", "[", "umi2", "]", "*", "self", ".", "dir_threshold", ")", ":", "if", "(", "max", "(", "len", "(", "umi1", ")", ",", "len", "(", "umi2", ")", ")", "-", "min", "(", "len", "(", "umi1", ")", ",", "len", "(", "umi2", ")", ")", ")", ">", "1", ":", "continue", "if", "(", "comp_regex_err", ".", "match", "(", "str", "(", "umi2", ")", ")", "or", "comp_regex_del", ".", "match", "(", "str", "(", "umi2", ")", ")", ")", ":", "adj_list", "[", "umi1", "]", ".", "append", "(", "umi2", ")", "else", ":", "for", "umi1", ",", "umi2", "in", "itertools", ".", "combinations", "(", "umis", ",", "2", ")", ":", "if", "edit_distance", "(", "umi1", ",", "umi2", ")", "<=", "1", ":", "if", "counts", "[", "umi1", "]", ">=", "(", "counts", "[", "umi2", "]", "*", "2", ")", "-", "1", ":", "adj_list", "[", "umi1", "]", ".", "append", "(", "umi2", ")", "if", "counts", "[", "umi2", "]", ">=", "(", "counts", "[", "umi1", "]", "*", "2", ")", "-", "1", ":", "adj_list", "[", "umi2", "]", ".", "append", "(", "umi1", ")", "return", "adj_list" ]
identify all umis within the hamming distance threshold and where the counts of the first umi is > (2 * second umi counts)-1
[ "identify", "all", "umis", "within", "the", "hamming", "distance", "threshold", "and", "where", "the", "counts", "of", "the", "first", "umi", "is", ">", "(", "2", "*", "second", "umi", "counts", ")", "-", "1" ]
python
train
46
Scoppio/RagnarokEngine3
Tutorials/Tutorial 3 - Creating new entities/tuto3.py
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/Tutorials/Tutorial 3 - Creating new entities/tuto3.py#L120-L130
def __generate_location(self):
    """
    Reset the location of the cloud once it has left the viewable area of the screen.
    """
    screen_width = world.get_backbuffer_size().X
    self.movement_speed = random.randrange(10, 25)
    # This line of code places the cloud to the right of the viewable screen, so it appears to
    # gradually move in from the right instead of randomally appearing on some portion of the viewable
    # window.
    self.coords = R.Vector2(screen_width + self.image.get_width(), random.randrange(0, 100))
[ "def", "__generate_location", "(", "self", ")", ":", "screen_width", "=", "world", ".", "get_backbuffer_size", "(", ")", ".", "X", "self", ".", "movement_speed", "=", "random", ".", "randrange", "(", "10", ",", "25", ")", "# This line of code places the cloud to the right of the viewable screen, so it appears to", "# gradually move in from the right instead of randomally appearing on some portion of the viewable", "# window.", "self", ".", "coords", "=", "R", ".", "Vector2", "(", "screen_width", "+", "self", ".", "image", ".", "get_width", "(", ")", ",", "random", ".", "randrange", "(", "0", ",", "100", ")", ")" ]
Reset the location of the cloud once it has left the viewable area of the screen.
[ "Reset", "the", "location", "of", "the", "cloud", "once", "it", "has", "left", "the", "viewable", "area", "of", "the", "screen", "." ]
python
train
51.272727
square/pylink
examples/rtt.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/examples/rtt.py#L68-L90
def write_rtt(jlink):
    """Writes kayboard input to JLink RTT buffer #0.

    This method is a loop that blocks waiting on stdin. When enter is pressed,
    LF and NUL bytes are added to the input and transmitted as a byte list.
    If the JLink is disconnected, it will exit gracefully. If any other
    exceptions are raised, they will be caught and re-raised after interrupting
    the main thread.

    Args:
      jlink (pylink.JLink): The JLink to write to.

    Raises:
      Exception on error.
    """
    try:
        while jlink.connected():
            bytes = list(bytearray(input(), "utf-8") + b"\x0A\x00")
            bytes_written = jlink.rtt_write(0, bytes)
    except Exception:
        print("IO write thread exception, exiting...")
        thread.interrupt_main()
        raise
[ "def", "write_rtt", "(", "jlink", ")", ":", "try", ":", "while", "jlink", ".", "connected", "(", ")", ":", "bytes", "=", "list", "(", "bytearray", "(", "input", "(", ")", ",", "\"utf-8\"", ")", "+", "b\"\\x0A\\x00\"", ")", "bytes_written", "=", "jlink", ".", "rtt_write", "(", "0", ",", "bytes", ")", "except", "Exception", ":", "print", "(", "\"IO write thread exception, exiting...\"", ")", "thread", ".", "interrupt_main", "(", ")", "raise" ]
Writes kayboard input to JLink RTT buffer #0. This method is a loop that blocks waiting on stdin. When enter is pressed, LF and NUL bytes are added to the input and transmitted as a byte list. If the JLink is disconnected, it will exit gracefully. If any other exceptions are raised, they will be caught and re-raised after interrupting the main thread. Args: jlink (pylink.JLink): The JLink to write to. Raises: Exception on error.
[ "Writes", "kayboard", "input", "to", "JLink", "RTT", "buffer", "#0", "." ]
python
train
33.782609
tonioo/sievelib
sievelib/commands.py
https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/commands.py#L361-L387
def __is_valid_value_for_arg(self, arg, value, check_extension=True):
    """Check if value is allowed for arg

    Some commands only allow a limited set of values. The method always
    returns True for methods that do not provide such a set.

    :param arg: the argument's name
    :param value: the value to check
    :param check_extension: check if value requires an extension
    :return: True on succes, False otherwise
    """
    if "values" not in arg and "extension_values" not in arg:
        return True

    if "values" in arg and value.lower() in arg["values"]:
        return True

    if "extension_values" in arg:
        extension = arg["extension_values"].get(value.lower())
        if extension:
            condition = (
                check_extension and
                extension not in RequireCommand.loaded_extensions
            )
            if condition:
                raise ExtensionNotLoaded(extension)
            return True
    return False
[ "def", "__is_valid_value_for_arg", "(", "self", ",", "arg", ",", "value", ",", "check_extension", "=", "True", ")", ":", "if", "\"values\"", "not", "in", "arg", "and", "\"extension_values\"", "not", "in", "arg", ":", "return", "True", "if", "\"values\"", "in", "arg", "and", "value", ".", "lower", "(", ")", "in", "arg", "[", "\"values\"", "]", ":", "return", "True", "if", "\"extension_values\"", "in", "arg", ":", "extension", "=", "arg", "[", "\"extension_values\"", "]", ".", "get", "(", "value", ".", "lower", "(", ")", ")", "if", "extension", ":", "condition", "=", "(", "check_extension", "and", "extension", "not", "in", "RequireCommand", ".", "loaded_extensions", ")", "if", "condition", ":", "raise", "ExtensionNotLoaded", "(", "extension", ")", "return", "True", "return", "False" ]
Check if value is allowed for arg Some commands only allow a limited set of values. The method always returns True for methods that do not provide such a set. :param arg: the argument's name :param value: the value to check :param check_extension: check if value requires an extension :return: True on succes, False otherwise
[ "Check", "if", "value", "is", "allowed", "for", "arg" ]
python
train
38.925926
gwastro/pycbc
pycbc/tmpltbank/partitioned_bank.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/partitioned_bank.py#L297-L361
def calc_point_distance_vary(self, chi_coords, point_fupper, mus): """ Calculate distance between point and the bank allowing the metric to vary based on varying upper frequency cutoff. Slower than calc_point_distance, but more reliable when upper frequency cutoff can change a lot. Parameters ----------- chi_coords : numpy.array The position of the point in the chi coordinates. point_fupper : float The upper frequency cutoff to use for this point. This value must be one of the ones already calculated in the metric. mus : numpy.array A 2D array where idx 0 holds the upper frequency cutoff and idx 1 holds the coordinates in the [not covaried] mu parameter space for each value of the upper frequency cutoff. Returns -------- min_dist : float The smallest **SQUARED** metric distance between the test point and the bank. indexes : The chi1_bin, chi2_bin and position within that bin at which the closest matching point lies. """ chi1_bin, chi2_bin = self.find_point_bin(chi_coords) min_dist = 1000000000 indexes = None for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order: curr_chi1_bin = chi1_bin + chi1_bin_offset curr_chi2_bin = chi2_bin + chi2_bin_offset # No points = Next iteration curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin] if not curr_bank['mass1s'].size: continue # *NOT* the same of .min and .max f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts']) f_other = numpy.maximum(point_fupper, curr_bank['freqcuts']) # NOTE: freq_idxes is a vector! freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper]) # vecs1 gives a 2x2 vector: idx0 = stored index, idx1 = mu index vecs1 = mus[freq_idxes, :] # vecs2 gives a 2x2 vector: idx0 = stored index, idx1 = mu index range_idxes = numpy.arange(len(freq_idxes)) vecs2 = curr_bank['mus'][range_idxes, freq_idxes, :] # Now do the sums dists = (vecs1 - vecs2)*(vecs1 - vecs2) # This reduces to 1D: idx = stored index dists = numpy.sum(dists, axis=1) norm_upper = numpy.array([self.normalization_map[f] \ for f in f_upper]) norm_other = numpy.array([self.normalization_map[f] \ for f in f_other]) norm_fac = norm_upper / norm_other renormed_dists = 1 - (1 - dists)*norm_fac curr_min_dist = renormed_dists.min() if curr_min_dist < min_dist: min_dist = curr_min_dist indexes = curr_chi1_bin, curr_chi2_bin, renormed_dists.argmin() return min_dist, indexes
[ "def", "calc_point_distance_vary", "(", "self", ",", "chi_coords", ",", "point_fupper", ",", "mus", ")", ":", "chi1_bin", ",", "chi2_bin", "=", "self", ".", "find_point_bin", "(", "chi_coords", ")", "min_dist", "=", "1000000000", "indexes", "=", "None", "for", "chi1_bin_offset", ",", "chi2_bin_offset", "in", "self", ".", "bin_loop_order", ":", "curr_chi1_bin", "=", "chi1_bin", "+", "chi1_bin_offset", "curr_chi2_bin", "=", "chi2_bin", "+", "chi2_bin_offset", "# No points = Next iteration", "curr_bank", "=", "self", ".", "massbank", "[", "curr_chi1_bin", "]", "[", "curr_chi2_bin", "]", "if", "not", "curr_bank", "[", "'mass1s'", "]", ".", "size", ":", "continue", "# *NOT* the same of .min and .max", "f_upper", "=", "numpy", ".", "minimum", "(", "point_fupper", ",", "curr_bank", "[", "'freqcuts'", "]", ")", "f_other", "=", "numpy", ".", "maximum", "(", "point_fupper", ",", "curr_bank", "[", "'freqcuts'", "]", ")", "# NOTE: freq_idxes is a vector!", "freq_idxes", "=", "numpy", ".", "array", "(", "[", "self", ".", "frequency_map", "[", "f", "]", "for", "f", "in", "f_upper", "]", ")", "# vecs1 gives a 2x2 vector: idx0 = stored index, idx1 = mu index", "vecs1", "=", "mus", "[", "freq_idxes", ",", ":", "]", "# vecs2 gives a 2x2 vector: idx0 = stored index, idx1 = mu index", "range_idxes", "=", "numpy", ".", "arange", "(", "len", "(", "freq_idxes", ")", ")", "vecs2", "=", "curr_bank", "[", "'mus'", "]", "[", "range_idxes", ",", "freq_idxes", ",", ":", "]", "# Now do the sums", "dists", "=", "(", "vecs1", "-", "vecs2", ")", "*", "(", "vecs1", "-", "vecs2", ")", "# This reduces to 1D: idx = stored index", "dists", "=", "numpy", ".", "sum", "(", "dists", ",", "axis", "=", "1", ")", "norm_upper", "=", "numpy", ".", "array", "(", "[", "self", ".", "normalization_map", "[", "f", "]", "for", "f", "in", "f_upper", "]", ")", "norm_other", "=", "numpy", ".", "array", "(", "[", "self", ".", "normalization_map", "[", "f", "]", "for", "f", "in", "f_other", "]", ")", "norm_fac", "=", "norm_upper", "/", "norm_other", "renormed_dists", "=", "1", "-", "(", "1", "-", "dists", ")", "*", "norm_fac", "curr_min_dist", "=", "renormed_dists", ".", "min", "(", ")", "if", "curr_min_dist", "<", "min_dist", ":", "min_dist", "=", "curr_min_dist", "indexes", "=", "curr_chi1_bin", ",", "curr_chi2_bin", ",", "renormed_dists", ".", "argmin", "(", ")", "return", "min_dist", ",", "indexes" ]
Calculate distance between point and the bank allowing the metric to vary based on varying upper frequency cutoff. Slower than calc_point_distance, but more reliable when upper frequency cutoff can change a lot. Parameters ----------- chi_coords : numpy.array The position of the point in the chi coordinates. point_fupper : float The upper frequency cutoff to use for this point. This value must be one of the ones already calculated in the metric. mus : numpy.array A 2D array where idx 0 holds the upper frequency cutoff and idx 1 holds the coordinates in the [not covaried] mu parameter space for each value of the upper frequency cutoff. Returns -------- min_dist : float The smallest **SQUARED** metric distance between the test point and the bank. indexes : The chi1_bin, chi2_bin and position within that bin at which the closest matching point lies.
[ "Calculate", "distance", "between", "point", "and", "the", "bank", "allowing", "the", "metric", "to", "vary", "based", "on", "varying", "upper", "frequency", "cutoff", ".", "Slower", "than", "calc_point_distance", "but", "more", "reliable", "when", "upper", "frequency", "cutoff", "can", "change", "a", "lot", "." ]
python
train
45.753846
noxdafox/clipspy
clips/classes.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/classes.py#L446-L448
def initializable(self):
    """True if the Slot is initializable."""
    return bool(lib.EnvSlotInitableP(self._env, self._cls, self._name))
[ "def", "initializable", "(", "self", ")", ":", "return", "bool", "(", "lib", ".", "EnvSlotInitableP", "(", "self", ".", "_env", ",", "self", ".", "_cls", ",", "self", ".", "_name", ")", ")" ]
True if the Slot is initializable.
[ "True", "if", "the", "Slot", "is", "initializable", "." ]
python
train
49
treycucco/bidon
bidon/db/core/sql_writer.py
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/core/sql_writer.py#L78-L89
def transform_op(self, op, value):
    """For comparisons, if the value is None (null), the '=' operator must be replaced with ' is '
    and the '!=' operator must be replaced with ' is not '. This function handles that conversion.
    It's up to the caller to call this function only on comparisons and not on assignments.
    """
    if value is None:
        if _EQ_RE.match(op):
            return "is"
        elif _NEQ_RE.match(op):
            return "is not"
    return op
[ "def", "transform_op", "(", "self", ",", "op", ",", "value", ")", ":", "if", "value", "is", "None", ":", "if", "_EQ_RE", ".", "match", "(", "op", ")", ":", "return", "\"is\"", "elif", "_NEQ_RE", ".", "match", "(", "op", ")", ":", "return", "\"is not\"", "return", "op" ]
For comparisons, if the value is None (null), the '=' operator must be replaced with ' is ' and the '!=' operator must be replaced with ' is not '. This function handles that conversion. It's up to the caller to call this function only on comparisons and not on assignments.
[ "For", "comparisons", "if", "the", "value", "is", "None", "(", "null", ")", "the", "=", "operator", "must", "be", "replaced", "with", "is", "and", "the", "!", "=", "operator", "must", "be", "replaced", "with", "is", "not", ".", "This", "function", "handles", "that", "conversion", ".", "It", "s", "up", "to", "the", "caller", "to", "call", "this", "function", "only", "on", "comparisons", "and", "not", "on", "assignments", "." ]
python
train
38.25
lucaoflaif/pyCoinMarketCapAPI
coinmarketcapapi/cache.py
https://github.com/lucaoflaif/pyCoinMarketCapAPI/blob/a6ab1fa57c0610e8abf3a03e9144379bf9e49907/coinmarketcapapi/cache.py#L77-L85
def get_unset_cache(self):
    """return : returns a tuple (num_of_not_None_caches,
    [list of unset caches endpoint])
    """
    caches = []
    if self._cached_api_global_response is None:
        caches.append('global')
    if self._cached_api_ticker_response is None:
        caches.append('ticker')
    return (len(caches), caches)
[ "def", "get_unset_cache", "(", "self", ")", ":", "caches", "=", "[", "]", "if", "self", ".", "_cached_api_global_response", "is", "None", ":", "caches", ".", "append", "(", "'global'", ")", "if", "self", ".", "_cached_api_ticker_response", "is", "None", ":", "caches", ".", "append", "(", "'ticker'", ")", "return", "(", "len", "(", "caches", ")", ",", "caches", ")" ]
return : returns a tuple (num_of_not_None_caches, [list of unset caches endpoint])
[ "return", ":", "returns", "a", "tuple", "(", "num_of_not_None_caches", "[", "list", "of", "unset", "caches", "endpoint", "]", ")" ]
python
train
39.888889
aleju/imgaug
imgaug/augmentables/polys.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/polys.py#L1158-L1182
def remove_out_of_image(self, fully=True, partly=False):
    """
    Remove all polygons that are fully or partially outside of the image.

    Parameters
    ----------
    fully : bool, optional
        Whether to remove polygons that are fully outside of the image.

    partly : bool, optional
        Whether to remove polygons that are partially outside of the image.

    Returns
    -------
    imgaug.PolygonsOnImage
        Reduced set of polygons, with those that were fully/partially
        outside of the image removed.

    """
    polys_clean = [
        poly for poly in self.polygons
        if not poly.is_out_of_image(self.shape, fully=fully, partly=partly)
    ]
    # TODO use deepcopy() here
    return PolygonsOnImage(polys_clean, shape=self.shape)
[ "def", "remove_out_of_image", "(", "self", ",", "fully", "=", "True", ",", "partly", "=", "False", ")", ":", "polys_clean", "=", "[", "poly", "for", "poly", "in", "self", ".", "polygons", "if", "not", "poly", ".", "is_out_of_image", "(", "self", ".", "shape", ",", "fully", "=", "fully", ",", "partly", "=", "partly", ")", "]", "# TODO use deepcopy() here", "return", "PolygonsOnImage", "(", "polys_clean", ",", "shape", "=", "self", ".", "shape", ")" ]
Remove all polygons that are fully or partially outside of the image. Parameters ---------- fully : bool, optional Whether to remove polygons that are fully outside of the image. partly : bool, optional Whether to remove polygons that are partially outside of the image. Returns ------- imgaug.PolygonsOnImage Reduced set of polygons, with those that were fully/partially outside of the image removed.
[ "Remove", "all", "polygons", "that", "are", "fully", "or", "partially", "outside", "of", "the", "image", "." ]
python
valid
33.12
h2non/paco
paco/reduce.py
https://github.com/h2non/paco/blob/1e5ef4df317e7cbbcefdf67d8dee28ce90538f3d/paco/reduce.py#L10-L83
def reduce(coro, iterable, initializer=None, limit=1, right=False, loop=None): """ Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. Reduction will be executed sequentially without concurrency, so passed values would be in order. This function is the asynchronous coroutine equivalent to Python standard `functools.reduce()` function. This function is a coroutine. This function can be composed in a pipeline chain with ``|`` operator. Arguments: coro (coroutine function): reducer coroutine binary function. iterable (iterable|asynchronousiterable): an iterable collection yielding coroutines functions. initializer (mixed): initial accumulator value used in the first reduction call. limit (int): max iteration concurrency limit. Use ``0`` for no limit. right (bool): reduce iterable from right to left. loop (asyncio.BaseEventLoop): optional event loop to use. Raises: TypeError: if input arguments are not valid. Returns: mixed: accumulated final reduced value. Usage:: async def reducer(acc, num): return acc + num await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0) # => 15 """ assert_corofunction(coro=coro) assert_iter(iterable=iterable) # Reduced accumulator value acc = initializer # If interable is empty, just return the initializer value if len(iterable) == 0: return initializer # Create concurrent executor pool = ConcurrentExecutor(limit=limit, loop=loop) # Reducer partial function for deferred coroutine execution def reducer(element): @asyncio.coroutine def wrapper(): nonlocal acc acc = yield from coro(acc, element) return wrapper # Support right reduction if right: iterable.reverse() # Iterate and attach coroutine for defer scheduling for element in iterable: pool.add(reducer(element)) # Wait until all coroutines finish yield from pool.run(ignore_empty=True) # Returns final reduced value return acc
[ "def", "reduce", "(", "coro", ",", "iterable", ",", "initializer", "=", "None", ",", "limit", "=", "1", ",", "right", "=", "False", ",", "loop", "=", "None", ")", ":", "assert_corofunction", "(", "coro", "=", "coro", ")", "assert_iter", "(", "iterable", "=", "iterable", ")", "# Reduced accumulator value", "acc", "=", "initializer", "# If interable is empty, just return the initializer value", "if", "len", "(", "iterable", ")", "==", "0", ":", "return", "initializer", "# Create concurrent executor", "pool", "=", "ConcurrentExecutor", "(", "limit", "=", "limit", ",", "loop", "=", "loop", ")", "# Reducer partial function for deferred coroutine execution", "def", "reducer", "(", "element", ")", ":", "@", "asyncio", ".", "coroutine", "def", "wrapper", "(", ")", ":", "nonlocal", "acc", "acc", "=", "yield", "from", "coro", "(", "acc", ",", "element", ")", "return", "wrapper", "# Support right reduction", "if", "right", ":", "iterable", ".", "reverse", "(", ")", "# Iterate and attach coroutine for defer scheduling", "for", "element", "in", "iterable", ":", "pool", ".", "add", "(", "reducer", "(", "element", ")", ")", "# Wait until all coroutines finish", "yield", "from", "pool", ".", "run", "(", "ignore_empty", "=", "True", ")", "# Returns final reduced value", "return", "acc" ]
Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. Reduction will be executed sequentially without concurrency, so passed values would be in order. This function is the asynchronous coroutine equivalent to Python standard `functools.reduce()` function. This function is a coroutine. This function can be composed in a pipeline chain with ``|`` operator. Arguments: coro (coroutine function): reducer coroutine binary function. iterable (iterable|asynchronousiterable): an iterable collection yielding coroutines functions. initializer (mixed): initial accumulator value used in the first reduction call. limit (int): max iteration concurrency limit. Use ``0`` for no limit. right (bool): reduce iterable from right to left. loop (asyncio.BaseEventLoop): optional event loop to use. Raises: TypeError: if input arguments are not valid. Returns: mixed: accumulated final reduced value. Usage:: async def reducer(acc, num): return acc + num await paco.reduce(reducer, [1, 2, 3, 4, 5], initializer=0) # => 15
[ "Apply", "function", "of", "two", "arguments", "cumulatively", "to", "the", "items", "of", "sequence", "from", "left", "to", "right", "so", "as", "to", "reduce", "the", "sequence", "to", "a", "single", "value", "." ]
python
train
29.594595
bio2bel/bio2bel
src/bio2bel/manager/abstract_manager.py
https://github.com/bio2bel/bio2bel/blob/d80762d891fa18b248709ff0b0f97ebb65ec64c2/src/bio2bel/manager/abstract_manager.py#L370-L384
def add_cli_summarize(main: click.Group) -> click.Group:  # noqa: D202
    """Add a ``summarize`` command to main :mod:`click` function."""

    @main.command()
    @click.pass_obj
    def summarize(manager: AbstractManager):
        """Summarize the contents of the database."""
        if not manager.is_populated():
            click.secho(f'{manager.module_name} has not been populated', fg='red')
            sys.exit(1)

        for name, count in sorted(manager.summarize().items()):
            click.echo(f'{name.capitalize()}: {count}')

    return main
[ "def", "add_cli_summarize", "(", "main", ":", "click", ".", "Group", ")", "->", "click", ".", "Group", ":", "# noqa: D202", "@", "main", ".", "command", "(", ")", "@", "click", ".", "pass_obj", "def", "summarize", "(", "manager", ":", "AbstractManager", ")", ":", "\"\"\"Summarize the contents of the database.\"\"\"", "if", "not", "manager", ".", "is_populated", "(", ")", ":", "click", ".", "secho", "(", "f'{manager.module_name} has not been populated'", ",", "fg", "=", "'red'", ")", "sys", ".", "exit", "(", "1", ")", "for", "name", ",", "count", "in", "sorted", "(", "manager", ".", "summarize", "(", ")", ".", "items", "(", ")", ")", ":", "click", ".", "echo", "(", "f'{name.capitalize()}: {count}'", ")", "return", "main" ]
Add a ``summarize`` command to main :mod:`click` function.
[ "Add", "a", "summarize", "command", "to", "main", ":", "mod", ":", "click", "function", "." ]
python
valid
36.6
appknox/pyaxmlparser
pyaxmlparser/core.py
https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L614-L631
def get_elements(self, tag_name, attribute, with_namespace=True):
    """
    Deprecated: use `get_all_attribute_value()` instead
    Return elements in xml files which match with the tag name and the specific attribute

    :param tag_name: a string which specify the tag name
    :param attribute: a string which specify the attribute
    """
    for i in self.xml:
        if self.xml[i] is None:
            continue
        for item in self.xml[i].findall('.//' + tag_name):
            if with_namespace:
                value = item.get(self._ns(attribute))
            else:
                value = item.get(attribute)
            # There might be an attribute without the namespace
            if value:
                yield self._format_value(value)
[ "def", "get_elements", "(", "self", ",", "tag_name", ",", "attribute", ",", "with_namespace", "=", "True", ")", ":", "for", "i", "in", "self", ".", "xml", ":", "if", "self", ".", "xml", "[", "i", "]", "is", "None", ":", "continue", "for", "item", "in", "self", ".", "xml", "[", "i", "]", ".", "findall", "(", "'.//'", "+", "tag_name", ")", ":", "if", "with_namespace", ":", "value", "=", "item", ".", "get", "(", "self", ".", "_ns", "(", "attribute", ")", ")", "else", ":", "value", "=", "item", ".", "get", "(", "attribute", ")", "# There might be an attribute without the namespace", "if", "value", ":", "yield", "self", ".", "_format_value", "(", "value", ")" ]
Deprecated: use `get_all_attribute_value()` instead Return elements in xml files which match with the tag name and the specific attribute :param tag_name: a string which specify the tag name :param attribute: a string which specify the attribute
[ "Deprecated", ":", "use", "get_all_attribute_value", "()", "instead", "Return", "elements", "in", "xml", "files", "which", "match", "with", "the", "tag", "name", "and", "the", "specific", "attribute", ":", "param", "tag_name", ":", "a", "string", "which", "specify", "the", "tag", "name", ":", "param", "attribute", ":", "a", "string", "which", "specify", "the", "attribute" ]
python
train
45
google/python-gflags
gflags/flagvalues.py
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L914-L925
def ModuleHelp(self, module):
    """Describe the key flags of a module.

    Args:
      module: A module object or a module name (a string).

    Returns:
      string describing the key flags of a module.
    """
    helplist = []
    self.__RenderOurModuleKeyFlags(module, helplist)
    return '\n'.join(helplist)
[ "def", "ModuleHelp", "(", "self", ",", "module", ")", ":", "helplist", "=", "[", "]", "self", ".", "__RenderOurModuleKeyFlags", "(", "module", ",", "helplist", ")", "return", "'\\n'", ".", "join", "(", "helplist", ")" ]
Describe the key flags of a module. Args: module: A module object or a module name (a string). Returns: string describing the key flags of a module.
[ "Describe", "the", "key", "flags", "of", "a", "module", "." ]
python
train
25.5
mosdef-hub/mbuild
mbuild/coordinate_transform.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/coordinate_transform.py#L555-L574
def spin(compound, theta, around):
    """Rotate a compound in place around an arbitrary vector.

    Parameters
    ----------
    compound : mb.Compound
        The compound being rotated.
    theta : float
        The angle by which to rotate the compound, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The axis about which to spin the compound.

    """
    around = np.asarray(around).reshape(3)
    if np.array_equal(around, np.zeros(3)):
        raise ValueError('Cannot spin around a zero vector')
    center_pos = compound.center
    translate(compound, -center_pos)
    rotate(compound, theta, around)
    translate(compound, center_pos)
[ "def", "spin", "(", "compound", ",", "theta", ",", "around", ")", ":", "around", "=", "np", ".", "asarray", "(", "around", ")", ".", "reshape", "(", "3", ")", "if", "np", ".", "array_equal", "(", "around", ",", "np", ".", "zeros", "(", "3", ")", ")", ":", "raise", "ValueError", "(", "'Cannot spin around a zero vector'", ")", "center_pos", "=", "compound", ".", "center", "translate", "(", "compound", ",", "-", "center_pos", ")", "rotate", "(", "compound", ",", "theta", ",", "around", ")", "translate", "(", "compound", ",", "center_pos", ")" ]
Rotate a compound in place around an arbitrary vector. Parameters ---------- compound : mb.Compound The compound being rotated. theta : float The angle by which to rotate the compound, in radians. around : np.ndarray, shape=(3,), dtype=float The axis about which to spin the compound.
[ "Rotate", "a", "compound", "in", "place", "around", "an", "arbitrary", "vector", "." ]
python
train
32.55
secure-systems-lab/securesystemslib
securesystemslib/keys.py
https://github.com/secure-systems-lab/securesystemslib/blob/beb3109d5bb462e5a60eed88fb40ed1167bd354e/securesystemslib/keys.py#L496-L582
def format_metadata_to_key(key_metadata): """ <Purpose> Construct a key dictionary (e.g., securesystemslib.formats.RSAKEY_SCHEMA) according to the keytype of 'key_metadata'. The dict returned by this function has the exact format as the dict returned by one of the key generations functions, like generate_ed25519_key(). The dict returned has the form: {'keytype': keytype, 'scheme': scheme, 'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...', 'keyval': {'public': '...', 'private': '...'}} For example, RSA key dictionaries in RSAKEY_SCHEMA format should be used by modules storing a collection of keys, such as with keydb.py. RSA keys as stored in metadata files use a different format, so this function should be called if an RSA key is extracted from one of these metadata files and need converting. The key generation functions create an entirely new key and return it in the format appropriate for 'keydb.py'. >>> ed25519_key = generate_ed25519_key() >>> key_val = ed25519_key['keyval'] >>> keytype = ed25519_key['keytype'] >>> scheme = ed25519_key['scheme'] >>> ed25519_metadata = \ format_keyval_to_metadata(keytype, scheme, key_val, private=True) >>> ed25519_key_2, junk = format_metadata_to_key(ed25519_metadata) >>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(ed25519_key_2) True >>> ed25519_key == ed25519_key_2 True <Arguments> key_metadata: The key dictionary as stored in Metadata files, conforming to 'securesystemslib.formats.KEY_SCHEMA'. It has the form: {'keytype': '...', 'scheme': scheme, 'keyval': {'public': '...', 'private': '...'}} <Exceptions> securesystemslib.exceptions.FormatError, if 'key_metadata' does not conform to 'securesystemslib.formats.KEY_SCHEMA'. <Side Effects> None. <Returns> In the case of an RSA key, a dictionary conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'. """ # Does 'key_metadata' have the correct format? # This check will ensure 'key_metadata' has the appropriate number # of objects and object types, and that all dict keys are properly named. # Raise 'securesystemslib.exceptions.FormatError' if the check fails. securesystemslib.formats.KEY_SCHEMA.check_match(key_metadata) # Construct the dictionary to be returned. key_dict = {} keytype = key_metadata['keytype'] scheme = key_metadata['scheme'] key_value = key_metadata['keyval'] # Convert 'key_value' to 'securesystemslib.formats.KEY_SCHEMA' and generate # its hash The hash is in hexdigest form. default_keyid = _get_keyid(keytype, scheme, key_value) keyids = set() keyids.add(default_keyid) for hash_algorithm in securesystemslib.settings.HASH_ALGORITHMS: keyid = _get_keyid(keytype, scheme, key_value, hash_algorithm) keyids.add(keyid) # All the required key values gathered. Build 'key_dict'. # 'keyid_hash_algorithms' key_dict['keytype'] = keytype key_dict['scheme'] = scheme key_dict['keyid'] = default_keyid key_dict['keyid_hash_algorithms'] = securesystemslib.settings.HASH_ALGORITHMS key_dict['keyval'] = key_value return key_dict, keyids
[ "def", "format_metadata_to_key", "(", "key_metadata", ")", ":", "# Does 'key_metadata' have the correct format?", "# This check will ensure 'key_metadata' has the appropriate number", "# of objects and object types, and that all dict keys are properly named.", "# Raise 'securesystemslib.exceptions.FormatError' if the check fails.", "securesystemslib", ".", "formats", ".", "KEY_SCHEMA", ".", "check_match", "(", "key_metadata", ")", "# Construct the dictionary to be returned.", "key_dict", "=", "{", "}", "keytype", "=", "key_metadata", "[", "'keytype'", "]", "scheme", "=", "key_metadata", "[", "'scheme'", "]", "key_value", "=", "key_metadata", "[", "'keyval'", "]", "# Convert 'key_value' to 'securesystemslib.formats.KEY_SCHEMA' and generate", "# its hash The hash is in hexdigest form.", "default_keyid", "=", "_get_keyid", "(", "keytype", ",", "scheme", ",", "key_value", ")", "keyids", "=", "set", "(", ")", "keyids", ".", "add", "(", "default_keyid", ")", "for", "hash_algorithm", "in", "securesystemslib", ".", "settings", ".", "HASH_ALGORITHMS", ":", "keyid", "=", "_get_keyid", "(", "keytype", ",", "scheme", ",", "key_value", ",", "hash_algorithm", ")", "keyids", ".", "add", "(", "keyid", ")", "# All the required key values gathered. Build 'key_dict'.", "# 'keyid_hash_algorithms'", "key_dict", "[", "'keytype'", "]", "=", "keytype", "key_dict", "[", "'scheme'", "]", "=", "scheme", "key_dict", "[", "'keyid'", "]", "=", "default_keyid", "key_dict", "[", "'keyid_hash_algorithms'", "]", "=", "securesystemslib", ".", "settings", ".", "HASH_ALGORITHMS", "key_dict", "[", "'keyval'", "]", "=", "key_value", "return", "key_dict", ",", "keyids" ]
<Purpose> Construct a key dictionary (e.g., securesystemslib.formats.RSAKEY_SCHEMA) according to the keytype of 'key_metadata'. The dict returned by this function has the exact format as the dict returned by one of the key generations functions, like generate_ed25519_key(). The dict returned has the form: {'keytype': keytype, 'scheme': scheme, 'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...', 'keyval': {'public': '...', 'private': '...'}} For example, RSA key dictionaries in RSAKEY_SCHEMA format should be used by modules storing a collection of keys, such as with keydb.py. RSA keys as stored in metadata files use a different format, so this function should be called if an RSA key is extracted from one of these metadata files and need converting. The key generation functions create an entirely new key and return it in the format appropriate for 'keydb.py'. >>> ed25519_key = generate_ed25519_key() >>> key_val = ed25519_key['keyval'] >>> keytype = ed25519_key['keytype'] >>> scheme = ed25519_key['scheme'] >>> ed25519_metadata = \ format_keyval_to_metadata(keytype, scheme, key_val, private=True) >>> ed25519_key_2, junk = format_metadata_to_key(ed25519_metadata) >>> securesystemslib.formats.ED25519KEY_SCHEMA.matches(ed25519_key_2) True >>> ed25519_key == ed25519_key_2 True <Arguments> key_metadata: The key dictionary as stored in Metadata files, conforming to 'securesystemslib.formats.KEY_SCHEMA'. It has the form: {'keytype': '...', 'scheme': scheme, 'keyval': {'public': '...', 'private': '...'}} <Exceptions> securesystemslib.exceptions.FormatError, if 'key_metadata' does not conform to 'securesystemslib.formats.KEY_SCHEMA'. <Side Effects> None. <Returns> In the case of an RSA key, a dictionary conformant to 'securesystemslib.formats.RSAKEY_SCHEMA'.
[ "<Purpose", ">", "Construct", "a", "key", "dictionary", "(", "e", ".", "g", ".", "securesystemslib", ".", "formats", ".", "RSAKEY_SCHEMA", ")", "according", "to", "the", "keytype", "of", "key_metadata", ".", "The", "dict", "returned", "by", "this", "function", "has", "the", "exact", "format", "as", "the", "dict", "returned", "by", "one", "of", "the", "key", "generations", "functions", "like", "generate_ed25519_key", "()", ".", "The", "dict", "returned", "has", "the", "form", ":" ]
python
train
36.505747
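format_metadata_to_key above relies on _get_keyid to derive one keyid per configured hash algorithm. The sketch below approximates that step by hashing a deterministic JSON serialization of the public portion of the key; plain json.dumps stands in for securesystemslib's canonical-JSON encoding, so the digests it produces are illustrative rather than the library's exact keyids.

import hashlib
import json

def keyid_for(keytype, scheme, keyval, hash_algorithm="sha256"):
    """Digest of a deterministic serialization of the public key portion."""
    serialized = json.dumps(
        {"keytype": keytype, "scheme": scheme,
         "keyval": {"public": keyval["public"]}},
        sort_keys=True, separators=(",", ":"))
    return hashlib.new(hash_algorithm, serialized.encode("utf-8")).hexdigest()

key_value = {"public": "ab" * 32}
keyids = {keyid_for("ed25519", "ed25519", key_value, algo)
          for algo in ("sha256", "sha512")}
print(sorted(keyids))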
omtinez/pddb
pddb/pddb.py
https://github.com/omtinez/pddb/blob/a24cee0702c8286c5c466c51ca65cf8dbc2c183c/pddb/pddb.py#L947-L967
def _request(request, request_fallback=None): ''' Extract request fields wherever they may come from: GET, POST, forms, fallback ''' # Use lambdas to avoid evaluating bottle.request.* which may throw an Error all_dicts = [ lambda: request.json, lambda: request.forms, lambda: request.query, lambda: request.files, #lambda: request.POST, lambda: request_fallback ] request_dict = dict() for req_dict_ in all_dicts: try: req_dict = req_dict_() except KeyError: continue if req_dict is not None and hasattr(req_dict, 'items'): for req_key, req_val in req_dict.items(): request_dict[req_key] = req_val return request_dict
[ "def", "_request", "(", "request", ",", "request_fallback", "=", "None", ")", ":", "# Use lambdas to avoid evaluating bottle.request.* which may throw an Error", "all_dicts", "=", "[", "lambda", ":", "request", ".", "json", ",", "lambda", ":", "request", ".", "forms", ",", "lambda", ":", "request", ".", "query", ",", "lambda", ":", "request", ".", "files", ",", "#lambda: request.POST,", "lambda", ":", "request_fallback", "]", "request_dict", "=", "dict", "(", ")", "for", "req_dict_", "in", "all_dicts", ":", "try", ":", "req_dict", "=", "req_dict_", "(", ")", "except", "KeyError", ":", "continue", "if", "req_dict", "is", "not", "None", "and", "hasattr", "(", "req_dict", ",", "'items'", ")", ":", "for", "req_key", ",", "req_val", "in", "req_dict", ".", "items", "(", ")", ":", "request_dict", "[", "req_key", "]", "=", "req_val", "return", "request_dict" ]
Extract request fields wherever they may come from: GET, POST, forms, fallback
[ "Extract", "request", "fields", "wherever", "they", "may", "come", "from", ":", "GET", "POST", "forms", "fallback" ]
python
train
39.47619
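The _request helper above merges whichever of several bottle request attributes happen to be present. A framework-free sketch of the same pattern follows; the lazily evaluated sources and the sample dictionaries are invented for illustration, and, as in the original, sources listed later overwrite earlier values.

def merged_request(*sources):
    """Collect fields from lazily evaluated request sources; later sources win."""
    merged = {}
    for get_source in sources:
        try:
            candidate = get_source()        # may raise if the attribute is absent
        except KeyError:
            continue
        if candidate is not None and hasattr(candidate, "items"):
            merged.update(candidate.items())
    return merged

print(merged_request(lambda: {"a": 1, "b": 2},
                     lambda: None,
                     lambda: {"b": 3}))     # -> {'a': 1, 'b': 3}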
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/browser.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/browser.py#L557-L571
def resizeEvent(self, event): """Schedules an item layout if resize mode is \"adjust\". Somehow this is needed for correctly scaling down items. The reason this was reimplemented was the CommentDelegate. :param event: the resize event :type event: QtCore.QEvent :returns: None :rtype: None :raises: None """ if self.resizeMode() == self.Adjust: self.scheduleDelayedItemsLayout() return super(ListLevel, self).resizeEvent(event)
[ "def", "resizeEvent", "(", "self", ",", "event", ")", ":", "if", "self", ".", "resizeMode", "(", ")", "==", "self", ".", "Adjust", ":", "self", ".", "scheduleDelayedItemsLayout", "(", ")", "return", "super", "(", "ListLevel", ",", "self", ")", ".", "resizeEvent", "(", "event", ")" ]
Schedules an item layout if resize mode is \"adjust\". Somehow this is needed for correctly scaling down items. The reason this was reimplemented was the CommentDelegate. :param event: the resize event :type event: QtCore.QEvent :returns: None :rtype: None :raises: None
[ "Schedules", "an", "item", "layout", "if", "resize", "mode", "is", "\\", "adjust", "\\", ".", "Somehow", "this", "is", "needed", "for", "correctly", "scaling", "down", "items", "." ]
python
train
34.333333
linuxsoftware/ls.joyous
ls/joyous/models/events.py
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L807-L811
def _getFromTime(self, atDate=None): """ Time that the event starts (in the local time zone). """ return getLocalTime(self.date_from, self.time_from, self.tz)
[ "def", "_getFromTime", "(", "self", ",", "atDate", "=", "None", ")", ":", "return", "getLocalTime", "(", "self", ".", "date_from", ",", "self", ".", "time_from", ",", "self", ".", "tz", ")" ]
Time that the event starts (in the local time zone).
[ "Time", "that", "the", "event", "starts", "(", "in", "the", "local", "time", "zone", ")", "." ]
python
train
37.2
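_getFromTime above delegates to getLocalTime to express a stored date/time/timezone triple in the local zone. A standard-library sketch of that conversion is shown below (Python 3.9+ for zoneinfo); the chosen zones and the helper name are assumptions for the example, not ls.joyous internals.

from datetime import date, time, datetime
from zoneinfo import ZoneInfo             # Python 3.9+

LOCAL_TZ = ZoneInfo("Europe/London")      # assumed "local" zone for the sketch

def get_local_time(date_from, time_from, tz):
    """Combine a date and time defined in ``tz`` and express it in LOCAL_TZ."""
    aware = datetime.combine(date_from, time_from, tzinfo=tz)
    return aware.astimezone(LOCAL_TZ).timetz()

print(get_local_time(date(2024, 7, 1), time(9, 0), ZoneInfo("US/Eastern")))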
Alignak-monitoring/alignak
alignak/objects/item.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/item.py#L1315-L1337
def linkify_with_escalations(self, escalations): """ Link with escalations :param escalations: all escalations object :type escalations: alignak.objects.escalation.Escalations :return: None """ for i in self: if not hasattr(i, 'escalations'): continue links_list = strip_and_uniq(i.escalations) new = [] for name in [e for e in links_list if e]: escalation = escalations.find_by_name(name) if escalation is not None and escalation.uuid not in new: new.append(escalation.uuid) else: i.add_error("the escalation '%s' defined for '%s' is unknown" % (name, i.get_name())) i.escalations = new
[ "def", "linkify_with_escalations", "(", "self", ",", "escalations", ")", ":", "for", "i", "in", "self", ":", "if", "not", "hasattr", "(", "i", ",", "'escalations'", ")", ":", "continue", "links_list", "=", "strip_and_uniq", "(", "i", ".", "escalations", ")", "new", "=", "[", "]", "for", "name", "in", "[", "e", "for", "e", "in", "links_list", "if", "e", "]", ":", "escalation", "=", "escalations", ".", "find_by_name", "(", "name", ")", "if", "escalation", "is", "not", "None", "and", "escalation", ".", "uuid", "not", "in", "new", ":", "new", ".", "append", "(", "escalation", ".", "uuid", ")", "else", ":", "i", ".", "add_error", "(", "\"the escalation '%s' defined for '%s' is unknown\"", "%", "(", "name", ",", "i", ".", "get_name", "(", ")", ")", ")", "i", ".", "escalations", "=", "new" ]
Link with escalations :param escalations: all escalations object :type escalations: alignak.objects.escalation.Escalations :return: None
[ "Link", "with", "escalations" ]
python
train
35.652174
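linkify_with_escalations above resolves configured escalation names into uuids while recording unknown names as errors. Below is a self-contained sketch of that resolve-and-deduplicate step; the registry dict, the uuids and the function name are invented for the example rather than taken from Alignak.

def resolve_links(names, registry, errors):
    """Map names to uuids, skipping blanks, deduplicating, recording unknowns."""
    resolved, seen = [], set()
    for name in [n for n in names if n]:
        entry = registry.get(name)
        if entry is None:
            errors.append("the escalation '%s' is unknown" % name)
        elif entry["uuid"] not in seen:
            seen.add(entry["uuid"])
            resolved.append(entry["uuid"])
    return resolved

registry = {"critical": {"uuid": "u1"}, "warning": {"uuid": "u2"}}
errors = []
print(resolve_links(["critical", "", "critical", "bogus"], registry, errors))
print(errors)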
pylast/pylast
src/pylast/__init__.py
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L2451-L2460
def get_tagged_albums(self, tag, limit=None, cacheable=True): """Returns the albums tagged by a user.""" params = self._get_params() params["tag"] = tag params["taggingtype"] = "album" if limit: params["limit"] = limit doc = self._request(self.ws_prefix + ".getpersonaltags", cacheable, params) return _extract_albums(doc, self.network)
[ "def", "get_tagged_albums", "(", "self", ",", "tag", ",", "limit", "=", "None", ",", "cacheable", "=", "True", ")", ":", "params", "=", "self", ".", "_get_params", "(", ")", "params", "[", "\"tag\"", "]", "=", "tag", "params", "[", "\"taggingtype\"", "]", "=", "\"album\"", "if", "limit", ":", "params", "[", "\"limit\"", "]", "=", "limit", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getpersonaltags\"", ",", "cacheable", ",", "params", ")", "return", "_extract_albums", "(", "doc", ",", "self", ".", "network", ")" ]
Returns the albums tagged by a user.
[ "Returns", "the", "albums", "tagged", "by", "a", "user", "." ]
python
train
39.6
tensorflow/probability
tensorflow_probability/python/optimizer/differential_evolution.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/differential_evolution.py#L510-L516
def _find_best_in_population(population, values): """Finds the population member with the lowest value.""" best_value = tf.math.reduce_min(input_tensor=values) best_index = tf.where(tf.math.equal(values, best_value))[0, 0] return ([population_part[best_index] for population_part in population], best_value)
[ "def", "_find_best_in_population", "(", "population", ",", "values", ")", ":", "best_value", "=", "tf", ".", "math", ".", "reduce_min", "(", "input_tensor", "=", "values", ")", "best_index", "=", "tf", ".", "where", "(", "tf", ".", "math", ".", "equal", "(", "values", ",", "best_value", ")", ")", "[", "0", ",", "0", "]", "return", "(", "[", "population_part", "[", "best_index", "]", "for", "population_part", "in", "population", "]", ",", "best_value", ")" ]
Finds the population member with the lowest value.
[ "Finds", "the", "population", "member", "with", "the", "lowest", "value", "." ]
python
test
45.714286
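_find_best_in_population above selects the member with the lowest objective value using TensorFlow ops. The NumPy sketch below mirrors that selection logic so it runs without TensorFlow; the toy population (two parameter parts of three members each) is an assumption for illustration.

import numpy as np

def find_best_in_population(population, values):
    """Return the member parts at the index of the smallest value."""
    best_index = int(np.argmin(values))
    best_value = values[best_index]
    return [part[best_index] for part in population], best_value

population = [np.array([[0.0], [1.0], [2.0]]),   # first parameter part
              np.array([10.0, 11.0, 12.0])]      # second parameter part
values = np.array([3.0, 1.5, 2.0])
print(find_best_in_population(population, values))  # member 1 is best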
mapmyfitness/jtime
jtime/utils.py
https://github.com/mapmyfitness/jtime/blob/402fb6b40ac7a78c23fd02fac50c6dbe49e5ebfd/jtime/utils.py#L16-L56
def working_cycletime(start, end, workday_start=datetime.timedelta(hours=0), workday_end=datetime.timedelta(hours=24)): """ Get the working time between a beginning and an end point subtracting out non-office time """ def clamp(t, start, end): "Return 't' clamped to the range ['start', 'end']" return max(start, min(end, t)) def day_part(t): "Return timedelta between midnight and 't'." return t - t.replace(hour=0, minute=0, second=0) if not start: return None if not end: end = datetime.datetime.now() zero = datetime.timedelta(0) # Make sure that the work day is valid assert(zero <= workday_start <= workday_end <= datetime.timedelta(1)) # Get the workday delta workday = workday_end - workday_start # Get the number of days it took days = (end - start).days + 1 # Number of weeks weeks = days // 7 # Get the number of days in addition to weeks extra = (max(0, 5 - start.weekday()) + min(5, 1 + end.weekday())) % 5 # Get the number of working days weekdays = weeks * 5 + extra # Get the total time spent accounting for the workday total = workday * weekdays if start.weekday() < 5: # Figuring out how much time it wasn't being worked on and subtracting total -= clamp(day_part(start) - workday_start, zero, workday) if end.weekday() < 5: # Figuring out how much time it wasn't being worked on and subtracting total -= clamp(workday_end - day_part(end), zero, workday) cycle_time = timedelta_total_seconds(total) / timedelta_total_seconds(workday) return cycle_time
[ "def", "working_cycletime", "(", "start", ",", "end", ",", "workday_start", "=", "datetime", ".", "timedelta", "(", "hours", "=", "0", ")", ",", "workday_end", "=", "datetime", ".", "timedelta", "(", "hours", "=", "24", ")", ")", ":", "def", "clamp", "(", "t", ",", "start", ",", "end", ")", ":", "\"Return 't' clamped to the range ['start', 'end']\"", "return", "max", "(", "start", ",", "min", "(", "end", ",", "t", ")", ")", "def", "day_part", "(", "t", ")", ":", "\"Return timedelta between midnight and 't'.\"", "return", "t", "-", "t", ".", "replace", "(", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ")", "if", "not", "start", ":", "return", "None", "if", "not", "end", ":", "end", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "zero", "=", "datetime", ".", "timedelta", "(", "0", ")", "# Make sure that the work day is valid", "assert", "(", "zero", "<=", "workday_start", "<=", "workday_end", "<=", "datetime", ".", "timedelta", "(", "1", ")", ")", "# Get the workday delta", "workday", "=", "workday_end", "-", "workday_start", "# Get the number of days it took", "days", "=", "(", "end", "-", "start", ")", ".", "days", "+", "1", "# Number of weeks", "weeks", "=", "days", "//", "7", "# Get the number of days in addition to weeks", "extra", "=", "(", "max", "(", "0", ",", "5", "-", "start", ".", "weekday", "(", ")", ")", "+", "min", "(", "5", ",", "1", "+", "end", ".", "weekday", "(", ")", ")", ")", "%", "5", "# Get the number of working days", "weekdays", "=", "weeks", "*", "5", "+", "extra", "# Get the total time spent accounting for the workday", "total", "=", "workday", "*", "weekdays", "if", "start", ".", "weekday", "(", ")", "<", "5", ":", "# Figuring out how much time it wasn't being worked on and subtracting", "total", "-=", "clamp", "(", "day_part", "(", "start", ")", "-", "workday_start", ",", "zero", ",", "workday", ")", "if", "end", ".", "weekday", "(", ")", "<", "5", ":", "# Figuring out how much time it wasn't being worked on and subtracting", "total", "-=", "clamp", "(", "workday_end", "-", "day_part", "(", "end", ")", ",", "zero", ",", "workday", ")", "cycle_time", "=", "timedelta_total_seconds", "(", "total", ")", "/", "timedelta_total_seconds", "(", "workday", ")", "return", "cycle_time" ]
Get the working time between a beginning and an end point subtracting out non-office time
[ "Get", "the", "working", "time", "between", "a", "beginning", "and", "an", "end", "point", "subtracting", "out", "non", "-", "office", "time" ]
python
train
39.439024
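working_cycletime above counts only in-office time between two timestamps using closed-form week arithmetic. The sketch below reaches the same kind of answer with a simpler (and slower) per-day loop, which is easier to verify by hand; the 9-to-17 working window and the example dates are assumptions for the demo.

from datetime import datetime, timedelta

def business_seconds(start, end, day_start=9, day_end=17):
    """Seconds of overlap between [start, end] and weekday working hours."""
    total = timedelta()
    cursor = start.replace(hour=0, minute=0, second=0, microsecond=0)
    while cursor.date() <= end.date():
        if cursor.weekday() < 5:                 # Monday..Friday only
            lo = max(start, cursor + timedelta(hours=day_start))
            hi = min(end, cursor + timedelta(hours=day_end))
            if hi > lo:
                total += hi - lo
        cursor += timedelta(days=1)
    return total.total_seconds()

# Mon 2024-05-06 10:00 -> Tue 2024-05-07 12:00 = 7h + 3h of office time = 10.0
print(business_seconds(datetime(2024, 5, 6, 10), datetime(2024, 5, 7, 12)) / 3600)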
pandas-dev/pandas
pandas/plotting/_misc.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L270-L356
def andrews_curves(frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds): """ Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ... Where x coefficients correspond to the values of each dimension and t is linearly spaced between -pi and +pi. Each row of frame then corresponds to a single curve. Parameters ---------- frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0) class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional Colors to use for the different classes colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. kwds : keywords Options to pass to matplotlib plotting method Returns ------- class:`matplotlip.axis.Axes` """ from math import sqrt, pi import matplotlib.pyplot as plt def function(amplitudes): def f(t): x1 = amplitudes[0] result = x1 / sqrt(2.0) # Take the rest of the coefficients and resize them # appropriately. Take a copy of amplitudes as otherwise numpy # deletes the element from amplitudes itself. coeffs = np.delete(np.copy(amplitudes), 0) coeffs.resize(int((coeffs.size + 1) / 2), 2) # Generate the harmonics and arguments for the sin and cos # functions. harmonics = np.arange(0, coeffs.shape[0]) + 1 trig_args = np.outer(harmonics, t) result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) + coeffs[:, 1, np.newaxis] * np.cos(trig_args), axis=0) return result return f n = len(frame) class_col = frame[class_column] classes = frame[class_column].drop_duplicates() df = frame.drop(class_column, axis=1) t = np.linspace(-pi, pi, samples) used_legends = set() color_values = _get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', color=color) colors = dict(zip(classes, color_values)) if ax is None: ax = plt.gca(xlim=(-pi, pi)) for i in range(n): row = df.iloc[i].values f = function(row) y = f(t) kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(t, y, color=colors[kls], label=label, **kwds) else: ax.plot(t, y, color=colors[kls], **kwds) ax.legend(loc='upper right') ax.grid() return ax
[ "def", "andrews_curves", "(", "frame", ",", "class_column", ",", "ax", "=", "None", ",", "samples", "=", "200", ",", "color", "=", "None", ",", "colormap", "=", "None", ",", "*", "*", "kwds", ")", ":", "from", "math", "import", "sqrt", ",", "pi", "import", "matplotlib", ".", "pyplot", "as", "plt", "def", "function", "(", "amplitudes", ")", ":", "def", "f", "(", "t", ")", ":", "x1", "=", "amplitudes", "[", "0", "]", "result", "=", "x1", "/", "sqrt", "(", "2.0", ")", "# Take the rest of the coefficients and resize them", "# appropriately. Take a copy of amplitudes as otherwise numpy", "# deletes the element from amplitudes itself.", "coeffs", "=", "np", ".", "delete", "(", "np", ".", "copy", "(", "amplitudes", ")", ",", "0", ")", "coeffs", ".", "resize", "(", "int", "(", "(", "coeffs", ".", "size", "+", "1", ")", "/", "2", ")", ",", "2", ")", "# Generate the harmonics and arguments for the sin and cos", "# functions.", "harmonics", "=", "np", ".", "arange", "(", "0", ",", "coeffs", ".", "shape", "[", "0", "]", ")", "+", "1", "trig_args", "=", "np", ".", "outer", "(", "harmonics", ",", "t", ")", "result", "+=", "np", ".", "sum", "(", "coeffs", "[", ":", ",", "0", ",", "np", ".", "newaxis", "]", "*", "np", ".", "sin", "(", "trig_args", ")", "+", "coeffs", "[", ":", ",", "1", ",", "np", ".", "newaxis", "]", "*", "np", ".", "cos", "(", "trig_args", ")", ",", "axis", "=", "0", ")", "return", "result", "return", "f", "n", "=", "len", "(", "frame", ")", "class_col", "=", "frame", "[", "class_column", "]", "classes", "=", "frame", "[", "class_column", "]", ".", "drop_duplicates", "(", ")", "df", "=", "frame", ".", "drop", "(", "class_column", ",", "axis", "=", "1", ")", "t", "=", "np", ".", "linspace", "(", "-", "pi", ",", "pi", ",", "samples", ")", "used_legends", "=", "set", "(", ")", "color_values", "=", "_get_standard_colors", "(", "num_colors", "=", "len", "(", "classes", ")", ",", "colormap", "=", "colormap", ",", "color_type", "=", "'random'", ",", "color", "=", "color", ")", "colors", "=", "dict", "(", "zip", "(", "classes", ",", "color_values", ")", ")", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", "xlim", "=", "(", "-", "pi", ",", "pi", ")", ")", "for", "i", "in", "range", "(", "n", ")", ":", "row", "=", "df", ".", "iloc", "[", "i", "]", ".", "values", "f", "=", "function", "(", "row", ")", "y", "=", "f", "(", "t", ")", "kls", "=", "class_col", ".", "iat", "[", "i", "]", "label", "=", "pprint_thing", "(", "kls", ")", "if", "label", "not", "in", "used_legends", ":", "used_legends", ".", "add", "(", "label", ")", "ax", ".", "plot", "(", "t", ",", "y", ",", "color", "=", "colors", "[", "kls", "]", ",", "label", "=", "label", ",", "*", "*", "kwds", ")", "else", ":", "ax", ".", "plot", "(", "t", ",", "y", ",", "color", "=", "colors", "[", "kls", "]", ",", "*", "*", "kwds", ")", "ax", ".", "legend", "(", "loc", "=", "'upper right'", ")", "ax", ".", "grid", "(", ")", "return", "ax" ]
Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ... Where x coefficients correspond to the values of each dimension and t is linearly spaced between -pi and +pi. Each row of frame then corresponds to a single curve. Parameters ---------- frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0) class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional Colors to use for the different classes colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. kwds : keywords Options to pass to matplotlib plotting method Returns ------- class:`matplotlip.axis.Axes`
[ "Generate", "a", "matplotlib", "plot", "of", "Andrews", "curves", "for", "visualising", "clusters", "of", "multivariate", "data", "." ]
python
train
34.448276
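The andrews_curves docstring above gives the functional form f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + ... Evaluating one such curve needs only NumPy, as in the sketch below; the four-value row and the five sample points are arbitrary demo inputs, and no plotting is attempted.

import numpy as np

def andrews_curve(row, t):
    """Evaluate the Andrews curve for a single observation at times ``t``."""
    row = np.asarray(row, dtype=float)
    result = np.full_like(t, row[0] / np.sqrt(2.0))
    for k, coeff in enumerate(row[1:]):
        harmonic = k // 2 + 1                       # 1, 1, 2, 2, 3, 3, ...
        wave = np.sin if k % 2 == 0 else np.cos     # sin, cos, sin, cos, ...
        result += coeff * wave(harmonic * t)
    return result

t = np.linspace(-np.pi, np.pi, 5)
print(andrews_curve([1.0, 0.5, -0.2, 0.3], t))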
kgori/treeCl
treeCl/distance_matrix.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/distance_matrix.py#L137-L144
def check_psd(matrix, tolerance=1e-6): """ A square matrix is PSD if all eigenvalues of its Hermitian part are non- negative. The Hermitian part is given by (self + M*)/2, where M* is the complex conjugate transpose of M """ hermitian = (matrix + matrix.T.conjugate()) / 2 eigenvalues = np.linalg.eigh(hermitian)[0] return (eigenvalues > -tolerance).all()
[ "def", "check_psd", "(", "matrix", ",", "tolerance", "=", "1e-6", ")", ":", "hermitian", "=", "(", "matrix", "+", "matrix", ".", "T", ".", "conjugate", "(", ")", ")", "/", "2", "eigenvalues", "=", "np", ".", "linalg", ".", "eigh", "(", "hermitian", ")", "[", "0", "]", "return", "(", "eigenvalues", ">", "-", "tolerance", ")", ".", "all", "(", ")" ]
A square matrix is PSD if all eigenvalues of its Hermitian part are non- negative. The Hermitian part is given by (self + M*)/2, where M* is the complex conjugate transpose of M
[ "A", "square", "matrix", "is", "PSD", "if", "all", "eigenvalues", "of", "its", "Hermitian", "part", "are", "non", "-", "negative", ".", "The", "Hermitian", "part", "is", "given", "by", "(", "self", "+", "M", "*", ")", "/", "2", "where", "M", "*", "is", "the", "complex", "conjugate", "transpose", "of", "M" ]
python
train
46.625
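check_psd above tests positive semidefiniteness through the eigenvalues of the Hermitian part. The short demo below exercises the same test with NumPy: a Gram matrix A @ A.T is PSD by construction, while [[0, 1], [1, 0]] has eigenvalues +1 and -1 and is not; the tolerance and the test matrices are illustrative choices.

import numpy as np

def is_psd(matrix, tolerance=1e-6):
    """True if all eigenvalues of the Hermitian part exceed -tolerance."""
    hermitian = (matrix + matrix.conj().T) / 2
    return bool((np.linalg.eigvalsh(hermitian) > -tolerance).all())

rng = np.random.default_rng(0)
A = rng.random((4, 3))
print(is_psd(A @ A.T))                              # Gram matrix -> True
print(is_psd(np.array([[0.0, 1.0], [1.0, 0.0]])))   # eigenvalues +1/-1 -> False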
jaijuneja/PyTLDR
pytldr/summarize/lsa.py
https://github.com/jaijuneja/PyTLDR/blob/4ba2ab88dbbb1318a86bf4483264ab213e166b6b/pytldr/summarize/lsa.py#L49-L102
def summarize(self, text, topics=4, length=5, binary_matrix=True, topic_sigma_threshold=0.5): """ Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper: J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation. Proc. ISIM ’04, pp. 93–100. :param text: a string of text to be summarized, path to a text file, or URL starting with http :param topics: the number of topics/concepts covered in the input text (defines the degree of dimensionality reduction in the SVD step) :param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage of the original document (e.g. 0.5) :param binary_matrix: boolean value indicating whether the matrix of word counts should be binary (True by default) :param topic_sigma_threshold: filters out topics/concepts with a singular value less than this percentage of the largest singular value (must be between 0 and 1, 0.5 by default) :return: list of sentences for the summary """ text = self._parse_input(text) sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text) length = self._parse_summary_length(length, len(sentences)) if length == len(sentences): return unprocessed_sentences topics = self._validate_num_topics(topics, sentences) # Generate a matrix of terms that appear in each sentence weighting = 'binary' if binary_matrix else 'frequency' sentence_matrix = self._compute_matrix(sentences, weighting=weighting) sentence_matrix = sentence_matrix.transpose() # Filter out negatives in the sparse matrix (need to do this on Vt for LSA method): sentence_matrix = sentence_matrix.multiply(sentence_matrix > 0) s, u, v = self._svd(sentence_matrix, num_concepts=topics) # Only consider topics/concepts whose singular values are half of the largest singular value if 1 <= topic_sigma_threshold < 0: raise ValueError('Parameter topic_sigma_threshold must take a value between 0 and 1') sigma_threshold = max(u) * topic_sigma_threshold u[u < sigma_threshold] = 0 # Set all other singular values to zero # Build a "length vector" containing the length (i.e. saliency) of each sentence saliency_vec = np.dot(np.square(u), np.square(v)) top_sentences = saliency_vec.argsort()[-length:][::-1] # Return the sentences in the order in which they appear in the document top_sentences.sort() return [unprocessed_sentences[i] for i in top_sentences]
[ "def", "summarize", "(", "self", ",", "text", ",", "topics", "=", "4", ",", "length", "=", "5", ",", "binary_matrix", "=", "True", ",", "topic_sigma_threshold", "=", "0.5", ")", ":", "text", "=", "self", ".", "_parse_input", "(", "text", ")", "sentences", ",", "unprocessed_sentences", "=", "self", ".", "_tokenizer", ".", "tokenize_sentences", "(", "text", ")", "length", "=", "self", ".", "_parse_summary_length", "(", "length", ",", "len", "(", "sentences", ")", ")", "if", "length", "==", "len", "(", "sentences", ")", ":", "return", "unprocessed_sentences", "topics", "=", "self", ".", "_validate_num_topics", "(", "topics", ",", "sentences", ")", "# Generate a matrix of terms that appear in each sentence", "weighting", "=", "'binary'", "if", "binary_matrix", "else", "'frequency'", "sentence_matrix", "=", "self", ".", "_compute_matrix", "(", "sentences", ",", "weighting", "=", "weighting", ")", "sentence_matrix", "=", "sentence_matrix", ".", "transpose", "(", ")", "# Filter out negatives in the sparse matrix (need to do this on Vt for LSA method):", "sentence_matrix", "=", "sentence_matrix", ".", "multiply", "(", "sentence_matrix", ">", "0", ")", "s", ",", "u", ",", "v", "=", "self", ".", "_svd", "(", "sentence_matrix", ",", "num_concepts", "=", "topics", ")", "# Only consider topics/concepts whose singular values are half of the largest singular value", "if", "1", "<=", "topic_sigma_threshold", "<", "0", ":", "raise", "ValueError", "(", "'Parameter topic_sigma_threshold must take a value between 0 and 1'", ")", "sigma_threshold", "=", "max", "(", "u", ")", "*", "topic_sigma_threshold", "u", "[", "u", "<", "sigma_threshold", "]", "=", "0", "# Set all other singular values to zero", "# Build a \"length vector\" containing the length (i.e. saliency) of each sentence", "saliency_vec", "=", "np", ".", "dot", "(", "np", ".", "square", "(", "u", ")", ",", "np", ".", "square", "(", "v", ")", ")", "top_sentences", "=", "saliency_vec", ".", "argsort", "(", ")", "[", "-", "length", ":", "]", "[", ":", ":", "-", "1", "]", "# Return the sentences in the order in which they appear in the document", "top_sentences", ".", "sort", "(", ")", "return", "[", "unprocessed_sentences", "[", "i", "]", "for", "i", "in", "top_sentences", "]" ]
Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper: J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation. Proc. ISIM ’04, pp. 93–100. :param text: a string of text to be summarized, path to a text file, or URL starting with http :param topics: the number of topics/concepts covered in the input text (defines the degree of dimensionality reduction in the SVD step) :param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage of the original document (e.g. 0.5) :param binary_matrix: boolean value indicating whether the matrix of word counts should be binary (True by default) :param topic_sigma_threshold: filters out topics/concepts with a singular value less than this percentage of the largest singular value (must be between 0 and 1, 0.5 by default) :return: list of sentences for the summary
[ "Implements", "the", "method", "of", "latent", "semantic", "analysis", "described", "by", "Steinberger", "and", "Jezek", "in", "the", "paper", ":" ]
python
train
50.222222
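The summarize method above scores sentences with the Steinberger-Jezek "length" sqrt(sum_k (sigma_k * v_k,i)^2) after truncating the SVD to a few topics. The sketch below reproduces just that scoring step on a hand-made 4-term x 3-sentence binary matrix; the matrix, the two-topic cut-off and the variable names are assumptions for the demo, and no tokenization is performed.

import numpy as np

# Toy binary term x sentence matrix: 4 terms, 3 sentences.
A = np.array([[1, 0, 1],
              [1, 1, 0],
              [0, 1, 1],
              [0, 0, 1]], dtype=float)

U, sigma, Vt = np.linalg.svd(A, full_matrices=False)
k = 2
sigma_cut = sigma.copy()
sigma_cut[k:] = 0.0                      # keep only the k strongest topics

# Sentence saliency: sqrt(sum_k sigma_k^2 * Vt[k, i]^2) for each sentence i.
saliency = np.sqrt(np.square(sigma_cut) @ np.square(Vt))
ranking = saliency.argsort()[::-1]
print(saliency.round(3), ranking)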
dfiel/greenwavereality
greenwavereality/greenwavereality.py
https://github.com/dfiel/greenwavereality/blob/cb2c0328385eb7afda910cc483266086432fb708/greenwavereality/greenwavereality.py#L99-L106
def grab_bulbs(host, token=None): """Grab XML, then add all bulbs to a dict. Removes room functionality""" xml = grab_xml(host, token) bulbs = {} for room in xml: for device in room['device']: bulbs[int(device['did'])] = device return bulbs
[ "def", "grab_bulbs", "(", "host", ",", "token", "=", "None", ")", ":", "xml", "=", "grab_xml", "(", "host", ",", "token", ")", "bulbs", "=", "{", "}", "for", "room", "in", "xml", ":", "for", "device", "in", "room", "[", "'device'", "]", ":", "bulbs", "[", "int", "(", "device", "[", "'did'", "]", ")", "]", "=", "device", "return", "bulbs" ]
Grab XML, then add all bulbs to a dict. Removes room functionality
[ "Grab", "XML", "then", "add", "all", "bulbs", "to", "a", "dict", ".", "Removes", "room", "functionality" ]
python
train
34.125
tanghaibao/jcvi
jcvi/graphics/tree.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/graphics/tree.py#L254-L338
def main(args): """ %prog newicktree Plot Newick formatted tree. The gene structure can be plotted along if --gffdir is given. The gff file needs to be `genename.gff`. If --sizes is on, also show the number of amino acids. With --barcode a mapping file can be provided to convert seq names to eg. species names, useful in unified tree display. This file should have distinctive barcodes in column1 and new names in column2, tab delimited. """ p = OptionParser(main.__doc__) p.add_option("--outgroup", help="Outgroup for rerooting the tree. " + "Use comma to separate multiple taxa.") p.add_option("--noreroot", default=False, action="store_true", help="Don't reroot the input tree [default: %default]") p.add_option("--rmargin", default=.3, type="float", help="Set blank rmargin to the right [default: %default]") p.add_option("--gffdir", default=None, help="The directory that contain GFF files [default: %default]") p.add_option("--sizes", default=None, help="The FASTA file or the sizes file [default: %default]") p.add_option("--SH", default=None, type="string", help="SH test p-value [default: %default]") p.add_option("--scutoff", default=0, type="int", help="cutoff for displaying node support, 0-100 [default: %default]") p.add_option("--barcode", default=None, help="path to seq names barcode mapping file: " "barcode<tab>new_name [default: %default]") p.add_option("--leafcolor", default="k", help="Font color for the OTUs, or path to a file " "containing color mappings: leafname<tab>color [default: %default]") p.add_option("--leaffont", default=12, help="Font size for the OTUs") p.add_option("--geoscale", default=False, action="store_true", help="Plot geological scale") opts, args, iopts = p.set_image_options(args, figsize="8x6") if len(args) != 1: sys.exit(not p.print_help()) datafile, = args outgroup = None reroot = not opts.noreroot if opts.outgroup: outgroup = opts.outgroup.split(",") if datafile == "demo": tx = """(((Os02g0681100:0.1151,Sb04g031800:0.11220)1.0:0.0537, (Os04g0578800:0.04318,Sb06g026210:0.04798)-1.0:0.08870)1.0:0.06985, ((Os03g0124100:0.08845,Sb01g048930:0.09055)1.0:0.05332, (Os10g0534700:0.06592,Sb01g030630:0.04824)-1.0:0.07886):0.09389);""" else: logging.debug("Load tree file `{0}`.".format(datafile)) tx = open(datafile).read() pf = datafile.rsplit(".", 1)[0] fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) if opts.geoscale: draw_geoscale(root) else: if op.isfile(opts.leafcolor): leafcolor = "k" leafcolorfile = opts.leafcolor else: leafcolor = opts.leafcolor leafcolorfile = None draw_tree(root, tx, rmargin=opts.rmargin, leafcolor=leafcolor, outgroup=outgroup, reroot=reroot, gffdir=opts.gffdir, sizes=opts.sizes, SH=opts.SH, scutoff=opts.scutoff, barcodefile=opts.barcode, leafcolorfile=leafcolorfile, leaffont=opts.leaffont) root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
[ "def", "main", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "main", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--outgroup\"", ",", "help", "=", "\"Outgroup for rerooting the tree. \"", "+", "\"Use comma to separate multiple taxa.\"", ")", "p", ".", "add_option", "(", "\"--noreroot\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Don't reroot the input tree [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--rmargin\"", ",", "default", "=", ".3", ",", "type", "=", "\"float\"", ",", "help", "=", "\"Set blank rmargin to the right [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--gffdir\"", ",", "default", "=", "None", ",", "help", "=", "\"The directory that contain GFF files [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--sizes\"", ",", "default", "=", "None", ",", "help", "=", "\"The FASTA file or the sizes file [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--SH\"", ",", "default", "=", "None", ",", "type", "=", "\"string\"", ",", "help", "=", "\"SH test p-value [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--scutoff\"", ",", "default", "=", "0", ",", "type", "=", "\"int\"", ",", "help", "=", "\"cutoff for displaying node support, 0-100 [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--barcode\"", ",", "default", "=", "None", ",", "help", "=", "\"path to seq names barcode mapping file: \"", "\"barcode<tab>new_name [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--leafcolor\"", ",", "default", "=", "\"k\"", ",", "help", "=", "\"Font color for the OTUs, or path to a file \"", "\"containing color mappings: leafname<tab>color [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--leaffont\"", ",", "default", "=", "12", ",", "help", "=", "\"Font size for the OTUs\"", ")", "p", ".", "add_option", "(", "\"--geoscale\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Plot geological scale\"", ")", "opts", ",", "args", ",", "iopts", "=", "p", ".", "set_image_options", "(", "args", ",", "figsize", "=", "\"8x6\"", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "datafile", ",", "=", "args", "outgroup", "=", "None", "reroot", "=", "not", "opts", ".", "noreroot", "if", "opts", ".", "outgroup", ":", "outgroup", "=", "opts", ".", "outgroup", ".", "split", "(", "\",\"", ")", "if", "datafile", "==", "\"demo\"", ":", "tx", "=", "\"\"\"(((Os02g0681100:0.1151,Sb04g031800:0.11220)1.0:0.0537,\n (Os04g0578800:0.04318,Sb06g026210:0.04798)-1.0:0.08870)1.0:0.06985,\n ((Os03g0124100:0.08845,Sb01g048930:0.09055)1.0:0.05332,\n (Os10g0534700:0.06592,Sb01g030630:0.04824)-1.0:0.07886):0.09389);\"\"\"", "else", ":", "logging", ".", "debug", "(", "\"Load tree file `{0}`.\"", ".", "format", "(", "datafile", ")", ")", "tx", "=", "open", "(", "datafile", ")", ".", "read", "(", ")", "pf", "=", "datafile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "fig", "=", "plt", ".", "figure", "(", "1", ",", "(", "iopts", ".", "w", ",", "iopts", ".", "h", ")", ")", "root", "=", "fig", ".", "add_axes", "(", "[", "0", ",", "0", ",", "1", ",", "1", "]", ")", "if", "opts", ".", "geoscale", ":", "draw_geoscale", "(", "root", ")", "else", ":", "if", "op", ".", "isfile", "(", "opts", ".", "leafcolor", ")", ":", "leafcolor", "=", "\"k\"", "leafcolorfile", "=", "opts", ".", "leafcolor", "else", ":", "leafcolor", "=", "opts", ".", "leafcolor", 
"leafcolorfile", "=", "None", "draw_tree", "(", "root", ",", "tx", ",", "rmargin", "=", "opts", ".", "rmargin", ",", "leafcolor", "=", "leafcolor", ",", "outgroup", "=", "outgroup", ",", "reroot", "=", "reroot", ",", "gffdir", "=", "opts", ".", "gffdir", ",", "sizes", "=", "opts", ".", "sizes", ",", "SH", "=", "opts", ".", "SH", ",", "scutoff", "=", "opts", ".", "scutoff", ",", "barcodefile", "=", "opts", ".", "barcode", ",", "leafcolorfile", "=", "leafcolorfile", ",", "leaffont", "=", "opts", ".", "leaffont", ")", "root", ".", "set_xlim", "(", "0", ",", "1", ")", "root", ".", "set_ylim", "(", "0", ",", "1", ")", "root", ".", "set_axis_off", "(", ")", "image_name", "=", "pf", "+", "\".\"", "+", "iopts", ".", "format", "savefig", "(", "image_name", ",", "dpi", "=", "iopts", ".", "dpi", ",", "iopts", "=", "iopts", ")" ]
%prog newicktree Plot Newick formatted tree. The gene structure can be plotted along if --gffdir is given. The gff file needs to be `genename.gff`. If --sizes is on, also show the number of amino acids. With --barcode a mapping file can be provided to convert seq names to eg. species names, useful in unified tree display. This file should have distinctive barcodes in column1 and new names in column2, tab delimited.
[ "%prog", "newicktree" ]
python
train
40.647059
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAIndicator/indicators.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAIndicator/indicators.py#L456-L464
def QA_indicator_BOLL(DataFrame, N=20, P=2): '布林线' C = DataFrame['close'] boll = MA(C, N) UB = boll + P * STD(C, N) LB = boll - P * STD(C, N) DICT = {'BOLL': boll, 'UB': UB, 'LB': LB} return pd.DataFrame(DICT)
[ "def", "QA_indicator_BOLL", "(", "DataFrame", ",", "N", "=", "20", ",", "P", "=", "2", ")", ":", "C", "=", "DataFrame", "[", "'close'", "]", "boll", "=", "MA", "(", "C", ",", "N", ")", "UB", "=", "boll", "+", "P", "*", "STD", "(", "C", ",", "N", ")", "LB", "=", "boll", "-", "P", "*", "STD", "(", "C", ",", "N", ")", "DICT", "=", "{", "'BOLL'", ":", "boll", ",", "'UB'", ":", "UB", ",", "'LB'", ":", "LB", "}", "return", "pd", ".", "DataFrame", "(", "DICT", ")" ]
Bollinger Bands (布林线)
[ "Bollinger", "Bands", "(", "布林线", ")" ]
python
train
25.555556
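QA_indicator_BOLL above builds Bollinger Bands from an n-period moving average plus/minus p rolling standard deviations. A plain-pandas sketch of the same bands follows; it uses pandas' rolling sample standard deviation, which may differ slightly from QUANTAXIS's STD helper, and the synthetic close series exists only for the demo.

import pandas as pd

def bollinger(close, n=20, p=2):
    """Middle band = n-period SMA; upper/lower bands = +/- p rolling stds."""
    mid = close.rolling(n).mean()
    std = close.rolling(n).std()
    return pd.DataFrame({"BOLL": mid, "UB": mid + p * std, "LB": mid - p * std})

close = pd.Series(range(1, 31), dtype=float)
print(bollinger(close, n=5).tail())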
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/project_analysis/project_analysis_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/project_analysis/project_analysis_client.py#L66-L93
def get_git_repositories_activity_metrics(self, project, from_date, aggregation_type, skip, top): """GetGitRepositoriesActivityMetrics. [Preview API] Retrieves git activity metrics for repositories matching a specified criteria. :param str project: Project ID or project name :param datetime from_date: Date from which, the trends are to be fetched. :param str aggregation_type: Bucket size on which, trends are to be aggregated. :param int skip: The number of repositories to ignore. :param int top: The number of repositories for which activity metrics are to be retrieved. :rtype: [RepositoryActivityMetrics] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if from_date is not None: query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'iso-8601') if aggregation_type is not None: query_parameters['aggregationType'] = self._serialize.query('aggregation_type', aggregation_type, 'str') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') response = self._send(http_method='GET', location_id='df7fbbca-630a-40e3-8aa3-7a3faf66947e', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[RepositoryActivityMetrics]', self._unwrap_collection(response))
[ "def", "get_git_repositories_activity_metrics", "(", "self", ",", "project", ",", "from_date", ",", "aggregation_type", ",", "skip", ",", "top", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "from_date", "is", "not", "None", ":", "query_parameters", "[", "'fromDate'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'from_date'", ",", "from_date", ",", "'iso-8601'", ")", "if", "aggregation_type", "is", "not", "None", ":", "query_parameters", "[", "'aggregationType'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'aggregation_type'", ",", "aggregation_type", ",", "'str'", ")", "if", "skip", "is", "not", "None", ":", "query_parameters", "[", "'$skip'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'skip'", ",", "skip", ",", "'int'", ")", "if", "top", "is", "not", "None", ":", "query_parameters", "[", "'$top'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'top'", ",", "top", ",", "'int'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'df7fbbca-630a-40e3-8aa3-7a3faf66947e'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[RepositoryActivityMetrics]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
GetGitRepositoriesActivityMetrics. [Preview API] Retrieves git activity metrics for repositories matching a specified criteria. :param str project: Project ID or project name :param datetime from_date: Date from which, the trends are to be fetched. :param str aggregation_type: Bucket size on which, trends are to be aggregated. :param int skip: The number of repositories to ignore. :param int top: The number of repositories for which activity metrics are to be retrieved. :rtype: [RepositoryActivityMetrics]
[ "GetGitRepositoriesActivityMetrics", ".", "[", "Preview", "API", "]", "Retrieves", "git", "activity", "metrics", "for", "repositories", "matching", "a", "specified", "criteria", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "datetime", "from_date", ":", "Date", "from", "which", "the", "trends", "are", "to", "be", "fetched", ".", ":", "param", "str", "aggregation_type", ":", "Bucket", "size", "on", "which", "trends", "are", "to", "be", "aggregated", ".", ":", "param", "int", "skip", ":", "The", "number", "of", "repositories", "to", "ignore", ".", ":", "param", "int", "top", ":", "The", "number", "of", "repositories", "for", "which", "activity", "metrics", "are", "to", "be", "retrieved", ".", ":", "rtype", ":", "[", "RepositoryActivityMetrics", "]" ]
python
train
62.607143
pyviz/holoviews
holoviews/element/path.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/element/path.py#L329-L344
def clone(self, *args, **overrides): """ Returns a clone of the object with matching parameter values containing the specified args and kwargs. """ link = overrides.pop('link', True) settings = dict(self.get_param_values(), **overrides) if 'id' not in settings: settings['id'] = self.id if not args and link: settings['plot_id'] = self._plot_id pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', []) return self.__class__(*(settings[n] for n in pos_args), **{k:v for k,v in settings.items() if k not in pos_args})
[ "def", "clone", "(", "self", ",", "*", "args", ",", "*", "*", "overrides", ")", ":", "link", "=", "overrides", ".", "pop", "(", "'link'", ",", "True", ")", "settings", "=", "dict", "(", "self", ".", "get_param_values", "(", ")", ",", "*", "*", "overrides", ")", "if", "'id'", "not", "in", "settings", ":", "settings", "[", "'id'", "]", "=", "self", ".", "id", "if", "not", "args", "and", "link", ":", "settings", "[", "'plot_id'", "]", "=", "self", ".", "_plot_id", "pos_args", "=", "getattr", "(", "self", ",", "'_'", "+", "type", "(", "self", ")", ".", "__name__", "+", "'__pos_params'", ",", "[", "]", ")", "return", "self", ".", "__class__", "(", "*", "(", "settings", "[", "n", "]", "for", "n", "in", "pos_args", ")", ",", "*", "*", "{", "k", ":", "v", "for", "k", ",", "v", "in", "settings", ".", "items", "(", ")", "if", "k", "not", "in", "pos_args", "}", ")" ]
Returns a clone of the object with matching parameter values containing the specified args and kwargs.
[ "Returns", "a", "clone", "of", "the", "object", "with", "matching", "parameter", "values", "containing", "the", "specified", "args", "and", "kwargs", "." ]
python
train
42.75
apple/turicreate
src/unity/python/turicreate/toolkits/recommender/util.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L1763-L1772
def _get_popularity_baseline(self): """ Returns a new popularity model matching the data set this model was trained with. Can be used for comparison purposes. """ response = self.__proxy__.get_popularity_baseline() from .popularity_recommender import PopularityRecommender return PopularityRecommender(response)
[ "def", "_get_popularity_baseline", "(", "self", ")", ":", "response", "=", "self", ".", "__proxy__", ".", "get_popularity_baseline", "(", ")", "from", ".", "popularity_recommender", "import", "PopularityRecommender", "return", "PopularityRecommender", "(", "response", ")" ]
Returns a new popularity model matching the data set this model was trained with. Can be used for comparison purposes.
[ "Returns", "a", "new", "popularity", "model", "matching", "the", "data", "set", "this", "model", "was", "trained", "with", ".", "Can", "be", "used", "for", "comparison", "purposes", "." ]
python
train
36.1
bitesofcode/projexui
projexui/widgets/xcombobox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcombobox.py#L373-L393
def setCheckedItems(self, items): """ Returns the checked items for this combobox. :return items | [<str>, ..] """ if not self.isCheckable(): return model = self.model() for i in range(self.count()): item_text = self.itemText(i) if not item_text: continue if nativestring(item_text) in items: state = Qt.Checked else: state = Qt.Unchecked model.item(i).setCheckState(state)
[ "def", "setCheckedItems", "(", "self", ",", "items", ")", ":", "if", "not", "self", ".", "isCheckable", "(", ")", ":", "return", "model", "=", "self", ".", "model", "(", ")", "for", "i", "in", "range", "(", "self", ".", "count", "(", ")", ")", ":", "item_text", "=", "self", ".", "itemText", "(", "i", ")", "if", "not", "item_text", ":", "continue", "if", "nativestring", "(", "item_text", ")", "in", "items", ":", "state", "=", "Qt", ".", "Checked", "else", ":", "state", "=", "Qt", ".", "Unchecked", "model", ".", "item", "(", "i", ")", ".", "setCheckState", "(", "state", ")" ]
Returns the checked items for this combobox. :return items | [<str>, ..]
[ "Returns", "the", "checked", "items", "for", "this", "combobox", ".", ":", "return", "items", "|", "[", "<str", ">", "..", "]" ]
python
train
27.47619
paylogic/pip-accel
pip_accel/bdist.py
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/bdist.py#L59-L122
def get_binary_dist(self, requirement): """ Get or create a cached binary distribution archive. :param requirement: A :class:`.Requirement` object. :returns: An iterable of tuples with two values each: A :class:`tarfile.TarInfo` object and a file-like object. Gets the cached binary distribution that was previously built for the given requirement. If no binary distribution has been cached yet, a new binary distribution is built and added to the cache. Uses :func:`build_binary_dist()` to build binary distribution archives. If this fails with a build error :func:`get_binary_dist()` will use :class:`.SystemPackageManager` to check for and install missing system packages and retry the build when missing system packages were installed. """ cache_file = self.cache.get(requirement) if cache_file: if self.needs_invalidation(requirement, cache_file): logger.info("Invalidating old %s binary (source has changed) ..", requirement) cache_file = None else: logger.debug("%s hasn't been cached yet, doing so now.", requirement) if not cache_file: # Build the binary distribution. try: raw_file = self.build_binary_dist(requirement) except BuildFailed: logger.warning("Build of %s failed, checking for missing dependencies ..", requirement) if self.system_package_manager.install_dependencies(requirement): raw_file = self.build_binary_dist(requirement) else: raise # Transform the binary distribution archive into a form that we can re-use. fd, transformed_file = tempfile.mkstemp(prefix='pip-accel-bdist-', suffix='.tar.gz') try: archive = tarfile.open(transformed_file, 'w:gz') try: for member, from_handle in self.transform_binary_dist(raw_file): archive.addfile(member, from_handle) finally: archive.close() # Push the binary distribution archive to all available backends. with open(transformed_file, 'rb') as handle: self.cache.put(requirement, handle) finally: # Close file descriptor before removing the temporary file. # Without closing Windows is complaining that the file cannot # be removed because it is used by another process. os.close(fd) # Cleanup the temporary file. os.remove(transformed_file) # Get the absolute pathname of the file in the local cache. cache_file = self.cache.get(requirement) # Enable checksum based cache invalidation. self.persist_checksum(requirement, cache_file) archive = tarfile.open(cache_file, 'r:gz') try: for member in archive.getmembers(): yield member, archive.extractfile(member.name) finally: archive.close()
[ "def", "get_binary_dist", "(", "self", ",", "requirement", ")", ":", "cache_file", "=", "self", ".", "cache", ".", "get", "(", "requirement", ")", "if", "cache_file", ":", "if", "self", ".", "needs_invalidation", "(", "requirement", ",", "cache_file", ")", ":", "logger", ".", "info", "(", "\"Invalidating old %s binary (source has changed) ..\"", ",", "requirement", ")", "cache_file", "=", "None", "else", ":", "logger", ".", "debug", "(", "\"%s hasn't been cached yet, doing so now.\"", ",", "requirement", ")", "if", "not", "cache_file", ":", "# Build the binary distribution.", "try", ":", "raw_file", "=", "self", ".", "build_binary_dist", "(", "requirement", ")", "except", "BuildFailed", ":", "logger", ".", "warning", "(", "\"Build of %s failed, checking for missing dependencies ..\"", ",", "requirement", ")", "if", "self", ".", "system_package_manager", ".", "install_dependencies", "(", "requirement", ")", ":", "raw_file", "=", "self", ".", "build_binary_dist", "(", "requirement", ")", "else", ":", "raise", "# Transform the binary distribution archive into a form that we can re-use.", "fd", ",", "transformed_file", "=", "tempfile", ".", "mkstemp", "(", "prefix", "=", "'pip-accel-bdist-'", ",", "suffix", "=", "'.tar.gz'", ")", "try", ":", "archive", "=", "tarfile", ".", "open", "(", "transformed_file", ",", "'w:gz'", ")", "try", ":", "for", "member", ",", "from_handle", "in", "self", ".", "transform_binary_dist", "(", "raw_file", ")", ":", "archive", ".", "addfile", "(", "member", ",", "from_handle", ")", "finally", ":", "archive", ".", "close", "(", ")", "# Push the binary distribution archive to all available backends.", "with", "open", "(", "transformed_file", ",", "'rb'", ")", "as", "handle", ":", "self", ".", "cache", ".", "put", "(", "requirement", ",", "handle", ")", "finally", ":", "# Close file descriptor before removing the temporary file.", "# Without closing Windows is complaining that the file cannot", "# be removed because it is used by another process.", "os", ".", "close", "(", "fd", ")", "# Cleanup the temporary file.", "os", ".", "remove", "(", "transformed_file", ")", "# Get the absolute pathname of the file in the local cache.", "cache_file", "=", "self", ".", "cache", ".", "get", "(", "requirement", ")", "# Enable checksum based cache invalidation.", "self", ".", "persist_checksum", "(", "requirement", ",", "cache_file", ")", "archive", "=", "tarfile", ".", "open", "(", "cache_file", ",", "'r:gz'", ")", "try", ":", "for", "member", "in", "archive", ".", "getmembers", "(", ")", ":", "yield", "member", ",", "archive", ".", "extractfile", "(", "member", ".", "name", ")", "finally", ":", "archive", ".", "close", "(", ")" ]
Get or create a cached binary distribution archive. :param requirement: A :class:`.Requirement` object. :returns: An iterable of tuples with two values each: A :class:`tarfile.TarInfo` object and a file-like object. Gets the cached binary distribution that was previously built for the given requirement. If no binary distribution has been cached yet, a new binary distribution is built and added to the cache. Uses :func:`build_binary_dist()` to build binary distribution archives. If this fails with a build error :func:`get_binary_dist()` will use :class:`.SystemPackageManager` to check for and install missing system packages and retry the build when missing system packages were installed.
[ "Get", "or", "create", "a", "cached", "binary", "distribution", "archive", "." ]
python
train
49.5
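get_binary_dist above rewrites a freshly built binary distribution into a normalized .tar.gz before caching it, copying the archive member by member through transform_binary_dist. Below is a stripped-down sketch of that member-by-member repack using only the tarfile module; the path arguments and the name-rewriting callback are placeholders, not pip-accel's real transform.

import tarfile

def repack(src_path, dst_path, rename):
    """Copy a .tar.gz member by member, letting ``rename`` rewrite each name."""
    with tarfile.open(src_path, "r:gz") as src, \
            tarfile.open(dst_path, "w:gz") as dst:
        for member in src.getmembers():
            handle = src.extractfile(member) if member.isfile() else None
            member.name = rename(member.name)
            dst.addfile(member, handle)

# Example: strip a leading build prefix from every member name.
# repack("raw-bdist.tar.gz", "cached-bdist.tar.gz",
#        lambda name: name.split("/", 1)[-1])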
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/utils.py
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/utils.py#L18-L78
def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=''): """ Creates and returns a column. :param app: current sphinx app :param fromdocname: current document :param all_needs: Dictionary of all need objects :param need_info: need_info object, which stores all related need data :param need_key: The key to access the needed data from need_info :param make_ref: If true, creates a reference for the given data in need_key :param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference :param prefix: string, which is used as prefix for the text output :return: column object (nodes.entry) """ row_col = nodes.entry() para_col = nodes.paragraph() if need_key in need_info and need_info[need_key] is not None: if not isinstance(need_info[need_key], (list, set)): data = [need_info[need_key]] else: data = need_info[need_key] for index, datum in enumerate(data): link_id = datum link_part = None if need_key in ['links', 'back_links']: if '.' in datum: link_id = datum.split('.')[0] link_part = datum.split('.')[1] datum_text = prefix + datum text_col = nodes.Text(datum_text, datum_text) if make_ref or ref_lookup: try: ref_col = nodes.reference("", "") if not ref_lookup: ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname']) ref_col['refuri'] += "#" + datum else: temp_need = all_needs[link_id] ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname']) ref_col['refuri'] += "#" + temp_need["id"] if link_part is not None: ref_col['refuri'] += '.' + link_part except KeyError: para_col += text_col else: ref_col.append(text_col) para_col += ref_col else: para_col += text_col if index + 1 < len(data): para_col += nodes.emphasis("; ", "; ") row_col += para_col return row_col
[ "def", "row_col_maker", "(", "app", ",", "fromdocname", ",", "all_needs", ",", "need_info", ",", "need_key", ",", "make_ref", "=", "False", ",", "ref_lookup", "=", "False", ",", "prefix", "=", "''", ")", ":", "row_col", "=", "nodes", ".", "entry", "(", ")", "para_col", "=", "nodes", ".", "paragraph", "(", ")", "if", "need_key", "in", "need_info", "and", "need_info", "[", "need_key", "]", "is", "not", "None", ":", "if", "not", "isinstance", "(", "need_info", "[", "need_key", "]", ",", "(", "list", ",", "set", ")", ")", ":", "data", "=", "[", "need_info", "[", "need_key", "]", "]", "else", ":", "data", "=", "need_info", "[", "need_key", "]", "for", "index", ",", "datum", "in", "enumerate", "(", "data", ")", ":", "link_id", "=", "datum", "link_part", "=", "None", "if", "need_key", "in", "[", "'links'", ",", "'back_links'", "]", ":", "if", "'.'", "in", "datum", ":", "link_id", "=", "datum", ".", "split", "(", "'.'", ")", "[", "0", "]", "link_part", "=", "datum", ".", "split", "(", "'.'", ")", "[", "1", "]", "datum_text", "=", "prefix", "+", "datum", "text_col", "=", "nodes", ".", "Text", "(", "datum_text", ",", "datum_text", ")", "if", "make_ref", "or", "ref_lookup", ":", "try", ":", "ref_col", "=", "nodes", ".", "reference", "(", "\"\"", ",", "\"\"", ")", "if", "not", "ref_lookup", ":", "ref_col", "[", "'refuri'", "]", "=", "app", ".", "builder", ".", "get_relative_uri", "(", "fromdocname", ",", "need_info", "[", "'docname'", "]", ")", "ref_col", "[", "'refuri'", "]", "+=", "\"#\"", "+", "datum", "else", ":", "temp_need", "=", "all_needs", "[", "link_id", "]", "ref_col", "[", "'refuri'", "]", "=", "app", ".", "builder", ".", "get_relative_uri", "(", "fromdocname", ",", "temp_need", "[", "'docname'", "]", ")", "ref_col", "[", "'refuri'", "]", "+=", "\"#\"", "+", "temp_need", "[", "\"id\"", "]", "if", "link_part", "is", "not", "None", ":", "ref_col", "[", "'refuri'", "]", "+=", "'.'", "+", "link_part", "except", "KeyError", ":", "para_col", "+=", "text_col", "else", ":", "ref_col", ".", "append", "(", "text_col", ")", "para_col", "+=", "ref_col", "else", ":", "para_col", "+=", "text_col", "if", "index", "+", "1", "<", "len", "(", "data", ")", ":", "para_col", "+=", "nodes", ".", "emphasis", "(", "\"; \"", ",", "\"; \"", ")", "row_col", "+=", "para_col", "return", "row_col" ]
Creates and returns a column. :param app: current sphinx app :param fromdocname: current document :param all_needs: Dictionary of all need objects :param need_info: need_info object, which stores all related need data :param need_key: The key to access the needed data from need_info :param make_ref: If true, creates a reference for the given data in need_key :param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference :param prefix: string, which is used as prefix for the text output :return: column object (nodes.entry)
[ "Creates", "and", "returns", "a", "column", "." ]
python
train
39.819672
gwastro/pycbc
pycbc/types/timeseries.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/timeseries.py#L201-L206
def at_time(self, time, nearest_sample=False): """ Return the value at the specified gps time """ if nearest_sample: time += self.delta_t / 2.0 return self[int((time-self.start_time)*self.sample_rate)]
[ "def", "at_time", "(", "self", ",", "time", ",", "nearest_sample", "=", "False", ")", ":", "if", "nearest_sample", ":", "time", "+=", "self", ".", "delta_t", "/", "2.0", "return", "self", "[", "int", "(", "(", "time", "-", "self", ".", "start_time", ")", "*", "self", ".", "sample_rate", ")", "]" ]
Return the value at the specified gps time
[ "Return", "the", "value", "at", "the", "specified", "gps", "time" ]
python
train
40
anjishnu/ask-alexa-pykit
ask/alexa_io.py
https://github.com/anjishnu/ask-alexa-pykit/blob/a47c278ca7a60532bbe1a9b789f6c37e609fea8b/ask/alexa_io.py#L195-L213
def route_request(self, request_json, metadata=None): ''' Route the request object to the right handler function ''' request = Request(request_json) request.metadata = metadata # add reprompt handler or some such for default? handler_fn = self._handlers[self._default] # Set default handling for noisy requests if not request.is_intent() and (request.request_type() in self._handlers): ''' Route request to a non intent handler ''' handler_fn = self._handlers[request.request_type()] elif request.is_intent() and request.intent_name() in self._handlers['IntentRequest']: ''' Route to right intent handler ''' handler_fn = self._handlers['IntentRequest'][request.intent_name()] response = handler_fn(request) response.set_session(request.session) return response.to_json()
[ "def", "route_request", "(", "self", ",", "request_json", ",", "metadata", "=", "None", ")", ":", "request", "=", "Request", "(", "request_json", ")", "request", ".", "metadata", "=", "metadata", "# add reprompt handler or some such for default?", "handler_fn", "=", "self", ".", "_handlers", "[", "self", ".", "_default", "]", "# Set default handling for noisy requests", "if", "not", "request", ".", "is_intent", "(", ")", "and", "(", "request", ".", "request_type", "(", ")", "in", "self", ".", "_handlers", ")", ":", "''' Route request to a non intent handler '''", "handler_fn", "=", "self", ".", "_handlers", "[", "request", ".", "request_type", "(", ")", "]", "elif", "request", ".", "is_intent", "(", ")", "and", "request", ".", "intent_name", "(", ")", "in", "self", ".", "_handlers", "[", "'IntentRequest'", "]", ":", "''' Route to right intent handler '''", "handler_fn", "=", "self", ".", "_handlers", "[", "'IntentRequest'", "]", "[", "request", ".", "intent_name", "(", ")", "]", "response", "=", "handler_fn", "(", "request", ")", "response", ".", "set_session", "(", "request", ".", "session", ")", "return", "response", ".", "to_json", "(", ")" ]
Route the request object to the right handler function
[ "Route", "the", "request", "object", "to", "the", "right", "handler", "function" ]
python
train
46.631579
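A hedged sketch of wiring route_request() above into an AWS Lambda entry point; `dispatcher` stands for whatever object exposes route_request() once intent handlers are registered, and the metadata key is illustrative.

def make_lambda_handler(dispatcher):
    # `dispatcher` is any object exposing route_request(), e.g. the router the
    # intent handlers were registered on (name chosen here for illustration).
    def lambda_handler(event, context):
        metadata = {'aws_request_id': getattr(context, 'aws_request_id', None)}
        # route_request() selects the handler from the request type or intent
        # name and returns a JSON-serializable Alexa response.
        return dispatcher.route_request(event, metadata=metadata)
    return lambda_handler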
Contraz/demosys-py
demosys/management/__init__.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/management/__init__.py#L9-L19
def find_commands(command_dir: str) -> List[str]: """ Get all command names in the a folder :return: List of commands names """ if not command_dir: return [] return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir]) if not is_pkg and not name.startswith('_')]
[ "def", "find_commands", "(", "command_dir", ":", "str", ")", "->", "List", "[", "str", "]", ":", "if", "not", "command_dir", ":", "return", "[", "]", "return", "[", "name", "for", "_", ",", "name", ",", "is_pkg", "in", "pkgutil", ".", "iter_modules", "(", "[", "command_dir", "]", ")", "if", "not", "is_pkg", "and", "not", "name", ".", "startswith", "(", "'_'", ")", "]" ]
Get all command names in a folder :return: List of command names
[ "Get", "all", "command", "names", "in", "a", "folder" ]

python
valid
28.090909
yamins81/tabular
tabular/io.py
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/io.py#L1618-L1692
def loadbinary(fname): """ Load a numpy binary file or archive created by tabular.io.savebinary. Load a numpy binary file (``.npy``) or archive (``.npz``) created by :func:`tabular.io.savebinary`. The data and associated data type (e.g. `dtype`, including if given, column names) are loaded and reconstituted. If `fname` is a numpy archive, it may contain additional data giving hierarchical column-oriented structure (e.g. `coloring`). See :func:`tabular.tab.tabarray.__new__` for more information about coloring. The ``.npz`` file is a zipped archive created using :func:`numpy.savez` and containing one or more ``.npy`` files, which are NumPy binary files created by :func:`numpy.save`. **Parameters** **fname** : string or file-like object File name or open numpy binary file (``.npy``) or archive (``.npz``) created by :func:`tabular.io.savebinary`. * When `fname` is a ``.npy`` binary file, it is reconstituted as a flat ndarray of data, with structured dtype. * When `fname` is a ``.npz`` archive, it contains at least one ``.npy`` binary file and optionally another: * ``data.npy`` must be in the archive, and is reconstituted as `X`, a flat ndarray of data, with structured dtype, `dtype`. * ``coloring.npy``, if present is reconstitued as `coloring`, a dictionary. **Returns** **X** : numpy ndarray with structured dtype The data, where each column is named and is of a uniform NumPy data type. **dtype** : numpy dtype object The data type of `X`, e.g. `X.dtype`. **coloring** : dictionary, or None Hierarchical structure on the columns given in the header of the file; an attribute of tabarrays. See :func:`tabular.tab.tabarray.__new__` for more information about coloring. **See Also:** :func:`tabular.io.savebinary`, :func:`numpy.load`, :func:`numpy.save`, :func:`numpy.savez` """ X = np.load(fname) if isinstance(X, np.lib.npyio.NpzFile): if 'coloring' in X.files: coloring = X['coloring'].tolist() else: coloring = None if 'data' in X.files: return [X['data'], X['data'].dtype, coloring] else: return [None, None, coloring] else: return [X, X.dtype, None]
[ "def", "loadbinary", "(", "fname", ")", ":", "X", "=", "np", ".", "load", "(", "fname", ")", "if", "isinstance", "(", "X", ",", "np", ".", "lib", ".", "npyio", ".", "NpzFile", ")", ":", "if", "'coloring'", "in", "X", ".", "files", ":", "coloring", "=", "X", "[", "'coloring'", "]", ".", "tolist", "(", ")", "else", ":", "coloring", "=", "None", "if", "'data'", "in", "X", ".", "files", ":", "return", "[", "X", "[", "'data'", "]", ",", "X", "[", "'data'", "]", ".", "dtype", ",", "coloring", "]", "else", ":", "return", "[", "None", ",", "None", ",", "coloring", "]", "else", ":", "return", "[", "X", ",", "X", ".", "dtype", ",", "None", "]" ]
Load a numpy binary file or archive created by tabular.io.savebinary. Load a numpy binary file (``.npy``) or archive (``.npz``) created by :func:`tabular.io.savebinary`. The data and associated data type (e.g. `dtype`, including if given, column names) are loaded and reconstituted. If `fname` is a numpy archive, it may contain additional data giving hierarchical column-oriented structure (e.g. `coloring`). See :func:`tabular.tab.tabarray.__new__` for more information about coloring. The ``.npz`` file is a zipped archive created using :func:`numpy.savez` and containing one or more ``.npy`` files, which are NumPy binary files created by :func:`numpy.save`. **Parameters** **fname** : string or file-like object File name or open numpy binary file (``.npy``) or archive (``.npz``) created by :func:`tabular.io.savebinary`. * When `fname` is a ``.npy`` binary file, it is reconstituted as a flat ndarray of data, with structured dtype. * When `fname` is a ``.npz`` archive, it contains at least one ``.npy`` binary file and optionally another: * ``data.npy`` must be in the archive, and is reconstituted as `X`, a flat ndarray of data, with structured dtype, `dtype`. * ``coloring.npy``, if present, is reconstituted as `coloring`, a dictionary. **Returns** **X** : numpy ndarray with structured dtype The data, where each column is named and is of a uniform NumPy data type. **dtype** : numpy dtype object The data type of `X`, e.g. `X.dtype`. **coloring** : dictionary, or None Hierarchical structure on the columns given in the header of the file; an attribute of tabarrays. See :func:`tabular.tab.tabarray.__new__` for more information about coloring. **See Also:** :func:`tabular.io.savebinary`, :func:`numpy.load`, :func:`numpy.save`, :func:`numpy.savez`
[ "Load", "a", "numpy", "binary", "file", "or", "archive", "created", "by", "tabular", ".", "io", ".", "savebinary", ".", "Load", "a", "numpy", "binary", "file", "(", ".", "npy", ")", "or", "archive", "(", ".", "npz", ")", "created", "by", ":", "func", ":", "tabular", ".", "io", ".", "savebinary", "." ]
python
train
32.76
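A minimal round-trip sketch for loadbinary() above, using plain numpy to produce an ``.npz`` archive of the shape the function expects (a structured 'data' array); the import path follows the record's module layout and the file name is illustrative.

import numpy as np
from tabular.io import loadbinary

recs = np.array([(1, 2.5), (3, 4.0)], dtype=[('a', int), ('b', float)])
np.savez('example.npz', data=recs)          # what savebinary would normally emit

X, dtype, coloring = loadbinary('example.npz')
print(dtype.names)    # ('a', 'b')
print(coloring)       # None, since no 'coloring' array was stored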
dls-controls/pymalcolm
malcolm/core/part.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/part.py#L137-L145
def add_attribute_model(self, name, # type: str attr, # type: AttributeModel writeable_func=None, # type: Optional[Callable] ): # type: (...) -> AttributeModel """Register a pre-existing AttributeModel to be added to the Block""" return self._field_registry.add_attribute_model( name, attr, writeable_func, self._part)
[ "def", "add_attribute_model", "(", "self", ",", "name", ",", "# type: str", "attr", ",", "# type: AttributeModel", "writeable_func", "=", "None", ",", "# type: Optional[Callable]", ")", ":", "# type: (...) -> AttributeModel", "return", "self", ".", "_field_registry", ".", "add_attribute_model", "(", "name", ",", "attr", ",", "writeable_func", ",", "self", ".", "_part", ")" ]
Register a pre-existing AttributeModel to be added to the Block
[ "Register", "a", "pre", "-", "existing", "AttributeModel", "to", "be", "added", "to", "the", "Block" ]
python
train
51.222222
saltstack/salt
salt/cloud/clouds/ec2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/ec2.py#L2934-L2948
def queue_instances(instances): ''' Queue a set of instances to be provisioned later. Expects a list. Currently this only queries node data, and then places it in the cloud cache (if configured). If the salt-cloud-reactor is being used, these instances will be automatically provisioned using that. For more information about the salt-cloud-reactor, see: https://github.com/saltstack-formulas/salt-cloud-reactor ''' for instance_id in instances: node = _get_node(instance_id=instance_id) __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
[ "def", "queue_instances", "(", "instances", ")", ":", "for", "instance_id", "in", "instances", ":", "node", "=", "_get_node", "(", "instance_id", "=", "instance_id", ")", "__utils__", "[", "'cloud.cache_node'", "]", "(", "node", ",", "__active_provider_name__", ",", "__opts__", ")" ]
Queue a set of instances to be provisioned later. Expects a list. Currently this only queries node data, and then places it in the cloud cache (if configured). If the salt-cloud-reactor is being used, these instances will be automatically provisioned using that. For more information about the salt-cloud-reactor, see: https://github.com/saltstack-formulas/salt-cloud-reactor
[ "Queue", "a", "set", "of", "instances", "to", "be", "provisioned", "later", ".", "Expects", "a", "list", "." ]
python
train
40
LonamiWebs/Telethon
telethon/events/common.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/events/common.py#L167-L176
def _load_entities(self): """ Must load all the entities it needs from cache, and return ``False`` if it could not find all of them. """ if not self._chat_peer: return True # Nothing to load (e.g. MessageDeleted) self._chat, self._input_chat = self._get_entity_pair(self.chat_id) return self._input_chat is not None
[ "def", "_load_entities", "(", "self", ")", ":", "if", "not", "self", ".", "_chat_peer", ":", "return", "True", "# Nothing to load (e.g. MessageDeleted)", "self", ".", "_chat", ",", "self", ".", "_input_chat", "=", "self", ".", "_get_entity_pair", "(", "self", ".", "chat_id", ")", "return", "self", ".", "_input_chat", "is", "not", "None" ]
Must load all the entities it needs from cache, and return ``False`` if it could not find all of them.
[ "Must", "load", "all", "the", "entities", "it", "needs", "from", "cache", "and", "return", "False", "if", "it", "could", "not", "find", "all", "of", "them", "." ]
python
train
37.6
tensorflow/cleverhans
cleverhans/confidence_report.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/confidence_report.py#L238-L270
def print_stats(correctness, confidence, name): """ Prints out accuracy, coverage, etc. statistics :param correctness: ndarray One bool per example specifying whether it was correctly classified :param confidence: ndarray The probability associated with each prediction :param name: str The name of this type of data (e.g. "clean", "MaxConfidence") """ accuracy = correctness.mean() wrongness = 1 - correctness denom1 = np.maximum(1, wrongness.sum()) ave_prob_on_mistake = (wrongness * confidence).sum() / denom1 assert ave_prob_on_mistake <= 1., ave_prob_on_mistake denom2 = np.maximum(1, correctness.sum()) ave_prob_on_correct = (correctness * confidence).sum() / denom2 covered = confidence > 0.5 cov_half = covered.mean() acc_half = (correctness * covered).sum() / np.maximum(1, covered.sum()) print('Accuracy on %s examples: %0.4f' % (name, accuracy)) print("Average prob on mistakes: %0.4f" % ave_prob_on_mistake) print("Average prob on correct: %0.4f" % ave_prob_on_correct) print("Accuracy when prob thresholded at .5: %0.4f" % acc_half) print("Coverage when prob thresholded at .5: %0.4f" % cov_half) success_rate = acc_half * cov_half # Success is correctly classifying a covered example print("Success rate at .5: %0.4f" % success_rate) # Failure is misclassifying a covered example failure_rate = (1. - acc_half) * cov_half print("Failure rate at .5: %0.4f" % failure_rate) print()
[ "def", "print_stats", "(", "correctness", ",", "confidence", ",", "name", ")", ":", "accuracy", "=", "correctness", ".", "mean", "(", ")", "wrongness", "=", "1", "-", "correctness", "denom1", "=", "np", ".", "maximum", "(", "1", ",", "wrongness", ".", "sum", "(", ")", ")", "ave_prob_on_mistake", "=", "(", "wrongness", "*", "confidence", ")", ".", "sum", "(", ")", "/", "denom1", "assert", "ave_prob_on_mistake", "<=", "1.", ",", "ave_prob_on_mistake", "denom2", "=", "np", ".", "maximum", "(", "1", ",", "correctness", ".", "sum", "(", ")", ")", "ave_prob_on_correct", "=", "(", "correctness", "*", "confidence", ")", ".", "sum", "(", ")", "/", "denom2", "covered", "=", "confidence", ">", "0.5", "cov_half", "=", "covered", ".", "mean", "(", ")", "acc_half", "=", "(", "correctness", "*", "covered", ")", ".", "sum", "(", ")", "/", "np", ".", "maximum", "(", "1", ",", "covered", ".", "sum", "(", ")", ")", "print", "(", "'Accuracy on %s examples: %0.4f'", "%", "(", "name", ",", "accuracy", ")", ")", "print", "(", "\"Average prob on mistakes: %0.4f\"", "%", "ave_prob_on_mistake", ")", "print", "(", "\"Average prob on correct: %0.4f\"", "%", "ave_prob_on_correct", ")", "print", "(", "\"Accuracy when prob thresholded at .5: %0.4f\"", "%", "acc_half", ")", "print", "(", "\"Coverage when prob thresholded at .5: %0.4f\"", "%", "cov_half", ")", "success_rate", "=", "acc_half", "*", "cov_half", "# Success is correctly classifying a covered example", "print", "(", "\"Success rate at .5: %0.4f\"", "%", "success_rate", ")", "# Failure is misclassifying a covered example", "failure_rate", "=", "(", "1.", "-", "acc_half", ")", "*", "cov_half", "print", "(", "\"Failure rate at .5: %0.4f\"", "%", "failure_rate", ")", "print", "(", ")" ]
Prints out accuracy, coverage, etc. statistics :param correctness: ndarray One bool per example specifying whether it was correctly classified :param confidence: ndarray The probability associated with each prediction :param name: str The name of this type of data (e.g. "clean", "MaxConfidence")
[ "Prints", "out", "accuracy", "coverage", "etc", ".", "statistics", ":", "param", "correctness", ":", "ndarray", "One", "bool", "per", "example", "specifying", "whether", "it", "was", "correctly", "classified", ":", "param", "confidence", ":", "ndarray", "The", "probability", "associated", "with", "each", "prediction", ":", "param", "name", ":", "str", "The", "name", "of", "this", "type", "of", "data", "(", "e", ".", "g", ".", "clean", "MaxConfidence", ")" ]
python
train
43.333333
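An illustrative call to print_stats() above with synthetic numpy inputs; in practice cleverhans derives these arrays from evaluating a model on clean or adversarial examples.

import numpy as np

correctness = np.array([1., 0., 1., 1.])     # 1.0 = correctly classified example
confidence = np.array([0.9, 0.8, 0.6, 0.4])  # probability of the predicted label
print_stats(correctness, confidence, 'clean')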
heuer/cablemap
cablemap.core/cablemap/core/predicates.py
https://github.com/heuer/cablemap/blob/42066c8fc2972d237a2c35578e14525aaf705f38/cablemap.core/cablemap/core/predicates.py#L215-L232
def origin_central_asia(origin): """\ Returns if the origin is located in Central Asia. Holds true for the following countries: * Afghanistan * Kazakhstan * Kyrgyzstan * Tajikistan * Turkmenistan * Uzbekistan `origin` The origin to check. """ return origin_afghanistan(origin) or origin_kazakhstan(origin) \ or origin_kyrgyzstan(origin) or origin_tajikistan(origin) \ or origin_turkmenistan(origin) or origin_uzbekistan(origin)
[ "def", "origin_central_asia", "(", "origin", ")", ":", "return", "origin_afghanistan", "(", "origin", ")", "or", "origin_kazakhstan", "(", "origin", ")", "or", "origin_kyrgyzstan", "(", "origin", ")", "or", "origin_tajikistan", "(", "origin", ")", "or", "origin_turkmenistan", "(", "origin", ")", "or", "origin_uzbekistan", "(", "origin", ")" ]
\ Returns if the origin is located in Central Asia. Holds true for the following countries: * Afghanistan * Kazakhstan * Kyrgyzstan * Tajikistan * Turkmenistan * Uzbekistan `origin` The origin to check.
[ "\\", "Returns", "if", "the", "origin", "is", "located", "in", "Central", "Asia", "." ]
python
train
28.555556
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/nfw_ellipse.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/nfw_ellipse.py#L44-L65
def derivatives(self, x, y, Rs, theta_Rs, e1, e2, center_x=0, center_y=0): """ returns df/dx and df/dy of the function (integral of NFW) """ phi_G, q = param_util.ellipticity2phi_q(e1, e2) x_shift = x - center_x y_shift = y - center_y cos_phi = np.cos(phi_G) sin_phi = np.sin(phi_G) e = min(abs(1. - q), 0.99) xt1 = (cos_phi*x_shift+sin_phi*y_shift)*np.sqrt(1 - e) xt2 = (-sin_phi*x_shift+cos_phi*y_shift)*np.sqrt(1 + e) R_ = np.sqrt(xt1**2 + xt2**2) rho0_input = self.nfw._alpha2rho0(theta_Rs=theta_Rs, Rs=Rs) if Rs < 0.0000001: Rs = 0.0000001 f_x_prim, f_y_prim = self.nfw.nfwAlpha(R_, Rs, rho0_input, xt1, xt2) f_x_prim *= np.sqrt(1 - e) f_y_prim *= np.sqrt(1 + e) f_x = cos_phi*f_x_prim-sin_phi*f_y_prim f_y = sin_phi*f_x_prim+cos_phi*f_y_prim return f_x, f_y
[ "def", "derivatives", "(", "self", ",", "x", ",", "y", ",", "Rs", ",", "theta_Rs", ",", "e1", ",", "e2", ",", "center_x", "=", "0", ",", "center_y", "=", "0", ")", ":", "phi_G", ",", "q", "=", "param_util", ".", "ellipticity2phi_q", "(", "e1", ",", "e2", ")", "x_shift", "=", "x", "-", "center_x", "y_shift", "=", "y", "-", "center_y", "cos_phi", "=", "np", ".", "cos", "(", "phi_G", ")", "sin_phi", "=", "np", ".", "sin", "(", "phi_G", ")", "e", "=", "min", "(", "abs", "(", "1.", "-", "q", ")", ",", "0.99", ")", "xt1", "=", "(", "cos_phi", "*", "x_shift", "+", "sin_phi", "*", "y_shift", ")", "*", "np", ".", "sqrt", "(", "1", "-", "e", ")", "xt2", "=", "(", "-", "sin_phi", "*", "x_shift", "+", "cos_phi", "*", "y_shift", ")", "*", "np", ".", "sqrt", "(", "1", "+", "e", ")", "R_", "=", "np", ".", "sqrt", "(", "xt1", "**", "2", "+", "xt2", "**", "2", ")", "rho0_input", "=", "self", ".", "nfw", ".", "_alpha2rho0", "(", "theta_Rs", "=", "theta_Rs", ",", "Rs", "=", "Rs", ")", "if", "Rs", "<", "0.0000001", ":", "Rs", "=", "0.0000001", "f_x_prim", ",", "f_y_prim", "=", "self", ".", "nfw", ".", "nfwAlpha", "(", "R_", ",", "Rs", ",", "rho0_input", ",", "xt1", ",", "xt2", ")", "f_x_prim", "*=", "np", ".", "sqrt", "(", "1", "-", "e", ")", "f_y_prim", "*=", "np", ".", "sqrt", "(", "1", "+", "e", ")", "f_x", "=", "cos_phi", "*", "f_x_prim", "-", "sin_phi", "*", "f_y_prim", "f_y", "=", "sin_phi", "*", "f_x_prim", "+", "cos_phi", "*", "f_y_prim", "return", "f_x", ",", "f_y" ]
returns df/dx and df/dy of the function (integral of NFW)
[ "returns", "df", "/", "dx", "and", "df", "/", "dy", "of", "the", "function", "(", "integral", "of", "NFW", ")" ]
python
train
41.545455
googleapis/google-cloud-python
pubsub/google/cloud/pubsub_v1/futures.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/futures.py#L115-L140
def exception(self, timeout=None): """Return the exception raised by the call, if any. This blocks until the message has successfully been published, and returns the exception. If the call succeeded, return None. Args: timeout (Union[int, float]): The number of seconds before this call times out and raises TimeoutError. Raises: TimeoutError: If the request times out. Returns: Exception: The exception raised by the call, if any. """ # Wait until the future is done. if not self._completed.wait(timeout=timeout): raise exceptions.TimeoutError("Timed out waiting for result.") # If the batch completed successfully, this should return None. if self._result != self._SENTINEL: return None # Okay, this batch had an error; this should return it. return self._exception
[ "def", "exception", "(", "self", ",", "timeout", "=", "None", ")", ":", "# Wait until the future is done.", "if", "not", "self", ".", "_completed", ".", "wait", "(", "timeout", "=", "timeout", ")", ":", "raise", "exceptions", ".", "TimeoutError", "(", "\"Timed out waiting for result.\"", ")", "# If the batch completed successfully, this should return None.", "if", "self", ".", "_result", "!=", "self", ".", "_SENTINEL", ":", "return", "None", "# Okay, this batch had an error; this should return it.", "return", "self", ".", "_exception" ]
Return the exception raised by the call, if any. This blocks until the message has successfully been published, and returns the exception. If the call succeeded, return None. Args: timeout (Union[int, float]): The number of seconds before this call times out and raises TimeoutError. Raises: TimeoutError: If the request times out. Returns: Exception: The exception raised by the call, if any.
[ "Return", "the", "exception", "raised", "by", "the", "call", "if", "any", "." ]
python
train
35.692308
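A sketch of how the exception() method above is normally reached through the Pub/Sub publisher client; the project and topic names are placeholders.

from google.cloud import pubsub_v1

publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path('my-project', 'my-topic')
future = publisher.publish(topic_path, b'payload')
error = future.exception(timeout=30)   # None on success, the raised error otherwise
if error is not None:
    print('publish failed:', error)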
SpriteLink/NIPAP
utilities/ipplan-import.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/utilities/ipplan-import.py#L113-L139
def add_host(host): """ Put your host information in the prefix object. """ p = new_prefix() p.prefix = str(host['ipaddr']) p.type = "host" p.description = host['description'] p.node = host['fqdn'] p.avps = {} # Use remaining data from ipplan to populate comment field. if 'additional' in host: p.comment = host['additional'] # Use specific info to create extra attributes. if len(host['location']) > 0: p.avps['location'] = host['location'] if len(host['mac']) > 0: p.avps['mac'] = host['mac'] if len(host['phone']) > 0: p.avps['phone'] = host['phone'] if len(host['user']) > 0: p.avps['user'] = host['user'] return p
[ "def", "add_host", "(", "host", ")", ":", "p", "=", "new_prefix", "(", ")", "p", ".", "prefix", "=", "str", "(", "host", "[", "'ipaddr'", "]", ")", "p", ".", "type", "=", "\"host\"", "p", ".", "description", "=", "host", "[", "'description'", "]", "p", ".", "node", "=", "host", "[", "'fqdn'", "]", "p", ".", "avps", "=", "{", "}", "# Use remaining data from ipplan to populate comment field.", "if", "'additional'", "in", "host", ":", "p", ".", "comment", "=", "host", "[", "'additional'", "]", "# Use specific info to create extra attributes.", "if", "len", "(", "host", "[", "'location'", "]", ")", ">", "0", ":", "p", ".", "avps", "[", "'location'", "]", "=", "host", "[", "'location'", "]", "if", "len", "(", "host", "[", "'mac'", "]", ")", ">", "0", ":", "p", ".", "avps", "[", "'mac'", "]", "=", "host", "[", "'mac'", "]", "if", "len", "(", "host", "[", "'phone'", "]", ")", ">", "0", ":", "p", ".", "avps", "[", "'phone'", "]", "=", "host", "[", "'phone'", "]", "if", "len", "(", "host", "[", "'user'", "]", ")", ">", "0", ":", "p", ".", "avps", "[", "'user'", "]", "=", "host", "[", "'user'", "]", "return", "p" ]
Put your host information in the prefix object.
[ "Put", "your", "host", "information", "in", "the", "prefix", "object", "." ]
python
train
25.851852
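A hypothetical ipplan-style host record showing the keys add_host() above reads; the values are illustrative, and the sketch assumes add_host() and its new_prefix() helper are in scope.

host = {
    'ipaddr': '10.0.0.5',
    'fqdn': 'db1.example.com',
    'description': 'Primary database server',
    'location': 'rack 12',
    'mac': '00:11:22:33:44:55',
    'phone': '',
    'user': 'dba-team',
    'additional': 'imported from ipplan',
}
prefix = add_host(host)   # returns a populated NIPAP prefix object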
pytorch/text
torchtext/data/field.py
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/data/field.py#L277-L309
def build_vocab(self, *args, **kwargs): """Construct the Vocab object for this field from one or more datasets. Arguments: Positional arguments: Dataset objects or other iterable data sources from which to construct the Vocab object that represents the set of possible values for this field. If a Dataset object is provided, all columns corresponding to this field are used; individual columns can also be provided directly. Remaining keyword arguments: Passed to the constructor of Vocab. """ counter = Counter() sources = [] for arg in args: if isinstance(arg, Dataset): sources += [getattr(arg, name) for name, field in arg.fields.items() if field is self] else: sources.append(arg) for data in sources: for x in data: if not self.sequential: x = [x] try: counter.update(x) except TypeError: counter.update(chain.from_iterable(x)) specials = list(OrderedDict.fromkeys( tok for tok in [self.unk_token, self.pad_token, self.init_token, self.eos_token] + kwargs.pop('specials', []) if tok is not None)) self.vocab = self.vocab_cls(counter, specials=specials, **kwargs)
[ "def", "build_vocab", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "counter", "=", "Counter", "(", ")", "sources", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "Dataset", ")", ":", "sources", "+=", "[", "getattr", "(", "arg", ",", "name", ")", "for", "name", ",", "field", "in", "arg", ".", "fields", ".", "items", "(", ")", "if", "field", "is", "self", "]", "else", ":", "sources", ".", "append", "(", "arg", ")", "for", "data", "in", "sources", ":", "for", "x", "in", "data", ":", "if", "not", "self", ".", "sequential", ":", "x", "=", "[", "x", "]", "try", ":", "counter", ".", "update", "(", "x", ")", "except", "TypeError", ":", "counter", ".", "update", "(", "chain", ".", "from_iterable", "(", "x", ")", ")", "specials", "=", "list", "(", "OrderedDict", ".", "fromkeys", "(", "tok", "for", "tok", "in", "[", "self", ".", "unk_token", ",", "self", ".", "pad_token", ",", "self", ".", "init_token", ",", "self", ".", "eos_token", "]", "+", "kwargs", ".", "pop", "(", "'specials'", ",", "[", "]", ")", "if", "tok", "is", "not", "None", ")", ")", "self", ".", "vocab", "=", "self", ".", "vocab_cls", "(", "counter", ",", "specials", "=", "specials", ",", "*", "*", "kwargs", ")" ]
Construct the Vocab object for this field from one or more datasets. Arguments: Positional arguments: Dataset objects or other iterable data sources from which to construct the Vocab object that represents the set of possible values for this field. If a Dataset object is provided, all columns corresponding to this field are used; individual columns can also be provided directly. Remaining keyword arguments: Passed to the constructor of Vocab.
[ "Construct", "the", "Vocab", "object", "for", "this", "field", "from", "one", "or", "more", "datasets", "." ]
python
train
44.515152
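A sketch of the usual pattern around build_vocab() above, using the legacy torchtext Field/TabularDataset API; the file name and column layout are illustrative.

from torchtext.data import Field, TabularDataset

TEXT = Field(sequential=True, lower=True)
LABEL = Field(sequential=False)
train = TabularDataset(path='train.csv', format='csv',
                       fields=[('text', TEXT), ('label', LABEL)])
TEXT.build_vocab(train, max_size=25000, min_freq=2)   # counts only columns bound to TEXT
print(len(TEXT.vocab))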
oceanprotocol/squid-py
squid_py/aquarius/aquarius.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/aquarius/aquarius.py#L153-L166
def update_asset_ddo(self, did, ddo): """ Update the ddo of a did already registered. :param did: Asset DID string :param ddo: DDO instance :return: API response (depends on implementation) """ response = self.requests_session.put(f'{self.url}/{did}', data=ddo.as_text(), headers=self._headers) if response.status_code == 200 or response.status_code == 201: return json.loads(response.content) else: raise Exception(f'Unable to update DDO: {response.content}')
[ "def", "update_asset_ddo", "(", "self", ",", "did", ",", "ddo", ")", ":", "response", "=", "self", ".", "requests_session", ".", "put", "(", "f'{self.url}/{did}'", ",", "data", "=", "ddo", ".", "as_text", "(", ")", ",", "headers", "=", "self", ".", "_headers", ")", "if", "response", ".", "status_code", "==", "200", "or", "response", ".", "status_code", "==", "201", ":", "return", "json", ".", "loads", "(", "response", ".", "content", ")", "else", ":", "raise", "Exception", "(", "f'Unable to update DDO: {response.content}'", ")" ]
Update the ddo of a did already registered. :param did: Asset DID string :param ddo: DDO instance :return: API response (depends on implementation)
[ "Update", "the", "ddo", "of", "a", "did", "already", "registered", "." ]
python
train
42.071429
jhuapl-boss/intern
intern/service/boss/project.py
https://github.com/jhuapl-boss/intern/blob/d8fc6df011d8f212c87e6a1fd4cc21cfb5d103ed/intern/service/boss/project.py#L74-L85
def create_group(self, name): """Create a new group. Args: name (string): Name of the group to create. Raises: requests.HTTPError on failure. """ self.service.create_group( name, self.url_prefix, self.auth, self.session, self.session_send_opts)
[ "def", "create_group", "(", "self", ",", "name", ")", ":", "self", ".", "service", ".", "create_group", "(", "name", ",", "self", ".", "url_prefix", ",", "self", ".", "auth", ",", "self", ".", "session", ",", "self", ".", "session_send_opts", ")" ]
Create a new group. Args: name (string): Name of the group to create. Raises: requests.HTTPError on failure.
[ "Create", "a", "new", "group", "." ]
python
train
26.916667
zeth/inputs
inputs.py
https://github.com/zeth/inputs/blob/a46681dbf77d6ab07834f550e5855c1f50701f99/inputs.py#L1925-L1937
def handle_relative(self, event): """Relative mouse movement.""" delta_x, delta_y = self._get_relative(event) if delta_x: self.events.append( self.emulate_rel(0x00, delta_x, self.timeval)) if delta_y: self.events.append( self.emulate_rel(0x01, delta_y, self.timeval))
[ "def", "handle_relative", "(", "self", ",", "event", ")", ":", "delta_x", ",", "delta_y", "=", "self", ".", "_get_relative", "(", "event", ")", "if", "delta_x", ":", "self", ".", "events", ".", "append", "(", "self", ".", "emulate_rel", "(", "0x00", ",", "delta_x", ",", "self", ".", "timeval", ")", ")", "if", "delta_y", ":", "self", ".", "events", ".", "append", "(", "self", ".", "emulate_rel", "(", "0x01", ",", "delta_y", ",", "self", ".", "timeval", ")", ")" ]
Relative mouse movement.
[ "Relative", "mouse", "movement", "." ]
python
train
36.538462
git-afsantos/bonsai
bonsai/model.py
https://github.com/git-afsantos/bonsai/blob/aa5af3f535b3b506bfc95c107c501fc9c4bcd072/bonsai/model.py#L477-L486
def pretty_str(self, indent=0): """Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation. """ spaces = ' ' * indent pretty = '{}namespace {}:\n'.format(spaces, self.name) pretty += '\n\n'.join(c.pretty_str(indent + 2) for c in self.children) return pretty
[ "def", "pretty_str", "(", "self", ",", "indent", "=", "0", ")", ":", "spaces", "=", "' '", "*", "indent", "pretty", "=", "'{}namespace {}:\\n'", ".", "format", "(", "spaces", ",", "self", ".", "name", ")", "pretty", "+=", "'\\n\\n'", ".", "join", "(", "c", ".", "pretty_str", "(", "indent", "+", "2", ")", "for", "c", "in", "self", ".", "children", ")", "return", "pretty" ]
Return a human-readable string representation of this object. Kwargs: indent (int): The amount of spaces to use as indentation.
[ "Return", "a", "human", "-", "readable", "string", "representation", "of", "this", "object", "." ]
python
train
38.8
sosy-lab/benchexec
benchexec/cgroups.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/cgroups.py#L133-L147
def _parse_proc_pid_cgroup(content): """ Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup). @param content: An iterable over the lines of the file. @return: a generator of tuples """ for ownCgroup in content: #each line is "id:subsystem,subsystem:path" ownCgroup = ownCgroup.strip().split(':') try: path = ownCgroup[2][1:] # remove leading / except IndexError: raise IndexError("index out of range for " + str(ownCgroup)) for subsystem in ownCgroup[1].split(','): yield (subsystem, path)
[ "def", "_parse_proc_pid_cgroup", "(", "content", ")", ":", "for", "ownCgroup", "in", "content", ":", "#each line is \"id:subsystem,subsystem:path\"", "ownCgroup", "=", "ownCgroup", ".", "strip", "(", ")", ".", "split", "(", "':'", ")", "try", ":", "path", "=", "ownCgroup", "[", "2", "]", "[", "1", ":", "]", "# remove leading /", "except", "IndexError", ":", "raise", "IndexError", "(", "\"index out of range for \"", "+", "str", "(", "ownCgroup", ")", ")", "for", "subsystem", "in", "ownCgroup", "[", "1", "]", ".", "split", "(", "','", ")", ":", "yield", "(", "subsystem", ",", "path", ")" ]
Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup). @param content: An iterable over the lines of the file. @return: a generator of tuples
[ "Parse", "a", "/", "proc", "/", "*", "/", "cgroup", "file", "into", "tuples", "of", "(", "subsystem", "cgroup", ")", "." ]
python
train
39
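An example of feeding /proc/<pid>/cgroup-style lines to _parse_proc_pid_cgroup() above; the sample content mimics the "id:subsystem[,subsystem]:path" format it expects.

sample = [
    '8:memory:/user.slice',
    '4:cpu,cpuacct:/user.slice',
]
for subsystem, path in _parse_proc_pid_cgroup(sample):
    print(subsystem, path)
# memory user.slice
# cpu user.slice
# cpuacct user.slice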
samluescher/django-media-tree
media_tree/utils/staticfiles.py
https://github.com/samluescher/django-media-tree/blob/3eb6345faaf57e2fbe35ca431d4d133f950f2b5f/media_tree/utils/staticfiles.py#L108-L124
def find(file_node, dirs=ICON_DIRS, default_name=None, file_ext='.png'): """ Iterating all icon dirs, try to find a file called like the node's extension / mime subtype / mime type (in that order). For instance, for an MP3 file ("audio/mpeg"), this would look for: "mp3.png" / "audio/mpeg.png" / "audio.png" """ names = [] for attr_name in ('extension', 'mimetype', 'mime_supertype'): attr = getattr(file_node, attr_name) if attr: names.append(attr) if default_name: names.append(default_name) icon_path = StaticPathFinder.find(names, dirs, file_ext) if icon_path: return StaticIconFile(file_node, icon_path)
[ "def", "find", "(", "file_node", ",", "dirs", "=", "ICON_DIRS", ",", "default_name", "=", "None", ",", "file_ext", "=", "'.png'", ")", ":", "names", "=", "[", "]", "for", "attr_name", "in", "(", "'extension'", ",", "'mimetype'", ",", "'mime_supertype'", ")", ":", "attr", "=", "getattr", "(", "file_node", ",", "attr_name", ")", "if", "attr", ":", "names", ".", "append", "(", "attr", ")", "if", "default_name", ":", "names", ".", "append", "(", "default_name", ")", "icon_path", "=", "StaticPathFinder", ".", "find", "(", "names", ",", "dirs", ",", "file_ext", ")", "if", "icon_path", ":", "return", "StaticIconFile", "(", "file_node", ",", "icon_path", ")" ]
Iterating all icon dirs, try to find a file called like the node's extension / mime subtype / mime type (in that order). For instance, for an MP3 file ("audio/mpeg"), this would look for: "mp3.png" / "audio/mpeg.png" / "audio.png"
[ "Iterating", "all", "icon", "dirs", "try", "to", "find", "a", "file", "called", "like", "the", "node", "s", "extension", "/", "mime", "subtype", "/", "mime", "type", "(", "in", "that", "order", ")", ".", "For", "instance", "for", "an", "MP3", "file", "(", "audio", "/", "mpeg", ")", "this", "would", "look", "for", ":", "mp3", ".", "png", "/", "audio", "/", "mpeg", ".", "png", "/", "audio", ".", "png" ]
python
train
43.823529
nyaruka/smartmin
smartmin/views.py
https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L232-L246
def lookup_field_class(self, field, obj=None, default=None): """ Looks up any additional class we should include when rendering this field """ css = "" # is there a class specified for this field if field in self.field_config and 'class' in self.field_config[field]: css = self.field_config[field]['class'] # if we were given a default, use that elif default: css = default return css
[ "def", "lookup_field_class", "(", "self", ",", "field", ",", "obj", "=", "None", ",", "default", "=", "None", ")", ":", "css", "=", "\"\"", "# is there a class specified for this field", "if", "field", "in", "self", ".", "field_config", "and", "'class'", "in", "self", ".", "field_config", "[", "field", "]", ":", "css", "=", "self", ".", "field_config", "[", "field", "]", "[", "'class'", "]", "# if we were given a default, use that", "elif", "default", ":", "css", "=", "default", "return", "css" ]
Looks up any additional class we should include when rendering this field
[ "Looks", "up", "any", "additional", "class", "we", "should", "include", "when", "rendering", "this", "field" ]
python
train
31.266667
basho/riak-python-client
riak/codecs/pbuf.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/codecs/pbuf.py#L847-L895
def decode_timeseries_row(self, tsrow, tscols=None, convert_timestamp=False): """ Decodes a TsRow into a list :param tsrow: the protobuf TsRow to decode. :type tsrow: riak.pb.riak_ts_pb2.TsRow :param tscols: the protobuf TsColumn data to help decode. :type tscols: list :rtype list """ row = [] for i, cell in enumerate(tsrow.cells): col = None if tscols is not None: col = tscols[i] if cell.HasField('varchar_value'): if col and not (col.type == TsColumnType.Value('VARCHAR') or col.type == TsColumnType.Value('BLOB')): raise TypeError('expected VARCHAR or BLOB column') else: row.append(cell.varchar_value) elif cell.HasField('sint64_value'): if col and col.type != TsColumnType.Value('SINT64'): raise TypeError('expected SINT64 column') else: row.append(cell.sint64_value) elif cell.HasField('double_value'): if col and col.type != TsColumnType.Value('DOUBLE'): raise TypeError('expected DOUBLE column') else: row.append(cell.double_value) elif cell.HasField('timestamp_value'): if col and col.type != TsColumnType.Value('TIMESTAMP'): raise TypeError('expected TIMESTAMP column') else: dt = cell.timestamp_value if convert_timestamp: dt = datetime_from_unix_time_millis( cell.timestamp_value) row.append(dt) elif cell.HasField('boolean_value'): if col and col.type != TsColumnType.Value('BOOLEAN'): raise TypeError('expected BOOLEAN column') else: row.append(cell.boolean_value) else: row.append(None) return row
[ "def", "decode_timeseries_row", "(", "self", ",", "tsrow", ",", "tscols", "=", "None", ",", "convert_timestamp", "=", "False", ")", ":", "row", "=", "[", "]", "for", "i", ",", "cell", "in", "enumerate", "(", "tsrow", ".", "cells", ")", ":", "col", "=", "None", "if", "tscols", "is", "not", "None", ":", "col", "=", "tscols", "[", "i", "]", "if", "cell", ".", "HasField", "(", "'varchar_value'", ")", ":", "if", "col", "and", "not", "(", "col", ".", "type", "==", "TsColumnType", ".", "Value", "(", "'VARCHAR'", ")", "or", "col", ".", "type", "==", "TsColumnType", ".", "Value", "(", "'BLOB'", ")", ")", ":", "raise", "TypeError", "(", "'expected VARCHAR or BLOB column'", ")", "else", ":", "row", ".", "append", "(", "cell", ".", "varchar_value", ")", "elif", "cell", ".", "HasField", "(", "'sint64_value'", ")", ":", "if", "col", "and", "col", ".", "type", "!=", "TsColumnType", ".", "Value", "(", "'SINT64'", ")", ":", "raise", "TypeError", "(", "'expected SINT64 column'", ")", "else", ":", "row", ".", "append", "(", "cell", ".", "sint64_value", ")", "elif", "cell", ".", "HasField", "(", "'double_value'", ")", ":", "if", "col", "and", "col", ".", "type", "!=", "TsColumnType", ".", "Value", "(", "'DOUBLE'", ")", ":", "raise", "TypeError", "(", "'expected DOUBLE column'", ")", "else", ":", "row", ".", "append", "(", "cell", ".", "double_value", ")", "elif", "cell", ".", "HasField", "(", "'timestamp_value'", ")", ":", "if", "col", "and", "col", ".", "type", "!=", "TsColumnType", ".", "Value", "(", "'TIMESTAMP'", ")", ":", "raise", "TypeError", "(", "'expected TIMESTAMP column'", ")", "else", ":", "dt", "=", "cell", ".", "timestamp_value", "if", "convert_timestamp", ":", "dt", "=", "datetime_from_unix_time_millis", "(", "cell", ".", "timestamp_value", ")", "row", ".", "append", "(", "dt", ")", "elif", "cell", ".", "HasField", "(", "'boolean_value'", ")", ":", "if", "col", "and", "col", ".", "type", "!=", "TsColumnType", ".", "Value", "(", "'BOOLEAN'", ")", ":", "raise", "TypeError", "(", "'expected BOOLEAN column'", ")", "else", ":", "row", ".", "append", "(", "cell", ".", "boolean_value", ")", "else", ":", "row", ".", "append", "(", "None", ")", "return", "row" ]
Decodes a TsRow into a list :param tsrow: the protobuf TsRow to decode. :type tsrow: riak.pb.riak_ts_pb2.TsRow :param tscols: the protobuf TsColumn data to help decode. :type tscols: list :rtype list
[ "Decodes", "a", "TsRow", "into", "a", "list" ]
python
train
42.816327
CitrineInformatics/dftparse
dftparse/vasp/outcar_parser.py
https://github.com/CitrineInformatics/dftparse/blob/53a1bf19945cf1c195d6af9beccb3d1b7f4a4c1d/dftparse/vasp/outcar_parser.py#L4-L10
def _parse_total_magnetization(line, lines): """Parse the total magnetization, which is somewhat hidden""" toks = line.split() res = {"number of electrons": float(toks[3])} if len(toks) > 5: res["total magnetization"] = float(toks[5]) return res
[ "def", "_parse_total_magnetization", "(", "line", ",", "lines", ")", ":", "toks", "=", "line", ".", "split", "(", ")", "res", "=", "{", "\"number of electrons\"", ":", "float", "(", "toks", "[", "3", "]", ")", "}", "if", "len", "(", "toks", ")", ">", "5", ":", "res", "[", "\"total magnetization\"", "]", "=", "float", "(", "toks", "[", "5", "]", ")", "return", "res" ]
Parse the total magnetization, which is somewhat hidden
[ "Parse", "the", "total", "magnetization", "which", "is", "somewhat", "hidden" ]
python
train
38.142857
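An illustrative OUTCAR-style line for _parse_total_magnetization() above; only the whitespace-split token positions (indexes 3 and 5) matter, not the exact spacing.

line = ' number of electron      96.0000000 magnetization       2.0000000'
print(_parse_total_magnetization(line, []))
# {'number of electrons': 96.0, 'total magnetization': 2.0}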
heitzmann/gdspy
gdspy/__init__.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/gdspy/__init__.py#L2499-L2525
def get_labels(self, depth=None): """ Returns a list with a copy of the labels in this cell. Parameters ---------- depth : integer or ``None`` If not ``None``, defines from how many reference levels to retrieve labels from. Returns ------- out : list of ``Label`` List containing the labels in this cell and its references. """ labels = libCopy.deepcopy(self.labels) if depth is None or depth > 0: for element in self.elements: if isinstance(element, CellReference): labels.extend( element.get_labels(None if depth is None else depth - 1)) elif isinstance(element, CellArray): labels.extend( element.get_labels(None if depth is None else depth - 1)) return labels
[ "def", "get_labels", "(", "self", ",", "depth", "=", "None", ")", ":", "labels", "=", "libCopy", ".", "deepcopy", "(", "self", ".", "labels", ")", "if", "depth", "is", "None", "or", "depth", ">", "0", ":", "for", "element", "in", "self", ".", "elements", ":", "if", "isinstance", "(", "element", ",", "CellReference", ")", ":", "labels", ".", "extend", "(", "element", ".", "get_labels", "(", "None", "if", "depth", "is", "None", "else", "depth", "-", "1", ")", ")", "elif", "isinstance", "(", "element", ",", "CellArray", ")", ":", "labels", ".", "extend", "(", "element", ".", "get_labels", "(", "None", "if", "depth", "is", "None", "else", "depth", "-", "1", ")", ")", "return", "labels" ]
Returns a list with a copy of the labels in this cell. Parameters ---------- depth : integer or ``None`` If not ``None``, defines from how many reference levels to retrieve labels from. Returns ------- out : list of ``Label`` List containing the labels in this cell and its references.
[ "Returns", "a", "list", "with", "a", "copy", "of", "the", "labels", "in", "this", "cell", "." ]
python
train
36.62963
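A short sketch of get_labels() above against the gdspy 1.x style API; the cell and label contents are illustrative.

import gdspy

cell = gdspy.Cell('TOP')
cell.add(gdspy.Label('port A', (0, 0)))
print(cell.get_labels())          # copies of the labels in this cell
print(cell.get_labels(depth=0))   # references are not descended into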
weld-project/weld
python/grizzly/grizzly/grizzly_impl.py
https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/grizzly/grizzly/grizzly_impl.py#L469-L510
def compare(array, other, op, ty_str): """ Performs passed-in comparison op between every element in the passed-in array and other, and returns an array of booleans. Args: array (WeldObject / Numpy.ndarray): Input array other (WeldObject / Numpy.ndarray): Second input array op (str): Op string used for element-wise comparison (== >= <= !=) ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation """ weld_obj = WeldObject(encoder_, decoder_) array_var = weld_obj.update(array) if isinstance(array, WeldObject): array_var = array.obj_id weld_obj.dependencies[array_var] = array # Strings need to be encoded into vec[char] array. # Constants can be added directly to NVL snippet. if isinstance(other, str) or isinstance(other, WeldObject): other_var = weld_obj.update(other) if isinstance(other, WeldObject): other_var = tmp.obj_id weld_obj.dependencies[other_var] = other else: other_var = "%s(%s)" % (ty_str, str(other)) weld_template = """ map( %(array)s, |a: %(ty)s| a %(op)s %(other)s ) """ weld_obj.weld_code = weld_template % {"array": array_var, "other": other_var, "op": op, "ty": ty_str} return weld_obj
[ "def", "compare", "(", "array", ",", "other", ",", "op", ",", "ty_str", ")", ":", "weld_obj", "=", "WeldObject", "(", "encoder_", ",", "decoder_", ")", "array_var", "=", "weld_obj", ".", "update", "(", "array", ")", "if", "isinstance", "(", "array", ",", "WeldObject", ")", ":", "array_var", "=", "array", ".", "obj_id", "weld_obj", ".", "dependencies", "[", "array_var", "]", "=", "array", "# Strings need to be encoded into vec[char] array.", "# Constants can be added directly to NVL snippet.", "if", "isinstance", "(", "other", ",", "str", ")", "or", "isinstance", "(", "other", ",", "WeldObject", ")", ":", "other_var", "=", "weld_obj", ".", "update", "(", "other", ")", "if", "isinstance", "(", "other", ",", "WeldObject", ")", ":", "other_var", "=", "tmp", ".", "obj_id", "weld_obj", ".", "dependencies", "[", "other_var", "]", "=", "other", "else", ":", "other_var", "=", "\"%s(%s)\"", "%", "(", "ty_str", ",", "str", "(", "other", ")", ")", "weld_template", "=", "\"\"\"\n map(\n %(array)s,\n |a: %(ty)s| a %(op)s %(other)s\n )\n \"\"\"", "weld_obj", ".", "weld_code", "=", "weld_template", "%", "{", "\"array\"", ":", "array_var", ",", "\"other\"", ":", "other_var", ",", "\"op\"", ":", "op", ",", "\"ty\"", ":", "ty_str", "}", "return", "weld_obj" ]
Performs passed-in comparison op between every element in the passed-in array and other, and returns an array of booleans. Args: array (WeldObject / Numpy.ndarray): Input array other (WeldObject / Numpy.ndarray): Second input array op (str): Op string used for element-wise comparison (== >= <= !=) ty (WeldType): Type of each element in the input array Returns: A WeldObject representing this computation
[ "Performs", "passed", "-", "in", "comparison", "op", "between", "every", "element", "in", "the", "passed", "-", "in", "array", "and", "other", "and", "returns", "an", "array", "of", "booleans", "." ]
python
train
33.738095
sanger-pathogens/Fastaq
pyfastaq/tasks.py
https://github.com/sanger-pathogens/Fastaq/blob/2c775c846d2491678a9637daa320592e02c26c72/pyfastaq/tasks.py#L431-L443
def mean_length(infile, limit=None): '''Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N''' total = 0 count = 0 seq_reader = sequences.file_reader(infile) for seq in seq_reader: total += len(seq) count += 1 if limit is not None and count >= limit: break assert count > 0 return total / count
[ "def", "mean_length", "(", "infile", ",", "limit", "=", "None", ")", ":", "total", "=", "0", "count", "=", "0", "seq_reader", "=", "sequences", ".", "file_reader", "(", "infile", ")", "for", "seq", "in", "seq_reader", ":", "total", "+=", "len", "(", "seq", ")", "count", "+=", "1", "if", "limit", "is", "not", "None", "and", "count", ">=", "limit", ":", "break", "assert", "count", ">", "0", "return", "total", "/", "count" ]
Returns the mean length of the sequences in the input file. By default uses all sequences. To limit to the first N sequences, use limit=N
[ "Returns", "the", "mean", "length", "of", "the", "sequences", "in", "the", "input", "file", ".", "By", "default", "uses", "all", "sequences", ".", "To", "limit", "to", "the", "first", "N", "sequences", "use", "limit", "=", "N" ]
python
valid
33.307692
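A minimal usage sketch for mean_length() above via the pyfastaq package named in the record; the input file name is illustrative.

from pyfastaq import tasks

avg = tasks.mean_length('reads.fasta', limit=1000)   # mean over the first 1000 sequences
print(avg)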
ssato/python-anyconfig
src/anyconfig/utils.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/utils.py#L38-L51
def get_file_extension(file_path): """ >>> get_file_extension("/a/b/c") '' >>> get_file_extension("/a/b.txt") 'txt' >>> get_file_extension("/a/b/c.tar.xz") 'xz' """ _ext = os.path.splitext(file_path)[-1] if _ext: return _ext[1:] if _ext.startswith('.') else _ext return ""
[ "def", "get_file_extension", "(", "file_path", ")", ":", "_ext", "=", "os", ".", "path", ".", "splitext", "(", "file_path", ")", "[", "-", "1", "]", "if", "_ext", ":", "return", "_ext", "[", "1", ":", "]", "if", "_ext", ".", "startswith", "(", "'.'", ")", "else", "_ext", "return", "\"\"" ]
>>> get_file_extension("/a/b/c") '' >>> get_file_extension("/a/b.txt") 'txt' >>> get_file_extension("/a/b/c.tar.xz") 'xz'
[ ">>>", "get_file_extension", "(", "/", "a", "/", "b", "/", "c", ")", ">>>", "get_file_extension", "(", "/", "a", "/", "b", ".", "txt", ")", "txt", ">>>", "get_file_extension", "(", "/", "a", "/", "b", "/", "c", ".", "tar", ".", "xz", ")", "xz" ]
python
train
22.285714
kennknowles/python-jsonpath-rw
jsonpath_rw/parser.py
https://github.com/kennknowles/python-jsonpath-rw/blob/f615451d7b405e23e0f80b15cad03b1427b0256d/jsonpath_rw/parser.py#L94-L101
def p_jsonpath_named_operator(self, p): "jsonpath : NAMED_OPERATOR" if p[1] == 'this': p[0] = This() elif p[1] == 'parent': p[0] = Parent() else: raise Exception('Unknown named operator `%s` at %s:%s' % (p[1], p.lineno(1), p.lexpos(1)))
[ "def", "p_jsonpath_named_operator", "(", "self", ",", "p", ")", ":", "if", "p", "[", "1", "]", "==", "'this'", ":", "p", "[", "0", "]", "=", "This", "(", ")", "elif", "p", "[", "1", "]", "==", "'parent'", ":", "p", "[", "0", "]", "=", "Parent", "(", ")", "else", ":", "raise", "Exception", "(", "'Unknown named operator `%s` at %s:%s'", "%", "(", "p", "[", "1", "]", ",", "p", ".", "lineno", "(", "1", ")", ",", "p", ".", "lexpos", "(", "1", ")", ")", ")" ]
jsonpath : NAMED_OPERATOR
[ "jsonpath", ":", "NAMED_OPERATOR" ]
python
train
37.125
chrislit/abydos
abydos/distance/_prefix.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/distance/_prefix.py#L41-L82
def sim(self, src, tar): """Return the prefix similarity of two strings. Prefix similarity is the ratio of the length of the shorter term that exactly matches the longer term to the length of the shorter term, beginning at the start of both terms. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Prefix similarity Examples -------- >>> cmp = Prefix() >>> cmp.sim('cat', 'hat') 0.0 >>> cmp.sim('Niall', 'Neil') 0.25 >>> cmp.sim('aluminum', 'Catalan') 0.0 >>> cmp.sim('ATCG', 'TAGC') 0.0 """ if src == tar: return 1.0 if not src or not tar: return 0.0 min_word, max_word = (src, tar) if len(src) < len(tar) else (tar, src) min_len = len(min_word) for i in range(min_len, 0, -1): if min_word[:i] == max_word[:i]: return i / min_len return 0.0
[ "def", "sim", "(", "self", ",", "src", ",", "tar", ")", ":", "if", "src", "==", "tar", ":", "return", "1.0", "if", "not", "src", "or", "not", "tar", ":", "return", "0.0", "min_word", ",", "max_word", "=", "(", "src", ",", "tar", ")", "if", "len", "(", "src", ")", "<", "len", "(", "tar", ")", "else", "(", "tar", ",", "src", ")", "min_len", "=", "len", "(", "min_word", ")", "for", "i", "in", "range", "(", "min_len", ",", "0", ",", "-", "1", ")", ":", "if", "min_word", "[", ":", "i", "]", "==", "max_word", "[", ":", "i", "]", ":", "return", "i", "/", "min_len", "return", "0.0" ]
Return the prefix similarity of two strings. Prefix similarity is the ratio of the length of the shorter term that exactly matches the longer term to the length of the shorter term, beginning at the start of both terms. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison Returns ------- float Prefix similarity Examples -------- >>> cmp = Prefix() >>> cmp.sim('cat', 'hat') 0.0 >>> cmp.sim('Niall', 'Neil') 0.25 >>> cmp.sim('aluminum', 'Catalan') 0.0 >>> cmp.sim('ATCG', 'TAGC') 0.0
[ "Return", "the", "prefix", "similarity", "of", "two", "strings", "." ]
python
valid
26.119048
UpCloudLtd/upcloud-python-api
upcloud_api/cloud_manager/firewall_mixin.py
https://github.com/UpCloudLtd/upcloud-python-api/blob/954b0ad7c4b932b2be31a95d88975f6b0eeac8ed/upcloud_api/cloud_manager/firewall_mixin.py#L57-L62
def delete_firewall_rule(self, server_uuid, firewall_rule_position): """ Delete a firewall rule based on a server uuid and rule position. """ url = '/server/{0}/firewall_rule/{1}'.format(server_uuid, firewall_rule_position) return self.request('DELETE', url)
[ "def", "delete_firewall_rule", "(", "self", ",", "server_uuid", ",", "firewall_rule_position", ")", ":", "url", "=", "'/server/{0}/firewall_rule/{1}'", ".", "format", "(", "server_uuid", ",", "firewall_rule_position", ")", "return", "self", ".", "request", "(", "'DELETE'", ",", "url", ")" ]
Delete a firewall rule based on a server uuid and rule position.
[ "Delete", "a", "firewall", "rule", "based", "on", "a", "server", "uuid", "and", "rule", "position", "." ]
python
train
48.833333
ceph/ceph-deploy
ceph_deploy/util/system.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/system.py#L5-L16
def executable_path(conn, executable): """ Remote validator that accepts a connection object to ensure that a certain executable is available returning its full path if so. Otherwise an exception with thorough details will be raised, informing the user that the executable was not found. """ executable_path = conn.remote_module.which(executable) if not executable_path: raise ExecutableNotFound(executable, conn.hostname) return executable_path
[ "def", "executable_path", "(", "conn", ",", "executable", ")", ":", "executable_path", "=", "conn", ".", "remote_module", ".", "which", "(", "executable", ")", "if", "not", "executable_path", ":", "raise", "ExecutableNotFound", "(", "executable", ",", "conn", ".", "hostname", ")", "return", "executable_path" ]
Remote validator that accepts a connection object to ensure that a certain executable is available returning its full path if so. Otherwise an exception with thorough details will be raised, informing the user that the executable was not found.
[ "Remote", "validator", "that", "accepts", "a", "connection", "object", "to", "ensure", "that", "a", "certain", "executable", "is", "available", "returning", "its", "full", "path", "if", "so", "." ]
python
train
39.916667
ensime/ensime-vim
ensime_shared/typecheck.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/typecheck.py#L17-L24
def buffer_typechecks_and_display(self, call_id, payload): """Adds typecheck events to the buffer, and displays them right away. This is a workaround for this issue: https://github.com/ensime/ensime-server/issues/1616 """ self.buffer_typechecks(call_id, payload) self.editor.display_notes(self.buffered_notes)
[ "def", "buffer_typechecks_and_display", "(", "self", ",", "call_id", ",", "payload", ")", ":", "self", ".", "buffer_typechecks", "(", "call_id", ",", "payload", ")", "self", ".", "editor", ".", "display_notes", "(", "self", ".", "buffered_notes", ")" ]
Adds typecheck events to the buffer, and displays them right away. This is a workaround for this issue: https://github.com/ensime/ensime-server/issues/1616
[ "Adds", "typecheck", "events", "to", "the", "buffer", "and", "displays", "them", "right", "away", "." ]
python
train
43.875
bitesofcode/projexui
projexui/widgets/xdocktoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xdocktoolbar.py#L522-L625
def setCurrentAction(self, action): """ Sets the current action for this widget that highlights the size for this toolbar. :param action | <QAction> """ if action == self._currentAction: return self._currentAction = action self.currentActionChanged.emit(action) labels = self.actionLabels() anim_grp = QParallelAnimationGroup(self) max_size = self.maximumPixmapSize() min_size = self.minimumPixmapSize() if action: label = self.labelForAction(action) index = labels.index(label) # create the highlight effect palette = self.palette() effect = QGraphicsDropShadowEffect(label) effect.setXOffset(0) effect.setYOffset(0) effect.setBlurRadius(20) effect.setColor(QColor(40, 40, 40)) label.setGraphicsEffect(effect) offset = self.padding() if self.position() in (XDockToolbar.Position.East, XDockToolbar.Position.West): self.resize(max_size.width() + offset, self.height()) elif self.position() in (XDockToolbar.Position.North, XDockToolbar.Position.South): self.resize(self.width(), max_size.height() + offset) w = max_size.width() h = max_size.height() dw = (max_size.width() - min_size.width()) / 3 dh = (max_size.height() - min_size.height()) / 3 for i in range(4): before = index - i after = index + i if 0 <= before and before < len(labels): anim = XObjectAnimation(labels[before], 'setPixmapSize', anim_grp) anim.setEasingCurve(self.easingCurve()) anim.setStartValue(labels[before].pixmapSize()) anim.setEndValue(QSize(w, h)) anim.setDuration(self.duration()) anim_grp.addAnimation(anim) if i: labels[before].setGraphicsEffect(None) if after != before and 0 <= after and after < len(labels): anim = XObjectAnimation(labels[after], 'setPixmapSize', anim_grp) anim.setEasingCurve(self.easingCurve()) anim.setStartValue(labels[after].pixmapSize()) anim.setEndValue(QSize(w, h)) anim.setDuration(self.duration()) anim_grp.addAnimation(anim) if i: labels[after].setGraphicsEffect(None) w -= dw h -= dh else: offset = self.padding() for label in self.actionLabels(): # clear the graphics effect label.setGraphicsEffect(None) # create the animation anim = XObjectAnimation(label, 'setPixmapSize', self) anim.setEasingCurve(self.easingCurve()) anim.setStartValue(label.pixmapSize()) anim.setEndValue(min_size) anim.setDuration(self.duration()) anim_grp.addAnimation(anim) anim_grp.finished.connect(self.resizeToMinimum) anim_grp.start() self._animating = True anim_grp.finished.connect(anim_grp.deleteLater) anim_grp.finished.connect(self.__markAnimatingFinished) if self._currentAction: self._hoverTimer.start() else: self._hoverTimer.stop()
[ "def", "setCurrentAction", "(", "self", ",", "action", ")", ":", "if", "action", "==", "self", ".", "_currentAction", ":", "return", "self", ".", "_currentAction", "=", "action", "self", ".", "currentActionChanged", ".", "emit", "(", "action", ")", "labels", "=", "self", ".", "actionLabels", "(", ")", "anim_grp", "=", "QParallelAnimationGroup", "(", "self", ")", "max_size", "=", "self", ".", "maximumPixmapSize", "(", ")", "min_size", "=", "self", ".", "minimumPixmapSize", "(", ")", "if", "action", ":", "label", "=", "self", ".", "labelForAction", "(", "action", ")", "index", "=", "labels", ".", "index", "(", "label", ")", "# create the highlight effect\r", "palette", "=", "self", ".", "palette", "(", ")", "effect", "=", "QGraphicsDropShadowEffect", "(", "label", ")", "effect", ".", "setXOffset", "(", "0", ")", "effect", ".", "setYOffset", "(", "0", ")", "effect", ".", "setBlurRadius", "(", "20", ")", "effect", ".", "setColor", "(", "QColor", "(", "40", ",", "40", ",", "40", ")", ")", "label", ".", "setGraphicsEffect", "(", "effect", ")", "offset", "=", "self", ".", "padding", "(", ")", "if", "self", ".", "position", "(", ")", "in", "(", "XDockToolbar", ".", "Position", ".", "East", ",", "XDockToolbar", ".", "Position", ".", "West", ")", ":", "self", ".", "resize", "(", "max_size", ".", "width", "(", ")", "+", "offset", ",", "self", ".", "height", "(", ")", ")", "elif", "self", ".", "position", "(", ")", "in", "(", "XDockToolbar", ".", "Position", ".", "North", ",", "XDockToolbar", ".", "Position", ".", "South", ")", ":", "self", ".", "resize", "(", "self", ".", "width", "(", ")", ",", "max_size", ".", "height", "(", ")", "+", "offset", ")", "w", "=", "max_size", ".", "width", "(", ")", "h", "=", "max_size", ".", "height", "(", ")", "dw", "=", "(", "max_size", ".", "width", "(", ")", "-", "min_size", ".", "width", "(", ")", ")", "/", "3", "dh", "=", "(", "max_size", ".", "height", "(", ")", "-", "min_size", ".", "height", "(", ")", ")", "/", "3", "for", "i", "in", "range", "(", "4", ")", ":", "before", "=", "index", "-", "i", "after", "=", "index", "+", "i", "if", "0", "<=", "before", "and", "before", "<", "len", "(", "labels", ")", ":", "anim", "=", "XObjectAnimation", "(", "labels", "[", "before", "]", ",", "'setPixmapSize'", ",", "anim_grp", ")", "anim", ".", "setEasingCurve", "(", "self", ".", "easingCurve", "(", ")", ")", "anim", ".", "setStartValue", "(", "labels", "[", "before", "]", ".", "pixmapSize", "(", ")", ")", "anim", ".", "setEndValue", "(", "QSize", "(", "w", ",", "h", ")", ")", "anim", ".", "setDuration", "(", "self", ".", "duration", "(", ")", ")", "anim_grp", ".", "addAnimation", "(", "anim", ")", "if", "i", ":", "labels", "[", "before", "]", ".", "setGraphicsEffect", "(", "None", ")", "if", "after", "!=", "before", "and", "0", "<=", "after", "and", "after", "<", "len", "(", "labels", ")", ":", "anim", "=", "XObjectAnimation", "(", "labels", "[", "after", "]", ",", "'setPixmapSize'", ",", "anim_grp", ")", "anim", ".", "setEasingCurve", "(", "self", ".", "easingCurve", "(", ")", ")", "anim", ".", "setStartValue", "(", "labels", "[", "after", "]", ".", "pixmapSize", "(", ")", ")", "anim", ".", "setEndValue", "(", "QSize", "(", "w", ",", "h", ")", ")", "anim", ".", "setDuration", "(", "self", ".", "duration", "(", ")", ")", "anim_grp", ".", "addAnimation", "(", "anim", ")", "if", "i", ":", "labels", "[", "after", "]", ".", "setGraphicsEffect", "(", "None", ")", "w", "-=", "dw", "h", "-=", "dh", "else", ":", "offset", "=", "self", ".", "padding", "(", ")", "for", "label", 
"in", "self", ".", "actionLabels", "(", ")", ":", "# clear the graphics effect \r", "label", ".", "setGraphicsEffect", "(", "None", ")", "# create the animation\r", "anim", "=", "XObjectAnimation", "(", "label", ",", "'setPixmapSize'", ",", "self", ")", "anim", ".", "setEasingCurve", "(", "self", ".", "easingCurve", "(", ")", ")", "anim", ".", "setStartValue", "(", "label", ".", "pixmapSize", "(", ")", ")", "anim", ".", "setEndValue", "(", "min_size", ")", "anim", ".", "setDuration", "(", "self", ".", "duration", "(", ")", ")", "anim_grp", ".", "addAnimation", "(", "anim", ")", "anim_grp", ".", "finished", ".", "connect", "(", "self", ".", "resizeToMinimum", ")", "anim_grp", ".", "start", "(", ")", "self", ".", "_animating", "=", "True", "anim_grp", ".", "finished", ".", "connect", "(", "anim_grp", ".", "deleteLater", ")", "anim_grp", ".", "finished", ".", "connect", "(", "self", ".", "__markAnimatingFinished", ")", "if", "self", ".", "_currentAction", ":", "self", ".", "_hoverTimer", ".", "start", "(", ")", "else", ":", "self", ".", "_hoverTimer", ".", "stop", "(", ")" ]
Sets the current action for this widget that highlights the size for this toolbar. :param action | <QAction>
[ "Sets", "the", "current", "action", "for", "this", "widget", "that", "highlights", "the", "size", "for", "this", "toolbar", ".", ":", "param", "action", "|", "<QAction", ">" ]
python
train
39.644231
nathancahill/mimicdb
mimicdb/s3/bucket.py
https://github.com/nathancahill/mimicdb/blob/9d0e8ebcba31d937f73752f9b88e5a4fec860765/mimicdb/s3/bucket.py#L129-L148
def _get_all(self, *args, **kwargs): """If 'force' is in the headers, retrieve the list of keys from S3. Otherwise, use the list() function to retrieve the keys from MimicDB. """ headers = kwargs.get('headers', args[2] if len(args) > 2 else None) or dict() if 'force' in headers: keys = super(Bucket, self)._get_all(*args, **kwargs) for key in keys: mimicdb.backend.sadd(tpl.bucket % self.name, key.name) mimicdb.backend.hmset(tpl.key % (self.name, key.name), dict(size=key.size, md5=key.etag.strip('"'))) key.name = key.name return keys prefix = kwargs.get('prefix', '') return list(self.list(prefix=prefix))
[ "def", "_get_all", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "kwargs", ".", "get", "(", "'headers'", ",", "args", "[", "2", "]", "if", "len", "(", "args", ")", ">", "2", "else", "None", ")", "or", "dict", "(", ")", "if", "'force'", "in", "headers", ":", "keys", "=", "super", "(", "Bucket", ",", "self", ")", ".", "_get_all", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "key", "in", "keys", ":", "mimicdb", ".", "backend", ".", "sadd", "(", "tpl", ".", "bucket", "%", "self", ".", "name", ",", "key", ".", "name", ")", "mimicdb", ".", "backend", ".", "hmset", "(", "tpl", ".", "key", "%", "(", "self", ".", "name", ",", "key", ".", "name", ")", ",", "dict", "(", "size", "=", "key", ".", "size", ",", "md5", "=", "key", ".", "etag", ".", "strip", "(", "'\"'", ")", ")", ")", "key", ".", "name", "=", "key", ".", "name", "return", "keys", "prefix", "=", "kwargs", ".", "get", "(", "'prefix'", ",", "''", ")", "return", "list", "(", "self", ".", "list", "(", "prefix", "=", "prefix", ")", ")" ]
If 'force' is in the headers, retrieve the list of keys from S3. Otherwise, use the list() function to retrieve the keys from MimicDB.
[ "If", "force", "is", "in", "the", "headers", "retrieve", "the", "list", "of", "keys", "from", "S3", ".", "Otherwise", "use", "the", "list", "()", "function", "to", "retrieve", "the", "keys", "from", "MimicDB", "." ]
python
valid
36.8
panosl/django-currencies
currencies/utils.py
https://github.com/panosl/django-currencies/blob/8d4c6c202ad7c4cc06263ab2c1b1f969bbe99acd/currencies/utils.py#L48-L55
def price_rounding(price, decimals=2): """Takes a decimal price and rounds to a number of decimal places""" try: exponent = D('.' + decimals * '0') except InvalidOperation: # Currencies with no decimal places, ex. JPY, HUF exponent = D() return price.quantize(exponent, rounding=ROUND_UP)
[ "def", "price_rounding", "(", "price", ",", "decimals", "=", "2", ")", ":", "try", ":", "exponent", "=", "D", "(", "'.'", "+", "decimals", "*", "'0'", ")", "except", "InvalidOperation", ":", "# Currencies with no decimal places, ex. JPY, HUF", "exponent", "=", "D", "(", ")", "return", "price", ".", "quantize", "(", "exponent", ",", "rounding", "=", "ROUND_UP", ")" ]
Takes a decimal price and rounds to a number of decimal places
[ "Takes", "a", "decimal", "price", "and", "rounds", "to", "a", "number", "of", "decimal", "places" ]
python
train
40.125
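For context on the price_rounding record above, the following is a minimal, self-contained sketch of the same rounding idea with the decimal imports spelled out (the original module aliases Decimal as D); the sample prices are hypothetical.

from decimal import Decimal, InvalidOperation, ROUND_UP

def price_rounding(price, decimals=2):
    try:
        exponent = Decimal('.' + decimals * '0')
    except InvalidOperation:
        # Zero-decimal currencies (e.g. JPY, HUF): quantize to a whole number.
        exponent = Decimal()
    return price.quantize(exponent, rounding=ROUND_UP)

print(price_rounding(Decimal('9.991')))               # Decimal('10.00')
print(price_rounding(Decimal('1234.4'), decimals=0))  # Decimal('1235')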
fozzle/python-brotherprint
brotherprint/brotherprint.py
https://github.com/fozzle/python-brotherprint/blob/5fb92df11b599c30a7da3d6ac7ed60acff230044/brotherprint/brotherprint.py#L568-L584
def double_width(self, action): '''Enable/cancel doublewidth printing Args: action: Enable or disable doublewidth printing. Options are 'on' and 'off' Returns: None Raises: RuntimeError: Invalid action. ''' if action == 'on': action = '1' elif action == 'off': action = '0' else: raise RuntimeError('Invalid action for function doubleWidth. Options are on and off') self.send(chr(27)+'W'+action)
[ "def", "double_width", "(", "self", ",", "action", ")", ":", "if", "action", "==", "'on'", ":", "action", "=", "'1'", "elif", "action", "==", "'off'", ":", "action", "=", "'0'", "else", ":", "raise", "RuntimeError", "(", "'Invalid action for function doubleWidth. Options are on and off'", ")", "self", ".", "send", "(", "chr", "(", "27", ")", "+", "'W'", "+", "action", ")" ]
Enable/cancel doublewidth printing Args: action: Enable or disable doublewidth printing. Options are 'on' and 'off' Returns: None Raises: RuntimeError: Invalid action.
[ "Enable", "/", "cancel", "doublewidth", "printing", "Args", ":", "action", ":", "Enable", "or", "disable", "doublewidth", "printing", ".", "Options", "are", "on", "and", "off", "Returns", ":", "None", "Raises", ":", "RuntimeError", ":", "Invalid", "action", "." ]
python
train
31.294118
cgearhart/Resound
extract.py
https://github.com/cgearhart/Resound/blob/83a15be0ce2dc13003574c6039f8a1ad87734bc2/extract.py#L16-L42
def main(input_filename, songname, format, counter): """ Calculate the fingerprint hashses of the referenced audio file and save to disk as a pickle file """ # open the file & convert to wav song_data = AudioSegment.from_file(input_filename, format=format) song_data = song_data.set_channels(1) # convert to mono wav_tmp = song_data.export(format="wav") # write to a tmp file buffer wav_tmp.seek(0) rate, wav_data = wavfile.read(wav_tmp) # extract peaks and compute constellation hashes & offsets peaks = resound.get_peaks(np.array(wav_data)) fingerprints = list(resound.hashes(peaks)) # hash, offset pairs if not fingerprints: raise RuntimeError("No fingerprints detected in source file - check your parameters passed to Resound.") # Combine duplicate keys for fp, abs_offset in fingerprints: counter[fp].append((abs_offset, songname)) print " Identified {} keypoints in '{}'.".format(len(counter), songname) return counter
[ "def", "main", "(", "input_filename", ",", "songname", ",", "format", ",", "counter", ")", ":", "# open the file & convert to wav", "song_data", "=", "AudioSegment", ".", "from_file", "(", "input_filename", ",", "format", "=", "format", ")", "song_data", "=", "song_data", ".", "set_channels", "(", "1", ")", "# convert to mono", "wav_tmp", "=", "song_data", ".", "export", "(", "format", "=", "\"wav\"", ")", "# write to a tmp file buffer", "wav_tmp", ".", "seek", "(", "0", ")", "rate", ",", "wav_data", "=", "wavfile", ".", "read", "(", "wav_tmp", ")", "# extract peaks and compute constellation hashes & offsets", "peaks", "=", "resound", ".", "get_peaks", "(", "np", ".", "array", "(", "wav_data", ")", ")", "fingerprints", "=", "list", "(", "resound", ".", "hashes", "(", "peaks", ")", ")", "# hash, offset pairs", "if", "not", "fingerprints", ":", "raise", "RuntimeError", "(", "\"No fingerprints detected in source file - check your parameters passed to Resound.\"", ")", "# Combine duplicate keys", "for", "fp", ",", "abs_offset", "in", "fingerprints", ":", "counter", "[", "fp", "]", ".", "append", "(", "(", "abs_offset", ",", "songname", ")", ")", "print", "\" Identified {} keypoints in '{}'.\"", ".", "format", "(", "len", "(", "counter", ")", ",", "songname", ")", "return", "counter" ]
Calculate the fingerprint hashes of the referenced audio file and save to disk as a pickle file
[ "Calculate", "the", "fingerprint", "hashes", "of", "the", "referenced", "audio", "file", "and", "save", "to", "disk", "as", "a", "pickle", "file" ]
python
train
36.962963
ECESeniorDesign/greenhouse_envmgmt
greenhouse_envmgmt/control.py
https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/control.py#L178-L219
def control(self, on=[], off=[]): """ This method serves as the primary interaction point to the controls interface. - The 'on' and 'off' arguments can either be a list or a single string. This allows for both individual device control and batch controls. Note: Both the onlist and offlist are optional. If only one item is being managed, it can be passed as a string. Usage: - Turning off all devices: ctrlobj.control(off="all") - Turning on all devices: ctrlobj.control(on="all") - Turning on the light and fan ONLY (for example) ctrlobj.control(on=["light", "fan"]) - Turning on the light and turning off the fan (for example) ctrolobj.control(on="light", off="fan") """ controls = {"light", "valve", "fan", "pump"} def cast_arg(arg): if type(arg) is str: if arg == "all": return controls else: return {arg} & controls else: return set(arg) & controls # User has requested individual controls. for item in cast_arg(on): self.manage(item, "on") for item in cast_arg(off): self.manage(item, "off") sleep(.01) # Force delay to throttle requests return self.update()
[ "def", "control", "(", "self", ",", "on", "=", "[", "]", ",", "off", "=", "[", "]", ")", ":", "controls", "=", "{", "\"light\"", ",", "\"valve\"", ",", "\"fan\"", ",", "\"pump\"", "}", "def", "cast_arg", "(", "arg", ")", ":", "if", "type", "(", "arg", ")", "is", "str", ":", "if", "arg", "==", "\"all\"", ":", "return", "controls", "else", ":", "return", "{", "arg", "}", "&", "controls", "else", ":", "return", "set", "(", "arg", ")", "&", "controls", "# User has requested individual controls.", "for", "item", "in", "cast_arg", "(", "on", ")", ":", "self", ".", "manage", "(", "item", ",", "\"on\"", ")", "for", "item", "in", "cast_arg", "(", "off", ")", ":", "self", ".", "manage", "(", "item", ",", "\"off\"", ")", "sleep", "(", ".01", ")", "# Force delay to throttle requests", "return", "self", ".", "update", "(", ")" ]
This method serves as the primary interaction point to the controls interface. - The 'on' and 'off' arguments can either be a list or a single string. This allows for both individual device control and batch controls. Note: Both the onlist and offlist are optional. If only one item is being managed, it can be passed as a string. Usage: - Turning off all devices: ctrlobj.control(off="all") - Turning on all devices: ctrlobj.control(on="all") - Turning on the light and fan ONLY (for example) ctrlobj.control(on=["light", "fan"]) - Turning on the light and turning off the fan (for example) ctrlobj.control(on="light", off="fan")
[ "This", "method", "serves", "as", "the", "primary", "interaction", "point", "to", "the", "controls", "interface", ".", "-", "The", "on", "and", "off", "arguments", "can", "either", "be", "a", "list", "or", "a", "single", "string", ".", "This", "allows", "for", "both", "individual", "device", "control", "and", "batch", "controls", "." ]
python
train
34.095238
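A small aside on the control record above: the set-intersection trick used by its inner cast_arg helper can be tried on its own, as in this sketch (the variable names mirror the record; the sample inputs are made up).

controls = {"light", "valve", "fan", "pump"}

def cast_arg(arg):
    # A bare string is wrapped in a set ("all" selects everything);
    # anything else is treated as an iterable. Unknown names are dropped
    # by intersecting with the known controls.
    if type(arg) is str:
        return controls if arg == "all" else {arg} & controls
    return set(arg) & controls

print(cast_arg("all") == controls)    # True
print(cast_arg(["light", "heater"]))  # {'light'} -- unknown device ignored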
nficano/python-lambda
aws_lambda/aws_lambda.py
https://github.com/nficano/python-lambda/blob/b0bd25404df70212d7fa057758760366406d64f2/aws_lambda/aws_lambda.py#L272-L366
def build( src, requirements=None, local_package=None, config_file='config.yaml', profile_name=None, ): """Builds the file bundle. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str local_package: The path to a local package with should be included in the deploy as well (and/or is not available on PyPi) """ # Load and parse the config file. path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) # Get the absolute path to the output directory and create it if it doesn't # already exist. dist_directory = cfg.get('dist_directory', 'dist') path_to_dist = os.path.join(src, dist_directory) mkdir(path_to_dist) # Combine the name of the Lambda function with the current timestamp to use # for the output filename. function_name = cfg.get('function_name') output_filename = '{0}-{1}.zip'.format(timestamp(), function_name) path_to_temp = mkdtemp(prefix='aws-lambda') pip_install_to_target( path_to_temp, requirements=requirements, local_package=local_package, ) # Hack for Zope. if 'zope' in os.listdir(path_to_temp): print( 'Zope packages detected; fixing Zope package paths to ' 'make them importable.', ) # Touch. with open(os.path.join(path_to_temp, 'zope/__init__.py'), 'wb'): pass # Gracefully handle whether ".zip" was included in the filename or not. output_filename = ( '{0}.zip'.format(output_filename) if not output_filename.endswith('.zip') else output_filename ) # Allow definition of source code directories we want to build into our # zipped package. build_config = defaultdict(**cfg.get('build', {})) build_source_directories = build_config.get('source_directories', '') build_source_directories = ( build_source_directories if build_source_directories is not None else '' ) source_directories = [ d.strip() for d in build_source_directories.split(',') ] files = [] for filename in os.listdir(src): if os.path.isfile(filename): if filename == '.DS_Store': continue if filename == config_file: continue print('Bundling: %r' % filename) files.append(os.path.join(src, filename)) elif os.path.isdir(filename) and filename in source_directories: print('Bundling directory: %r' % filename) files.append(os.path.join(src, filename)) # "cd" into `temp_path` directory. os.chdir(path_to_temp) for f in files: if os.path.isfile(f): _, filename = os.path.split(f) # Copy handler file into root of the packages folder. copyfile(f, os.path.join(path_to_temp, filename)) copystat(f, os.path.join(path_to_temp, filename)) elif os.path.isdir(f): destination_folder = os.path.join(path_to_temp, f[len(src) + 1:]) copytree(f, destination_folder) # Zip them together into a single file. # TODO: Delete temp directory created once the archive has been compiled. path_to_zip_file = archive('./', path_to_dist, output_filename) return path_to_zip_file
[ "def", "build", "(", "src", ",", "requirements", "=", "None", ",", "local_package", "=", "None", ",", "config_file", "=", "'config.yaml'", ",", "profile_name", "=", "None", ",", ")", ":", "# Load and parse the config file.", "path_to_config_file", "=", "os", ".", "path", ".", "join", "(", "src", ",", "config_file", ")", "cfg", "=", "read_cfg", "(", "path_to_config_file", ",", "profile_name", ")", "# Get the absolute path to the output directory and create it if it doesn't", "# already exist.", "dist_directory", "=", "cfg", ".", "get", "(", "'dist_directory'", ",", "'dist'", ")", "path_to_dist", "=", "os", ".", "path", ".", "join", "(", "src", ",", "dist_directory", ")", "mkdir", "(", "path_to_dist", ")", "# Combine the name of the Lambda function with the current timestamp to use", "# for the output filename.", "function_name", "=", "cfg", ".", "get", "(", "'function_name'", ")", "output_filename", "=", "'{0}-{1}.zip'", ".", "format", "(", "timestamp", "(", ")", ",", "function_name", ")", "path_to_temp", "=", "mkdtemp", "(", "prefix", "=", "'aws-lambda'", ")", "pip_install_to_target", "(", "path_to_temp", ",", "requirements", "=", "requirements", ",", "local_package", "=", "local_package", ",", ")", "# Hack for Zope.", "if", "'zope'", "in", "os", ".", "listdir", "(", "path_to_temp", ")", ":", "print", "(", "'Zope packages detected; fixing Zope package paths to '", "'make them importable.'", ",", ")", "# Touch.", "with", "open", "(", "os", ".", "path", ".", "join", "(", "path_to_temp", ",", "'zope/__init__.py'", ")", ",", "'wb'", ")", ":", "pass", "# Gracefully handle whether \".zip\" was included in the filename or not.", "output_filename", "=", "(", "'{0}.zip'", ".", "format", "(", "output_filename", ")", "if", "not", "output_filename", ".", "endswith", "(", "'.zip'", ")", "else", "output_filename", ")", "# Allow definition of source code directories we want to build into our", "# zipped package.", "build_config", "=", "defaultdict", "(", "*", "*", "cfg", ".", "get", "(", "'build'", ",", "{", "}", ")", ")", "build_source_directories", "=", "build_config", ".", "get", "(", "'source_directories'", ",", "''", ")", "build_source_directories", "=", "(", "build_source_directories", "if", "build_source_directories", "is", "not", "None", "else", "''", ")", "source_directories", "=", "[", "d", ".", "strip", "(", ")", "for", "d", "in", "build_source_directories", ".", "split", "(", "','", ")", "]", "files", "=", "[", "]", "for", "filename", "in", "os", ".", "listdir", "(", "src", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "if", "filename", "==", "'.DS_Store'", ":", "continue", "if", "filename", "==", "config_file", ":", "continue", "print", "(", "'Bundling: %r'", "%", "filename", ")", "files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "src", ",", "filename", ")", ")", "elif", "os", ".", "path", ".", "isdir", "(", "filename", ")", "and", "filename", "in", "source_directories", ":", "print", "(", "'Bundling directory: %r'", "%", "filename", ")", "files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "src", ",", "filename", ")", ")", "# \"cd\" into `temp_path` directory.", "os", ".", "chdir", "(", "path_to_temp", ")", "for", "f", "in", "files", ":", "if", "os", ".", "path", ".", "isfile", "(", "f", ")", ":", "_", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "f", ")", "# Copy handler file into root of the packages folder.", "copyfile", "(", "f", ",", "os", ".", "path", ".", "join", "(", "path_to_temp", ",", "filename", ")", 
")", "copystat", "(", "f", ",", "os", ".", "path", ".", "join", "(", "path_to_temp", ",", "filename", ")", ")", "elif", "os", ".", "path", ".", "isdir", "(", "f", ")", ":", "destination_folder", "=", "os", ".", "path", ".", "join", "(", "path_to_temp", ",", "f", "[", "len", "(", "src", ")", "+", "1", ":", "]", ")", "copytree", "(", "f", ",", "destination_folder", ")", "# Zip them together into a single file.", "# TODO: Delete temp directory created once the archive has been compiled.", "path_to_zip_file", "=", "archive", "(", "'./'", ",", "path_to_dist", ",", "output_filename", ")", "return", "path_to_zip_file" ]
Builds the file bundle. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str local_package: The path to a local package which should be included in the deploy as well (and/or is not available on PyPi)
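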
[ "Builds", "the", "file", "bundle", "." ]
python
valid
35.4
neo4j/neo4j-python-driver
neo4j/types/__init__.py
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/__init__.py#L219-L235
def index(self, key): """ Return the index of the given item. :param key: :return: """ if isinstance(key, int): if 0 <= key < len(self.__keys): return key raise IndexError(key) elif isinstance(key, str): try: return self.__keys.index(key) except ValueError: raise KeyError(key) else: raise TypeError(key)
[ "def", "index", "(", "self", ",", "key", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "if", "0", "<=", "key", "<", "len", "(", "self", ".", "__keys", ")", ":", "return", "key", "raise", "IndexError", "(", "key", ")", "elif", "isinstance", "(", "key", ",", "str", ")", ":", "try", ":", "return", "self", ".", "__keys", ".", "index", "(", "key", ")", "except", "ValueError", ":", "raise", "KeyError", "(", "key", ")", "else", ":", "raise", "TypeError", "(", "key", ")" ]
Return the index of the given item. :param key: :return:
[ "Return", "the", "index", "of", "the", "given", "item", "." ]
python
train
26.647059
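To make the int-or-str key resolution in the record above concrete, here is a standalone sketch (the name index_of and the sample key list are hypothetical, not the neo4j driver API).

def index_of(keys, key):
    # Integers are bounds-checked and returned as-is; strings are looked up
    # by position; anything else is a TypeError.
    if isinstance(key, int):
        if 0 <= key < len(keys):
            return key
        raise IndexError(key)
    elif isinstance(key, str):
        try:
            return keys.index(key)
        except ValueError:
            raise KeyError(key)
    else:
        raise TypeError(key)

print(index_of(['name', 'age'], 'age'))  # 1
print(index_of(['name', 'age'], 0))      # 0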
warner/magic-wormhole
src/wormhole/cli/cmd_receive.py
https://github.com/warner/magic-wormhole/blob/995d3f546a33eec4f64df929848d86937d2003a7/src/wormhole/cli/cmd_receive.py#L40-L52
def receive(args, reactor=reactor, _debug_stash_wormhole=None): """I implement 'wormhole receive'. I return a Deferred that fires with None (for success), or signals one of the following errors: * WrongPasswordError: the two sides didn't use matching passwords * Timeout: something didn't happen fast enough for our tastes * TransferError: the sender rejected the transfer: verifier mismatch * any other error: something unexpected happened """ r = Receiver(args, reactor) d = r.go() if _debug_stash_wormhole is not None: _debug_stash_wormhole.append(r._w) return d
[ "def", "receive", "(", "args", ",", "reactor", "=", "reactor", ",", "_debug_stash_wormhole", "=", "None", ")", ":", "r", "=", "Receiver", "(", "args", ",", "reactor", ")", "d", "=", "r", ".", "go", "(", ")", "if", "_debug_stash_wormhole", "is", "not", "None", ":", "_debug_stash_wormhole", ".", "append", "(", "r", ".", "_w", ")", "return", "d" ]
I implement 'wormhole receive'. I return a Deferred that fires with None (for success), or signals one of the following errors: * WrongPasswordError: the two sides didn't use matching passwords * Timeout: something didn't happen fast enough for our tastes * TransferError: the sender rejected the transfer: verifier mismatch * any other error: something unexpected happened
[ "I", "implement", "wormhole", "receive", ".", "I", "return", "a", "Deferred", "that", "fires", "with", "None", "(", "for", "success", ")", "or", "signals", "one", "of", "the", "following", "errors", ":", "*", "WrongPasswordError", ":", "the", "two", "sides", "didn", "t", "use", "matching", "passwords", "*", "Timeout", ":", "something", "didn", "t", "happen", "fast", "enough", "for", "our", "tastes", "*", "TransferError", ":", "the", "sender", "rejected", "the", "transfer", ":", "verifier", "mismatch", "*", "any", "other", "error", ":", "something", "unexpected", "happened" ]
python
train
46.538462
seleniumbase/SeleniumBase
seleniumbase/core/browser_launcher.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/core/browser_launcher.py#L63-L85
def _add_chrome_proxy_extension( chrome_options, proxy_string, proxy_user, proxy_pass): """ Implementation of https://stackoverflow.com/a/35293284 for https://stackoverflow.com/questions/12848327/ (Run Selenium on a proxy server that requires authentication.) """ if not "".join(sys.argv) == "-c": # Single-threaded proxy_helper.create_proxy_zip(proxy_string, proxy_user, proxy_pass) else: # Pytest multi-threaded test lock = threading.Lock() with lock: time.sleep(random.uniform(0.02, 0.15)) if not os.path.exists(PROXY_ZIP_PATH): proxy_helper.create_proxy_zip( proxy_string, proxy_user, proxy_pass) time.sleep(random.uniform(0.1, 0.2)) proxy_zip = PROXY_ZIP_PATH if not os.path.exists(PROXY_ZIP_PATH): # Handle "Permission denied" on the default proxy.zip path proxy_zip = PROXY_ZIP_PATH_2 chrome_options.add_extension(proxy_zip) return chrome_options
[ "def", "_add_chrome_proxy_extension", "(", "chrome_options", ",", "proxy_string", ",", "proxy_user", ",", "proxy_pass", ")", ":", "if", "not", "\"\"", ".", "join", "(", "sys", ".", "argv", ")", "==", "\"-c\"", ":", "# Single-threaded", "proxy_helper", ".", "create_proxy_zip", "(", "proxy_string", ",", "proxy_user", ",", "proxy_pass", ")", "else", ":", "# Pytest multi-threaded test", "lock", "=", "threading", ".", "Lock", "(", ")", "with", "lock", ":", "time", ".", "sleep", "(", "random", ".", "uniform", "(", "0.02", ",", "0.15", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "PROXY_ZIP_PATH", ")", ":", "proxy_helper", ".", "create_proxy_zip", "(", "proxy_string", ",", "proxy_user", ",", "proxy_pass", ")", "time", ".", "sleep", "(", "random", ".", "uniform", "(", "0.1", ",", "0.2", ")", ")", "proxy_zip", "=", "PROXY_ZIP_PATH", "if", "not", "os", ".", "path", ".", "exists", "(", "PROXY_ZIP_PATH", ")", ":", "# Handle \"Permission denied\" on the default proxy.zip path", "proxy_zip", "=", "PROXY_ZIP_PATH_2", "chrome_options", ".", "add_extension", "(", "proxy_zip", ")", "return", "chrome_options" ]
Implementation of https://stackoverflow.com/a/35293284 for https://stackoverflow.com/questions/12848327/ (Run Selenium on a proxy server that requires authentication.)
[ "Implementation", "of", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "35293284", "for", "https", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "12848327", "/", "(", "Run", "Selenium", "on", "a", "proxy", "server", "that", "requires", "authentication", ".", ")" ]
python
train
43.956522
c-soft/satel_integra
satel_integra/satel_integra.py
https://github.com/c-soft/satel_integra/blob/3b6d2020d1e10dc5aa40f30ee4ecc0f3a053eb3c/satel_integra/satel_integra.py#L409-L443
async def monitor_status(self, alarm_status_callback=None, zone_changed_callback=None, output_changed_callback=None): """Start monitoring of the alarm status. Send command to satel integra to start sending updates. Read in a loop and call respective callbacks when received messages. """ self._alarm_status_callback = alarm_status_callback self._zone_changed_callback = zone_changed_callback self._output_changed_callback = output_changed_callback _LOGGER.info("Starting monitor_status loop") while not self.closed: _LOGGER.debug("Iteration... ") while not self.connected: _LOGGER.info("Not connected, re-connecting... ") await self.connect() if not self.connected: _LOGGER.warning("Not connected, sleeping for 10s... ") await asyncio.sleep(self._reconnection_timeout) continue await self.start_monitoring() if not self.connected: _LOGGER.warning("Start monitoring failed, sleeping for 10s...") await asyncio.sleep(self._reconnection_timeout) continue while True: await self._update_status() _LOGGER.debug("Got status!") if not self.connected: _LOGGER.info("Got connection broken, reconnecting!") break _LOGGER.info("Closed, quit monitoring.")
[ "async", "def", "monitor_status", "(", "self", ",", "alarm_status_callback", "=", "None", ",", "zone_changed_callback", "=", "None", ",", "output_changed_callback", "=", "None", ")", ":", "self", ".", "_alarm_status_callback", "=", "alarm_status_callback", "self", ".", "_zone_changed_callback", "=", "zone_changed_callback", "self", ".", "_output_changed_callback", "=", "output_changed_callback", "_LOGGER", ".", "info", "(", "\"Starting monitor_status loop\"", ")", "while", "not", "self", ".", "closed", ":", "_LOGGER", ".", "debug", "(", "\"Iteration... \"", ")", "while", "not", "self", ".", "connected", ":", "_LOGGER", ".", "info", "(", "\"Not connected, re-connecting... \"", ")", "await", "self", ".", "connect", "(", ")", "if", "not", "self", ".", "connected", ":", "_LOGGER", ".", "warning", "(", "\"Not connected, sleeping for 10s... \"", ")", "await", "asyncio", ".", "sleep", "(", "self", ".", "_reconnection_timeout", ")", "continue", "await", "self", ".", "start_monitoring", "(", ")", "if", "not", "self", ".", "connected", ":", "_LOGGER", ".", "warning", "(", "\"Start monitoring failed, sleeping for 10s...\"", ")", "await", "asyncio", ".", "sleep", "(", "self", ".", "_reconnection_timeout", ")", "continue", "while", "True", ":", "await", "self", ".", "_update_status", "(", ")", "_LOGGER", ".", "debug", "(", "\"Got status!\"", ")", "if", "not", "self", ".", "connected", ":", "_LOGGER", ".", "info", "(", "\"Got connection broken, reconnecting!\"", ")", "break", "_LOGGER", ".", "info", "(", "\"Closed, quit monitoring.\"", ")" ]
Start monitoring of the alarm status. Send command to satel integra to start sending updates. Read in a loop and call respective callbacks when received messages.
[ "Start", "monitoring", "of", "the", "alarm", "status", "." ]
python
test
44.4
tensorflow/tensorboard
tensorboard/plugins/hparams/list_session_groups.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_session_groups.py#L524-L558
def _set_avg_session_metrics(session_group): """Sets the metrics for the group to be the average of its sessions. The resulting session group metrics consist of the union of metrics across the group's sessions. The value of each session group metric is the average of that metric values across the sessions in the group. The 'step' and 'wall_time_secs' fields of the resulting MetricValue field in the session group are populated with the corresponding averages (truncated for 'step') as well. Args: session_group: A SessionGroup protobuffer. """ assert session_group.sessions, 'SessionGroup cannot be empty.' # Algorithm: Iterate over all (session, metric) pairs and maintain a # dict from _MetricIdentifier to _MetricStats objects. # Then use the final dict state to compute the average for each metric. metric_stats = collections.defaultdict(_MetricStats) for session in session_group.sessions: for metric_value in session.metric_values: metric_name = _MetricIdentifier(group=metric_value.name.group, tag=metric_value.name.tag) stats = metric_stats[metric_name] stats.total += metric_value.value stats.count += 1 stats.total_step += metric_value.training_step stats.total_wall_time_secs += metric_value.wall_time_secs del session_group.metric_values[:] for (metric_name, stats) in six.iteritems(metric_stats): session_group.metric_values.add( name=api_pb2.MetricName(group=metric_name.group, tag=metric_name.tag), value=float(stats.total)/float(stats.count), training_step=stats.total_step // stats.count, wall_time_secs=stats.total_wall_time_secs / stats.count)
[ "def", "_set_avg_session_metrics", "(", "session_group", ")", ":", "assert", "session_group", ".", "sessions", ",", "'SessionGroup cannot be empty.'", "# Algorithm: Iterate over all (session, metric) pairs and maintain a", "# dict from _MetricIdentifier to _MetricStats objects.", "# Then use the final dict state to compute the average for each metric.", "metric_stats", "=", "collections", ".", "defaultdict", "(", "_MetricStats", ")", "for", "session", "in", "session_group", ".", "sessions", ":", "for", "metric_value", "in", "session", ".", "metric_values", ":", "metric_name", "=", "_MetricIdentifier", "(", "group", "=", "metric_value", ".", "name", ".", "group", ",", "tag", "=", "metric_value", ".", "name", ".", "tag", ")", "stats", "=", "metric_stats", "[", "metric_name", "]", "stats", ".", "total", "+=", "metric_value", ".", "value", "stats", ".", "count", "+=", "1", "stats", ".", "total_step", "+=", "metric_value", ".", "training_step", "stats", ".", "total_wall_time_secs", "+=", "metric_value", ".", "wall_time_secs", "del", "session_group", ".", "metric_values", "[", ":", "]", "for", "(", "metric_name", ",", "stats", ")", "in", "six", ".", "iteritems", "(", "metric_stats", ")", ":", "session_group", ".", "metric_values", ".", "add", "(", "name", "=", "api_pb2", ".", "MetricName", "(", "group", "=", "metric_name", ".", "group", ",", "tag", "=", "metric_name", ".", "tag", ")", ",", "value", "=", "float", "(", "stats", ".", "total", ")", "/", "float", "(", "stats", ".", "count", ")", ",", "training_step", "=", "stats", ".", "total_step", "//", "stats", ".", "count", ",", "wall_time_secs", "=", "stats", ".", "total_wall_time_secs", "/", "stats", ".", "count", ")" ]
Sets the metrics for the group to be the average of its sessions. The resulting session group metrics consist of the union of metrics across the group's sessions. The value of each session group metric is the average of that metric values across the sessions in the group. The 'step' and 'wall_time_secs' fields of the resulting MetricValue field in the session group are populated with the corresponding averages (truncated for 'step') as well. Args: session_group: A SessionGroup protobuffer.
[ "Sets", "the", "metrics", "for", "the", "group", "to", "be", "the", "average", "of", "its", "sessions", "." ]
python
train
48.171429
pandas-dev/pandas
pandas/core/sparse/frame.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L997-L1039
def homogenize(series_dict): """ Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex corresponding to the locations where they all have data Parameters ---------- series_dict : dict or DataFrame Notes ----- Using the dumbest algorithm I could think of. Should put some more thought into this Returns ------- homogenized : dict of SparseSeries """ index = None need_reindex = False for _, series in series_dict.items(): if not np.isnan(series.fill_value): raise TypeError('this method is only valid with NaN fill values') if index is None: index = series.sp_index elif not series.sp_index.equals(index): need_reindex = True index = index.intersect(series.sp_index) if need_reindex: output = {} for name, series in series_dict.items(): if not series.sp_index.equals(index): series = series.sparse_reindex(index) output[name] = series else: output = series_dict return output
[ "def", "homogenize", "(", "series_dict", ")", ":", "index", "=", "None", "need_reindex", "=", "False", "for", "_", ",", "series", "in", "series_dict", ".", "items", "(", ")", ":", "if", "not", "np", ".", "isnan", "(", "series", ".", "fill_value", ")", ":", "raise", "TypeError", "(", "'this method is only valid with NaN fill values'", ")", "if", "index", "is", "None", ":", "index", "=", "series", ".", "sp_index", "elif", "not", "series", ".", "sp_index", ".", "equals", "(", "index", ")", ":", "need_reindex", "=", "True", "index", "=", "index", ".", "intersect", "(", "series", ".", "sp_index", ")", "if", "need_reindex", ":", "output", "=", "{", "}", "for", "name", ",", "series", "in", "series_dict", ".", "items", "(", ")", ":", "if", "not", "series", ".", "sp_index", ".", "equals", "(", "index", ")", ":", "series", "=", "series", ".", "sparse_reindex", "(", "index", ")", "output", "[", "name", "]", "=", "series", "else", ":", "output", "=", "series_dict", "return", "output" ]
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex corresponding to the locations where they all have data Parameters ---------- series_dict : dict or DataFrame Notes ----- Using the dumbest algorithm I could think of. Should put some more thought into this Returns ------- homogenized : dict of SparseSeries
[ "Conform", "a", "set", "of", "SparseSeries", "(", "with", "NaN", "fill_value", ")", "to", "a", "common", "SparseIndex", "corresponding", "to", "the", "locations", "where", "they", "all", "have", "data" ]
python
train
25.093023
log2timeline/dfvfs
examples/recursive_hasher.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/examples/recursive_hasher.py#L39-L78
def _CalculateHashDataStream(self, file_entry, data_stream_name): """Calculates a message digest hash of the data of the file entry. Args: file_entry (dfvfs.FileEntry): file entry. data_stream_name (str): name of the data stream. Returns: bytes: digest hash or None. """ hash_context = hashlib.sha256() try: file_object = file_entry.GetFileObject(data_stream_name=data_stream_name) except IOError as exception: logging.warning(( 'Unable to open path specification:\n{0:s}' 'with error: {1!s}').format( file_entry.path_spec.comparable, exception)) return None if not file_object: return None try: data = file_object.read(self._READ_BUFFER_SIZE) while data: hash_context.update(data) data = file_object.read(self._READ_BUFFER_SIZE) except IOError as exception: logging.warning(( 'Unable to read from path specification:\n{0:s}' 'with error: {1!s}').format( file_entry.path_spec.comparable, exception)) return None finally: file_object.close() return hash_context.hexdigest()
[ "def", "_CalculateHashDataStream", "(", "self", ",", "file_entry", ",", "data_stream_name", ")", ":", "hash_context", "=", "hashlib", ".", "sha256", "(", ")", "try", ":", "file_object", "=", "file_entry", ".", "GetFileObject", "(", "data_stream_name", "=", "data_stream_name", ")", "except", "IOError", "as", "exception", ":", "logging", ".", "warning", "(", "(", "'Unable to open path specification:\\n{0:s}'", "'with error: {1!s}'", ")", ".", "format", "(", "file_entry", ".", "path_spec", ".", "comparable", ",", "exception", ")", ")", "return", "None", "if", "not", "file_object", ":", "return", "None", "try", ":", "data", "=", "file_object", ".", "read", "(", "self", ".", "_READ_BUFFER_SIZE", ")", "while", "data", ":", "hash_context", ".", "update", "(", "data", ")", "data", "=", "file_object", ".", "read", "(", "self", ".", "_READ_BUFFER_SIZE", ")", "except", "IOError", "as", "exception", ":", "logging", ".", "warning", "(", "(", "'Unable to read from path specification:\\n{0:s}'", "'with error: {1!s}'", ")", ".", "format", "(", "file_entry", ".", "path_spec", ".", "comparable", ",", "exception", ")", ")", "return", "None", "finally", ":", "file_object", ".", "close", "(", ")", "return", "hash_context", ".", "hexdigest", "(", ")" ]
Calculates a message digest hash of the data of the file entry. Args: file_entry (dfvfs.FileEntry): file entry. data_stream_name (str): name of the data stream. Returns: bytes: digest hash or None.
[ "Calculates", "a", "message", "digest", "hash", "of", "the", "data", "of", "the", "file", "entry", "." ]
python
train
28.6
CalebBell/fluids
fluids/compressible.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/compressible.py#L1576-L1667
def Fritzsche(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None, Ts=288.7, Ps=101325., Zavg=1, E=1): r'''Calculation function for dealing with flow of a compressible gas in a pipeline with the Fritzsche formula. Can calculate any of the following, given all other inputs: * Flow rate * Upstream pressure * Downstream pressure * Diameter of pipe * Length of pipe A variety of different constants and expressions have been presented for the Fritzsche formula. Here, the form as in [1]_ is used but with all inputs in base SI units. .. math:: Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2} {L\cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538}D^{2.69} Parameters ---------- SG : float Specific gravity of fluid with respect to air at the reference temperature and pressure `Ts` and `Ps`, [-] Tavg : float Average temperature of the fluid in the pipeline, [K] L : float, optional Length of pipe, [m] D : float, optional Diameter of pipe, [m] P1 : float, optional Inlet pressure to pipe, [Pa] P2 : float, optional Outlet pressure from pipe, [Pa] Q : float, optional Flow rate of gas through pipe, [m^3/s] Ts : float, optional Reference temperature for the specific gravity of the gas, [K] Ps : float, optional Reference pressure for the specific gravity of the gas, [Pa] Zavg : float, optional Average compressibility factor for gas, [-] E : float, optional Pipeline efficiency, a correction factor between 0 and 1 Returns ------- Q, P1, P2, D, or L : float The missing input which was solved for [base SI] Notes ----- This model is also presented in [1]_ with a leading constant of 2.827, the same exponents as used here, units of mm (diameter), kPa, km (length), and flow in m^3/hour. This model is shown in base SI units in [2]_, and with a leading constant of 94.2565, a diameter power of 2.6911, main group power of 0.5382 and a specific gravity power of 0.858. The difference is very small. Examples -------- >>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15) 39.421535157535565 References ---------- .. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton, FL: CRC Press, 2005. .. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations for Steady State Flow in Natural Gas Pipelines." Journal of the Brazilian Society of Mechanical Sciences and Engineering 29, no. 3 (September 2007): 262-73. doi:10.1590/S1678-58782007000300005. ''' # Rational('2.827E-3')/(3600*24)*(1000)**Rational('2.69')*(1000)**Rational('0.538')*1000/(1000**2)**Rational('0.538') c5 = 93.50009798751128188757518688244137811221 # 14135*10**(57/125)/432 c2 = 0.8587 c3 = 0.538 c4 = 2.69 if Q is None and (None not in [L, D, P1, P2]): return c5*E*(Ts/Ps)*((P1**2 - P2**2)/(SG**c2*Tavg*L*Zavg))**c3*D**c4 elif D is None and (None not in [L, Q, P1, P2]): return (Ps*Q*(SG**(-c2)*(P1**2 - P2**2)/(L*Tavg*Zavg))**(-c3)/(E*Ts*c5))**(1./c4) elif P1 is None and (None not in [L, Q, D, P2]): return (L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P2**2)**0.5 elif P2 is None and (None not in [L, Q, D, P1]): return (-L*SG**c2*Tavg*Zavg*(D**(-c4)*Ps*Q/(E*Ts*c5))**(1./c3) + P1**2)**0.5 elif L is None and (None not in [P2, Q, D, P1]): return SG**(-c2)*(D**(-c4)*Ps*Q/(E*Ts*c5))**(-1./c3)*(P1**2 - P2**2)/(Tavg*Zavg) else: raise Exception('This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.')
[ "def", "Fritzsche", "(", "SG", ",", "Tavg", ",", "L", "=", "None", ",", "D", "=", "None", ",", "P1", "=", "None", ",", "P2", "=", "None", ",", "Q", "=", "None", ",", "Ts", "=", "288.7", ",", "Ps", "=", "101325.", ",", "Zavg", "=", "1", ",", "E", "=", "1", ")", ":", "# Rational('2.827E-3')/(3600*24)*(1000)**Rational('2.69')*(1000)**Rational('0.538')*1000/(1000**2)**Rational('0.538')", "c5", "=", "93.50009798751128188757518688244137811221", "# 14135*10**(57/125)/432", "c2", "=", "0.8587", "c3", "=", "0.538", "c4", "=", "2.69", "if", "Q", "is", "None", "and", "(", "None", "not", "in", "[", "L", ",", "D", ",", "P1", ",", "P2", "]", ")", ":", "return", "c5", "*", "E", "*", "(", "Ts", "/", "Ps", ")", "*", "(", "(", "P1", "**", "2", "-", "P2", "**", "2", ")", "/", "(", "SG", "**", "c2", "*", "Tavg", "*", "L", "*", "Zavg", ")", ")", "**", "c3", "*", "D", "**", "c4", "elif", "D", "is", "None", "and", "(", "None", "not", "in", "[", "L", ",", "Q", ",", "P1", ",", "P2", "]", ")", ":", "return", "(", "Ps", "*", "Q", "*", "(", "SG", "**", "(", "-", "c2", ")", "*", "(", "P1", "**", "2", "-", "P2", "**", "2", ")", "/", "(", "L", "*", "Tavg", "*", "Zavg", ")", ")", "**", "(", "-", "c3", ")", "/", "(", "E", "*", "Ts", "*", "c5", ")", ")", "**", "(", "1.", "/", "c4", ")", "elif", "P1", "is", "None", "and", "(", "None", "not", "in", "[", "L", ",", "Q", ",", "D", ",", "P2", "]", ")", ":", "return", "(", "L", "*", "SG", "**", "c2", "*", "Tavg", "*", "Zavg", "*", "(", "D", "**", "(", "-", "c4", ")", "*", "Ps", "*", "Q", "/", "(", "E", "*", "Ts", "*", "c5", ")", ")", "**", "(", "1.", "/", "c3", ")", "+", "P2", "**", "2", ")", "**", "0.5", "elif", "P2", "is", "None", "and", "(", "None", "not", "in", "[", "L", ",", "Q", ",", "D", ",", "P1", "]", ")", ":", "return", "(", "-", "L", "*", "SG", "**", "c2", "*", "Tavg", "*", "Zavg", "*", "(", "D", "**", "(", "-", "c4", ")", "*", "Ps", "*", "Q", "/", "(", "E", "*", "Ts", "*", "c5", ")", ")", "**", "(", "1.", "/", "c3", ")", "+", "P1", "**", "2", ")", "**", "0.5", "elif", "L", "is", "None", "and", "(", "None", "not", "in", "[", "P2", ",", "Q", ",", "D", ",", "P1", "]", ")", ":", "return", "SG", "**", "(", "-", "c2", ")", "*", "(", "D", "**", "(", "-", "c4", ")", "*", "Ps", "*", "Q", "/", "(", "E", "*", "Ts", "*", "c5", ")", ")", "**", "(", "-", "1.", "/", "c3", ")", "*", "(", "P1", "**", "2", "-", "P2", "**", "2", ")", "/", "(", "Tavg", "*", "Zavg", ")", "else", ":", "raise", "Exception", "(", "'This function solves for either flow, upstream pressure, downstream pressure, diameter, or length; all other inputs must be provided.'", ")" ]
r'''Calculation function for dealing with flow of a compressible gas in a pipeline with the Fritzsche formula. Can calculate any of the following, given all other inputs: * Flow rate * Upstream pressure * Downstream pressure * Diameter of pipe * Length of pipe A variety of different constants and expressions have been presented for the Fritzsche formula. Here, the form as in [1]_ is used but with all inputs in base SI units. .. math:: Q = 93.500 \frac{T_s}{P_s}\left(\frac{P_1^2 - P_2^2} {L\cdot {SG}^{0.8587} \cdot T_{avg}}\right)^{0.538}D^{2.69} Parameters ---------- SG : float Specific gravity of fluid with respect to air at the reference temperature and pressure `Ts` and `Ps`, [-] Tavg : float Average temperature of the fluid in the pipeline, [K] L : float, optional Length of pipe, [m] D : float, optional Diameter of pipe, [m] P1 : float, optional Inlet pressure to pipe, [Pa] P2 : float, optional Outlet pressure from pipe, [Pa] Q : float, optional Flow rate of gas through pipe, [m^3/s] Ts : float, optional Reference temperature for the specific gravity of the gas, [K] Ps : float, optional Reference pressure for the specific gravity of the gas, [Pa] Zavg : float, optional Average compressibility factor for gas, [-] E : float, optional Pipeline efficiency, a correction factor between 0 and 1 Returns ------- Q, P1, P2, D, or L : float The missing input which was solved for [base SI] Notes ----- This model is also presented in [1]_ with a leading constant of 2.827, the same exponents as used here, units of mm (diameter), kPa, km (length), and flow in m^3/hour. This model is shown in base SI units in [2]_, and with a leading constant of 94.2565, a diameter power of 2.6911, main group power of 0.5382 and a specific gravity power of 0.858. The difference is very small. Examples -------- >>> Fritzsche(D=0.340, P1=90E5, P2=20E5, L=160E3, SG=0.693, Tavg=277.15) 39.421535157535565 References ---------- .. [1] Menon, E. Shashi. Gas Pipeline Hydraulics. 1st edition. Boca Raton, FL: CRC Press, 2005. .. [2] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations for Steady State Flow in Natural Gas Pipelines." Journal of the Brazilian Society of Mechanical Sciences and Engineering 29, no. 3 (September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
[ "r", "Calculation", "function", "for", "dealing", "with", "flow", "of", "a", "compressible", "gas", "in", "a", "pipeline", "with", "the", "Fritzsche", "formula", ".", "Can", "calculate", "any", "of", "the", "following", "given", "all", "other", "inputs", ":" ]
python
train
40.880435
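As a sanity check on the Fritzsche record above, the flow-rate branch alone can be written out as below and compared against the record's doctest value; the function name fritzsche_flow is made up, and only the solve-for-Q case is reproduced.

def fritzsche_flow(SG, Tavg, L, D, P1, P2, Ts=288.7, Ps=101325.0, Zavg=1.0, E=1.0):
    # Q = c5*E*(Ts/Ps)*((P1^2 - P2^2)/(SG^0.8587 * Tavg * L * Zavg))^0.538 * D^2.69
    c5 = 93.50009798751128  # leading constant from the record
    return (c5 * E * (Ts / Ps)
            * ((P1**2 - P2**2) / (SG**0.8587 * Tavg * L * Zavg))**0.538
            * D**2.69)

# Matches the record's example: roughly 39.42 m^3/s.
print(fritzsche_flow(SG=0.693, Tavg=277.15, L=160e3, D=0.340, P1=90e5, P2=20e5))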
trustar/trustar-python
trustar/models/page.py
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/models/page.py#L47-L61
def has_more_pages(self): """ :return: ``True`` if there are more pages available on the server. """ # if has_next property exists, it represents whether more pages exist if self.has_next is not None: return self.has_next # otherwise, try to compute whether or not more pages exist total_pages = self.get_total_pages() if self.page_number is None or total_pages is None: return None else: return self.page_number + 1 < total_pages
[ "def", "has_more_pages", "(", "self", ")", ":", "# if has_next property exists, it represents whether more pages exist", "if", "self", ".", "has_next", "is", "not", "None", ":", "return", "self", ".", "has_next", "# otherwise, try to compute whether or not more pages exist", "total_pages", "=", "self", ".", "get_total_pages", "(", ")", "if", "self", ".", "page_number", "is", "None", "or", "total_pages", "is", "None", ":", "return", "None", "else", ":", "return", "self", ".", "page_number", "+", "1", "<", "total_pages" ]
:return: ``True`` if there are more pages available on the server.
[ ":", "return", ":", "True", "if", "there", "are", "more", "pages", "available", "on", "the", "server", "." ]
python
train
35.066667
DLR-RM/RAFCON
source/rafcon/core/library_manager.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/library_manager.py#L405-L409
def remove_library_from_file_system(self, library_path, library_name): """Remove library from hard disk.""" library_file_system_path = self.get_os_path_to_library(library_path, library_name)[0] shutil.rmtree(library_file_system_path) self.refresh_libraries()
[ "def", "remove_library_from_file_system", "(", "self", ",", "library_path", ",", "library_name", ")", ":", "library_file_system_path", "=", "self", ".", "get_os_path_to_library", "(", "library_path", ",", "library_name", ")", "[", "0", "]", "shutil", ".", "rmtree", "(", "library_file_system_path", ")", "self", ".", "refresh_libraries", "(", ")" ]
Remove library from hard disk.
[ "Remove", "library", "from", "hard", "disk", "." ]
python
train
57.2
ioam/lancet
lancet/launch.py
https://github.com/ioam/lancet/blob/1fbbf88fa0e8974ff9ed462e3cb11722ddebdd6e/lancet/launch.py#L640-L653
def summary(self): """ A succinct summary of the Launcher configuration. Unlike the repr, a summary does not have to be complete but must supply key information relevant to the user. """ print("Type: %s" % self.__class__.__name__) print("Batch Name: %r" % self.batch_name) if self.tag: print("Tag: %s" % self.tag) print("Root directory: %r" % self.get_root_directory()) print("Maximum concurrency: %s" % self.max_concurrency) if self.description: print("Description: %s" % self.description)
[ "def", "summary", "(", "self", ")", ":", "print", "(", "\"Type: %s\"", "%", "self", ".", "__class__", ".", "__name__", ")", "print", "(", "\"Batch Name: %r\"", "%", "self", ".", "batch_name", ")", "if", "self", ".", "tag", ":", "print", "(", "\"Tag: %s\"", "%", "self", ".", "tag", ")", "print", "(", "\"Root directory: %r\"", "%", "self", ".", "get_root_directory", "(", ")", ")", "print", "(", "\"Maximum concurrency: %s\"", "%", "self", ".", "max_concurrency", ")", "if", "self", ".", "description", ":", "print", "(", "\"Description: %s\"", "%", "self", ".", "description", ")" ]
A succinct summary of the Launcher configuration. Unlike the repr, a summary does not have to be complete but must supply key information relevant to the user.
[ "A", "succinct", "summary", "of", "the", "Launcher", "configuration", ".", "Unlike", "the", "repr", "a", "summary", "does", "not", "have", "to", "be", "complete", "but", "must", "supply", "key", "information", "relevant", "to", "the", "user", "." ]
python
valid
42.142857
YosaiProject/yosai
yosai/core/realm/realm.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/realm/realm.py#L289-L340
def get_authzd_permissions(self, identifier, perm_domain): """ :type identifier: str :type domain: str :returns: a list of relevant json blobs, each a list of permission dicts """ related_perms = [] keys = ['*', perm_domain] def query_permissions(self): msg = ("Could not obtain cached permissions for [{0}]. " "Will try to acquire permissions from account store." .format(identifier)) logger.debug(msg) # permissions is a dict: {'domain': json blob of lists of dicts} permissions = self.account_store.get_authz_permissions(identifier) if not permissions: msg = "Could not get permissions from account_store for {0}".\ format(identifier) raise ValueError(msg) return permissions try: msg2 = ("Attempting to get cached authz_info for [{0}]" .format(identifier)) logger.debug(msg2) domain = 'authorization:permissions:' + self.name # related_perms is a list of json blobs whose contents are ordered # such that the order matches that in the keys parameter: related_perms = self.cache_handler.\ hmget_or_create(domain=domain, identifier=identifier, keys=keys, creator_func=query_permissions, creator=self) except ValueError: msg3 = ("No permissions found for identifiers [{0}]. " "Returning None.".format(identifier)) logger.warning(msg3) except AttributeError: # this means the cache_handler isn't configured queried_permissions = query_permissions(self) related_perms = [queried_permissions.get('*'), queried_permissions.get(perm_domain)] return related_perms
[ "def", "get_authzd_permissions", "(", "self", ",", "identifier", ",", "perm_domain", ")", ":", "related_perms", "=", "[", "]", "keys", "=", "[", "'*'", ",", "perm_domain", "]", "def", "query_permissions", "(", "self", ")", ":", "msg", "=", "(", "\"Could not obtain cached permissions for [{0}]. \"", "\"Will try to acquire permissions from account store.\"", ".", "format", "(", "identifier", ")", ")", "logger", ".", "debug", "(", "msg", ")", "# permissions is a dict: {'domain': json blob of lists of dicts}", "permissions", "=", "self", ".", "account_store", ".", "get_authz_permissions", "(", "identifier", ")", "if", "not", "permissions", ":", "msg", "=", "\"Could not get permissions from account_store for {0}\"", ".", "format", "(", "identifier", ")", "raise", "ValueError", "(", "msg", ")", "return", "permissions", "try", ":", "msg2", "=", "(", "\"Attempting to get cached authz_info for [{0}]\"", ".", "format", "(", "identifier", ")", ")", "logger", ".", "debug", "(", "msg2", ")", "domain", "=", "'authorization:permissions:'", "+", "self", ".", "name", "# related_perms is a list of json blobs whose contents are ordered", "# such that the order matches that in the keys parameter:", "related_perms", "=", "self", ".", "cache_handler", ".", "hmget_or_create", "(", "domain", "=", "domain", ",", "identifier", "=", "identifier", ",", "keys", "=", "keys", ",", "creator_func", "=", "query_permissions", ",", "creator", "=", "self", ")", "except", "ValueError", ":", "msg3", "=", "(", "\"No permissions found for identifiers [{0}]. \"", "\"Returning None.\"", ".", "format", "(", "identifier", ")", ")", "logger", ".", "warning", "(", "msg3", ")", "except", "AttributeError", ":", "# this means the cache_handler isn't configured", "queried_permissions", "=", "query_permissions", "(", "self", ")", "related_perms", "=", "[", "queried_permissions", ".", "get", "(", "'*'", ")", ",", "queried_permissions", ".", "get", "(", "perm_domain", ")", "]", "return", "related_perms" ]
:type identifier: str :type domain: str :returns: a list of relevant json blobs, each a list of permission dicts
[ ":", "type", "identifier", ":", "str", ":", "type", "domain", ":", "str" ]
python
train
38.923077
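get_authzd_permissions is a cache-aside lookup: read the per-identifier permission blobs from a cache, fall back to querying the account store directly when no cache handler is configured (the AttributeError branch), and return an empty result when the store has nothing (the ValueError branch). A generic sketch of that control flow, using a hypothetical in-memory cache rather than yosai's real cache_handler API:

class SimpleCache:
    # Hypothetical stand-in for a cache handler, not yosai's CacheHandler.
    def __init__(self):
        self._blobs = {}

    def hmget_or_create(self, identifier, keys, creator_func):
        if identifier not in self._blobs:
            self._blobs[identifier] = creator_func()  # cache miss: query once
        blob = self._blobs[identifier]
        return [blob.get(key) for key in keys]

def get_authz_blobs(cache, identifier, keys, query_store):
    # Mirrors the try/except structure of the method above.
    try:
        return cache.hmget_or_create(identifier, keys, creator_func=query_store)
    except AttributeError:   # no cache configured: hit the store directly
        blob = query_store()
        return [blob.get(key) for key in keys]
    except ValueError:       # the store had nothing for this identifier
        return []

perms = {'*': '[{"action": "read"}]', 'reports': '[{"action": "write"}]'}
print(get_authz_blobs(SimpleCache(), 'alice', ['*', 'reports'], lambda: perms))
print(get_authz_blobs(None, 'alice', ['*', 'reports'], lambda: perms))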
PythonCharmers/python-future
src/libfuturize/fixer_util.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/libfuturize/fixer_util.py#L230-L267
def future_import(feature, node): """ This seems to work """ root = find_root(node) if does_tree_import(u"__future__", feature, node): return # Look for a shebang or encoding line shebang_encoding_idx = None for idx, node in enumerate(root.children): # Is it a shebang or encoding line? if is_shebang_comment(node) or is_encoding_comment(node): shebang_encoding_idx = idx if is_docstring(node): # skip over docstring continue names = check_future_import(node) if not names: # not a future statement; need to insert before this break if feature in names: # already imported return import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")]) if shebang_encoding_idx == 0 and idx == 0: # If this __future__ import would go on the first line, # detach the shebang / encoding prefix from the current first line. # and attach it to our new __future__ import node. import_.prefix = root.children[0].prefix root.children[0].prefix = u'' # End the __future__ import line with a newline and add a blank line # afterwards: children = [import_ , Newline()] root.insert_child(idx, Node(syms.simple_stmt, children))
[ "def", "future_import", "(", "feature", ",", "node", ")", ":", "root", "=", "find_root", "(", "node", ")", "if", "does_tree_import", "(", "u\"__future__\"", ",", "feature", ",", "node", ")", ":", "return", "# Look for a shebang or encoding line", "shebang_encoding_idx", "=", "None", "for", "idx", ",", "node", "in", "enumerate", "(", "root", ".", "children", ")", ":", "# Is it a shebang or encoding line?", "if", "is_shebang_comment", "(", "node", ")", "or", "is_encoding_comment", "(", "node", ")", ":", "shebang_encoding_idx", "=", "idx", "if", "is_docstring", "(", "node", ")", ":", "# skip over docstring", "continue", "names", "=", "check_future_import", "(", "node", ")", "if", "not", "names", ":", "# not a future statement; need to insert before this", "break", "if", "feature", "in", "names", ":", "# already imported", "return", "import_", "=", "FromImport", "(", "u'__future__'", ",", "[", "Leaf", "(", "token", ".", "NAME", ",", "feature", ",", "prefix", "=", "\" \"", ")", "]", ")", "if", "shebang_encoding_idx", "==", "0", "and", "idx", "==", "0", ":", "# If this __future__ import would go on the first line,", "# detach the shebang / encoding prefix from the current first line.", "# and attach it to our new __future__ import node.", "import_", ".", "prefix", "=", "root", ".", "children", "[", "0", "]", ".", "prefix", "root", ".", "children", "[", "0", "]", ".", "prefix", "=", "u''", "# End the __future__ import line with a newline and add a blank line", "# afterwards:", "children", "=", "[", "import_", ",", "Newline", "(", ")", "]", "root", ".", "insert_child", "(", "idx", ",", "Node", "(", "syms", ".", "simple_stmt", ",", "children", ")", ")" ]
Add a ``from __future__ import`` statement for `feature` to the tree containing `node`, unless it is already imported.
[ "Add", "a", "from", "__future__", "import", "statement", "for", "feature", "to", "the", "tree", "containing", "node", "unless", "it", "is", "already", "imported", "." ]
python
train
34.973684
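future_import operates on a lib2to3 parse tree, but its placement rule (skip a shebang or encoding comment and the module docstring, keep existing __future__ imports first, and do nothing if the feature is already imported) can be illustrated on a plain list of source lines. This is a simplified analogy with a hypothetical helper, not the lib2to3-based code itself:

def insert_future_import(lines, feature):
    # Line-based analogy of the placement rule above (simplified: assumes a
    # one-line module docstring and whole-line comments).
    new_line = "from __future__ import %s" % feature
    idx = 0
    for i, line in enumerate(lines):
        stripped = line.strip()
        if stripped.startswith("#") or stripped.startswith(('"""', "'''")):
            idx = i + 1          # shebang, encoding comment or docstring
            continue
        if stripped.startswith("from __future__ import"):
            if feature in stripped:
                return lines     # already imported: nothing to do
            idx = i + 1
            continue
        break                    # first ordinary statement: insert before it
    return lines[:idx] + [new_line] + lines[idx:]

src = ["#!/usr/bin/env python", '"""Module docstring."""', "import os", "print(os.sep)"]
print(insert_future_import(src, "print_function"))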
Alignak-monitoring/alignak
alignak/objects/hostescalation.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/hostescalation.py#L109-L128
def explode(self, escalations): """Create instance of Escalation for each HostEscalation object :param escalations: list of escalation, used to add new ones :type escalations: alignak.objects.escalation.Escalations :return: None """ # Now we explode all escalations (host_name, hostgroup_name) to escalations for escalation in self: properties = escalation.__class__.properties name = getattr(escalation, 'host_name', getattr(escalation, 'hostgroup_name', '')) creation_dict = { 'escalation_name': 'Generated-HE-%s-%s' % (name, escalation.uuid) } for prop in properties: if hasattr(escalation, prop): creation_dict[prop] = getattr(escalation, prop) escalations.add_escalation(Escalation(creation_dict))
[ "def", "explode", "(", "self", ",", "escalations", ")", ":", "# Now we explode all escalations (host_name, hostgroup_name) to escalations", "for", "escalation", "in", "self", ":", "properties", "=", "escalation", ".", "__class__", ".", "properties", "name", "=", "getattr", "(", "escalation", ",", "'host_name'", ",", "getattr", "(", "escalation", ",", "'hostgroup_name'", ",", "''", ")", ")", "creation_dict", "=", "{", "'escalation_name'", ":", "'Generated-HE-%s-%s'", "%", "(", "name", ",", "escalation", ".", "uuid", ")", "}", "for", "prop", "in", "properties", ":", "if", "hasattr", "(", "escalation", ",", "prop", ")", ":", "creation_dict", "[", "prop", "]", "=", "getattr", "(", "escalation", ",", "prop", ")", "escalations", ".", "add_escalation", "(", "Escalation", "(", "creation_dict", ")", ")" ]
Create instance of Escalation for each HostEscalation object :param escalations: list of escalation, used to add new ones :type escalations: alignak.objects.escalation.Escalations :return: None
[ "Create", "instance", "of", "Escalation", "for", "each", "HostEscalation", "object" ]
python
train
44.25
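The explode step above flattens host- and hostgroup-scoped escalation definitions into standalone Escalation objects with generated names. The same idea sketched with plain dicts instead of alignak's object model (the property names are illustrative only):

import uuid

def explode_escalations(definitions):
    # One generated record per definition: copy its properties and derive a
    # unique escalation_name from the host/hostgroup it was attached to.
    generated = []
    for definition in definitions:
        name = definition.get('host_name', definition.get('hostgroup_name', ''))
        record = dict(definition)
        record['escalation_name'] = 'Generated-HE-%s-%s' % (name, uuid.uuid4().hex[:8])
        generated.append(record)
    return generated

defs = [{'host_name': 'web01', 'first_notification': 3},
        {'hostgroup_name': 'databases', 'first_notification': 5}]
for rec in explode_escalations(defs):
    print(rec['escalation_name'])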
tanghaibao/goatools
goatools/parsers/ncbi_gene_file_reader.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/parsers/ncbi_gene_file_reader.py#L259-L263
def _init_idxs_strpat(self, usr_hdrs): """List of indexes whose values will be strings.""" strpat = self.strpat_hdrs.keys() self.idxs_strpat = [ Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat]
[ "def", "_init_idxs_strpat", "(", "self", ",", "usr_hdrs", ")", ":", "strpat", "=", "self", ".", "strpat_hdrs", ".", "keys", "(", ")", "self", ".", "idxs_strpat", "=", "[", "Idx", "for", "Hdr", ",", "Idx", "in", "self", ".", "hdr2idx", ".", "items", "(", ")", "if", "Hdr", "in", "usr_hdrs", "and", "Hdr", "in", "strpat", "]" ]
List of indexes whose values will be strings.
[ "List", "of", "indexes", "whose", "values", "will", "be", "strings", "." ]
python
train
51
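The comprehension above simply intersects the user-requested headers with the headers that have string patterns and keeps their column indexes. A toy illustration (the header names and patterns are hypothetical, not goatools' actual column set):

hdr2idx = {'GeneID': 0, 'Symbol': 1, 'description': 2, 'chromosome': 3}
strpat_hdrs = {'Symbol': r'\S+', 'description': r'.*'}  # values kept as strings
usr_hdrs = ['GeneID', 'Symbol', 'description']

idxs_strpat = [idx for hdr, idx in hdr2idx.items()
               if hdr in usr_hdrs and hdr in strpat_hdrs]
print(sorted(idxs_strpat))  # [1, 2]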
tjvr/kurt
kurt/__init__.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L328-L402
def save(self, path=None, debug=False): """Save project to file. :param path: Path or file pointer. If you pass a file pointer, you're responsible for closing it. If path is not given, the :attr:`path` attribute is used, usually the original path given to :attr:`load()`. If `path` has the extension of an existing plugin, the project will be converted using :attr:`convert`. Otherwise, the extension will be replaced with the extension of the current plugin. (Note that log output for the conversion will be printed to stdout. If you want to deal with the output, call :attr:`convert` directly.) If the path ends in a folder instead of a file, the filename is based on the project's :attr:`name`. :param debug: If true, return debugging information from the format plugin instead of the path. :raises: :py:class:`ValueError` if there's no path or name. :returns: path to the saved file. """ p = self.copy() plugin = p._plugin # require path p.path = path or self.path if not p.path: raise ValueError, "path is required" if isinstance(p.path, basestring): # split path (folder, filename) = os.path.split(p.path) (name, extension) = os.path.splitext(filename) # get plugin from extension if path: # only if not using self.path try: plugin = kurt.plugin.Kurt.get_plugin(extension=extension) except ValueError: pass # build output path if not name: name = _clean_filename(self.name) if not name: raise ValueError, "name is required" filename = name + plugin.extension p.path = os.path.join(folder, filename) # open fp = open(p.path, "wb") else: fp = p.path path = None if not plugin: raise ValueError, "must convert project to a format before saving" for m in p.convert(plugin): print m result = p._save(fp) if path: fp.close() return result if debug else p.path
[ "def", "save", "(", "self", ",", "path", "=", "None", ",", "debug", "=", "False", ")", ":", "p", "=", "self", ".", "copy", "(", ")", "plugin", "=", "p", ".", "_plugin", "# require path", "p", ".", "path", "=", "path", "or", "self", ".", "path", "if", "not", "p", ".", "path", ":", "raise", "ValueError", ",", "\"path is required\"", "if", "isinstance", "(", "p", ".", "path", ",", "basestring", ")", ":", "# split path", "(", "folder", ",", "filename", ")", "=", "os", ".", "path", ".", "split", "(", "p", ".", "path", ")", "(", "name", ",", "extension", ")", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "# get plugin from extension", "if", "path", ":", "# only if not using self.path", "try", ":", "plugin", "=", "kurt", ".", "plugin", ".", "Kurt", ".", "get_plugin", "(", "extension", "=", "extension", ")", "except", "ValueError", ":", "pass", "# build output path", "if", "not", "name", ":", "name", "=", "_clean_filename", "(", "self", ".", "name", ")", "if", "not", "name", ":", "raise", "ValueError", ",", "\"name is required\"", "filename", "=", "name", "+", "plugin", ".", "extension", "p", ".", "path", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "filename", ")", "# open", "fp", "=", "open", "(", "p", ".", "path", ",", "\"wb\"", ")", "else", ":", "fp", "=", "p", ".", "path", "path", "=", "None", "if", "not", "plugin", ":", "raise", "ValueError", ",", "\"must convert project to a format before saving\"", "for", "m", "in", "p", ".", "convert", "(", "plugin", ")", ":", "print", "m", "result", "=", "p", ".", "_save", "(", "fp", ")", "if", "path", ":", "fp", ".", "close", "(", ")", "return", "result", "if", "debug", "else", "p", ".", "path" ]
Save project to file. :param path: Path or file pointer. If you pass a file pointer, you're responsible for closing it. If path is not given, the :attr:`path` attribute is used, usually the original path given to :attr:`load()`. If `path` has the extension of an existing plugin, the project will be converted using :attr:`convert`. Otherwise, the extension will be replaced with the extension of the current plugin. (Note that log output for the conversion will be printed to stdout. If you want to deal with the output, call :attr:`convert` directly.) If the path ends in a folder instead of a file, the filename is based on the project's :attr:`name`. :param debug: If true, return debugging information from the format plugin instead of the path. :raises: :py:class:`ValueError` if there's no path or name. :returns: path to the saved file.
[ "Save", "project", "to", "file", "." ]
python
train
32.933333
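A short usage sketch of the save/convert behaviour described in the docstring, assuming the kurt 2.x API (kurt.Project.load and Project.save); the file names are placeholders. Note that kurt itself is Python 2 code, as the print statements and old-style raise syntax in the record show.

import kurt

project = kurt.Project.load("game.sb")   # format plugin chosen from the extension
# Saving under a different known extension converts the project first;
# conversion log output is printed to stdout by save().
saved_path = project.save("game.sb2")
print(saved_path)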