Dataset schema (field name, type, observed lengths/classes):

repo              string, lengths 7 to 54
path              string, lengths 4 to 192
url               string, lengths 87 to 284
code              string, lengths 78 to 104k
code_tokens       list
docstring         string, lengths 1 to 46.9k
docstring_tokens  list
language          string, 1 value
partition         string, 3 values
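Each row below follows this schema, with the docstring serving as the natural-language description paired with the function in the code field. As a rough illustration only (not part of the original dump), records like these could be consumed from a JSON-lines export as sketched here; the file name python_train.jsonl is a placeholder assumption, as the dump does not state how the rows are stored.

import json

# Placeholder path; each line is assumed to hold one record with the
# fields listed above (repo, path, url, code, code_tokens, docstring,
# docstring_tokens, language, partition).
with open("python_train.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # Print the source location and the start of the paired docstring.
        print(record["repo"], record["path"])
        print(record["docstring"][:80])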
rraadd88/rohan
rohan/dandage/io_strs.py
https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/io_strs.py#L130-L157
def linebreaker(l,break_pt=16): """ used for adding labels in plots. :param l: list of strings :param break_pt: number, insert new line after this many letters """ l_out=[] for i in l: if len(i)>break_pt: i_words=i.split(' ') i_out='' line_len=0 for w in i_words: line_len+=len(w)+1 if i_words.index(w)==0: i_out=w elif line_len>break_pt: line_len=0 i_out="%s\n%s" % (i_out,w) else: i_out="%s %s" % (i_out,w) l_out.append(i_out) # l_out.append("%s\n%s" % (i[:break_pt],i[break_pt:])) else: l_out.append(i) return l_out
[ "def", "linebreaker", "(", "l", ",", "break_pt", "=", "16", ")", ":", "l_out", "=", "[", "]", "for", "i", "in", "l", ":", "if", "len", "(", "i", ")", ">", "break_pt", ":", "i_words", "=", "i", ".", "split", "(", "' '", ")", "i_out", "=", "''", "line_len", "=", "0", "for", "w", "in", "i_words", ":", "line_len", "+=", "len", "(", "w", ")", "+", "1", "if", "i_words", ".", "index", "(", "w", ")", "==", "0", ":", "i_out", "=", "w", "elif", "line_len", ">", "break_pt", ":", "line_len", "=", "0", "i_out", "=", "\"%s\\n%s\"", "%", "(", "i_out", ",", "w", ")", "else", ":", "i_out", "=", "\"%s %s\"", "%", "(", "i_out", ",", "w", ")", "l_out", ".", "append", "(", "i_out", ")", "# l_out.append(\"%s\\n%s\" % (i[:break_pt],i[break_pt:]))", "else", ":", "l_out", ".", "append", "(", "i", ")", "return", "l_out" ]
used for adding labels in plots. :param l: list of strings :param break_pt: number, insert new line after this many letters
[ "used", "for", "adding", "labels", "in", "plots", "." ]
python
train
python-rope/rope
rope/base/resources.py
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/resources.py#L147-L161
def get_children(self): """Return the children of this folder""" try: children = os.listdir(self.real_path) except OSError: return [] result = [] for name in children: try: child = self.get_child(name) except exceptions.ResourceNotFoundError: continue if not self.project.is_ignored(child): result.append(self.get_child(name)) return result
[ "def", "get_children", "(", "self", ")", ":", "try", ":", "children", "=", "os", ".", "listdir", "(", "self", ".", "real_path", ")", "except", "OSError", ":", "return", "[", "]", "result", "=", "[", "]", "for", "name", "in", "children", ":", "try", ":", "child", "=", "self", ".", "get_child", "(", "name", ")", "except", "exceptions", ".", "ResourceNotFoundError", ":", "continue", "if", "not", "self", ".", "project", ".", "is_ignored", "(", "child", ")", ":", "result", ".", "append", "(", "self", ".", "get_child", "(", "name", ")", ")", "return", "result" ]
Return the children of this folder
[ "Return", "the", "children", "of", "this", "folder" ]
python
train
saltstack/salt
salt/returners/mysql.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L386-L407
def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret
[ "def", "get_fun", "(", "fun", ")", ":", "with", "_get_serv", "(", "ret", "=", "None", ",", "commit", "=", "True", ")", "as", "cur", ":", "sql", "=", "'''SELECT s.id,s.jid, s.full_ret\n FROM `salt_returns` s\n JOIN ( SELECT MAX(`jid`) as jid\n from `salt_returns` GROUP BY fun, id) max\n ON s.jid = max.jid\n WHERE s.fun = %s\n '''", "cur", ".", "execute", "(", "sql", ",", "(", "fun", ",", ")", ")", "data", "=", "cur", ".", "fetchall", "(", ")", "ret", "=", "{", "}", "if", "data", ":", "for", "minion", ",", "_", ",", "full_ret", "in", "data", ":", "ret", "[", "minion", "]", "=", "salt", ".", "utils", ".", "json", ".", "loads", "(", "full_ret", ")", "return", "ret" ]
Return a dict of the last function called for all minions
[ "Return", "a", "dict", "of", "the", "last", "function", "called", "for", "all", "minions" ]
python
train
jaredLunde/vital-tools
vital/tools/__init__.py
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/tools/__init__.py#L35-L67
def import_from(name): """ Imports a module, class or method from string and unwraps it if wrapped by functools @name: (#str) name of the python object -> imported object """ obj = name if isinstance(name, str) and len(name): try: obj = locate(name) assert obj is not None except (AttributeError, TypeError, AssertionError, ErrorDuringImport): try: name = name.split(".") attr = name[-1] name = ".".join(name[:-1]) mod = importlib.import_module(name) obj = getattr(mod, attr) except (SyntaxError, AttributeError, ImportError, ValueError): try: name = name.split(".") attr_sup = name[-1] name = ".".join(name[:-1]) mod = importlib.import_module(name) obj = getattr(getattr(mod, attr_sup), attr) except: # We give up. pass obj = unwrap_obj(obj) return obj
[ "def", "import_from", "(", "name", ")", ":", "obj", "=", "name", "if", "isinstance", "(", "name", ",", "str", ")", "and", "len", "(", "name", ")", ":", "try", ":", "obj", "=", "locate", "(", "name", ")", "assert", "obj", "is", "not", "None", "except", "(", "AttributeError", ",", "TypeError", ",", "AssertionError", ",", "ErrorDuringImport", ")", ":", "try", ":", "name", "=", "name", ".", "split", "(", "\".\"", ")", "attr", "=", "name", "[", "-", "1", "]", "name", "=", "\".\"", ".", "join", "(", "name", "[", ":", "-", "1", "]", ")", "mod", "=", "importlib", ".", "import_module", "(", "name", ")", "obj", "=", "getattr", "(", "mod", ",", "attr", ")", "except", "(", "SyntaxError", ",", "AttributeError", ",", "ImportError", ",", "ValueError", ")", ":", "try", ":", "name", "=", "name", ".", "split", "(", "\".\"", ")", "attr_sup", "=", "name", "[", "-", "1", "]", "name", "=", "\".\"", ".", "join", "(", "name", "[", ":", "-", "1", "]", ")", "mod", "=", "importlib", ".", "import_module", "(", "name", ")", "obj", "=", "getattr", "(", "getattr", "(", "mod", ",", "attr_sup", ")", ",", "attr", ")", "except", ":", "# We give up.", "pass", "obj", "=", "unwrap_obj", "(", "obj", ")", "return", "obj" ]
Imports a module, class or method from string and unwraps it if wrapped by functools @name: (#str) name of the python object -> imported object
[ "Imports", "a", "module", "class", "or", "method", "from", "string", "and", "unwraps", "it", "if", "wrapped", "by", "functools" ]
python
train
orb-framework/orb
orb/core/schema.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/schema.py#L107-L120
def ancestry(self): """ Returns the different inherited schemas for this instance. :return [<TableSchema>, ..] """ if not self.inherits(): return [] schema = orb.system.schema(self.inherits()) if not schema: return [] return schema.ancestry() + [schema]
[ "def", "ancestry", "(", "self", ")", ":", "if", "not", "self", ".", "inherits", "(", ")", ":", "return", "[", "]", "schema", "=", "orb", ".", "system", ".", "schema", "(", "self", ".", "inherits", "(", ")", ")", "if", "not", "schema", ":", "return", "[", "]", "return", "schema", ".", "ancestry", "(", ")", "+", "[", "schema", "]" ]
Returns the different inherited schemas for this instance. :return [<TableSchema>, ..]
[ "Returns", "the", "different", "inherited", "schemas", "for", "this", "instance", "." ]
python
train
MozillaSecurity/fuzzfetch
src/fuzzfetch/fetch.py
https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L72-L78
def _si(number): """Format a number using base-2 SI prefixes""" prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'] while number > 1024: number /= 1024.0 prefixes.pop(0) return '%0.2f%s' % (number, prefixes.pop(0))
[ "def", "_si", "(", "number", ")", ":", "prefixes", "=", "[", "''", ",", "'K'", ",", "'M'", ",", "'G'", ",", "'T'", ",", "'P'", ",", "'E'", ",", "'Z'", ",", "'Y'", "]", "while", "number", ">", "1024", ":", "number", "/=", "1024.0", "prefixes", ".", "pop", "(", "0", ")", "return", "'%0.2f%s'", "%", "(", "number", ",", "prefixes", ".", "pop", "(", "0", ")", ")" ]
Format a number using base-2 SI prefixes
[ "Format", "a", "number", "using", "base", "-", "2", "SI", "prefixes" ]
python
train
Dallinger/Dallinger
dallinger/experiment_server/experiment_server.py
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/experiment_server/experiment_server.py#L1386-L1425
def node_transmissions(node_id): """Get all the transmissions of a node. The node id must be specified in the url. You can also pass direction (to/from/all) or status (all/pending/received) as arguments. """ exp = Experiment(session) # get the parameters direction = request_parameter(parameter="direction", default="incoming") status = request_parameter(parameter="status", default="all") for x in [direction, status]: if type(x) == Response: return x # check the node exists node = models.Node.query.get(node_id) if node is None: return error_response(error_type="/node/transmissions, node does not exist") # execute the request transmissions = node.transmissions(direction=direction, status=status) try: if direction in ["incoming", "all"] and status in ["pending", "all"]: node.receive() session.commit() # ping the experiment exp.transmission_get_request(node=node, transmissions=transmissions) session.commit() except Exception: return error_response( error_type="/node/transmissions GET server error", status=403, participant=node.participant, ) # return the data return success_response(transmissions=[t.__json__() for t in transmissions])
[ "def", "node_transmissions", "(", "node_id", ")", ":", "exp", "=", "Experiment", "(", "session", ")", "# get the parameters", "direction", "=", "request_parameter", "(", "parameter", "=", "\"direction\"", ",", "default", "=", "\"incoming\"", ")", "status", "=", "request_parameter", "(", "parameter", "=", "\"status\"", ",", "default", "=", "\"all\"", ")", "for", "x", "in", "[", "direction", ",", "status", "]", ":", "if", "type", "(", "x", ")", "==", "Response", ":", "return", "x", "# check the node exists", "node", "=", "models", ".", "Node", ".", "query", ".", "get", "(", "node_id", ")", "if", "node", "is", "None", ":", "return", "error_response", "(", "error_type", "=", "\"/node/transmissions, node does not exist\"", ")", "# execute the request", "transmissions", "=", "node", ".", "transmissions", "(", "direction", "=", "direction", ",", "status", "=", "status", ")", "try", ":", "if", "direction", "in", "[", "\"incoming\"", ",", "\"all\"", "]", "and", "status", "in", "[", "\"pending\"", ",", "\"all\"", "]", ":", "node", ".", "receive", "(", ")", "session", ".", "commit", "(", ")", "# ping the experiment", "exp", ".", "transmission_get_request", "(", "node", "=", "node", ",", "transmissions", "=", "transmissions", ")", "session", ".", "commit", "(", ")", "except", "Exception", ":", "return", "error_response", "(", "error_type", "=", "\"/node/transmissions GET server error\"", ",", "status", "=", "403", ",", "participant", "=", "node", ".", "participant", ",", ")", "# return the data", "return", "success_response", "(", "transmissions", "=", "[", "t", ".", "__json__", "(", ")", "for", "t", "in", "transmissions", "]", ")" ]
Get all the transmissions of a node. The node id must be specified in the url. You can also pass direction (to/from/all) or status (all/pending/received) as arguments.
[ "Get", "all", "the", "transmissions", "of", "a", "node", "." ]
python
train
jgoodlet/punter
punter/helpers.py
https://github.com/jgoodlet/punter/blob/605ee52a1e5019b360dd643f4bf6861aefa93812/punter/helpers.py#L38-L56
def get_query_type(query): """Gets the type of query in use (email or url). In order to provide a proper API endpoint it is necessary to first determine the type of query the client is using. There are only two options currently: 1) domain or 2) email. :param query: Search query provided by client. """ if URL_RE.match(query): query_type = 'domain' elif EMAIL_RE.match(query): query_type = 'email' else: query_type = '' return query_type
[ "def", "get_query_type", "(", "query", ")", ":", "if", "URL_RE", ".", "match", "(", "query", ")", ":", "query_type", "=", "'domain'", "elif", "EMAIL_RE", ".", "match", "(", "query", ")", ":", "query_type", "=", "'email'", "else", ":", "query_type", "=", "''", "return", "query_type" ]
Gets the type of query in use (email or url). In order to provide a proper API endpoint it is necessary to first determine the type of query the client is using. There are only two options currently: 1) domain or 2) email. :param query: Search query provided by client.
[ "Gets", "the", "type", "of", "query", "in", "use", "(", "email", "or", "url", ")", "." ]
python
train
ClericPy/torequests
torequests/main.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/main.py#L182-L200
def submit(self, func, *args, **kwargs): """Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`""" with self._shutdown_lock: if self._shutdown: raise RuntimeError("cannot schedule new futures after shutdown") callback = kwargs.pop("callback", self.default_callback) future = NewFuture( self._timeout, args, kwargs, callback=callback, catch_exception=self.catch_exception, ) w = _WorkItem(future, func, args, kwargs) self._work_queue.put(w) self._adjust_thread_count() self._all_futures.add(future) return future
[ "def", "submit", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "_shutdown_lock", ":", "if", "self", ".", "_shutdown", ":", "raise", "RuntimeError", "(", "\"cannot schedule new futures after shutdown\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "\"callback\"", ",", "self", ".", "default_callback", ")", "future", "=", "NewFuture", "(", "self", ".", "_timeout", ",", "args", ",", "kwargs", ",", "callback", "=", "callback", ",", "catch_exception", "=", "self", ".", "catch_exception", ",", ")", "w", "=", "_WorkItem", "(", "future", ",", "func", ",", "args", ",", "kwargs", ")", "self", ".", "_work_queue", ".", "put", "(", "w", ")", "self", ".", "_adjust_thread_count", "(", ")", "self", ".", "_all_futures", ".", "add", "(", "future", ")", "return", "future" ]
Submit a function to the pool, `self.submit(function,arg1,arg2,arg3=3)`
[ "Submit", "a", "function", "to", "the", "pool", "self", ".", "submit", "(", "function", "arg1", "arg2", "arg3", "=", "3", ")" ]
python
train
craffel/mir_eval
mir_eval/pattern.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L390-L520
def three_layer_FPR(reference_patterns, estimated_patterns): """Three Layer F1 Score, Precision and Recall. As described by Meridith. Examples -------- >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt") >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt") >>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns, ... est_patterns) Parameters ---------- reference_patterns : list The reference patterns in the format returned by :func:`mir_eval.io.load_patterns()` estimated_patterns : list The estimated patterns in the same format Returns ------- f_measure : float The three-layer F1 Score precision : float The three-layer Precision recall : float The three-layer Recall """ validate(reference_patterns, estimated_patterns) def compute_first_layer_PR(ref_occs, est_occs): """Computes the first layer Precision and Recall values given the set of occurrences in the reference and the set of occurrences in the estimation. Parameters ---------- ref_occs : est_occs : Returns ------- """ # Find the length of the intersection between reference and estimation s = len(_occurrence_intersection(ref_occs, est_occs)) # Compute the first layer scores precision = s / float(len(ref_occs)) recall = s / float(len(est_occs)) return precision, recall def compute_second_layer_PR(ref_pattern, est_pattern): """Computes the second layer Precision and Recall values given the set of occurrences in the reference and the set of occurrences in the estimation. Parameters ---------- ref_pattern : est_pattern : Returns ------- """ # Compute the first layer scores F_1 = compute_layer(ref_pattern, est_pattern) # Compute the second layer scores precision = np.mean(np.max(F_1, axis=0)) recall = np.mean(np.max(F_1, axis=1)) return precision, recall def compute_layer(ref_elements, est_elements, layer=1): """Computes the F-measure matrix for a given layer. The reference and estimated elements can be either patters or occurrences, depending on the layer. For layer 1, the elements must be occurrences. For layer 2, the elements must be patterns. Parameters ---------- ref_elements : est_elements : layer : (Default value = 1) Returns ------- """ if layer != 1 and layer != 2: raise ValueError("Layer (%d) must be an integer between 1 and 2" % layer) nP = len(ref_elements) # Number of elements in reference nQ = len(est_elements) # Number of elements in estimation F = np.zeros((nP, nQ)) # F-measure matrix for the given layer for iP in range(nP): for iQ in range(nQ): if layer == 1: func = compute_first_layer_PR elif layer == 2: func = compute_second_layer_PR # Compute layer scores precision, recall = func(ref_elements[iP], est_elements[iQ]) F[iP, iQ] = util.f_measure(precision, recall) return F # If no patterns were provided, metric is zero if _n_onset_midi(reference_patterns) == 0 or \ _n_onset_midi(estimated_patterns) == 0: return 0., 0., 0. # Compute the second layer (it includes the first layer) F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2) # Compute the final scores (third layer) precision_3 = np.mean(np.max(F_2, axis=0)) recall_3 = np.mean(np.max(F_2, axis=1)) f_measure_3 = util.f_measure(precision_3, recall_3) return f_measure_3, precision_3, recall_3
[ "def", "three_layer_FPR", "(", "reference_patterns", ",", "estimated_patterns", ")", ":", "validate", "(", "reference_patterns", ",", "estimated_patterns", ")", "def", "compute_first_layer_PR", "(", "ref_occs", ",", "est_occs", ")", ":", "\"\"\"Computes the first layer Precision and Recall values given the\n set of occurrences in the reference and the set of occurrences in the\n estimation.\n\n Parameters\n ----------\n ref_occs :\n\n est_occs :\n\n\n Returns\n -------\n\n \"\"\"", "# Find the length of the intersection between reference and estimation", "s", "=", "len", "(", "_occurrence_intersection", "(", "ref_occs", ",", "est_occs", ")", ")", "# Compute the first layer scores", "precision", "=", "s", "/", "float", "(", "len", "(", "ref_occs", ")", ")", "recall", "=", "s", "/", "float", "(", "len", "(", "est_occs", ")", ")", "return", "precision", ",", "recall", "def", "compute_second_layer_PR", "(", "ref_pattern", ",", "est_pattern", ")", ":", "\"\"\"Computes the second layer Precision and Recall values given the\n set of occurrences in the reference and the set of occurrences in the\n estimation.\n\n Parameters\n ----------\n ref_pattern :\n\n est_pattern :\n\n\n Returns\n -------\n\n \"\"\"", "# Compute the first layer scores", "F_1", "=", "compute_layer", "(", "ref_pattern", ",", "est_pattern", ")", "# Compute the second layer scores", "precision", "=", "np", ".", "mean", "(", "np", ".", "max", "(", "F_1", ",", "axis", "=", "0", ")", ")", "recall", "=", "np", ".", "mean", "(", "np", ".", "max", "(", "F_1", ",", "axis", "=", "1", ")", ")", "return", "precision", ",", "recall", "def", "compute_layer", "(", "ref_elements", ",", "est_elements", ",", "layer", "=", "1", ")", ":", "\"\"\"Computes the F-measure matrix for a given layer. The reference and\n estimated elements can be either patters or occurrences, depending\n on the layer.\n\n For layer 1, the elements must be occurrences.\n For layer 2, the elements must be patterns.\n\n Parameters\n ----------\n ref_elements :\n\n est_elements :\n\n layer :\n (Default value = 1)\n\n Returns\n -------\n\n \"\"\"", "if", "layer", "!=", "1", "and", "layer", "!=", "2", ":", "raise", "ValueError", "(", "\"Layer (%d) must be an integer between 1 and 2\"", "%", "layer", ")", "nP", "=", "len", "(", "ref_elements", ")", "# Number of elements in reference", "nQ", "=", "len", "(", "est_elements", ")", "# Number of elements in estimation", "F", "=", "np", ".", "zeros", "(", "(", "nP", ",", "nQ", ")", ")", "# F-measure matrix for the given layer", "for", "iP", "in", "range", "(", "nP", ")", ":", "for", "iQ", "in", "range", "(", "nQ", ")", ":", "if", "layer", "==", "1", ":", "func", "=", "compute_first_layer_PR", "elif", "layer", "==", "2", ":", "func", "=", "compute_second_layer_PR", "# Compute layer scores", "precision", ",", "recall", "=", "func", "(", "ref_elements", "[", "iP", "]", ",", "est_elements", "[", "iQ", "]", ")", "F", "[", "iP", ",", "iQ", "]", "=", "util", ".", "f_measure", "(", "precision", ",", "recall", ")", "return", "F", "# If no patterns were provided, metric is zero", "if", "_n_onset_midi", "(", "reference_patterns", ")", "==", "0", "or", "_n_onset_midi", "(", "estimated_patterns", ")", "==", "0", ":", "return", "0.", ",", "0.", ",", "0.", "# Compute the second layer (it includes the first layer)", "F_2", "=", "compute_layer", "(", "reference_patterns", ",", "estimated_patterns", ",", "layer", "=", "2", ")", "# Compute the final scores (third layer)", "precision_3", "=", "np", ".", "mean", "(", "np", ".", "max", "(", "F_2", ",", 
"axis", "=", "0", ")", ")", "recall_3", "=", "np", ".", "mean", "(", "np", ".", "max", "(", "F_2", ",", "axis", "=", "1", ")", ")", "f_measure_3", "=", "util", ".", "f_measure", "(", "precision_3", ",", "recall_3", ")", "return", "f_measure_3", ",", "precision_3", ",", "recall_3" ]
Three Layer F1 Score, Precision and Recall. As described by Meridith. Examples -------- >>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt") >>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt") >>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns, ... est_patterns) Parameters ---------- reference_patterns : list The reference patterns in the format returned by :func:`mir_eval.io.load_patterns()` estimated_patterns : list The estimated patterns in the same format Returns ------- f_measure : float The three-layer F1 Score precision : float The three-layer Precision recall : float The three-layer Recall
[ "Three", "Layer", "F1", "Score", "Precision", "and", "Recall", ".", "As", "described", "by", "Meridith", "." ]
python
train
jssimporter/python-jss
jss/distribution_point.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/distribution_point.py#L399-L408
def _copy(self, filename, destination): """Copy a file or folder to the repository. Will mount if needed. Args: filename: Path to copy. destination: Remote path to copy file to. """ super(MountedRepository, self)._copy(filename, destination)
[ "def", "_copy", "(", "self", ",", "filename", ",", "destination", ")", ":", "super", "(", "MountedRepository", ",", "self", ")", ".", "_copy", "(", "filename", ",", "destination", ")" ]
Copy a file or folder to the repository. Will mount if needed. Args: filename: Path to copy. destination: Remote path to copy file to.
[ "Copy", "a", "file", "or", "folder", "to", "the", "repository", "." ]
python
train
briancappello/flask-unchained
flask_unchained/string_utils.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/string_utils.py#L72-L84
def slugify(string): """ Converts a string to a valid slug. For example:: slugify('Hello World') -> 'hello-world' """ if not string: return string string = re.sub(r'[^\w\s-]', '', unicodedata.normalize('NFKD', de_camel(string, '-')) .encode('ascii', 'ignore') .decode('ascii')).strip() return re.sub(r'[-_\s]+', '-', string).strip('-').lower()
[ "def", "slugify", "(", "string", ")", ":", "if", "not", "string", ":", "return", "string", "string", "=", "re", ".", "sub", "(", "r'[^\\w\\s-]'", ",", "''", ",", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "de_camel", "(", "string", ",", "'-'", ")", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "decode", "(", "'ascii'", ")", ")", ".", "strip", "(", ")", "return", "re", ".", "sub", "(", "r'[-_\\s]+'", ",", "'-'", ",", "string", ")", ".", "strip", "(", "'-'", ")", ".", "lower", "(", ")" ]
Converts a string to a valid slug. For example:: slugify('Hello World') -> 'hello-world'
[ "Converts", "a", "string", "to", "a", "valid", "slug", ".", "For", "example", "::" ]
python
train
dswah/pyGAM
pygam/utils.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L432-L453
def round_to_n_decimal_places(array, n=3): """ tool to keep round a float to n decimal places. n=3 by default Parameters ---------- array : np.array n : int. number of decimal places to keep Returns ------- array : rounded np.array """ # check if in scientific notation if issubclass(array.__class__, float) and '%.e'%array == str(array): return array # do nothing shape = np.shape(array) out = ((np.atleast_1d(array) * 10**n).round().astype('int') / (10.**n)) return out.reshape(shape)
[ "def", "round_to_n_decimal_places", "(", "array", ",", "n", "=", "3", ")", ":", "# check if in scientific notation", "if", "issubclass", "(", "array", ".", "__class__", ",", "float", ")", "and", "'%.e'", "%", "array", "==", "str", "(", "array", ")", ":", "return", "array", "# do nothing", "shape", "=", "np", ".", "shape", "(", "array", ")", "out", "=", "(", "(", "np", ".", "atleast_1d", "(", "array", ")", "*", "10", "**", "n", ")", ".", "round", "(", ")", ".", "astype", "(", "'int'", ")", "/", "(", "10.", "**", "n", ")", ")", "return", "out", ".", "reshape", "(", "shape", ")" ]
tool to keep round a float to n decimal places. n=3 by default Parameters ---------- array : np.array n : int. number of decimal places to keep Returns ------- array : rounded np.array
[ "tool", "to", "keep", "round", "a", "float", "to", "n", "decimal", "places", "." ]
python
train
log2timeline/plaso
plaso/parsers/dpkg.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/dpkg.py#L167-L185
def VerifyStructure(self, parser_mediator, line): """Verifies if a line from a text file is in the expected format. Args: parser_mediator (ParserMediator): parser mediator. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not. """ try: structure = self._DPKG_LOG_LINE.parseString(line) except pyparsing.ParseException as exception: logger.debug( 'Unable to parse Debian dpkg.log file with error: {0!s}'.format( exception)) return False return 'date_time' in structure and 'body' in structure
[ "def", "VerifyStructure", "(", "self", ",", "parser_mediator", ",", "line", ")", ":", "try", ":", "structure", "=", "self", ".", "_DPKG_LOG_LINE", ".", "parseString", "(", "line", ")", "except", "pyparsing", ".", "ParseException", "as", "exception", ":", "logger", ".", "debug", "(", "'Unable to parse Debian dpkg.log file with error: {0!s}'", ".", "format", "(", "exception", ")", ")", "return", "False", "return", "'date_time'", "in", "structure", "and", "'body'", "in", "structure" ]
Verifies if a line from a text file is in the expected format. Args: parser_mediator (ParserMediator): parser mediator. line (str): line from a text file. Returns: bool: True if the line is in the expected format, False if not.
[ "Verifies", "if", "a", "line", "from", "a", "text", "file", "is", "in", "the", "expected", "format", "." ]
python
train
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L1103-L1119
def with_wait_cursor(func): """ Show a wait cursor while the wrapped function is running. The cursor is restored as soon as the function exits. :param func: wrapped function """ @functools.wraps(func) def wrapper(*args, **kwargs): QApplication.setOverrideCursor( QCursor(Qt.WaitCursor)) try: ret_val = func(*args, **kwargs) finally: QApplication.restoreOverrideCursor() return ret_val return wrapper
[ "def", "with_wait_cursor", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "QApplication", ".", "setOverrideCursor", "(", "QCursor", "(", "Qt", ".", "WaitCursor", ")", ")", "try", ":", "ret_val", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "QApplication", ".", "restoreOverrideCursor", "(", ")", "return", "ret_val", "return", "wrapper" ]
Show a wait cursor while the wrapped function is running. The cursor is restored as soon as the function exits. :param func: wrapped function
[ "Show", "a", "wait", "cursor", "while", "the", "wrapped", "function", "is", "running", ".", "The", "cursor", "is", "restored", "as", "soon", "as", "the", "function", "exits", "." ]
python
train
drericstrong/pypbe
pypbe/core.py
https://github.com/drericstrong/pypbe/blob/0911caedb2e4b3932ad3e27c8c171cb67f1c0df9/pypbe/core.py#L401-L425
def plot_histogram(self, title_prefix="", title_override="", figsize=(8, 6)): """ Plots a histogram of the results after the Monte Carlo simulation is run. NOTE- This method must be called AFTER "roll_mc". :param title_prefix: If desired, prefix the title (such as "Alg 1") :param title_override: Override the title string entirely :param figsize: The size of the histogram plot :return: a seaborn figure of the histogram """ # Check that roll_mc has been called if not self.arr_res: raise ValueError("Call roll_mc before plotting the histogram.") # Find a title using either the override or _construct_title method if title_override: title = title_override else: title = title_prefix + PBE._construct_title(self.num_dice, self.dice_type, self.add_val, self.num_attribute, self.keep_attribute, self.keep_dice, self.reroll, self.num_arrays) # Construct the histogram f = self._plot_hist(self.arr_res, self.pbe_res, title, figsize) return f
[ "def", "plot_histogram", "(", "self", ",", "title_prefix", "=", "\"\"", ",", "title_override", "=", "\"\"", ",", "figsize", "=", "(", "8", ",", "6", ")", ")", ":", "# Check that roll_mc has been called\r", "if", "not", "self", ".", "arr_res", ":", "raise", "ValueError", "(", "\"Call roll_mc before plotting the histogram.\"", ")", "# Find a title using either the override or _construct_title method\r", "if", "title_override", ":", "title", "=", "title_override", "else", ":", "title", "=", "title_prefix", "+", "PBE", ".", "_construct_title", "(", "self", ".", "num_dice", ",", "self", ".", "dice_type", ",", "self", ".", "add_val", ",", "self", ".", "num_attribute", ",", "self", ".", "keep_attribute", ",", "self", ".", "keep_dice", ",", "self", ".", "reroll", ",", "self", ".", "num_arrays", ")", "# Construct the histogram\r", "f", "=", "self", ".", "_plot_hist", "(", "self", ".", "arr_res", ",", "self", ".", "pbe_res", ",", "title", ",", "figsize", ")", "return", "f" ]
Plots a histogram of the results after the Monte Carlo simulation is run. NOTE- This method must be called AFTER "roll_mc". :param title_prefix: If desired, prefix the title (such as "Alg 1") :param title_override: Override the title string entirely :param figsize: The size of the histogram plot :return: a seaborn figure of the histogram
[ "Plots", "a", "histogram", "of", "the", "results", "after", "the", "Monte", "Carlo", "simulation", "is", "run", ".", "NOTE", "-", "This", "method", "must", "be", "called", "AFTER", "roll_mc", ".", ":", "param", "title_prefix", ":", "If", "desired", "prefix", "the", "title", "(", "such", "as", "Alg", "1", ")", ":", "param", "title_override", ":", "Override", "the", "title", "string", "entirely", ":", "param", "figsize", ":", "The", "size", "of", "the", "histogram", "plot", ":", "return", ":", "a", "seaborn", "figure", "of", "the", "histogram" ]
python
train
apache/spark
python/pyspark/sql/session.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L461-L482
def _get_numpy_record_dtype(self, rec): """ Used when converting a pandas.DataFrame to Spark using to_records(), this will correct the dtypes of fields in a record so they can be properly loaded into Spark. :param rec: a numpy record to check field dtypes :return corrected dtype for a numpy.record or None if no correction needed """ import numpy as np cur_dtypes = rec.dtype col_names = cur_dtypes.names record_type_list = [] has_rec_fix = False for i in xrange(len(cur_dtypes)): curr_type = cur_dtypes[i] # If type is a datetime64 timestamp, convert to microseconds # NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs, # conversion from [us] or lower will lead to py datetime objects, see SPARK-22417 if curr_type == np.dtype('datetime64[ns]'): curr_type = 'datetime64[us]' has_rec_fix = True record_type_list.append((str(col_names[i]), curr_type)) return np.dtype(record_type_list) if has_rec_fix else None
[ "def", "_get_numpy_record_dtype", "(", "self", ",", "rec", ")", ":", "import", "numpy", "as", "np", "cur_dtypes", "=", "rec", ".", "dtype", "col_names", "=", "cur_dtypes", ".", "names", "record_type_list", "=", "[", "]", "has_rec_fix", "=", "False", "for", "i", "in", "xrange", "(", "len", "(", "cur_dtypes", ")", ")", ":", "curr_type", "=", "cur_dtypes", "[", "i", "]", "# If type is a datetime64 timestamp, convert to microseconds", "# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,", "# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417", "if", "curr_type", "==", "np", ".", "dtype", "(", "'datetime64[ns]'", ")", ":", "curr_type", "=", "'datetime64[us]'", "has_rec_fix", "=", "True", "record_type_list", ".", "append", "(", "(", "str", "(", "col_names", "[", "i", "]", ")", ",", "curr_type", ")", ")", "return", "np", ".", "dtype", "(", "record_type_list", ")", "if", "has_rec_fix", "else", "None" ]
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct the dtypes of fields in a record so they can be properly loaded into Spark. :param rec: a numpy record to check field dtypes :return corrected dtype for a numpy.record or None if no correction needed
[ "Used", "when", "converting", "a", "pandas", ".", "DataFrame", "to", "Spark", "using", "to_records", "()", "this", "will", "correct", "the", "dtypes", "of", "fields", "in", "a", "record", "so", "they", "can", "be", "properly", "loaded", "into", "Spark", ".", ":", "param", "rec", ":", "a", "numpy", "record", "to", "check", "field", "dtypes", ":", "return", "corrected", "dtype", "for", "a", "numpy", ".", "record", "or", "None", "if", "no", "correction", "needed" ]
python
train
boriel/zxbasic
api/constants.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/api/constants.py#L166-L170
def to_type(cls, typename): """ Converts a type ID to name. On error returns None """ NAME_TYPES = {cls.TYPE_NAMES[x]: x for x in cls.TYPE_NAMES} return NAME_TYPES.get(typename, None)
[ "def", "to_type", "(", "cls", ",", "typename", ")", ":", "NAME_TYPES", "=", "{", "cls", ".", "TYPE_NAMES", "[", "x", "]", ":", "x", "for", "x", "in", "cls", ".", "TYPE_NAMES", "}", "return", "NAME_TYPES", ".", "get", "(", "typename", ",", "None", ")" ]
Converts a type ID to name. On error returns None
[ "Converts", "a", "type", "ID", "to", "name", ".", "On", "error", "returns", "None" ]
python
train
beathan/django-akamai
django_akamai/purge.py
https://github.com/beathan/django-akamai/blob/00cab2dd5fab3745742721185e75a55a5c26fe7e/django_akamai/purge.py#L68-L106
def load_edgegrid_client_settings(): '''Load Akamai EdgeGrid configuration returns a (hostname, EdgeGridAuth) tuple from the following locations: 1. Values specified directly in the Django settings:: AKAMAI_CCU_CLIENT_SECRET AKAMAI_CCU_HOST AKAMAI_CCU_ACCESS_TOKEN AKAMAI_CCU_CLIENT_TOKEN 2. An edgerc file specified in the AKAMAI_EDGERC_FILENAME settings 3. The default ~/.edgerc file Both edgerc file load options will return the values from the “CCU” section by default. This may be customized using the AKAMAI_EDGERC_CCU_SECTION setting. ''' if getattr(settings, 'AKAMAI_CCU_CLIENT_SECRET', None): # If the settings module has the values directly and they are not empty # we'll use them without checking for an edgerc file: host = settings.AKAMAI_CCU_HOST auth = EdgeGridAuth(access_token=settings.AKAMAI_CCU_ACCESS_TOKEN, client_token=settings.AKAMAI_CCU_CLIENT_TOKEN, client_secret=settings.AKAMAI_CCU_CLIENT_SECRET) return host, auth else: edgerc_section = getattr(settings, 'AKAMAI_EDGERC_CCU_SECTION', 'CCU') edgerc_path = getattr(settings, 'AKAMAI_EDGERC_FILENAME', '~/.edgerc') edgerc_path = os.path.expanduser(edgerc_path) if os.path.isfile(edgerc_path): edgerc = EdgeRc(edgerc_path) host = edgerc.get(edgerc_section, 'host') auth = EdgeGridAuth.from_edgerc(edgerc, section=edgerc_section) return host, auth raise InvalidAkamaiConfiguration('Cannot find Akamai client configuration!')
[ "def", "load_edgegrid_client_settings", "(", ")", ":", "if", "getattr", "(", "settings", ",", "'AKAMAI_CCU_CLIENT_SECRET'", ",", "None", ")", ":", "# If the settings module has the values directly and they are not empty", "# we'll use them without checking for an edgerc file:", "host", "=", "settings", ".", "AKAMAI_CCU_HOST", "auth", "=", "EdgeGridAuth", "(", "access_token", "=", "settings", ".", "AKAMAI_CCU_ACCESS_TOKEN", ",", "client_token", "=", "settings", ".", "AKAMAI_CCU_CLIENT_TOKEN", ",", "client_secret", "=", "settings", ".", "AKAMAI_CCU_CLIENT_SECRET", ")", "return", "host", ",", "auth", "else", ":", "edgerc_section", "=", "getattr", "(", "settings", ",", "'AKAMAI_EDGERC_CCU_SECTION'", ",", "'CCU'", ")", "edgerc_path", "=", "getattr", "(", "settings", ",", "'AKAMAI_EDGERC_FILENAME'", ",", "'~/.edgerc'", ")", "edgerc_path", "=", "os", ".", "path", ".", "expanduser", "(", "edgerc_path", ")", "if", "os", ".", "path", ".", "isfile", "(", "edgerc_path", ")", ":", "edgerc", "=", "EdgeRc", "(", "edgerc_path", ")", "host", "=", "edgerc", ".", "get", "(", "edgerc_section", ",", "'host'", ")", "auth", "=", "EdgeGridAuth", ".", "from_edgerc", "(", "edgerc", ",", "section", "=", "edgerc_section", ")", "return", "host", ",", "auth", "raise", "InvalidAkamaiConfiguration", "(", "'Cannot find Akamai client configuration!'", ")" ]
Load Akamai EdgeGrid configuration returns a (hostname, EdgeGridAuth) tuple from the following locations: 1. Values specified directly in the Django settings:: AKAMAI_CCU_CLIENT_SECRET AKAMAI_CCU_HOST AKAMAI_CCU_ACCESS_TOKEN AKAMAI_CCU_CLIENT_TOKEN 2. An edgerc file specified in the AKAMAI_EDGERC_FILENAME settings 3. The default ~/.edgerc file Both edgerc file load options will return the values from the “CCU” section by default. This may be customized using the AKAMAI_EDGERC_CCU_SECTION setting.
[ "Load", "Akamai", "EdgeGrid", "configuration" ]
python
train
NoviceLive/intellicoder
intellicoder/utils.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/utils.py#L238-L245
def hash_func(name): """Hash the string using a hash algorithm found in tombkeeper/Shellcode_Template_in_C. """ ret = 0 for char in name: ret = ((ret << 5) + ret + ord(char)) & 0xffffffff return hex(ret)
[ "def", "hash_func", "(", "name", ")", ":", "ret", "=", "0", "for", "char", "in", "name", ":", "ret", "=", "(", "(", "ret", "<<", "5", ")", "+", "ret", "+", "ord", "(", "char", ")", ")", "&", "0xffffffff", "return", "hex", "(", "ret", ")" ]
Hash the string using a hash algorithm found in tombkeeper/Shellcode_Template_in_C.
[ "Hash", "the", "string", "using", "a", "hash", "algorithm", "found", "in", "tombkeeper", "/", "Shellcode_Template_in_C", "." ]
python
train
cloudera/cm_api
python/examples/aws.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/examples/aws.py#L129-L137
def initialize_api(args): """ Initializes the global API instance using the given arguments. @param args: arguments provided to the script. """ global api api = ApiResource(server_host=args.hostname, server_port=args.port, username=args.username, password=args.password, version=args.api_version, use_tls=args.use_tls)
[ "def", "initialize_api", "(", "args", ")", ":", "global", "api", "api", "=", "ApiResource", "(", "server_host", "=", "args", ".", "hostname", ",", "server_port", "=", "args", ".", "port", ",", "username", "=", "args", ".", "username", ",", "password", "=", "args", ".", "password", ",", "version", "=", "args", ".", "api_version", ",", "use_tls", "=", "args", ".", "use_tls", ")" ]
Initializes the global API instance using the given arguments. @param args: arguments provided to the script.
[ "Initializes", "the", "global", "API", "instance", "using", "the", "given", "arguments", "." ]
python
train
sosreport/sos
sos/sosreport.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/sosreport.py#L831-L849
def del_preset(self, name): """Delete a named command line preset. :param name: the name of the preset to delete :returns: True on success or False otherwise """ policy = self.policy if not policy.find_preset(name): self.ui_log.error("Preset '%s' not found" % name) return False try: policy.del_preset(name=name) except Exception as e: self.ui_log.error(str(e) + "\n") return False self.ui_log.info("Deleted preset '%s'\n" % name) return True
[ "def", "del_preset", "(", "self", ",", "name", ")", ":", "policy", "=", "self", ".", "policy", "if", "not", "policy", ".", "find_preset", "(", "name", ")", ":", "self", ".", "ui_log", ".", "error", "(", "\"Preset '%s' not found\"", "%", "name", ")", "return", "False", "try", ":", "policy", ".", "del_preset", "(", "name", "=", "name", ")", "except", "Exception", "as", "e", ":", "self", ".", "ui_log", ".", "error", "(", "str", "(", "e", ")", "+", "\"\\n\"", ")", "return", "False", "self", ".", "ui_log", ".", "info", "(", "\"Deleted preset '%s'\\n\"", "%", "name", ")", "return", "True" ]
Delete a named command line preset. :param name: the name of the preset to delete :returns: True on success or False otherwise
[ "Delete", "a", "named", "command", "line", "preset", "." ]
python
train
apache/incubator-mxnet
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py#L946-L961
def convert_dropout(node, **kwargs): """Map MXNet's Dropout operator attributes to onnx's Dropout operator and return the created node. """ name, input_nodes, attrs = get_inputs(node, kwargs) probability = float(attrs.get("p", 0.5)) dropout_node = onnx.helper.make_node( "Dropout", input_nodes, [name], ratio=probability, name=name ) return [dropout_node]
[ "def", "convert_dropout", "(", "node", ",", "*", "*", "kwargs", ")", ":", "name", ",", "input_nodes", ",", "attrs", "=", "get_inputs", "(", "node", ",", "kwargs", ")", "probability", "=", "float", "(", "attrs", ".", "get", "(", "\"p\"", ",", "0.5", ")", ")", "dropout_node", "=", "onnx", ".", "helper", ".", "make_node", "(", "\"Dropout\"", ",", "input_nodes", ",", "[", "name", "]", ",", "ratio", "=", "probability", ",", "name", "=", "name", ")", "return", "[", "dropout_node", "]" ]
Map MXNet's Dropout operator attributes to onnx's Dropout operator and return the created node.
[ "Map", "MXNet", "s", "Dropout", "operator", "attributes", "to", "onnx", "s", "Dropout", "operator", "and", "return", "the", "created", "node", "." ]
python
train
pudo-attic/scrapekit
scrapekit/http.py
https://github.com/pudo-attic/scrapekit/blob/cfd258120922fcd571430cdf00ba50f3cf18dc15/scrapekit/http.py#L18-L27
def html(self): """ Create an ``lxml``-based HTML DOM from the response. The tree will not have a root, so all queries need to be relative (i.e. start with a dot). """ try: from lxml import html return html.fromstring(self.content) except ImportError as ie: raise DependencyException(ie)
[ "def", "html", "(", "self", ")", ":", "try", ":", "from", "lxml", "import", "html", "return", "html", ".", "fromstring", "(", "self", ".", "content", ")", "except", "ImportError", "as", "ie", ":", "raise", "DependencyException", "(", "ie", ")" ]
Create an ``lxml``-based HTML DOM from the response. The tree will not have a root, so all queries need to be relative (i.e. start with a dot).
[ "Create", "an", "lxml", "-", "based", "HTML", "DOM", "from", "the", "response", ".", "The", "tree", "will", "not", "have", "a", "root", "so", "all", "queries", "need", "to", "be", "relative", "(", "i", ".", "e", ".", "start", "with", "a", "dot", ")", "." ]
python
train
adafruit/Adafruit_Python_PureIO
Adafruit_PureIO/smbus.py
https://github.com/adafruit/Adafruit_Python_PureIO/blob/6f4976d91c52d70b67b28bba75a429b5328a52c1/Adafruit_PureIO/smbus.py#L283-L294
def write_i2c_block_data(self, addr, cmd, vals): """Write a buffer of data to the specified cmd register of the device. """ assert self._device is not None, 'Bus must be opened before operations are made against it!' # Construct a string of data to send, including room for the command register. data = bytearray(len(vals)+1) data[0] = cmd & 0xFF # Command register at the start. data[1:] = vals[0:] # Copy in the block data (ugly but necessary to ensure # the entire write happens in one transaction). # Send the data to the device. self._select_device(addr) self._device.write(data)
[ "def", "write_i2c_block_data", "(", "self", ",", "addr", ",", "cmd", ",", "vals", ")", ":", "assert", "self", ".", "_device", "is", "not", "None", ",", "'Bus must be opened before operations are made against it!'", "# Construct a string of data to send, including room for the command register.", "data", "=", "bytearray", "(", "len", "(", "vals", ")", "+", "1", ")", "data", "[", "0", "]", "=", "cmd", "&", "0xFF", "# Command register at the start.", "data", "[", "1", ":", "]", "=", "vals", "[", "0", ":", "]", "# Copy in the block data (ugly but necessary to ensure", "# the entire write happens in one transaction).", "# Send the data to the device.", "self", ".", "_select_device", "(", "addr", ")", "self", ".", "_device", ".", "write", "(", "data", ")" ]
Write a buffer of data to the specified cmd register of the device.
[ "Write", "a", "buffer", "of", "data", "to", "the", "specified", "cmd", "register", "of", "the", "device", "." ]
python
test
SecurityInnovation/PGPy
pgpy/pgp.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L1317-L1348
def pubkey(self): """If the :py:obj:`PGPKey` object is a private key, this method returns a corresponding public key object with all the trimmings. Otherwise, returns ``None`` """ if not self.is_public: if self._sibling is None or isinstance(self._sibling, weakref.ref): # create a new key shell pub = PGPKey() pub.ascii_headers = self.ascii_headers.copy() # get the public half of the primary key pub._key = self._key.pubkey() # get the public half of each subkey for skid, subkey in self.subkeys.items(): pub |= subkey.pubkey # copy user ids and user attributes for uid in self._uids: pub |= copy.copy(uid) # copy signatures that weren't copied with uids for sig in self._signatures: if sig.parent is None: pub |= copy.copy(sig) # keep connect the two halves using a weak reference self._sibling = weakref.ref(pub) pub._sibling = weakref.ref(self) return self._sibling() return None
[ "def", "pubkey", "(", "self", ")", ":", "if", "not", "self", ".", "is_public", ":", "if", "self", ".", "_sibling", "is", "None", "or", "isinstance", "(", "self", ".", "_sibling", ",", "weakref", ".", "ref", ")", ":", "# create a new key shell", "pub", "=", "PGPKey", "(", ")", "pub", ".", "ascii_headers", "=", "self", ".", "ascii_headers", ".", "copy", "(", ")", "# get the public half of the primary key", "pub", ".", "_key", "=", "self", ".", "_key", ".", "pubkey", "(", ")", "# get the public half of each subkey", "for", "skid", ",", "subkey", "in", "self", ".", "subkeys", ".", "items", "(", ")", ":", "pub", "|=", "subkey", ".", "pubkey", "# copy user ids and user attributes", "for", "uid", "in", "self", ".", "_uids", ":", "pub", "|=", "copy", ".", "copy", "(", "uid", ")", "# copy signatures that weren't copied with uids", "for", "sig", "in", "self", ".", "_signatures", ":", "if", "sig", ".", "parent", "is", "None", ":", "pub", "|=", "copy", ".", "copy", "(", "sig", ")", "# keep connect the two halves using a weak reference", "self", ".", "_sibling", "=", "weakref", ".", "ref", "(", "pub", ")", "pub", ".", "_sibling", "=", "weakref", ".", "ref", "(", "self", ")", "return", "self", ".", "_sibling", "(", ")", "return", "None" ]
If the :py:obj:`PGPKey` object is a private key, this method returns a corresponding public key object with all the trimmings. Otherwise, returns ``None``
[ "If", "the", ":", "py", ":", "obj", ":", "PGPKey", "object", "is", "a", "private", "key", "this", "method", "returns", "a", "corresponding", "public", "key", "object", "with", "all", "the", "trimmings", ".", "Otherwise", "returns", "None" ]
python
train
RedHatInsights/insights-core
insights/parsers/df.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/df.py#L68-L125
def parse_df_lines(df_content): """Parse contents of each line in ``df`` output. Parse each line of ``df`` output ensuring that wrapped lines are reassembled prior to parsing, and that mount names containing spaces are maintained. Parameters: df_content (list): Lines of df output to be parsed. Returns: list: A list of ``Record`` ``namedtuple``'s. One for each line of the ``df`` output with columns as the key values. The fields of ``Record`` provide information about the file system attributes as determined by the arguments to the ``df`` command. So, for example, if ``df`` is given the ``-alP``, the values are in terms of 1024 blocks. If ``-li`` is given, then the values are in terms of inodes:: - filesystem: Name of the filesystem - total: total number of resources on the filesystem - used: number of the resources used on the filesystem - available: number of the resource available on the filesystem - capacity: percentage of the resource used on the filesystem - mounted_on: mount point of the filesystem """ df_ls = {} df_out = [] is_sep = False columns = Record._fields for line in df_content[1:]: # [1:] -> Skip the header # Stop at 5 splits to avoid splitting spaces in path line_splits = line.rstrip().split(None, 5) if len(line_splits) >= 6: for i, name in enumerate(columns): df_ls[name] = line_splits[i] is_sep = False elif len(line_splits) == 1: # First line of the separated line df_ls[columns[0]] = line_splits[0] is_sep = True elif is_sep and len(line_splits) >= 5: # Re-split to avoid this kind of "Mounted on": "VMware Tools" line_splits = line.split(None, 4) # Last line of the separated line for i, name in enumerate(columns[1:]): df_ls[name] = line_splits[i] is_sep = False elif not line_splits: # Skip empty lines (might in sosreport) continue else: raise ParseException("Could not parse line '{l}'".format(l=line)) # Only add this record if we've got a line and it's not separated if df_ls and not is_sep: rec = Record(**df_ls) df_out.append(rec) df_ls = {} return df_out
[ "def", "parse_df_lines", "(", "df_content", ")", ":", "df_ls", "=", "{", "}", "df_out", "=", "[", "]", "is_sep", "=", "False", "columns", "=", "Record", ".", "_fields", "for", "line", "in", "df_content", "[", "1", ":", "]", ":", "# [1:] -> Skip the header", "# Stop at 5 splits to avoid splitting spaces in path", "line_splits", "=", "line", ".", "rstrip", "(", ")", ".", "split", "(", "None", ",", "5", ")", "if", "len", "(", "line_splits", ")", ">=", "6", ":", "for", "i", ",", "name", "in", "enumerate", "(", "columns", ")", ":", "df_ls", "[", "name", "]", "=", "line_splits", "[", "i", "]", "is_sep", "=", "False", "elif", "len", "(", "line_splits", ")", "==", "1", ":", "# First line of the separated line", "df_ls", "[", "columns", "[", "0", "]", "]", "=", "line_splits", "[", "0", "]", "is_sep", "=", "True", "elif", "is_sep", "and", "len", "(", "line_splits", ")", ">=", "5", ":", "# Re-split to avoid this kind of \"Mounted on\": \"VMware Tools\"", "line_splits", "=", "line", ".", "split", "(", "None", ",", "4", ")", "# Last line of the separated line", "for", "i", ",", "name", "in", "enumerate", "(", "columns", "[", "1", ":", "]", ")", ":", "df_ls", "[", "name", "]", "=", "line_splits", "[", "i", "]", "is_sep", "=", "False", "elif", "not", "line_splits", ":", "# Skip empty lines (might in sosreport)", "continue", "else", ":", "raise", "ParseException", "(", "\"Could not parse line '{l}'\"", ".", "format", "(", "l", "=", "line", ")", ")", "# Only add this record if we've got a line and it's not separated", "if", "df_ls", "and", "not", "is_sep", ":", "rec", "=", "Record", "(", "*", "*", "df_ls", ")", "df_out", ".", "append", "(", "rec", ")", "df_ls", "=", "{", "}", "return", "df_out" ]
Parse contents of each line in ``df`` output. Parse each line of ``df`` output ensuring that wrapped lines are reassembled prior to parsing, and that mount names containing spaces are maintained. Parameters: df_content (list): Lines of df output to be parsed. Returns: list: A list of ``Record`` ``namedtuple``'s. One for each line of the ``df`` output with columns as the key values. The fields of ``Record`` provide information about the file system attributes as determined by the arguments to the ``df`` command. So, for example, if ``df`` is given the ``-alP``, the values are in terms of 1024 blocks. If ``-li`` is given, then the values are in terms of inodes:: - filesystem: Name of the filesystem - total: total number of resources on the filesystem - used: number of the resources used on the filesystem - available: number of the resource available on the filesystem - capacity: percentage of the resource used on the filesystem - mounted_on: mount point of the filesystem
[ "Parse", "contents", "of", "each", "line", "in", "df", "output", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/fix_webpy_cookies.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/fix_webpy_cookies.py#L10-L23
def fix_webpy_cookies(): """ Fixes a bug in web.py. See #208. PR is waiting to be merged upstream at https://github.com/webpy/webpy/pull/419 TODO: remove me once PR is merged upstream. """ try: web.webapi.parse_cookies('a="test"') # make the bug occur except NameError: # monkeypatch web.py SimpleCookie.iteritems = SimpleCookie.items web.webapi.Cookie = namedtuple('Cookie', ['SimpleCookie', 'CookieError'])(SimpleCookie, CookieError) web.webapi.parse_cookies('a="test"')
[ "def", "fix_webpy_cookies", "(", ")", ":", "try", ":", "web", ".", "webapi", ".", "parse_cookies", "(", "'a=\"test\"'", ")", "# make the bug occur", "except", "NameError", ":", "# monkeypatch web.py", "SimpleCookie", ".", "iteritems", "=", "SimpleCookie", ".", "items", "web", ".", "webapi", ".", "Cookie", "=", "namedtuple", "(", "'Cookie'", ",", "[", "'SimpleCookie'", ",", "'CookieError'", "]", ")", "(", "SimpleCookie", ",", "CookieError", ")", "web", ".", "webapi", ".", "parse_cookies", "(", "'a=\"test\"'", ")" ]
Fixes a bug in web.py. See #208. PR is waiting to be merged upstream at https://github.com/webpy/webpy/pull/419 TODO: remove me once PR is merged upstream.
[ "Fixes", "a", "bug", "in", "web", ".", "py", ".", "See", "#208", ".", "PR", "is", "waiting", "to", "be", "merged", "upstream", "at", "https", ":", "//", "github", ".", "com", "/", "webpy", "/", "webpy", "/", "pull", "/", "419", "TODO", ":", "remove", "me", "once", "PR", "is", "merged", "upstream", "." ]
python
train
BreakingBytes/simkit
simkit/core/__init__.py
https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/__init__.py#L212-L223
def default(self, o): """ JSONEncoder default method that converts NumPy arrays and quantities objects to lists. """ if isinstance(o, Q_): return o.magnitude elif isinstance(o, np.ndarray): return o.tolist() else: # raise TypeError if not serializable return super(SimKitJSONEncoder, self).default(o)
[ "def", "default", "(", "self", ",", "o", ")", ":", "if", "isinstance", "(", "o", ",", "Q_", ")", ":", "return", "o", ".", "magnitude", "elif", "isinstance", "(", "o", ",", "np", ".", "ndarray", ")", ":", "return", "o", ".", "tolist", "(", ")", "else", ":", "# raise TypeError if not serializable", "return", "super", "(", "SimKitJSONEncoder", ",", "self", ")", ".", "default", "(", "o", ")" ]
JSONEncoder default method that converts NumPy arrays and quantities objects to lists.
[ "JSONEncoder", "default", "method", "that", "converts", "NumPy", "arrays", "and", "quantities", "objects", "to", "lists", "." ]
python
train
saltstack/salt
salt/modules/win_lgpo.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_lgpo.py#L4501-L4524
def _in_range_inclusive(cls, val, **kwargs):
    '''
    checks that a value is in an inclusive range
    The value for 0 used by Max Password Age is actually 0xffffffff
    '''
    minimum = kwargs.get('min', 0)
    maximum = kwargs.get('max', 1)
    zero_value = kwargs.get('zero_value', 0)

    if isinstance(val, six.string_types):
        if val.lower() == 'not defined':
            return True
        else:
            try:
                val = int(val)
            except ValueError:
                return False
    if val is not None:
        if minimum <= val <= maximum or val == zero_value:
            return True
        else:
            return False
    else:
        return False
[ "def", "_in_range_inclusive", "(", "cls", ",", "val", ",", "*", "*", "kwargs", ")", ":", "minimum", "=", "kwargs", ".", "get", "(", "'min'", ",", "0", ")", "maximum", "=", "kwargs", ".", "get", "(", "'max'", ",", "1", ")", "zero_value", "=", "kwargs", ".", "get", "(", "'zero_value'", ",", "0", ")", "if", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ":", "if", "val", ".", "lower", "(", ")", "==", "'not defined'", ":", "return", "True", "else", ":", "try", ":", "val", "=", "int", "(", "val", ")", "except", "ValueError", ":", "return", "False", "if", "val", "is", "not", "None", ":", "if", "minimum", "<=", "val", "<=", "maximum", "or", "val", "==", "zero_value", ":", "return", "True", "else", ":", "return", "False", "else", ":", "return", "False" ]
Checks that a value is in an inclusive range.

The value for 0 used by Max Password Age is actually 0xffffffff.
[ "checks", "that", "a", "value", "is", "in", "an", "inclusive", "range", "The", "value", "for", "0", "used", "by", "Max", "Password", "Age", "is", "actually", "0xffffffff" ]
python
train
horazont/aioxmpp
aioxmpp/stream.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/stream.py#L2436-L2452
def _stop_sm(self):
    """
    Version of :meth:`stop_sm` which can be called during startup.
    """
    if not self.sm_enabled:
        raise RuntimeError("Stream Management is not enabled")

    self._logger.info("stopping SM stream")
    self._sm_enabled = False
    del self._sm_outbound_base
    del self._sm_inbound_ctr
    self._clear_unacked(StanzaState.SENT_WITHOUT_SM)
    del self._sm_unacked_list

    self._destroy_stream_state(ConnectionError(
        "stream management disabled"
    ))
[ "def", "_stop_sm", "(", "self", ")", ":", "if", "not", "self", ".", "sm_enabled", ":", "raise", "RuntimeError", "(", "\"Stream Management is not enabled\"", ")", "self", ".", "_logger", ".", "info", "(", "\"stopping SM stream\"", ")", "self", ".", "_sm_enabled", "=", "False", "del", "self", ".", "_sm_outbound_base", "del", "self", ".", "_sm_inbound_ctr", "self", ".", "_clear_unacked", "(", "StanzaState", ".", "SENT_WITHOUT_SM", ")", "del", "self", ".", "_sm_unacked_list", "self", ".", "_destroy_stream_state", "(", "ConnectionError", "(", "\"stream management disabled\"", ")", ")" ]
Version of :meth:`stop_sm` which can be called during startup.
[ "Version", "of", ":", "meth", ":", "stop_sm", "which", "can", "be", "called", "during", "startup", "." ]
python
train
SBRG/ssbio
ssbio/protein/structure/structprop.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L392-L422
def get_seqprop_within(self, chain_id, resnum, angstroms, only_protein=True, use_ca=False,
                       custom_coord=None, return_resnums=False):
    """Get a SeqProp object of the amino acids within X angstroms of the specified chain + residue number.

    Args:
        resnum (int): Residue number of the structure
        chain_id (str): Chain ID of the residue number
        angstroms (float): Radius of the search sphere
        only_protein (bool): If only protein atoms (no HETATMS) should be included in the returned sequence
        use_ca (bool): If the alpha-carbon atom should be used for searching, default is False (last atom of residue used)

    Returns:
        SeqProp: Sequence that represents the amino acids in the vicinity of your residue number.

    """
    # XTODO: change "remove" parameter to be clean_seq and to remove all non standard amino acids
    # TODO: make return_resnums smarter
    polypep, resnums = self.get_polypeptide_within(chain_id=chain_id, resnum=resnum, angstroms=angstroms,
                                                   use_ca=use_ca, only_protein=only_protein,
                                                   custom_coord=custom_coord, return_resnums=True)
    # final_seq = polypep.get_sequence()
    # seqprop = SeqProp(id='{}-{}_within_{}_of_{}'.format(self.id, chain_id, angstroms, resnum),
    #                   seq=final_seq)
    chain_subseq = self.chains.get_by_id(chain_id).get_subsequence(resnums)

    if return_resnums:
        return chain_subseq, resnums
    else:
        return chain_subseq
[ "def", "get_seqprop_within", "(", "self", ",", "chain_id", ",", "resnum", ",", "angstroms", ",", "only_protein", "=", "True", ",", "use_ca", "=", "False", ",", "custom_coord", "=", "None", ",", "return_resnums", "=", "False", ")", ":", "# XTODO: change \"remove\" parameter to be clean_seq and to remove all non standard amino acids", "# TODO: make return_resnums smarter", "polypep", ",", "resnums", "=", "self", ".", "get_polypeptide_within", "(", "chain_id", "=", "chain_id", ",", "resnum", "=", "resnum", ",", "angstroms", "=", "angstroms", ",", "use_ca", "=", "use_ca", ",", "only_protein", "=", "only_protein", ",", "custom_coord", "=", "custom_coord", ",", "return_resnums", "=", "True", ")", "# final_seq = polypep.get_sequence()", "# seqprop = SeqProp(id='{}-{}_within_{}_of_{}'.format(self.id, chain_id, angstroms, resnum),", "# seq=final_seq)", "chain_subseq", "=", "self", ".", "chains", ".", "get_by_id", "(", "chain_id", ")", ".", "get_subsequence", "(", "resnums", ")", "if", "return_resnums", ":", "return", "chain_subseq", ",", "resnums", "else", ":", "return", "chain_subseq" ]
Get a SeqProp object of the amino acids within X angstroms of the specified chain + residue number.

Args:
    resnum (int): Residue number of the structure
    chain_id (str): Chain ID of the residue number
    angstroms (float): Radius of the search sphere
    only_protein (bool): If only protein atoms (no HETATMS) should be included in the returned sequence
    use_ca (bool): If the alpha-carbon atom should be used for searching, default is False (last atom of residue used)

Returns:
    SeqProp: Sequence that represents the amino acids in the vicinity of your residue number.
[ "Get", "a", "SeqProp", "object", "of", "the", "amino", "acids", "within", "X", "angstroms", "of", "the", "specified", "chain", "+", "residue", "number", "." ]
python
train
bitcaster-io/bitcaster
src/bitcaster/web/templatetags/partition.py
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/web/templatetags/partition.py#L23-L59
def rows(thelist, n):
    """
    Break a list into ``n`` rows, filling up each row to the maximum equal
    length possible. For example::

        >>> l = range(10)

        >>> rows(l, 2)
        [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]

        >>> rows(l, 3)
        [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]

        >>> rows(l, 4)
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]

        >>> rows(l, 5)
        [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]

        >>> rows(l, 9)
        [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [], [], [], []]

        # This filter will always return `n` rows, even if some are empty:
        >>> rows(range(2), 3)
        [[0], [1], []]
    """
    try:
        n = int(n)
        thelist = list(thelist)
    except (ValueError, TypeError):
        return [thelist]
    list_len = len(thelist)
    split = list_len // n

    if list_len % n != 0:
        split += 1
    return [thelist[split * i:split * (i + 1)] for i in range(n)]
[ "def", "rows", "(", "thelist", ",", "n", ")", ":", "try", ":", "n", "=", "int", "(", "n", ")", "thelist", "=", "list", "(", "thelist", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "[", "thelist", "]", "list_len", "=", "len", "(", "thelist", ")", "split", "=", "list_len", "//", "n", "if", "list_len", "%", "n", "!=", "0", ":", "split", "+=", "1", "return", "[", "thelist", "[", "split", "*", "i", ":", "split", "*", "(", "i", "+", "1", ")", "]", "for", "i", "in", "range", "(", "n", ")", "]" ]
Break a list into ``n`` rows, filling up each row to the maximum equal
length possible. For example::

    >>> l = range(10)

    >>> rows(l, 2)
    [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]

    >>> rows(l, 3)
    [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]

    >>> rows(l, 4)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]

    >>> rows(l, 5)
    [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]

    >>> rows(l, 9)
    [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [], [], [], []]

    # This filter will always return `n` rows, even if some are empty:
    >>> rows(range(2), 3)
    [[0], [1], []]
[ "Break", "a", "list", "into", "n", "rows", "filling", "up", "each", "row", "to", "the", "maximum", "equal", "length", "possible", ".", "For", "example", "::" ]
python
train
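Editorial note (not part of the original record): the `rows` helper above lives in a Django template tag module, so besides the plain-Python behaviour shown in its doctests it is presumably exposed as a template filter. The sketch below only exercises the documented Python behaviour and assumes the `rows` function from the record is in scope; the template usage in the trailing comment is an assumption based on the file location `templatetags/partition.py`, not something shown in the record.

# Plain-Python use of rows(), mirroring the docstring examples.
grid = rows(range(10), 4)
assert grid == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]

# Hypothetical template usage, assuming the function is registered as a
# filter named "rows" in this template tag library:
#   {% load partition %}
#   {% for row in items|rows:3 %} ... {% endfor %}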
childsish/lhc-python
lhc/misc/tools.py
https://github.com/childsish/lhc-python/blob/0a669f46a40a39f24d28665e8b5b606dc7e86beb/lhc/misc/tools.py#L38-L50
def combinations_with_replacement(iterable, r):
    """ This function acts as a replacement for the
    itertools.combinations_with_replacement function. The original does not
    replace items that come earlier in the provided iterator.
    """
    stk = [[i, ] for i in iterable]
    pop = stk.pop
    while len(stk) > 0:
        top = pop()
        if len(top) == r:
            yield tuple(top)
        else:
            stk.extend(top + [i] for i in iterable)
[ "def", "combinations_with_replacement", "(", "iterable", ",", "r", ")", ":", "stk", "=", "[", "[", "i", ",", "]", "for", "i", "in", "iterable", "]", "pop", "=", "stk", ".", "pop", "while", "len", "(", "stk", ")", ">", "0", ":", "top", "=", "pop", "(", ")", "if", "len", "(", "top", ")", "==", "r", ":", "yield", "tuple", "(", "top", ")", "else", ":", "stk", ".", "extend", "(", "top", "+", "[", "i", "]", "for", "i", "in", "iterable", ")" ]
This function acts as a replacement for the itertools.combinations_with_replacement function. The original does not replace items that come earlier in the provided iterator.
[ "This", "function", "acts", "as", "a", "replacement", "for", "the", "itertools", ".", "combinations_with_replacement", "function", ".", "The", "original", "does", "not", "replace", "items", "that", "come", "earlier", "in", "the", "provided", "iterator", "." ]
python
train
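Editorial note (not part of the original record): a small comparison sketch to make the docstring's point concrete. It assumes the generator from the record above is in scope (the record's path suggests `lhc.misc.tools`); the standard-library version is imported under a different name to avoid the clash.

from itertools import combinations_with_replacement as itertools_cwr

# itertools never yields ('B', 'A'): items that came earlier are not revisited.
assert sorted(itertools_cwr('AB', 2)) == [('A', 'A'), ('A', 'B'), ('B', 'B')]

# The generator from the record above also yields ('B', 'A'), i.e. every
# length-r tuple over the iterable. Output order is not guaranteed here,
# hence the sorted() comparison.
assert sorted(combinations_with_replacement('AB', 2)) == [
    ('A', 'A'), ('A', 'B'), ('B', 'A'), ('B', 'B')
]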
spyder-ide/spyder
spyder/widgets/onecolumntree.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/onecolumntree.py#L204-L211
def sort_top_level_items(self, key):
    """Sorting tree wrt top level items"""
    self.save_expanded_state()
    items = sorted([self.takeTopLevelItem(0)
                    for index in range(self.topLevelItemCount())], key=key)
    for index, item in enumerate(items):
        self.insertTopLevelItem(index, item)
    self.restore_expanded_state()
[ "def", "sort_top_level_items", "(", "self", ",", "key", ")", ":", "self", ".", "save_expanded_state", "(", ")", "items", "=", "sorted", "(", "[", "self", ".", "takeTopLevelItem", "(", "0", ")", "for", "index", "in", "range", "(", "self", ".", "topLevelItemCount", "(", ")", ")", "]", ",", "key", "=", "key", ")", "for", "index", ",", "item", "in", "enumerate", "(", "items", ")", ":", "self", ".", "insertTopLevelItem", "(", "index", ",", "item", ")", "self", ".", "restore_expanded_state", "(", ")" ]
Sorting tree wrt top level items
[ "Sorting", "tree", "wrt", "top", "level", "items" ]
python
train
brandon-rhodes/uncommitted
uncommitted/command.py
https://github.com/brandon-rhodes/uncommitted/blob/80ebd95a3735e26bd8b9b7b62ff25e1e749a7472/uncommitted/command.py#L174-L201
def scan(repos, options):
    """Given a repository list [(path, vcsname), ...], scan each of them."""
    ignore_set = set()
    repos = repos[::-1]  # Create a queue we can push and pop from
    while repos:
        directory, dotdir = repos.pop()
        ignore_this = any(pat in directory for pat in options.ignore_patterns)
        if ignore_this:
            if options.verbose:
                output(b'Ignoring repo: %s' % directory)
                output(b'')
            continue

        vcsname, get_status = SYSTEMS[dotdir]
        lines, subrepos = get_status(directory, ignore_set, options)

        # We want to tackle subrepos immediately after their repository,
        # so we put them at the front of the queue.
        subrepos = [(os.path.join(directory, r), dotdir) for r in subrepos]
        repos.extend(reversed(subrepos))

        if lines is None:  # signal that we should ignore this one
            continue
        if lines or options.verbose:
            output(b'%s - %s' % (directory, vcsname))
            for line in lines:
                output(line)
            output(b'')
[ "def", "scan", "(", "repos", ",", "options", ")", ":", "ignore_set", "=", "set", "(", ")", "repos", "=", "repos", "[", ":", ":", "-", "1", "]", "# Create a queue we can push and pop from", "while", "repos", ":", "directory", ",", "dotdir", "=", "repos", ".", "pop", "(", ")", "ignore_this", "=", "any", "(", "pat", "in", "directory", "for", "pat", "in", "options", ".", "ignore_patterns", ")", "if", "ignore_this", ":", "if", "options", ".", "verbose", ":", "output", "(", "b'Ignoring repo: %s'", "%", "directory", ")", "output", "(", "b''", ")", "continue", "vcsname", ",", "get_status", "=", "SYSTEMS", "[", "dotdir", "]", "lines", ",", "subrepos", "=", "get_status", "(", "directory", ",", "ignore_set", ",", "options", ")", "# We want to tackle subrepos immediately after their repository,", "# so we put them at the front of the queue.", "subrepos", "=", "[", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "r", ")", ",", "dotdir", ")", "for", "r", "in", "subrepos", "]", "repos", ".", "extend", "(", "reversed", "(", "subrepos", ")", ")", "if", "lines", "is", "None", ":", "# signal that we should ignore this one", "continue", "if", "lines", "or", "options", ".", "verbose", ":", "output", "(", "b'%s - %s'", "%", "(", "directory", ",", "vcsname", ")", ")", "for", "line", "in", "lines", ":", "output", "(", "line", ")", "output", "(", "b''", ")" ]
Given a repository list [(path, vcsname), ...], scan each of them.
[ "Given", "a", "repository", "list", "[", "(", "path", "vcsname", ")", "...", "]", "scan", "each", "of", "them", "." ]
python
train
night-crawler/django-docker-helpers
django_docker_helpers/utils.py
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/utils.py#L364-L382
def run_env_once(f: t.Callable) -> t.Callable:
    """
    A decorator to prevent ``manage.py`` from running code twice for everything.
    (https://stackoverflow.com/questions/16546652/why-does-django-run-everything-twice)

    :param f: function or method to decorate
    :return: callable
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        has_run = os.environ.get(wrapper.__name__)
        if has_run == '1':
            return

        result = f(*args, **kwargs)
        os.environ[wrapper.__name__] = '1'
        return result

    return wrapper
[ "def", "run_env_once", "(", "f", ":", "t", ".", "Callable", ")", "->", "t", ".", "Callable", ":", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "has_run", "=", "os", ".", "environ", ".", "get", "(", "wrapper", ".", "__name__", ")", "if", "has_run", "==", "'1'", ":", "return", "result", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "os", ".", "environ", "[", "wrapper", ".", "__name__", "]", "=", "'1'", "return", "result", "return", "wrapper" ]
A decorator to prevent ``manage.py`` from running code twice for everything.
(https://stackoverflow.com/questions/16546652/why-does-django-run-everything-twice)

:param f: function or method to decorate
:return: callable
[ "A", "decorator", "to", "prevent", "manage", ".", "py", "from", "running", "code", "twice", "for", "everything", ".", "(", "https", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "16546652", "/", "why", "-", "does", "-", "django", "-", "run", "-", "everything", "-", "twice", ")" ]
python
train
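Editorial note (not part of the original record): a minimal usage sketch for the decorator above. It relies only on what the record shows — the decorator keys an environment variable on the wrapped function's name; the `send_startup_email` function and the import path (derived from the record's file path) are assumptions.

from django_docker_helpers.utils import run_env_once  # path suggested by the record

@run_env_once
def send_startup_email():
    # hypothetical one-time initialisation step
    print("sending startup email")

send_startup_email()  # runs and sets os.environ["send_startup_email"] = "1"
send_startup_email()  # returns early: the flag is already "1" for this environment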
MIT-LCP/wfdb-python
wfdb/io/_signal.py
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L70-L143
def check_sig_cohesion(self, write_fields, expanded):
    """
    Check the cohesion of the d_signal/e_d_signal field with the other
    fields used to write the record
    """
    # Using list of arrays e_d_signal
    if expanded:
        # Set default samps_per_frame
        spf = self.samps_per_frame
        for ch in range(len(spf)):
            if spf[ch] is None:
                spf[ch] = 1

        # Match the actual signal shape against stated length and number of channels
        if self.n_sig != len(self.e_d_signal):
            raise ValueError('n_sig does not match the length of e_d_signal')
        for ch in range(self.n_sig):
            if len(self.e_d_signal[ch]) != spf[ch] * self.sig_len:
                # Note: the original concatenation put the second str() call in the
                # wrong place (str(ch+']*sig_len')), which would itself raise; fixed here.
                raise ValueError('Length of channel ' + str(ch) + ' does not match samps_per_frame[' + str(ch) + ']*sig_len')

        # For each channel (if any), make sure the digital format has no values out of bounds
        for ch in range(self.n_sig):
            fmt = self.fmt[ch]
            dmin, dmax = _digi_bounds(self.fmt[ch])

            chmin = min(self.e_d_signal[ch])
            chmax = max(self.e_d_signal[ch])
            if (chmin < dmin) or (chmax > dmax):
                raise IndexError("Channel " + str(ch) + " contain values outside allowed range [" + str(dmin) + ", " + str(dmax) + "] for fmt " + str(fmt))

        # Ensure that the checksums and initial value fields match the digital signal (if the fields are present)
        if self.n_sig > 0:
            if 'checksum' in write_fields:
                realchecksum = self.calc_checksum(expanded)
                if self.checksum != realchecksum:
                    print("The actual checksum of e_d_signal is: ", realchecksum)
                    raise ValueError("checksum field does not match actual checksum of e_d_signal")
            if 'init_value' in write_fields:
                realinit_value = [self.e_d_signal[ch][0] for ch in range(self.n_sig)]
                if self.init_value != realinit_value:
                    print("The actual init_value of e_d_signal is: ", realinit_value)
                    raise ValueError("init_value field does not match actual init_value of e_d_signal")

    # Using uniform d_signal
    else:
        # Match the actual signal shape against stated length and number of channels
        if (self.sig_len, self.n_sig) != self.d_signal.shape:
            print('sig_len: ', self.sig_len)
            print('n_sig: ', self.n_sig)
            print('d_signal.shape: ', self.d_signal.shape)
            raise ValueError('sig_len and n_sig do not match shape of d_signal')

        # For each channel (if any), make sure the digital format has no values out of bounds
        for ch in range(self.n_sig):
            fmt = self.fmt[ch]
            dmin, dmax = _digi_bounds(self.fmt[ch])

            chmin = min(self.d_signal[:, ch])
            chmax = max(self.d_signal[:, ch])
            if (chmin < dmin) or (chmax > dmax):
                raise IndexError("Channel " + str(ch) + " contain values outside allowed range [" + str(dmin) + ", " + str(dmax) + "] for fmt " + str(fmt))

        # Ensure that the checksums and initial value fields match the digital signal (if the fields are present)
        if self.n_sig > 0:
            if 'checksum' in write_fields:
                realchecksum = self.calc_checksum()
                if self.checksum != realchecksum:
                    print("The actual checksum of d_signal is: ", realchecksum)
                    raise ValueError("checksum field does not match actual checksum of d_signal")
            if 'init_value' in write_fields:
                realinit_value = list(self.d_signal[0, :])
                if self.init_value != realinit_value:
                    print("The actual init_value of d_signal is: ", realinit_value)
                    raise ValueError("init_value field does not match actual init_value of d_signal")
[ "def", "check_sig_cohesion", "(", "self", ",", "write_fields", ",", "expanded", ")", ":", "# Using list of arrays e_d_signal", "if", "expanded", ":", "# Set default samps_per_frame", "spf", "=", "self", ".", "samps_per_frame", "for", "ch", "in", "range", "(", "len", "(", "spf", ")", ")", ":", "if", "spf", "[", "ch", "]", "is", "None", ":", "spf", "[", "ch", "]", "=", "1", "# Match the actual signal shape against stated length and number of channels", "if", "self", ".", "n_sig", "!=", "len", "(", "self", ".", "e_d_signal", ")", ":", "raise", "ValueError", "(", "'n_sig does not match the length of e_d_signal'", ")", "for", "ch", "in", "range", "(", "self", ".", "n_sig", ")", ":", "if", "len", "(", "self", ".", "e_d_signal", "[", "ch", "]", ")", "!=", "spf", "[", "ch", "]", "*", "self", ".", "sig_len", ":", "raise", "ValueError", "(", "'Length of channel '", "+", "str", "(", "ch", ")", "+", "'does not match samps_per_frame['", "+", "str", "(", "ch", "+", "']*sig_len'", ")", ")", "# For each channel (if any), make sure the digital format has no values out of bounds", "for", "ch", "in", "range", "(", "self", ".", "n_sig", ")", ":", "fmt", "=", "self", ".", "fmt", "[", "ch", "]", "dmin", ",", "dmax", "=", "_digi_bounds", "(", "self", ".", "fmt", "[", "ch", "]", ")", "chmin", "=", "min", "(", "self", ".", "e_d_signal", "[", "ch", "]", ")", "chmax", "=", "max", "(", "self", ".", "e_d_signal", "[", "ch", "]", ")", "if", "(", "chmin", "<", "dmin", ")", "or", "(", "chmax", ">", "dmax", ")", ":", "raise", "IndexError", "(", "\"Channel \"", "+", "str", "(", "ch", ")", "+", "\" contain values outside allowed range [\"", "+", "str", "(", "dmin", ")", "+", "\", \"", "+", "str", "(", "dmax", ")", "+", "\"] for fmt \"", "+", "str", "(", "fmt", ")", ")", "# Ensure that the checksums and initial value fields match the digital signal (if the fields are present)", "if", "self", ".", "n_sig", ">", "0", ":", "if", "'checksum'", "in", "write_fields", ":", "realchecksum", "=", "self", ".", "calc_checksum", "(", "expanded", ")", "if", "self", ".", "checksum", "!=", "realchecksum", ":", "print", "(", "\"The actual checksum of e_d_signal is: \"", ",", "realchecksum", ")", "raise", "ValueError", "(", "\"checksum field does not match actual checksum of e_d_signal\"", ")", "if", "'init_value'", "in", "write_fields", ":", "realinit_value", "=", "[", "self", ".", "e_d_signal", "[", "ch", "]", "[", "0", "]", "for", "ch", "in", "range", "(", "self", ".", "n_sig", ")", "]", "if", "self", ".", "init_value", "!=", "realinit_value", ":", "print", "(", "\"The actual init_value of e_d_signal is: \"", ",", "realinit_value", ")", "raise", "ValueError", "(", "\"init_value field does not match actual init_value of e_d_signal\"", ")", "# Using uniform d_signal", "else", ":", "# Match the actual signal shape against stated length and number of channels", "if", "(", "self", ".", "sig_len", ",", "self", ".", "n_sig", ")", "!=", "self", ".", "d_signal", ".", "shape", ":", "print", "(", "'sig_len: '", ",", "self", ".", "sig_len", ")", "print", "(", "'n_sig: '", ",", "self", ".", "n_sig", ")", "print", "(", "'d_signal.shape: '", ",", "self", ".", "d_signal", ".", "shape", ")", "raise", "ValueError", "(", "'sig_len and n_sig do not match shape of d_signal'", ")", "# For each channel (if any), make sure the digital format has no values out of bounds", "for", "ch", "in", "range", "(", "self", ".", "n_sig", ")", ":", "fmt", "=", "self", ".", "fmt", "[", "ch", "]", "dmin", ",", "dmax", "=", "_digi_bounds", "(", "self", ".", "fmt", "[", "ch", 
"]", ")", "chmin", "=", "min", "(", "self", ".", "d_signal", "[", ":", ",", "ch", "]", ")", "chmax", "=", "max", "(", "self", ".", "d_signal", "[", ":", ",", "ch", "]", ")", "if", "(", "chmin", "<", "dmin", ")", "or", "(", "chmax", ">", "dmax", ")", ":", "raise", "IndexError", "(", "\"Channel \"", "+", "str", "(", "ch", ")", "+", "\" contain values outside allowed range [\"", "+", "str", "(", "dmin", ")", "+", "\", \"", "+", "str", "(", "dmax", ")", "+", "\"] for fmt \"", "+", "str", "(", "fmt", ")", ")", "# Ensure that the checksums and initial value fields match the digital signal (if the fields are present)", "if", "self", ".", "n_sig", ">", "0", ":", "if", "'checksum'", "in", "write_fields", ":", "realchecksum", "=", "self", ".", "calc_checksum", "(", ")", "if", "self", ".", "checksum", "!=", "realchecksum", ":", "print", "(", "\"The actual checksum of d_signal is: \"", ",", "realchecksum", ")", "raise", "ValueError", "(", "\"checksum field does not match actual checksum of d_signal\"", ")", "if", "'init_value'", "in", "write_fields", ":", "realinit_value", "=", "list", "(", "self", ".", "d_signal", "[", "0", ",", ":", "]", ")", "if", "self", ".", "init_value", "!=", "realinit_value", ":", "print", "(", "\"The actual init_value of d_signal is: \"", ",", "realinit_value", ")", "raise", "ValueError", "(", "\"init_value field does not match actual init_value of d_signal\"", ")" ]
Check the cohesion of the d_signal/e_d_signal field with the other fields used to write the record
[ "Check", "the", "cohesion", "of", "the", "d_signal", "/", "e_d_signal", "field", "with", "the", "other", "fields", "used", "to", "write", "the", "record" ]
python
train
bwohlberg/sporco
sporco/linalg.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/linalg.py#L1295-L1321
def split(u, axis=0):
    """
    Split an array into a list of arrays on the specified axis. The length
    of the list is the shape of the array on the specified axis, and the
    corresponding axis is removed from each entry in the list. This function
    does not have the same behaviour as :func:`numpy.split`.

    Parameters
    ----------
    u : array_like
        Input array
    axis : int, optional (default 0)
        Axis on which to split the input array

    Returns
    -------
    v : list of ndarray
        List of arrays
    """

    # Convert negative axis to positive
    if axis < 0:
        axis = u.ndim + axis
    # Construct axis selection slice
    slct0 = (slice(None),) * axis
    return [u[slct0 + (k,)] for k in range(u.shape[axis])]
[ "def", "split", "(", "u", ",", "axis", "=", "0", ")", ":", "# Convert negative axis to positive", "if", "axis", "<", "0", ":", "axis", "=", "u", ".", "ndim", "+", "axis", "# Construct axis selection slice", "slct0", "=", "(", "slice", "(", "None", ")", ",", ")", "*", "axis", "return", "[", "u", "[", "slct0", "+", "(", "k", ",", ")", "]", "for", "k", "in", "range", "(", "u", ".", "shape", "[", "axis", "]", ")", "]" ]
Split an array into a list of arrays on the specified axis. The length
of the list is the shape of the array on the specified axis, and the
corresponding axis is removed from each entry in the list. This function
does not have the same behaviour as :func:`numpy.split`.

Parameters
----------
u : array_like
    Input array
axis : int, optional (default 0)
    Axis on which to split the input array

Returns
-------
v : list of ndarray
    List of arrays
[ "Split", "an", "array", "into", "a", "list", "of", "arrays", "on", "the", "specified", "axis", ".", "The", "length", "of", "the", "list", "is", "the", "shape", "of", "the", "array", "on", "the", "specified", "axis", "and", "the", "corresponding", "axis", "is", "removed", "from", "each", "entry", "in", "the", "list", ".", "This", "function", "does", "not", "have", "the", "same", "behaviour", "as", ":", "func", ":", "numpy", ".", "split", "." ]
python
train
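Editorial note (not part of the original record): a brief numpy sketch to make the docstring's contrast with numpy.split concrete. The import path is inferred from the record's file path and is an assumption.

import numpy as np
from sporco.linalg import split  # path suggested by the record above

u = np.arange(24).reshape(2, 3, 4)

v = split(u, axis=2)
# One list entry per index on axis 2, with that axis removed from each entry:
assert len(v) == 4
assert v[0].shape == (2, 3)

# numpy.split, by contrast, keeps the split axis (each piece has shape (2, 3, 1)):
assert np.split(u, 4, axis=2)[0].shape == (2, 3, 1)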
Jammy2211/PyAutoLens
autolens/data/array/util/mapping_util.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/util/mapping_util.py#L381-L401
def sparse_grid_from_unmasked_sparse_grid(unmasked_sparse_grid, sparse_to_unmasked_sparse):
    """Use the central arc-second coordinate of every unmasked pixelization grid's pixels and mapping between each
    pixelization pixel and unmasked pixelization pixel to compute the central arc-second coordinate of every masked
    pixelization grid pixel.

    Parameters
    -----------
    unmasked_sparse_grid : ndarray
        The (y,x) arc-second centre of every unmasked pixelization grid pixel.
    sparse_to_unmasked_sparse : ndarray
        The index mapping between every pixelization pixel and masked pixelization pixel.
    """
    total_pix_pixels = sparse_to_unmasked_sparse.shape[0]

    pix_grid = np.zeros((total_pix_pixels, 2))

    for pixel_index in range(total_pix_pixels):
        pix_grid[pixel_index, 0] = unmasked_sparse_grid[sparse_to_unmasked_sparse[pixel_index], 0]
        pix_grid[pixel_index, 1] = unmasked_sparse_grid[sparse_to_unmasked_sparse[pixel_index], 1]

    return pix_grid
[ "def", "sparse_grid_from_unmasked_sparse_grid", "(", "unmasked_sparse_grid", ",", "sparse_to_unmasked_sparse", ")", ":", "total_pix_pixels", "=", "sparse_to_unmasked_sparse", ".", "shape", "[", "0", "]", "pix_grid", "=", "np", ".", "zeros", "(", "(", "total_pix_pixels", ",", "2", ")", ")", "for", "pixel_index", "in", "range", "(", "total_pix_pixels", ")", ":", "pix_grid", "[", "pixel_index", ",", "0", "]", "=", "unmasked_sparse_grid", "[", "sparse_to_unmasked_sparse", "[", "pixel_index", "]", ",", "0", "]", "pix_grid", "[", "pixel_index", ",", "1", "]", "=", "unmasked_sparse_grid", "[", "sparse_to_unmasked_sparse", "[", "pixel_index", "]", ",", "1", "]", "return", "pix_grid" ]
Use the central arc-second coordinate of every unmasked pixelization grid's pixels and mapping between each
pixelization pixel and unmasked pixelization pixel to compute the central arc-second coordinate of every masked
pixelization grid pixel.

Parameters
-----------
unmasked_sparse_grid : ndarray
    The (y,x) arc-second centre of every unmasked pixelization grid pixel.
sparse_to_unmasked_sparse : ndarray
    The index mapping between every pixelization pixel and masked pixelization pixel.
[ "Use", "the", "central", "arc", "-", "second", "coordinate", "of", "every", "unmasked", "pixelization", "grid", "s", "pixels", "and", "mapping", "between", "each", "pixelization", "pixel", "and", "unmasked", "pixelization", "pixel", "to", "compute", "the", "central", "arc", "-", "second", "coordinate", "of", "every", "masked", "pixelization", "grid", "pixel", "." ]
python
valid
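Editorial note (not part of the original record): a tiny numpy illustration of the mapping performed by the function above. The coordinate values are made up, and the import path is inferred from the record's file path.

import numpy as np
from autolens.data.array.util.mapping_util import sparse_grid_from_unmasked_sparse_grid  # assumed path

# Three unmasked pixelization pixel centres, in (y, x) arc-seconds (made-up values).
unmasked_sparse_grid = np.array([[1.0, 1.0],
                                 [0.0, 0.0],
                                 [-1.0, 2.0]])

# The masked pixelization grid keeps unmasked pixels 2 and 0, in that order.
sparse_to_unmasked_sparse = np.array([2, 0])

pix_grid = sparse_grid_from_unmasked_sparse_grid(unmasked_sparse_grid,
                                                 sparse_to_unmasked_sparse)
# Each masked pixel simply picks up the centre of the unmasked pixel it maps to.
assert (pix_grid == np.array([[-1.0, 2.0], [1.0, 1.0]])).all()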
amzn/ion-python
amazon/ion/reader_text.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_text.py#L1651-L1655
def _struct_or_lob_handler(c, ctx):
    """Handles tokens that begin with an open brace."""
    assert c == _OPEN_BRACE
    c, self = yield
    yield ctx.immediate_transition(_STRUCT_OR_LOB_TABLE[c](c, ctx))
[ "def", "_struct_or_lob_handler", "(", "c", ",", "ctx", ")", ":", "assert", "c", "==", "_OPEN_BRACE", "c", ",", "self", "=", "yield", "yield", "ctx", ".", "immediate_transition", "(", "_STRUCT_OR_LOB_TABLE", "[", "c", "]", "(", "c", ",", "ctx", ")", ")" ]
Handles tokens that begin with an open brace.
[ "Handles", "tokens", "that", "begin", "with", "an", "open", "brace", "." ]
python
train
Fortran-FOSS-Programmers/ford
ford/fortran_project.py
https://github.com/Fortran-FOSS-Programmers/ford/blob/d46a44eae20d99205292c31785f936fbed47070f/ford/fortran_project.py#L295-L303
def markdown(self, md, base_url='..'):
    """
    Process the documentation with Markdown to produce HTML.
    """
    print("\nProcessing documentation comments...")
    ford.sourceform.set_base_url(base_url)
    if self.settings['warn'].lower() == 'true':
        print()
    for src in self.allfiles:
        src.markdown(md, self)
[ "def", "markdown", "(", "self", ",", "md", ",", "base_url", "=", "'..'", ")", ":", "print", "(", "\"\\nProcessing documentation comments...\"", ")", "ford", ".", "sourceform", ".", "set_base_url", "(", "base_url", ")", "if", "self", ".", "settings", "[", "'warn'", "]", ".", "lower", "(", ")", "==", "'true'", ":", "print", "(", ")", "for", "src", "in", "self", ".", "allfiles", ":", "src", ".", "markdown", "(", "md", ",", "self", ")" ]
Process the documentation with Markdown to produce HTML.
[ "Process", "the", "documentation", "with", "Markdown", "to", "produce", "HTML", "." ]
python
train
quantmind/pulsar
pulsar/utils/tools/pidfile.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/tools/pidfile.py#L44-L52
def unlink(self):
    """ delete pidfile"""
    try:
        with open(self.fname, "r") as f:
            pid1 = int(f.read() or 0)

        if pid1 == self.pid:
            os.unlink(self.fname)
    except Exception:
        pass
[ "def", "unlink", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "fname", ",", "\"r\"", ")", "as", "f", ":", "pid1", "=", "int", "(", "f", ".", "read", "(", ")", "or", "0", ")", "if", "pid1", "==", "self", ".", "pid", ":", "os", ".", "unlink", "(", "self", ".", "fname", ")", "except", "Exception", ":", "pass" ]
delete pidfile
[ "delete", "pidfile" ]
python
train
prometheus/client_python
prometheus_client/multiprocess.py
https://github.com/prometheus/client_python/blob/31f5557e2e84ca4ffa9a03abf6e3f4d0c8b8c3eb/prometheus_client/multiprocess.py#L29-L114
def merge(files, accumulate=True):
    """Merge metrics from given mmap files.

    By default, histograms are accumulated, as per prometheus wire format.
    But if writing the merged data back to mmap files, use
    accumulate=False to avoid compound accumulation.
    """
    metrics = {}
    for f in files:
        parts = os.path.basename(f).split('_')
        typ = parts[0]
        d = MmapedDict(f, read_mode=True)
        for key, value in d.read_all_values():
            metric_name, name, labels = json.loads(key)
            labels_key = tuple(sorted(labels.items()))

            metric = metrics.get(metric_name)
            if metric is None:
                metric = Metric(metric_name, 'Multiprocess metric', typ)
                metrics[metric_name] = metric

            if typ == 'gauge':
                pid = parts[2][:-3]
                metric._multiprocess_mode = parts[1]
                metric.add_sample(name, labels_key + (('pid', pid),), value)
            else:
                # The duplicates and labels are fixed in the next for.
                metric.add_sample(name, labels_key, value)
        d.close()

    for metric in metrics.values():
        samples = defaultdict(float)
        buckets = {}
        for s in metric.samples:
            name, labels, value = s.name, s.labels, s.value
            if metric.type == 'gauge':
                without_pid = tuple(l for l in labels if l[0] != 'pid')
                if metric._multiprocess_mode == 'min':
                    current = samples.setdefault((name, without_pid), value)
                    if value < current:
                        samples[(s.name, without_pid)] = value
                elif metric._multiprocess_mode == 'max':
                    current = samples.setdefault((name, without_pid), value)
                    if value > current:
                        samples[(s.name, without_pid)] = value
                elif metric._multiprocess_mode == 'livesum':
                    samples[(name, without_pid)] += value
                else:
                    # all/liveall
                    samples[(name, labels)] = value
            elif metric.type == 'histogram':
                bucket = tuple(float(l[1]) for l in labels if l[0] == 'le')
                if bucket:
                    # _bucket
                    without_le = tuple(l for l in labels if l[0] != 'le')
                    buckets.setdefault(without_le, {})
                    buckets[without_le].setdefault(bucket[0], 0.0)
                    buckets[without_le][bucket[0]] += value
                else:
                    # _sum/_count
                    samples[(s.name, labels)] += value
            else:
                # Counter and Summary.
                samples[(s.name, labels)] += value

        # Accumulate bucket values.
        if metric.type == 'histogram':
            for labels, values in buckets.items():
                acc = 0.0
                for bucket, value in sorted(values.items()):
                    sample_key = (
                        metric.name + '_bucket',
                        labels + (('le', floatToGoString(bucket)),),
                    )
                    if accumulate:
                        acc += value
                        samples[sample_key] = acc
                    else:
                        samples[sample_key] = value
                if accumulate:
                    samples[(metric.name + '_count', labels)] = acc

        # Convert to correct sample format.
        metric.samples = [Sample(name_, dict(labels), value)
                          for (name_, labels), value in samples.items()]
    return metrics.values()
[ "def", "merge", "(", "files", ",", "accumulate", "=", "True", ")", ":", "metrics", "=", "{", "}", "for", "f", "in", "files", ":", "parts", "=", "os", ".", "path", ".", "basename", "(", "f", ")", ".", "split", "(", "'_'", ")", "typ", "=", "parts", "[", "0", "]", "d", "=", "MmapedDict", "(", "f", ",", "read_mode", "=", "True", ")", "for", "key", ",", "value", "in", "d", ".", "read_all_values", "(", ")", ":", "metric_name", ",", "name", ",", "labels", "=", "json", ".", "loads", "(", "key", ")", "labels_key", "=", "tuple", "(", "sorted", "(", "labels", ".", "items", "(", ")", ")", ")", "metric", "=", "metrics", ".", "get", "(", "metric_name", ")", "if", "metric", "is", "None", ":", "metric", "=", "Metric", "(", "metric_name", ",", "'Multiprocess metric'", ",", "typ", ")", "metrics", "[", "metric_name", "]", "=", "metric", "if", "typ", "==", "'gauge'", ":", "pid", "=", "parts", "[", "2", "]", "[", ":", "-", "3", "]", "metric", ".", "_multiprocess_mode", "=", "parts", "[", "1", "]", "metric", ".", "add_sample", "(", "name", ",", "labels_key", "+", "(", "(", "'pid'", ",", "pid", ")", ",", ")", ",", "value", ")", "else", ":", "# The duplicates and labels are fixed in the next for.", "metric", ".", "add_sample", "(", "name", ",", "labels_key", ",", "value", ")", "d", ".", "close", "(", ")", "for", "metric", "in", "metrics", ".", "values", "(", ")", ":", "samples", "=", "defaultdict", "(", "float", ")", "buckets", "=", "{", "}", "for", "s", "in", "metric", ".", "samples", ":", "name", ",", "labels", ",", "value", "=", "s", ".", "name", ",", "s", ".", "labels", ",", "s", ".", "value", "if", "metric", ".", "type", "==", "'gauge'", ":", "without_pid", "=", "tuple", "(", "l", "for", "l", "in", "labels", "if", "l", "[", "0", "]", "!=", "'pid'", ")", "if", "metric", ".", "_multiprocess_mode", "==", "'min'", ":", "current", "=", "samples", ".", "setdefault", "(", "(", "name", ",", "without_pid", ")", ",", "value", ")", "if", "value", "<", "current", ":", "samples", "[", "(", "s", ".", "name", ",", "without_pid", ")", "]", "=", "value", "elif", "metric", ".", "_multiprocess_mode", "==", "'max'", ":", "current", "=", "samples", ".", "setdefault", "(", "(", "name", ",", "without_pid", ")", ",", "value", ")", "if", "value", ">", "current", ":", "samples", "[", "(", "s", ".", "name", ",", "without_pid", ")", "]", "=", "value", "elif", "metric", ".", "_multiprocess_mode", "==", "'livesum'", ":", "samples", "[", "(", "name", ",", "without_pid", ")", "]", "+=", "value", "else", ":", "# all/liveall", "samples", "[", "(", "name", ",", "labels", ")", "]", "=", "value", "elif", "metric", ".", "type", "==", "'histogram'", ":", "bucket", "=", "tuple", "(", "float", "(", "l", "[", "1", "]", ")", "for", "l", "in", "labels", "if", "l", "[", "0", "]", "==", "'le'", ")", "if", "bucket", ":", "# _bucket", "without_le", "=", "tuple", "(", "l", "for", "l", "in", "labels", "if", "l", "[", "0", "]", "!=", "'le'", ")", "buckets", ".", "setdefault", "(", "without_le", ",", "{", "}", ")", "buckets", "[", "without_le", "]", ".", "setdefault", "(", "bucket", "[", "0", "]", ",", "0.0", ")", "buckets", "[", "without_le", "]", "[", "bucket", "[", "0", "]", "]", "+=", "value", "else", ":", "# _sum/_count", "samples", "[", "(", "s", ".", "name", ",", "labels", ")", "]", "+=", "value", "else", ":", "# Counter and Summary.", "samples", "[", "(", "s", ".", "name", ",", "labels", ")", "]", "+=", "value", "# Accumulate bucket values.", "if", "metric", ".", "type", "==", "'histogram'", ":", "for", "labels", ",", "values", "in", "buckets", ".", 
"items", "(", ")", ":", "acc", "=", "0.0", "for", "bucket", ",", "value", "in", "sorted", "(", "values", ".", "items", "(", ")", ")", ":", "sample_key", "=", "(", "metric", ".", "name", "+", "'_bucket'", ",", "labels", "+", "(", "(", "'le'", ",", "floatToGoString", "(", "bucket", ")", ")", ",", ")", ",", ")", "if", "accumulate", ":", "acc", "+=", "value", "samples", "[", "sample_key", "]", "=", "acc", "else", ":", "samples", "[", "sample_key", "]", "=", "value", "if", "accumulate", ":", "samples", "[", "(", "metric", ".", "name", "+", "'_count'", ",", "labels", ")", "]", "=", "acc", "# Convert to correct sample format.", "metric", ".", "samples", "=", "[", "Sample", "(", "name_", ",", "dict", "(", "labels", ")", ",", "value", ")", "for", "(", "name_", ",", "labels", ")", ",", "value", "in", "samples", ".", "items", "(", ")", "]", "return", "metrics", ".", "values", "(", ")" ]
Merge metrics from given mmap files. By default, histograms are accumulated, as per prometheus wire format. But if writing the merged data back to mmap files, use accumulate=False to avoid compound accumulation.
[ "Merge", "metrics", "from", "given", "mmap", "files", "." ]
python
train
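Editorial note (not part of the original record): a hedged sketch of calling the merge() function above directly. The usual entry point in prometheus_client is the multiprocess collector; merge() itself just takes a list of the per-process mmap files, as its signature shows. The directory path, environment-variable name, and ``*.db`` glob pattern are assumptions about the on-disk layout, not guarantees.

import glob
import os

from prometheus_client.multiprocess import merge  # path suggested by the record

# Directory holding the per-process mmap files (assumed; normally whatever
# the prometheus multiprocess directory setting points at).
multiproc_dir = os.environ.get('prometheus_multiproc_dir', '/tmp/prom_multiproc')
files = glob.glob(os.path.join(multiproc_dir, '*.db'))

# Accumulated histograms, suitable for exposition in the wire format.
for metric in merge(files, accumulate=True):
    print(metric.name, metric.type, len(metric.samples))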
partofthething/ace
ace/validation/validate_smoothers.py
https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/validation/validate_smoothers.py#L77-L92
def validate_average_best_span():
    """Figure 2d? from Friedman."""
    N = 200
    num_trials = 400
    avg = numpy.zeros(N)
    for i in range(num_trials):
        x, y = smoother_friedman82.build_sample_smoother_problem_friedman82(N=N)
        my_smoother = smoother.perform_smooth(
            x, y, smoother_cls=supersmoother.SuperSmoother
        )
        avg += my_smoother._smoothed_best_spans.smooth_result
        if not (i + 1) % 20:
            print(i + 1)
    avg /= num_trials
    plt.plot(my_smoother.x, avg, '.', label='Average JCV')
    finish_plot()
[ "def", "validate_average_best_span", "(", ")", ":", "N", "=", "200", "num_trials", "=", "400", "avg", "=", "numpy", ".", "zeros", "(", "N", ")", "for", "i", "in", "range", "(", "num_trials", ")", ":", "x", ",", "y", "=", "smoother_friedman82", ".", "build_sample_smoother_problem_friedman82", "(", "N", "=", "N", ")", "my_smoother", "=", "smoother", ".", "perform_smooth", "(", "x", ",", "y", ",", "smoother_cls", "=", "supersmoother", ".", "SuperSmoother", ")", "avg", "+=", "my_smoother", ".", "_smoothed_best_spans", ".", "smooth_result", "if", "not", "(", "i", "+", "1", ")", "%", "20", ":", "print", "(", "i", "+", "1", ")", "avg", "/=", "num_trials", "plt", ".", "plot", "(", "my_smoother", ".", "x", ",", "avg", ",", "'.'", ",", "label", "=", "'Average JCV'", ")", "finish_plot", "(", ")" ]
Figure 2d? from Friedman.
[ "Figure", "2d?", "from", "Friedman", "." ]
python
train
Parallels/artifactory
artifactory.py
https://github.com/Parallels/artifactory/blob/09ddcc4ae15095eec2347d39774c3f8aca6c4654/artifactory.py#L420-L426
def rest_put(self, url, params=None, headers=None, auth=None, verify=True, cert=None):
    """
    Perform a PUT request to url with optional authentication
    """
    res = requests.put(url, params=params, headers=headers, auth=auth,
                       verify=verify, cert=cert)
    return res.text, res.status_code
[ "def", "rest_put", "(", "self", ",", "url", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "auth", "=", "None", ",", "verify", "=", "True", ",", "cert", "=", "None", ")", ":", "res", "=", "requests", ".", "put", "(", "url", ",", "params", "=", "params", ",", "headers", "=", "headers", ",", "auth", "=", "auth", ",", "verify", "=", "verify", ",", "cert", "=", "cert", ")", "return", "res", ".", "text", ",", "res", ".", "status_code" ]
Perform a PUT request to url with optional authentication
[ "Perform", "a", "PUT", "request", "to", "url", "with", "optional", "authentication" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L405-L421
def continuousGenerator(self, request):
    """
    Returns a generator over the (continuous, nextPageToken) pairs
    defined by the (JSON string) request.
    """
    compoundId = None
    if request.continuous_set_id != "":
        compoundId = datamodel.ContinuousSetCompoundId.parse(
            request.continuous_set_id)
    if compoundId is None:
        raise exceptions.ContinuousSetNotSpecifiedException()

    dataset = self.getDataRepository().getDataset(
        compoundId.dataset_id)
    continuousSet = dataset.getContinuousSet(request.continuous_set_id)
    iterator = paging.ContinuousIterator(request, continuousSet)
    return iterator
[ "def", "continuousGenerator", "(", "self", ",", "request", ")", ":", "compoundId", "=", "None", "if", "request", ".", "continuous_set_id", "!=", "\"\"", ":", "compoundId", "=", "datamodel", ".", "ContinuousSetCompoundId", ".", "parse", "(", "request", ".", "continuous_set_id", ")", "if", "compoundId", "is", "None", ":", "raise", "exceptions", ".", "ContinuousSetNotSpecifiedException", "(", ")", "dataset", "=", "self", ".", "getDataRepository", "(", ")", ".", "getDataset", "(", "compoundId", ".", "dataset_id", ")", "continuousSet", "=", "dataset", ".", "getContinuousSet", "(", "request", ".", "continuous_set_id", ")", "iterator", "=", "paging", ".", "ContinuousIterator", "(", "request", ",", "continuousSet", ")", "return", "iterator" ]
Returns a generator over the (continuous, nextPageToken) pairs defined by the (JSON string) request.
[ "Returns", "a", "generator", "over", "the", "(", "continuous", "nextPageToken", ")", "pairs", "defined", "by", "the", "(", "JSON", "string", ")", "request", "." ]
python
train
cameronbwhite/Flask-CAS
flask_cas/routing.py
https://github.com/cameronbwhite/Flask-CAS/blob/f85173938654cb9b9316a5c869000b74b008422e/flask_cas/routing.py#L62-L87
def logout():
    """
    When the user accesses this route they are logged out.
    """

    cas_username_session_key = current_app.config['CAS_USERNAME_SESSION_KEY']
    cas_attributes_session_key = current_app.config['CAS_ATTRIBUTES_SESSION_KEY']

    if cas_username_session_key in flask.session:
        del flask.session[cas_username_session_key]

    if cas_attributes_session_key in flask.session:
        del flask.session[cas_attributes_session_key]

    if (current_app.config['CAS_AFTER_LOGOUT'] is not None):
        redirect_url = create_cas_logout_url(
            current_app.config['CAS_SERVER'],
            current_app.config['CAS_LOGOUT_ROUTE'],
            current_app.config['CAS_AFTER_LOGOUT'])
    else:
        redirect_url = create_cas_logout_url(
            current_app.config['CAS_SERVER'],
            current_app.config['CAS_LOGOUT_ROUTE'])

    current_app.logger.debug('Redirecting to: {0}'.format(redirect_url))

    return flask.redirect(redirect_url)
[ "def", "logout", "(", ")", ":", "cas_username_session_key", "=", "current_app", ".", "config", "[", "'CAS_USERNAME_SESSION_KEY'", "]", "cas_attributes_session_key", "=", "current_app", ".", "config", "[", "'CAS_ATTRIBUTES_SESSION_KEY'", "]", "if", "cas_username_session_key", "in", "flask", ".", "session", ":", "del", "flask", ".", "session", "[", "cas_username_session_key", "]", "if", "cas_attributes_session_key", "in", "flask", ".", "session", ":", "del", "flask", ".", "session", "[", "cas_attributes_session_key", "]", "if", "(", "current_app", ".", "config", "[", "'CAS_AFTER_LOGOUT'", "]", "is", "not", "None", ")", ":", "redirect_url", "=", "create_cas_logout_url", "(", "current_app", ".", "config", "[", "'CAS_SERVER'", "]", ",", "current_app", ".", "config", "[", "'CAS_LOGOUT_ROUTE'", "]", ",", "current_app", ".", "config", "[", "'CAS_AFTER_LOGOUT'", "]", ")", "else", ":", "redirect_url", "=", "create_cas_logout_url", "(", "current_app", ".", "config", "[", "'CAS_SERVER'", "]", ",", "current_app", ".", "config", "[", "'CAS_LOGOUT_ROUTE'", "]", ")", "current_app", ".", "logger", ".", "debug", "(", "'Redirecting to: {0}'", ".", "format", "(", "redirect_url", ")", ")", "return", "flask", ".", "redirect", "(", "redirect_url", ")" ]
When the user accesses this route they are logged out.
[ "When", "the", "user", "accesses", "this", "route", "they", "are", "logged", "out", "." ]
python
train
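Editorial note (not part of the original record): the logout route above is driven entirely by Flask configuration keys that appear in its code. A minimal configuration sketch follows; the values are placeholders and only the key names are taken from the record, with the extension assumed to be initialised elsewhere as Flask-CAS normally requires.

from flask import Flask

app = Flask(__name__)

# Values are placeholders; only the key names come from the record above.
app.config['CAS_SERVER'] = 'https://cas.example.com'
app.config['CAS_LOGOUT_ROUTE'] = '/cas/logout'
# After the CAS server logs the user out, send them back here:
app.config['CAS_AFTER_LOGOUT'] = 'https://app.example.com/'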
greenelab/PathCORE-T
pathcore/network.py
https://github.com/greenelab/PathCORE-T/blob/9d079d5ebffea2fe9fb9ab557588d51ad67d2c9c/pathcore/network.py#L59-L76
def connected_to(self, vertex_id):
    """
    Parameters
    -----------
    vertex_id : int
        Get what `vertex_id` is connected to.

    Returns
    -----------
    int|None, the vertex id connected to the input `vertex_id` in this
    edge, as long as `vertex_id` is one of the vertices connected by
    this edge.
    """
    if vertex_id not in self.edge:
        return None
    connected_to = (self.edge[1]
                    if self.edge[0] == vertex_id
                    else self.edge[0])
    return connected_to
[ "def", "connected_to", "(", "self", ",", "vertex_id", ")", ":", "if", "vertex_id", "not", "in", "self", ".", "edge", ":", "return", "None", "connected_to", "=", "(", "self", ".", "edge", "[", "1", "]", "if", "self", ".", "edge", "[", "0", "]", "==", "vertex_id", "else", "self", ".", "edge", "[", "0", "]", ")", "return", "connected_to" ]
Parameters
-----------
vertex_id : int
    Get what `vertex_id` is connected to.

Returns
-----------
int|None, the vertex id connected to the input `vertex_id` in this edge,
as long as `vertex_id` is one of the vertices connected by this edge.
[ "Parameters", "-----------", "vertex_id", ":", "int", "Get", "what", "vertex_id", "is", "connected", "to", "." ]
python
train
yaz/yaz
yaz/main.py
https://github.com/yaz/yaz/blob/48c842fe053bf9cd6446c4b33fb081c65339aa48/yaz/main.py#L11-L89
def main(argv=None, white_list=None, load_yaz_extension=True):
    """The entry point for a yaz script

    This will almost always be called from a python script in the
    following manner:

        if __name__ == "__main__":
            yaz.main()

    This function will perform the following steps:

    1. It will load any additional python code from the yaz_extension
       python module located in the ~/.yaz directory when
       LOAD_YAZ_EXTENSION is True and the yaz_extension module exists

    2. It collects all yaz tasks and plugins. When WHITE_LIST is a
       non-empty list, only the tasks and plugins located therein will
       be considered

    3. It will parse arguments from ARGV, or the command line when ARGV
       is not given, resulting in a yaz task or a parser help message.

    4. When a suitable task is found, this task is executed. In case of
       a task which is part of a plugin, i.e. class, then this plugin is
       initialized, possibly resulting in other plugins also being
       initialized if they are marked as `@yaz.dependency`.
    """
    assert argv is None or isinstance(argv, list), type(argv)
    assert white_list is None or isinstance(white_list, list), type(white_list)
    assert isinstance(load_yaz_extension, bool), type(load_yaz_extension)

    argv = sys.argv if argv is None else argv
    assert len(argv) > 0, len(argv)

    if load_yaz_extension:
        load("~/.yaz", "yaz_extension")

    parser = Parser(prog=argv[0])
    parser.add_task_tree(get_task_tree(white_list))

    task, kwargs = parser.parse_arguments(argv)
    if task:
        try:
            result = task(**kwargs)

            # when the result is a boolean, exit with 0 (success) or 1 (failure)
            if isinstance(result, bool):
                code = 0 if result else 1
                output = None

            # when the result is an integer, exit with that integer value
            elif isinstance(result, int):
                code = result % 256
                output = None

            # otherwise exit with 0 (success) and print the result
            else:
                code = 0
                output = result

        # when yaz.Error occurs, exit with the given return code and print the error message
        # when any other error occurs, let python handle the exception (i.e. exit(1) and print call stack)
        except Error as error:
            code = error.return_code
            output = error

    else:
        # when no task is found to execute, exit with 1 (failure) and print the help text
        code = 1
        output = parser.format_help().rstrip()

    if output is not None:
        print(output)

    sys.exit(code)
[ "def", "main", "(", "argv", "=", "None", ",", "white_list", "=", "None", ",", "load_yaz_extension", "=", "True", ")", ":", "assert", "argv", "is", "None", "or", "isinstance", "(", "argv", ",", "list", ")", ",", "type", "(", "argv", ")", "assert", "white_list", "is", "None", "or", "isinstance", "(", "white_list", ",", "list", ")", ",", "type", "(", "white_list", ")", "assert", "isinstance", "(", "load_yaz_extension", ",", "bool", ")", ",", "type", "(", "load_yaz_extension", ")", "argv", "=", "sys", ".", "argv", "if", "argv", "is", "None", "else", "argv", "assert", "len", "(", "argv", ")", ">", "0", ",", "len", "(", "argv", ")", "if", "load_yaz_extension", ":", "load", "(", "\"~/.yaz\"", ",", "\"yaz_extension\"", ")", "parser", "=", "Parser", "(", "prog", "=", "argv", "[", "0", "]", ")", "parser", ".", "add_task_tree", "(", "get_task_tree", "(", "white_list", ")", ")", "task", ",", "kwargs", "=", "parser", ".", "parse_arguments", "(", "argv", ")", "if", "task", ":", "try", ":", "result", "=", "task", "(", "*", "*", "kwargs", ")", "# when the result is a boolean, exit with 0 (success) or 1 (failure)", "if", "isinstance", "(", "result", ",", "bool", ")", ":", "code", "=", "0", "if", "result", "else", "1", "output", "=", "None", "# when the result is an integer, exit with that integer value", "elif", "isinstance", "(", "result", ",", "int", ")", ":", "code", "=", "result", "%", "256", "output", "=", "None", "# otherwise exit with 0 (success) and print the result", "else", ":", "code", "=", "0", "output", "=", "result", "# when yaz.Error occurs, exit with the given return code and print the error message", "# when any other error occurs, let python handle the exception (i.e. exit(1) and print call stack)", "except", "Error", "as", "error", ":", "code", "=", "error", ".", "return_code", "output", "=", "error", "else", ":", "# when no task is found to execute, exit with 1 (failure) and print the help text", "code", "=", "1", "output", "=", "parser", ".", "format_help", "(", ")", ".", "rstrip", "(", ")", "if", "output", "is", "not", "None", ":", "print", "(", "output", ")", "sys", ".", "exit", "(", "code", ")" ]
The entry point for a yaz script

This will almost always be called from a python script in the following manner:

    if __name__ == "__main__":
        yaz.main()

This function will perform the following steps:

1. It will load any additional python code from the yaz_extension python
   module located in the ~/.yaz directory when LOAD_YAZ_EXTENSION is True
   and the yaz_extension module exists

2. It collects all yaz tasks and plugins. When WHITE_LIST is a non-empty
   list, only the tasks and plugins located therein will be considered

3. It will parse arguments from ARGV, or the command line when ARGV is not
   given, resulting in a yaz task or a parser help message.

4. When a suitable task is found, this task is executed. In case of a task
   which is part of a plugin, i.e. class, then this plugin is initialized,
   possibly resulting in other plugins also being initialized if they are
   marked as `@yaz.dependency`.
[ "The", "entry", "point", "for", "a", "yaz", "script" ]
python
valid
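Editorial note (not part of the original record): a minimal invocation sketch matching the docstring above. It only uses main()'s documented parameters; how individual tasks are declared is not shown in this record, so that part is left out, and the white_list entry in the comment is a made-up name.

import yaz

if __name__ == "__main__":
    # Parse sys.argv, pick a task and run it; yaz.main() calls sys.exit() itself.
    yaz.main()

    # Alternatively, restrict the visible tasks and skip ~/.yaz/yaz_extension:
    # yaz.main(argv=["prog", "--help"], white_list=["my_plugin"], load_yaz_extension=False)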
abilian/abilian-core
abilian/services/audit/service.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/services/audit/service.py#L340-L374
def get_model_changes(
    entity_type, year=None, month=None, day=None, hour=None, since=None
):
    # type: (Text, int, int, int, int, datetime) -> Query
    """Get models modified at the given date with the Audit service.

    :param entity_type: string like "extranet_medicen.apps.crm.models.Compte".
        Beware the typo, there won't be a warning message.
    :param since: datetime
    :param year: int
    :param month: int
    :param day: int
    :param hour: int

    :returns: a query object
    """
    query = AuditEntry.query

    if since:
        query = query.filter(AuditEntry.happened_at >= since)

    if year:
        query = query.filter(extract("year", AuditEntry.happened_at) == year)
    if month:
        query = query.filter(extract("month", AuditEntry.happened_at) == month)
    if day:
        query = query.filter(extract("day", AuditEntry.happened_at) == day)
    if hour:
        query = query.filter(extract("hour", AuditEntry.happened_at) == hour)

    query = query.filter(AuditEntry.entity_type.like(entity_type)).order_by(
        AuditEntry.happened_at
    )

    return query
[ "def", "get_model_changes", "(", "entity_type", ",", "year", "=", "None", ",", "month", "=", "None", ",", "day", "=", "None", ",", "hour", "=", "None", ",", "since", "=", "None", ")", ":", "# type: (Text, int, int, int, int, datetime) -> Query", "query", "=", "AuditEntry", ".", "query", "if", "since", ":", "query", "=", "query", ".", "filter", "(", "AuditEntry", ".", "happened_at", ">=", "since", ")", "if", "year", ":", "query", "=", "query", ".", "filter", "(", "extract", "(", "\"year\"", ",", "AuditEntry", ".", "happened_at", ")", "==", "year", ")", "if", "month", ":", "query", "=", "query", ".", "filter", "(", "extract", "(", "\"month\"", ",", "AuditEntry", ".", "happened_at", ")", "==", "month", ")", "if", "day", ":", "query", "=", "query", ".", "filter", "(", "extract", "(", "\"day\"", ",", "AuditEntry", ".", "happened_at", ")", "==", "day", ")", "if", "hour", ":", "query", "=", "query", ".", "filter", "(", "extract", "(", "\"hour\"", ",", "AuditEntry", ".", "happened_at", ")", "==", "hour", ")", "query", "=", "query", ".", "filter", "(", "AuditEntry", ".", "entity_type", ".", "like", "(", "entity_type", ")", ")", ".", "order_by", "(", "AuditEntry", ".", "happened_at", ")", "return", "query" ]
Get models modified at the given date with the Audit service.

:param entity_type: string like "extranet_medicen.apps.crm.models.Compte".
    Beware the typo, there won't be a warning message.
:param since: datetime
:param year: int
:param month: int
:param day: int
:param hour: int

:returns: a query object
[ "Get", "models", "modified", "at", "the", "given", "date", "with", "the", "Audit", "service", "." ]
python
train
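Editorial note (not part of the original record): a short usage sketch for get_model_changes() above, using only its documented parameters. The entity-type string is a placeholder in the style of the docstring's own example, and the import path is inferred from the record's file path.

from datetime import datetime, timedelta

from abilian.services.audit.service import get_model_changes  # assumed path

# Audit entries for one model type over the last week.
query = get_model_changes(
    "extranet_medicen.apps.crm.models.Compte",  # placeholder entity type
    since=datetime.utcnow() - timedelta(days=7),
)

for entry in query.all():
    print(entry.happened_at, entry.entity_type)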
DataBiosphere/toil
src/toil/lib/docker.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/lib/docker.py#L477-L497
def containerIsRunning(container_name):
    """
    Checks whether the container is running or not.
    :param container_name: Name of the container being checked.
    :returns: True if status is 'running', False if status is anything else,
    and None if the container does not exist.
    """
    client = docker.from_env(version='auto')
    try:
        this_container = client.containers.get(container_name)
        if this_container.status == 'running':
            return True
        else:
            # this_container.status == 'exited', 'restarting', or 'paused'
            return False
    except NotFound:
        return None
    except requests.exceptions.HTTPError as e:
        logger.debug("Server error attempting to call container: ",
                     container_name)
        raise create_api_error_from_http_exception(e)
[ "def", "containerIsRunning", "(", "container_name", ")", ":", "client", "=", "docker", ".", "from_env", "(", "version", "=", "'auto'", ")", "try", ":", "this_container", "=", "client", ".", "containers", ".", "get", "(", "container_name", ")", "if", "this_container", ".", "status", "==", "'running'", ":", "return", "True", "else", ":", "# this_container.status == 'exited', 'restarting', or 'paused'", "return", "False", "except", "NotFound", ":", "return", "None", "except", "requests", ".", "exceptions", ".", "HTTPError", "as", "e", ":", "logger", ".", "debug", "(", "\"Server error attempting to call container: \"", ",", "container_name", ")", "raise", "create_api_error_from_http_exception", "(", "e", ")" ]
Checks whether the container is running or not. :param container_name: Name of the container being checked. :returns: True if status is 'running', False if status is anything else, and None if the container does not exist.
[ "Checks", "whether", "the", "container", "is", "running", "or", "not", ".", ":", "param", "container_name", ":", "Name", "of", "the", "container", "being", "checked", ".", ":", "returns", ":", "True", "if", "status", "is", "running", "False", "if", "status", "is", "anything", "else", "and", "None", "if", "the", "container", "does", "not", "exist", "." ]
python
train
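A possible way to call containerIsRunning() from the record above, assuming Toil is installed and a Docker daemon is reachable; the container name is a placeholder.

```python
from toil.lib.docker import containerIsRunning

status = containerIsRunning('my-worker-container')  # placeholder name
if status is None:
    print('container does not exist')
elif status:
    print('container is running')
else:
    print('container exists but is stopped, paused or restarting')
```

The three-way return value (True / False / None) is why the result is compared against None before being used as a boolean.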
softwarefactory-project/rdopkg
rdopkg/utils/specfile.py
https://github.com/softwarefactory-project/rdopkg/blob/2d2bed4e7cd329558a36d0dd404ec4ac8f9f254c/rdopkg/utils/specfile.py#L473-L483
def recognized_release(self): """ Check if this Release value is something we can parse. :rtype: bool """ _, _, rest = self.get_release_parts() # If "rest" is not a well-known value here, then this package is # using a Release value pattern we cannot recognize. if rest == '' or re.match(r'%{\??dist}', rest): return True return False
[ "def", "recognized_release", "(", "self", ")", ":", "_", ",", "_", ",", "rest", "=", "self", ".", "get_release_parts", "(", ")", "# If \"rest\" is not a well-known value here, then this package is", "# using a Release value pattern we cannot recognize.", "if", "rest", "==", "''", "or", "re", ".", "match", "(", "r'%{\\??dist}'", ",", "rest", ")", ":", "return", "True", "return", "False" ]
Check if this Release value is something we can parse. :rtype: bool
[ "Check", "if", "this", "Release", "value", "is", "something", "we", "can", "parse", ".", ":", "rtype", ":", "bool" ]
python
train
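The heart of recognized_release() is the check on whatever follows the numeric release components. A standalone sketch of that check with made-up suffixes (this is not the rdopkg API itself, which works on a parsed Specfile object):

```python
import re

def release_suffix_is_recognized(rest):
    # Same rule as in recognized_release(): the trailing part must be empty
    # or a %{dist} / %{?dist} macro.
    return rest == '' or bool(re.match(r'%{\??dist}', rest))

print(release_suffix_is_recognized('%{?dist}'))  # True
print(release_suffix_is_recognized(''))          # True
print(release_suffix_is_recognized('.el7.1'))    # False
```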
BlueBrain/NeuroM
neurom/check/morphtree.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/morphtree.py#L190-L202
def get_flat_neurites(neuron, tol=0.1, method='ratio'):
    '''Check if a neuron has neurites that are flat within a tolerance

    Args:
        neuron(Neuron): neuron to operate on
        tol(float): the tolerance or the ratio
        method(string): 'tolerance' or 'ratio' described in :meth:`is_flat`

    Returns:
        Bool list corresponding to the flatness check for each neurite
        in neuron neurites with respect to the given criteria
    '''
    return [n for n in neuron.neurites if is_flat(n, tol, method)]
[ "def", "get_flat_neurites", "(", "neuron", ",", "tol", "=", "0.1", ",", "method", "=", "'ratio'", ")", ":", "return", "[", "n", "for", "n", "in", "neuron", ".", "neurites", "if", "is_flat", "(", "n", ",", "tol", ",", "method", ")", "]" ]
Check if a neuron has neurites that are flat within a tolerance

    Args:
        neuron(Neuron): neuron to operate on
        tol(float): the tolerance or the ratio
        method(string): 'tolerance' or 'ratio' described in :meth:`is_flat`

    Returns:
        Bool list corresponding to the flatness check for each neurite
        in neuron neurites with respect to the given criteria
[ "Check", "if", "a", "neuron", "has", "neurites", "that", "are", "flat", "within", "a", "tolerance" ]
python
train
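A hedged usage sketch for get_flat_neurites(). It assumes NeuroM is installed, that the load_neuron helper from NeuroM releases of this vintage is available, and that 'cell.swc' is a morphology file on disk (the filename is a placeholder).

```python
import neurom
from neurom.check.morphtree import get_flat_neurites

neuron = neurom.load_neuron('cell.swc')          # placeholder file name
flat = get_flat_neurites(neuron, tol=0.1, method='ratio')
print('%d flat neurite(s) found' % len(flat))
```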
jazzband/django-analytical
analytical/templatetags/snapengage.py
https://github.com/jazzband/django-analytical/blob/5487fd677bd47bc63fc2cf39597a0adc5d6c9ab3/analytical/templatetags/snapengage.py#L56-L66
def snapengage(parser, token): """ SnapEngage set-up template tag. Renders Javascript code to set-up SnapEngage chat. You must supply your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting. """ bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError("'%s' takes no arguments" % bits[0]) return SnapEngageNode()
[ "def", "snapengage", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", ">", "1", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' takes no arguments\"", "%", "bits", "[", "0", "]", ")", "return", "SnapEngageNode", "(", ")" ]
SnapEngage set-up template tag. Renders Javascript code to set-up SnapEngage chat. You must supply your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.
[ "SnapEngage", "set", "-", "up", "template", "tag", "." ]
python
valid
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/apps/launcher.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/apps/launcher.py#L466-L469
def find_args(self): """Build self.args using all the fields.""" return self.mpi_cmd + ['-n', str(self.n)] + self.mpi_args + \ self.program + self.program_args
[ "def", "find_args", "(", "self", ")", ":", "return", "self", ".", "mpi_cmd", "+", "[", "'-n'", ",", "str", "(", "self", ".", "n", ")", "]", "+", "self", ".", "mpi_args", "+", "self", ".", "program", "+", "self", ".", "program_args" ]
Build self.args using all the fields.
[ "Build", "self", ".", "args", "using", "all", "the", "fields", "." ]
python
test
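find_args() only concatenates lists, so the command it builds can be illustrated without IPython.parallel at all; the fragments below are made up.

```python
# Stand-ins for the launcher's configurable fields.
mpi_cmd = ['mpiexec']
n = 4
mpi_args = ['--host', 'node1,node2']
program = ['python', 'worker.py']
program_args = ['--verbose']

args = mpi_cmd + ['-n', str(n)] + mpi_args + program + program_args
print(' '.join(args))
# mpiexec -n 4 --host node1,node2 python worker.py --verbose
```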
DataDog/integrations-core
datadog_checks_dev/datadog_checks/dev/tooling/utils.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_dev/datadog_checks/dev/tooling/utils.py#L138-L145
def load_manifest(check_name): """ Load the manifest file into a dictionary """ manifest_path = os.path.join(get_root(), check_name, 'manifest.json') if file_exists(manifest_path): return json.loads(read_file(manifest_path)) return {}
[ "def", "load_manifest", "(", "check_name", ")", ":", "manifest_path", "=", "os", ".", "path", ".", "join", "(", "get_root", "(", ")", ",", "check_name", ",", "'manifest.json'", ")", "if", "file_exists", "(", "manifest_path", ")", ":", "return", "json", ".", "loads", "(", "read_file", "(", "manifest_path", ")", ")", "return", "{", "}" ]
Load the manifest file into a dictionary
[ "Load", "the", "manifest", "file", "into", "a", "dictionary" ]
python
train
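load_manifest() leans on helpers from the datadog_checks_dev tooling (get_root, file_exists, read_file); the same read-JSON-or-return-empty-dict pattern can be sketched with the standard library only. The check name and key below are placeholders.

```python
import json
import os

def load_json_or_empty(path):
    # Missing file -> empty dict, mirroring load_manifest()'s fallback.
    if os.path.isfile(path):
        with open(path) as handle:
            return json.load(handle)
    return {}

manifest = load_json_or_empty(os.path.join('my_check', 'manifest.json'))
print(manifest.get('display_name', '<no manifest>'))
```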
grahambell/pymoc
lib/pymoc/moc.py
https://github.com/grahambell/pymoc/blob/0e2e57ce07ff3de6ac024627c1fb6ad30c2fde48/lib/pymoc/moc.py#L365-L381
def cells(self): """The number of cells in the MOC. This gives the total number of cells at all orders, with cells from every order counted equally. >>> m = MOC(0, (1, 2)) >>> m.cells 2 """ n = 0 for (order, cells) in self: n += len(cells) return n
[ "def", "cells", "(", "self", ")", ":", "n", "=", "0", "for", "(", "order", ",", "cells", ")", "in", "self", ":", "n", "+=", "len", "(", "cells", ")", "return", "n" ]
The number of cells in the MOC. This gives the total number of cells at all orders, with cells from every order counted equally. >>> m = MOC(0, (1, 2)) >>> m.cells 2
[ "The", "number", "of", "cells", "in", "the", "MOC", "." ]
python
train
yandex/yandex-tank
yandextank/plugins/InfluxUploader/decoder.py
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/InfluxUploader/decoder.py#L38-L57
def decode_monitoring(self, data): """ The reason why we have two separate methods for monitoring and aggregates is a strong difference in incoming data. """ points = list() for second_data in data: for host, host_data in second_data["data"].iteritems(): points.append( self.__make_points( "monitoring", {"host": host, "comment": host_data.get("comment")}, second_data["timestamp"], { metric: value for metric, value in host_data["metrics"].iteritems() } ) ) return points
[ "def", "decode_monitoring", "(", "self", ",", "data", ")", ":", "points", "=", "list", "(", ")", "for", "second_data", "in", "data", ":", "for", "host", ",", "host_data", "in", "second_data", "[", "\"data\"", "]", ".", "iteritems", "(", ")", ":", "points", ".", "append", "(", "self", ".", "__make_points", "(", "\"monitoring\"", ",", "{", "\"host\"", ":", "host", ",", "\"comment\"", ":", "host_data", ".", "get", "(", "\"comment\"", ")", "}", ",", "second_data", "[", "\"timestamp\"", "]", ",", "{", "metric", ":", "value", "for", "metric", ",", "value", "in", "host_data", "[", "\"metrics\"", "]", ".", "iteritems", "(", ")", "}", ")", ")", "return", "points" ]
The reason why we have two separate methods for monitoring and aggregates is a strong difference in incoming data.
[ "The", "reason", "why", "we", "have", "two", "separate", "methods", "for", "monitoring", "and", "aggregates", "is", "a", "strong", "difference", "in", "incoming", "data", "." ]
python
test
lpantano/seqcluster
seqcluster/libs/thinkbayes.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/thinkbayes.py#L1804-L1813
def PredictivePmf(self, xs, name=''): """Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x """ alpha0 = self.params.sum() ps = self.params / alpha0 return MakePmfFromItems(zip(xs, ps), name=name)
[ "def", "PredictivePmf", "(", "self", ",", "xs", ",", "name", "=", "''", ")", ":", "alpha0", "=", "self", ".", "params", ".", "sum", "(", ")", "ps", "=", "self", ".", "params", "/", "alpha0", "return", "MakePmfFromItems", "(", "zip", "(", "xs", ",", "ps", ")", ",", "name", "=", "name", ")" ]
Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf that maps from x to the mean prevalence of x
[ "Makes", "a", "predictive", "distribution", "." ]
python
train
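PredictivePmf() just normalises the Dirichlet parameters into probabilities, so the computation can be shown with plain numpy; the parameter values are made up, and the dict stands in for the Pmf object returned by MakePmfFromItems.

```python
import numpy

params = numpy.array([2.0, 5.0, 3.0])   # made-up Dirichlet parameters
xs = ['a', 'b', 'c']

ps = params / params.sum()              # mean prevalence of each value
predictive = dict(zip(xs, ps))          # stand-in for MakePmfFromItems(...)
print(predictive)                       # {'a': 0.2, 'b': 0.5, 'c': 0.3}
```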
scikit-learn-contrib/categorical-encoding
category_encoders/hashing.py
https://github.com/scikit-learn-contrib/categorical-encoding/blob/5e9e803c9131b377af305d5302723ba2415001da/category_encoders/hashing.py#L88-L134
def fit(self, X, y=None, **kwargs): """Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self. """ # first check the type X = util.convert_input(X) self._dim = X.shape[1] # if columns aren't passed, just use every string column if self.cols is None: self.cols = util.get_obj_cols(X) else: self.cols = util.convert_cols_to_list(self.cols) X_temp = self.transform(X, override_return_df=True) self.feature_names = X_temp.columns.tolist() # drop all output columns with 0 variance. if self.drop_invariant: self.drop_cols = [] generated_cols = util.get_generated_cols(X, X_temp, self.cols) self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5] try: [self.feature_names.remove(x) for x in self.drop_cols] except KeyError as e: if self.verbose > 0: print("Could not remove column from feature names." "Not found in generated cols.\n{}".format(e)) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# first check the type", "X", "=", "util", ".", "convert_input", "(", "X", ")", "self", ".", "_dim", "=", "X", ".", "shape", "[", "1", "]", "# if columns aren't passed, just use every string column", "if", "self", ".", "cols", "is", "None", ":", "self", ".", "cols", "=", "util", ".", "get_obj_cols", "(", "X", ")", "else", ":", "self", ".", "cols", "=", "util", ".", "convert_cols_to_list", "(", "self", ".", "cols", ")", "X_temp", "=", "self", ".", "transform", "(", "X", ",", "override_return_df", "=", "True", ")", "self", ".", "feature_names", "=", "X_temp", ".", "columns", ".", "tolist", "(", ")", "# drop all output columns with 0 variance.", "if", "self", ".", "drop_invariant", ":", "self", ".", "drop_cols", "=", "[", "]", "generated_cols", "=", "util", ".", "get_generated_cols", "(", "X", ",", "X_temp", ",", "self", ".", "cols", ")", "self", ".", "drop_cols", "=", "[", "x", "for", "x", "in", "generated_cols", "if", "X_temp", "[", "x", "]", ".", "var", "(", ")", "<=", "10e-5", "]", "try", ":", "[", "self", ".", "feature_names", ".", "remove", "(", "x", ")", "for", "x", "in", "self", ".", "drop_cols", "]", "except", "KeyError", "as", "e", ":", "if", "self", ".", "verbose", ">", "0", ":", "print", "(", "\"Could not remove column from feature names.\"", "\"Not found in generated cols.\\n{}\"", ".", "format", "(", "e", ")", ")", "return", "self" ]
Fit encoder according to X and y. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. Returns ------- self : encoder Returns self.
[ "Fit", "encoder", "according", "to", "X", "and", "y", "." ]
python
valid
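A hedged fit/transform round trip for the encoder whose fit() is shown above. It assumes pandas and category_encoders are installed; the toy frame and column name are invented.

```python
import pandas as pd
import category_encoders as ce

df = pd.DataFrame({'color': ['red', 'blue', 'red', 'green'],
                   'size': [1, 2, 3, 4]})

encoder = ce.HashingEncoder(cols=['color'])
encoder.fit(df)                  # the method shown in this record
encoded = encoder.transform(df)
print(encoded.head())
```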
onnx/onnxmltools
onnxmltools/convert/sparkml/ops_input_output.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/sparkml/ops_input_output.py#L13-L168
def build_io_name_map(): ''' map of spark models to input-output tuples Each lambda gets the corresponding input or output column name from the model ''' map = { "pyspark.ml.feature.BucketedRandomProjectionLSHModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.regression.AFTSurvivalRegressionModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol")] ), "pyspark.ml.feature.ElementwiseProduct": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.MinHashLSHModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.Word2VecModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.IndexToString": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.ChiSqSelectorModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.classification.OneVsRestModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol")] ), "pyspark.ml.regression.GBTRegressionModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol")] ), "pyspark.ml.classification.GBTClassificationModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol"), 'probability'] ), "pyspark.ml.feature.DCT": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.PCAModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.PolynomialExpansion": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.Tokenizer": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.classification.NaiveBayesModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol"), model.getOrDefault("probabilityCol")] ), "pyspark.ml.feature.VectorSlicer": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.StopWordsRemover": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.NGram": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.Bucketizer": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.regression.RandomForestRegressionModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol")] ), "pyspark.ml.classification.RandomForestClassificationModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol"), model.getOrDefault("probabilityCol")] ), "pyspark.ml.regression.DecisionTreeRegressionModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol")] ), "pyspark.ml.classification.DecisionTreeClassificationModel": ( lambda model: 
[model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol"), model.getOrDefault("probabilityCol")] ), "pyspark.ml.feature.VectorIndexerModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.regression.GeneralizedLinearRegressionModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol")] ), "pyspark.ml.regression.LinearRegressionModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol")] ), "pyspark.ml.feature.ImputerModel": ( lambda model: model.getOrDefault("inputCols"), lambda model: model.getOrDefault("outputCols") ), "pyspark.ml.feature.MaxAbsScalerModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.MinMaxScalerModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.StandardScalerModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.Normalizer": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.Binarizer": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.classification.LinearSVCModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol")] ), "pyspark.ml.classification.LogisticRegressionModel": ( lambda model: [model.getOrDefault("featuresCol")], lambda model: [model.getOrDefault("predictionCol"), model.getOrDefault("probabilityCol")] ), "pyspark.ml.feature.OneHotEncoderModel": ( lambda model: model.getOrDefault("inputCols"), lambda model: model.getOrDefault("outputCols") ), "pyspark.ml.feature.StringIndexerModel": ( lambda model: [model.getOrDefault("inputCol")], lambda model: [model.getOrDefault("outputCol")] ), "pyspark.ml.feature.VectorAssembler": ( lambda model: model.getOrDefault("inputCols"), lambda model: [model.getOrDefault("outputCol")] ) } return map
[ "def", "build_io_name_map", "(", ")", ":", "map", "=", "{", "\"pyspark.ml.feature.BucketedRandomProjectionLSHModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.regression.AFTSurvivalRegressionModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.ElementwiseProduct\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.MinHashLSHModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.Word2VecModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.IndexToString\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.ChiSqSelectorModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.classification.OneVsRestModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", "]", ")", ",", "\"pyspark.ml.regression.GBTRegressionModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", "]", ")", ",", "\"pyspark.ml.classification.GBTClassificationModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", ",", "'probability'", "]", ")", ",", "\"pyspark.ml.feature.DCT\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.PCAModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.PolynomialExpansion\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.Tokenizer\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", 
"model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.classification.NaiveBayesModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", ",", "model", ".", "getOrDefault", "(", "\"probabilityCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.VectorSlicer\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.StopWordsRemover\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.NGram\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.Bucketizer\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.regression.RandomForestRegressionModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", "]", ")", ",", "\"pyspark.ml.classification.RandomForestClassificationModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", ",", "model", ".", "getOrDefault", "(", "\"probabilityCol\"", ")", "]", ")", ",", "\"pyspark.ml.regression.DecisionTreeRegressionModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", "]", ")", ",", "\"pyspark.ml.classification.DecisionTreeClassificationModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", ",", "model", ".", "getOrDefault", "(", "\"probabilityCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.VectorIndexerModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.regression.GeneralizedLinearRegressionModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", "]", ")", ",", "\"pyspark.ml.regression.LinearRegressionModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.ImputerModel\"", ":", "(", "lambda", "model", ":", "model", ".", "getOrDefault", "(", "\"inputCols\"", ")", ",", "lambda", "model", ":", 
"model", ".", "getOrDefault", "(", "\"outputCols\"", ")", ")", ",", "\"pyspark.ml.feature.MaxAbsScalerModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.MinMaxScalerModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.StandardScalerModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.Normalizer\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.Binarizer\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.classification.LinearSVCModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", "]", ")", ",", "\"pyspark.ml.classification.LogisticRegressionModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"featuresCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"predictionCol\"", ")", ",", "model", ".", "getOrDefault", "(", "\"probabilityCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.OneHotEncoderModel\"", ":", "(", "lambda", "model", ":", "model", ".", "getOrDefault", "(", "\"inputCols\"", ")", ",", "lambda", "model", ":", "model", ".", "getOrDefault", "(", "\"outputCols\"", ")", ")", ",", "\"pyspark.ml.feature.StringIndexerModel\"", ":", "(", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"inputCol\"", ")", "]", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", ",", "\"pyspark.ml.feature.VectorAssembler\"", ":", "(", "lambda", "model", ":", "model", ".", "getOrDefault", "(", "\"inputCols\"", ")", ",", "lambda", "model", ":", "[", "model", ".", "getOrDefault", "(", "\"outputCol\"", ")", "]", ")", "}", "return", "map" ]
map of spark models to input-output tuples Each lambda gets the corresponding input or output column name from the model
[ "map", "of", "spark", "models", "to", "input", "-", "output", "tuples", "Each", "lambda", "gets", "the", "corresponding", "input", "or", "output", "column", "name", "from", "the", "model" ]
python
train
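The values in the map returned by build_io_name_map() are pairs of lambdas that only ever call getOrDefault() on the model, so they can be exercised with a tiny stand-in object instead of a real pyspark model:

```python
class FakeAssembler(object):
    # Minimal stand-in: only getOrDefault() is needed by the lambdas.
    def getOrDefault(self, param):
        return {'inputCols': ['f1', 'f2'], 'outputCol': 'features'}[param]

io_map = build_io_name_map()
get_inputs, get_outputs = io_map['pyspark.ml.feature.VectorAssembler']
print(get_inputs(FakeAssembler()))   # ['f1', 'f2']
print(get_outputs(FakeAssembler()))  # ['features']
```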
swharden/PyOriginTools
PyOriginTools/workbook.py
https://github.com/swharden/PyOriginTools/blob/536fb8e11234ffdc27e26b1800e0358179ca7d26/PyOriginTools/workbook.py#L81-L88
def onex(self): """ delete all X columns except the first one. """ xCols=[i for i in range(self.nCols) if self.colTypes[i]==3] if len(xCols)>1: for colI in xCols[1:][::-1]: self.colDelete(colI)
[ "def", "onex", "(", "self", ")", ":", "xCols", "=", "[", "i", "for", "i", "in", "range", "(", "self", ".", "nCols", ")", "if", "self", ".", "colTypes", "[", "i", "]", "==", "3", "]", "if", "len", "(", "xCols", ")", ">", "1", ":", "for", "colI", "in", "xCols", "[", "1", ":", "]", "[", ":", ":", "-", "1", "]", ":", "self", ".", "colDelete", "(", "colI", ")" ]
delete all X columns except the first one.
[ "delete", "all", "X", "columns", "except", "the", "first", "one", "." ]
python
train
gwastro/pycbc
pycbc/workflow/configuration.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/configuration.py#L948-L971
def has_option_tag(self, section, option, tag): """ Convenience function accessing has_option_tags() for a single tag: see documentation for that function. NB calling has_option_tags() directly is preferred for simplicity. Parameters ----------- self : ConfigParser object The ConfigParser object (automatically passed when this is appended to the ConfigParser class) section : string The section of the ConfigParser object to read option : string The ConfigParser option to look for tag : string The name of the subsection to look in, if not found in [section] Returns -------- Boolean Is the option in the section or [section-tag] """ return self.has_option_tags(section, option, [tag])
[ "def", "has_option_tag", "(", "self", ",", "section", ",", "option", ",", "tag", ")", ":", "return", "self", ".", "has_option_tags", "(", "section", ",", "option", ",", "[", "tag", "]", ")" ]
Convenience function accessing has_option_tags() for a single tag: see documentation for that function. NB calling has_option_tags() directly is preferred for simplicity. Parameters ----------- self : ConfigParser object The ConfigParser object (automatically passed when this is appended to the ConfigParser class) section : string The section of the ConfigParser object to read option : string The ConfigParser option to look for tag : string The name of the subsection to look in, if not found in [section] Returns -------- Boolean Is the option in the section or [section-tag]
[ "Convenience", "function", "accessing", "has_option_tags", "()", "for", "a", "single", "tag", ":", "see", "documentation", "for", "that", "function", ".", "NB", "calling", "has_option_tags", "()", "directly", "is", "preferred", "for", "simplicity", "." ]
python
train
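has_option_tag() defers to has_option_tags(), but the idea it exposes, looking in [section-tag] as well as plain [section], can be sketched with the standard library configparser. This is a conceptual stand-in rather than the PyCBC implementation, and the section and option names are placeholders.

```python
import configparser

def has_option_with_tag(cp, section, option, tag):
    # Check the tagged subsection first, then the plain section.
    for sec in ('%s-%s' % (section, tag), section):
        if cp.has_section(sec) and cp.has_option(sec, option):
            return True
    return False

cp = configparser.ConfigParser()
cp.read_string("[inspiral-bns]\nsnr-threshold = 5.5\n")
print(has_option_with_tag(cp, 'inspiral', 'snr-threshold', 'bns'))  # True
```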
mitsei/dlkit
dlkit/json_/relationship/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/relationship/sessions.py#L814-L856
def get_relationships_by_query(self, relationship_query): """Gets a list of ``Relationships`` matching the given relationship query. arg: relationship_query (osid.relationship.RelationshipQuery): the relationship query return: (osid.relationship.RelationshipList) - the returned ``RelationshipList`` raise: NullArgument - ``relationship_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``relationship_query`` is not of this service *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceQuerySession.get_resources_by_query and_list = list() or_list = list() for term in relationship_query._query_terms: if '$in' in relationship_query._query_terms[term] and '$nin' in relationship_query._query_terms[term]: and_list.append( {'$or': [{term: {'$in': relationship_query._query_terms[term]['$in']}}, {term: {'$nin': relationship_query._query_terms[term]['$nin']}}]}) else: and_list.append({term: relationship_query._query_terms[term]}) for term in relationship_query._keyword_terms: or_list.append({term: relationship_query._keyword_terms[term]}) if or_list: and_list.append({'$or': or_list}) view_filter = self._view_filter() if view_filter: and_list.append(view_filter) if and_list: query_terms = {'$and': and_list} collection = JSONClientValidated('relationship', collection='Relationship', runtime=self._runtime) result = collection.find(query_terms).sort('_id', DESCENDING) else: result = [] return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_relationships_by_query", "(", "self", ",", "relationship_query", ")", ":", "# Implemented from template for", "# osid.resource.ResourceQuerySession.get_resources_by_query", "and_list", "=", "list", "(", ")", "or_list", "=", "list", "(", ")", "for", "term", "in", "relationship_query", ".", "_query_terms", ":", "if", "'$in'", "in", "relationship_query", ".", "_query_terms", "[", "term", "]", "and", "'$nin'", "in", "relationship_query", ".", "_query_terms", "[", "term", "]", ":", "and_list", ".", "append", "(", "{", "'$or'", ":", "[", "{", "term", ":", "{", "'$in'", ":", "relationship_query", ".", "_query_terms", "[", "term", "]", "[", "'$in'", "]", "}", "}", ",", "{", "term", ":", "{", "'$nin'", ":", "relationship_query", ".", "_query_terms", "[", "term", "]", "[", "'$nin'", "]", "}", "}", "]", "}", ")", "else", ":", "and_list", ".", "append", "(", "{", "term", ":", "relationship_query", ".", "_query_terms", "[", "term", "]", "}", ")", "for", "term", "in", "relationship_query", ".", "_keyword_terms", ":", "or_list", ".", "append", "(", "{", "term", ":", "relationship_query", ".", "_keyword_terms", "[", "term", "]", "}", ")", "if", "or_list", ":", "and_list", ".", "append", "(", "{", "'$or'", ":", "or_list", "}", ")", "view_filter", "=", "self", ".", "_view_filter", "(", ")", "if", "view_filter", ":", "and_list", ".", "append", "(", "view_filter", ")", "if", "and_list", ":", "query_terms", "=", "{", "'$and'", ":", "and_list", "}", "collection", "=", "JSONClientValidated", "(", "'relationship'", ",", "collection", "=", "'Relationship'", ",", "runtime", "=", "self", ".", "_runtime", ")", "result", "=", "collection", ".", "find", "(", "query_terms", ")", ".", "sort", "(", "'_id'", ",", "DESCENDING", ")", "else", ":", "result", "=", "[", "]", "return", "objects", ".", "RelationshipList", "(", "result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets a list of ``Relationships`` matching the given relationship query. arg: relationship_query (osid.relationship.RelationshipQuery): the relationship query return: (osid.relationship.RelationshipList) - the returned ``RelationshipList`` raise: NullArgument - ``relationship_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``relationship_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "list", "of", "Relationships", "matching", "the", "given", "relationship", "query", "." ]
python
train
openstack/networking-arista
networking_arista/ml2/rpc/arista_eapi.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/rpc/arista_eapi.py#L313-L337
def get_host_physnet(self, context): """Returns dictionary which contains physical topology information for a given host_id """ host_id = utils.hostname(context.host) cmd = ['show network physical-topology neighbors'] try: response = self._run_eos_cmds(cmd) # Get response for 'show network physical-topology neighbors' # command neighbors = response[0]['neighbors'] for neighbor in neighbors: if host_id in neighbor: physnet = neighbors[neighbor]['toPort'][0]['hostname'] LOG.debug("get_physical_network: Physical Network for " "%(host)s is %(physnet)s", {'host': host_id, 'physnet': physnet}) return physnet LOG.debug("Physical network not found for %(host)s", {'host': host_id}) except Exception as exc: LOG.error(_LE('command %(cmd)s failed with ' '%(exc)s'), {'cmd': cmd, 'exc': exc}) return None
[ "def", "get_host_physnet", "(", "self", ",", "context", ")", ":", "host_id", "=", "utils", ".", "hostname", "(", "context", ".", "host", ")", "cmd", "=", "[", "'show network physical-topology neighbors'", "]", "try", ":", "response", "=", "self", ".", "_run_eos_cmds", "(", "cmd", ")", "# Get response for 'show network physical-topology neighbors'", "# command", "neighbors", "=", "response", "[", "0", "]", "[", "'neighbors'", "]", "for", "neighbor", "in", "neighbors", ":", "if", "host_id", "in", "neighbor", ":", "physnet", "=", "neighbors", "[", "neighbor", "]", "[", "'toPort'", "]", "[", "0", "]", "[", "'hostname'", "]", "LOG", ".", "debug", "(", "\"get_physical_network: Physical Network for \"", "\"%(host)s is %(physnet)s\"", ",", "{", "'host'", ":", "host_id", ",", "'physnet'", ":", "physnet", "}", ")", "return", "physnet", "LOG", ".", "debug", "(", "\"Physical network not found for %(host)s\"", ",", "{", "'host'", ":", "host_id", "}", ")", "except", "Exception", "as", "exc", ":", "LOG", ".", "error", "(", "_LE", "(", "'command %(cmd)s failed with '", "'%(exc)s'", ")", ",", "{", "'cmd'", ":", "cmd", ",", "'exc'", ":", "exc", "}", ")", "return", "None" ]
Returns dictionary which contains physical topology information for a given host_id
[ "Returns", "dictionary", "which", "contains", "physical", "topology", "information" ]
python
train
hubo1016/vlcp
vlcp/event/matchtree.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/matchtree.py#L125-L133
def matchesWithMatchers(self, event): ''' Return all matches for this event. The first matcher is also returned for each matched object. :param event: an input event ''' ret = [] self._matches(event, set(), ret) return tuple(ret)
[ "def", "matchesWithMatchers", "(", "self", ",", "event", ")", ":", "ret", "=", "[", "]", "self", ".", "_matches", "(", "event", ",", "set", "(", ")", ",", "ret", ")", "return", "tuple", "(", "ret", ")" ]
Return all matches for this event. The first matcher is also returned for each matched object. :param event: an input event
[ "Return", "all", "matches", "for", "this", "event", ".", "The", "first", "matcher", "is", "also", "returned", "for", "each", "matched", "object", ".", ":", "param", "event", ":", "an", "input", "event" ]
python
train
cloudera/cm_api
python/src/cm_api/endpoints/clusters.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/clusters.py#L163-L170
def delete_service(self, name): """ Delete a service by name. @param name: Service name @return: The deleted ApiService object """ return services.delete_service(self._get_resource_root(), name, self.name)
[ "def", "delete_service", "(", "self", ",", "name", ")", ":", "return", "services", ".", "delete_service", "(", "self", ".", "_get_resource_root", "(", ")", ",", "name", ",", "self", ".", "name", ")" ]
Delete a service by name. @param name: Service name @return: The deleted ApiService object
[ "Delete", "a", "service", "by", "name", "." ]
python
train
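A hedged usage sketch for delete_service(); the host, credentials, cluster name and service name are all placeholders, and a reachable Cloudera Manager instance is assumed.

```python
from cm_api.api_client import ApiResource

api = ApiResource('cm-host.example.com', username='admin', password='admin')
cluster = api.get_cluster('Cluster 1')
deleted = cluster.delete_service('spark_on_yarn')   # the method shown above
print(deleted.name)
```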
brocade/pynos
pynos/versions/base/interface.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L780-L863
def pvlan_host_association(self, **kwargs): """Set interface PVLAN association. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet, etc) name (str): Name of interface. (1/0/5, 1/0/10, etc) pri_vlan (str): The primary PVLAN. sec_vlan (str): The secondary PVLAN. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan` is not specified. ValueError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan` is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> int_type = 'tengigabitethernet' >>> name = '225/0/38' >>> pri_vlan = '75' >>> sec_vlan = '100' >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.private_vlan_type(name=pri_vlan, ... pvlan_type='primary') ... output = dev.interface.private_vlan_type(name=sec_vlan, ... pvlan_type='isolated') ... output = dev.interface.vlan_pvlan_association_add( ... name=pri_vlan, sec_vlan=sec_vlan) ... output = dev.interface.enable_switchport(int_type, ... name) ... output = dev.interface.private_vlan_mode( ... int_type=int_type, name=name, mode='host') ... output = dev.interface.pvlan_host_association( ... int_type=int_type, name=name, pri_vlan=pri_vlan, ... sec_vlan=sec_vlan) ... dev.interface.pvlan_host_association() ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError """ int_type = kwargs.pop('int_type').lower() name = kwargs.pop('name') pri_vlan = kwargs.pop('pri_vlan') sec_vlan = kwargs.pop('sec_vlan') callback = kwargs.pop('callback', self._callback) int_types = ['gigabitethernet', 'tengigabitethernet', 'fortygigabitethernet', 'hundredgigabitethernet', 'port_channel'] if int_type not in int_types: raise ValueError("Incorrect int_type value.") if not pynos.utilities.valid_interface(int_type, name): raise ValueError('`name` must be in the format of x/y/z for ' 'physical interfaces or x for port channel.') if not pynos.utilities.valid_vlan_id(pri_vlan): raise InvalidVlanId("`sec_vlan` must be between `1` and `4095`.") if not pynos.utilities.valid_vlan_id(sec_vlan): raise InvalidVlanId("`sec_vlan` must be between `1` and `4095`.") pvlan_args = dict(name=name, host_pri_pvlan=pri_vlan) associate_pvlan = getattr(self._interface, 'interface_%s_switchport_private_vlan_' 'host_association_host_pri_pvlan' % int_type) config = associate_pvlan(**pvlan_args) sec_assoc = config.find('.//*host-association') sec_assoc = ET.SubElement(sec_assoc, 'host-sec-pvlan') sec_assoc.text = sec_vlan return callback(config)
[ "def", "pvlan_host_association", "(", "self", ",", "*", "*", "kwargs", ")", ":", "int_type", "=", "kwargs", ".", "pop", "(", "'int_type'", ")", ".", "lower", "(", ")", "name", "=", "kwargs", ".", "pop", "(", "'name'", ")", "pri_vlan", "=", "kwargs", ".", "pop", "(", "'pri_vlan'", ")", "sec_vlan", "=", "kwargs", ".", "pop", "(", "'sec_vlan'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "int_types", "=", "[", "'gigabitethernet'", ",", "'tengigabitethernet'", ",", "'fortygigabitethernet'", ",", "'hundredgigabitethernet'", ",", "'port_channel'", "]", "if", "int_type", "not", "in", "int_types", ":", "raise", "ValueError", "(", "\"Incorrect int_type value.\"", ")", "if", "not", "pynos", ".", "utilities", ".", "valid_interface", "(", "int_type", ",", "name", ")", ":", "raise", "ValueError", "(", "'`name` must be in the format of x/y/z for '", "'physical interfaces or x for port channel.'", ")", "if", "not", "pynos", ".", "utilities", ".", "valid_vlan_id", "(", "pri_vlan", ")", ":", "raise", "InvalidVlanId", "(", "\"`sec_vlan` must be between `1` and `4095`.\"", ")", "if", "not", "pynos", ".", "utilities", ".", "valid_vlan_id", "(", "sec_vlan", ")", ":", "raise", "InvalidVlanId", "(", "\"`sec_vlan` must be between `1` and `4095`.\"", ")", "pvlan_args", "=", "dict", "(", "name", "=", "name", ",", "host_pri_pvlan", "=", "pri_vlan", ")", "associate_pvlan", "=", "getattr", "(", "self", ".", "_interface", ",", "'interface_%s_switchport_private_vlan_'", "'host_association_host_pri_pvlan'", "%", "int_type", ")", "config", "=", "associate_pvlan", "(", "*", "*", "pvlan_args", ")", "sec_assoc", "=", "config", ".", "find", "(", "'.//*host-association'", ")", "sec_assoc", "=", "ET", ".", "SubElement", "(", "sec_assoc", ",", "'host-sec-pvlan'", ")", "sec_assoc", ".", "text", "=", "sec_vlan", "return", "callback", "(", "config", ")" ]
Set interface PVLAN association. Args: int_type (str): Type of interface. (gigabitethernet, tengigabitethernet, etc) name (str): Name of interface. (1/0/5, 1/0/10, etc) pri_vlan (str): The primary PVLAN. sec_vlan (str): The secondary PVLAN. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan` is not specified. ValueError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan` is invalid. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> int_type = 'tengigabitethernet' >>> name = '225/0/38' >>> pri_vlan = '75' >>> sec_vlan = '100' >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.private_vlan_type(name=pri_vlan, ... pvlan_type='primary') ... output = dev.interface.private_vlan_type(name=sec_vlan, ... pvlan_type='isolated') ... output = dev.interface.vlan_pvlan_association_add( ... name=pri_vlan, sec_vlan=sec_vlan) ... output = dev.interface.enable_switchport(int_type, ... name) ... output = dev.interface.private_vlan_mode( ... int_type=int_type, name=name, mode='host') ... output = dev.interface.pvlan_host_association( ... int_type=int_type, name=name, pri_vlan=pri_vlan, ... sec_vlan=sec_vlan) ... dev.interface.pvlan_host_association() ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError
[ "Set", "interface", "PVLAN", "association", "." ]
python
train
mitsei/dlkit
dlkit/json_/logging_/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/logging_/sessions.py#L2490-L2509
def is_child_of_log(self, id_, log_id): """Tests if an ``Id`` is a direct child of a log. arg: id (osid.id.Id): an ``Id`` arg: log_id (osid.id.Id): the ``Id`` of a log return: (boolean) - ``true`` if this ``id`` is a child of ``log_id,`` ``false`` otherwise raise: NotFound - ``log_id`` is not found raise: NullArgument - ``id`` or ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``. """ # Implemented from template for # osid.resource.BinHierarchySession.is_child_of_bin if self._catalog_session is not None: return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=log_id) return self._hierarchy_session.is_child(id_=log_id, child_id=id_)
[ "def", "is_child_of_log", "(", "self", ",", "id_", ",", "log_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.is_child_of_bin", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "is_child_of_catalog", "(", "id_", "=", "id_", ",", "catalog_id", "=", "log_id", ")", "return", "self", ".", "_hierarchy_session", ".", "is_child", "(", "id_", "=", "log_id", ",", "child_id", "=", "id_", ")" ]
Tests if an ``Id`` is a direct child of a log. arg: id (osid.id.Id): an ``Id`` arg: log_id (osid.id.Id): the ``Id`` of a log return: (boolean) - ``true`` if this ``id`` is a child of ``log_id,`` ``false`` otherwise raise: NotFound - ``log_id`` is not found raise: NullArgument - ``id`` or ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
[ "Tests", "if", "an", "Id", "is", "a", "direct", "child", "of", "a", "log", "." ]
python
train
Microsoft/knack
knack/prompting.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/prompting.py#L99-L131
def prompt_choice_list(msg, a_list, default=1, help_string=None): """Prompt user to select from a list of possible choices. :param msg:A message displayed to the user before the choice list :type msg: str :param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc') "type a_list: list :param default:The default option that should be chosen if user doesn't enter a choice :type default: int :returns: The list index of the item chosen. """ verify_is_a_tty() options = '\n'.join([' [{}] {}{}' .format(i + 1, x['name'] if isinstance(x, dict) and 'name' in x else x, ' - ' + x['desc'] if isinstance(x, dict) and 'desc' in x else '') for i, x in enumerate(a_list)]) allowed_vals = list(range(1, len(a_list) + 1)) while True: val = _input('{}\n{}\nPlease enter a choice [Default choice({})]: '.format(msg, options, default)) if val == '?' and help_string is not None: print(help_string) continue if not val: val = '{}'.format(default) try: ans = int(val) if ans in allowed_vals: # array index is 0-based, user input is 1-based return ans - 1 raise ValueError except ValueError: logger.warning('Valid values are %s', allowed_vals)
[ "def", "prompt_choice_list", "(", "msg", ",", "a_list", ",", "default", "=", "1", ",", "help_string", "=", "None", ")", ":", "verify_is_a_tty", "(", ")", "options", "=", "'\\n'", ".", "join", "(", "[", "' [{}] {}{}'", ".", "format", "(", "i", "+", "1", ",", "x", "[", "'name'", "]", "if", "isinstance", "(", "x", ",", "dict", ")", "and", "'name'", "in", "x", "else", "x", ",", "' - '", "+", "x", "[", "'desc'", "]", "if", "isinstance", "(", "x", ",", "dict", ")", "and", "'desc'", "in", "x", "else", "''", ")", "for", "i", ",", "x", "in", "enumerate", "(", "a_list", ")", "]", ")", "allowed_vals", "=", "list", "(", "range", "(", "1", ",", "len", "(", "a_list", ")", "+", "1", ")", ")", "while", "True", ":", "val", "=", "_input", "(", "'{}\\n{}\\nPlease enter a choice [Default choice({})]: '", ".", "format", "(", "msg", ",", "options", ",", "default", ")", ")", "if", "val", "==", "'?'", "and", "help_string", "is", "not", "None", ":", "print", "(", "help_string", ")", "continue", "if", "not", "val", ":", "val", "=", "'{}'", ".", "format", "(", "default", ")", "try", ":", "ans", "=", "int", "(", "val", ")", "if", "ans", "in", "allowed_vals", ":", "# array index is 0-based, user input is 1-based", "return", "ans", "-", "1", "raise", "ValueError", "except", "ValueError", ":", "logger", ".", "warning", "(", "'Valid values are %s'", ",", "allowed_vals", ")" ]
Prompt user to select from a list of possible choices. :param msg:A message displayed to the user before the choice list :type msg: str :param a_list:The list of choices (list of strings or list of dicts with 'name' & 'desc') "type a_list: list :param default:The default option that should be chosen if user doesn't enter a choice :type default: int :returns: The list index of the item chosen.
[ "Prompt", "user", "to", "select", "from", "a", "list", "of", "possible", "choices", "." ]
python
train
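A hedged interactive example for prompt_choice_list(); it needs a real terminal (verify_is_a_tty() raises otherwise), and the message and choices are invented. Entries can be plain strings or dicts with 'name' and 'desc' keys, matching the branches in the code above.

```python
from knack.prompting import prompt_choice_list

choices = [
    'small',
    {'name': 'medium', 'desc': 'the default size'},
    'large',
]
index = prompt_choice_list('Pick an instance size:', choices, default=2)
print('zero-based index chosen:', index)
```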
reincubate/ricloud
ricloud/asmaster_api.py
https://github.com/reincubate/ricloud/blob/e46bce4529fbdca34a4190c18c7219e937e2b697/ricloud/asmaster_api.py#L59-L66
def _set_allowed_services_and_actions(self, services): """Expect services to be a list of service dictionaries, each with `name` and `actions` keys.""" for service in services: self.services[service['name']] = {} for action in service['actions']: name = action.pop('name') self.services[service['name']][name] = action
[ "def", "_set_allowed_services_and_actions", "(", "self", ",", "services", ")", ":", "for", "service", "in", "services", ":", "self", ".", "services", "[", "service", "[", "'name'", "]", "]", "=", "{", "}", "for", "action", "in", "service", "[", "'actions'", "]", ":", "name", "=", "action", ".", "pop", "(", "'name'", ")", "self", ".", "services", "[", "service", "[", "'name'", "]", "]", "[", "name", "]", "=", "action" ]
Expect services to be a list of service dictionaries, each with `name` and `actions` keys.
[ "Expect", "services", "to", "be", "a", "list", "of", "service", "dictionaries", "each", "with", "name", "and", "actions", "keys", "." ]
python
train
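The reshaping done by _set_allowed_services_and_actions() can be shown on its own with a made-up services payload; any keys other than 'name' simply stay attached to the action entry.

```python
services = [
    {'name': 'icloud', 'actions': [
        {'name': 'fetch-data', 'permissions': ['read']},   # extra keys are kept
        {'name': 'list-devices'},
    ]},
]

lookup = {}
for service in services:
    lookup[service['name']] = {}
    for action in service['actions']:
        name = action.pop('name')
        lookup[service['name']][name] = action

print(lookup)
# {'icloud': {'fetch-data': {'permissions': ['read']}, 'list-devices': {}}}
```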
calmjs/calmjs.parse
src/calmjs/parse/vlq.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/vlq.py#L64-L83
def encode_vlq(i): """ Encode integer `i` into a VLQ encoded string. """ # shift in the sign to least significant bit raw = (-i << 1) + 1 if i < 0 else i << 1 if raw < VLQ_MULTI_CHAR: # short-circuit simple case as it doesn't need continuation return INT_B64[raw] result = [] while raw: # assume continue result.append(raw & VLQ_BASE_MASK | VLQ_CONT) # shift out processed bits raw = raw >> VLQ_SHIFT # discontinue the last unit result[-1] &= VLQ_BASE_MASK return ''.join(INT_B64[i] for i in result)
[ "def", "encode_vlq", "(", "i", ")", ":", "# shift in the sign to least significant bit", "raw", "=", "(", "-", "i", "<<", "1", ")", "+", "1", "if", "i", "<", "0", "else", "i", "<<", "1", "if", "raw", "<", "VLQ_MULTI_CHAR", ":", "# short-circuit simple case as it doesn't need continuation", "return", "INT_B64", "[", "raw", "]", "result", "=", "[", "]", "while", "raw", ":", "# assume continue", "result", ".", "append", "(", "raw", "&", "VLQ_BASE_MASK", "|", "VLQ_CONT", ")", "# shift out processed bits", "raw", "=", "raw", ">>", "VLQ_SHIFT", "# discontinue the last unit", "result", "[", "-", "1", "]", "&=", "VLQ_BASE_MASK", "return", "''", ".", "join", "(", "INT_B64", "[", "i", "]", "for", "i", "in", "result", ")" ]
Encode integer `i` into a VLQ encoded string.
[ "Encode", "integer", "i", "into", "a", "VLQ", "encoded", "string", "." ]
python
train
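A hedged usage sketch for encode_vlq(). The expected outputs assume INT_B64 is the standard source-map base64 alphabet, which is what this module uses, so small values map to single characters and larger magnitudes emit continuation units.

```python
from calmjs.parse.vlq import encode_vlq

print(encode_vlq(0))    # 'A'  (no continuation needed)
print(encode_vlq(1))    # 'C'  (sign bit shifted into the least significant bit)
print(encode_vlq(-1))   # 'D'
print(encode_vlq(16))   # 'gB' (first unit carries the continuation bit)
```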
hydpy-dev/hydpy
hydpy/core/sequencetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/sequencetools.py#L856-L862
def disk2ram(self): """Move internal data from disk to RAM.""" values = self.series self.deactivate_disk() self.ramflag = True self.__set_array(values) self.update_fastaccess()
[ "def", "disk2ram", "(", "self", ")", ":", "values", "=", "self", ".", "series", "self", ".", "deactivate_disk", "(", ")", "self", ".", "ramflag", "=", "True", "self", ".", "__set_array", "(", "values", ")", "self", ".", "update_fastaccess", "(", ")" ]
Move internal data from disk to RAM.
[ "Move", "internal", "data", "from", "disk", "to", "RAM", "." ]
python
train
nikcub/floyd
floyd/db/__init__.py
https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/db/__init__.py#L140-L161
def parse_md(self): """Takes a post path and returns a dictionary of variables""" post_content = _MARKDOWN.convert(self.raw_src) if hasattr(_MARKDOWN, 'Meta'): # 'Meta' in _MARKDOWN and _MARKDOWN.Meta: for key in _MARKDOWN.Meta: print "\t meta: %s: %s (%s)" % (key, _MARKDOWN.Meta[key][0], type(_MARKDOWN.Meta[key][0])) if key == 'pubdate': setattr(self, key, datetime.datetime.fromtimestamp(float(_MARKDOWN.Meta[key][0]))) else: setattr(self, key, _MARKDOWN.Meta[key][0]) self.content = post_content self.stub = self.__key__ # set required fields # @TODO required in schema rather than here if not hasattr(self, 'pubdate'): print '\t Notice: setting default pubdate' setattr(self, 'pubdate', datetime.datetime.now())
[ "def", "parse_md", "(", "self", ")", ":", "post_content", "=", "_MARKDOWN", ".", "convert", "(", "self", ".", "raw_src", ")", "if", "hasattr", "(", "_MARKDOWN", ",", "'Meta'", ")", ":", "# 'Meta' in _MARKDOWN and _MARKDOWN.Meta:", "for", "key", "in", "_MARKDOWN", ".", "Meta", ":", "print", "\"\\t meta: %s: %s (%s)\"", "%", "(", "key", ",", "_MARKDOWN", ".", "Meta", "[", "key", "]", "[", "0", "]", ",", "type", "(", "_MARKDOWN", ".", "Meta", "[", "key", "]", "[", "0", "]", ")", ")", "if", "key", "==", "'pubdate'", ":", "setattr", "(", "self", ",", "key", ",", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "float", "(", "_MARKDOWN", ".", "Meta", "[", "key", "]", "[", "0", "]", ")", ")", ")", "else", ":", "setattr", "(", "self", ",", "key", ",", "_MARKDOWN", ".", "Meta", "[", "key", "]", "[", "0", "]", ")", "self", ".", "content", "=", "post_content", "self", ".", "stub", "=", "self", ".", "__key__", "# set required fields", "# @TODO required in schema rather than here", "if", "not", "hasattr", "(", "self", ",", "'pubdate'", ")", ":", "print", "'\\t Notice: setting default pubdate'", "setattr", "(", "self", ",", "'pubdate'", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ")" ]
Takes a post path and returns a dictionary of variables
[ "Takes", "a", "post", "path", "and", "returns", "a", "dictionary", "of", "variables" ]
python
train
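A sketch of the metadata convention `parse_md` relies on, assuming `_MARKDOWN` is a `markdown.Markdown` instance built with the `meta` extension (its construction is not shown in the record):

# Sketch of the front-matter handling used by parse_md above.
# _MARKDOWN is assumed to be created with the 'meta' extension.
import markdown

_MARKDOWN = markdown.Markdown(extensions=['meta'])

raw_src = """title: Hello World
pubdate: 1577836800

First paragraph of the post.
"""

html = _MARKDOWN.convert(raw_src)
# The 'meta' extension exposes the header lines as a dict of lists of strings.
print(_MARKDOWN.Meta)   # {'title': ['Hello World'], 'pubdate': ['1577836800']}
print(html)             # <p>First paragraph of the post.</p>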
ppb/pursuedpybear
ppb/scenes.py
https://github.com/ppb/pursuedpybear/blob/db3bfaaf86d14b4d1bb9e0b24cc8dc63f29c2191/ppb/scenes.py#L35-L55
def add(self, game_object: Hashable, tags: Iterable[Hashable]=()) -> None:
    """
    Add a game_object to the container.

    game_object: Any Hashable object. The item to be added.
    tags: An iterable of Hashable objects. Values that can be used to
        retrieve a group containing the game_object.

    Examples:
        container.add(MyObject())

        container.add(MyObject(), tags=("red", "blue")
    """
    if isinstance(tags, (str, bytes)):
        raise TypeError("You passed a string instead of an iterable, this probably isn't what you intended.\n\nTry making it a tuple.")
    self.all.add(game_object)
    for kind in type(game_object).mro():
        self.kinds[kind].add(game_object)
    for tag in tags:
        self.tags[tag].add(game_object)
[ "def", "add", "(", "self", ",", "game_object", ":", "Hashable", ",", "tags", ":", "Iterable", "[", "Hashable", "]", "=", "(", ")", ")", "->", "None", ":", "if", "isinstance", "(", "tags", ",", "(", "str", ",", "bytes", ")", ")", ":", "raise", "TypeError", "(", "\"You passed a string instead of an iterable, this probably isn't what you intended.\\n\\nTry making it a tuple.\"", ")", "self", ".", "all", ".", "add", "(", "game_object", ")", "for", "kind", "in", "type", "(", "game_object", ")", ".", "mro", "(", ")", ":", "self", ".", "kinds", "[", "kind", "]", ".", "add", "(", "game_object", ")", "for", "tag", "in", "tags", ":", "self", ".", "tags", "[", "tag", "]", ".", "add", "(", "game_object", ")" ]
Add a game_object to the container. game_object: Any Hashable object. The item to be added. tags: An iterable of Hashable objects. Values that can be used to retrieve a group containing the game_object. Examples: container.add(MyObject()) container.add(MyObject(), tags=("red", "blue")
[ "Add", "a", "game_object", "to", "the", "container", "." ]
python
train
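A standalone sketch of the kind/tag indexing that `add` performs above. The real container class is not shown in the record; `all`, `kinds` and `tags` are assumed to be a set and two `defaultdict(set)` maps, which matches how `add` uses them:

# Assumption-based model of the container; only the indexing idea mirrors the record.
from collections import defaultdict

class Container:
    def __init__(self):
        self.all = set()
        self.kinds = defaultdict(set)
        self.tags = defaultdict(set)

    def add(self, game_object, tags=()):
        if isinstance(tags, (str, bytes)):
            raise TypeError("tags must be an iterable of tags, not a single string")
        self.all.add(game_object)
        for kind in type(game_object).mro():   # index under every base class
            self.kinds[kind].add(game_object)
        for tag in tags:
            self.tags[tag].add(game_object)

class Sprite: pass
class Player(Sprite): pass

c = Container()
p = Player()
c.add(p, tags=("red",))
print(p in c.kinds[Sprite], p in c.tags["red"])   # True True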
edx/ease
ease/feature_extractor.py
https://github.com/edx/ease/blob/a7890ed403da94d03726b0639cd8ebda45af6bbb/ease/feature_extractor.py#L38-L73
def initialize_dictionaries(self, e_set, max_feats2 = 200): """ Initializes dictionaries from an essay set object Dictionaries must be initialized prior to using this to extract features e_set is an input essay set returns a confirmation of initialization """ if(hasattr(e_set, '_type')): if(e_set._type == "train"): #normal text (unstemmed) useful words/bigrams nvocab = util_functions.get_vocab(e_set._text, e_set._score, max_feats2 = max_feats2) #stemmed and spell corrected vocab useful words/ngrams svocab = util_functions.get_vocab(e_set._clean_stem_text, e_set._score, max_feats2 = max_feats2) #dictionary trained on proper vocab self._normal_dict = CountVectorizer(ngram_range=(1,2), vocabulary=nvocab) #dictionary trained on proper vocab self._stem_dict = CountVectorizer(ngram_range=(1,2), vocabulary=svocab) self.dict_initialized = True #Average spelling errors in set. needed later for spelling detection self._mean_spelling_errors=sum(e_set._spelling_errors)/float(len(e_set._spelling_errors)) self._spell_errors_per_character=sum(e_set._spelling_errors)/float(sum([len(t) for t in e_set._text])) #Gets the number and positions of grammar errors good_pos_tags,bad_pos_positions=self._get_grammar_errors(e_set._pos,e_set._text,e_set._tokens) self._grammar_errors_per_character=(sum(good_pos_tags)/float(sum([len(t) for t in e_set._text]))) #Generate bag of words features bag_feats=self.gen_bag_feats(e_set) #Sum of a row of bag of words features (topical words in an essay) f_row_sum=numpy.sum(bag_feats[:,:]) #Average index of how "topical" essays are self._mean_f_prop=f_row_sum/float(sum([len(t) for t in e_set._text])) ret = "ok" else: raise util_functions.InputError(e_set, "needs to be an essay set of the train type.") else: raise util_functions.InputError(e_set, "wrong input. need an essay set object") return ret
[ "def", "initialize_dictionaries", "(", "self", ",", "e_set", ",", "max_feats2", "=", "200", ")", ":", "if", "(", "hasattr", "(", "e_set", ",", "'_type'", ")", ")", ":", "if", "(", "e_set", ".", "_type", "==", "\"train\"", ")", ":", "#normal text (unstemmed) useful words/bigrams", "nvocab", "=", "util_functions", ".", "get_vocab", "(", "e_set", ".", "_text", ",", "e_set", ".", "_score", ",", "max_feats2", "=", "max_feats2", ")", "#stemmed and spell corrected vocab useful words/ngrams", "svocab", "=", "util_functions", ".", "get_vocab", "(", "e_set", ".", "_clean_stem_text", ",", "e_set", ".", "_score", ",", "max_feats2", "=", "max_feats2", ")", "#dictionary trained on proper vocab", "self", ".", "_normal_dict", "=", "CountVectorizer", "(", "ngram_range", "=", "(", "1", ",", "2", ")", ",", "vocabulary", "=", "nvocab", ")", "#dictionary trained on proper vocab", "self", ".", "_stem_dict", "=", "CountVectorizer", "(", "ngram_range", "=", "(", "1", ",", "2", ")", ",", "vocabulary", "=", "svocab", ")", "self", ".", "dict_initialized", "=", "True", "#Average spelling errors in set. needed later for spelling detection", "self", ".", "_mean_spelling_errors", "=", "sum", "(", "e_set", ".", "_spelling_errors", ")", "/", "float", "(", "len", "(", "e_set", ".", "_spelling_errors", ")", ")", "self", ".", "_spell_errors_per_character", "=", "sum", "(", "e_set", ".", "_spelling_errors", ")", "/", "float", "(", "sum", "(", "[", "len", "(", "t", ")", "for", "t", "in", "e_set", ".", "_text", "]", ")", ")", "#Gets the number and positions of grammar errors", "good_pos_tags", ",", "bad_pos_positions", "=", "self", ".", "_get_grammar_errors", "(", "e_set", ".", "_pos", ",", "e_set", ".", "_text", ",", "e_set", ".", "_tokens", ")", "self", ".", "_grammar_errors_per_character", "=", "(", "sum", "(", "good_pos_tags", ")", "/", "float", "(", "sum", "(", "[", "len", "(", "t", ")", "for", "t", "in", "e_set", ".", "_text", "]", ")", ")", ")", "#Generate bag of words features", "bag_feats", "=", "self", ".", "gen_bag_feats", "(", "e_set", ")", "#Sum of a row of bag of words features (topical words in an essay)", "f_row_sum", "=", "numpy", ".", "sum", "(", "bag_feats", "[", ":", ",", ":", "]", ")", "#Average index of how \"topical\" essays are", "self", ".", "_mean_f_prop", "=", "f_row_sum", "/", "float", "(", "sum", "(", "[", "len", "(", "t", ")", "for", "t", "in", "e_set", ".", "_text", "]", ")", ")", "ret", "=", "\"ok\"", "else", ":", "raise", "util_functions", ".", "InputError", "(", "e_set", ",", "\"needs to be an essay set of the train type.\"", ")", "else", ":", "raise", "util_functions", ".", "InputError", "(", "e_set", ",", "\"wrong input. need an essay set object\"", ")", "return", "ret" ]
Initializes dictionaries from an essay set object Dictionaries must be initialized prior to using this to extract features e_set is an input essay set returns a confirmation of initialization
[ "Initializes", "dictionaries", "from", "an", "essay", "set", "object", "Dictionaries", "must", "be", "initialized", "prior", "to", "using", "this", "to", "extract", "features", "e_set", "is", "an", "input", "essay", "set", "returns", "a", "confirmation", "of", "initialization" ]
python
valid
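A small sketch of the fixed-vocabulary bag-of-words step used in the record above; the vocabulary values are made up, only the `CountVectorizer(ngram_range=(1, 2), vocabulary=...)` usage mirrors the record:

# Fixed-vocabulary vectorisation: with an explicit vocabulary, transform() can be
# called directly and each column corresponds to one vocabulary entry.
from sklearn.feature_extraction.text import CountVectorizer

vocab = ["essay", "good", "good essay"]           # would come from util_functions.get_vocab
vectorizer = CountVectorizer(ngram_range=(1, 2), vocabulary=vocab)

counts = vectorizer.transform(["a good essay", "a bad essay"])
print(counts.toarray())   # [[1 1 1]
                          #  [1 0 0]]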
saltstack/salt
salt/loader.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/loader.py#L2044-L2058
def global_injector_decorator(inject_globals):
    '''
    Decorator used by the LazyLoader to inject globals into a function at
    execute time.

    globals
        Dictionary with global variables to inject
    '''
    def inner_decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            with salt.utils.context.func_globals_inject(f, **inject_globals):
                return f(*args, **kwargs)
        return wrapper
    return inner_decorator
[ "def", "global_injector_decorator", "(", "inject_globals", ")", ":", "def", "inner_decorator", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "salt", ".", "utils", ".", "context", ".", "func_globals_inject", "(", "f", ",", "*", "*", "inject_globals", ")", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "inner_decorator" ]
Decorator used by the LazyLoader to inject globals into a function at execute time. globals Dictionary with global variables to inject
[ "Decorator", "used", "by", "the", "LazyLoader", "to", "inject", "globals", "into", "a", "function", "at", "execute", "time", "." ]
python
train
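A simplified stand-in for the pattern in the record above. The real `salt.utils.context.func_globals_inject` is not shown here; this sketch only illustrates the idea of temporarily rebinding names in a function's `__globals__`:

# NOT Salt's implementation, just a self-contained model of the same idea.
import functools
from contextlib import contextmanager

@contextmanager
def func_globals_inject(func, **overrides):
    g = func.__globals__
    sentinel = object()
    saved = {k: g.get(k, sentinel) for k in overrides}
    g.update(overrides)
    try:
        yield
    finally:
        # restore the previous bindings, deleting names that did not exist before
        for k, v in saved.items():
            if v is sentinel:
                g.pop(k, None)
            else:
                g[k] = v

def global_injector_decorator(inject_globals):
    def inner_decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            with func_globals_inject(f, **inject_globals):
                return f(*args, **kwargs)
        return wrapper
    return inner_decorator

def report():
    return __opts__["test"]     # resolved through report.__globals__ at call time

report = global_injector_decorator({"__opts__": {"test": True}})(report)
print(report())                 # True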
dronekit/dronekit-python
examples/guided_set_speed_yaw/guided_set_speed_yaw.py
https://github.com/dronekit/dronekit-python/blob/91c147fa61f521f5fff5d0cee06d07ed93614af8/examples/guided_set_speed_yaw/guided_set_speed_yaw.py#L298-L324
def goto(dNorth, dEast, gotoFunction=vehicle.simple_goto):
    """
    Moves the vehicle to a position dNorth metres North and dEast metres East of the current position.

    The method takes a function pointer argument with a single `dronekit.lib.LocationGlobal` parameter for
    the target position. This allows it to be called with different position-setting commands.
    By default it uses the standard method: dronekit.lib.Vehicle.simple_goto().

    The method reports the distance to target every two seconds.
    """
    currentLocation = vehicle.location.global_relative_frame
    targetLocation = get_location_metres(currentLocation, dNorth, dEast)
    targetDistance = get_distance_metres(currentLocation, targetLocation)
    gotoFunction(targetLocation)

    #print "DEBUG: targetLocation: %s" % targetLocation
    #print "DEBUG: targetLocation: %s" % targetDistance

    while vehicle.mode.name=="GUIDED": #Stop action if we are no longer in guided mode.
        #print "DEBUG: mode: %s" % vehicle.mode.name
        remainingDistance=get_distance_metres(vehicle.location.global_relative_frame, targetLocation)
        print("Distance to target: ", remainingDistance)
        if remainingDistance<=targetDistance*0.01: #Just below target, in case of undershoot.
            print("Reached target")
            break;
        time.sleep(2)
[ "def", "goto", "(", "dNorth", ",", "dEast", ",", "gotoFunction", "=", "vehicle", ".", "simple_goto", ")", ":", "currentLocation", "=", "vehicle", ".", "location", ".", "global_relative_frame", "targetLocation", "=", "get_location_metres", "(", "currentLocation", ",", "dNorth", ",", "dEast", ")", "targetDistance", "=", "get_distance_metres", "(", "currentLocation", ",", "targetLocation", ")", "gotoFunction", "(", "targetLocation", ")", "#print \"DEBUG: targetLocation: %s\" % targetLocation", "#print \"DEBUG: targetLocation: %s\" % targetDistance", "while", "vehicle", ".", "mode", ".", "name", "==", "\"GUIDED\"", ":", "#Stop action if we are no longer in guided mode.", "#print \"DEBUG: mode: %s\" % vehicle.mode.name", "remainingDistance", "=", "get_distance_metres", "(", "vehicle", ".", "location", ".", "global_relative_frame", ",", "targetLocation", ")", "print", "(", "\"Distance to target: \"", ",", "remainingDistance", ")", "if", "remainingDistance", "<=", "targetDistance", "*", "0.01", ":", "#Just below target, in case of undershoot.", "print", "(", "\"Reached target\"", ")", "break", "time", ".", "sleep", "(", "2", ")" ]
Moves the vehicle to a position dNorth metres North and dEast metres East of the current position. The method takes a function pointer argument with a single `dronekit.lib.LocationGlobal` parameter for the target position. This allows it to be called with different position-setting commands. By default it uses the standard method: dronekit.lib.Vehicle.simple_goto(). The method reports the distance to target every two seconds.
[ "Moves", "the", "vehicle", "to", "a", "position", "dNorth", "metres", "North", "and", "dEast", "metres", "East", "of", "the", "current", "position", "." ]
python
train
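A brief usage sketch for the `goto` record above; `vehicle` is assumed to be an already connected Vehicle in GUIDED mode, and the alternative position-setting helper named in the comment is hypothetical:

# Move roughly 30 m North of the current position using the default simple_goto.
goto(30, 0)

# The same move through a different position-setting command (hypothetical helper name).
# goto(30, 0, gotoFunction=goto_position_target_global_int)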
dswah/pyGAM
pygam/terms.py
https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1202-L1214
def info(self):
    """get information about this term

    Parameters
    ----------

    Returns
    -------
    dict containing information to duplicate this term
    """
    info = super(TensorTerm, self).info
    info.update({'terms':[term.info for term in self._terms]})
    return info
[ "def", "info", "(", "self", ")", ":", "info", "=", "super", "(", "TensorTerm", ",", "self", ")", ".", "info", "info", ".", "update", "(", "{", "'terms'", ":", "[", "term", ".", "info", "for", "term", "in", "self", ".", "_terms", "]", "}", ")", "return", "info" ]
get information about this term Parameters ---------- Returns ------- dict containing information to duplicate this term
[ "get", "information", "about", "this", "term" ]
python
train
SystemRDL/systemrdl-compiler
systemrdl/node.py
https://github.com/SystemRDL/systemrdl-compiler/blob/6ae64f2bb6ecbbe9db356e20e8ac94e85bdeed3a/systemrdl/node.py#L74-L107
def children(self, unroll=False, skip_not_present=True):
    """
    Returns an iterator that provides nodes for all immediate children of this component.

    Parameters
    ----------
    unroll : bool
        If True, any children that are arrays are unrolled.
    skip_not_present : bool
        If True, skips children whose 'ispresent' property is set to False

    Yields
    ------
    :class:`~Node`
        All immediate children
    """
    for child_inst in self.inst.children:
        if skip_not_present:
            # Check if property ispresent == False
            if not child_inst.properties.get('ispresent', True):
                # ispresent was explicitly set to False. Skip it
                continue

        if unroll and isinstance(child_inst, comp.AddressableComponent) and child_inst.is_array:
            # Unroll the array
            range_list = [range(n) for n in child_inst.array_dimensions]
            for idxs in itertools.product(*range_list):
                N = Node._factory(child_inst, self.env, self)
                N.current_idx = idxs  # pylint: disable=attribute-defined-outside-init
                yield N
        else:
            yield Node._factory(child_inst, self.env, self)
[ "def", "children", "(", "self", ",", "unroll", "=", "False", ",", "skip_not_present", "=", "True", ")", ":", "for", "child_inst", "in", "self", ".", "inst", ".", "children", ":", "if", "skip_not_present", ":", "# Check if property ispresent == False", "if", "not", "child_inst", ".", "properties", ".", "get", "(", "'ispresent'", ",", "True", ")", ":", "# ispresent was explicitly set to False. Skip it", "continue", "if", "unroll", "and", "isinstance", "(", "child_inst", ",", "comp", ".", "AddressableComponent", ")", "and", "child_inst", ".", "is_array", ":", "# Unroll the array", "range_list", "=", "[", "range", "(", "n", ")", "for", "n", "in", "child_inst", ".", "array_dimensions", "]", "for", "idxs", "in", "itertools", ".", "product", "(", "*", "range_list", ")", ":", "N", "=", "Node", ".", "_factory", "(", "child_inst", ",", "self", ".", "env", ",", "self", ")", "N", ".", "current_idx", "=", "idxs", "# pylint: disable=attribute-defined-outside-init", "yield", "N", "else", ":", "yield", "Node", ".", "_factory", "(", "child_inst", ",", "self", ".", "env", ",", "self", ")" ]
Returns an iterator that provides nodes for all immediate children of this component. Parameters ---------- unroll : bool If True, any children that are arrays are unrolled. skip_not_present : bool If True, skips children whose 'ispresent' property is set to False Yields ------ :class:`~Node` All immediate children
[ "Returns", "an", "iterator", "that", "provides", "nodes", "for", "all", "immediate", "children", "of", "this", "component", "." ]
python
train
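A usage sketch for the `children` record above; `top_node` is assumed to be an addrmap node obtained from a compiled SystemRDL design:

# Iterate immediate children, expanding arrayed components element by element.
for child in top_node.children(unroll=True, skip_not_present=True):
    print(child)   # one Node per array element when unroll=True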
inasafe/inasafe
safe/common/parameters/resource_parameter.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/common/parameters/resource_parameter.py#L55-L64
def serialize(self):
    """Convert the parameter into a dictionary.

    :return: The parameter dictionary.
    :rtype: dict
    """
    pickle = super(ResourceParameter, self).serialize()
    pickle['frequency'] = self.frequency
    pickle['unit'] = self._unit.serialize()
    return pickle
[ "def", "serialize", "(", "self", ")", ":", "pickle", "=", "super", "(", "ResourceParameter", ",", "self", ")", ".", "serialize", "(", ")", "pickle", "[", "'frequency'", "]", "=", "self", ".", "frequency", "pickle", "[", "'unit'", "]", "=", "self", ".", "_unit", ".", "serialize", "(", ")", "return", "pickle" ]
Convert the parameter into a dictionary. :return: The parameter dictionary. :rtype: dict
[ "Convert", "the", "parameter", "into", "a", "dictionary", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/configuration_db/sip_config_db/scheduling/subarray.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/configuration_db/sip_config_db/scheduling/subarray.py#L107-L120
def configure_sbi(self, sbi_config: dict, schema_path: str = None):
    """Add a new SBI to the database associated with this subarray.

    Args:
        sbi_config (dict): SBI configuration.
        schema_path (str, optional): Path to the SBI config schema.

    """
    if not self.active:
        raise RuntimeError("Unable to add SBIs to inactive subarray!")
    sbi_config['subarray_id'] = self._id
    sbi = SchedulingBlockInstance.from_config(sbi_config, schema_path)
    self._add_sbi_id(sbi_config['id'])
    return sbi
[ "def", "configure_sbi", "(", "self", ",", "sbi_config", ":", "dict", ",", "schema_path", ":", "str", "=", "None", ")", ":", "if", "not", "self", ".", "active", ":", "raise", "RuntimeError", "(", "\"Unable to add SBIs to inactive subarray!\"", ")", "sbi_config", "[", "'subarray_id'", "]", "=", "self", ".", "_id", "sbi", "=", "SchedulingBlockInstance", ".", "from_config", "(", "sbi_config", ",", "schema_path", ")", "self", ".", "_add_sbi_id", "(", "sbi_config", "[", "'id'", "]", ")", "return", "sbi" ]
Add a new SBI to the database associated with this subarray. Args: sbi_config (dict): SBI configuration. schema_path (str, optional): Path to the SBI config schema.
[ "Add", "a", "new", "SBI", "to", "the", "database", "associated", "with", "this", "subarray", "." ]
python
train
ContextLab/quail
quail/simulate.py
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/simulate.py#L5-L14
def simulate_list(nwords=16, nrec=10, ncats=4):
    """A function to simulate a list"""

    # load wordpool
    wp = pd.read_csv('data/cut_wordpool.csv')

    # get one list
    wp = wp[wp['GROUP']==np.random.choice(list(range(16)), 1)[0]].sample(16)

    wp['COLOR'] = [[int(np.random.rand() * 255) for i in range(3)] for i in range(16)]
[ "def", "simulate_list", "(", "nwords", "=", "16", ",", "nrec", "=", "10", ",", "ncats", "=", "4", ")", ":", "# load wordpool", "wp", "=", "pd", ".", "read_csv", "(", "'data/cut_wordpool.csv'", ")", "# get one list", "wp", "=", "wp", "[", "wp", "[", "'GROUP'", "]", "==", "np", ".", "random", ".", "choice", "(", "list", "(", "range", "(", "16", ")", ")", ",", "1", ")", "[", "0", "]", "]", ".", "sample", "(", "16", ")", "wp", "[", "'COLOR'", "]", "=", "[", "[", "int", "(", "np", ".", "random", ".", "rand", "(", ")", "*", "255", ")", "for", "i", "in", "range", "(", "3", ")", "]", "for", "i", "in", "range", "(", "16", ")", "]" ]
A function to simulate a list
[ "A", "function", "to", "simulate", "a", "list" ]
python
train
bitcraze/crazyflie-lib-python
cflib/crazyflie/param.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/param.py#L250-L255
def request_param_update(self, complete_name):
    """
    Request an update of the value for the supplied parameter.
    """
    self.param_updater.request_param_update(
        self.toc.get_element_id(complete_name))
[ "def", "request_param_update", "(", "self", ",", "complete_name", ")", ":", "self", ".", "param_updater", ".", "request_param_update", "(", "self", ".", "toc", ".", "get_element_id", "(", "complete_name", ")", ")" ]
Request an update of the value for the supplied parameter.
[ "Request", "an", "update", "of", "the", "value", "for", "the", "supplied", "parameter", "." ]
python
train
jakevdp/supersmoother
supersmoother/utils.py
https://github.com/jakevdp/supersmoother/blob/0c96cf13dcd6f9006d3c0421f9cd6e18abe27a2f/supersmoother/utils.py#L10-L30
def setattr_context(obj, **kwargs):
    """
    Context manager to temporarily change the values of object attributes
    while executing a function.

    Example
    -------
    >>> class Foo: pass
    >>> f = Foo(); f.attr = 'hello'
    >>> with setattr_context(f, attr='goodbye'):
    ...     print(f.attr)
    goodbye
    >>> print(f.attr)
    hello
    """
    old_kwargs = dict([(key, getattr(obj, key)) for key in kwargs])
    [setattr(obj, key, val) for key, val in kwargs.items()]
    try:
        yield
    finally:
        [setattr(obj, key, val) for key, val in old_kwargs.items()]
[ "def", "setattr_context", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "old_kwargs", "=", "dict", "(", "[", "(", "key", ",", "getattr", "(", "obj", ",", "key", ")", ")", "for", "key", "in", "kwargs", "]", ")", "[", "setattr", "(", "obj", ",", "key", ",", "val", ")", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", "]", "try", ":", "yield", "finally", ":", "[", "setattr", "(", "obj", ",", "key", ",", "val", ")", "for", "key", ",", "val", "in", "old_kwargs", ".", "items", "(", ")", "]" ]
Context manager to temporarily change the values of object attributes while executing a function. Example ------- >>> class Foo: pass >>> f = Foo(); f.attr = 'hello' >>> with setattr_context(f, attr='goodbye'): ... print(f.attr) goodbye >>> print(f.attr) hello
[ "Context", "manager", "to", "temporarily", "change", "the", "values", "of", "object", "attributes", "while", "executing", "a", "function", "." ]
python
train
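The captured body above is a generator; in the library it is presumably wrapped with `contextlib.contextmanager` (the decorator line falls outside the captured span). A self-contained sketch under that assumption:

# Same pattern, with the assumed @contextmanager decoration made explicit.
from contextlib import contextmanager

@contextmanager
def setattr_context(obj, **kwargs):
    old_kwargs = {key: getattr(obj, key) for key in kwargs}
    for key, val in kwargs.items():
        setattr(obj, key, val)
    try:
        yield
    finally:
        for key, val in old_kwargs.items():
            setattr(obj, key, val)

class Foo: pass
f = Foo(); f.attr = 'hello'
with setattr_context(f, attr='goodbye'):
    print(f.attr)   # goodbye
print(f.attr)       # hello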
tanghaibao/jcvi
jcvi/assembly/opticalmap.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/opticalmap.py#L307-L316
def select_bed(bed):
    """
    Return non-overlapping set of ranges, choosing high scoring blocks over
    low scoring alignments when there are conflicts.
    """
    ranges = [Range(x.seqid, x.start, x.end, float(x.score), i) for i, x in enumerate(bed)]
    selected, score = range_chain(ranges)
    selected = [bed[x.id] for x in selected]

    return selected
[ "def", "select_bed", "(", "bed", ")", ":", "ranges", "=", "[", "Range", "(", "x", ".", "seqid", ",", "x", ".", "start", ",", "x", ".", "end", ",", "float", "(", "x", ".", "score", ")", ",", "i", ")", "for", "i", ",", "x", "in", "enumerate", "(", "bed", ")", "]", "selected", ",", "score", "=", "range_chain", "(", "ranges", ")", "selected", "=", "[", "bed", "[", "x", ".", "id", "]", "for", "x", "in", "selected", "]", "return", "selected" ]
Return non-overlapping set of ranges, choosing high scoring blocks over low scoring alignments when there are conflicts.
[ "Return", "non", "-", "overlapping", "set", "of", "ranges", "choosing", "high", "scoring", "blocks", "over", "low", "scoring", "alignments", "when", "there", "are", "conflicts", "." ]
python
train
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L408-L412
def paragraph_ends(self):
    """The end positions of ``paragraphs`` layer elements."""
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    return self.ends(PARAGRAPHS)
[ "def", "paragraph_ends", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "PARAGRAPHS", ")", ":", "self", ".", "tokenize_paragraphs", "(", ")", "return", "self", ".", "ends", "(", "PARAGRAPHS", ")" ]
The end positions of ``paragraphs`` layer elements.
[ "The", "end", "positions", "of", "paragraphs", "layer", "elements", "." ]
python
train
rwl/godot
godot/ui/graph_view_model.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/ui/graph_view_model.py#L356-L386
def add_edge(self, info): """ Handles adding an Edge to the graph. """ if not info.initialized: return graph = self._request_graph(info.ui.control) if graph is None: return n_nodes = len(graph.nodes) IDs = [v.ID for v in graph.nodes] if n_nodes == 0: tail_node = Node(ID=make_unique_name("node", IDs)) head_name = make_unique_name("node", IDs + [tail_node.ID]) head_node = Node(ID=head_name) elif n_nodes == 1: tail_node = graph.nodes[0] head_node = Node(ID=make_unique_name("node", IDs)) else: tail_node = graph.nodes[0] head_node = graph.nodes[1] edge = Edge(tail_node, head_node, _nodes=graph.nodes) retval = edge.edit_traits(parent=info.ui.control, kind="livemodal") if retval.result: graph.edges.append(edge)
[ "def", "add_edge", "(", "self", ",", "info", ")", ":", "if", "not", "info", ".", "initialized", ":", "return", "graph", "=", "self", ".", "_request_graph", "(", "info", ".", "ui", ".", "control", ")", "if", "graph", "is", "None", ":", "return", "n_nodes", "=", "len", "(", "graph", ".", "nodes", ")", "IDs", "=", "[", "v", ".", "ID", "for", "v", "in", "graph", ".", "nodes", "]", "if", "n_nodes", "==", "0", ":", "tail_node", "=", "Node", "(", "ID", "=", "make_unique_name", "(", "\"node\"", ",", "IDs", ")", ")", "head_name", "=", "make_unique_name", "(", "\"node\"", ",", "IDs", "+", "[", "tail_node", ".", "ID", "]", ")", "head_node", "=", "Node", "(", "ID", "=", "head_name", ")", "elif", "n_nodes", "==", "1", ":", "tail_node", "=", "graph", ".", "nodes", "[", "0", "]", "head_node", "=", "Node", "(", "ID", "=", "make_unique_name", "(", "\"node\"", ",", "IDs", ")", ")", "else", ":", "tail_node", "=", "graph", ".", "nodes", "[", "0", "]", "head_node", "=", "graph", ".", "nodes", "[", "1", "]", "edge", "=", "Edge", "(", "tail_node", ",", "head_node", ",", "_nodes", "=", "graph", ".", "nodes", ")", "retval", "=", "edge", ".", "edit_traits", "(", "parent", "=", "info", ".", "ui", ".", "control", ",", "kind", "=", "\"livemodal\"", ")", "if", "retval", ".", "result", ":", "graph", ".", "edges", ".", "append", "(", "edge", ")" ]
Handles adding an Edge to the graph.
[ "Handles", "adding", "an", "Edge", "to", "the", "graph", "." ]
python
test
materialsproject/pymatgen
pymatgen/io/feff/inputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/feff/inputs.py#L104-L119
def from_cif_file(cif_file, source='', comment=''):
    """
    Static method to create Header object from cif_file

    Args:
        cif_file: cif_file path and name
        source: User supplied identifier, i.e. for Materials Project this
            would be the material ID number
        comment: User comment that goes in header

    Returns:
        Header Object
    """
    r = CifParser(cif_file)
    structure = r.get_structures()[0]
    return Header(structure, source, comment)
[ "def", "from_cif_file", "(", "cif_file", ",", "source", "=", "''", ",", "comment", "=", "''", ")", ":", "r", "=", "CifParser", "(", "cif_file", ")", "structure", "=", "r", ".", "get_structures", "(", ")", "[", "0", "]", "return", "Header", "(", "structure", ",", "source", ",", "comment", ")" ]
Static method to create Header object from cif_file Args: cif_file: cif_file path and name source: User supplied identifier, i.e. for Materials Project this would be the material ID number comment: User comment that goes in header Returns: Header Object
[ "Static", "method", "to", "create", "Header", "object", "from", "cif_file" ]
python
train
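A usage sketch for the `from_cif_file` record above; the import path is assumed from the record's path field and `Si.cif` is a placeholder file name:

# Build a FEFF header directly from a CIF file.
from pymatgen.io.feff.inputs import Header   # import path assumed from the record's path

header = Header.from_cif_file("Si.cif", source="mp-149", comment="FEFF header from CIF")
print(header)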
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L728-L759
def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None):
    """Gets edge vectors for the edge types in the adjacency matrix.

    Args:
        adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints.
        num_edge_types: Number of different edge types
        depth: Number of channels
        name: a string
    Returns:
        A [batch, num_nodes, num_nodes, depth] vector of tensors
    """
    with tf.variable_scope(name, default_name="edge_vectors"):
        att_adj_vectors_shape = [num_edge_types, depth]
        adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix)
        adj_vectors = (
            tf.get_variable(
                "adj_vectors",
                att_adj_vectors_shape,
                initializer=tf.random_normal_initializer(0, depth**-0.5)) *
            (depth**0.5))
        # Avoiding gathers so that it works on TPUs
        # adjacency_matrix_one_hot has shape
        # [batch, num_nodes, num_nodes, num_edge_types]
        adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types)
        att_adj_vectors = tf.matmul(
            tf.reshape(tf.to_float(adjacency_matrix_one_hot), [-1, num_edge_types]),
            adj_vectors)
        return tf.reshape(att_adj_vectors,
                          [adjacency_matrix_shape[0], adjacency_matrix_shape[1],
                           adjacency_matrix_shape[2], depth])
[ "def", "make_edge_vectors", "(", "adjacency_matrix", ",", "num_edge_types", ",", "depth", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"edge_vectors\"", ")", ":", "att_adj_vectors_shape", "=", "[", "num_edge_types", ",", "depth", "]", "adjacency_matrix_shape", "=", "common_layers", ".", "shape_list", "(", "adjacency_matrix", ")", "adj_vectors", "=", "(", "tf", ".", "get_variable", "(", "\"adj_vectors\"", ",", "att_adj_vectors_shape", ",", "initializer", "=", "tf", ".", "random_normal_initializer", "(", "0", ",", "depth", "**", "-", "0.5", ")", ")", "*", "(", "depth", "**", "0.5", ")", ")", "# Avoiding gathers so that it works on TPUs", "# adjacency_matrix_one_hot has shape", "# [batch, num_nodes, num_nodes, num_edge_types]", "adjacency_matrix_one_hot", "=", "tf", ".", "one_hot", "(", "adjacency_matrix", ",", "num_edge_types", ")", "att_adj_vectors", "=", "tf", ".", "matmul", "(", "tf", ".", "reshape", "(", "tf", ".", "to_float", "(", "adjacency_matrix_one_hot", ")", ",", "[", "-", "1", ",", "num_edge_types", "]", ")", ",", "adj_vectors", ")", "return", "tf", ".", "reshape", "(", "att_adj_vectors", ",", "[", "adjacency_matrix_shape", "[", "0", "]", ",", "adjacency_matrix_shape", "[", "1", "]", ",", "adjacency_matrix_shape", "[", "2", "]", ",", "depth", "]", ")" ]
Gets edge vectors for the edge types in the adjacency matrix. Args: adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. num_edge_types: Number of different edge types depth: Number of channels name: a string Returns: A [batch, num_nodes, num_nodes, depth] vector of tensors
[ "Gets", "edge", "vectors", "for", "the", "edge", "types", "in", "the", "adjacency", "matrix", "." ]
python
train
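A NumPy check of the "one-hot matmul instead of gather" trick in the record above (shapes and values are made up; this check is independent of TensorFlow):

# Multiplying one-hot edge-type indicators by the edge-vector table gives the same
# result as indexing (gathering) rows of that table.
import numpy as np

num_edge_types, depth = 4, 3
adjacency = np.array([[0, 2],
                      [1, 3]])                      # [num_nodes, num_nodes] edge-type ids
adj_vectors = np.random.randn(num_edge_types, depth)

one_hot = np.eye(num_edge_types)[adjacency]         # [2, 2, num_edge_types]
via_matmul = one_hot.reshape(-1, num_edge_types) @ adj_vectors
via_gather = adj_vectors[adjacency].reshape(-1, depth)

print(np.allclose(via_matmul, via_gather))          # True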
heigeo/climata
climata/base.py
https://github.com/heigeo/climata/blob/2028bdbd40e1c8985b0b62f7cb969ce7dfa8f1bd/climata/base.py#L196-L208
def get_params(self):
    """
    Get parameters for web service, noting whether any are "complex"
    """
    params = {}
    complex = False

    for name, opt in self.filter_options.items():
        if opt.ignored:
            continue
        if self.set_param(params, name):
            complex = True
    return params, complex
[ "def", "get_params", "(", "self", ")", ":", "params", "=", "{", "}", "complex", "=", "False", "for", "name", ",", "opt", "in", "self", ".", "filter_options", ".", "items", "(", ")", ":", "if", "opt", ".", "ignored", ":", "continue", "if", "self", ".", "set_param", "(", "params", ",", "name", ")", ":", "complex", "=", "True", "return", "params", ",", "complex" ]
Get parameters for web service, noting whether any are "complex"
[ "Get", "parameters", "for", "web", "service", "noting", "whether", "any", "are", "complex" ]
python
train
tdegeus/GooseMPL
GooseMPL/__init__.py
https://github.com/tdegeus/GooseMPL/blob/16e1e06cbcf7131ac98c03ca7251ce83734ef905/GooseMPL/__init__.py#L287-L323
def rel2abs_x(x, axis=None): r''' Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a fraction of the relevant axis. Be sure to set the limits and scale before calling this function! :arguments: **x** (``float``, ``list``) Relative coordinates. :options: **axis** ([``plt.gca()``] | ...) Specify the axis to which to apply the limits. :returns: **x** (``float``, ``list``) Absolute coordinates. ''' # get current axis if axis is None: axis = plt.gca() # get current limits xmin, xmax = axis.get_xlim() # transform # - log scale if axis.get_xscale() == 'log': try : return [10.**(np.log10(xmin)+i*(np.log10(xmax)-np.log10(xmin))) if i is not None else i for i in x] except: return 10.**(np.log10(xmin)+x*(np.log10(xmax)-np.log10(xmin))) # - normal scale else: try : return [xmin+i*(xmax-xmin) if i is not None else i for i in x] except: return xmin+x*(xmax-xmin)
[ "def", "rel2abs_x", "(", "x", ",", "axis", "=", "None", ")", ":", "# get current axis", "if", "axis", "is", "None", ":", "axis", "=", "plt", ".", "gca", "(", ")", "# get current limits", "xmin", ",", "xmax", "=", "axis", ".", "get_xlim", "(", ")", "# transform", "# - log scale", "if", "axis", ".", "get_xscale", "(", ")", "==", "'log'", ":", "try", ":", "return", "[", "10.", "**", "(", "np", ".", "log10", "(", "xmin", ")", "+", "i", "*", "(", "np", ".", "log10", "(", "xmax", ")", "-", "np", ".", "log10", "(", "xmin", ")", ")", ")", "if", "i", "is", "not", "None", "else", "i", "for", "i", "in", "x", "]", "except", ":", "return", "10.", "**", "(", "np", ".", "log10", "(", "xmin", ")", "+", "x", "*", "(", "np", ".", "log10", "(", "xmax", ")", "-", "np", ".", "log10", "(", "xmin", ")", ")", ")", "# - normal scale", "else", ":", "try", ":", "return", "[", "xmin", "+", "i", "*", "(", "xmax", "-", "xmin", ")", "if", "i", "is", "not", "None", "else", "i", "for", "i", "in", "x", "]", "except", ":", "return", "xmin", "+", "x", "*", "(", "xmax", "-", "xmin", ")" ]
r''' Transform relative x-coordinates to absolute x-coordinates. Relative coordinates correspond to a fraction of the relevant axis. Be sure to set the limits and scale before calling this function! :arguments: **x** (``float``, ``list``) Relative coordinates. :options: **axis** ([``plt.gca()``] | ...) Specify the axis to which to apply the limits. :returns: **x** (``float``, ``list``) Absolute coordinates.
[ "r", "Transform", "relative", "x", "-", "coordinates", "to", "absolute", "x", "-", "coordinates", ".", "Relative", "coordinates", "correspond", "to", "a", "fraction", "of", "the", "relevant", "axis", ".", "Be", "sure", "to", "set", "the", "limits", "and", "scale", "before", "calling", "this", "function!" ]
python
train
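The arithmetic `rel2abs_x` applies, worked once per scale (the axis limits used here are made up):

# Relative-to-absolute mapping on linear and log axes.
import numpy as np

xmin, xmax = 2.0, 12.0
rel = 0.25
print(xmin + rel * (xmax - xmin))                                            # 4.5, 25% along a linear axis

xmin, xmax = 1.0, 1000.0
print(10.0 ** (np.log10(xmin) + rel * (np.log10(xmax) - np.log10(xmin))))   # ~5.62, 25% along a log axis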
sentinel-hub/eo-learn
features/eolearn/features/radiometric_normalization.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/features/eolearn/features/radiometric_normalization.py#L135-L143
def _get_indices(self, data):
    """ Compute indices along temporal dimension corresponding to the sought percentile

    :param data: Input 3D array holding the reference band
    :type data: numpy array
    :return: 2D array holding the temporal index corresponding to percentile
    """
    indices = self._index_by_percentile(data, self.percentile)
    return indices
[ "def", "_get_indices", "(", "self", ",", "data", ")", ":", "indices", "=", "self", ".", "_index_by_percentile", "(", "data", ",", "self", ".", "percentile", ")", "return", "indices" ]
Compute indices along temporal dimension corresponding to the sought percentile :param data: Input 3D array holding the reference band :type data: numpy array :return: 2D array holding the temporal index corresponding to percentile
[ "Compute", "indices", "along", "temporal", "dimension", "corresponding", "to", "the", "sought", "percentile" ]
python
train
bsolomon1124/pyfinance
pyfinance/utils.py
https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/utils.py#L618-L670
def random_tickers( length, n_tickers, endswith=None, letters=None, slicer=itertools.islice ): """Generate a length-n_tickers list of unique random ticker symbols. Parameters ---------- length : int The length of each ticker string. n_tickers : int Number of tickers to generate. endswith : str, default None Specify the ending element(s) of each ticker (for example, 'X'). letters : sequence, default None Sequence of possible letters to choose from. If None, defaults to `string.ascii_uppercase`. Returns ------- list of str Examples -------- >>> from pyfinance import utils >>> utils.random_tickers(length=5, n_tickers=4, endswith='X') ['UZTFX', 'ROYAX', 'ZBVIX', 'IUWYX'] >>> utils.random_tickers(3, 8) ['SBW', 'GDF', 'FOG', 'PWO', 'QDH', 'MJJ', 'YZD', 'QST'] """ # The trick here is that we need uniqueness. That defeats the # purpose of using NumPy because we need to generate 1x1. # (Although the alternative is just to generate a "large # enough" duplicated sequence and prune from it.) if letters is None: letters = string.ascii_uppercase if endswith: # Only generate substrings up to `endswith` length = length - len(endswith) join = "".join def yield_ticker(rand=random.choices): if endswith: while True: yield join(rand(letters, k=length)) + endswith else: while True: yield join(rand(letters, k=length)) tickers = itertools.islice(unique_everseen(yield_ticker()), n_tickers) return list(tickers)
[ "def", "random_tickers", "(", "length", ",", "n_tickers", ",", "endswith", "=", "None", ",", "letters", "=", "None", ",", "slicer", "=", "itertools", ".", "islice", ")", ":", "# The trick here is that we need uniqueness. That defeats the\r", "# purpose of using NumPy because we need to generate 1x1.\r", "# (Although the alternative is just to generate a \"large\r", "# enough\" duplicated sequence and prune from it.)\r", "if", "letters", "is", "None", ":", "letters", "=", "string", ".", "ascii_uppercase", "if", "endswith", ":", "# Only generate substrings up to `endswith`\r", "length", "=", "length", "-", "len", "(", "endswith", ")", "join", "=", "\"\"", ".", "join", "def", "yield_ticker", "(", "rand", "=", "random", ".", "choices", ")", ":", "if", "endswith", ":", "while", "True", ":", "yield", "join", "(", "rand", "(", "letters", ",", "k", "=", "length", ")", ")", "+", "endswith", "else", ":", "while", "True", ":", "yield", "join", "(", "rand", "(", "letters", ",", "k", "=", "length", ")", ")", "tickers", "=", "itertools", ".", "islice", "(", "unique_everseen", "(", "yield_ticker", "(", ")", ")", ",", "n_tickers", ")", "return", "list", "(", "tickers", ")" ]
Generate a length-n_tickers list of unique random ticker symbols. Parameters ---------- length : int The length of each ticker string. n_tickers : int Number of tickers to generate. endswith : str, default None Specify the ending element(s) of each ticker (for example, 'X'). letters : sequence, default None Sequence of possible letters to choose from. If None, defaults to `string.ascii_uppercase`. Returns ------- list of str Examples -------- >>> from pyfinance import utils >>> utils.random_tickers(length=5, n_tickers=4, endswith='X') ['UZTFX', 'ROYAX', 'ZBVIX', 'IUWYX'] >>> utils.random_tickers(3, 8) ['SBW', 'GDF', 'FOG', 'PWO', 'QDH', 'MJJ', 'YZD', 'QST']
[ "Generate", "a", "length", "-", "n_tickers", "list", "of", "unique", "random", "ticker", "symbols", ".", "Parameters", "----------", "length", ":", "int", "The", "length", "of", "each", "ticker", "string", ".", "n_tickers", ":", "int", "Number", "of", "tickers", "to", "generate", ".", "endswith", ":", "str", "default", "None", "Specify", "the", "ending", "element", "(", "s", ")", "of", "each", "ticker", "(", "for", "example", "X", ")", ".", "letters", ":", "sequence", "default", "None", "Sequence", "of", "possible", "letters", "to", "choose", "from", ".", "If", "None", "defaults", "to", "string", ".", "ascii_uppercase", ".", "Returns", "-------", "list", "of", "str", "Examples", "--------", ">>>", "from", "pyfinance", "import", "utils", ">>>", "utils", ".", "random_tickers", "(", "length", "=", "5", "n_tickers", "=", "4", "endswith", "=", "X", ")", "[", "UZTFX", "ROYAX", "ZBVIX", "IUWYX", "]", ">>>", "utils", ".", "random_tickers", "(", "3", "8", ")", "[", "SBW", "GDF", "FOG", "PWO", "QDH", "MJJ", "YZD", "QST", "]" ]
python
train
google/grr
grr/server/grr_response_server/databases/mysql_paths.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_paths.py#L202-L346
def _MultiWritePathInfos(self, path_infos, connection=None): """Writes a collection of path info records for specified clients.""" query = "" path_info_count = 0 path_info_values = [] parent_path_info_count = 0 parent_path_info_values = [] has_stat_entries = False has_hash_entries = False for client_id, client_path_infos in iteritems(path_infos): for path_info in client_path_infos: path = mysql_utils.ComponentsToPath(path_info.components) path_info_values.append(db_utils.ClientIDToInt(client_id)) path_info_values.append(int(path_info.path_type)) path_info_values.append(path_info.GetPathID().AsBytes()) path_info_values.append(path) path_info_values.append(bool(path_info.directory)) path_info_values.append(len(path_info.components)) if path_info.HasField("stat_entry"): path_info_values.append(path_info.stat_entry.SerializeToString()) has_stat_entries = True else: path_info_values.append(None) if path_info.HasField("hash_entry"): path_info_values.append(path_info.hash_entry.SerializeToString()) path_info_values.append(path_info.hash_entry.sha256.AsBytes()) has_hash_entries = True else: path_info_values.append(None) path_info_values.append(None) path_info_count += 1 # TODO(hanuszczak): Implement a trie in order to avoid inserting # duplicated records. for parent_path_info in path_info.GetAncestors(): path = mysql_utils.ComponentsToPath(parent_path_info.components) parent_path_info_values.append(db_utils.ClientIDToInt(client_id)) parent_path_info_values.append(int(parent_path_info.path_type)) parent_path_info_values.append(parent_path_info.GetPathID().AsBytes()) parent_path_info_values.append(path) parent_path_info_values.append(len(parent_path_info.components)) parent_path_info_count += 1 query += """ CREATE TEMPORARY TABLE client_path_infos( client_id BIGINT UNSIGNED NOT NULL, path_type INT UNSIGNED NOT NULL, path_id BINARY(32) NOT NULL, path TEXT NOT NULL, directory BOOLEAN NOT NULL, depth INT NOT NULL, stat_entry MEDIUMBLOB NULL, hash_entry MEDIUMBLOB NULL, sha256 BINARY(32) NULL, timestamp TIMESTAMP(6) NOT NULL DEFAULT now(6) );""" if path_info_count > 0: query += """ INSERT INTO client_path_infos(client_id, path_type, path_id, path, directory, depth, stat_entry, hash_entry, sha256) VALUES {}; """.format(mysql_utils.Placeholders(num=9, values=path_info_count)) query += """ INSERT INTO client_paths(client_id, path_type, path_id, path, directory, depth) SELECT client_id, path_type, path_id, path, directory, depth FROM client_path_infos ON DUPLICATE KEY UPDATE client_paths.directory = client_paths.directory OR VALUES(client_paths.directory), client_paths.timestamp = now(6); """ if parent_path_info_count > 0: placeholders = ["(%s, %s, %s, %s, TRUE, %s)"] * parent_path_info_count query += """ INSERT INTO client_paths(client_id, path_type, path_id, path, directory, depth) VALUES {} ON DUPLICATE KEY UPDATE directory = TRUE, timestamp = now(); """.format(", ".join(placeholders)) if has_stat_entries: query += """ INSERT INTO client_path_stat_entries(client_id, path_type, path_id, stat_entry, timestamp) SELECT client_id, path_type, path_id, stat_entry, timestamp FROM client_path_infos WHERE stat_entry IS NOT NULL; """ query += """ UPDATE client_paths, client_path_infos SET client_paths.last_stat_entry_timestamp = client_path_infos.timestamp WHERE client_paths.client_id = client_path_infos.client_id AND client_paths.path_type = client_path_infos.path_type AND client_paths.path_id = client_path_infos.path_id AND client_path_infos.stat_entry IS NOT NULL; """ if has_hash_entries: query += """ INSERT 
INTO client_path_hash_entries(client_id, path_type, path_id, hash_entry, sha256, timestamp) SELECT client_id, path_type, path_id, hash_entry, sha256, timestamp FROM client_path_infos WHERE hash_entry IS NOT NULL; """ query += """ UPDATE client_paths, client_path_infos SET client_paths.last_hash_entry_timestamp = client_path_infos.timestamp WHERE client_paths.client_id = client_path_infos.client_id AND client_paths.path_type = client_path_infos.path_type AND client_paths.path_id = client_path_infos.path_id AND client_path_infos.hash_entry IS NOT NULL; """ try: with contextlib.closing(connection.cursor()) as cursor: cursor.execute(query, path_info_values + parent_path_info_values) finally: # Drop the temporary table in a separate cursor. This ensures that # even if the previous cursor.execute fails mid-way leaving the # temporary table created (as table creation can't be rolled back), the # table would still be correctly dropped. # # This is important since connections are reused in the MySQL connection # pool. with contextlib.closing(connection.cursor()) as cursor: cursor.execute("DROP TEMPORARY TABLE IF EXISTS client_path_infos")
[ "def", "_MultiWritePathInfos", "(", "self", ",", "path_infos", ",", "connection", "=", "None", ")", ":", "query", "=", "\"\"", "path_info_count", "=", "0", "path_info_values", "=", "[", "]", "parent_path_info_count", "=", "0", "parent_path_info_values", "=", "[", "]", "has_stat_entries", "=", "False", "has_hash_entries", "=", "False", "for", "client_id", ",", "client_path_infos", "in", "iteritems", "(", "path_infos", ")", ":", "for", "path_info", "in", "client_path_infos", ":", "path", "=", "mysql_utils", ".", "ComponentsToPath", "(", "path_info", ".", "components", ")", "path_info_values", ".", "append", "(", "db_utils", ".", "ClientIDToInt", "(", "client_id", ")", ")", "path_info_values", ".", "append", "(", "int", "(", "path_info", ".", "path_type", ")", ")", "path_info_values", ".", "append", "(", "path_info", ".", "GetPathID", "(", ")", ".", "AsBytes", "(", ")", ")", "path_info_values", ".", "append", "(", "path", ")", "path_info_values", ".", "append", "(", "bool", "(", "path_info", ".", "directory", ")", ")", "path_info_values", ".", "append", "(", "len", "(", "path_info", ".", "components", ")", ")", "if", "path_info", ".", "HasField", "(", "\"stat_entry\"", ")", ":", "path_info_values", ".", "append", "(", "path_info", ".", "stat_entry", ".", "SerializeToString", "(", ")", ")", "has_stat_entries", "=", "True", "else", ":", "path_info_values", ".", "append", "(", "None", ")", "if", "path_info", ".", "HasField", "(", "\"hash_entry\"", ")", ":", "path_info_values", ".", "append", "(", "path_info", ".", "hash_entry", ".", "SerializeToString", "(", ")", ")", "path_info_values", ".", "append", "(", "path_info", ".", "hash_entry", ".", "sha256", ".", "AsBytes", "(", ")", ")", "has_hash_entries", "=", "True", "else", ":", "path_info_values", ".", "append", "(", "None", ")", "path_info_values", ".", "append", "(", "None", ")", "path_info_count", "+=", "1", "# TODO(hanuszczak): Implement a trie in order to avoid inserting", "# duplicated records.", "for", "parent_path_info", "in", "path_info", ".", "GetAncestors", "(", ")", ":", "path", "=", "mysql_utils", ".", "ComponentsToPath", "(", "parent_path_info", ".", "components", ")", "parent_path_info_values", ".", "append", "(", "db_utils", ".", "ClientIDToInt", "(", "client_id", ")", ")", "parent_path_info_values", ".", "append", "(", "int", "(", "parent_path_info", ".", "path_type", ")", ")", "parent_path_info_values", ".", "append", "(", "parent_path_info", ".", "GetPathID", "(", ")", ".", "AsBytes", "(", ")", ")", "parent_path_info_values", ".", "append", "(", "path", ")", "parent_path_info_values", ".", "append", "(", "len", "(", "parent_path_info", ".", "components", ")", ")", "parent_path_info_count", "+=", "1", "query", "+=", "\"\"\"\n CREATE TEMPORARY TABLE client_path_infos(\n client_id BIGINT UNSIGNED NOT NULL,\n path_type INT UNSIGNED NOT NULL,\n path_id BINARY(32) NOT NULL,\n path TEXT NOT NULL,\n directory BOOLEAN NOT NULL,\n depth INT NOT NULL,\n stat_entry MEDIUMBLOB NULL,\n hash_entry MEDIUMBLOB NULL,\n sha256 BINARY(32) NULL,\n timestamp TIMESTAMP(6) NOT NULL DEFAULT now(6)\n );\"\"\"", "if", "path_info_count", ">", "0", ":", "query", "+=", "\"\"\"\n INSERT INTO client_path_infos(client_id, path_type, path_id,\n path, directory, depth,\n stat_entry, hash_entry, sha256)\n VALUES {};\n \"\"\"", ".", "format", "(", "mysql_utils", ".", "Placeholders", "(", "num", "=", "9", ",", "values", "=", "path_info_count", ")", ")", "query", "+=", "\"\"\"\n INSERT INTO client_paths(client_id, path_type, path_id, path,\n directory, depth)\n 
SELECT client_id, path_type, path_id, path, directory, depth\n FROM client_path_infos\n ON DUPLICATE KEY UPDATE\n client_paths.directory = client_paths.directory OR VALUES(client_paths.directory),\n client_paths.timestamp = now(6);\n \"\"\"", "if", "parent_path_info_count", ">", "0", ":", "placeholders", "=", "[", "\"(%s, %s, %s, %s, TRUE, %s)\"", "]", "*", "parent_path_info_count", "query", "+=", "\"\"\"\n INSERT INTO client_paths(client_id, path_type, path_id, path,\n directory, depth)\n VALUES {}\n ON DUPLICATE KEY UPDATE\n directory = TRUE,\n timestamp = now();\n \"\"\"", ".", "format", "(", "\", \"", ".", "join", "(", "placeholders", ")", ")", "if", "has_stat_entries", ":", "query", "+=", "\"\"\"\n INSERT INTO client_path_stat_entries(client_id, path_type, path_id,\n stat_entry, timestamp)\n SELECT client_id, path_type, path_id, stat_entry, timestamp\n FROM client_path_infos\n WHERE stat_entry IS NOT NULL;\n \"\"\"", "query", "+=", "\"\"\"\n UPDATE client_paths, client_path_infos\n SET client_paths.last_stat_entry_timestamp = client_path_infos.timestamp\n WHERE client_paths.client_id = client_path_infos.client_id\n AND client_paths.path_type = client_path_infos.path_type\n AND client_paths.path_id = client_path_infos.path_id\n AND client_path_infos.stat_entry IS NOT NULL;\n \"\"\"", "if", "has_hash_entries", ":", "query", "+=", "\"\"\"\n INSERT INTO client_path_hash_entries(client_id, path_type, path_id,\n hash_entry, sha256, timestamp)\n SELECT client_id, path_type, path_id, hash_entry, sha256, timestamp\n FROM client_path_infos\n WHERE hash_entry IS NOT NULL;\n \"\"\"", "query", "+=", "\"\"\"\n UPDATE client_paths, client_path_infos\n SET client_paths.last_hash_entry_timestamp = client_path_infos.timestamp\n WHERE client_paths.client_id = client_path_infos.client_id\n AND client_paths.path_type = client_path_infos.path_type\n AND client_paths.path_id = client_path_infos.path_id\n AND client_path_infos.hash_entry IS NOT NULL;\n \"\"\"", "try", ":", "with", "contextlib", ".", "closing", "(", "connection", ".", "cursor", "(", ")", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "query", ",", "path_info_values", "+", "parent_path_info_values", ")", "finally", ":", "# Drop the temporary table in a separate cursor. This ensures that", "# even if the previous cursor.execute fails mid-way leaving the", "# temporary table created (as table creation can't be rolled back), the", "# table would still be correctly dropped.", "#", "# This is important since connections are reused in the MySQL connection", "# pool.", "with", "contextlib", ".", "closing", "(", "connection", ".", "cursor", "(", ")", ")", "as", "cursor", ":", "cursor", ".", "execute", "(", "\"DROP TEMPORARY TABLE IF EXISTS client_path_infos\"", ")" ]
Writes a collection of path info records for specified clients.
[ "Writes", "a", "collection", "of", "path", "info", "records", "for", "specified", "clients", "." ]
python
train
salu133445/pypianoroll
pypianoroll/track.py
https://github.com/salu133445/pypianoroll/blob/6224dc1e29222de2124d249acb80f3d072166917/pypianoroll/track.py#L76-L99
def assign_constant(self, value, dtype=None):
    """
    Assign a constant value to all nonzeros in the pianoroll. If the
    pianoroll is not binarized, its data type will be preserved. If the
    pianoroll is binarized, it will be casted to the type of `value`.

    Arguments
    ---------
    value : int or float
        The constant value to be assigned to all the nonzeros in the
        pianoroll.

    """
    if not self.is_binarized():
        self.pianoroll[self.pianoroll.nonzero()] = value
        return
    if dtype is None:
        if isinstance(value, int):
            dtype = int
        elif isinstance(value, float):
            dtype = float
    nonzero = self.pianoroll.nonzero()
    self.pianoroll = np.zeros(self.pianoroll.shape, dtype)
    self.pianoroll[nonzero] = value
[ "def", "assign_constant", "(", "self", ",", "value", ",", "dtype", "=", "None", ")", ":", "if", "not", "self", ".", "is_binarized", "(", ")", ":", "self", ".", "pianoroll", "[", "self", ".", "pianoroll", ".", "nonzero", "(", ")", "]", "=", "value", "return", "if", "dtype", "is", "None", ":", "if", "isinstance", "(", "value", ",", "int", ")", ":", "dtype", "=", "int", "elif", "isinstance", "(", "value", ",", "float", ")", ":", "dtype", "=", "float", "nonzero", "=", "self", ".", "pianoroll", ".", "nonzero", "(", ")", "self", ".", "pianoroll", "=", "np", ".", "zeros", "(", "self", ".", "pianoroll", ".", "shape", ",", "dtype", ")", "self", ".", "pianoroll", "[", "nonzero", "]", "=", "value" ]
Assign a constant value to all nonzeros in the pianoroll. If the pianoroll is not binarized, its data type will be preserved. If the pianoroll is binarized, it will be casted to the type of `value`. Arguments --------- value : int or float The constant value to be assigned to all the nonzeros in the pianoroll.
[ "Assign", "a", "constant", "value", "to", "all", "nonzeros", "in", "the", "pianoroll", ".", "If", "the", "pianoroll", "is", "not", "binarized", "its", "data", "type", "will", "be", "preserved", ".", "If", "the", "pianoroll", "is", "binarized", "it", "will", "be", "casted", "to", "the", "type", "of", "value", "." ]
python
train
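A plain-NumPy sketch of what `assign_constant` does to a binarized pianoroll (the array shape and pitches below are made up):

# A boolean (binarized) pianoroll becomes an integer array holding `value` at the
# positions that were True, mirroring the branch taken when is_binarized() is True.
import numpy as np

pianoroll = np.zeros((4, 128), bool)
pianoroll[0, 60] = pianoroll[2, 64] = True      # True where a note is on

value, dtype = 100, int                         # value is an int, so dtype -> int
nonzero = pianoroll.nonzero()
out = np.zeros(pianoroll.shape, dtype)
out[nonzero] = value
print(out[0, 60], out[1, 60])                   # 100 0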
buruzaemon/natto-py
natto/environment.py
https://github.com/buruzaemon/natto-py/blob/018fe004c47c45c66bdf2e03fe24e981ae089b76/natto/environment.py#L98-L162
def __get_libpath(self): '''Return the absolute path to the MeCab library. On Windows, the path to the system dictionary is used to deduce the path to libmecab.dll. Otherwise, mecab-config is used find the libmecab shared object or dynamic library (*NIX or Mac OS, respectively). Will defer to the user-specified MECAB_PATH environment variable, if set. Returns: The absolute path to the MeCab library. Raises: EnvironmentError: A problem was encountered in trying to locate the MeCab library. ''' libp = os.getenv(self.MECAB_PATH) if libp: return os.path.abspath(libp) else: plat = sys.platform if plat == 'win32': lib = self._LIBMECAB.format(self._WINLIB_EXT) try: v = self.__regkey_value(self._WINHKEY, self._WINVALUE) ldir = v.split('etc')[0] libp = os.path.join(ldir, 'bin', lib) except EnvironmentError as err: logger.error('{}\n'.format(err)) logger.error('{}\n'.format(sys.exc_info()[0])) raise EnvironmentError( self._ERROR_WINREG.format(self._WINVALUE, self._WINHKEY)) else: # UNIX-y OS? if plat == 'darwin': lib = self._LIBMECAB.format(self._MACLIB_EXT) else: lib = self._LIBMECAB.format(self._UNIXLIB_EXT) try: cmd = ['mecab-config', '--libs-only-L'] res = Popen(cmd, stdout=PIPE).communicate() lines = res[0].decode() if not lines.startswith('unrecognized'): linfo = lines.strip() libp = os.path.join(linfo, lib) else: raise EnvironmentError( self._ERROR_MECABCONFIG.format(lib)) except EnvironmentError as err: logger.error('{}\n'.format(err)) logger.error('{}\n'.format(sys.exc_info()[0])) raise EnvironmentError(self._ERROR_NOLIB.format(lib)) if libp and os.path.exists(libp): libp = os.path.abspath(libp) os.environ[self.MECAB_PATH] = libp return libp else: raise EnvironmentError(self._ERROR_NOLIB.format(libp))
[ "def", "__get_libpath", "(", "self", ")", ":", "libp", "=", "os", ".", "getenv", "(", "self", ".", "MECAB_PATH", ")", "if", "libp", ":", "return", "os", ".", "path", ".", "abspath", "(", "libp", ")", "else", ":", "plat", "=", "sys", ".", "platform", "if", "plat", "==", "'win32'", ":", "lib", "=", "self", ".", "_LIBMECAB", ".", "format", "(", "self", ".", "_WINLIB_EXT", ")", "try", ":", "v", "=", "self", ".", "__regkey_value", "(", "self", ".", "_WINHKEY", ",", "self", ".", "_WINVALUE", ")", "ldir", "=", "v", ".", "split", "(", "'etc'", ")", "[", "0", "]", "libp", "=", "os", ".", "path", ".", "join", "(", "ldir", ",", "'bin'", ",", "lib", ")", "except", "EnvironmentError", "as", "err", ":", "logger", ".", "error", "(", "'{}\\n'", ".", "format", "(", "err", ")", ")", "logger", ".", "error", "(", "'{}\\n'", ".", "format", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", ")", "raise", "EnvironmentError", "(", "self", ".", "_ERROR_WINREG", ".", "format", "(", "self", ".", "_WINVALUE", ",", "self", ".", "_WINHKEY", ")", ")", "else", ":", "# UNIX-y OS?\r", "if", "plat", "==", "'darwin'", ":", "lib", "=", "self", ".", "_LIBMECAB", ".", "format", "(", "self", ".", "_MACLIB_EXT", ")", "else", ":", "lib", "=", "self", ".", "_LIBMECAB", ".", "format", "(", "self", ".", "_UNIXLIB_EXT", ")", "try", ":", "cmd", "=", "[", "'mecab-config'", ",", "'--libs-only-L'", "]", "res", "=", "Popen", "(", "cmd", ",", "stdout", "=", "PIPE", ")", ".", "communicate", "(", ")", "lines", "=", "res", "[", "0", "]", ".", "decode", "(", ")", "if", "not", "lines", ".", "startswith", "(", "'unrecognized'", ")", ":", "linfo", "=", "lines", ".", "strip", "(", ")", "libp", "=", "os", ".", "path", ".", "join", "(", "linfo", ",", "lib", ")", "else", ":", "raise", "EnvironmentError", "(", "self", ".", "_ERROR_MECABCONFIG", ".", "format", "(", "lib", ")", ")", "except", "EnvironmentError", "as", "err", ":", "logger", ".", "error", "(", "'{}\\n'", ".", "format", "(", "err", ")", ")", "logger", ".", "error", "(", "'{}\\n'", ".", "format", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")", ")", "raise", "EnvironmentError", "(", "self", ".", "_ERROR_NOLIB", ".", "format", "(", "lib", ")", ")", "if", "libp", "and", "os", ".", "path", ".", "exists", "(", "libp", ")", ":", "libp", "=", "os", ".", "path", ".", "abspath", "(", "libp", ")", "os", ".", "environ", "[", "self", ".", "MECAB_PATH", "]", "=", "libp", "return", "libp", "else", ":", "raise", "EnvironmentError", "(", "self", ".", "_ERROR_NOLIB", ".", "format", "(", "libp", ")", ")" ]
Return the absolute path to the MeCab library. On Windows, the path to the system dictionary is used to deduce the path to libmecab.dll. Otherwise, mecab-config is used to find the libmecab shared object or dynamic library (*NIX or Mac OS, respectively). Will defer to the user-specified MECAB_PATH environment variable, if set. Returns: The absolute path to the MeCab library. Raises: EnvironmentError: A problem was encountered in trying to locate the MeCab library.
[ "Return", "the", "absolute", "path", "to", "the", "MeCab", "library", ".", "On", "Windows", "the", "path", "to", "the", "system", "dictionary", "is", "used", "to", "deduce", "the", "path", "to", "libmecab", ".", "dll", ".", "Otherwise", "mecab", "-", "config", "is", "used", "find", "the", "libmecab", "shared", "object", "or", "dynamic", "library", "(", "*", "NIX", "or", "Mac", "OS", "respectively", ")", ".", "Will", "defer", "to", "the", "user", "-", "specified", "MECAB_PATH", "environment", "variable", "if", "set", ".", "Returns", ":", "The", "absolute", "path", "to", "the", "MeCab", "library", ".", "Raises", ":", "EnvironmentError", ":", "A", "problem", "was", "encountered", "in", "trying", "to", "locate", "the", "MeCab", "library", "." ]
python
train
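The __get_libpath record above resolves the MeCab shared library in a fixed order: an explicit MECAB_PATH override first, the Windows registry on win32, or mecab-config on *NIX and macOS. Below is a minimal standalone sketch of the non-Windows branch only; the helper name find_libmecab is invented for illustration, and it assumes mecab-config is available on the PATH, so treat it as a sketch of the lookup idea rather than natto-py's actual API.

# Sketch of the non-Windows lookup order: an explicit environment variable wins,
# otherwise mecab-config reports the library directory (assumes mecab-config is installed).
import os
import sys
from subprocess import Popen, PIPE

def find_libmecab(env_var='MECAB_PATH'):
    # 1. user override via environment variable
    override = os.getenv(env_var)
    if override:
        return os.path.abspath(override)
    # 2. ask mecab-config for the library directory
    ext = 'dylib' if sys.platform == 'darwin' else 'so'
    lib = 'libmecab.{}'.format(ext)
    out = Popen(['mecab-config', '--libs-only-L'], stdout=PIPE).communicate()[0]
    libdir = out.decode().strip()
    candidate = os.path.join(libdir, lib)
    if os.path.exists(candidate):
        return os.path.abspath(candidate)
    raise EnvironmentError('could not locate {}'.format(lib))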
rocky/python-uncompyle6
uncompyle6/semantics/pysource.py
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/semantics/pysource.py#L1174-L1283
def listcomprehension_walk2(self, node): """List comprehensions the way they are done in Python 2 and sometimes in Python 3. They're more other comprehensions, e.g. set comprehensions See if we can combine code. """ p = self.prec self.prec = 27 code = Code(node[1].attr, self.scanner, self.currentclass) ast = self.build_ast(code._tokens, code._customize) self.customize(code._customize) # skip over: sstmt, stmt, return, ret_expr # and other singleton derivations while (len(ast) == 1 or (ast in ('sstmt', 'return') and ast[-1] in ('RETURN_LAST', 'RETURN_VALUE'))): self.prec = 100 ast = ast[0] n = ast[1] # collection = node[-3] collections = [node[-3]] list_ifs = [] if self.version == 3.0 and n != 'list_iter': # FIXME 3.0 is a snowflake here. We need # special code for this. Not sure if this is totally # correct. stores = [ast[3]] assert ast[4] == 'comp_iter' n = ast[4] # Find the list comprehension body. It is the inner-most # node that is not comp_.. . while n == 'comp_iter': if n[0] == 'comp_for': n = n[0] stores.append(n[2]) n = n[3] elif n[0] in ('comp_if', 'comp_if_not'): n = n[0] # FIXME: just a guess if n[0].kind == 'expr': list_ifs.append(n) else: list_ifs.append([1]) n = n[2] pass else: break pass # Skip over n[0] which is something like: _[1] self.preorder(n[1]) else: assert n == 'list_iter' stores = [] # Find the list comprehension body. It is the inner-most # node that is not list_.. . while n == 'list_iter': n = n[0] # recurse one step if n == 'list_for': stores.append(n[2]) n = n[3] if self.version >= 3.6 and n[0] == 'list_for': # Dog-paddle down largely singleton reductions # to find the collection (expr) c = n[0][0] if c == 'expr': c = c[0] # FIXME: grammar is wonky here? Is this really an attribute? if c == 'attribute': c = c[0] collections.append(c) pass elif n in ('list_if', 'list_if_not'): # FIXME: just a guess if n[0].kind == 'expr': list_ifs.append(n) else: list_ifs.append([1]) n = n[2] pass pass assert n == 'lc_body', ast self.preorder(n[0]) # FIXME: add indentation around "for"'s and "in"'s if self.version < 3.6: self.write(' for ') self.preorder(stores[0]) self.write(' in ') self.preorder(collections[0]) if list_ifs: self.preorder(list_ifs[0]) pass else: for i, store in enumerate(stores): self.write(' for ') self.preorder(store) self.write(' in ') self.preorder(collections[i]) if i < len(list_ifs): self.preorder(list_ifs[i]) pass pass self.prec = p
[ "def", "listcomprehension_walk2", "(", "self", ",", "node", ")", ":", "p", "=", "self", ".", "prec", "self", ".", "prec", "=", "27", "code", "=", "Code", "(", "node", "[", "1", "]", ".", "attr", ",", "self", ".", "scanner", ",", "self", ".", "currentclass", ")", "ast", "=", "self", ".", "build_ast", "(", "code", ".", "_tokens", ",", "code", ".", "_customize", ")", "self", ".", "customize", "(", "code", ".", "_customize", ")", "# skip over: sstmt, stmt, return, ret_expr", "# and other singleton derivations", "while", "(", "len", "(", "ast", ")", "==", "1", "or", "(", "ast", "in", "(", "'sstmt'", ",", "'return'", ")", "and", "ast", "[", "-", "1", "]", "in", "(", "'RETURN_LAST'", ",", "'RETURN_VALUE'", ")", ")", ")", ":", "self", ".", "prec", "=", "100", "ast", "=", "ast", "[", "0", "]", "n", "=", "ast", "[", "1", "]", "# collection = node[-3]", "collections", "=", "[", "node", "[", "-", "3", "]", "]", "list_ifs", "=", "[", "]", "if", "self", ".", "version", "==", "3.0", "and", "n", "!=", "'list_iter'", ":", "# FIXME 3.0 is a snowflake here. We need", "# special code for this. Not sure if this is totally", "# correct.", "stores", "=", "[", "ast", "[", "3", "]", "]", "assert", "ast", "[", "4", "]", "==", "'comp_iter'", "n", "=", "ast", "[", "4", "]", "# Find the list comprehension body. It is the inner-most", "# node that is not comp_.. .", "while", "n", "==", "'comp_iter'", ":", "if", "n", "[", "0", "]", "==", "'comp_for'", ":", "n", "=", "n", "[", "0", "]", "stores", ".", "append", "(", "n", "[", "2", "]", ")", "n", "=", "n", "[", "3", "]", "elif", "n", "[", "0", "]", "in", "(", "'comp_if'", ",", "'comp_if_not'", ")", ":", "n", "=", "n", "[", "0", "]", "# FIXME: just a guess", "if", "n", "[", "0", "]", ".", "kind", "==", "'expr'", ":", "list_ifs", ".", "append", "(", "n", ")", "else", ":", "list_ifs", ".", "append", "(", "[", "1", "]", ")", "n", "=", "n", "[", "2", "]", "pass", "else", ":", "break", "pass", "# Skip over n[0] which is something like: _[1]", "self", ".", "preorder", "(", "n", "[", "1", "]", ")", "else", ":", "assert", "n", "==", "'list_iter'", "stores", "=", "[", "]", "# Find the list comprehension body. It is the inner-most", "# node that is not list_.. .", "while", "n", "==", "'list_iter'", ":", "n", "=", "n", "[", "0", "]", "# recurse one step", "if", "n", "==", "'list_for'", ":", "stores", ".", "append", "(", "n", "[", "2", "]", ")", "n", "=", "n", "[", "3", "]", "if", "self", ".", "version", ">=", "3.6", "and", "n", "[", "0", "]", "==", "'list_for'", ":", "# Dog-paddle down largely singleton reductions", "# to find the collection (expr)", "c", "=", "n", "[", "0", "]", "[", "0", "]", "if", "c", "==", "'expr'", ":", "c", "=", "c", "[", "0", "]", "# FIXME: grammar is wonky here? 
Is this really an attribute?", "if", "c", "==", "'attribute'", ":", "c", "=", "c", "[", "0", "]", "collections", ".", "append", "(", "c", ")", "pass", "elif", "n", "in", "(", "'list_if'", ",", "'list_if_not'", ")", ":", "# FIXME: just a guess", "if", "n", "[", "0", "]", ".", "kind", "==", "'expr'", ":", "list_ifs", ".", "append", "(", "n", ")", "else", ":", "list_ifs", ".", "append", "(", "[", "1", "]", ")", "n", "=", "n", "[", "2", "]", "pass", "pass", "assert", "n", "==", "'lc_body'", ",", "ast", "self", ".", "preorder", "(", "n", "[", "0", "]", ")", "# FIXME: add indentation around \"for\"'s and \"in\"'s", "if", "self", ".", "version", "<", "3.6", ":", "self", ".", "write", "(", "' for '", ")", "self", ".", "preorder", "(", "stores", "[", "0", "]", ")", "self", ".", "write", "(", "' in '", ")", "self", ".", "preorder", "(", "collections", "[", "0", "]", ")", "if", "list_ifs", ":", "self", ".", "preorder", "(", "list_ifs", "[", "0", "]", ")", "pass", "else", ":", "for", "i", ",", "store", "in", "enumerate", "(", "stores", ")", ":", "self", ".", "write", "(", "' for '", ")", "self", ".", "preorder", "(", "store", ")", "self", ".", "write", "(", "' in '", ")", "self", ".", "preorder", "(", "collections", "[", "i", "]", ")", "if", "i", "<", "len", "(", "list_ifs", ")", ":", "self", ".", "preorder", "(", "list_ifs", "[", "i", "]", ")", "pass", "pass", "self", ".", "prec", "=", "p" ]
List comprehensions the way they are done in Python 2 and sometimes in Python 3. They're more like other comprehensions, e.g. set comprehensions. See if we can combine code.
[ "List", "comprehensions", "the", "way", "they", "are", "done", "in", "Python", "2", "and", "sometimes", "in", "Python", "3", ".", "They", "re", "more", "other", "comprehensions", "e", ".", "g", ".", "set", "comprehensions", "See", "if", "we", "can", "combine", "code", "." ]
python
train
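The listcomprehension_walk2 record above walks a parse tree, collecting the store targets, the collections being iterated, and any if-guards, and then writes them back out as " for ... in ... [if ...]" clauses. The toy sketch below shows only that final emission step on invented tuple-shaped clauses; uncompyle6's real nodes and grammar rules are much richer, so this illustrates the output format rather than the decompiler itself.

# Toy sketch: given a comprehension body and a list of (store, collection, condition)
# triples, emit the clauses in order. The tuple shape is invented for illustration.
def render_listcomp(body, clauses):
    parts = [body]
    for store, collection, cond in clauses:
        parts.append('for {} in {}'.format(store, collection))
        if cond is not None:
            parts.append('if {}'.format(cond))
    return '[{}]'.format(' '.join(parts))

print(render_listcomp('x * 2', [('x', 'xs', 'x > 0')]))
# prints: [x * 2 for x in xs if x > 0]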
senaite/senaite.core
bika/lims/content/analysisrequest.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisrequest.py#L1503-L1520
def getAnalysesNum(self): """ Returns an array with the number of analyses for the current AR in different statuses, like follows: [verified, total, not_submitted, to_be_verified] """ an_nums = [0, 0, 0, 0] for analysis in self.getAnalyses(): review_state = analysis.review_state if review_state in ['retracted', 'rejected', 'cancelled']: continue if review_state == 'to_be_verified': an_nums[3] += 1 elif review_state in ['published', 'verified']: an_nums[0] += 1 else: an_nums[2] += 1 an_nums[1] += 1 return an_nums
[ "def", "getAnalysesNum", "(", "self", ")", ":", "an_nums", "=", "[", "0", ",", "0", ",", "0", ",", "0", "]", "for", "analysis", "in", "self", ".", "getAnalyses", "(", ")", ":", "review_state", "=", "analysis", ".", "review_state", "if", "review_state", "in", "[", "'retracted'", ",", "'rejected'", ",", "'cancelled'", "]", ":", "continue", "if", "review_state", "==", "'to_be_verified'", ":", "an_nums", "[", "3", "]", "+=", "1", "elif", "review_state", "in", "[", "'published'", ",", "'verified'", "]", ":", "an_nums", "[", "0", "]", "+=", "1", "else", ":", "an_nums", "[", "2", "]", "+=", "1", "an_nums", "[", "1", "]", "+=", "1", "return", "an_nums" ]
Returns an array with the number of analyses for the current AR in different statuses, as follows: [verified, total, not_submitted, to_be_verified]
[ "Returns", "an", "array", "with", "the", "number", "of", "analyses", "for", "the", "current", "AR", "in", "different", "statuses", "like", "follows", ":", "[", "verified", "total", "not_submitted", "to_be_verified", "]" ]
python
train
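getAnalysesNum above is a simple bucketed tally: retracted, rejected, and cancelled analyses are skipped, and the remainder are counted into [verified, total, not_submitted, to_be_verified]. The same logic is easy to check in isolation with plain state strings, as in the hypothetical count_by_state sketch below (the real method iterates senaite analysis objects, not strings).

# Standalone sketch of the same tally over plain review-state strings.
def count_by_state(review_states):
    counts = [0, 0, 0, 0]  # [verified, total, not_submitted, to_be_verified]
    for state in review_states:
        if state in ('retracted', 'rejected', 'cancelled'):
            continue  # inactive analyses are excluded from every bucket
        if state == 'to_be_verified':
            counts[3] += 1
        elif state in ('published', 'verified'):
            counts[0] += 1
        else:
            counts[2] += 1  # anything else counts as not yet submitted/verified
        counts[1] += 1      # total of all active analyses
    return counts

print(count_by_state(['verified', 'assigned', 'to_be_verified', 'rejected']))
# prints: [1, 3, 1, 1]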