column            type            lengths / values
repo              stringlengths   7 - 55
path              stringlengths   4 - 223
url               stringlengths   87 - 315
code              stringlengths   75 - 104k
code_tokens       list
docstring         stringlengths   1 - 46.9k
docstring_tokens  list
language          stringclasses   1 value
partition         stringclasses   3 values
avg_line_len      float64         7.91 - 980
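A minimal sketch of loading these rows, assuming they are stored as JSON Lines with the fields listed above (the file name below is hypothetical, and this dump's actual storage format is not stated, so treat this as an illustration rather than the dataset's API):

    import json

    splits = {"train": [], "valid": [], "test": []}
    with open("codesearchnet_python.jsonl") as fh:  # hypothetical file name
        for line in fh:
            row = json.loads(line)
            # partition is one of the 3 classes listed in the schema
            splits[row["partition"]].append(row)
            # avg_line_len is presumably the mean character count per line
            # of the code field; recomputing it is a quick sanity check
            # (this definition is an assumption, not documented here).
            lines = row["code"].splitlines()
            mean_len = sum(len(l) for l in lines) / len(lines)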
numenta/htmresearch
projects/energy_based_pooling/energy_based_models/energy_based_pooler.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/energy_based_pooling/energy_based_models/energy_based_pooler.py#L215-L221
def learn(self, x):
    """Encodes an input array, and performs weight updates and updates to
    the activity statistics according to the respective methods implemented
    below."""
    y = self.encode(x)
    self.update_statistics([y])
    self.update_weights([x],[y])
    return y
[ "def", "learn", "(", "self", ",", "x", ")", ":", "y", "=", "self", ".", "encode", "(", "x", ")", "self", ".", "update_statistics", "(", "[", "y", "]", ")", "self", ".", "update_weights", "(", "[", "x", "]", ",", "[", "y", "]", ")", "return", "y" ]
Encodes an input array, and performs weight updates and updates to the activity statistics according to the respective methods implemented below.
[ "Encodes", "an", "input", "array", "and", "performs", "weight", "updates", "and", "updates", "to", "the", "activity", "statistics", "according", "to", "the", "respective", "methods", "implemented", "below", "." ]
python
train
42.857143
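The code_tokens and docstring_tokens fields are the code and docstring split into lexical tokens; in the rows above, the function's own docstring is additionally dropped from code_tokens. A rough approximation built on Python's standard tokenize module (the dataset's actual lexer is unspecified here, and this sketch keeps the docstring rather than stripping it, so the output will not match exactly):

    import io
    import tokenize

    def lex(source):
        """Return the token strings of `source`, skipping pure layout tokens."""
        layout = {tokenize.NL, tokenize.NEWLINE, tokenize.INDENT,
                  tokenize.DEDENT, tokenize.ENDMARKER}
        return [tok.string
                for tok in tokenize.generate_tokens(io.StringIO(source).readline)
                if tok.type not in layout]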
boundary/pulse-api-cli
boundary/property_handler.py
https://github.com/boundary/pulse-api-cli/blob/b01ca65b442eed19faac309c9d62bbc3cb2c098f/boundary/property_handler.py#L22-L31
def _process_properties(self, properties):
    """
    Transforms the command line properties into python dictionary
    :return:
    """
    if properties is not None:
        self._properties = {}
        for p in properties:
            d = p.split('=')
            self._properties[d[0]] = d[1]
[ "def", "_process_properties", "(", "self", ",", "properties", ")", ":", "if", "properties", "is", "not", "None", ":", "self", ".", "_properties", "=", "{", "}", "for", "p", "in", "properties", ":", "d", "=", "p", ".", "split", "(", "'='", ")", "self", ".", "_properties", "[", "d", "[", "0", "]", "]", "=", "d", "[", "1", "]" ]
Transforms the command line properties into python dictionary
:return:
[ "Transforms", "the", "command", "line", "properties", "into", "python", "dictionary", ":", "return", ":" ]
python
test
32.5
broadinstitute/fiss
firecloud/fiss.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L737-L765
def config_new(args):
    '''Attempt to install a new method config into a workspace, by:
    generating a template from a versioned method in the methods repo,
    then launching a local editor (respecting the $EDITOR environment
    variable) to fill in the incomplete input/output fields.
    Returns True if the config was successfully installed, otherwise False'''
    cfg = config_template(args)
    # Iteratively try to edit/install the config: exit iteration by EITHER
    #   Successful config_put() after editing
    #   Leaving config unchanged in editor, e.g. quitting out of VI with :q
    # FIXME: put an small integer upper bound on the # of loops here
    while True:
        try:
            edited = fccore.edit_text(cfg)
            if edited == cfg:
                eprint("No edits made, method config not installed ...")
                break
            if __EDITME__ in edited:
                eprint("Edit is incomplete, method config not installed ...")
                time.sleep(1)
                continue
            args.config = cfg = edited
            config_put(args)
            return True
        except FireCloudServerError as fce:
            __pretty_print_fc_exception(fce)
    return False
[ "def", "config_new", "(", "args", ")", ":", "cfg", "=", "config_template", "(", "args", ")", "# Iteratively try to edit/install the config: exit iteration by EITHER", "# Successful config_put() after editing", "# Leaving config unchanged in editor, e.g. quitting out of VI with :q", "# FIXME: put an small integer upper bound on the # of loops here", "while", "True", ":", "try", ":", "edited", "=", "fccore", ".", "edit_text", "(", "cfg", ")", "if", "edited", "==", "cfg", ":", "eprint", "(", "\"No edits made, method config not installed ...\"", ")", "break", "if", "__EDITME__", "in", "edited", ":", "eprint", "(", "\"Edit is incomplete, method config not installed ...\"", ")", "time", ".", "sleep", "(", "1", ")", "continue", "args", ".", "config", "=", "cfg", "=", "edited", "config_put", "(", "args", ")", "return", "True", "except", "FireCloudServerError", "as", "fce", ":", "__pretty_print_fc_exception", "(", "fce", ")", "return", "False" ]
Attempt to install a new method config into a workspace, by: generating a template from a versioned method in the methods repo, then launching a local editor (respecting the $EDITOR environment variable) to fill in the incomplete input/output fields. Returns True if the config was successfully installed, otherwise False
[ "Attempt", "to", "install", "a", "new", "method", "config", "into", "a", "workspace", "by", ":", "generating", "a", "template", "from", "a", "versioned", "method", "in", "the", "methods", "repo", "then", "launching", "a", "local", "editor", "(", "respecting", "the", "$EDITOR", "environment", "variable", ")", "to", "fill", "in", "the", "incomplete", "input", "/", "output", "fields", ".", "Returns", "True", "if", "the", "config", "was", "successfully", "installed", "otherwise", "False" ]
python
train
42.103448
CEA-COSMIC/ModOpt
modopt/opt/algorithms.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/opt/algorithms.py#L774-L788
def _update_param(self):
    r"""Update parameters

    This method updates the values of the algorthm parameters with the
    methods provided

    """
    # Update the gamma parameter.
    if not isinstance(self._gamma_update, type(None)):
        self._gamma = self._gamma_update(self._gamma)

    # Update lambda parameter.
    if not isinstance(self._lambda_update, type(None)):
        self._lambda_param = self._lambda_update(self._lambda_param)
[ "def", "_update_param", "(", "self", ")", ":", "# Update the gamma parameter.", "if", "not", "isinstance", "(", "self", ".", "_gamma_update", ",", "type", "(", "None", ")", ")", ":", "self", ".", "_gamma", "=", "self", ".", "_gamma_update", "(", "self", ".", "_gamma", ")", "# Update lambda parameter.", "if", "not", "isinstance", "(", "self", ".", "_lambda_update", ",", "type", "(", "None", ")", ")", ":", "self", ".", "_lambda_param", "=", "self", ".", "_lambda_update", "(", "self", ".", "_lambda_param", ")" ]
Update parameters

This method updates the values of the algorthm parameters with the methods provided
[ "r", "Update", "parameters" ]
python
train
31.933333
edx/edx-enterprise
consent/helpers.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/consent/helpers.py#L34-L49
def get_course_data_sharing_consent(username, course_id, enterprise_customer_uuid):
    """
    Get the data sharing consent object associated with a certain user of a customer
    for a course.

    :param username: The user that grants consent.
    :param course_id: The course for which consent is granted.
    :param enterprise_customer_uuid: The consent requester.
    :return: The data sharing consent object
    """
    # Prevent circular imports.
    DataSharingConsent = apps.get_model('consent', 'DataSharingConsent')  # pylint: disable=invalid-name
    return DataSharingConsent.objects.proxied_get(
        username=username,
        course_id=course_id,
        enterprise_customer__uuid=enterprise_customer_uuid
    )
[ "def", "get_course_data_sharing_consent", "(", "username", ",", "course_id", ",", "enterprise_customer_uuid", ")", ":", "# Prevent circular imports.", "DataSharingConsent", "=", "apps", ".", "get_model", "(", "'consent'", ",", "'DataSharingConsent'", ")", "# pylint: disable=invalid-name", "return", "DataSharingConsent", ".", "objects", ".", "proxied_get", "(", "username", "=", "username", ",", "course_id", "=", "course_id", ",", "enterprise_customer__uuid", "=", "enterprise_customer_uuid", ")" ]
Get the data sharing consent object associated with a certain user of a customer
for a course.

:param username: The user that grants consent.
:param course_id: The course for which consent is granted.
:param enterprise_customer_uuid: The consent requester.
:return: The data sharing consent object
[ "Get", "the", "data", "sharing", "consent", "object", "associated", "with", "a", "certain", "user", "of", "a", "customer", "for", "a", "course", "." ]
python
valid
44.5
alfred82santa/dirty-models
dirty_models/base.py
https://github.com/alfred82santa/dirty-models/blob/354becdb751b21f673515eae928c256c7e923c50/dirty_models/base.py#L29-L35
def set_read_only(self, value):
    """
    Sets whether model could be modified or not
    """
    if self.__read_only__ != value:
        self.__read_only__ = value
        self._update_read_only()
[ "def", "set_read_only", "(", "self", ",", "value", ")", ":", "if", "self", ".", "__read_only__", "!=", "value", ":", "self", ".", "__read_only__", "=", "value", "self", ".", "_update_read_only", "(", ")" ]
Sets whether model could be modified or not
[ "Sets", "whether", "model", "could", "be", "modified", "or", "not" ]
python
train
31
hbldh/flask-pybankid
flask_pybankid.py
https://github.com/hbldh/flask-pybankid/blob/b9af666f587b027391b25d811788d934a12b57e6/flask_pybankid.py#L192-L203
def create_from_pybankid_exception(cls, exception):
    """Class method for initiating from a `PyBankID` exception.

    :param bankid.exceptions.BankIDError exception:
    :return: The wrapped exception.
    :rtype: :py:class:`~FlaskPyBankIDError`

    """
    return cls(
        "{0}: {1}".format(exception.__class__.__name__, str(exception)),
        _exception_class_to_status_code.get(exception.__class__),
    )
[ "def", "create_from_pybankid_exception", "(", "cls", ",", "exception", ")", ":", "return", "cls", "(", "\"{0}: {1}\"", ".", "format", "(", "exception", ".", "__class__", ".", "__name__", ",", "str", "(", "exception", ")", ")", ",", "_exception_class_to_status_code", ".", "get", "(", "exception", ".", "__class__", ")", ",", ")" ]
Class method for initiating from a `PyBankID` exception.

:param bankid.exceptions.BankIDError exception:
:return: The wrapped exception.
:rtype: :py:class:`~FlaskPyBankIDError`
[ "Class", "method", "for", "initiating", "from", "a", "PyBankID", "exception", "." ]
python
train
36.916667
honzamach/pynspect
pynspect/traversers.py
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/traversers.py#L642-L649
def evaluate_binop_logical(self, operation, left, right, **kwargs):
    """
    Evaluate given logical binary operation with given operands.
    """
    if not operation in self.binops_logical:
        raise ValueError("Invalid logical binary operation '{}'".format(operation))
    result = self.binops_logical[operation](left, right)
    return bool(result)
[ "def", "evaluate_binop_logical", "(", "self", ",", "operation", ",", "left", ",", "right", ",", "*", "*", "kwargs", ")", ":", "if", "not", "operation", "in", "self", ".", "binops_logical", ":", "raise", "ValueError", "(", "\"Invalid logical binary operation '{}'\"", ".", "format", "(", "operation", ")", ")", "result", "=", "self", ".", "binops_logical", "[", "operation", "]", "(", "left", ",", "right", ")", "return", "bool", "(", "result", ")" ]
Evaluate given logical binary operation with given operands.
[ "Evaluate", "given", "logical", "binary", "operation", "with", "given", "operands", "." ]
python
train
47.375
Crypto-toolbox/btfxwss
btfxwss/connection.py
https://github.com/Crypto-toolbox/btfxwss/blob/16827fa6aacb2c0e289aa852bf61a18df6905835/btfxwss/connection.py#L505-L535
def _resubscribe(self, soft=False):
    """Resubscribes to all channels found in self.channel_configs.

    :param soft: if True, unsubscribes first.
    :return: None
    """
    # Restore non-default Bitfinex websocket configuration
    if self.bitfinex_config:
        self.send(**self.bitfinex_config)

    q_list = []
    while True:
        try:
            identifier, q = self.channel_configs.popitem(last=True if soft else False)
        except KeyError:
            break
        q_list.append((identifier, q.copy()))
        if identifier == 'auth':
            self.send(**q, auth=True)
            continue
        if soft:
            q['event'] = 'unsubscribe'
        self.send(**q)

    # Resubscribe for soft start.
    if soft:
        for identifier, q in reversed(q_list):
            self.channel_configs[identifier] = q
            self.send(**q)
    else:
        for identifier, q in q_list:
            self.channel_configs[identifier] = q
[ "def", "_resubscribe", "(", "self", ",", "soft", "=", "False", ")", ":", "# Restore non-default Bitfinex websocket configuration", "if", "self", ".", "bitfinex_config", ":", "self", ".", "send", "(", "*", "*", "self", ".", "bitfinex_config", ")", "q_list", "=", "[", "]", "while", "True", ":", "try", ":", "identifier", ",", "q", "=", "self", ".", "channel_configs", ".", "popitem", "(", "last", "=", "True", "if", "soft", "else", "False", ")", "except", "KeyError", ":", "break", "q_list", ".", "append", "(", "(", "identifier", ",", "q", ".", "copy", "(", ")", ")", ")", "if", "identifier", "==", "'auth'", ":", "self", ".", "send", "(", "*", "*", "q", ",", "auth", "=", "True", ")", "continue", "if", "soft", ":", "q", "[", "'event'", "]", "=", "'unsubscribe'", "self", ".", "send", "(", "*", "*", "q", ")", "# Resubscribe for soft start.", "if", "soft", ":", "for", "identifier", ",", "q", "in", "reversed", "(", "q_list", ")", ":", "self", ".", "channel_configs", "[", "identifier", "]", "=", "q", "self", ".", "send", "(", "*", "*", "q", ")", "else", ":", "for", "identifier", ",", "q", "in", "q_list", ":", "self", ".", "channel_configs", "[", "identifier", "]", "=", "q" ]
Resubscribes to all channels found in self.channel_configs.

:param soft: if True, unsubscribes first.
:return: None
[ "Resubscribes", "to", "all", "channels", "found", "in", "self", ".", "channel_configs", "." ]
python
test
33.741935
angr/angr
angr/state_plugins/posix.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/posix.py#L540-L551
def dump_file_by_path(self, path, **kwargs):
    """
    Returns the concrete content for a file by path.

    :param path: file path as string
    :param kwargs: passed to state.solver.eval
    :return: file contents as string
    """
    file = self.state.fs.get(path)
    if file is None:
        return None
    return file.concretize(**kwargs)
[ "def", "dump_file_by_path", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "file", "=", "self", ".", "state", ".", "fs", ".", "get", "(", "path", ")", "if", "file", "is", "None", ":", "return", "None", "return", "file", ".", "concretize", "(", "*", "*", "kwargs", ")" ]
Returns the concrete content for a file by path.

:param path: file path as string
:param kwargs: passed to state.solver.eval
:return: file contents as string
[ "Returns", "the", "concrete", "content", "for", "a", "file", "by", "path", "." ]
python
train
31.416667
Jajcus/pyxmpp2
pyxmpp2/mainloop/poll.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/mainloop/poll.py#L89-L108
def _prepare_io_handler(self, handler):
    """Call the `interfaces.IOHandler.prepare` method and
    remove the handler from unprepared handler list when done.
    """
    logger.debug(" preparing handler: {0!r}".format(handler))
    ret = handler.prepare()
    logger.debug(" prepare result: {0!r}".format(ret))
    if isinstance(ret, HandlerReady):
        del self._unprepared_handlers[handler]
        prepared = True
    elif isinstance(ret, PrepareAgain):
        if ret.timeout is not None:
            if self._timeout is not None:
                self._timeout = min(self._timeout, ret.timeout)
            else:
                self._timeout = ret.timeout
        prepared = False
    else:
        raise TypeError("Unexpected result type from prepare()")
    return prepared
[ "def", "_prepare_io_handler", "(", "self", ",", "handler", ")", ":", "logger", ".", "debug", "(", "\" preparing handler: {0!r}\"", ".", "format", "(", "handler", ")", ")", "ret", "=", "handler", ".", "prepare", "(", ")", "logger", ".", "debug", "(", "\" prepare result: {0!r}\"", ".", "format", "(", "ret", ")", ")", "if", "isinstance", "(", "ret", ",", "HandlerReady", ")", ":", "del", "self", ".", "_unprepared_handlers", "[", "handler", "]", "prepared", "=", "True", "elif", "isinstance", "(", "ret", ",", "PrepareAgain", ")", ":", "if", "ret", ".", "timeout", "is", "not", "None", ":", "if", "self", ".", "_timeout", "is", "not", "None", ":", "self", ".", "_timeout", "=", "min", "(", "self", ".", "_timeout", ",", "ret", ".", "timeout", ")", "else", ":", "self", ".", "_timeout", "=", "ret", ".", "timeout", "prepared", "=", "False", "else", ":", "raise", "TypeError", "(", "\"Unexpected result type from prepare()\"", ")", "return", "prepared" ]
Call the `interfaces.IOHandler.prepare` method and remove the handler from unprepared handler list when done.
[ "Call", "the", "interfaces", ".", "IOHandler", ".", "prepare", "method", "and", "remove", "the", "handler", "from", "unprepared", "handler", "list", "when", "done", "." ]
python
valid
42.25
nameko/nameko
nameko/utils/concurrency/__init__.py
https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/utils/concurrency/__init__.py#L7-L49
def fail_fast_imap(pool, call, items):
    """ Run a function against each item in a given list, yielding each
    function result in turn, where the function call is handled in a
    :class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.

    If any function raises an exception, all other ongoing threads are
    killed, and the exception is raised to the caller.

    This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.

    :param pool: Pool to spawn function threads from
    :type pool: eventlet.greenpool.GreenPool
    :param call: Function call to make, expecting to receive an item from the
        given list
    """
    result_queue = LightQueue(maxsize=len(items))
    spawned_threads = set()

    def handle_result(finished_thread):
        try:
            thread_result = finished_thread.wait()
            spawned_threads.remove(finished_thread)
            result_queue.put((thread_result, None))
        except Exception:
            spawned_threads.remove(finished_thread)
            result_queue.put((None, sys.exc_info()))

    for item in items:
        gt = pool.spawn(call, item)
        spawned_threads.add(gt)
        gt.link(handle_result)

    while spawned_threads:
        result, exc_info = result_queue.get()
        if exc_info is not None:
            # Kill all other ongoing threads
            for ongoing_thread in spawned_threads:
                ongoing_thread.kill()
            # simply raising here (even raising a full exc_info) isn't
            # sufficient to preserve the original stack trace.
            # greenlet.throw() achieves this.
            eventlet.getcurrent().throw(*exc_info)
        yield result
[ "def", "fail_fast_imap", "(", "pool", ",", "call", ",", "items", ")", ":", "result_queue", "=", "LightQueue", "(", "maxsize", "=", "len", "(", "items", ")", ")", "spawned_threads", "=", "set", "(", ")", "def", "handle_result", "(", "finished_thread", ")", ":", "try", ":", "thread_result", "=", "finished_thread", ".", "wait", "(", ")", "spawned_threads", ".", "remove", "(", "finished_thread", ")", "result_queue", ".", "put", "(", "(", "thread_result", ",", "None", ")", ")", "except", "Exception", ":", "spawned_threads", ".", "remove", "(", "finished_thread", ")", "result_queue", ".", "put", "(", "(", "None", ",", "sys", ".", "exc_info", "(", ")", ")", ")", "for", "item", "in", "items", ":", "gt", "=", "pool", ".", "spawn", "(", "call", ",", "item", ")", "spawned_threads", ".", "add", "(", "gt", ")", "gt", ".", "link", "(", "handle_result", ")", "while", "spawned_threads", ":", "result", ",", "exc_info", "=", "result_queue", ".", "get", "(", ")", "if", "exc_info", "is", "not", "None", ":", "# Kill all other ongoing threads", "for", "ongoing_thread", "in", "spawned_threads", ":", "ongoing_thread", ".", "kill", "(", ")", "# simply raising here (even raising a full exc_info) isn't", "# sufficient to preserve the original stack trace.", "# greenlet.throw() achieves this.", "eventlet", ".", "getcurrent", "(", ")", ".", "throw", "(", "*", "exc_info", ")", "yield", "result" ]
Run a function against each item in a given list, yielding each
function result in turn, where the function call is handled in a
:class:`~eventlet.greenthread.GreenThread` spawned by the provided pool.

If any function raises an exception, all other ongoing threads are
killed, and the exception is raised to the caller.

This function is similar to :meth:`~eventlet.greenpool.GreenPool.imap`.

:param pool: Pool to spawn function threads from
:type pool: eventlet.greenpool.GreenPool
:param call: Function call to make, expecting to receive an item from the
    given list
[ "Run", "a", "function", "against", "each", "item", "in", "a", "given", "list", "yielding", "each", "function", "result", "in", "turn", "where", "the", "function", "call", "is", "handled", "in", "a", ":", "class", ":", "~eventlet", ".", "greenthread", ".", "GreenThread", "spawned", "by", "the", "provided", "pool", "." ]
python
train
38.511628
J535D165/recordlinkage
recordlinkage/adapters.py
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/adapters.py#L57-L78
def _prob_match(self, features):
    """Compute match probabilities.

    Parameters
    ----------
    features : numpy.ndarray
        The data to train the model on.

    Returns
    -------
    numpy.ndarray
        The match probabilties.
    """
    # compute the probabilities
    probs = self.kernel.predict_proba(features)

    # get the position of match probabilities
    classes = list(self.kernel.classes_)
    match_class_position = classes.index(1)

    return probs[:, match_class_position]
[ "def", "_prob_match", "(", "self", ",", "features", ")", ":", "# compute the probabilities", "probs", "=", "self", ".", "kernel", ".", "predict_proba", "(", "features", ")", "# get the position of match probabilities", "classes", "=", "list", "(", "self", ".", "kernel", ".", "classes_", ")", "match_class_position", "=", "classes", ".", "index", "(", "1", ")", "return", "probs", "[", ":", ",", "match_class_position", "]" ]
Compute match probabilities.

Parameters
----------
features : numpy.ndarray
    The data to train the model on.

Returns
-------
numpy.ndarray
    The match probabilties.
[ "Compute", "match", "probabilities", "." ]
python
train
25
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L98-L130
def find_genus(files, database, threads=12):
    """
    Uses MASH to find the genus of fasta files.
    :param files: File dictionary returned by filer method.
    :param database: Path to reduced refseq database sketch.
    :param threads: Number of threads to run mash with.
    :return: genus_dict: Dictionary of genus for each sample. Will return NA
    if genus could not be found.
    """
    genus_dict = dict()
    tmpdir = str(time.time()).split('.')[-1]
    if not os.path.isdir(tmpdir):
        os.makedirs(tmpdir)
    for file_name, fasta in files.items():
        mash.screen(database, fasta, threads=threads, w='', i=0.95,
                    output_file=os.path.join(tmpdir, 'screen.tab'))
        screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab'))
        try:
            os.remove(os.path.join(tmpdir, 'screen.tab'))
        except IOError:
            pass
        try:
            genus = screen_output[0].query_id.split('/')[-3]
            if genus == 'Shigella':
                genus = 'Escherichia'
            genus_dict[file_name] = genus
        except IndexError:
            genus_dict[file_name] = 'NA'
    shutil.rmtree(tmpdir)
    return genus_dict
[ "def", "find_genus", "(", "files", ",", "database", ",", "threads", "=", "12", ")", ":", "genus_dict", "=", "dict", "(", ")", "tmpdir", "=", "str", "(", "time", ".", "time", "(", ")", ")", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "not", "os", ".", "path", ".", "isdir", "(", "tmpdir", ")", ":", "os", ".", "makedirs", "(", "tmpdir", ")", "for", "file_name", ",", "fasta", "in", "files", ".", "items", "(", ")", ":", "mash", ".", "screen", "(", "database", ",", "fasta", ",", "threads", "=", "threads", ",", "w", "=", "''", ",", "i", "=", "0.95", ",", "output_file", "=", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "'screen.tab'", ")", ")", "screen_output", "=", "mash", ".", "read_mash_screen", "(", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "'screen.tab'", ")", ")", "try", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "tmpdir", ",", "'screen.tab'", ")", ")", "except", "IOError", ":", "pass", "try", ":", "genus", "=", "screen_output", "[", "0", "]", ".", "query_id", ".", "split", "(", "'/'", ")", "[", "-", "3", "]", "if", "genus", "==", "'Shigella'", ":", "genus", "=", "'Escherichia'", "genus_dict", "[", "file_name", "]", "=", "genus", "except", "IndexError", ":", "genus_dict", "[", "file_name", "]", "=", "'NA'", "shutil", ".", "rmtree", "(", "tmpdir", ")", "return", "genus_dict" ]
Uses MASH to find the genus of fasta files.
:param files: File dictionary returned by filer method.
:param database: Path to reduced refseq database sketch.
:param threads: Number of threads to run mash with.
:return: genus_dict: Dictionary of genus for each sample. Will return NA
if genus could not be found.
[ "Uses", "MASH", "to", "find", "the", "genus", "of", "fasta", "files", ".", ":", "param", "files", ":", "File", "dictionary", "returned", "by", "filer", "method", ".", ":", "param", "database", ":", "Path", "to", "reduced", "refseq", "database", "sketch", ".", ":", "param", "threads", ":", "Number", "of", "threads", "to", "run", "mash", "with", ".", ":", "return", ":", "genus_dict", ":", "Dictionary", "of", "genus", "for", "each", "sample", ".", "Will", "return", "NA", "if", "genus", "could", "not", "be", "found", "." ]
python
train
37.272727
google/dotty
efilter/parsers/common/token_stream.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/token_stream.py#L87-L99
def reject(self, f, *args):
    """Like 'match', but throw a parse error if 'f' matches.

    This is useful when a parser wants to be strict about specific things
    being prohibited. For example, DottySQL bans the use of SQL keywords as
    variable names.
    """
    match = self.match(f, *args)
    if match:
        token = self.peek(0)
        raise errors.EfilterParseError(
            query=self.tokenizer.source, token=token,
            message="Was not expecting a %s here." % token.name)
[ "def", "reject", "(", "self", ",", "f", ",", "*", "args", ")", ":", "match", "=", "self", ".", "match", "(", "f", ",", "*", "args", ")", "if", "match", ":", "token", "=", "self", ".", "peek", "(", "0", ")", "raise", "errors", ".", "EfilterParseError", "(", "query", "=", "self", ".", "tokenizer", ".", "source", ",", "token", "=", "token", ",", "message", "=", "\"Was not expecting a %s here.\"", "%", "token", ".", "name", ")" ]
Like 'match', but throw a parse error if 'f' matches. This is useful when a parser wants to be strict about specific things being prohibited. For example, DottySQL bans the use of SQL keywords as variable names.
[ "Like", "match", "but", "throw", "a", "parse", "error", "if", "f", "matches", "." ]
python
train
41.076923
Esri/ArcREST
src/arcrest/webmap/operationallayers.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/webmap/operationallayers.py#L72-L90
def __look_up_geom(self, geomType):
    """ compares the geometry object's type verse the JSOn
        specs for geometry types
        Inputs:
          geomType - string - geometry object's type
        Returns:
          string JSON geometry type or None if not an allowed type
    """
    if geomType.lower() == "point":
        return "esriGeometryPoint"
    elif geomType.lower() == "polyline":
        return "esriGeometryPolyline"
    elif geomType.lower() == "polygon":
        return "esriGeometryPolygon"
    elif geomType.lower() == "multipoint":
        return "esriGeometryMultipoint"
    else:
        return None
[ "def", "__look_up_geom", "(", "self", ",", "geomType", ")", ":", "if", "geomType", ".", "lower", "(", ")", "==", "\"point\"", ":", "return", "\"esriGeometryPoint\"", "elif", "geomType", ".", "lower", "(", ")", "==", "\"polyline\"", ":", "return", "\"esriGeometryPolyline\"", "elif", "geomType", ".", "lower", "(", ")", "==", "\"polygon\"", ":", "return", "\"esriGeometryPolygon\"", "elif", "geomType", ".", "lower", "(", ")", "==", "\"multipoint\"", ":", "return", "\"esriGeometryMultipoint\"", "else", ":", "return", "None" ]
compares the geometry object's type verse the JSOn
specs for geometry types
Inputs:
  geomType - string - geometry object's type
Returns:
  string JSON geometry type or None if not an allowed type
[ "compares", "the", "geometry", "object", "s", "type", "verse", "the", "JSOn", "specs", "for", "geometry", "types", "Inputs", ":", "geomType", "-", "string", "-", "geometry", "object", "s", "type", "Returns", ":", "string", "JSON", "geometry", "type", "or", "None", "if", "not", "an", "allowed", "type" ]
python
train
35.789474
senaite/senaite.core
bika/lims/workflow/analysisrequest/guards.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/analysisrequest/guards.py#L171-L180
def guard_sample(analysis_request):
    """Returns whether 'sample' transition can be performed or not. Returns
    True only if the analysis request has the DateSampled and Sampler set or
    if the user belongs to the Samplers group
    """
    if analysis_request.getDateSampled() and analysis_request.getSampler():
        return True
    current_user = api.get_current_user()
    return "Sampler" in current_user.getRolesInContext(analysis_request)
[ "def", "guard_sample", "(", "analysis_request", ")", ":", "if", "analysis_request", ".", "getDateSampled", "(", ")", "and", "analysis_request", ".", "getSampler", "(", ")", ":", "return", "True", "current_user", "=", "api", ".", "get_current_user", "(", ")", "return", "\"Sampler\"", "in", "current_user", ".", "getRolesInContext", "(", "analysis_request", ")" ]
Returns whether 'sample' transition can be performed or not. Returns True only if the analysis request has the DateSampled and Sampler set or if the user belongs to the Samplers group
[ "Returns", "whether", "sample", "transition", "can", "be", "performed", "or", "not", ".", "Returns", "True", "only", "if", "the", "analysis", "request", "has", "the", "DateSampled", "and", "Sampler", "set", "or", "if", "the", "user", "belongs", "to", "the", "Samplers", "group" ]
python
train
44.5
vtkiorg/vtki
vtki/examples/examples.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/examples/examples.py#L77-L109
def plot_ants_plane(off_screen=False, notebook=None):
    """
    Demonstrate how to create a plot class to plot multiple meshes while
    adding scalars and text.

    Plot two ants and airplane
    """
    # load and shrink airplane
    airplane = vtki.PolyData(planefile)
    airplane.points /= 10
    # pts = airplane.points  # gets pointer to array
    # pts /= 10  # shrink

    # rotate and translate ant so it is on the plane
    ant = vtki.PolyData(antfile)
    ant.rotate_x(90)
    ant.translate([90, 60, 15])

    # Make a copy and add another ant
    ant_copy = ant.copy()
    ant_copy.translate([30, 0, -10])

    # Create plotting object
    plotter = vtki.Plotter(off_screen=off_screen, notebook=notebook)
    plotter.add_mesh(ant, 'r')
    plotter.add_mesh(ant_copy, 'b')

    # Add airplane mesh and make the color equal to the Y position
    plane_scalars = airplane.points[:, 1]
    plotter.add_mesh(airplane, scalars=plane_scalars,
                     stitle='Plane Y\nLocation')
    plotter.add_text('Ants and Plane Example')
    plotter.plot()
[ "def", "plot_ants_plane", "(", "off_screen", "=", "False", ",", "notebook", "=", "None", ")", ":", "# load and shrink airplane", "airplane", "=", "vtki", ".", "PolyData", "(", "planefile", ")", "airplane", ".", "points", "/=", "10", "# pts = airplane.points # gets pointer to array", "# pts /= 10 # shrink", "# rotate and translate ant so it is on the plane", "ant", "=", "vtki", ".", "PolyData", "(", "antfile", ")", "ant", ".", "rotate_x", "(", "90", ")", "ant", ".", "translate", "(", "[", "90", ",", "60", ",", "15", "]", ")", "# Make a copy and add another ant", "ant_copy", "=", "ant", ".", "copy", "(", ")", "ant_copy", ".", "translate", "(", "[", "30", ",", "0", ",", "-", "10", "]", ")", "# Create plotting object", "plotter", "=", "vtki", ".", "Plotter", "(", "off_screen", "=", "off_screen", ",", "notebook", "=", "notebook", ")", "plotter", ".", "add_mesh", "(", "ant", ",", "'r'", ")", "plotter", ".", "add_mesh", "(", "ant_copy", ",", "'b'", ")", "# Add airplane mesh and make the color equal to the Y position", "plane_scalars", "=", "airplane", ".", "points", "[", ":", ",", "1", "]", "plotter", ".", "add_mesh", "(", "airplane", ",", "scalars", "=", "plane_scalars", ",", "stitle", "=", "'Plane Y\\nLocation'", ")", "plotter", ".", "add_text", "(", "'Ants and Plane Example'", ")", "plotter", ".", "plot", "(", ")" ]
Demonstrate how to create a plot class to plot multiple meshes while adding scalars and text. Plot two ants and airplane
[ "Demonstrate", "how", "to", "create", "a", "plot", "class", "to", "plot", "multiple", "meshes", "while", "adding", "scalars", "and", "text", "." ]
python
train
30.69697
saltstack/salt
salt/states/lxd_container.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/lxd_container.py#L716-L852
def migrated(name,
             remote_addr,
             cert,
             key,
             verify_cert,
             src_remote_addr,
             stop_and_start=False,
             src_cert=None,
             src_key=None,
             src_verify_cert=None):
    '''
    Ensure a container is migrated to another host

    If the container is running, it either must be shut down
    first (use stop_and_start=True) or criu must be installed
    on the source and destination machines.

    For this operation both certs need to be authenticated,
    use :mod:`lxd.authenticate <salt.states.lxd.authenticate`
    to authenticate your cert(s).

    name :
        The container to migrate

    remote_addr :
        An URL to the destination remote Server

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Zertifikate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Wherever to verify the cert, this is by default True
        but in the most cases you want to set it off as LXD
        normaly uses self-signed certificates.

    src_remote_addr :
        An URL to the source remote Server

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    stop_and_start:
        Stop before migrating and start after

    src_cert :
        PEM Formatted SSL Zertifikate, if None we copy "cert"

        Examples:
            ~/.config/lxc/client.crt

    src_key :
        PEM Formatted SSL Key, if None we copy "key"

        Examples:
            ~/.config/lxc/client.key

    src_verify_cert :
        Wherever to verify the cert, if None we copy "verify_cert"
    '''
    ret = {
        'name': name,
        'remote_addr': remote_addr,
        'cert': cert,
        'key': key,
        'verify_cert': verify_cert,
        'src_remote_addr': src_remote_addr,
        'src_and_start': stop_and_start,
        'src_cert': src_cert,
        'src_key': src_key,
        'changes': {}
    }

    dest_container = None
    try:
        dest_container = __salt__['lxd.container_get'](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Destination container not found
        pass

    if dest_container is not None:
        return _success(
            ret,
            'Container "{0}" exists on the destination'.format(name)
        )

    if src_verify_cert is None:
        src_verify_cert = verify_cert

    try:
        __salt__['lxd.container_get'](
            name, src_remote_addr, src_cert, src_key, src_verify_cert,
            _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Container not found
        return _error(ret, 'Source Container "{0}" not found'.format(name))

    if __opts__['test']:
        ret['changes']['migrated'] = (
            'Would migrate the container "{0}" from "{1}" to "{2}"'
        ).format(name, src_remote_addr, remote_addr)
        return _unchanged(ret, ret['changes']['migrated'])

    try:
        __salt__['lxd.container_migrate'](
            name, stop_and_start, remote_addr, cert, key, verify_cert,
            src_remote_addr, src_cert, src_key, src_verify_cert
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))

    ret['changes']['migrated'] = (
        'Migrated the container "{0}" from "{1}" to "{2}"'
    ).format(name, src_remote_addr, remote_addr)
    return _success(ret, ret['changes']['migrated'])
[ "def", "migrated", "(", "name", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "src_remote_addr", ",", "stop_and_start", "=", "False", ",", "src_cert", "=", "None", ",", "src_key", "=", "None", ",", "src_verify_cert", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'remote_addr'", ":", "remote_addr", ",", "'cert'", ":", "cert", ",", "'key'", ":", "key", ",", "'verify_cert'", ":", "verify_cert", ",", "'src_remote_addr'", ":", "src_remote_addr", ",", "'src_and_start'", ":", "stop_and_start", ",", "'src_cert'", ":", "src_cert", ",", "'src_key'", ":", "src_key", ",", "'changes'", ":", "{", "}", "}", "dest_container", "=", "None", "try", ":", "dest_container", "=", "__salt__", "[", "'lxd.container_get'", "]", "(", "name", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "except", "CommandExecutionError", "as", "e", ":", "return", "_error", "(", "ret", ",", "six", ".", "text_type", "(", "e", ")", ")", "except", "SaltInvocationError", "as", "e", ":", "# Destination container not found", "pass", "if", "dest_container", "is", "not", "None", ":", "return", "_success", "(", "ret", ",", "'Container \"{0}\" exists on the destination'", ".", "format", "(", "name", ")", ")", "if", "src_verify_cert", "is", "None", ":", "src_verify_cert", "=", "verify_cert", "try", ":", "__salt__", "[", "'lxd.container_get'", "]", "(", "name", ",", "src_remote_addr", ",", "src_cert", ",", "src_key", ",", "src_verify_cert", ",", "_raw", "=", "True", ")", "except", "CommandExecutionError", "as", "e", ":", "return", "_error", "(", "ret", ",", "six", ".", "text_type", "(", "e", ")", ")", "except", "SaltInvocationError", "as", "e", ":", "# Container not found", "return", "_error", "(", "ret", ",", "'Source Container \"{0}\" not found'", ".", "format", "(", "name", ")", ")", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'changes'", "]", "[", "'migrated'", "]", "=", "(", "'Would migrate the container \"{0}\" from \"{1}\" to \"{2}\"'", ")", ".", "format", "(", "name", ",", "src_remote_addr", ",", "remote_addr", ")", "return", "_unchanged", "(", "ret", ",", "ret", "[", "'changes'", "]", "[", "'migrated'", "]", ")", "try", ":", "__salt__", "[", "'lxd.container_migrate'", "]", "(", "name", ",", "stop_and_start", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "src_remote_addr", ",", "src_cert", ",", "src_key", ",", "src_verify_cert", ")", "except", "CommandExecutionError", "as", "e", ":", "return", "_error", "(", "ret", ",", "six", ".", "text_type", "(", "e", ")", ")", "ret", "[", "'changes'", "]", "[", "'migrated'", "]", "=", "(", "'Migrated the container \"{0}\" from \"{1}\" to \"{2}\"'", ")", ".", "format", "(", "name", ",", "src_remote_addr", ",", "remote_addr", ")", "return", "_success", "(", "ret", ",", "ret", "[", "'changes'", "]", "[", "'migrated'", "]", ")" ]
Ensure a container is migrated to another host

If the container is running, it either must be shut down
first (use stop_and_start=True) or criu must be installed
on the source and destination machines.

For this operation both certs need to be authenticated,
use :mod:`lxd.authenticate <salt.states.lxd.authenticate`
to authenticate your cert(s).

name :
    The container to migrate

remote_addr :
    An URL to the destination remote Server

    Examples:
        https://myserver.lan:8443
        /var/lib/mysocket.sock

cert :
    PEM Formatted SSL Zertifikate.

    Examples:
        ~/.config/lxc/client.crt

key :
    PEM Formatted SSL Key.

    Examples:
        ~/.config/lxc/client.key

verify_cert : True
    Wherever to verify the cert, this is by default True
    but in the most cases you want to set it off as LXD
    normaly uses self-signed certificates.

src_remote_addr :
    An URL to the source remote Server

    Examples:
        https://myserver.lan:8443
        /var/lib/mysocket.sock

stop_and_start:
    Stop before migrating and start after

src_cert :
    PEM Formatted SSL Zertifikate, if None we copy "cert"

    Examples:
        ~/.config/lxc/client.crt

src_key :
    PEM Formatted SSL Key, if None we copy "key"

    Examples:
        ~/.config/lxc/client.key

src_verify_cert :
    Wherever to verify the cert, if None we copy "verify_cert"
[ "Ensure", "a", "container", "is", "migrated", "to", "another", "host" ]
python
train
26.89781
ewels/MultiQC
multiqc/modules/hicpro/hicpro.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/hicpro/hicpro.py#L269-L299
def hicpro_mapping_chart (self):
    """ Generate the HiC-Pro Aligned reads plot """

    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['Full_Alignments_Read'] = { 'color': '#005ce6', 'name': 'Full reads Alignments' }
    keys['Trimmed_Alignments_Read'] = { 'color': '#3385ff', 'name': 'Trimmed reads Alignments' }
    keys['Failed_To_Align_Read'] = { 'color': '#a9a2a2', 'name': 'Failed To Align' }

    data = [{},{}]
    for s_name in self.hicpro_data:
        for r in [1,2]:
            data[r-1]['{} [R{}]'.format(s_name, r)] = {
                'Full_Alignments_Read': self.hicpro_data[s_name]['global_R{}'.format(r)],
                'Trimmed_Alignments_Read': self.hicpro_data[s_name]['local_R{}'.format(r)],
                'Failed_To_Align_Read': int(self.hicpro_data[s_name]['total_R{}'.format(r)]) - int(self.hicpro_data[s_name]['mapped_R{}'.format(r)])
            }

    # Config for the plot
    config = {
        'id': 'hicpro_mapping_stats_plot',
        'title': 'HiC-Pro: Mapping Statistics',
        'ylab': '# Reads',
        'ylab': '# Reads: Read 1',
        'data_labels': [
            {'name': 'Read 1', 'ylab': '# Reads: Read 1'},
            {'name': 'Read 2', 'ylab': '# Reads: Read 2'}
        ]
    }

    return bargraph.plot(data, [keys, keys], config)
[ "def", "hicpro_mapping_chart", "(", "self", ")", ":", "# Specify the order of the different possible categories", "keys", "=", "OrderedDict", "(", ")", "keys", "[", "'Full_Alignments_Read'", "]", "=", "{", "'color'", ":", "'#005ce6'", ",", "'name'", ":", "'Full reads Alignments'", "}", "keys", "[", "'Trimmed_Alignments_Read'", "]", "=", "{", "'color'", ":", "'#3385ff'", ",", "'name'", ":", "'Trimmed reads Alignments'", "}", "keys", "[", "'Failed_To_Align_Read'", "]", "=", "{", "'color'", ":", "'#a9a2a2'", ",", "'name'", ":", "'Failed To Align'", "}", "data", "=", "[", "{", "}", ",", "{", "}", "]", "for", "s_name", "in", "self", ".", "hicpro_data", ":", "for", "r", "in", "[", "1", ",", "2", "]", ":", "data", "[", "r", "-", "1", "]", "[", "'{} [R{}]'", ".", "format", "(", "s_name", ",", "r", ")", "]", "=", "{", "'Full_Alignments_Read'", ":", "self", ".", "hicpro_data", "[", "s_name", "]", "[", "'global_R{}'", ".", "format", "(", "r", ")", "]", ",", "'Trimmed_Alignments_Read'", ":", "self", ".", "hicpro_data", "[", "s_name", "]", "[", "'local_R{}'", ".", "format", "(", "r", ")", "]", ",", "'Failed_To_Align_Read'", ":", "int", "(", "self", ".", "hicpro_data", "[", "s_name", "]", "[", "'total_R{}'", ".", "format", "(", "r", ")", "]", ")", "-", "int", "(", "self", ".", "hicpro_data", "[", "s_name", "]", "[", "'mapped_R{}'", ".", "format", "(", "r", ")", "]", ")", "}", "# Config for the plot", "config", "=", "{", "'id'", ":", "'hicpro_mapping_stats_plot'", ",", "'title'", ":", "'HiC-Pro: Mapping Statistics'", ",", "'ylab'", ":", "'# Reads'", ",", "'ylab'", ":", "'# Reads: Read 1'", ",", "'data_labels'", ":", "[", "{", "'name'", ":", "'Read 1'", ",", "'ylab'", ":", "'# Reads: Read 1'", "}", ",", "{", "'name'", ":", "'Read 2'", ",", "'ylab'", ":", "'# Reads: Read 2'", "}", "]", "}", "return", "bargraph", ".", "plot", "(", "data", ",", "[", "keys", ",", "keys", "]", ",", "config", ")" ]
Generate the HiC-Pro Aligned reads plot
[ "Generate", "the", "HiC", "-", "Pro", "Aligned", "reads", "plot" ]
python
train
46.806452
jmcarp/robobrowser
robobrowser/helpers.py
https://github.com/jmcarp/robobrowser/blob/4284c11d00ae1397983e269aa180e5cf7ee5f4cf/robobrowser/helpers.py#L46-L54
def find(soup, name=None, attrs=None, recursive=True, text=None, **kwargs):
    """Modified find method; see `find_all`, above.
    """
    tags = find_all(
        soup, name, attrs or {}, recursive, text, 1, **kwargs
    )
    if tags:
        return tags[0]
[ "def", "find", "(", "soup", ",", "name", "=", "None", ",", "attrs", "=", "None", ",", "recursive", "=", "True", ",", "text", "=", "None", ",", "*", "*", "kwargs", ")", ":", "tags", "=", "find_all", "(", "soup", ",", "name", ",", "attrs", "or", "{", "}", ",", "recursive", ",", "text", ",", "1", ",", "*", "*", "kwargs", ")", "if", "tags", ":", "return", "tags", "[", "0", "]" ]
Modified find method; see `find_all`, above.
[ "Modified", "find", "method", ";", "see", "find_all", "above", "." ]
python
train
28.111111
googleapis/google-cloud-python
api_core/google/api_core/datetime_helpers.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/datetime_helpers.py#L269-L279
def timestamp_pb(self):
    """Return a timestamp message.

    Returns:
        (:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message
    """
    inst = self if self.tzinfo is not None else self.replace(tzinfo=pytz.UTC)
    delta = inst - _UTC_EPOCH
    seconds = int(delta.total_seconds())
    nanos = self._nanosecond or self.microsecond * 1000
    return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos)
[ "def", "timestamp_pb", "(", "self", ")", ":", "inst", "=", "self", "if", "self", ".", "tzinfo", "is", "not", "None", "else", "self", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "UTC", ")", "delta", "=", "inst", "-", "_UTC_EPOCH", "seconds", "=", "int", "(", "delta", ".", "total_seconds", "(", ")", ")", "nanos", "=", "self", ".", "_nanosecond", "or", "self", ".", "microsecond", "*", "1000", "return", "timestamp_pb2", ".", "Timestamp", "(", "seconds", "=", "seconds", ",", "nanos", "=", "nanos", ")" ]
Return a timestamp message.

Returns:
    (:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message
[ "Return", "a", "timestamp", "message", "." ]
python
train
41.363636
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/components/author_picker.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/author_picker.py#L246-L253
def bind(cls):
    """
    Bind the buttons to adapter's event handler.
    """
    super(cls, cls).bind()

    cls.search_btn_el.bind("click", cls.start)
    cls.input_el.bind("keypress", func_on_enter(cls.start))
[ "def", "bind", "(", "cls", ")", ":", "super", "(", "cls", ",", "cls", ")", ".", "bind", "(", ")", "cls", ".", "search_btn_el", ".", "bind", "(", "\"click\"", ",", "cls", ".", "start", ")", "cls", ".", "input_el", ".", "bind", "(", "\"keypress\"", ",", "func_on_enter", "(", "cls", ".", "start", ")", ")" ]
Bind the buttons to adapter's event handler.
[ "Bind", "the", "buttons", "to", "adapter", "s", "event", "handler", "." ]
python
train
28.875
zimeon/iiif
iiif/flask_utils.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/flask_utils.py#L375-L381
def error_response(self, e):
    """Make response for an IIIFError e.

    Also add compliance header.
    """
    self.add_compliance_header()
    return self.make_response(*e.image_server_response(self.api_version))
[ "def", "error_response", "(", "self", ",", "e", ")", ":", "self", ".", "add_compliance_header", "(", ")", "return", "self", ".", "make_response", "(", "*", "e", ".", "image_server_response", "(", "self", ".", "api_version", ")", ")" ]
Make response for an IIIFError e. Also add compliance header.
[ "Make", "response", "for", "an", "IIIFError", "e", "." ]
python
train
33
ionelmc/python-cogen
cogen/web/wsgi.py
https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/web/wsgi.py#L250-L533
def run(self):
    """A bit bulky atm..."""
    self.close_connection = False
    try:
        while True:
            self.started_response = False
            self.status = ""
            self.outheaders = []
            self.sent_headers = False
            self.chunked_write = False
            self.write_buffer = StringIO.StringIO()
            self.content_length = None

            # Copy the class environ into self.
            ENVIRON = self.environ = self.connection_environ.copy()
            self.environ.update(self.server_environ)

            request_line = yield self.connfh.readline()
            if request_line == "\r\n":
                # RFC 2616 sec 4.1: "... it should ignore the CRLF."
                tolerance = 5
                while tolerance and request_line == "\r\n":
                    request_line = yield self.connfh.readline()
                    tolerance -= 1
                if not tolerance:
                    return
            method, path, req_protocol = request_line.strip().split(" ", 2)
            ENVIRON["REQUEST_METHOD"] = method
            ENVIRON["CONTENT_LENGTH"] = ''

            scheme, location, path, params, qs, frag = urlparse(path)

            if frag:
                yield self.simple_response("400 Bad Request",
                                           "Illegal #fragment in Request-URI.")
                return

            if scheme:
                ENVIRON["wsgi.url_scheme"] = scheme
            if params:
                path = path + ";" + params

            ENVIRON["SCRIPT_NAME"] = ""

            # Unquote the path+params (e.g. "/this%20path" -> "this path").
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
            #
            # But note that "...a URI must be separated into its components
            # before the escaped characters within those components can be
            # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
            atoms = [unquote(x) for x in quoted_slash.split(path)]
            path = "%2F".join(atoms)
            ENVIRON["PATH_INFO"] = path

            # Note that, like wsgiref and most other WSGI servers,
            # we unquote the path but not the query string.
            ENVIRON["QUERY_STRING"] = qs

            # Compare request and server HTTP protocol versions, in case our
            # server does not support the requested protocol. Limit our output
            # to min(req, server). We want the following output:
            #     request    server     actual written   supported response
            #     protocol   protocol  response protocol    feature set
            # a     1.0        1.0           1.0                1.0
            # b     1.0        1.1           1.1                1.0
            # c     1.1        1.0           1.0                1.0
            # d     1.1        1.1           1.1                1.1
            # Notice that, in (b), the response will be "HTTP/1.1" even though
            # the client only understands 1.0. RFC 2616 10.5.6 says we should
            # only return 505 if the _major_ version is different.
            rp = int(req_protocol[5]), int(req_protocol[7])
            server_protocol = ENVIRON["ACTUAL_SERVER_PROTOCOL"]
            sp = int(server_protocol[5]), int(server_protocol[7])
            if sp[0] != rp[0]:
                yield self.simple_response("505 HTTP Version Not Supported")
                return
            # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
            ENVIRON["SERVER_PROTOCOL"] = req_protocol
            self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

            # If the Request-URI was an absoluteURI, use its location atom.
            if location:
                ENVIRON["SERVER_NAME"] = location

            # then all the http headers
            try:
                while True:
                    line = yield self.connfh.readline()
                    if line == '\r\n':
                        # Normal end of headers
                        break
                    if line[0] in ' \t':
                        # It's a continuation line.
                        v = line.strip()
                    else:
                        k, v = line.split(":", 1)
                        k, v = k.strip().upper(), v.strip()
                        envname = "HTTP_" + k.replace("-", "_")
                    if k in comma_separated_headers:
                        existing = ENVIRON.get(envname)
                        if existing:
                            v = ", ".join((existing, v))
                    ENVIRON[envname] = v
                ct = ENVIRON.pop("HTTP_CONTENT_TYPE", None)
                if ct:
                    ENVIRON["CONTENT_TYPE"] = ct
                cl = ENVIRON.pop("HTTP_CONTENT_LENGTH", None)
                if cl:
                    ENVIRON["CONTENT_LENGTH"] = cl
            except ValueError, ex:
                yield self.simple_response("400 Bad Request", repr(ex.args))
                return

            creds = ENVIRON.get("HTTP_AUTHORIZATION", "").split(" ", 1)
            ENVIRON["AUTH_TYPE"] = creds[0]
            if creds[0].lower() == 'basic':
                user, pw = base64.decodestring(creds[1]).split(":", 1)
                ENVIRON["REMOTE_USER"] = user

            # Persistent connection support
            if req_protocol == "HTTP/1.1":
                if ENVIRON.get("HTTP_CONNECTION", "") == "close":
                    self.close_connection = True
            else:
                # HTTP/1.0
                if ENVIRON.get("HTTP_CONNECTION", "").lower() != "keep-alive":
                    self.close_connection = True

            # Transfer-Encoding support
            te = None
            if self.response_protocol == "HTTP/1.1":
                te = ENVIRON.get("HTTP_TRANSFER_ENCODING")
                if te:
                    te = [x.strip().lower() for x in te.split(",") if x.strip()]
            if te:
                # reject transfer encodings for now
                yield self.simple_response("501 Unimplemented")
                self.close_connection = True
                return

            ENV_COGEN_PROXY = ENVIRON['cogen.wsgi'] = async.COGENProxy(
                content_length=int(ENVIRON.get('CONTENT_LENGTH', None) or 0) or None,
                read_count=0,
                operation=None,
                result=None,
                exception=None
            )
            ENVIRON['cogen.http_connection'] = self
            ENVIRON['cogen.core'] = async.COGENOperationWrapper(
                ENV_COGEN_PROXY, core
            )
            ENVIRON['cogen.call'] = async.COGENCallWrapper(ENV_COGEN_PROXY)
            ENVIRON['cogen.input'] = async.COGENOperationWrapper(
                ENV_COGEN_PROXY, self.connfh
            )
            ENVIRON['cogen.yield'] = async.COGENSimpleWrapper(ENV_COGEN_PROXY)
            response = self.wsgi_app(ENVIRON, self.start_response)
            #~ print 'WSGI RESPONSE:', response
            try:
                if isinstance(response, WSGIFileWrapper):
                    # set tcp_cork to pack the header with the file data
                    if hasattr(socket, "TCP_CORK"):
                        self.conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
                    assert self.started_response, "App returned the wsgi.file_wrapper but didn't call start_response."
                    assert not self.sent_headers
                    self.sent_headers = True
                    yield sockets.SendAll(self.conn,
                                          self.render_headers() + self.write_buffer.getvalue())
                    offset = response.filelike.tell()
                    if self.chunked_write:
                        fsize = os.fstat(response.filelike.fileno()).st_size
                        yield sockets.SendAll(self.conn, hex(int(fsize - offset)) + "\r\n")
                    yield self.conn.sendfile(
                        response.filelike,
                        blocksize=response.blocksize,
                        offset=offset,
                        length=self.content_length,
                        timeout=self.sendfile_timeout
                    )
                    if self.chunked_write:
                        yield sockets.SendAll(self.conn, "\r\n")
                    # also, tcp_cork will make the file data sent on packet boundaries,
                    # wich is a good thing
                    if hasattr(socket, "TCP_CORK"):
                        self.conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)
                else:
                    for chunk in response:
                        if chunk:
                            assert self.started_response, "App sended a value but hasn't called start_response."
                            if not self.sent_headers:
                                self.sent_headers = True
                                headers = [self.render_headers(),
                                           self.write_buffer.getvalue()]
                            else:
                                headers = []
                            if self.chunked_write:
                                buf = [hex(len(chunk))[2:], "\r\n", chunk, "\r\n"]
                                if headers:
                                    headers.extend(buf)
                                    yield sockets.SendAll(self.conn, "".join(headers))
                                else:
                                    yield sockets.SendAll(self.conn, "".join(buf))
                            else:
                                if headers:
                                    headers.append(chunk)
                                    yield sockets.SendAll(self.conn, "".join(headers))
                                else:
                                    yield sockets.SendAll(self.conn, chunk)
                        else:
                            if self.started_response:
                                if not self.sent_headers:
                                    self.sent_headers = True
                                    yield sockets.SendAll(self.conn,
                                                          self.render_headers() + self.write_buffer.getvalue())
                            if ENV_COGEN_PROXY.operation:
                                op = ENV_COGEN_PROXY.operation
                                ENV_COGEN_PROXY.operation = None
                                try:
                                    #~ print 'WSGI OP:', op
                                    ENV_COGEN_PROXY.exception = None
                                    ENV_COGEN_PROXY.result = yield op
                                    #~ print 'WSGI OP RESULT:', ENVIRON['cogen.wsgi'].result
                                except:
                                    #~ print 'WSGI OP EXCEPTION:', sys.exc_info()
                                    ENV_COGEN_PROXY.exception = sys.exc_info()
                                    ENV_COGEN_PROXY.result = ENV_COGEN_PROXY.exception[1]
                                del op
            finally:
                if hasattr(response, 'close'):
                    response.close()

            if self.started_response:
                if not self.sent_headers:
                    self.sent_headers = True
                    yield sockets.SendAll(self.conn,
                                          self.render_headers() + self.write_buffer.getvalue())
            else:
                import warnings
                warnings.warn("App was consumed and hasn't called start_response")
            if self.chunked_write:
                yield sockets.SendAll(self.conn, "0\r\n\r\n")
            if self.close_connection:
                return
            # TODO: consume any unread data
    except (socket.error, OSError, pywinerror), e:
        errno = e.args[0]
        if errno not in useless_socket_errors:
            yield self.simple_response("500 Internal Server Error",
                                       format_exc())
        return
    except (OperationTimeout, ConnectionClosed, SocketError):
        return
    except (KeyboardInterrupt, SystemExit, GeneratorExit, MemoryError):
        raise
    except:
        if not self.started_response:
            yield self.simple_response(
                "500 Internal Server Error",
                format_exc()
            )
        else:
            print "*" * 60
            traceback.print_exc()
            print "*" * 60
            sys.exc_clear()
    finally:
        self.conn.close()
        ENVIRON = self.environ = None
[ "def", "run", "(", "self", ")", ":", "self", ".", "close_connection", "=", "False", "try", ":", "while", "True", ":", "self", ".", "started_response", "=", "False", "self", ".", "status", "=", "\"\"", "self", ".", "outheaders", "=", "[", "]", "self", ".", "sent_headers", "=", "False", "self", ".", "chunked_write", "=", "False", "self", ".", "write_buffer", "=", "StringIO", ".", "StringIO", "(", ")", "self", ".", "content_length", "=", "None", "# Copy the class environ into self.\r", "ENVIRON", "=", "self", ".", "environ", "=", "self", ".", "connection_environ", ".", "copy", "(", ")", "self", ".", "environ", ".", "update", "(", "self", ".", "server_environ", ")", "request_line", "=", "yield", "self", ".", "connfh", ".", "readline", "(", ")", "if", "request_line", "==", "\"\\r\\n\"", ":", "# RFC 2616 sec 4.1: \"... it should ignore the CRLF.\"\r", "tolerance", "=", "5", "while", "tolerance", "and", "request_line", "==", "\"\\r\\n\"", ":", "request_line", "=", "yield", "self", ".", "connfh", ".", "readline", "(", ")", "tolerance", "-=", "1", "if", "not", "tolerance", ":", "return", "method", ",", "path", ",", "req_protocol", "=", "request_line", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ",", "2", ")", "ENVIRON", "[", "\"REQUEST_METHOD\"", "]", "=", "method", "ENVIRON", "[", "\"CONTENT_LENGTH\"", "]", "=", "''", "scheme", ",", "location", ",", "path", ",", "params", ",", "qs", ",", "frag", "=", "urlparse", "(", "path", ")", "if", "frag", ":", "yield", "self", ".", "simple_response", "(", "\"400 Bad Request\"", ",", "\"Illegal #fragment in Request-URI.\"", ")", "return", "if", "scheme", ":", "ENVIRON", "[", "\"wsgi.url_scheme\"", "]", "=", "scheme", "if", "params", ":", "path", "=", "path", "+", "\";\"", "+", "params", "ENVIRON", "[", "\"SCRIPT_NAME\"", "]", "=", "\"\"", "# Unquote the path+params (e.g. \"/this%20path\" -> \"this path\").\r", "# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2\r", "#\r", "# But note that \"...a URI must be separated into its components\r", "# before the escaped characters within those components can be\r", "# safely decoded.\" http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2\r", "atoms", "=", "[", "unquote", "(", "x", ")", "for", "x", "in", "quoted_slash", ".", "split", "(", "path", ")", "]", "path", "=", "\"%2F\"", ".", "join", "(", "atoms", ")", "ENVIRON", "[", "\"PATH_INFO\"", "]", "=", "path", "# Note that, like wsgiref and most other WSGI servers,\r", "# we unquote the path but not the query string.\r", "ENVIRON", "[", "\"QUERY_STRING\"", "]", "=", "qs", "# Compare request and server HTTP protocol versions, in case our\r", "# server does not support the requested protocol. Limit our output\r", "# to min(req, server). We want the following output:\r", "# request server actual written supported response\r", "# protocol protocol response protocol feature set\r", "# a 1.0 1.0 1.0 1.0\r", "# b 1.0 1.1 1.1 1.0\r", "# c 1.1 1.0 1.0 1.0\r", "# d 1.1 1.1 1.1 1.1\r", "# Notice that, in (b), the response will be \"HTTP/1.1\" even though\r", "# the client only understands 1.0. 
RFC 2616 10.5.6 says we should\r", "# only return 505 if the _major_ version is different.\r", "rp", "=", "int", "(", "req_protocol", "[", "5", "]", ")", ",", "int", "(", "req_protocol", "[", "7", "]", ")", "server_protocol", "=", "ENVIRON", "[", "\"ACTUAL_SERVER_PROTOCOL\"", "]", "sp", "=", "int", "(", "server_protocol", "[", "5", "]", ")", ",", "int", "(", "server_protocol", "[", "7", "]", ")", "if", "sp", "[", "0", "]", "!=", "rp", "[", "0", "]", ":", "yield", "self", ".", "simple_response", "(", "\"505 HTTP Version Not Supported\"", ")", "return", "# Bah. \"SERVER_PROTOCOL\" is actually the REQUEST protocol.\r", "ENVIRON", "[", "\"SERVER_PROTOCOL\"", "]", "=", "req_protocol", "self", ".", "response_protocol", "=", "\"HTTP/%s.%s\"", "%", "min", "(", "rp", ",", "sp", ")", "# If the Request-URI was an absoluteURI, use its location atom.\r", "if", "location", ":", "ENVIRON", "[", "\"SERVER_NAME\"", "]", "=", "location", "# then all the http headers\r", "try", ":", "while", "True", ":", "line", "=", "yield", "self", ".", "connfh", ".", "readline", "(", ")", "if", "line", "==", "'\\r\\n'", ":", "# Normal end of headers\r", "break", "if", "line", "[", "0", "]", "in", "' \\t'", ":", "# It's a continuation line.\r", "v", "=", "line", ".", "strip", "(", ")", "else", ":", "k", ",", "v", "=", "line", ".", "split", "(", "\":\"", ",", "1", ")", "k", ",", "v", "=", "k", ".", "strip", "(", ")", ".", "upper", "(", ")", ",", "v", ".", "strip", "(", ")", "envname", "=", "\"HTTP_\"", "+", "k", ".", "replace", "(", "\"-\"", ",", "\"_\"", ")", "if", "k", "in", "comma_separated_headers", ":", "existing", "=", "ENVIRON", ".", "get", "(", "envname", ")", "if", "existing", ":", "v", "=", "\", \"", ".", "join", "(", "(", "existing", ",", "v", ")", ")", "ENVIRON", "[", "envname", "]", "=", "v", "ct", "=", "ENVIRON", ".", "pop", "(", "\"HTTP_CONTENT_TYPE\"", ",", "None", ")", "if", "ct", ":", "ENVIRON", "[", "\"CONTENT_TYPE\"", "]", "=", "ct", "cl", "=", "ENVIRON", ".", "pop", "(", "\"HTTP_CONTENT_LENGTH\"", ",", "None", ")", "if", "cl", ":", "ENVIRON", "[", "\"CONTENT_LENGTH\"", "]", "=", "cl", "except", "ValueError", ",", "ex", ":", "yield", "self", ".", "simple_response", "(", "\"400 Bad Request\"", ",", "repr", "(", "ex", ".", "args", ")", ")", "return", "creds", "=", "ENVIRON", ".", "get", "(", "\"HTTP_AUTHORIZATION\"", ",", "\"\"", ")", ".", "split", "(", "\" \"", ",", "1", ")", "ENVIRON", "[", "\"AUTH_TYPE\"", "]", "=", "creds", "[", "0", "]", "if", "creds", "[", "0", "]", ".", "lower", "(", ")", "==", "'basic'", ":", "user", ",", "pw", "=", "base64", ".", "decodestring", "(", "creds", "[", "1", "]", ")", ".", "split", "(", "\":\"", ",", "1", ")", "ENVIRON", "[", "\"REMOTE_USER\"", "]", "=", "user", "# Persistent connection support\r", "if", "req_protocol", "==", "\"HTTP/1.1\"", ":", "if", "ENVIRON", ".", "get", "(", "\"HTTP_CONNECTION\"", ",", "\"\"", ")", "==", "\"close\"", ":", "self", ".", "close_connection", "=", "True", "else", ":", "# HTTP/1.0\r", "if", "ENVIRON", ".", "get", "(", "\"HTTP_CONNECTION\"", ",", "\"\"", ")", ".", "lower", "(", ")", "!=", "\"keep-alive\"", ":", "self", ".", "close_connection", "=", "True", "# Transfer-Encoding support\r", "te", "=", "None", "if", "self", ".", "response_protocol", "==", "\"HTTP/1.1\"", ":", "te", "=", "ENVIRON", ".", "get", "(", "\"HTTP_TRANSFER_ENCODING\"", ")", "if", "te", ":", "te", "=", "[", "x", ".", "strip", "(", ")", ".", "lower", "(", ")", "for", "x", "in", "te", ".", "split", "(", "\",\"", ")", "if", "x", ".", "strip", "(", ")", "]", "if", "te", 
":", "# reject transfer encodings for now\r", "yield", "self", ".", "simple_response", "(", "\"501 Unimplemented\"", ")", "self", ".", "close_connection", "=", "True", "return", "ENV_COGEN_PROXY", "=", "ENVIRON", "[", "'cogen.wsgi'", "]", "=", "async", ".", "COGENProxy", "(", "content_length", "=", "int", "(", "ENVIRON", ".", "get", "(", "'CONTENT_LENGTH'", ",", "None", ")", "or", "0", ")", "or", "None", ",", "read_count", "=", "0", ",", "operation", "=", "None", ",", "result", "=", "None", ",", "exception", "=", "None", ")", "ENVIRON", "[", "'cogen.http_connection'", "]", "=", "self", "ENVIRON", "[", "'cogen.core'", "]", "=", "async", ".", "COGENOperationWrapper", "(", "ENV_COGEN_PROXY", ",", "core", ")", "ENVIRON", "[", "'cogen.call'", "]", "=", "async", ".", "COGENCallWrapper", "(", "ENV_COGEN_PROXY", ")", "ENVIRON", "[", "'cogen.input'", "]", "=", "async", ".", "COGENOperationWrapper", "(", "ENV_COGEN_PROXY", ",", "self", ".", "connfh", ")", "ENVIRON", "[", "'cogen.yield'", "]", "=", "async", ".", "COGENSimpleWrapper", "(", "ENV_COGEN_PROXY", ")", "response", "=", "self", ".", "wsgi_app", "(", "ENVIRON", ",", "self", ".", "start_response", ")", "#~ print 'WSGI RESPONSE:', response\r", "try", ":", "if", "isinstance", "(", "response", ",", "WSGIFileWrapper", ")", ":", "# set tcp_cork to pack the header with the file data\r", "if", "hasattr", "(", "socket", ",", "\"TCP_CORK\"", ")", ":", "self", ".", "conn", ".", "setsockopt", "(", "socket", ".", "IPPROTO_TCP", ",", "socket", ".", "TCP_CORK", ",", "1", ")", "assert", "self", ".", "started_response", ",", "\"App returned the wsgi.file_wrapper but didn't call start_response.\"", "assert", "not", "self", ".", "sent_headers", "self", ".", "sent_headers", "=", "True", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "self", ".", "render_headers", "(", ")", "+", "self", ".", "write_buffer", ".", "getvalue", "(", ")", ")", "offset", "=", "response", ".", "filelike", ".", "tell", "(", ")", "if", "self", ".", "chunked_write", ":", "fsize", "=", "os", ".", "fstat", "(", "response", ".", "filelike", ".", "fileno", "(", ")", ")", ".", "st_size", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "hex", "(", "int", "(", "fsize", "-", "offset", ")", ")", "+", "\"\\r\\n\"", ")", "yield", "self", ".", "conn", ".", "sendfile", "(", "response", ".", "filelike", ",", "blocksize", "=", "response", ".", "blocksize", ",", "offset", "=", "offset", ",", "length", "=", "self", ".", "content_length", ",", "timeout", "=", "self", ".", "sendfile_timeout", ")", "if", "self", ".", "chunked_write", ":", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "\"\\r\\n\"", ")", "# also, tcp_cork will make the file data sent on packet boundaries,\r", "# wich is a good thing\r", "if", "hasattr", "(", "socket", ",", "\"TCP_CORK\"", ")", ":", "self", ".", "conn", ".", "setsockopt", "(", "socket", ".", "IPPROTO_TCP", ",", "socket", ".", "TCP_CORK", ",", "0", ")", "else", ":", "for", "chunk", "in", "response", ":", "if", "chunk", ":", "assert", "self", ".", "started_response", ",", "\"App sended a value but hasn't called start_response.\"", "if", "not", "self", ".", "sent_headers", ":", "self", ".", "sent_headers", "=", "True", "headers", "=", "[", "self", ".", "render_headers", "(", ")", ",", "self", ".", "write_buffer", ".", "getvalue", "(", ")", "]", "else", ":", "headers", "=", "[", "]", "if", "self", ".", "chunked_write", ":", "buf", "=", "[", "hex", "(", "len", "(", "chunk", ")", ")", "[", "2", ":", "]", ",", "\"\\r\\n\"", 
",", "chunk", ",", "\"\\r\\n\"", "]", "if", "headers", ":", "headers", ".", "extend", "(", "buf", ")", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "\"\"", ".", "join", "(", "headers", ")", ")", "else", ":", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "\"\"", ".", "join", "(", "buf", ")", ")", "else", ":", "if", "headers", ":", "headers", ".", "append", "(", "chunk", ")", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "\"\"", ".", "join", "(", "headers", ")", ")", "else", ":", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "chunk", ")", "else", ":", "if", "self", ".", "started_response", ":", "if", "not", "self", ".", "sent_headers", ":", "self", ".", "sent_headers", "=", "True", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "self", ".", "render_headers", "(", ")", "+", "self", ".", "write_buffer", ".", "getvalue", "(", ")", ")", "if", "ENV_COGEN_PROXY", ".", "operation", ":", "op", "=", "ENV_COGEN_PROXY", ".", "operation", "ENV_COGEN_PROXY", ".", "operation", "=", "None", "try", ":", "#~ print 'WSGI OP:', op\r", "ENV_COGEN_PROXY", ".", "exception", "=", "None", "ENV_COGEN_PROXY", ".", "result", "=", "yield", "op", "#~ print 'WSGI OP RESULT:',ENVIRON['cogen.wsgi'].result\r", "except", ":", "#~ print 'WSGI OP EXCEPTION:', sys.exc_info()\r", "ENV_COGEN_PROXY", ".", "exception", "=", "sys", ".", "exc_info", "(", ")", "ENV_COGEN_PROXY", ".", "result", "=", "ENV_COGEN_PROXY", ".", "exception", "[", "1", "]", "del", "op", "finally", ":", "if", "hasattr", "(", "response", ",", "'close'", ")", ":", "response", ".", "close", "(", ")", "if", "self", ".", "started_response", ":", "if", "not", "self", ".", "sent_headers", ":", "self", ".", "sent_headers", "=", "True", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "self", ".", "render_headers", "(", ")", "+", "self", ".", "write_buffer", ".", "getvalue", "(", ")", ")", "else", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"App was consumed and hasn't called start_response\"", ")", "if", "self", ".", "chunked_write", ":", "yield", "sockets", ".", "SendAll", "(", "self", ".", "conn", ",", "\"0\\r\\n\\r\\n\"", ")", "if", "self", ".", "close_connection", ":", "return", "# TODO: consume any unread data\r", "except", "(", "socket", ".", "error", ",", "OSError", ",", "pywinerror", ")", ",", "e", ":", "errno", "=", "e", ".", "args", "[", "0", "]", "if", "errno", "not", "in", "useless_socket_errors", ":", "yield", "self", ".", "simple_response", "(", "\"500 Internal Server Error\"", ",", "format_exc", "(", ")", ")", "return", "except", "(", "OperationTimeout", ",", "ConnectionClosed", ",", "SocketError", ")", ":", "return", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ",", "GeneratorExit", ",", "MemoryError", ")", ":", "raise", "except", ":", "if", "not", "self", ".", "started_response", ":", "yield", "self", ".", "simple_response", "(", "\"500 Internal Server Error\"", ",", "format_exc", "(", ")", ")", "else", ":", "print", "\"*\"", "*", "60", "traceback", ".", "print_exc", "(", ")", "print", "\"*\"", "*", "60", "sys", ".", "exc_clear", "(", ")", "finally", ":", "self", ".", "conn", ".", "close", "(", ")", "ENVIRON", "=", "self", ".", "environ", "=", "None" ]
A bit bulky atm...
[ "A", "bit", "bulky", "atm", "..." ]
python
train
38.549296
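The record above captures cogen's hand-rolled HTTP/1.1 WSGI connection handler; the part worth isolating is the start_response contract it enforces (headers must be set before any body chunk goes out). A minimal runnable sketch of that contract follows; the names and the driver loop are illustrative and not cogen's API.

# Minimal sketch of the start_response contract the handler above enforces;
# everything here is illustrative, not cogen's real machinery.
def demo_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    yield b"hello"

def run_once(app):
    state = {"started": False, "status": None}

    def start_response(status, headers, exc_info=None):
        state["started"] = True
        state["status"] = status

    for chunk in app({}, start_response):
        if chunk:
            # mirrors the handler's assertion that data only follows headers
            assert state["started"], "app yielded data before start_response"
            print(state["status"], chunk)

run_once(demo_app)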
pydata/xarray
xarray/backends/file_manager.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/backends/file_manager.py#L134-L140
def _make_key(self): """Make a key for caching files in the LRU cache.""" value = (self._opener, self._args, 'a' if self._mode == 'w' else self._mode, tuple(sorted(self._kwargs.items()))) return _HashedSequence(value)
[ "def", "_make_key", "(", "self", ")", ":", "value", "=", "(", "self", ".", "_opener", ",", "self", ".", "_args", ",", "'a'", "if", "self", ".", "_mode", "==", "'w'", "else", "self", ".", "_mode", ",", "tuple", "(", "sorted", "(", "self", ".", "_kwargs", ".", "items", "(", ")", ")", ")", ")", "return", "_HashedSequence", "(", "value", ")" ]
Make a key for caching files in the LRU cache.
[ "Make", "a", "key", "for", "caching", "files", "in", "the", "LRU", "cache", "." ]
python
train
40.857143
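The detail worth noting in this record is that mode 'w' is folded into 'a', so a file first opened for writing and later re-opened through the cache hashes to the same key. A toy re-creation of the idea follows; xarray's actual _HashedSequence may differ in detail.

# Toy re-creation of the keying idea; the hash is computed once and reused.
class HashedSequence(list):
    def __init__(self, tuple_value):
        self[:] = tuple_value
        self.hashvalue = hash(tuple_value)  # precompute, as _HashedSequence presumably does

    def __hash__(self):
        return self.hashvalue

def make_key(opener, args, mode, kwargs):
    mode = "a" if mode == "w" else mode  # 'w' and 'a' share one cache slot
    return HashedSequence((opener, args, mode, tuple(sorted(kwargs.items()))))

k_write = make_key(open, ("data.nc",), "w", {})
k_append = make_key(open, ("data.nc",), "a", {})
assert hash(k_write) == hash(k_append)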
danielfrg/word2vec
word2vec/scripts_interface.py
https://github.com/danielfrg/word2vec/blob/762200acec2941a030abed69e946838af35eb2ae/word2vec/scripts_interface.py#L6-L112
def word2vec(
    train,
    output,
    size=100,
    window=5,
    sample="1e-3",
    hs=0,
    negative=5,
    threads=12,
    iter_=5,
    min_count=5,
    alpha=0.025,
    debug=2,
    binary=1,
    cbow=1,
    save_vocab=None,
    read_vocab=None,
    verbose=False,
):
    """
    word2vec execution

    Parameters for training:
        train <file>
            Use text data from <file> to train the model
        output <file>
            Use <file> to save the resulting word vectors / word clusters
        size <int>
            Set size of word vectors; default is 100
        window <int>
            Set max skip length between words; default is 5
        sample <float>
            Set threshold for occurrence of words. Those that appear with
            higher frequency in the training data will be randomly
            down-sampled; default is 1e-3 (0 = off), useful value is 1e-5
        hs <int>
            Use Hierarchical Softmax; default is 0 (1 = used)
        negative <int>
            Number of negative examples; default is 5, common values are 5 - 10
            (0 = not used)
        threads <int>
            Use <int> threads (default 12)
        iter_ <int>
            Number of training iterations; default is 5
        min_count <int>
            This will discard words that appear less than <int> times; default
            is 5
        alpha <float>
            Set the starting learning rate; default is 0.025
        debug <int>
            Set the debug mode (default = 2 = more info during training)
        binary <int>
            Save the resulting vectors in binary mode; default is 1 (0 = off)
        cbow <int>
            Use the continuous bag of words model; default is 1
            (use 0 for skip-gram model)
        save_vocab <file>
            The vocabulary will be saved to <file>
        read_vocab <file>
            The vocabulary will be read from <file>, not constructed from the
            training data
        verbose
            Print output from training
    """
    command = ["word2vec"]

    args = [
        "-train",
        "-output",
        "-size",
        "-window",
        "-sample",
        "-hs",
        "-negative",
        "-threads",
        "-iter",
        "-min-count",
        "-alpha",
        "-debug",
        "-binary",
        "-cbow",
    ]
    values = [
        train,
        output,
        size,
        window,
        sample,
        hs,
        negative,
        threads,
        iter_,
        min_count,
        alpha,
        debug,
        binary,
        cbow,
    ]

    for arg, value in zip(args, values):
        command.append(arg)
        command.append(str(value))

    if save_vocab is not None:
        command.append("-save-vocab")
        command.append(str(save_vocab))
    if read_vocab is not None:
        command.append("-read-vocab")
        command.append(str(read_vocab))

    run_cmd(command, verbose=verbose)
[ "def", "word2vec", "(", "train", ",", "output", ",", "size", "=", "100", ",", "window", "=", "5", ",", "sample", "=", "\"1e-3\"", ",", "hs", "=", "0", ",", "negative", "=", "5", ",", "threads", "=", "12", ",", "iter_", "=", "5", ",", "min_count", "=", "5", ",", "alpha", "=", "0.025", ",", "debug", "=", "2", ",", "binary", "=", "1", ",", "cbow", "=", "1", ",", "save_vocab", "=", "None", ",", "read_vocab", "=", "None", ",", "verbose", "=", "False", ",", ")", ":", "command", "=", "[", "\"word2vec\"", "]", "args", "=", "[", "\"-train\"", ",", "\"-output\"", ",", "\"-size\"", ",", "\"-window\"", ",", "\"-sample\"", ",", "\"-hs\"", ",", "\"-negative\"", ",", "\"-threads\"", ",", "\"-iter\"", ",", "\"-min-count\"", ",", "\"-alpha\"", ",", "\"-debug\"", ",", "\"-binary\"", ",", "\"-cbow\"", ",", "]", "values", "=", "[", "train", ",", "output", ",", "size", ",", "window", ",", "sample", ",", "hs", ",", "negative", ",", "threads", ",", "iter_", ",", "min_count", ",", "alpha", ",", "debug", ",", "binary", ",", "cbow", ",", "]", "for", "arg", ",", "value", "in", "zip", "(", "args", ",", "values", ")", ":", "command", ".", "append", "(", "arg", ")", "command", ".", "append", "(", "str", "(", "value", ")", ")", "if", "save_vocab", "is", "not", "None", ":", "command", ".", "append", "(", "\"-save-vocab\"", ")", "command", ".", "append", "(", "str", "(", "save_vocab", ")", ")", "if", "read_vocab", "is", "not", "None", ":", "command", ".", "append", "(", "\"-read-vocab\"", ")", "command", ".", "append", "(", "str", "(", "read_vocab", ")", ")", "run_cmd", "(", "command", ",", "verbose", "=", "verbose", ")" ]
word2vec execution

Parameters for training:
    train <file>
        Use text data from <file> to train the model
    output <file>
        Use <file> to save the resulting word vectors / word clusters
    size <int>
        Set size of word vectors; default is 100
    window <int>
        Set max skip length between words; default is 5
    sample <float>
        Set threshold for occurrence of words. Those that appear with
        higher frequency in the training data will be randomly
        down-sampled; default is 1e-3 (0 = off), useful value is 1e-5
    hs <int>
        Use Hierarchical Softmax; default is 0 (1 = used)
    negative <int>
        Number of negative examples; default is 5, common values are 5 - 10
        (0 = not used)
    threads <int>
        Use <int> threads (default 12)
    iter_ <int>
        Number of training iterations; default is 5
    min_count <int>
        This will discard words that appear less than <int> times; default
        is 5
    alpha <float>
        Set the starting learning rate; default is 0.025
    debug <int>
        Set the debug mode (default = 2 = more info during training)
    binary <int>
        Save the resulting vectors in binary mode; default is 1 (0 = off)
    cbow <int>
        Use the continuous bag of words model; default is 1
        (use 0 for skip-gram model)
    save_vocab <file>
        The vocabulary will be saved to <file>
    read_vocab <file>
        The vocabulary will be read from <file>, not constructed from the
        training data
    verbose
        Print output from training
[ "word2vec", "execution" ]
python
train
26.401869
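The wrapper above only builds an argv list for the external word2vec binary. The construction can be reproduced without invoking anything; the paths below are hypothetical.

# Rebuild the argv list the same way the wrapper does; no binary is run.
args = ["-train", "-output", "-size", "-window"]
values = ["corpus.txt", "vectors.bin", 100, 5]   # hypothetical inputs

command = ["word2vec"]
for arg, value in zip(args, values):
    command.append(arg)
    command.append(str(value))

print(command)
# ['word2vec', '-train', 'corpus.txt', '-output', 'vectors.bin',
#  '-size', '100', '-window', '5']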
diogobaeder/pycket
pycket/session.py
https://github.com/diogobaeder/pycket/blob/b21d1553b4100d820a6922eeb55baa2329bc02c2/pycket/session.py#L89-L97
def get(self, name, default=None): ''' Gets the object for "name", or None if there's no such object. If "default" is provided, return it if no object is found. ''' session = self.__get_session_from_db() return session.get(name, default)
[ "def", "get", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "session", "=", "self", ".", "__get_session_from_db", "(", ")", "return", "session", ".", "get", "(", "name", ",", "default", ")" ]
Gets the object for "name", or None if there's no such object. If "default" is provided, return it if no object is found.
[ "Gets", "the", "object", "for", "name", "or", "None", "if", "there", "s", "no", "such", "object", ".", "If", "default", "is", "provided", "return", "it", "if", "no", "object", "is", "found", "." ]
python
train
31
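Functionally this is dict.get over a session loaded from the backing store. A backend-free stub with the same semantics (the real pycket manager fetches this dict from Redis or Memcached):

# Stub session with the same get() behaviour as the record above.
class StubSession:
    def __init__(self, data):
        self._data = data

    def get(self, name, default=None):
        return self._data.get(name, default)

session = StubSession({"user": "alice"})
print(session.get("user"))            # alice
print(session.get("theme", "light"))  # light -- the provided default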
ethereum/py-evm
eth/vm/state.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/state.py#L283-L293
def get_computation(self, message: Message, transaction_context: 'BaseTransactionContext') -> 'BaseComputation': """ Return a computation instance for the given `message` and `transaction_context` """ if self.computation_class is None: raise AttributeError("No `computation_class` has been set for this State") else: computation = self.computation_class(self, message, transaction_context) return computation
[ "def", "get_computation", "(", "self", ",", "message", ":", "Message", ",", "transaction_context", ":", "'BaseTransactionContext'", ")", "->", "'BaseComputation'", ":", "if", "self", ".", "computation_class", "is", "None", ":", "raise", "AttributeError", "(", "\"No `computation_class` has been set for this State\"", ")", "else", ":", "computation", "=", "self", ".", "computation_class", "(", "self", ",", "message", ",", "transaction_context", ")", "return", "computation" ]
Return a computation instance for the given `message` and `transaction_context`
[ "Return", "a", "computation", "instance", "for", "the", "given", "message", "and", "transaction_context" ]
python
train
47.090909
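The pattern here is a pluggable class attribute validated at call time. A self-contained mimic follows; these are stand-in classes, not py-evm's real ones.

# Mimic of the pluggable computation_class pattern.
class BaseState:
    computation_class = None

    def get_computation(self, message, transaction_context):
        if self.computation_class is None:
            raise AttributeError("No `computation_class` has been set for this State")
        return self.computation_class(self, message, transaction_context)

class DemoComputation:
    def __init__(self, state, message, transaction_context):
        self.message = message

class DemoState(BaseState):
    computation_class = DemoComputation

print(type(DemoState().get_computation("msg", "ctx")).__name__)  # DemoComputation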
log2timeline/plaso
plaso/preprocessors/windows.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/preprocessors/windows.py#L421-L438
def _GetUsernameFromProfilePath(self, path):
    """Retrieves the username from a Windows profile path.

    Trailing path segments are ignored.

    Args:
      path (str): a Windows path with '\\' as path segment separator.

    Returns:
      str: basename which is the last path segment.
    """
    # Strip trailing key separators.
    while path and path[-1] == '\\':
      path = path[:-1]

    if path:
      _, _, path = path.rpartition('\\')
    return path
[ "def", "_GetUsernameFromProfilePath", "(", "self", ",", "path", ")", ":", "# Strip trailing key separators.", "while", "path", "and", "path", "[", "-", "1", "]", "==", "'\\\\'", ":", "path", "=", "path", "[", ":", "-", "1", "]", "if", "path", ":", "_", ",", "_", ",", "path", "=", "path", ".", "rpartition", "(", "'\\\\'", ")", "return", "path" ]
Retrieves the username from a Windows profile path.

Trailing path segments are ignored.

Args:
  path (str): a Windows path with '\\' as path segment separator.

Returns:
  str: basename which is the last path segment.
[ "Retrieves", "the", "username", "from", "a", "Windows", "profile", "path", "." ]
python
train
25.222222
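Since the helper is pure string manipulation, it is easy to sanity-check against a few Windows paths with a standalone copy of the logic:

# Standalone copy of the same string logic for quick verification.
def username_from_profile_path(path):
    while path and path[-1] == '\\':
        path = path[:-1]                   # drop trailing separators
    if path:
        _, _, path = path.rpartition('\\')
    return path

print(username_from_profile_path('C:\\Users\\alice'))      # alice
print(username_from_profile_path('C:\\Users\\alice\\\\'))  # alice
print(username_from_profile_path(''))                      # '' stays empty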
horazont/aioxmpp
aioxmpp/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/service.py#L1520-L1530
def is_depsignal_handler(class_, signal_name, cb, *, defer=False): """ Return true if `cb` has been decorated with :func:`depsignal` for the given signal, class and connection mode. """ try: handlers = get_magic_attr(cb) except AttributeError: return False return _depsignal_spec(class_, signal_name, cb, defer) in handlers
[ "def", "is_depsignal_handler", "(", "class_", ",", "signal_name", ",", "cb", ",", "*", ",", "defer", "=", "False", ")", ":", "try", ":", "handlers", "=", "get_magic_attr", "(", "cb", ")", "except", "AttributeError", ":", "return", "False", "return", "_depsignal_spec", "(", "class_", ",", "signal_name", ",", "cb", ",", "defer", ")", "in", "handlers" ]
Return true if `cb` has been decorated with :func:`depsignal` for the given signal, class and connection mode.
[ "Return", "true", "if", "cb", "has", "been", "decorated", "with", ":", "func", ":", "depsignal", "for", "the", "given", "signal", "class", "and", "connection", "mode", "." ]
python
train
32.545455
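The underlying mechanism is decorator metadata: depsignal presumably records a spec tuple on a magic attribute of the callback, and this predicate tests membership. A toy version with a made-up attribute name and spec shape, not aioxmpp's internals:

# Toy version of the magic-attribute pattern.
_MAGIC = "_demo_handler_specs"   # invented name, not aioxmpp's

def depsignal(class_, signal_name, defer=False):
    def decorator(cb):
        if not hasattr(cb, _MAGIC):
            setattr(cb, _MAGIC, set())
        getattr(cb, _MAGIC).add((class_, signal_name, defer))
        return cb
    return decorator

def is_depsignal_handler(class_, signal_name, cb, defer=False):
    try:
        handlers = getattr(cb, _MAGIC)
    except AttributeError:
        return False
    return (class_, signal_name, defer) in handlers

@depsignal("SomeService", "on_ready")
def on_ready_cb():
    pass

print(is_depsignal_handler("SomeService", "on_ready", on_ready_cb))  # True
print(is_depsignal_handler("SomeService", "on_stop", on_ready_cb))   # False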
spotify/luigi
luigi/worker.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/worker.py#L1041-L1109
def _handle_next_task(self): """ We have to catch three ways a task can be "done": 1. normal execution: the task runs/fails and puts a result back on the queue, 2. new dependencies: the task yielded new deps that were not complete and will be rescheduled and dependencies added, 3. child process dies: we need to catch this separately. """ self._idle_since = None while True: self._purge_children() # Deal with subprocess failures try: task_id, status, expl, missing, new_requirements = ( self._task_result_queue.get( timeout=self._config.wait_interval)) except Queue.Empty: return task = self._scheduled_tasks[task_id] if not task or task_id not in self._running_tasks: continue # Not a running task. Probably already removed. # Maybe it yielded something? # external task if run not implemented, retry-able if config option is enabled. external_task_retryable = _is_external(task) and self._config.retry_external_tasks if status == FAILED and not external_task_retryable: self._email_task_failure(task, expl) new_deps = [] if new_requirements: new_req = [load_task(module, name, params) for module, name, params in new_requirements] for t in new_req: self.add(t) new_deps = [t.task_id for t in new_req] self._add_task(worker=self._id, task_id=task_id, status=status, expl=json.dumps(expl), resources=task.process_resources(), runnable=None, params=task.to_str_params(), family=task.task_family, module=task.task_module, new_deps=new_deps, assistant=self._assistant, retry_policy_dict=_get_retry_policy_dict(task)) self._running_tasks.pop(task_id) # re-add task to reschedule missing dependencies if missing: reschedule = True # keep out of infinite loops by not rescheduling too many times for task_id in missing: self.unfulfilled_counts[task_id] += 1 if (self.unfulfilled_counts[task_id] > self._config.max_reschedules): reschedule = False if reschedule: self.add(task) self.run_succeeded &= (status == DONE) or (len(new_deps) > 0) return
[ "def", "_handle_next_task", "(", "self", ")", ":", "self", ".", "_idle_since", "=", "None", "while", "True", ":", "self", ".", "_purge_children", "(", ")", "# Deal with subprocess failures", "try", ":", "task_id", ",", "status", ",", "expl", ",", "missing", ",", "new_requirements", "=", "(", "self", ".", "_task_result_queue", ".", "get", "(", "timeout", "=", "self", ".", "_config", ".", "wait_interval", ")", ")", "except", "Queue", ".", "Empty", ":", "return", "task", "=", "self", ".", "_scheduled_tasks", "[", "task_id", "]", "if", "not", "task", "or", "task_id", "not", "in", "self", ".", "_running_tasks", ":", "continue", "# Not a running task. Probably already removed.", "# Maybe it yielded something?", "# external task if run not implemented, retry-able if config option is enabled.", "external_task_retryable", "=", "_is_external", "(", "task", ")", "and", "self", ".", "_config", ".", "retry_external_tasks", "if", "status", "==", "FAILED", "and", "not", "external_task_retryable", ":", "self", ".", "_email_task_failure", "(", "task", ",", "expl", ")", "new_deps", "=", "[", "]", "if", "new_requirements", ":", "new_req", "=", "[", "load_task", "(", "module", ",", "name", ",", "params", ")", "for", "module", ",", "name", ",", "params", "in", "new_requirements", "]", "for", "t", "in", "new_req", ":", "self", ".", "add", "(", "t", ")", "new_deps", "=", "[", "t", ".", "task_id", "for", "t", "in", "new_req", "]", "self", ".", "_add_task", "(", "worker", "=", "self", ".", "_id", ",", "task_id", "=", "task_id", ",", "status", "=", "status", ",", "expl", "=", "json", ".", "dumps", "(", "expl", ")", ",", "resources", "=", "task", ".", "process_resources", "(", ")", ",", "runnable", "=", "None", ",", "params", "=", "task", ".", "to_str_params", "(", ")", ",", "family", "=", "task", ".", "task_family", ",", "module", "=", "task", ".", "task_module", ",", "new_deps", "=", "new_deps", ",", "assistant", "=", "self", ".", "_assistant", ",", "retry_policy_dict", "=", "_get_retry_policy_dict", "(", "task", ")", ")", "self", ".", "_running_tasks", ".", "pop", "(", "task_id", ")", "# re-add task to reschedule missing dependencies", "if", "missing", ":", "reschedule", "=", "True", "# keep out of infinite loops by not rescheduling too many times", "for", "task_id", "in", "missing", ":", "self", ".", "unfulfilled_counts", "[", "task_id", "]", "+=", "1", "if", "(", "self", ".", "unfulfilled_counts", "[", "task_id", "]", ">", "self", ".", "_config", ".", "max_reschedules", ")", ":", "reschedule", "=", "False", "if", "reschedule", ":", "self", ".", "add", "(", "task", ")", "self", ".", "run_succeeded", "&=", "(", "status", "==", "DONE", ")", "or", "(", "len", "(", "new_deps", ")", ">", "0", ")", "return" ]
We have to catch three ways a task can be "done": 1. normal execution: the task runs/fails and puts a result back on the queue, 2. new dependencies: the task yielded new deps that were not complete and will be rescheduled and dependencies added, 3. child process dies: we need to catch this separately.
[ "We", "have", "to", "catch", "three", "ways", "a", "task", "can", "be", "done", ":" ]
python
train
41.434783
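The most reusable idea in this record is the reschedule guard: a per-task counter capped by max_reschedules so missing dependencies cannot loop forever. Isolated below, with an illustrative cap:

# Isolated sketch of the reschedule guard; the cap value is illustrative.
import collections

unfulfilled_counts = collections.defaultdict(int)
MAX_RESCHEDULES = 1

def should_reschedule(missing_task_ids):
    reschedule = True
    for task_id in missing_task_ids:
        unfulfilled_counts[task_id] += 1
        if unfulfilled_counts[task_id] > MAX_RESCHEDULES:
            reschedule = False   # break potential infinite reschedule loops
    return reschedule

print(should_reschedule(["TaskA"]))  # True  -- first missing report
print(should_reschedule(["TaskA"]))  # False -- exceeded max_reschedules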
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/package_index.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/package_index.py#L1025-L1049
def local_open(url): """Read a local path, with special support for directories""" scheme, server, path, param, query, frag = urlparse(url) filename = url2pathname(path) if os.path.isfile(filename): return urllib2.urlopen(url) elif path.endswith('/') and os.path.isdir(filename): files = [] for f in os.listdir(filename): if f=='index.html': with open(os.path.join(filename,f),'r') as fp: body = fp.read() break elif os.path.isdir(os.path.join(filename,f)): f+='/' files.append("<a href=%r>%s</a>" % (f,f)) else: body = ("<html><head><title>%s</title>" % url) + \ "</head><body>%s</body></html>" % '\n'.join(files) status, message = 200, "OK" else: status, message, body = 404, "Path not found", "Not found" headers = {'content-type': 'text/html'} return HTTPError(url, status, message, headers, StringIO(body))
[ "def", "local_open", "(", "url", ")", ":", "scheme", ",", "server", ",", "path", ",", "param", ",", "query", ",", "frag", "=", "urlparse", "(", "url", ")", "filename", "=", "url2pathname", "(", "path", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "urllib2", ".", "urlopen", "(", "url", ")", "elif", "path", ".", "endswith", "(", "'/'", ")", "and", "os", ".", "path", ".", "isdir", "(", "filename", ")", ":", "files", "=", "[", "]", "for", "f", "in", "os", ".", "listdir", "(", "filename", ")", ":", "if", "f", "==", "'index.html'", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "filename", ",", "f", ")", ",", "'r'", ")", "as", "fp", ":", "body", "=", "fp", ".", "read", "(", ")", "break", "elif", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "filename", ",", "f", ")", ")", ":", "f", "+=", "'/'", "files", ".", "append", "(", "\"<a href=%r>%s</a>\"", "%", "(", "f", ",", "f", ")", ")", "else", ":", "body", "=", "(", "\"<html><head><title>%s</title>\"", "%", "url", ")", "+", "\"</head><body>%s</body></html>\"", "%", "'\\n'", ".", "join", "(", "files", ")", "status", ",", "message", "=", "200", ",", "\"OK\"", "else", ":", "status", ",", "message", ",", "body", "=", "404", ",", "\"Path not found\"", ",", "\"Not found\"", "headers", "=", "{", "'content-type'", ":", "'text/html'", "}", "return", "HTTPError", "(", "url", ",", "status", ",", "message", ",", "headers", ",", "StringIO", "(", "body", ")", ")" ]
Read a local path, with special support for directories
[ "Read", "a", "local", "path", "with", "special", "support", "for", "directories" ]
python
test
40
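The directory branch builds a small HTML index by hand. A Python 3 re-creation run against a temporary directory (the record itself is Python 2 and serves real filesystem paths):

# Python 3 re-creation of the directory-listing branch.
import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    open(os.path.join(d, "pkg-1.0.tar.gz"), "w").close()
    os.mkdir(os.path.join(d, "subdir"))
    files = []
    for f in os.listdir(d):
        if os.path.isdir(os.path.join(d, f)):
            f += "/"                       # directories get a trailing slash
        files.append("<a href=%r>%s</a>" % (f, f))
    body = ("<html><head><title>listing</title></head>"
            "<body>%s</body></html>" % "\n".join(files))
    print(body)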
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/bson/__init__.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/bson/__init__.py#L623-L628
def _encode_long(name, value, dummy0, dummy1): """Encode a python long (python 2.x)""" try: return b"\x12" + name + _PACK_LONG(value) except struct.error: raise OverflowError("BSON can only handle up to 8-byte ints")
[ "def", "_encode_long", "(", "name", ",", "value", ",", "dummy0", ",", "dummy1", ")", ":", "try", ":", "return", "b\"\\x12\"", "+", "name", "+", "_PACK_LONG", "(", "value", ")", "except", "struct", ".", "error", ":", "raise", "OverflowError", "(", "\"BSON can only handle up to 8-byte ints\"", ")" ]
Encode a python long (python 2.x)
[ "Encode", "a", "python", "long", "(", "python", "2", ".", "x", ")" ]
python
train
39.833333
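_PACK_LONG is not shown in the record; BSON's int64 (type byte 0x12) is a little-endian signed 64-bit value, so the sketch below assumes struct.Struct("<q").pack and demonstrates both the success and OverflowError paths:

# Assuming _PACK_LONG packs a little-endian signed 64-bit int ("<q").
import struct

_PACK_LONG = struct.Struct("<q").pack

def encode_long(name, value):
    try:
        return b"\x12" + name + _PACK_LONG(value)
    except struct.error:
        raise OverflowError("BSON can only handle up to 8-byte ints")

print(encode_long(b"n\x00", 2 ** 40).hex())
try:
    encode_long(b"n\x00", 2 ** 63)   # one past the signed int64 range
except OverflowError as exc:
    print(exc)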
pgmpy/pgmpy
pgmpy/inference/mplp.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/inference/mplp.py#L144-L203
def _update_message(self, sending_cluster): """ This is the message-update method. Parameters ---------- sending_cluster: The resulting messages are lambda_{c-->s} from the given cluster 'c' to all of its intersection_sets 's'. Here 's' are the elements of intersection_sets_for_cluster_c. Reference --------- Fixing Max-Product: Convergent Message-Passing Algorithms for MAP LP Relaxations by Amir Globerson and Tommi Jaakkola. Section 6, Page: 5; Beyond pairwise potentials: Generalized MPLP Later Modified by Sontag in "Introduction to Dual decomposition for Inference" Pg: 7 & 17 """ # The new updates will take place for the intersection_sets of this cluster. # The new updates are: # \delta_{f \rightarrow i}(x_i) = - \delta_i^{-f} + # 1/{\| f \|} max_{x_{f-i}}\left[{\theta_f(x_f) + \sum_{i' in f}{\delta_{i'}^{-f}}(x_i')} \right ] # Step. 1) Calculate {\theta_f(x_f) + \sum_{i' in f}{\delta_{i'}^{-f}}(x_i')} objective_cluster = self.objective[sending_cluster.cluster_variables] for current_intersect in sending_cluster.intersection_sets_for_cluster_c: objective_cluster += self.objective[current_intersect] updated_results = [] objective = [] for current_intersect in sending_cluster.intersection_sets_for_cluster_c: # Step. 2) Maximize step.1 result wrt variables present in the cluster but not in the current intersect. phi = objective_cluster.maximize(list(sending_cluster.cluster_variables - current_intersect), inplace=False) # Step. 3) Multiply 1/{\| f \|} intersection_length = len(sending_cluster.intersection_sets_for_cluster_c) phi *= (1 / intersection_length) objective.append(phi) # Step. 4) Subtract \delta_i^{-f} # These are the messages not emanating from the sending cluster but going into the current intersect. # which is = Objective[current_intersect_node] - messages from the cluster to the current intersect node. updated_results.append(phi + -1 * (self.objective[current_intersect] + -1 * sending_cluster. message_from_cluster[current_intersect])) # This loop is primarily for simultaneous updating: # 1. This cluster's message to each of the intersects. # 2. The value of the Objective for intersection_nodes. index = -1 cluster_potential = copy.deepcopy(sending_cluster.cluster_potential) for current_intersect in sending_cluster.intersection_sets_for_cluster_c: index += 1 sending_cluster.message_from_cluster[current_intersect] = updated_results[index] self.objective[current_intersect] = objective[index] cluster_potential += (-1) * updated_results[index] # Here we update the Objective for the current factor. self.objective[sending_cluster.cluster_variables] = cluster_potential
[ "def", "_update_message", "(", "self", ",", "sending_cluster", ")", ":", "# The new updates will take place for the intersection_sets of this cluster.", "# The new updates are:", "# \\delta_{f \\rightarrow i}(x_i) = - \\delta_i^{-f} +", "# 1/{\\| f \\|} max_{x_{f-i}}\\left[{\\theta_f(x_f) + \\sum_{i' in f}{\\delta_{i'}^{-f}}(x_i')} \\right ]", "# Step. 1) Calculate {\\theta_f(x_f) + \\sum_{i' in f}{\\delta_{i'}^{-f}}(x_i')}", "objective_cluster", "=", "self", ".", "objective", "[", "sending_cluster", ".", "cluster_variables", "]", "for", "current_intersect", "in", "sending_cluster", ".", "intersection_sets_for_cluster_c", ":", "objective_cluster", "+=", "self", ".", "objective", "[", "current_intersect", "]", "updated_results", "=", "[", "]", "objective", "=", "[", "]", "for", "current_intersect", "in", "sending_cluster", ".", "intersection_sets_for_cluster_c", ":", "# Step. 2) Maximize step.1 result wrt variables present in the cluster but not in the current intersect.", "phi", "=", "objective_cluster", ".", "maximize", "(", "list", "(", "sending_cluster", ".", "cluster_variables", "-", "current_intersect", ")", ",", "inplace", "=", "False", ")", "# Step. 3) Multiply 1/{\\| f \\|}", "intersection_length", "=", "len", "(", "sending_cluster", ".", "intersection_sets_for_cluster_c", ")", "phi", "*=", "(", "1", "/", "intersection_length", ")", "objective", ".", "append", "(", "phi", ")", "# Step. 4) Subtract \\delta_i^{-f}", "# These are the messages not emanating from the sending cluster but going into the current intersect.", "# which is = Objective[current_intersect_node] - messages from the cluster to the current intersect node.", "updated_results", ".", "append", "(", "phi", "+", "-", "1", "*", "(", "self", ".", "objective", "[", "current_intersect", "]", "+", "-", "1", "*", "sending_cluster", ".", "message_from_cluster", "[", "current_intersect", "]", ")", ")", "# This loop is primarily for simultaneous updating:", "# 1. This cluster's message to each of the intersects.", "# 2. The value of the Objective for intersection_nodes.", "index", "=", "-", "1", "cluster_potential", "=", "copy", ".", "deepcopy", "(", "sending_cluster", ".", "cluster_potential", ")", "for", "current_intersect", "in", "sending_cluster", ".", "intersection_sets_for_cluster_c", ":", "index", "+=", "1", "sending_cluster", ".", "message_from_cluster", "[", "current_intersect", "]", "=", "updated_results", "[", "index", "]", "self", ".", "objective", "[", "current_intersect", "]", "=", "objective", "[", "index", "]", "cluster_potential", "+=", "(", "-", "1", ")", "*", "updated_results", "[", "index", "]", "# Here we update the Objective for the current factor.", "self", ".", "objective", "[", "sending_cluster", ".", "cluster_variables", "]", "=", "cluster_potential" ]
This is the message-update method. Parameters ---------- sending_cluster: The resulting messages are lambda_{c-->s} from the given cluster 'c' to all of its intersection_sets 's'. Here 's' are the elements of intersection_sets_for_cluster_c. Reference --------- Fixing Max-Product: Convergent Message-Passing Algorithms for MAP LP Relaxations by Amir Globerson and Tommi Jaakkola. Section 6, Page: 5; Beyond pairwise potentials: Generalized MPLP Later Modified by Sontag in "Introduction to Dual decomposition for Inference" Pg: 7 & 17
[ "This", "is", "the", "message", "-", "update", "method", "." ]
python
train
51.716667
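The numerical core of steps 2-3 above is "maximize out the variables not in the intersect, then scale by 1/|f|". A toy demonstration on a 2x3 table of log-potentials, with invented numbers:

# Toy max-marginalization plus the 1/|f| scaling; axis 1 plays the role
# of the variables outside the current intersection set.
import numpy as np

theta = np.array([[0.5, 1.0, -0.2],
                  [0.0, 2.0,  0.3]])
num_intersects = 2                  # the |f| in the update rule

phi = theta.max(axis=1)             # maximize out the non-intersect axis
phi = phi / num_intersects          # multiply by 1/|f|
print(phi)                          # [0.5 1. ]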
gem/oq-engine
openquake/hazardlib/geo/geodetic.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/geodetic.py#L434-L486
def distance_to_semi_arc(alon, alat, aazimuth, plons, plats):
    """
    In this method we use a reference system centered on (alon, alat) and
    with the y-axis corresponding to the aazimuth direction to calculate the
    minimum distance from a semiarc that originates at (alon, alat).

    Parameters are the same as for :func:`distance_to_arc`.
    """

    if type(plons) is float:
        plons = numpy.array([plons])
        plats = numpy.array([plats])
    azimuth_to_target = azimuth(alon, alat, plons, plats)

    # Find the indexes of the points in the positive y halfspace
    idx = numpy.nonzero(numpy.cos(
        numpy.radians((aazimuth-azimuth_to_target))) > 0.0)

    # Find the indexes of the points in the negative y halfspace
    idx_not = numpy.nonzero(numpy.cos(
        numpy.radians((aazimuth-azimuth_to_target))) <= 0.0)

    idx_ll_quadr = numpy.nonzero(
        (numpy.cos(numpy.radians((aazimuth-azimuth_to_target))) <= 0.0) &
        (numpy.sin(numpy.radians((aazimuth-azimuth_to_target))) > 0.0))

    # Initialise the array containing the final distances
    distance = numpy.zeros_like(plons)

    # Compute the distance between the semi-arc with 'aazimuth' direction
    # and the set of sites in the positive half-space. The shortest distance to
    # the semi-arc in this case can be computed using the function
    # :func:`openquake.hazardlib.geo.geodetic.distance_to_arc`.
    if len(idx):
        distance_to_target = geodetic_distance(alon, alat,
                                               plons[idx], plats[idx])
        t_angle = (azimuth_to_target[idx] - aazimuth + 360) % 360
        angle = numpy.arccos((numpy.sin(numpy.radians(t_angle)) *
                              numpy.sin(distance_to_target / EARTH_RADIUS)))
        distance[idx] = (numpy.pi / 2 - angle) * EARTH_RADIUS

    # Compute the distance between the reference point and the set of sites
    # in the negative half-space. The shortest distance for the semi-arc for
    # all the points in the negative semi-space simply corresponds to the
    # shortest distance to its origin.
    if len(idx_not):
        distance[idx_not] = geodetic_distance(alon, alat,
                                              plons[idx_not],
                                              plats[idx_not])
        distance[idx_ll_quadr] = -1 * distance[idx_ll_quadr]

    return distance
[ "def", "distance_to_semi_arc", "(", "alon", ",", "alat", ",", "aazimuth", ",", "plons", ",", "plats", ")", ":", "if", "type", "(", "plons", ")", "is", "float", ":", "plons", "=", "numpy", ".", "array", "(", "[", "plons", "]", ")", "plats", "=", "numpy", ".", "array", "(", "[", "plats", "]", ")", "azimuth_to_target", "=", "azimuth", "(", "alon", ",", "alat", ",", "plons", ",", "plats", ")", "# Find the indexes of the points in the positive y halfspace", "idx", "=", "numpy", ".", "nonzero", "(", "numpy", ".", "cos", "(", "numpy", ".", "radians", "(", "(", "aazimuth", "-", "azimuth_to_target", ")", ")", ")", ">", "0.0", ")", "# Find the indexes of the points in the negative y halfspace", "idx_not", "=", "numpy", ".", "nonzero", "(", "numpy", ".", "cos", "(", "numpy", ".", "radians", "(", "(", "aazimuth", "-", "azimuth_to_target", ")", ")", ")", "<=", "0.0", ")", "idx_ll_quadr", "=", "numpy", ".", "nonzero", "(", "(", "numpy", ".", "cos", "(", "numpy", ".", "radians", "(", "(", "aazimuth", "-", "azimuth_to_target", ")", ")", ")", "<=", "0.0", ")", "&", "(", "numpy", ".", "sin", "(", "numpy", ".", "radians", "(", "(", "aazimuth", "-", "azimuth_to_target", ")", ")", ")", ">", "0.0", ")", ")", "# Initialise the array containing the final distances", "distance", "=", "numpy", ".", "zeros_like", "(", "plons", ")", "# Compute the distance between the semi-arc with 'aazimuth' direction", "# and the set of sites in the positive half-space. The shortest distance to", "# the semi-arc in this case can be computed using the function", "# :func:`openquake.hazardlib.geo.geodetic.distance_to_arc`.", "if", "len", "(", "idx", ")", ":", "distance_to_target", "=", "geodetic_distance", "(", "alon", ",", "alat", ",", "plons", "[", "idx", "]", ",", "plats", "[", "idx", "]", ")", "t_angle", "=", "(", "azimuth_to_target", "[", "idx", "]", "-", "aazimuth", "+", "360", ")", "%", "360", "angle", "=", "numpy", ".", "arccos", "(", "(", "numpy", ".", "sin", "(", "numpy", ".", "radians", "(", "t_angle", ")", ")", "*", "numpy", ".", "sin", "(", "distance_to_target", "/", "EARTH_RADIUS", ")", ")", ")", "distance", "[", "idx", "]", "=", "(", "numpy", ".", "pi", "/", "2", "-", "angle", ")", "*", "EARTH_RADIUS", "# Compute the distance between the reference point and the set of sites", "# in the negative half-space. The shortest distance for the semi-arc for", "# all the points in the negative semi-space simply corresponds to the", "# shortest distance to its origin.", "if", "len", "(", "idx_not", ")", ":", "distance", "[", "idx_not", "]", "=", "geodetic_distance", "(", "alon", ",", "alat", ",", "plons", "[", "idx_not", "]", ",", "plats", "[", "idx_not", "]", ")", "distance", "[", "idx_ll_quadr", "]", "=", "-", "1", "*", "distance", "[", "idx_ll_quadr", "]", "return", "distance" ]
In this method we use a reference system centered on (alon, alat) and
with the y-axis corresponding to the aazimuth direction to calculate the
minimum distance from a semiarc that originates at (alon, alat).

Parameters are the same as for :func:`distance_to_arc`.
[ "In", "this", "method", "we", "use", "a", "reference", "system", "centerd", "on", "(", "alon", "alat", ")", "and", "with", "the", "y", "-", "axis", "corresponding", "to", "aazimuth", "direction", "to", "calculate", "the", "minimum", "distance", "from", "a", "semiarc", "with", "generates", "in", "(", "alon", "alat", ")", "." ]
python
train
43.754717
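The halfspace split above hinges on the sign of cos(aazimuth - azimuth_to_target): points whose azimuth-to-target differs from the arc azimuth by under 90 degrees are "in front of" the semi-arc. In isolation:

# The halfspace test used by the record, with illustrative azimuths.
import numpy as np

aazimuth = 30.0
azimuth_to_target = np.array([10.0, 30.0, 150.0, 250.0])

in_front = np.cos(np.radians(aazimuth - azimuth_to_target)) > 0.0
print(in_front)   # [ True  True False False]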
saltstack/salt
salt/states/win_powercfg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_powercfg.py#L40-L150
def set_timeout(name, value, power='ac', scheme=None): ''' Set the sleep timeouts of specific items such as disk, monitor, etc. Args: name (str) The setting to change, can be one of the following: - ``monitor`` - ``disk`` - ``standby`` - ``hibernate`` value (int): The amount of time in minutes before the item will timeout power (str): Set the value for AC or DC power. Default is ``ac``. Valid options are: - ``ac`` (AC Power) - ``dc`` (Battery) scheme (str): The scheme to use, leave as ``None`` to use the current. Default is ``None``. This can be the GUID or the Alias for the Scheme. Known Aliases are: - ``SCHEME_BALANCED`` - Balanced - ``SCHEME_MAX`` - Power saver - ``SCHEME_MIN`` - High performance CLI Example: .. code-block:: yaml # Set monitor timeout to 30 minutes on Battery monitor: powercfg.set_timeout: - value: 30 - power: dc # Set disk timeout to 10 minutes on AC Power disk: powercfg.set_timeout: - value: 10 - power: ac ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Validate name values name = name.lower() if name not in ['monitor', 'disk', 'standby', 'hibernate']: ret['result'] = False ret['comment'] = '"{0}" is not a valid setting'.format(name) log.debug(ret['comment']) return ret # Validate power values power = power.lower() if power not in ['ac', 'dc']: ret['result'] = False ret['comment'] = '"{0}" is not a power type'.format(power) log.debug(ret['comment']) return ret # Get current settings old = __salt__['powercfg.get_{0}_timeout'.format(name)](scheme=scheme) # Check current settings if old[power] == value: ret['comment'] = '{0} timeout on {1} power is already set to {2}' \ ''.format(name.capitalize(), power.upper(), value) return ret else: ret['comment'] = '{0} timeout on {1} power will be set to {2}' \ ''.format(name.capitalize(), power.upper(), value) # Check for test=True if __opts__['test']: ret['result'] = None return ret # Set the timeout value __salt__['powercfg.set_{0}_timeout'.format(name)]( timeout=value, power=power, scheme=scheme) # Get the setting after the change new = __salt__['powercfg.get_{0}_timeout'.format(name)](scheme=scheme) changes = salt.utils.data.compare_dicts(old, new) if changes: ret['changes'] = {name: changes} ret['comment'] = '{0} timeout on {1} power set to {2}' \ ''.format(name.capitalize(), power.upper(), value) log.debug(ret['comment']) else: ret['changes'] = {} ret['comment'] = 'Failed to set {0} timeout on {1} power to {2}' \ ''.format(name, power.upper(), value) log.debug(ret['comment']) ret['result'] = False return ret
[ "def", "set_timeout", "(", "name", ",", "value", ",", "power", "=", "'ac'", ",", "scheme", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "# Validate name values", "name", "=", "name", ".", "lower", "(", ")", "if", "name", "not", "in", "[", "'monitor'", ",", "'disk'", ",", "'standby'", ",", "'hibernate'", "]", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'\"{0}\" is not a valid setting'", ".", "format", "(", "name", ")", "log", ".", "debug", "(", "ret", "[", "'comment'", "]", ")", "return", "ret", "# Validate power values", "power", "=", "power", ".", "lower", "(", ")", "if", "power", "not", "in", "[", "'ac'", ",", "'dc'", "]", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'\"{0}\" is not a power type'", ".", "format", "(", "power", ")", "log", ".", "debug", "(", "ret", "[", "'comment'", "]", ")", "return", "ret", "# Get current settings", "old", "=", "__salt__", "[", "'powercfg.get_{0}_timeout'", ".", "format", "(", "name", ")", "]", "(", "scheme", "=", "scheme", ")", "# Check current settings", "if", "old", "[", "power", "]", "==", "value", ":", "ret", "[", "'comment'", "]", "=", "'{0} timeout on {1} power is already set to {2}'", "''", ".", "format", "(", "name", ".", "capitalize", "(", ")", ",", "power", ".", "upper", "(", ")", ",", "value", ")", "return", "ret", "else", ":", "ret", "[", "'comment'", "]", "=", "'{0} timeout on {1} power will be set to {2}'", "''", ".", "format", "(", "name", ".", "capitalize", "(", ")", ",", "power", ".", "upper", "(", ")", ",", "value", ")", "# Check for test=True", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "# Set the timeout value", "__salt__", "[", "'powercfg.set_{0}_timeout'", ".", "format", "(", "name", ")", "]", "(", "timeout", "=", "value", ",", "power", "=", "power", ",", "scheme", "=", "scheme", ")", "# Get the setting after the change", "new", "=", "__salt__", "[", "'powercfg.get_{0}_timeout'", ".", "format", "(", "name", ")", "]", "(", "scheme", "=", "scheme", ")", "changes", "=", "salt", ".", "utils", ".", "data", ".", "compare_dicts", "(", "old", ",", "new", ")", "if", "changes", ":", "ret", "[", "'changes'", "]", "=", "{", "name", ":", "changes", "}", "ret", "[", "'comment'", "]", "=", "'{0} timeout on {1} power set to {2}'", "''", ".", "format", "(", "name", ".", "capitalize", "(", ")", ",", "power", ".", "upper", "(", ")", ",", "value", ")", "log", ".", "debug", "(", "ret", "[", "'comment'", "]", ")", "else", ":", "ret", "[", "'changes'", "]", "=", "{", "}", "ret", "[", "'comment'", "]", "=", "'Failed to set {0} timeout on {1} power to {2}'", "''", ".", "format", "(", "name", ",", "power", ".", "upper", "(", ")", ",", "value", ")", "log", ".", "debug", "(", "ret", "[", "'comment'", "]", ")", "ret", "[", "'result'", "]", "=", "False", "return", "ret" ]
Set the sleep timeouts of specific items such as disk, monitor, etc. Args: name (str) The setting to change, can be one of the following: - ``monitor`` - ``disk`` - ``standby`` - ``hibernate`` value (int): The amount of time in minutes before the item will timeout power (str): Set the value for AC or DC power. Default is ``ac``. Valid options are: - ``ac`` (AC Power) - ``dc`` (Battery) scheme (str): The scheme to use, leave as ``None`` to use the current. Default is ``None``. This can be the GUID or the Alias for the Scheme. Known Aliases are: - ``SCHEME_BALANCED`` - Balanced - ``SCHEME_MAX`` - Power saver - ``SCHEME_MIN`` - High performance CLI Example: .. code-block:: yaml # Set monitor timeout to 30 minutes on Battery monitor: powercfg.set_timeout: - value: 30 - power: dc # Set disk timeout to 10 minutes on AC Power disk: powercfg.set_timeout: - value: 10 - power: ac
[ "Set", "the", "sleep", "timeouts", "of", "specific", "items", "such", "as", "disk", "monitor", "etc", "." ]
python
train
29.216216
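This record follows Salt's standard state protocol: return early when the value already matches, return result=None under test=True, otherwise apply and report a changes diff. A distilled stub of that protocol (the real state delegates to the powercfg execution module):

# Distilled Salt-state return protocol with a stubbed backend.
current = {"ac": 15}

def set_timeout_stub(name, value, power="ac", test=False):
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    if current[power] == value:
        ret["comment"] = "already set"
        return ret
    if test:
        ret["result"] = None           # Salt's "would have changed" signal
        ret["comment"] = "will be set"
        return ret
    old, current[power] = current[power], value
    ret["changes"] = {name: {"old": old, "new": value}}
    return ret

print(set_timeout_stub("monitor", 30, test=True))
print(set_timeout_stub("monitor", 30))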
berkerpeksag/astor
astor/node_util.py
https://github.com/berkerpeksag/astor/blob/d9e893eb49d9eb2e30779680f90cd632c30e0ba1/astor/node_util.py#L174-L208
def fast_compare(tree1, tree2): """ This is optimized to compare two AST trees for equality. It makes several assumptions that are currently true for AST trees used by rtrip, and it doesn't examine the _attributes. """ geta = ast.AST.__getattribute__ work = [(tree1, tree2)] pop = work.pop extend = work.extend # TypeError in cPython, AttributeError in PyPy exception = TypeError, AttributeError zipl = zip_longest type_ = type list_ = list while work: n1, n2 = pop() try: f1 = geta(n1, '_fields') f2 = geta(n2, '_fields') except exception: if type_(n1) is list_: extend(zipl(n1, n2)) continue if n1 == n2: continue return False else: f1 = [x for x in f1 if x != 'ctx'] if f1 != [x for x in f2 if x != 'ctx']: return False extend((geta(n1, fname), geta(n2, fname)) for fname in f1) return True
[ "def", "fast_compare", "(", "tree1", ",", "tree2", ")", ":", "geta", "=", "ast", ".", "AST", ".", "__getattribute__", "work", "=", "[", "(", "tree1", ",", "tree2", ")", "]", "pop", "=", "work", ".", "pop", "extend", "=", "work", ".", "extend", "# TypeError in cPython, AttributeError in PyPy", "exception", "=", "TypeError", ",", "AttributeError", "zipl", "=", "zip_longest", "type_", "=", "type", "list_", "=", "list", "while", "work", ":", "n1", ",", "n2", "=", "pop", "(", ")", "try", ":", "f1", "=", "geta", "(", "n1", ",", "'_fields'", ")", "f2", "=", "geta", "(", "n2", ",", "'_fields'", ")", "except", "exception", ":", "if", "type_", "(", "n1", ")", "is", "list_", ":", "extend", "(", "zipl", "(", "n1", ",", "n2", ")", ")", "continue", "if", "n1", "==", "n2", ":", "continue", "return", "False", "else", ":", "f1", "=", "[", "x", "for", "x", "in", "f1", "if", "x", "!=", "'ctx'", "]", "if", "f1", "!=", "[", "x", "for", "x", "in", "f2", "if", "x", "!=", "'ctx'", "]", ":", "return", "False", "extend", "(", "(", "geta", "(", "n1", ",", "fname", ")", ",", "geta", "(", "n2", ",", "fname", ")", ")", "for", "fname", "in", "f1", ")", "return", "True" ]
This is optimized to compare two AST trees for equality. It makes several assumptions that are currently true for AST trees used by rtrip, and it doesn't examine the _attributes.
[ "This", "is", "optimized", "to", "compare", "two", "AST", "trees", "for", "equality", ".", "It", "makes", "several", "assumptions", "that", "are", "currently", "true", "for", "AST", "trees", "used", "by", "rtrip", "and", "it", "doesn", "t", "examine", "the", "_attributes", "." ]
python
train
29.2
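A quick usage check, assuming astor is installed and fast_compare is importable from astor.node_util as the record's path suggests:

# ctx nodes are ignored, so structurally identical trees compare equal.
import ast
from astor.node_util import fast_compare

t1 = ast.parse("x = y + 1")
t2 = ast.parse("x = y + 1")
t3 = ast.parse("x = y + 2")

print(fast_compare(t1, t2))   # True  -- structurally identical trees
print(fast_compare(t1, t3))   # False -- the constant differs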
jic-dtool/dtool-irods
dtool_irods/__init__.py
https://github.com/jic-dtool/dtool-irods/blob/65da4ebc7f71dc04e93698c154fdaa89064e17e8/dtool_irods/__init__.py#L24-L38
def _call_cmd_line(self):
    """Run the command line tool."""
    try:
        logging.info("Calling Popen with: {}".format(self.args))
        p = Popen(self.args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except OSError:
        raise(RuntimeError("No such command found in PATH"))

    # Calling this command with newline as stdin as the
    # iCommands hangs waiting for user input if the password
    # has not been set or has timed out.
    self.stdout, self.stderr = p.communicate("\n".encode())
    self.stdout = self.stdout.decode("utf-8")
    self.stderr = self.stderr.decode("utf-8")
    self.returncode = p.returncode
[ "def", "_call_cmd_line", "(", "self", ")", ":", "try", ":", "logging", ".", "info", "(", "\"Calling Popen with: {}\"", ".", "format", "(", "self", ".", "args", ")", ")", "p", "=", "Popen", "(", "self", ".", "args", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "except", "OSError", ":", "raise", "(", "RuntimeError", "(", "\"No such command found in PATH\"", ")", ")", "# Calling this command with newline as stdin as the", "# iCommnads hangs waiting for user input if the password", "# has not been set or has timed out.", "self", ".", "stdout", ",", "self", ".", "stderr", "=", "p", ".", "communicate", "(", "\"\\n\"", ".", "encode", "(", ")", ")", "self", ".", "stdout", "=", "self", ".", "stdout", ".", "decode", "(", "\"utf-8\"", ")", "self", ".", "stderr", "=", "self", ".", "stderr", ".", "decode", "(", "\"utf-8\"", ")", "self", ".", "returncode", "=", "p", ".", "returncode" ]
Run the command line tool.
[ "Run", "the", "command", "line", "tool", "." ]
python
train
44.533333
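The newline-on-stdin trick can be demonstrated with a portable stand-in subprocess instead of an iCommand:

# Same Popen pattern; the child here is a harmless Python one-liner.
import sys
from subprocess import PIPE, Popen

args = [sys.executable, "-c",
        "import sys; sys.stdout.write(sys.stdin.readline())"]
p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate("\n".encode())   # feed a newline so reads never hang
print(repr(stdout.decode("utf-8")), p.returncode)   # typically '\n' 0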
openstack/proliantutils
proliantutils/ilo/common.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/common.py#L37-L81
def wait_for_operation_to_complete( has_operation_completed, retries=10, delay_bw_retries=5, delay_before_attempts=10, failover_exc=exception.IloError, failover_msg=("Operation did not complete even after multiple " "attempts."), is_silent_loop_exit=False): """Attempts the provided operation for a specified number of times. If it runs out of attempts, then it raises an exception. On success, it breaks out of the loop. :param has_operation_completed: the method to retry and it needs to return a boolean to indicate success or failure. :param retries: number of times the operation to be (re)tried, default 10 :param delay_bw_retries: delay in seconds before attempting after each failure, default 5. :param delay_before_attempts: delay in seconds before beginning any operation attempt, default 10. :param failover_exc: the exception which gets raised in case of failure upon exhausting all the attempts, default IloError. :param failover_msg: the msg with which the exception gets raised in case of failure upon exhausting all the attempts. :param is_silent_loop_exit: decides if exception has to be raised (in case of failure upon exhausting all the attempts) or not, default False (will be raised). :raises: failover_exc, if failure happens even after all the attempts, default IloError. """ retry_count = retries # Delay for ``delay_before_attempts`` secs, before beginning any attempt time.sleep(delay_before_attempts) while retry_count: try: LOG.debug("Calling '%s', retries left: %d", has_operation_completed.__name__, retry_count) if has_operation_completed(): break except exception.IloError: pass time.sleep(delay_bw_retries) retry_count -= 1 else: LOG.debug("Max retries exceeded with: '%s'", has_operation_completed.__name__) if not is_silent_loop_exit: raise failover_exc(failover_msg)
[ "def", "wait_for_operation_to_complete", "(", "has_operation_completed", ",", "retries", "=", "10", ",", "delay_bw_retries", "=", "5", ",", "delay_before_attempts", "=", "10", ",", "failover_exc", "=", "exception", ".", "IloError", ",", "failover_msg", "=", "(", "\"Operation did not complete even after multiple \"", "\"attempts.\"", ")", ",", "is_silent_loop_exit", "=", "False", ")", ":", "retry_count", "=", "retries", "# Delay for ``delay_before_attempts`` secs, before beginning any attempt", "time", ".", "sleep", "(", "delay_before_attempts", ")", "while", "retry_count", ":", "try", ":", "LOG", ".", "debug", "(", "\"Calling '%s', retries left: %d\"", ",", "has_operation_completed", ".", "__name__", ",", "retry_count", ")", "if", "has_operation_completed", "(", ")", ":", "break", "except", "exception", ".", "IloError", ":", "pass", "time", ".", "sleep", "(", "delay_bw_retries", ")", "retry_count", "-=", "1", "else", ":", "LOG", ".", "debug", "(", "\"Max retries exceeded with: '%s'\"", ",", "has_operation_completed", ".", "__name__", ")", "if", "not", "is_silent_loop_exit", ":", "raise", "failover_exc", "(", "failover_msg", ")" ]
Attempts the provided operation for a specified number of times. If it runs out of attempts, then it raises an exception. On success, it breaks out of the loop. :param has_operation_completed: the method to retry and it needs to return a boolean to indicate success or failure. :param retries: number of times the operation to be (re)tried, default 10 :param delay_bw_retries: delay in seconds before attempting after each failure, default 5. :param delay_before_attempts: delay in seconds before beginning any operation attempt, default 10. :param failover_exc: the exception which gets raised in case of failure upon exhausting all the attempts, default IloError. :param failover_msg: the msg with which the exception gets raised in case of failure upon exhausting all the attempts. :param is_silent_loop_exit: decides if exception has to be raised (in case of failure upon exhausting all the attempts) or not, default False (will be raised). :raises: failover_exc, if failure happens even after all the attempts, default IloError.
[ "Attempts", "the", "provided", "operation", "for", "a", "specified", "number", "of", "times", "." ]
python
train
50.044444
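A minimal usage sketch for the retry helper above; the predicate name and the get_power_state() call are assumptions, not part of the source.

def _power_is_on():
    # hypothetical check; get_power_state() is assumed to exist elsewhere
    return get_power_state() == 'ON'

wait_for_operation_to_complete(
    _power_is_on,
    retries=5,
    delay_bw_retries=2,
    failover_msg='Server did not power on even after multiple attempts.')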
kapadia/usgs
usgs/api.py
https://github.com/kapadia/usgs/blob/0608346f0bc3c34e20f4ecc77ad71d0b514db7ee/usgs/api.py#L217-L244
def metadata(dataset, node, entityids, extended=False, api_key=None): """ Request metadata for a given scene in a USGS dataset. :param dataset: :param node: :param entityids: :param extended: Send a second request to the metadata url to get extended metadata on the scene. :param api_key: """ api_key = _get_api_key(api_key) url = '{}/metadata'.format(USGS_API) payload = { "jsonRequest": payloads.metadata(dataset, node, entityids, api_key=api_key) } r = requests.post(url, payload) response = r.json() _check_for_usgs_error(response) if extended: metadata_urls = map(_get_metadata_url, response['data']) results = _async_requests(metadata_urls) data = map(lambda idx: _get_extended(response['data'][idx], results[idx]), range(len(response['data']))) return response
[ "def", "metadata", "(", "dataset", ",", "node", ",", "entityids", ",", "extended", "=", "False", ",", "api_key", "=", "None", ")", ":", "api_key", "=", "_get_api_key", "(", "api_key", ")", "url", "=", "'{}/metadata'", ".", "format", "(", "USGS_API", ")", "payload", "=", "{", "\"jsonRequest\"", ":", "payloads", ".", "metadata", "(", "dataset", ",", "node", ",", "entityids", ",", "api_key", "=", "api_key", ")", "}", "r", "=", "requests", ".", "post", "(", "url", ",", "payload", ")", "response", "=", "r", ".", "json", "(", ")", "_check_for_usgs_error", "(", "response", ")", "if", "extended", ":", "metadata_urls", "=", "map", "(", "_get_metadata_url", ",", "response", "[", "'data'", "]", ")", "results", "=", "_async_requests", "(", "metadata_urls", ")", "data", "=", "map", "(", "lambda", "idx", ":", "_get_extended", "(", "response", "[", "'data'", "]", "[", "idx", "]", ",", "results", "[", "idx", "]", ")", ",", "range", "(", "len", "(", "response", "[", "'data'", "]", ")", ")", ")", "return", "response" ]
Request metadata for a given scene in a USGS dataset. :param dataset: :param node: :param entityids: :param extended: Send a second request to the metadata url to get extended metadata on the scene. :param api_key:
[ "Request", "metadata", "for", "a", "given", "scene", "in", "a", "USGS", "dataset", "." ]
python
train
30.464286
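A hedged usage sketch for the metadata record above; the dataset name, node name, and scene id are illustrative assumptions.

from usgs import api

response = api.metadata(
    'LANDSAT_8_C1',             # dataset (assumed)
    'EE',                       # Earth Explorer node (assumed)
    ['LC80290302016001LGN00'],  # scene id (assumed)
    extended=True)              # also fetch per-scene extended metadata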
google/transitfeed
merge.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L283-L350
def WriteOutput(self, output_file, feed_merger, old_feed_path, new_feed_path, merged_feed_path): """Write the HTML output to a file. Args: output_file: The file object that the HTML output will be written to. feed_merger: The FeedMerger instance. old_feed_path: The path to the old feed file as a string. new_feed_path: The path to the new feed file as a string merged_feed_path: The path to the merged feed file as a string. This may be None if no merged feed was written. """ if merged_feed_path is None: html_merged_feed_path = '' else: html_merged_feed_path = '<p>Merged feed created: <code>%s</code></p>' % ( merged_feed_path) html_header = """<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> <title>Feed Merger Results</title> <style> body {font-family: Georgia, serif; background-color: white} .path {color: gray} div.problem {max-width: 500px} td,th {background-color: khaki; padding: 2px; font-family:monospace} td.problem,th.problem {background-color: dc143c; color: white; padding: 2px; font-family:monospace} table {border-spacing: 5px 0px; margin-top: 3px} h3.issueHeader {padding-left: 1em} .notice {background-color: yellow} span.pass {background-color: lightgreen} span.fail {background-color: yellow} .pass, .fail {font-size: 16pt; padding: 3px} ol,.unused {padding-left: 40pt} .header {background-color: white; font-family: Georgia, serif; padding: 0px} th.header {text-align: right; font-weight: normal; color: gray} .footer {font-size: 10pt} </style> </head> <body> <h1>Feed merger results</h1> <p>Old feed: <code>%(old_feed_path)s</code></p> <p>New feed: <code>%(new_feed_path)s</code></p> %(html_merged_feed_path)s""" % locals() html_stats = self._GenerateStatsTable(feed_merger) html_summary = self._GenerateSummary() html_notices = self._GenerateNotices() html_errors = self._GenerateSection(transitfeed.TYPE_ERROR) html_warnings = self._GenerateSection(transitfeed.TYPE_WARNING) html_footer = """ <div class="footer"> Generated using transitfeed version %s on %s. </div> </body> </html>""" % (transitfeed.__version__, time.strftime('%B %d, %Y at %I:%M %p %Z')) output_file.write(transitfeed.EncodeUnicode(html_header)) output_file.write(transitfeed.EncodeUnicode(html_stats)) output_file.write(transitfeed.EncodeUnicode(html_summary)) output_file.write(transitfeed.EncodeUnicode(html_notices)) output_file.write(transitfeed.EncodeUnicode(html_errors)) output_file.write(transitfeed.EncodeUnicode(html_warnings)) output_file.write(transitfeed.EncodeUnicode(html_footer))
[ "def", "WriteOutput", "(", "self", ",", "output_file", ",", "feed_merger", ",", "old_feed_path", ",", "new_feed_path", ",", "merged_feed_path", ")", ":", "if", "merged_feed_path", "is", "None", ":", "html_merged_feed_path", "=", "''", "else", ":", "html_merged_feed_path", "=", "'<p>Merged feed created: <code>%s</code></p>'", "%", "(", "merged_feed_path", ")", "html_header", "=", "\"\"\"<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\"/>\n<title>Feed Merger Results</title>\n<style>\n body {font-family: Georgia, serif; background-color: white}\n .path {color: gray}\n div.problem {max-width: 500px}\n td,th {background-color: khaki; padding: 2px; font-family:monospace}\n td.problem,th.problem {background-color: dc143c; color: white; padding: 2px;\n font-family:monospace}\n table {border-spacing: 5px 0px; margin-top: 3px}\n h3.issueHeader {padding-left: 1em}\n .notice {background-color: yellow}\n span.pass {background-color: lightgreen}\n span.fail {background-color: yellow}\n .pass, .fail {font-size: 16pt; padding: 3px}\n ol,.unused {padding-left: 40pt}\n .header {background-color: white; font-family: Georgia, serif; padding: 0px}\n th.header {text-align: right; font-weight: normal; color: gray}\n .footer {font-size: 10pt}\n</style>\n</head>\n<body>\n<h1>Feed merger results</h1>\n<p>Old feed: <code>%(old_feed_path)s</code></p>\n<p>New feed: <code>%(new_feed_path)s</code></p>\n%(html_merged_feed_path)s\"\"\"", "%", "locals", "(", ")", "html_stats", "=", "self", ".", "_GenerateStatsTable", "(", "feed_merger", ")", "html_summary", "=", "self", ".", "_GenerateSummary", "(", ")", "html_notices", "=", "self", ".", "_GenerateNotices", "(", ")", "html_errors", "=", "self", ".", "_GenerateSection", "(", "transitfeed", ".", "TYPE_ERROR", ")", "html_warnings", "=", "self", ".", "_GenerateSection", "(", "transitfeed", ".", "TYPE_WARNING", ")", "html_footer", "=", "\"\"\"\n<div class=\"footer\">\nGenerated using transitfeed version %s on %s.\n</div>\n</body>\n</html>\"\"\"", "%", "(", "transitfeed", ".", "__version__", ",", "time", ".", "strftime", "(", "'%B %d, %Y at %I:%M %p %Z'", ")", ")", "output_file", ".", "write", "(", "transitfeed", ".", "EncodeUnicode", "(", "html_header", ")", ")", "output_file", ".", "write", "(", "transitfeed", ".", "EncodeUnicode", "(", "html_stats", ")", ")", "output_file", ".", "write", "(", "transitfeed", ".", "EncodeUnicode", "(", "html_summary", ")", ")", "output_file", ".", "write", "(", "transitfeed", ".", "EncodeUnicode", "(", "html_notices", ")", ")", "output_file", ".", "write", "(", "transitfeed", ".", "EncodeUnicode", "(", "html_errors", ")", ")", "output_file", ".", "write", "(", "transitfeed", ".", "EncodeUnicode", "(", "html_warnings", ")", ")", "output_file", ".", "write", "(", "transitfeed", ".", "EncodeUnicode", "(", "html_footer", ")", ")" ]
Write the HTML output to a file. Args: output_file: The file object that the HTML output will be written to. feed_merger: The FeedMerger instance. old_feed_path: The path to the old feed file as a string. new_feed_path: The path to the new feed file as a string merged_feed_path: The path to the merged feed file as a string. This may be None if no merged feed was written.
[ "Write", "the", "HTML", "output", "to", "a", "file", "." ]
python
train
39.779412
wglass/lighthouse
lighthouse/haproxy/stanzas/stanza.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/stanzas/stanza.py#L50-L64
def is_valid_line(self, line): """ Validates a given line against the associated "section" (e.g. 'global' or 'frontend', etc.) of a stanza. If a line represents a directive that shouldn't be within the stanza it is rejected. See the `directives.json` file for a condensed look at valid directives based on section. """ adjusted_line = line.strip().lower() return any([ adjusted_line.startswith(directive) for directive in directives_by_section[self.section_name] ])
[ "def", "is_valid_line", "(", "self", ",", "line", ")", ":", "adjusted_line", "=", "line", ".", "strip", "(", ")", ".", "lower", "(", ")", "return", "any", "(", "[", "adjusted_line", ".", "startswith", "(", "directive", ")", "for", "directive", "in", "directives_by_section", "[", "self", ".", "section_name", "]", "]", ")" ]
Validates a given line against the associated "section" (e.g. 'global' or 'frontend', etc.) of a stanza. If a line represents a directive that shouldn't be within the stanza it is rejected. See the `directives.json` file for a condensed look at valid directives based on section.
[ "Validates", "a", "given", "line", "against", "the", "associated", "section", "(", "e", ".", "g", ".", "global", "or", "frontend", "etc", ".", ")", "of", "a", "stanza", "." ]
python
train
37.2
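A small sketch of the validation above; the Stanza construction is hypothetical since the constructor is not shown in this record.

stanza = Stanza()                      # hypothetical instantiation
stanza.section_name = 'frontend'       # assumed to be set by the subclass
stanza.is_valid_line('bind *:8080')    # True if 'bind' is listed for frontends in directives.json
stanza.is_valid_line('no-such-kw x')   # False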
cocagne/txdbus
txdbus/interface.py
https://github.com/cocagne/txdbus/blob/eb424918764b7b93eecd2a4e2e5c2d0b2944407b/txdbus/interface.py#L162-L170
def addMethod(self, m): """ Adds a L{Method} to the interface """ if m.nargs == -1: m.nargs = len([a for a in marshal.genCompleteTypes(m.sigIn)]) m.nret = len([a for a in marshal.genCompleteTypes(m.sigOut)]) self.methods[m.name] = m self._xml = None
[ "def", "addMethod", "(", "self", ",", "m", ")", ":", "if", "m", ".", "nargs", "==", "-", "1", ":", "m", ".", "nargs", "=", "len", "(", "[", "a", "for", "a", "in", "marshal", ".", "genCompleteTypes", "(", "m", ".", "sigIn", ")", "]", ")", "m", ".", "nret", "=", "len", "(", "[", "a", "for", "a", "in", "marshal", ".", "genCompleteTypes", "(", "m", ".", "sigOut", ")", "]", ")", "self", ".", "methods", "[", "m", ".", "name", "]", "=", "m", "self", ".", "_xml", "=", "None" ]
Adds a L{Method} to the interface
[ "Adds", "a", "L", "{", "Method", "}", "to", "the", "interface" ]
python
train
34.777778
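A usage sketch based on txdbus's documented interface objects; treat the exact constructor signatures as assumptions.

from txdbus import interface

iface = interface.DBusInterface(
    'org.example.Calculator',
    interface.Method('Add', arguments='ii', returns='i'))  # addMethod is invoked internally for each Method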
hyperledger/indy-node
indy_node/server/restarter.py
https://github.com/hyperledger/indy-node/blob/8fabd364eaf7d940a56df2911d9215b1e512a2de/indy_node/server/restarter.py#L177-L192
def _callRestartAgent(self, ev_data: RestartLogData, failTimeout) -> None:
        """
        Callback which is called when restart time comes. Writes restart record to
        restart log and asks node control service to perform restart

        :param ev_data: restart event data
        :param version: version to restart to
        """
        logger.info("{}'s restart calling agent for restart".format(self))
        self._actionLog.append_started(ev_data)
        self._action_start_callback()
        self.scheduledAction = None
        asyncio.ensure_future(
            self._sendUpdateRequest(ev_data, failTimeout))
[ "def", "_callRestartAgent", "(", "self", ",", "ev_data", ":", "RestartLogData", ",", "failTimeout", ")", "->", "None", ":", "logger", ".", "info", "(", "\"{}'s restart calling agent for restart\"", ".", "format", "(", "self", ")", ")", "self", ".", "_actionLog", ".", "append_started", "(", "ev_data", ")", "self", ".", "_action_start_callback", "(", ")", "self", ".", "scheduledAction", "=", "None", "asyncio", ".", "ensure_future", "(", "self", ".", "_sendUpdateRequest", "(", "ev_data", ",", "failTimeout", ")", ")" ]
Callback which is called when restart time comes. Writes restart record to
restart log and asks node control service to perform restart

:param ev_data: restart event data
:param version: version to restart to
[ "Callback", "which", "is", "called", "when", "restart", "time", "come", ".", "Writes", "restart", "record", "to", "restart", "log", "and", "asks", "node", "control", "service", "to", "perform", "restart" ]
python
train
38.75
FutunnOpen/futuquant
futuquant/quote/open_quote_context.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/quote/open_quote_context.py#L40-L97
def on_api_socket_reconnected(self):
        """for API socket reconnected"""
        # auto subscriber
        resub_count = 0
        subtype_list = []
        code_list = []
        resub_dict = copy(self._ctx_subscribe)
        subtype_all_cnt = len(resub_dict.keys())
        subtype_cur_cnt = 0
        ret_code = RET_OK
        ret_msg = ''
        for subtype in resub_dict.keys():
            subtype_cur_cnt += 1
            code_set = resub_dict[subtype]
            code_list_new = [code for code in code_set]
            if len(code_list_new) == 0:
                continue
            if len(code_list) == 0:
                code_list = code_list_new
                subtype_list = [subtype]
            is_need_sub = False
            if code_list == code_list_new:
                if subtype not in subtype_list:
                    subtype_list.append(subtype)   # merge subtype requests
            else:
                ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
                logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(
                    len(code_list), ret_code, ret_msg, subtype_list, code_list))
                if ret_code != RET_OK:
                    break
                resub_count += len(code_list)
                code_list = code_list_new
                subtype_list = [subtype]
            # the loop is about to finish
            if subtype_cur_cnt == subtype_all_cnt and len(code_list):
                ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)
                logger.debug("reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}".format(len(code_list), ret_code, ret_msg, subtype_list, code_list))
                if ret_code != RET_OK:
                    break
                resub_count += len(code_list)
                code_list = []
                subtype_list = []
        logger.debug("reconnect subscribe all code_count={} ret_code={} ret_msg={}".format(resub_count, ret_code, ret_msg))
        # resubscribe failed, reconnect
        if ret_code != RET_OK:
            logger.error("reconnect subscribe error, close connect and retry!!")
            self._status = ContextStatus.Start
            self._wait_reconnect()
        return ret_code, ret_msg
[ "def", "on_api_socket_reconnected", "(", "self", ")", ":", "# auto subscriber", "resub_count", "=", "0", "subtype_list", "=", "[", "]", "code_list", "=", "[", "]", "resub_dict", "=", "copy", "(", "self", ".", "_ctx_subscribe", ")", "subtype_all_cnt", "=", "len", "(", "resub_dict", ".", "keys", "(", ")", ")", "subtype_cur_cnt", "=", "0", "ret_code", "=", "RET_OK", "ret_msg", "=", "''", "for", "subtype", "in", "resub_dict", ".", "keys", "(", ")", ":", "subtype_cur_cnt", "+=", "1", "code_set", "=", "resub_dict", "[", "subtype", "]", "code_list_new", "=", "[", "code", "for", "code", "in", "code_set", "]", "if", "len", "(", "code_list_new", ")", "==", "0", ":", "continue", "if", "len", "(", "code_list", ")", "==", "0", ":", "code_list", "=", "code_list_new", "subtype_list", "=", "[", "subtype", "]", "is_need_sub", "=", "False", "if", "code_list", "==", "code_list_new", ":", "if", "subtype", "not", "in", "subtype_list", ":", "subtype_list", ".", "append", "(", "subtype", ")", "# 合并subtype请求", "else", ":", "ret_code", ",", "ret_msg", "=", "self", ".", "_reconnect_subscribe", "(", "code_list", ",", "subtype_list", ")", "logger", ".", "debug", "(", "\"reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}\"", ".", "format", "(", "len", "(", "code_list", ")", ",", "ret_code", ",", "ret_msg", ",", "subtype_list", ",", "code_list", ")", ")", "if", "ret_code", "!=", "RET_OK", ":", "break", "resub_count", "+=", "len", "(", "code_list", ")", "code_list", "=", "code_list_new", "subtype_list", "=", "[", "subtype", "]", "# 循环即将结束", "if", "subtype_cur_cnt", "==", "subtype_all_cnt", "and", "len", "(", "code_list", ")", ":", "ret_code", ",", "ret_msg", "=", "self", ".", "_reconnect_subscribe", "(", "code_list", ",", "subtype_list", ")", "logger", ".", "debug", "(", "\"reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}\"", ".", "format", "(", "len", "(", "code_list", ")", ",", "ret_code", ",", "ret_msg", ",", "subtype_list", ",", "code_list", ")", ")", "if", "ret_code", "!=", "RET_OK", ":", "break", "resub_count", "+=", "len", "(", "code_list", ")", "code_list", "=", "[", "]", "subtype_list", "=", "[", "]", "logger", ".", "debug", "(", "\"reconnect subscribe all code_count={} ret_code={} ret_msg={}\"", ".", "format", "(", "resub_count", ",", "ret_code", ",", "ret_msg", ")", ")", "# 重定阅失败,重连", "if", "ret_code", "!=", "RET_OK", ":", "logger", ".", "error", "(", "\"reconnect subscribe error, close connect and retry!!\"", ")", "self", ".", "_status", "=", "ContextStatus", ".", "Start", "self", ".", "_wait_reconnect", "(", ")", "return", "ret_code", ",", "ret_msg" ]
for API socket reconnected
[ "for", "API", "socket", "reconnected" ]
python
train
38.948276
orb-framework/orb
orb/core/connection_types/sql/mysql/mysqlconnection.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/connection_types/sql/mysql/mysqlconnection.py#L40-L105
def _execute(self, native, command, data=None, returning=True, mapper=dict): """ Executes the inputted command into the current \ connection cursor. :param command | <str> data | <dict> || None :return [{<str> key: <variant>, ..}, ..], <int> count """ if data is None: data = {} with native.cursor() as cursor: log.debug('***********************') log.debug(command % data) log.debug('***********************') try: rowcount = 0 for cmd in command.split(';'): cmd = cmd.strip() if cmd: cursor.execute(cmd.strip(';') + ';', data) rowcount += cursor.rowcount # look for a disconnection error except pymysql.InterfaceError: raise orb.errors.ConnectionLost() # look for integrity errors except (pymysql.IntegrityError, pymysql.OperationalError) as err: native.rollback() # look for a duplicate error if err[0] == 1062: raise orb.errors.DuplicateEntryFound(err[1]) # look for a reference error reference_error = re.search('Key .* is still referenced from table ".*"', nstr(err)) if reference_error: msg = 'Cannot remove this record, it is still being referenced.' raise orb.errors.CannotDelete(msg) # unknown error log.debug(traceback.print_exc()) raise orb.errors.QueryFailed(command, data, nstr(err)) # connection has closed underneath the hood except pymysql.Error as err: native.rollback() log.error(traceback.print_exc()) raise orb.errors.QueryFailed(command, data, nstr(err)) try: raw = cursor.fetchall() results = [mapper(record) for record in raw] except pymysql.ProgrammingError: results = [] return results, rowcount
[ "def", "_execute", "(", "self", ",", "native", ",", "command", ",", "data", "=", "None", ",", "returning", "=", "True", ",", "mapper", "=", "dict", ")", ":", "if", "data", "is", "None", ":", "data", "=", "{", "}", "with", "native", ".", "cursor", "(", ")", "as", "cursor", ":", "log", ".", "debug", "(", "'***********************'", ")", "log", ".", "debug", "(", "command", "%", "data", ")", "log", ".", "debug", "(", "'***********************'", ")", "try", ":", "rowcount", "=", "0", "for", "cmd", "in", "command", ".", "split", "(", "';'", ")", ":", "cmd", "=", "cmd", ".", "strip", "(", ")", "if", "cmd", ":", "cursor", ".", "execute", "(", "cmd", ".", "strip", "(", "';'", ")", "+", "';'", ",", "data", ")", "rowcount", "+=", "cursor", ".", "rowcount", "# look for a disconnection error", "except", "pymysql", ".", "InterfaceError", ":", "raise", "orb", ".", "errors", ".", "ConnectionLost", "(", ")", "# look for integrity errors", "except", "(", "pymysql", ".", "IntegrityError", ",", "pymysql", ".", "OperationalError", ")", "as", "err", ":", "native", ".", "rollback", "(", ")", "# look for a duplicate error", "if", "err", "[", "0", "]", "==", "1062", ":", "raise", "orb", ".", "errors", ".", "DuplicateEntryFound", "(", "err", "[", "1", "]", ")", "# look for a reference error", "reference_error", "=", "re", ".", "search", "(", "'Key .* is still referenced from table \".*\"'", ",", "nstr", "(", "err", ")", ")", "if", "reference_error", ":", "msg", "=", "'Cannot remove this record, it is still being referenced.'", "raise", "orb", ".", "errors", ".", "CannotDelete", "(", "msg", ")", "# unknown error", "log", ".", "debug", "(", "traceback", ".", "print_exc", "(", ")", ")", "raise", "orb", ".", "errors", ".", "QueryFailed", "(", "command", ",", "data", ",", "nstr", "(", "err", ")", ")", "# connection has closed underneath the hood", "except", "pymysql", ".", "Error", "as", "err", ":", "native", ".", "rollback", "(", ")", "log", ".", "error", "(", "traceback", ".", "print_exc", "(", ")", ")", "raise", "orb", ".", "errors", ".", "QueryFailed", "(", "command", ",", "data", ",", "nstr", "(", "err", ")", ")", "try", ":", "raw", "=", "cursor", ".", "fetchall", "(", ")", "results", "=", "[", "mapper", "(", "record", ")", "for", "record", "in", "raw", "]", "except", "pymysql", ".", "ProgrammingError", ":", "results", "=", "[", "]", "return", "results", ",", "rowcount" ]
Executes the inputted command into the current \ connection cursor. :param command | <str> data | <dict> || None :return [{<str> key: <variant>, ..}, ..], <int> count
[ "Executes", "the", "inputted", "command", "into", "the", "current", "\\", "connection", "cursor", "." ]
python
train
34.227273
ceph/ceph-deploy
ceph_deploy/hosts/remotes.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L384-L398
def zeroing(dev):
    """ zeroing last few blocks of device """
    # this kills the crab
    #
    # sgdisk will wipe out the main copy of the GPT partition
    # table (sorry), but it doesn't remove the backup copies, and
    # subsequent commands will continue to complain and fail when
    # they see those. zeroing the last few blocks of the device
    # appears to do the trick.
    lba_size = 4096
    size = 33 * lba_size
    with open(dev, 'wb') as f:
        f.seek(-size, os.SEEK_END)
        f.write(size*b'\0')
[ "def", "zeroing", "(", "dev", ")", ":", "# this kills the crab", "#", "# sgdisk will wipe out the main copy of the GPT partition", "# table (sorry), but it doesn't remove the backup copies, and", "# subsequent commands will continue to complain and fail when", "# they see those. zeroing the last few blocks of the device", "# appears to do the trick.", "lba_size", "=", "4096", "size", "=", "33", "*", "lba_size", "return", "True", "with", "open", "(", "dev", ",", "'wb'", ")", "as", "f", ":", "f", ".", "seek", "(", "-", "size", ",", "os", ".", "SEEK_END", ")", "f", ".", "write", "(", "size", "*", "b'\\0'", ")" ]
zeroing last few blocks of device
[ "zeroing", "last", "few", "blocks", "of", "device" ]
python
train
35.066667
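A short sketch of what the call above destroys; the device path is an assumption and the call needs root on a real block device.

lba_size = 4096
size = 33 * lba_size   # 135168 bytes: backup GPT header plus partition entries
# zeroing('/dev/sdb')  # would overwrite the final `size` bytes of the disk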
mikedh/trimesh
trimesh/scene/transforms.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/scene/transforms.py#L255-L298
def get(self, frame_to, frame_from=None): """ Get the transform from one frame to another, assuming they are connected in the transform tree. If the frames are not connected a NetworkXNoPath error will be raised. Parameters --------- frame_from: hashable object, usually a string (eg 'world'). If left as None it will be set to self.base_frame frame_to: hashable object, usually a string (eg 'mesh_0') Returns --------- transform: (4,4) homogenous transformation matrix """ if frame_from is None: frame_from = self.base_frame cache_key = str(frame_from) + ':' + str(frame_to) cached = self._cache[cache_key] if cached is not None: return cached transform = np.eye(4) path = self._get_path(frame_from, frame_to) for i in range(len(path) - 1): data, direction = self.transforms.get_edge_data_direction( path[i], path[i + 1]) matrix = data['matrix'] if direction < 0: matrix = np.linalg.inv(matrix) transform = np.dot(transform, matrix) geometry = None if 'geometry' in self.transforms.node[frame_to]: geometry = self.transforms.node[frame_to]['geometry'] self._cache[cache_key] = (transform, geometry) return transform, geometry
[ "def", "get", "(", "self", ",", "frame_to", ",", "frame_from", "=", "None", ")", ":", "if", "frame_from", "is", "None", ":", "frame_from", "=", "self", ".", "base_frame", "cache_key", "=", "str", "(", "frame_from", ")", "+", "':'", "+", "str", "(", "frame_to", ")", "cached", "=", "self", ".", "_cache", "[", "cache_key", "]", "if", "cached", "is", "not", "None", ":", "return", "cached", "transform", "=", "np", ".", "eye", "(", "4", ")", "path", "=", "self", ".", "_get_path", "(", "frame_from", ",", "frame_to", ")", "for", "i", "in", "range", "(", "len", "(", "path", ")", "-", "1", ")", ":", "data", ",", "direction", "=", "self", ".", "transforms", ".", "get_edge_data_direction", "(", "path", "[", "i", "]", ",", "path", "[", "i", "+", "1", "]", ")", "matrix", "=", "data", "[", "'matrix'", "]", "if", "direction", "<", "0", ":", "matrix", "=", "np", ".", "linalg", ".", "inv", "(", "matrix", ")", "transform", "=", "np", ".", "dot", "(", "transform", ",", "matrix", ")", "geometry", "=", "None", "if", "'geometry'", "in", "self", ".", "transforms", ".", "node", "[", "frame_to", "]", ":", "geometry", "=", "self", ".", "transforms", ".", "node", "[", "frame_to", "]", "[", "'geometry'", "]", "self", ".", "_cache", "[", "cache_key", "]", "=", "(", "transform", ",", "geometry", ")", "return", "transform", ",", "geometry" ]
Get the transform from one frame to another, assuming they are connected in the transform tree. If the frames are not connected a NetworkXNoPath error will be raised. Parameters --------- frame_from: hashable object, usually a string (eg 'world'). If left as None it will be set to self.base_frame frame_to: hashable object, usually a string (eg 'mesh_0') Returns --------- transform: (4,4) homogenous transformation matrix
[ "Get", "the", "transform", "from", "one", "frame", "to", "another", "assuming", "they", "are", "connected", "in", "the", "transform", "tree", "." ]
python
train
32.181818
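A hedged sketch of querying the transform tree above; the frame names assume a scene whose graph already contains them.

matrix, geometry = scene.graph.get('mesh_0')            # from base_frame ('world') to mesh_0
matrix, geometry = scene.graph.get('mesh_0', 'camera')  # from the 'camera' frame instead
# `matrix` is a (4, 4) homogeneous transform; note the method returns a tuple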
JonathonReinhart/scuba
scuba/dockerutil.py
https://github.com/JonathonReinhart/scuba/blob/0244c81ec482d3c60202028bc075621447bc3ad1/scuba/dockerutil.py#L53-L60
def docker_pull(image): '''Pulls an image''' args = ['docker', 'pull', image] # If this fails, the default docker stdout/stderr looks good to the user. ret = call(args) if ret != 0: raise DockerError('Failed to pull image "{}"'.format(image))
[ "def", "docker_pull", "(", "image", ")", ":", "args", "=", "[", "'docker'", ",", "'pull'", ",", "image", "]", "# If this fails, the default docker stdout/stderr looks good to the user.", "ret", "=", "call", "(", "args", ")", "if", "ret", "!=", "0", ":", "raise", "DockerError", "(", "'Failed to pull image \"{}\"'", ".", "format", "(", "image", ")", ")" ]
Pulls an image
[ "Pulls", "an", "image" ]
python
train
33
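A direct usage sketch of the helper above; the image tag is illustrative.

try:
    docker_pull('alpine:3.19')
except DockerError as err:
    print(err)   # 'Failed to pull image "alpine:3.19"'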
insilichem/pychimera
pychimera/core.py
https://github.com/insilichem/pychimera/blob/5227d1c0e9e1ce165fc68157eda788cd0843842b/pychimera/core.py#L279-L318
def run_cli_options(args): """ Quick implementation of Python interpreter's -m, -c and file execution. The resulting dictionary is imported into global namespace, just in case someone is using interactive mode. We try to keep argument order as to pass them correctly to the subcommands. """ if _interactive_mode(args.interactive): os.environ['PYTHONINSPECT'] = '1' if in_ipython(): return exclusive_choices = [[None, args.command], ['-c', args.string], ['-m', args.module]] for flag_choice in exclusive_choices: try: a = sys.argv.index(flag_choice[0] or flag_choice[1]) except ValueError: a = 1000 flag_choice.append(a) exclusive_choices.sort(key=lambda v: v[2]) for i, (flag, choice, _) in enumerate(exclusive_choices): if not choice: continue sys.argv = [choice] + sys.argv[sys.argv.index(choice)+1:] if not flag: if choice == 'ipython': launch_ipython(argv=sys.argv[1:]) elif choice == 'notebook': launch_notebook() else: globals().update(runpy.run_path(choice, run_name="__main__")) elif flag == '-m': if '--' in sys.argv[1:2] : # -m syntax needs '--' for extra args sys.argv.pop(1) globals().update(runpy.run_module(choice, run_name="__main__")) elif flag == '-c': exec choice in globals(), locals() # workaround else: continue break
[ "def", "run_cli_options", "(", "args", ")", ":", "if", "_interactive_mode", "(", "args", ".", "interactive", ")", ":", "os", ".", "environ", "[", "'PYTHONINSPECT'", "]", "=", "'1'", "if", "in_ipython", "(", ")", ":", "return", "exclusive_choices", "=", "[", "[", "None", ",", "args", ".", "command", "]", ",", "[", "'-c'", ",", "args", ".", "string", "]", ",", "[", "'-m'", ",", "args", ".", "module", "]", "]", "for", "flag_choice", "in", "exclusive_choices", ":", "try", ":", "a", "=", "sys", ".", "argv", ".", "index", "(", "flag_choice", "[", "0", "]", "or", "flag_choice", "[", "1", "]", ")", "except", "ValueError", ":", "a", "=", "1000", "flag_choice", ".", "append", "(", "a", ")", "exclusive_choices", ".", "sort", "(", "key", "=", "lambda", "v", ":", "v", "[", "2", "]", ")", "for", "i", ",", "(", "flag", ",", "choice", ",", "_", ")", "in", "enumerate", "(", "exclusive_choices", ")", ":", "if", "not", "choice", ":", "continue", "sys", ".", "argv", "=", "[", "choice", "]", "+", "sys", ".", "argv", "[", "sys", ".", "argv", ".", "index", "(", "choice", ")", "+", "1", ":", "]", "if", "not", "flag", ":", "if", "choice", "==", "'ipython'", ":", "launch_ipython", "(", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", ")", "elif", "choice", "==", "'notebook'", ":", "launch_notebook", "(", ")", "else", ":", "globals", "(", ")", ".", "update", "(", "runpy", ".", "run_path", "(", "choice", ",", "run_name", "=", "\"__main__\"", ")", ")", "elif", "flag", "==", "'-m'", ":", "if", "'--'", "in", "sys", ".", "argv", "[", "1", ":", "2", "]", ":", "# -m syntax needs '--' for extra args", "sys", ".", "argv", ".", "pop", "(", "1", ")", "globals", "(", ")", ".", "update", "(", "runpy", ".", "run_module", "(", "choice", ",", "run_name", "=", "\"__main__\"", ")", ")", "elif", "flag", "==", "'-c'", ":", "exec", "choice", "in", "globals", "(", ")", ",", "locals", "(", ")", "# workaround", "else", ":", "continue", "break" ]
Quick implementation of Python interpreter's -m, -c and file execution. The resulting dictionary is imported into global namespace, just in case someone is using interactive mode. We try to keep argument order as to pass them correctly to the subcommands.
[ "Quick", "implementation", "of", "Python", "interpreter", "s", "-", "m", "-", "c", "and", "file", "execution", ".", "The", "resulting", "dictionary", "is", "imported", "into", "global", "namespace", "just", "in", "case", "someone", "is", "using", "interactive", "mode", "." ]
python
train
38.25
newville/asteval
asteval/asteval.py
https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L528-L533
def on_augassign(self, node): # ('target', 'op', 'value') """Augmented assign.""" return self.on_assign(ast.Assign(targets=[node.target], value=ast.BinOp(left=node.target, op=node.op, right=node.value)))
[ "def", "on_augassign", "(", "self", ",", "node", ")", ":", "# ('target', 'op', 'value')", "return", "self", ".", "on_assign", "(", "ast", ".", "Assign", "(", "targets", "=", "[", "node", ".", "target", "]", ",", "value", "=", "ast", ".", "BinOp", "(", "left", "=", "node", ".", "target", ",", "op", "=", "node", ".", "op", ",", "right", "=", "node", ".", "value", ")", ")", ")" ]
Augmented assign.
[ "Augmented", "assign", "." ]
python
train
62
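A standalone sketch of the desugaring the record above performs: an ast.AugAssign node is rebuilt as a plain ast.Assign over a BinOp.

import ast

aug = ast.parse('x += 2').body[0]   # ast.AugAssign
desugared = ast.Assign(
    targets=[aug.target],
    value=ast.BinOp(left=aug.target, op=aug.op, right=aug.value))
# equivalent source: x = x + 2
# (ast.fix_missing_locations would be needed before compiling the new node)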
OpenMath/py-openmath
openmath/convert_pickle.py
https://github.com/OpenMath/py-openmath/blob/4906aa9ccf606f533675c28823772e07c30fd220/openmath/convert_pickle.py#L331-L389
def cls_build(inst, state): """ Apply the setstate protocol to initialize `inst` from `state`. INPUT: - ``inst`` -- a raw instance of a class - ``state`` -- the state to restore; typically a dictionary mapping attribute names to their values EXAMPLES:: >>> from openmath.convert_pickle import cls_build >>> class A(object): pass >>> inst = A.__new__(A) >>> state = {"foo": 1, "bar": 4} >>> inst2 = cls_build(inst,state) >>> inst is inst2 True >>> inst.foo 1 >>> inst.bar 4 """ # Copied from Pickler.load_build setstate = getattr(inst, "__setstate__", None) if setstate: setstate(state) return inst slotstate = None if isinstance(state, tuple) and len(state) == 2: state, slotstate = state if state: try: d = inst.__dict__ try: for k, v in six.iteritems(state): d[six.moves.intern(k)] = v # keys in state don't have to be strings # don't blow up, but don't go out of our way except TypeError: d.update(state) except RuntimeError: # XXX In restricted execution, the instance's __dict__ # is not accessible. Use the old way of unpickling # the instance variables. This is a semantic # difference when unpickling in restricted # vs. unrestricted modes. # Note, however, that cPickle has never tried to do the # .update() business, and always uses # PyObject_SetItem(inst.__dict__, key, value) in a # loop over state.items(). for k, v in state.items(): setattr(inst, k, v) if slotstate: for k, v in slotstate.items(): setattr(inst, k, v) return inst
[ "def", "cls_build", "(", "inst", ",", "state", ")", ":", "# Copied from Pickler.load_build", "setstate", "=", "getattr", "(", "inst", ",", "\"__setstate__\"", ",", "None", ")", "if", "setstate", ":", "setstate", "(", "state", ")", "return", "inst", "slotstate", "=", "None", "if", "isinstance", "(", "state", ",", "tuple", ")", "and", "len", "(", "state", ")", "==", "2", ":", "state", ",", "slotstate", "=", "state", "if", "state", ":", "try", ":", "d", "=", "inst", ".", "__dict__", "try", ":", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "state", ")", ":", "d", "[", "six", ".", "moves", ".", "intern", "(", "k", ")", "]", "=", "v", "# keys in state don't have to be strings", "# don't blow up, but don't go out of our way", "except", "TypeError", ":", "d", ".", "update", "(", "state", ")", "except", "RuntimeError", ":", "# XXX In restricted execution, the instance's __dict__", "# is not accessible. Use the old way of unpickling", "# the instance variables. This is a semantic", "# difference when unpickling in restricted", "# vs. unrestricted modes.", "# Note, however, that cPickle has never tried to do the", "# .update() business, and always uses", "# PyObject_SetItem(inst.__dict__, key, value) in a", "# loop over state.items().", "for", "k", ",", "v", "in", "state", ".", "items", "(", ")", ":", "setattr", "(", "inst", ",", "k", ",", "v", ")", "if", "slotstate", ":", "for", "k", ",", "v", "in", "slotstate", ".", "items", "(", ")", ":", "setattr", "(", "inst", ",", "k", ",", "v", ")", "return", "inst" ]
Apply the setstate protocol to initialize `inst` from `state`. INPUT: - ``inst`` -- a raw instance of a class - ``state`` -- the state to restore; typically a dictionary mapping attribute names to their values EXAMPLES:: >>> from openmath.convert_pickle import cls_build >>> class A(object): pass >>> inst = A.__new__(A) >>> state = {"foo": 1, "bar": 4} >>> inst2 = cls_build(inst,state) >>> inst is inst2 True >>> inst.foo 1 >>> inst.bar 4
[ "Apply", "the", "setstate", "protocol", "to", "initialize", "inst", "from", "state", "." ]
python
test
31.491525
saulpw/visidata
visidata/canvas.py
https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/canvas.py#L205-L214
def rowsWithin(self, bbox): 'return list of deduped rows within bbox' ret = {} for y in range(bbox.ymin, bbox.ymax+1): for x in range(bbox.xmin, bbox.xmax+1): for attr, rows in self.pixels[y][x].items(): if attr not in self.hiddenAttrs: for r in rows: ret[id(r)] = r return list(ret.values())
[ "def", "rowsWithin", "(", "self", ",", "bbox", ")", ":", "ret", "=", "{", "}", "for", "y", "in", "range", "(", "bbox", ".", "ymin", ",", "bbox", ".", "ymax", "+", "1", ")", ":", "for", "x", "in", "range", "(", "bbox", ".", "xmin", ",", "bbox", ".", "xmax", "+", "1", ")", ":", "for", "attr", ",", "rows", "in", "self", ".", "pixels", "[", "y", "]", "[", "x", "]", ".", "items", "(", ")", ":", "if", "attr", "not", "in", "self", ".", "hiddenAttrs", ":", "for", "r", "in", "rows", ":", "ret", "[", "id", "(", "r", ")", "]", "=", "r", "return", "list", "(", "ret", ".", "values", "(", ")", ")" ]
return list of deduped rows within bbox
[ "return", "list", "of", "deduped", "rows", "within", "bbox" ]
python
train
41.5
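A hedged sketch of calling the dedup above; the bounding-box object is hypothetical and only needs integer xmin/ymin/xmax/ymax attributes within the canvas's pixel grid.

class Box:
    def __init__(self, xmin, ymin, xmax, ymax):
        self.xmin, self.ymin, self.xmax, self.ymax = xmin, ymin, xmax, ymax

rows = canvas.rowsWithin(Box(0, 0, 79, 23))  # unique rows plotted in that pixel region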
NUAA-Open-Source/NUAA-iCal-Python
NUAAiCal/AddToGCal.py
https://github.com/NUAA-Open-Source/NUAA-iCal-Python/blob/1bdc4016e4d8b236a12bba5047a5150f889bc880/NUAAiCal/AddToGCal.py#L25-L51
def get_credentials(): """Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential. """ home_dir = os.path.expanduser('~') credential_dir = os.path.join(home_dir, '.credentials') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, 'calendar-python-quickstart.json') store = Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES) flow.user_agent = APPLICATION_NAME if flags: credentials = tools.run_flow(flow, store, flags) else: # Needed only for compatibility with Python 2.6 credentials = tools.run(flow, store) print('Storing credentials to ' + credential_path) return credentials
[ "def", "get_credentials", "(", ")", ":", "home_dir", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "credential_dir", "=", "os", ".", "path", ".", "join", "(", "home_dir", ",", "'.credentials'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "credential_dir", ")", ":", "os", ".", "makedirs", "(", "credential_dir", ")", "credential_path", "=", "os", ".", "path", ".", "join", "(", "credential_dir", ",", "'calendar-python-quickstart.json'", ")", "store", "=", "Storage", "(", "credential_path", ")", "credentials", "=", "store", ".", "get", "(", ")", "if", "not", "credentials", "or", "credentials", ".", "invalid", ":", "flow", "=", "client", ".", "flow_from_clientsecrets", "(", "CLIENT_SECRET_FILE", ",", "SCOPES", ")", "flow", ".", "user_agent", "=", "APPLICATION_NAME", "if", "flags", ":", "credentials", "=", "tools", ".", "run_flow", "(", "flow", ",", "store", ",", "flags", ")", "else", ":", "# Needed only for compatibility with Python 2.6", "credentials", "=", "tools", ".", "run", "(", "flow", ",", "store", ")", "print", "(", "'Storing credentials to '", "+", "credential_path", ")", "return", "credentials" ]
Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential.
[ "Gets", "valid", "user", "credentials", "from", "storage", "." ]
python
train
39.074074
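The usual quickstart continuation after the credentials helper above; the httplib2 and discovery imports are assumed from the rest of the quickstart.

credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http)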
StackStorm/pybind
pybind/nos/v6_0_2f/qos/map_/dscp_mutation/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/qos/map_/dscp_mutation/__init__.py#L131-L152
def _set_mark(self, v, load=False): """ Setter method for mark, mapped from YANG variable /qos/map/dscp_mutation/mark (list) If this variable is read-only (config: false) in the source YANG file, then _set_mark is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mark() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("dscp_in_values",mark.mark, yang_name="mark", rest_name="mark", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}), is_container='list', yang_name="mark", rest_name="mark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """mark must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("dscp_in_values",mark.mark, yang_name="mark", rest_name="mark", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}), is_container='list', yang_name="mark", rest_name="mark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""", }) self.__mark = t if hasattr(self, '_set'): self._set()
[ "def", "_set_mark", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"dscp_in_values\"", ",", "mark", ".", "mark", ",", "yang_name", "=", "\"mark\"", ",", "rest_name", "=", "\"mark\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'dscp-in-values'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Map DSCP values to outbound DSCP value'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'callpoint'", ":", "u'dscp_mark_list_mutation'", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'cli-suppress-no'", ":", "None", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"mark\"", ",", "rest_name", "=", "\"mark\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Map DSCP values to outbound DSCP value'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'callpoint'", ":", "u'dscp_mark_list_mutation'", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'cli-suppress-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-qos'", ",", "defining_module", "=", "'brocade-qos'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"mark must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"dscp_in_values\",mark.mark, yang_name=\"mark\", rest_name=\"mark\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dscp-in-values', extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}), is_container='list', yang_name=\"mark\", rest_name=\"mark\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map DSCP values to outbound DSCP value', u'cli-suppress-mode': None, u'callpoint': u'dscp_mark_list_mutation', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__mark", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for mark, mapped from YANG variable /qos/map/dscp_mutation/mark (list) If this variable is read-only (config: false) in the source YANG file, then _set_mark is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mark() directly.
[ "Setter", "method", "for", "mark", "mapped", "from", "YANG", "variable", "/", "qos", "/", "map", "/", "dscp_mutation", "/", "mark", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_mark", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_mark", "()", "directly", "." ]
python
train
113.954545
albert12132/templar
templar/markdown.py
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L564-L605
def hash_reference_links(text, hashes, markdown_obj):
    """Hashes an <a> link or an <img> link.

    This function only converts reference link styles:

        [text here][ref id]
        ![alt text here][ref id]

    For inline style links, see hash_inline_links.

    Reference ids can be defined anywhere in the Markdown text.
    Reference ids can also be omitted, in which case the text in the
    first box is used as the reference id:

        [ref id][]

    This is known as an "implicit link" reference.
    """
    def sub(match):
        is_img = match.group(1) != ''
        content = match.group(2)
        ref = match.group(3).strip().lower()
        if not ref:
            ref = content.strip().lower()
        ref = ref.replace('\n', ' ')
        if ref not in markdown_obj.references:
            link, title = '', ''
        else:
            link, title = markdown_obj.references[ref]
        if title:
            title = ' title="{0}"'.format(title)
        if is_img:
            result = '<img src="{0}" alt="{1}"{2}>'.format(
                link, content, title)
        else:
            result = '<a href="{0}"{2}>{1}</a>'.format(
                link,
                markdown_obj.convert(content).replace('<p>', '').replace('</p>', '').strip(),
                title)
        hashed = hash_text(result, 'link')
        hashes[hashed] = result
        return hashed
    return re_reference_link.sub(sub, text)
[ "def", "hash_reference_links", "(", "text", ",", "hashes", ",", "markdown_obj", ")", ":", "def", "sub", "(", "match", ")", ":", "is_img", "=", "match", ".", "group", "(", "1", ")", "!=", "''", "content", "=", "match", ".", "group", "(", "2", ")", "ref", "=", "match", ".", "group", "(", "3", ")", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "not", "ref", ":", "ref", "=", "content", ".", "strip", "(", ")", ".", "lower", "(", ")", "ref", "=", "ref", ".", "replace", "(", "'\\n'", ",", "' '", ")", "if", "ref", "not", "in", "markdown_obj", ".", "references", ":", "link", ",", "title", "=", "''", ",", "''", "else", ":", "link", ",", "title", "=", "markdown_obj", ".", "references", "[", "ref", "]", "if", "title", ":", "title", "=", "' title=\"{0}\"'", ".", "format", "(", "title", ")", "if", "is_img", ":", "result", "=", "'<img src=\"{0}\" alt=\"{1}\"{2}>'", ".", "format", "(", "link", ",", "content", ",", "title", ")", "else", ":", "result", "=", "'<a href=\"{0}\"{2}>{1}</a>'", ".", "format", "(", "link", ",", "markdown_obj", ".", "convert", "(", "content", ")", ".", "replace", "(", "'<p>'", ",", "''", ")", ".", "replace", "(", "'</p>'", ",", "''", ")", ".", "strip", "(", ")", ",", "title", ")", "hashed", "=", "hash_text", "(", "result", ",", "'link'", ")", "hashes", "[", "hashed", "]", "=", "result", "return", "hashed", "return", "re_reference_link", ".", "sub", "(", "sub", ",", "text", ")" ]
Hashes an <a> link or an <img> link.

This function only converts reference link styles:

    [text here][ref id]
    ![alt text here][ref id]

For inline style links, see hash_inline_links.

Reference ids can be defined anywhere in the Markdown text.
Reference ids can also be omitted, in which case the text in the
first box is used as the reference id:

    [ref id][]

This is known as an "implicit link" reference.
[ "Hashes", "an", "<a", ">", "link", "or", "an", "<img", ">", "link", "." ]
python
train
33.214286
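An input sketch of the two reference-link styles the record above handles; the URL and ids are illustrative.

text = (
    'See [the docs][docs] and an implicit link: [docs][]\n\n'
    '[docs]: https://example.com/docs "Project docs"\n')
# both occurrences resolve to the 'docs' reference and hash to:
#   <a href="https://example.com/docs" title="Project docs">...</a>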
goose3/goose3
goose3/extractors/content.py
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/content.py#L321-L332
def nodes_to_check(self, docs): """\ returns a list of nodes we want to search on like paragraphs and tables """ nodes_to_check = [] for doc in docs: for tag in ['p', 'pre', 'td']: items = self.parser.getElementsByTag(doc, tag=tag) nodes_to_check += items return nodes_to_check
[ "def", "nodes_to_check", "(", "self", ",", "docs", ")", ":", "nodes_to_check", "=", "[", "]", "for", "doc", "in", "docs", ":", "for", "tag", "in", "[", "'p'", ",", "'pre'", ",", "'td'", "]", ":", "items", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "doc", ",", "tag", "=", "tag", ")", "nodes_to_check", "+=", "items", "return", "nodes_to_check" ]
\ returns a list of nodes we want to search on like paragraphs and tables
[ "\\", "returns", "a", "list", "of", "nodes", "we", "want", "to", "search", "on", "like", "paragraphs", "and", "tables" ]
python
valid
30.583333
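A minimal sketch driving the node collection above; the Parser import path follows goose3's layout but is an assumption here, as is the `extractor` instance.

from goose3.parsers import Parser

doc = Parser.fromstring('<html><body><p>a</p><table><tr><td>b</td></tr></table></body></html>')
# extractor.nodes_to_check([doc]) -> the <p> and <td> elements, grouped by tag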
vladcalin/gemstone
gemstone/core/handlers.py
https://github.com/vladcalin/gemstone/blob/325a49d17621b9d45ffd2b5eca6f0de284de8ba4/gemstone/core/handlers.py#L246-L268
def prepare_method_call(self, method, args): """ Wraps a method so that method() will call ``method(*args)`` or ``method(**args)``, depending of args type :param method: a callable object (method) :param args: dict or list with the parameters for the function :return: a 'patched' callable """ if self._method_requires_handler_ref(method): if isinstance(args, list): args = [self] + args elif isinstance(args, dict): args["handler"] = self if isinstance(args, list): to_call = partial(method, *args) elif isinstance(args, dict): to_call = partial(method, **args) else: raise TypeError( "args must be list or dict but got {} instead".format(type(args).__name__)) return to_call
[ "def", "prepare_method_call", "(", "self", ",", "method", ",", "args", ")", ":", "if", "self", ".", "_method_requires_handler_ref", "(", "method", ")", ":", "if", "isinstance", "(", "args", ",", "list", ")", ":", "args", "=", "[", "self", "]", "+", "args", "elif", "isinstance", "(", "args", ",", "dict", ")", ":", "args", "[", "\"handler\"", "]", "=", "self", "if", "isinstance", "(", "args", ",", "list", ")", ":", "to_call", "=", "partial", "(", "method", ",", "*", "args", ")", "elif", "isinstance", "(", "args", ",", "dict", ")", ":", "to_call", "=", "partial", "(", "method", ",", "*", "*", "args", ")", "else", ":", "raise", "TypeError", "(", "\"args must be list or dict but got {} instead\"", ".", "format", "(", "type", "(", "args", ")", ".", "__name__", ")", ")", "return", "to_call" ]
Wraps a method so that method() will call ``method(*args)`` or ``method(**args)``, depending of args type :param method: a callable object (method) :param args: dict or list with the parameters for the function :return: a 'patched' callable
[ "Wraps", "a", "method", "so", "that", "method", "()", "will", "call", "method", "(", "*", "args", ")", "or", "method", "(", "**", "args", ")", "depending", "of", "args", "type" ]
python
train
37.391304
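The partial-binding behavior above, shown standalone with functools.partial:

from functools import partial

def add(a, b):
    return a + b

partial(add, *[1, 2])()             # list args become positional -> 3
partial(add, **{'a': 1, 'b': 2})()  # dict args become keyword -> 3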
espressif/esptool
ecdsa/numbertheory.py
https://github.com/espressif/esptool/blob/c583756c118039cfcfe256f7a3285618914d16a5/ecdsa/numbertheory.py#L124-L145
def jacobi( a, n ): """Jacobi symbol""" # Based on the Handbook of Applied Cryptography (HAC), algorithm 2.149. # This function has been tested by comparison with a small # table printed in HAC, and by extensive use in calculating # modular square roots. assert n >= 3 assert n%2 == 1 a = a % n if a == 0: return 0 if a == 1: return 1 a1, e = a, 0 while a1%2 == 0: a1, e = a1//2, e+1 if e%2 == 0 or n%8 == 1 or n%8 == 7: s = 1 else: s = -1 if a1 == 1: return s if n%4 == 3 and a1%4 == 3: s = -s return s * jacobi( n % a1, a1 )
[ "def", "jacobi", "(", "a", ",", "n", ")", ":", "# Based on the Handbook of Applied Cryptography (HAC), algorithm 2.149.", "# This function has been tested by comparison with a small", "# table printed in HAC, and by extensive use in calculating", "# modular square roots.", "assert", "n", ">=", "3", "assert", "n", "%", "2", "==", "1", "a", "=", "a", "%", "n", "if", "a", "==", "0", ":", "return", "0", "if", "a", "==", "1", ":", "return", "1", "a1", ",", "e", "=", "a", ",", "0", "while", "a1", "%", "2", "==", "0", ":", "a1", ",", "e", "=", "a1", "//", "2", ",", "e", "+", "1", "if", "e", "%", "2", "==", "0", "or", "n", "%", "8", "==", "1", "or", "n", "%", "8", "==", "7", ":", "s", "=", "1", "else", ":", "s", "=", "-", "1", "if", "a1", "==", "1", ":", "return", "s", "if", "n", "%", "4", "==", "3", "and", "a1", "%", "4", "==", "3", ":", "s", "=", "-", "s", "return", "s", "*", "jacobi", "(", "n", "%", "a1", ",", "a1", ")" ]
Jacobi symbol
[ "Jacobi", "symbol" ]
python
train
24.818182
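A few verifiable values for the Jacobi symbol above (checked against the squares mod 7):

assert jacobi(2, 7) == 1    # 3*3 = 9 = 2 (mod 7), so 2 is a residue
assert jacobi(3, 7) == -1   # squares mod 7 are {1, 2, 4}; 3 is a non-residue
assert jacobi(7, 21) == 0   # gcd(7, 21) > 1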
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/json_util.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/json_util.py#L183-L201
def get_value_for_datastore(self, model_instance): """Gets value for datastore. Args: model_instance: instance of the model class. Returns: datastore-compatible value. """ value = super(JsonProperty, self).get_value_for_datastore(model_instance) if not value: return None json_value = value if not isinstance(value, dict): json_value = value.to_json() if not json_value: return None return datastore_types.Text(json.dumps( json_value, sort_keys=True, cls=JsonEncoder))
[ "def", "get_value_for_datastore", "(", "self", ",", "model_instance", ")", ":", "value", "=", "super", "(", "JsonProperty", ",", "self", ")", ".", "get_value_for_datastore", "(", "model_instance", ")", "if", "not", "value", ":", "return", "None", "json_value", "=", "value", "if", "not", "isinstance", "(", "value", ",", "dict", ")", ":", "json_value", "=", "value", ".", "to_json", "(", ")", "if", "not", "json_value", ":", "return", "None", "return", "datastore_types", ".", "Text", "(", "json", ".", "dumps", "(", "json_value", ",", "sort_keys", "=", "True", ",", "cls", "=", "JsonEncoder", ")", ")" ]
Gets value for datastore. Args: model_instance: instance of the model class. Returns: datastore-compatible value.
[ "Gets", "value", "for", "datastore", "." ]
python
train
27.894737
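A hedged sketch of a model that would exercise the property above; the db.Model usage assumes the classic GAE datastore API this code targets.

class JobState(db.Model):
    payload = JsonProperty()   # a dict, or an object exposing to_json()

entity = JobState(payload={'shard': 3, 'done': False})
# get_value_for_datastore() would store: '{"done": false, "shard": 3}' (keys sorted)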
apache/airflow
airflow/contrib/hooks/cassandra_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/cassandra_hook.py#L108-L115
def get_conn(self): """ Returns a cassandra Session object """ if self.session and not self.session.is_shutdown: return self.session self.session = self.cluster.connect(self.keyspace) return self.session
[ "def", "get_conn", "(", "self", ")", ":", "if", "self", ".", "session", "and", "not", "self", ".", "session", ".", "is_shutdown", ":", "return", "self", ".", "session", "self", ".", "session", "=", "self", ".", "cluster", ".", "connect", "(", "self", ".", "keyspace", ")", "return", "self", ".", "session" ]
Returns a cassandra Session object
[ "Returns", "a", "cassandra", "Session", "object" ]
python
test
32
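A usage sketch for the hook above; the connection id assumes Airflow's default Cassandra connection is configured.

hook = CassandraHook(cassandra_conn_id='cassandra_default')
session = hook.get_conn()
rows = session.execute('SELECT release_version FROM system.local')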
jazzband/django-mongonaut
mongonaut/forms/form_mixins.py
https://github.com/jazzband/django-mongonaut/blob/5485b2e029dff8ae267a4cb39c92d0a72cb5b144/mongonaut/forms/form_mixins.py#L82-L108
def get_form_field_dict(self, model_dict): """ Takes a model dictionary representation and creates a dictionary keyed by form field. Each value is a keyed 4 tuple of: (widget, mode_field_instance, model_field_type, field_key) """ return_dict = OrderedDict() # Workaround: mongoengine doesn't preserve form fields ordering from metaclass __new__ if hasattr(self.model, 'Meta') and hasattr(self.model.Meta, 'form_fields_ordering'): field_order_list = tuple(form_field for form_field in self.model.Meta.form_fields_ordering if form_field in model_dict.iterkeys()) order_dict = OrderedDict.fromkeys(field_order_list) return_dict = order_dict for field_key, field_dict in sorted(model_dict.items()): if not field_key.startswith("_"): widget = field_dict.get('_widget', None) if widget is None: return_dict[field_key] = self.get_form_field_dict(field_dict) return_dict[field_key].update({'_field_type': field_dict.get('_field_type', None)}) else: return_dict[field_key] = FieldTuple(widget, field_dict.get('_document_field', None), field_dict.get('_field_type', None), field_dict.get('_key', None)) return return_dict
[ "def", "get_form_field_dict", "(", "self", ",", "model_dict", ")", ":", "return_dict", "=", "OrderedDict", "(", ")", "# Workaround: mongoengine doesn't preserve form fields ordering from metaclass __new__", "if", "hasattr", "(", "self", ".", "model", ",", "'Meta'", ")", "and", "hasattr", "(", "self", ".", "model", ".", "Meta", ",", "'form_fields_ordering'", ")", ":", "field_order_list", "=", "tuple", "(", "form_field", "for", "form_field", "in", "self", ".", "model", ".", "Meta", ".", "form_fields_ordering", "if", "form_field", "in", "model_dict", ".", "iterkeys", "(", ")", ")", "order_dict", "=", "OrderedDict", ".", "fromkeys", "(", "field_order_list", ")", "return_dict", "=", "order_dict", "for", "field_key", ",", "field_dict", "in", "sorted", "(", "model_dict", ".", "items", "(", ")", ")", ":", "if", "not", "field_key", ".", "startswith", "(", "\"_\"", ")", ":", "widget", "=", "field_dict", ".", "get", "(", "'_widget'", ",", "None", ")", "if", "widget", "is", "None", ":", "return_dict", "[", "field_key", "]", "=", "self", ".", "get_form_field_dict", "(", "field_dict", ")", "return_dict", "[", "field_key", "]", ".", "update", "(", "{", "'_field_type'", ":", "field_dict", ".", "get", "(", "'_field_type'", ",", "None", ")", "}", ")", "else", ":", "return_dict", "[", "field_key", "]", "=", "FieldTuple", "(", "widget", ",", "field_dict", ".", "get", "(", "'_document_field'", ",", "None", ")", ",", "field_dict", ".", "get", "(", "'_field_type'", ",", "None", ")", ",", "field_dict", ".", "get", "(", "'_key'", ",", "None", ")", ")", "return", "return_dict" ]
Takes a model dictionary representation and creates a dictionary keyed by form field. Each value is a keyed 4 tuple of: (widget, model_field_instance, model_field_type, field_key)
[ "Takes", "a", "model", "dictionary", "representation", "and", "creates", "a", "dictionary", "keyed", "by", "form", "field", ".", "Each", "value", "is", "a", "keyed", "4", "tuple", "of", ":", "(", "widget", "mode_field_instance", "model_field_type", "field_key", ")" ]
python
valid
56.777778
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/developer_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/developer_api.py#L36-L56
def add_my_api_key_to_groups(self, body, **kwargs): # noqa: E501 """Add API key to a list of groups. # noqa: E501 An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.add_my_api_key_to_groups(body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.add_my_api_key_to_groups_with_http_info(body, **kwargs) # noqa: E501 else: (data) = self.add_my_api_key_to_groups_with_http_info(body, **kwargs) # noqa: E501 return data
[ "def", "add_my_api_key_to_groups", "(", "self", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "add_my_api_key_to_groups_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "add_my_api_key_to_groups_with_http_info", "(", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Add API key to a list of groups. # noqa: E501 An endpoint for adding API key to groups. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.add_my_api_key_to_groups(body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
[ "Add", "API", "key", "to", "a", "list", "of", "groups", ".", "#", "noqa", ":", "E501" ]
python
train
58.904762
mozilla-releng/scriptworker
scriptworker/utils.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/utils.py#L501-L528
def read_from_file(path, file_type='text', exception=ScriptWorkerException): """Read from ``path``. Small helper function to read from ``file``. Args: path (str): the path to read from. file_type (str, optional): the type of file. Currently accepts ``text`` or ``binary``. Defaults to ``text``. exception (Exception, optional): the exception to raise if unable to read from the file. Defaults to ``ScriptWorkerException``. Returns: None: if unable to read from ``path`` and ``exception`` is ``None`` str or bytes: the contents of ``path`` Raises: Exception: if ``exception`` is set. """ FILE_TYPE_MAP = {'text': 'r', 'binary': 'rb'} if file_type not in FILE_TYPE_MAP: raise exception("Unknown file_type {} not in {}!".format(file_type, FILE_TYPE_MAP)) try: with open(path, FILE_TYPE_MAP[file_type]) as fh: return fh.read() except (OSError, FileNotFoundError) as exc: raise exception("Can't read_from_file {}: {}".format(path, str(exc)))
[ "def", "read_from_file", "(", "path", ",", "file_type", "=", "'text'", ",", "exception", "=", "ScriptWorkerException", ")", ":", "FILE_TYPE_MAP", "=", "{", "'text'", ":", "'r'", ",", "'binary'", ":", "'rb'", "}", "if", "file_type", "not", "in", "FILE_TYPE_MAP", ":", "raise", "exception", "(", "\"Unknown file_type {} not in {}!\"", ".", "format", "(", "file_type", ",", "FILE_TYPE_MAP", ")", ")", "try", ":", "with", "open", "(", "path", ",", "FILE_TYPE_MAP", "[", "file_type", "]", ")", "as", "fh", ":", "return", "fh", ".", "read", "(", ")", "except", "(", "OSError", ",", "FileNotFoundError", ")", "as", "exc", ":", "raise", "exception", "(", "\"Can't read_from_file {}: {}\"", ".", "format", "(", "path", ",", "str", "(", "exc", ")", ")", ")" ]
Read from ``path``. Small helper function to read from ``file``. Args: path (str): the path to read from. file_type (str, optional): the type of file. Currently accepts ``text`` or ``binary``. Defaults to ``text``. exception (Exception, optional): the exception to raise if unable to read from the file. Defaults to ``ScriptWorkerException``. Returns: None: if unable to read from ``path`` and ``exception`` is ``None`` str or bytes: the contents of ``path`` Raises: Exception: if ``exception`` is set.
[ "Read", "from", "path", "." ]
python
train
37.964286
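A hedged usage sketch of the helper above; the paths are placeholders. With file_type='text' the call returns str, with file_type='binary' it returns bytes, and read failures are re-raised as the given exception class:

    from scriptworker.utils import read_from_file

    log_text = read_from_file("work/live.log")                         # placeholder path -> str
    blob = read_from_file("artifacts/public.zip", file_type="binary")  # placeholder path -> bytes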
boundary/pulse-api-cli
boundary/api_call.py
https://github.com/boundary/pulse-api-cli/blob/b01ca65b442eed19faac309c9d62bbc3cb2c098f/boundary/api_call.py#L111-L120
def method(self, value):
        """
        Before assigning the value validate that it is in one of the HTTP
        methods we implement
        """
        keys = self._methods.keys()
        if value not in keys:
            raise AttributeError("Method value not in " + str(keys))
        else:
            self._method = value
[ "def", "method", "(", "self", ",", "value", ")", ":", "keys", "=", "self", ".", "_methods", ".", "keys", "(", ")", "if", "value", "not", "in", "keys", ":", "raise", "AttributeError", "(", "\"Method value not in \"", "+", "str", "(", "keys", ")", ")", "else", ":", "self", ".", "_method", "=", "value" ]
Before assigning the value validate that it is in one of the HTTP methods we implement
[ "Before", "assigning", "the", "value", "validate", "that", "is", "in", "one", "of", "the", "HTTP", "methods", "we", "implement" ]
python
test
32.1
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L5527-L5539
def createNotification(self, ulOverlayHandle, ulUserValue, type_, pchText, style): """ Create a notification and enqueue it to be shown to the user. An overlay handle is required to create a notification, as otherwise it would be impossible for a user to act on it. To create a two-line notification, use a line break ('\n') to split the text into two lines. The pImage argument may be NULL, in which case the specified overlay's icon will be used instead. """ fn = self.function_table.createNotification pImage = NotificationBitmap_t() pNotificationId = VRNotificationId() result = fn(ulOverlayHandle, ulUserValue, type_, pchText, style, byref(pImage), byref(pNotificationId)) return result, pImage, pNotificationId
[ "def", "createNotification", "(", "self", ",", "ulOverlayHandle", ",", "ulUserValue", ",", "type_", ",", "pchText", ",", "style", ")", ":", "fn", "=", "self", ".", "function_table", ".", "createNotification", "pImage", "=", "NotificationBitmap_t", "(", ")", "pNotificationId", "=", "VRNotificationId", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "ulUserValue", ",", "type_", ",", "pchText", ",", "style", ",", "byref", "(", "pImage", ")", ",", "byref", "(", "pNotificationId", ")", ")", "return", "result", ",", "pImage", ",", "pNotificationId" ]
Create a notification and enqueue it to be shown to the user. An overlay handle is required to create a notification, as otherwise it would be impossible for a user to act on it. To create a two-line notification, use a line break ('\n') to split the text into two lines. The pImage argument may be NULL, in which case the specified overlay's icon will be used instead.
[ "Create", "a", "notification", "and", "enqueue", "it", "to", "be", "shown", "to", "the", "user", ".", "An", "overlay", "handle", "is", "required", "to", "create", "a", "notification", "as", "otherwise", "it", "would", "be", "impossible", "for", "a", "user", "to", "act", "on", "it", ".", "To", "create", "a", "two", "-", "line", "notification", "use", "a", "line", "break", "(", "\\", "n", ")", "to", "split", "the", "text", "into", "two", "lines", ".", "The", "pImage", "argument", "may", "be", "NULL", "in", "which", "case", "the", "specified", "overlay", "s", "icon", "will", "be", "used", "instead", "." ]
python
train
61
phoebe-project/phoebe2
phoebe/backend/universe.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/universe.py#L1699-L1716
def _populate_lp(self, dataset, **kwargs): """ Populate columns necessary for an LP dataset This should not be called directly, but rather via :meth:`Body.populate_observable` or :meth:`System.populate_observables` """ logger.debug("{}._populate_lp(dataset={})".format(self.component, dataset)) profile_rest = kwargs.get('profile_rest', self.lp_profile_rest.get(dataset)) rv_cols = self._populate_rv(dataset, **kwargs) cols = rv_cols # rvs = (rv_cols['rvs']*u.solRad/u.d).to(u.m/u.s).value # cols['dls'] = rv_cols['rvs']*profile_rest/c.c.si.value return cols
[ "def", "_populate_lp", "(", "self", ",", "dataset", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "\"{}._populate_lp(dataset={})\"", ".", "format", "(", "self", ".", "component", ",", "dataset", ")", ")", "profile_rest", "=", "kwargs", ".", "get", "(", "'profile_rest'", ",", "self", ".", "lp_profile_rest", ".", "get", "(", "dataset", ")", ")", "rv_cols", "=", "self", ".", "_populate_rv", "(", "dataset", ",", "*", "*", "kwargs", ")", "cols", "=", "rv_cols", "# rvs = (rv_cols['rvs']*u.solRad/u.d).to(u.m/u.s).value", "# cols['dls'] = rv_cols['rvs']*profile_rest/c.c.si.value", "return", "cols" ]
Populate columns necessary for an LP dataset This should not be called directly, but rather via :meth:`Body.populate_observable` or :meth:`System.populate_observables`
[ "Populate", "columns", "necessary", "for", "an", "LP", "dataset" ]
python
train
35.666667
tensorflow/probability
tensorflow_probability/python/internal/distribution_util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L506-L542
def pick_scalar_condition(pred, true_value, false_value, name=None): """Convenience function that chooses one of two values based on the predicate. This utility is equivalent to a version of `tf.where` that accepts only a scalar predicate and computes its result statically when possible. It may also be used in place of `tf.cond` when both branches yield a `Tensor` of the same shape; the operational difference is that `tf.cond` uses control flow to evaluate only the branch that's needed, while `tf.where` (and thus this method) may evaluate both branches before the predicate's truth is known. This means that `tf.cond` is preferred when one of the branches is expensive to evaluate (like performing a large matmul), while this method is preferred when both branches are cheap, e.g., constants. In the latter case, we expect this method to be substantially faster than `tf.cond` on GPU and to give similar performance on CPU. Args: pred: Scalar `bool` `Tensor` predicate. true_value: `Tensor` to return if `pred` is `True`. false_value: `Tensor` to return if `pred` is `False`. Must have the same shape as `true_value`. name: Python `str` name given to ops managed by this object. Returns: result: a `Tensor` (or `Tensor`-convertible Python value) equal to `true_value` if `pred` evaluates to `True` and `false_value` otherwise. If the condition can be evaluated statically, the result returned is one of the input Python values, with no graph side effects. """ with tf.name_scope(name or "pick_scalar_condition"): pred = tf.convert_to_tensor( value=pred, dtype_hint=tf.bool, name="pred") true_value = tf.convert_to_tensor(value=true_value, name="true_value") false_value = tf.convert_to_tensor(value=false_value, name="false_value") pred_ = tf.get_static_value(pred) if pred_ is None: return tf.where(pred, true_value, false_value) return true_value if pred_ else false_value
[ "def", "pick_scalar_condition", "(", "pred", ",", "true_value", ",", "false_value", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", "or", "\"pick_scalar_condition\"", ")", ":", "pred", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "pred", ",", "dtype_hint", "=", "tf", ".", "bool", ",", "name", "=", "\"pred\"", ")", "true_value", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "true_value", ",", "name", "=", "\"true_value\"", ")", "false_value", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "false_value", ",", "name", "=", "\"false_value\"", ")", "pred_", "=", "tf", ".", "get_static_value", "(", "pred", ")", "if", "pred_", "is", "None", ":", "return", "tf", ".", "where", "(", "pred", ",", "true_value", ",", "false_value", ")", "return", "true_value", "if", "pred_", "else", "false_value" ]
Convenience function that chooses one of two values based on the predicate. This utility is equivalent to a version of `tf.where` that accepts only a scalar predicate and computes its result statically when possible. It may also be used in place of `tf.cond` when both branches yield a `Tensor` of the same shape; the operational difference is that `tf.cond` uses control flow to evaluate only the branch that's needed, while `tf.where` (and thus this method) may evaluate both branches before the predicate's truth is known. This means that `tf.cond` is preferred when one of the branches is expensive to evaluate (like performing a large matmul), while this method is preferred when both branches are cheap, e.g., constants. In the latter case, we expect this method to be substantially faster than `tf.cond` on GPU and to give similar performance on CPU. Args: pred: Scalar `bool` `Tensor` predicate. true_value: `Tensor` to return if `pred` is `True`. false_value: `Tensor` to return if `pred` is `False`. Must have the same shape as `true_value`. name: Python `str` name given to ops managed by this object. Returns: result: a `Tensor` (or `Tensor`-convertible Python value) equal to `true_value` if `pred` evaluates to `True` and `false_value` otherwise. If the condition can be evaluated statically, the result returned is one of the input Python values, with no graph side effects.
[ "Convenience", "function", "that", "chooses", "one", "of", "two", "values", "based", "on", "the", "predicate", "." ]
python
test
53
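A hedged usage sketch of the function above. The import path is taken from the record and points at an internal module, so treat it as an assumption about a stable location; the behavior shown follows directly from the code:

    import tensorflow as tf
    from tensorflow_probability.python.internal import distribution_util

    # Python-bool predicate: statically known, so true_value is returned
    # directly and no tf.where op is created.
    one = distribution_util.pick_scalar_condition(True, 1.0, 0.0)

    # Tensor predicate: falls back to tf.where whenever the value cannot be
    # determined statically (e.g. data-dependent inputs under tf.function).
    pred = tf.equal(tf.reduce_sum(tf.ones([3])), 3.0)
    out = distribution_util.pick_scalar_condition(pred, 1.0, 0.0)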
wimglenn/advent-of-code-data
aocd/get.py
https://github.com/wimglenn/advent-of-code-data/blob/a3856459d225840f2b6919659fc65aa7a6a74533/aocd/get.py#L70-L134
def get_day_and_year(): """ Returns tuple (day, year). Here be dragons! The correct date is determined with introspection of the call stack, first finding the filename of the module from which ``aocd`` was imported. This means your filenames should be something sensible, which identify the day and year unambiguously. The examples below should all parse correctly, because they have unique digits in the file path that are recognisable as AoC years (2015+) or days (1-25). A filename like ``problem_one.py`` will not work, so don't do that. If you don't like weird frame hacks, just use the ``aocd.get_data()`` function directly instead and have a nice day! """ pattern_year = r"201[5-9]|202[0-9]" pattern_day = r"2[0-5]|1[0-9]|[1-9]" stack = [f[0] for f in traceback.extract_stack()] for name in stack: basename = os.path.basename(name) reasons_to_skip_frame = [ not re.search(pattern_day, basename), # no digits in filename name == __file__, # here "importlib" in name, # Python 3 import machinery "/IPython/" in name, # IPython adds a tonne of stack frames name.startswith("<"), # crap like <decorator-gen-57> name.endswith("ython3"), # ipython3 alias ] if not any(reasons_to_skip_frame): abspath = os.path.abspath(name) break log.debug("skipping frame %s", name) else: import __main__ try: __main__.__file__ except AttributeError: log.debug("running within REPL") day = current_day() year = most_recent_year() return day, year else: log.debug("non-interactive") raise AocdError("Failed introspection of filename") years = {int(year) for year in re.findall(pattern_year, abspath)} if len(years) > 1: raise AocdError("Failed introspection of year") year = years.pop() if years else None basename_no_years = re.sub(pattern_year, "", basename) try: [day] = set(re.findall(pattern_day, basename_no_years)) except ValueError: pass else: assert not day.startswith("0"), "regex pattern_day must prevent any leading 0" day = int(day) assert 1 <= day <= 25, "regex pattern_day must only match numbers in range 1-25" log.debug("year=%d day=%d", year, day) return day, year log.debug("giving up introspection for %s", abspath) raise AocdError("Failed introspection of day")
[ "def", "get_day_and_year", "(", ")", ":", "pattern_year", "=", "r\"201[5-9]|202[0-9]\"", "pattern_day", "=", "r\"2[0-5]|1[0-9]|[1-9]\"", "stack", "=", "[", "f", "[", "0", "]", "for", "f", "in", "traceback", ".", "extract_stack", "(", ")", "]", "for", "name", "in", "stack", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "name", ")", "reasons_to_skip_frame", "=", "[", "not", "re", ".", "search", "(", "pattern_day", ",", "basename", ")", ",", "# no digits in filename", "name", "==", "__file__", ",", "# here", "\"importlib\"", "in", "name", ",", "# Python 3 import machinery", "\"/IPython/\"", "in", "name", ",", "# IPython adds a tonne of stack frames", "name", ".", "startswith", "(", "\"<\"", ")", ",", "# crap like <decorator-gen-57>", "name", ".", "endswith", "(", "\"ython3\"", ")", ",", "# ipython3 alias", "]", "if", "not", "any", "(", "reasons_to_skip_frame", ")", ":", "abspath", "=", "os", ".", "path", ".", "abspath", "(", "name", ")", "break", "log", ".", "debug", "(", "\"skipping frame %s\"", ",", "name", ")", "else", ":", "import", "__main__", "try", ":", "__main__", ".", "__file__", "except", "AttributeError", ":", "log", ".", "debug", "(", "\"running within REPL\"", ")", "day", "=", "current_day", "(", ")", "year", "=", "most_recent_year", "(", ")", "return", "day", ",", "year", "else", ":", "log", ".", "debug", "(", "\"non-interactive\"", ")", "raise", "AocdError", "(", "\"Failed introspection of filename\"", ")", "years", "=", "{", "int", "(", "year", ")", "for", "year", "in", "re", ".", "findall", "(", "pattern_year", ",", "abspath", ")", "}", "if", "len", "(", "years", ")", ">", "1", ":", "raise", "AocdError", "(", "\"Failed introspection of year\"", ")", "year", "=", "years", ".", "pop", "(", ")", "if", "years", "else", "None", "basename_no_years", "=", "re", ".", "sub", "(", "pattern_year", ",", "\"\"", ",", "basename", ")", "try", ":", "[", "day", "]", "=", "set", "(", "re", ".", "findall", "(", "pattern_day", ",", "basename_no_years", ")", ")", "except", "ValueError", ":", "pass", "else", ":", "assert", "not", "day", ".", "startswith", "(", "\"0\"", ")", ",", "\"regex pattern_day must prevent any leading 0\"", "day", "=", "int", "(", "day", ")", "assert", "1", "<=", "day", "<=", "25", ",", "\"regex pattern_day must only match numbers in range 1-25\"", "log", ".", "debug", "(", "\"year=%d day=%d\"", ",", "year", ",", "day", ")", "return", "day", ",", "year", "log", ".", "debug", "(", "\"giving up introspection for %s\"", ",", "abspath", ")", "raise", "AocdError", "(", "\"Failed introspection of day\"", ")" ]
Returns tuple (day, year). Here be dragons! The correct date is determined with introspection of the call stack, first finding the filename of the module from which ``aocd`` was imported. This means your filenames should be something sensible, which identify the day and year unambiguously. The examples below should all parse correctly, because they have unique digits in the file path that are recognisable as AoC years (2015+) or days (1-25). A filename like ``problem_one.py`` will not work, so don't do that. If you don't like weird frame hacks, just use the ``aocd.get_data()`` function directly instead and have a nice day!
[ "Returns", "tuple", "(", "day", "year", ")", "." ]
python
train
39.030769
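A standalone demo of the two introspection regexes above (patterns copied from the function; the script path is hypothetical), showing how a year and a day are recovered from a filename:

    import re

    pattern_year = r"201[5-9]|202[0-9]"
    pattern_day = r"2[0-5]|1[0-9]|[1-9]"

    abspath = "/home/me/advent2017/day21.py"   # hypothetical script path
    basename = "day21.py"

    years = {int(y) for y in re.findall(pattern_year, abspath)}   # {2017}
    basename_no_years = re.sub(pattern_year, "", basename)        # "day21.py"
    [day] = set(re.findall(pattern_day, basename_no_years))       # "21"
    print(years.pop(), int(day))                                  # 2017 21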
kkinder/NdbSearchableBase
NdbSearchableBase/SearchableModel.py
https://github.com/kkinder/NdbSearchableBase/blob/4f999336b464704a0929cec135c1f09fb1ddfb7c/NdbSearchableBase/SearchableModel.py#L49-L94
def search(cls, query_string, options=None, enable_facet_discovery=False, return_facets=None,
               facet_options=None, facet_refinements=None, deadline=None, **kwargs):
        """
        Searches the index. Conveniently searches only for documents that belong to instances of this class.

        :param query_string: The query to match against documents in the index. See search.Query() for details.
        :param options: A QueryOptions describing post-processing of search results.
        :param enable_facet_discovery: discover top relevant facets for this search query and return them.
        :param return_facets: An iterable of FacetRequest or basestring as facet name to return specific facet with the result.
        :param facet_options: A FacetOption describing processing of facets.
        :param facet_refinements: An iterable of FacetRefinement objects or refinement token strings used to filter out search results based on a facet value. Refinements for different facets are combined by conjunction and refinements for the same facet by disjunction.
        :param deadline: Deadline for RPC call in seconds; if None use the default.
        :param kwargs: Additional keyword arguments passed through to the underlying index search call.
        :return: A SearchResults containing a list of documents matched, number returned and number matched by the query.
        :raises:
            QueryError: If the query string is not parseable.
            TypeError: If any of the parameters have invalid types, or an unknown attribute is passed.
            ValueError: If any of the parameters have invalid values (e.g., a negative deadline).
        """
        search_class = cls.search_get_class_names()[-1]
        query_string += ' ' + 'class_name:%s' % (search_class,)
        q = search.Query(
            query_string=query_string,
            options=options,
            enable_facet_discovery=enable_facet_discovery,
            return_facets=return_facets,
            facet_options=facet_options,
            facet_refinements=facet_refinements
        )
        index = cls.search_get_index()
        return index.search(q, deadline=deadline, **kwargs)
[ "def", "search", "(", "cls", ",", "query_string", ",", "options", "=", "None", ",", "enable_facet_discovery", "=", "False", ",", "return_facets", "=", "None", ",", "facet_options", "=", "None", ",", "facet_refinements", "=", "None", ",", "deadline", "=", "None", ",", "*", "*", "kwargs", ")", ":", "search_class", "=", "cls", ".", "search_get_class_names", "(", ")", "[", "-", "1", "]", "query_string", "+=", "' '", "+", "'class_name:%s'", "%", "(", "search_class", ",", ")", "q", "=", "search", ".", "Query", "(", "query_string", "=", "query_string", ",", "options", "=", "options", ",", "enable_facet_discovery", "=", "enable_facet_discovery", ",", "return_facets", "=", "return_facets", ",", "facet_options", "=", "facet_options", ",", "facet_refinements", "=", "facet_refinements", ")", "index", "=", "cls", ".", "search_get_index", "(", ")", "return", "index", ".", "search", "(", "q", ",", "deadline", "=", "deadline", ",", "*", "*", "kwargs", ")" ]
Searches the index. Conveniently searches only for documents that belong to instances of this class.

:param query_string: The query to match against documents in the index. See search.Query() for details.
:param options: A QueryOptions describing post-processing of search results.
:param enable_facet_discovery: discover top relevant facets for this search query and return them.
:param return_facets: An iterable of FacetRequest or basestring as facet name to return specific facet with the result.
:param facet_options: A FacetOption describing processing of facets.
:param facet_refinements: An iterable of FacetRefinement objects or refinement token strings used to filter out search results based on a facet value. Refinements for different facets are combined by conjunction and refinements for the same facet by disjunction.
:param deadline: Deadline for RPC call in seconds; if None use the default.
:param kwargs: Additional keyword arguments passed through to the underlying index search call.
:return: A SearchResults containing a list of documents matched, number returned and number matched by the query.
:raises:
    QueryError: If the query string is not parseable.
    TypeError: If any of the parameters have invalid types, or an unknown attribute is passed.
    ValueError: If any of the parameters have invalid values (e.g., a negative deadline).
[ "Searches", "the", "index", ".", "Conveniently", "searches", "only", "for", "documents", "that", "belong", "to", "instances", "of", "this", "class", "." ]
python
train
52.608696
choldgraf/download
download/download.py
https://github.com/choldgraf/download/blob/26007bb87751ee35791e30e4dfc54dd088bf15e6/download/download.py#L137-L232
def _fetch_file(url, file_name, resume=True, hash_=None, timeout=10., progressbar=True, verbose=True): """Load requested file, downloading it if needed or requested. Parameters ---------- url: string The url of file to be downloaded. file_name: string Name, along with the path, of where downloaded file will be saved. resume: bool, optional If true, try to resume partially downloaded files. hash_ : str | None The hash of the file to check. If None, no checking is performed. timeout : float The URL open timeout. verbose : bool Whether to print download status. """ # Adapted from NISL and MNE-python: # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py # https://martinos.org/mne if hash_ is not None and (not isinstance(hash_, string_types) or len(hash_) != 32): raise ValueError('Bad hash value given, should be a 32-character ' 'string:\n%s' % (hash_,)) temp_file_name = file_name + ".part" try: if 'dropbox.com' in url: # Use requests to handle cookies. # XXX In the future, we should probably use requests everywhere. # Unless we want to minimize dependencies. try: import requests except ModuleNotFoundError: raise ValueError('To download Dropbox links, you need to ' 'install the `requests` module.') resp = requests.get(url) chunk_size = 8192 # 2 ** 13 with open(temp_file_name, 'wb') as ff: for chunk in resp.iter_content(chunk_size=chunk_size): if chunk: # filter out keep-alive new chunks ff.write(chunk) else: # Check file size and displaying it alongside the download url u = urllib.request.urlopen(url, timeout=timeout) u.close() # this is necessary to follow any redirects url = u.geturl() u = urllib.request.urlopen(url, timeout=timeout) try: file_size = int(u.headers.get('Content-Length', '1').strip()) finally: u.close() del u if verbose: tqdm.write('Downloading data from %s (%s)\n' % (url, sizeof_fmt(file_size))) # Triage resume if not os.path.exists(temp_file_name): resume = False if resume: with open(temp_file_name, 'rb', buffering=0) as local_file: local_file.seek(0, 2) initial_size = local_file.tell() del local_file else: initial_size = 0 # This should never happen if our functions work properly if initial_size > file_size: raise RuntimeError('Local file (%s) is larger than remote ' 'file (%s), cannot resume download' % (sizeof_fmt(initial_size), sizeof_fmt(file_size))) scheme = urllib.parse.urlparse(url).scheme fun = _get_http if scheme in ('http', 'https') else _get_ftp fun(url, temp_file_name, initial_size, file_size, verbose, progressbar, ncols=80) # check md5sum if hash_ is not None: if verbose: tqdm.write('Verifying download hash.') md5 = md5sum(temp_file_name) if hash_ != md5: raise RuntimeError('Hash mismatch for downloaded file %s, ' 'expected %s but got %s' % (temp_file_name, hash_, md5)) shutil.move(temp_file_name, file_name) except Exception as ee: raise RuntimeError('Error while fetching file %s.' ' Dataset fetching aborted.\nError: %s' % (url, ee))
[ "def", "_fetch_file", "(", "url", ",", "file_name", ",", "resume", "=", "True", ",", "hash_", "=", "None", ",", "timeout", "=", "10.", ",", "progressbar", "=", "True", ",", "verbose", "=", "True", ")", ":", "# Adapted from NISL and MNE-python:", "# https://github.com/nisl/tutorial/blob/master/nisl/datasets.py", "# https://martinos.org/mne", "if", "hash_", "is", "not", "None", "and", "(", "not", "isinstance", "(", "hash_", ",", "string_types", ")", "or", "len", "(", "hash_", ")", "!=", "32", ")", ":", "raise", "ValueError", "(", "'Bad hash value given, should be a 32-character '", "'string:\\n%s'", "%", "(", "hash_", ",", ")", ")", "temp_file_name", "=", "file_name", "+", "\".part\"", "try", ":", "if", "'dropbox.com'", "in", "url", ":", "# Use requests to handle cookies.", "# XXX In the future, we should probably use requests everywhere.", "# Unless we want to minimize dependencies.", "try", ":", "import", "requests", "except", "ModuleNotFoundError", ":", "raise", "ValueError", "(", "'To download Dropbox links, you need to '", "'install the `requests` module.'", ")", "resp", "=", "requests", ".", "get", "(", "url", ")", "chunk_size", "=", "8192", "# 2 ** 13", "with", "open", "(", "temp_file_name", ",", "'wb'", ")", "as", "ff", ":", "for", "chunk", "in", "resp", ".", "iter_content", "(", "chunk_size", "=", "chunk_size", ")", ":", "if", "chunk", ":", "# filter out keep-alive new chunks", "ff", ".", "write", "(", "chunk", ")", "else", ":", "# Check file size and displaying it alongside the download url", "u", "=", "urllib", ".", "request", ".", "urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", "u", ".", "close", "(", ")", "# this is necessary to follow any redirects", "url", "=", "u", ".", "geturl", "(", ")", "u", "=", "urllib", ".", "request", ".", "urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", "try", ":", "file_size", "=", "int", "(", "u", ".", "headers", ".", "get", "(", "'Content-Length'", ",", "'1'", ")", ".", "strip", "(", ")", ")", "finally", ":", "u", ".", "close", "(", ")", "del", "u", "if", "verbose", ":", "tqdm", ".", "write", "(", "'Downloading data from %s (%s)\\n'", "%", "(", "url", ",", "sizeof_fmt", "(", "file_size", ")", ")", ")", "# Triage resume", "if", "not", "os", ".", "path", ".", "exists", "(", "temp_file_name", ")", ":", "resume", "=", "False", "if", "resume", ":", "with", "open", "(", "temp_file_name", ",", "'rb'", ",", "buffering", "=", "0", ")", "as", "local_file", ":", "local_file", ".", "seek", "(", "0", ",", "2", ")", "initial_size", "=", "local_file", ".", "tell", "(", ")", "del", "local_file", "else", ":", "initial_size", "=", "0", "# This should never happen if our functions work properly", "if", "initial_size", ">", "file_size", ":", "raise", "RuntimeError", "(", "'Local file (%s) is larger than remote '", "'file (%s), cannot resume download'", "%", "(", "sizeof_fmt", "(", "initial_size", ")", ",", "sizeof_fmt", "(", "file_size", ")", ")", ")", "scheme", "=", "urllib", ".", "parse", ".", "urlparse", "(", "url", ")", ".", "scheme", "fun", "=", "_get_http", "if", "scheme", "in", "(", "'http'", ",", "'https'", ")", "else", "_get_ftp", "fun", "(", "url", ",", "temp_file_name", ",", "initial_size", ",", "file_size", ",", "verbose", ",", "progressbar", ",", "ncols", "=", "80", ")", "# check md5sum", "if", "hash_", "is", "not", "None", ":", "if", "verbose", ":", "tqdm", ".", "write", "(", "'Verifying download hash.'", ")", "md5", "=", "md5sum", "(", "temp_file_name", ")", "if", "hash_", "!=", "md5", ":", "raise", "RuntimeError", "(", "'Hash 
mismatch for downloaded file %s, '", "'expected %s but got %s'", "%", "(", "temp_file_name", ",", "hash_", ",", "md5", ")", ")", "shutil", ".", "move", "(", "temp_file_name", ",", "file_name", ")", "except", "Exception", "as", "ee", ":", "raise", "RuntimeError", "(", "'Error while fetching file %s.'", "' Dataset fetching aborted.\\nError: %s'", "%", "(", "url", ",", "ee", ")", ")" ]
Load requested file, downloading it if needed or requested. Parameters ---------- url: string The url of file to be downloaded. file_name: string Name, along with the path, of where downloaded file will be saved. resume: bool, optional If true, try to resume partially downloaded files. hash_ : str | None The hash of the file to check. If None, no checking is performed. timeout : float The URL open timeout. verbose : bool Whether to print download status.
[ "Load", "requested", "file", "downloading", "it", "if", "needed", "or", "requested", "." ]
python
train
42.291667
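A hedged usage sketch of the (private) helper above; URL, target path, and hash are placeholders. When given, hash_ must be a 32-character md5 hex digest, and the download goes to a .part file so an interrupted transfer can be resumed:

    from download.download import _fetch_file

    _fetch_file(
        url="https://example.com/dataset.tar.gz",     # placeholder URL
        file_name="/tmp/dataset.tar.gz",
        resume=True,
        hash_="d41d8cd98f00b204e9800998ecf8427e",     # placeholder 32-char md5
        progressbar=True,
    )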
saltstack/salt
salt/modules/pkgng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pkgng.py#L2029-L2073
def list_locked(**kwargs):
    '''
    Query the package database for those packages which are locked against
    reinstallation, modification or deletion.

    Returns a list of package names with version strings

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_locked

    jail
        List locked packages within the specified jail

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.list_locked jail=<jail name or id>

    chroot
        List locked packages within the specified chroot (ignored if ``jail`` is
        specified)

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.list_locked chroot=/path/to/chroot

    root
        List locked packages within the specified root (ignored if ``jail`` is
        specified)

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.list_locked root=/path/to/chroot
    '''
    return ['{0}-{1}'.format(pkgname, version(pkgname, **kwargs))
            for pkgname in _lockcmd('lock', name=None, **kwargs)]
[ "def", "list_locked", "(", "*", "*", "kwargs", ")", ":", "return", "[", "'{0}-{1}'", ".", "format", "(", "pkgname", ",", "version", "(", "pkgname", ",", "*", "*", "kwargs", ")", ")", "for", "pkgname", "in", "_lockcmd", "(", "'lock'", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", "]" ]
Query the package database for those packages which are locked against reinstallation, modification or deletion.

Returns a list of package names with version strings

CLI Example:

.. code-block:: bash

    salt '*' pkg.list_locked

jail
    List locked packages within the specified jail

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_locked jail=<jail name or id>

chroot
    List locked packages within the specified chroot (ignored if ``jail`` is specified)

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_locked chroot=/path/to/chroot

root
    List locked packages within the specified root (ignored if ``jail`` is specified)

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_locked root=/path/to/chroot
[ "Query", "the", "package", "database", "those", "packages", "which", "are", "locked", "against", "reinstallation", "modification", "or", "deletion", "." ]
python
train
22.533333
Sheeprider/BitBucket-api
bitbucket/repository.py
https://github.com/Sheeprider/BitBucket-api/blob/be45515d506d87f14807a676f3c2f20d79674b75/bitbucket/repository.py#L79-L82
def create(self, repo_name, scm='git', private=True, **kwargs):
        """ Creates a new repository on own Bitbucket account and returns it."""
        url = self.bitbucket.url('CREATE_REPO')
        return self.bitbucket.dispatch('POST', url, auth=self.bitbucket.auth, name=repo_name, scm=scm, is_private=private, **kwargs)
[ "def", "create", "(", "self", ",", "repo_name", ",", "scm", "=", "'git'", ",", "private", "=", "True", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "bitbucket", ".", "url", "(", "'CREATE_REPO'", ")", "return", "self", ".", "bitbucket", ".", "dispatch", "(", "'POST'", ",", "url", ",", "auth", "=", "self", ".", "bitbucket", ".", "auth", ",", "name", "=", "repo_name", ",", "scm", "=", "scm", ",", "is_private", "=", "private", ",", "*", "*", "kwargs", ")" ]
Creates a new repository on own Bitbucket account and returns it.
[ "Creates", "a", "new", "repository", "on", "own", "Bitbucket", "account", "and", "return", "it", "." ]
python
train
80.25
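A hedged usage sketch; the client construction below is an assumption based on this library's usual entry point, and dispatch() in this library hands back a (success, result) tuple rather than raising on failure:

    from bitbucket.bitbucket import Bitbucket

    bb = Bitbucket("username", "password")   # placeholder credentials
    success, result = bb.repository.create("my-new-repo", scm="git", private=True)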
tanghaibao/goatools
goatools/grouper/grprobj_init.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/grprobj_init.py#L49-L62
def _init_usrgos(self, goids): """Return user GO IDs which have GO Terms.""" usrgos = set() goids_missing = set() _go2obj = self.gosubdag.go2obj for goid in goids: if goid in _go2obj: usrgos.add(goid) else: goids_missing.add(goid) if goids_missing: print("MISSING GO IDs: {GOs}".format(GOs=goids_missing)) print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids))) return usrgos
[ "def", "_init_usrgos", "(", "self", ",", "goids", ")", ":", "usrgos", "=", "set", "(", ")", "goids_missing", "=", "set", "(", ")", "_go2obj", "=", "self", ".", "gosubdag", ".", "go2obj", "for", "goid", "in", "goids", ":", "if", "goid", "in", "_go2obj", ":", "usrgos", ".", "add", "(", "goid", ")", "else", ":", "goids_missing", ".", "add", "(", "goid", ")", "if", "goids_missing", ":", "print", "(", "\"MISSING GO IDs: {GOs}\"", ".", "format", "(", "GOs", "=", "goids_missing", ")", ")", "print", "(", "\"{N} of {M} GO IDs ARE MISSING\"", ".", "format", "(", "N", "=", "len", "(", "goids_missing", ")", ",", "M", "=", "len", "(", "goids", ")", ")", ")", "return", "usrgos" ]
Return user GO IDs which have GO Terms.
[ "Return", "user", "GO", "IDs", "which", "have", "GO", "Terms", "." ]
python
train
37.428571
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L797-L807
def finite_pixels(self): """ Return an array of the finite pixels. Returns ------- :obj:`numpy.ndarray` Nx2 array of the finite pixels """ finite_px = np.where(np.isfinite(self.data)) finite_px = np.c_[finite_px[0], finite_px[1]] return finite_px
[ "def", "finite_pixels", "(", "self", ")", ":", "finite_px", "=", "np", ".", "where", "(", "np", ".", "isfinite", "(", "self", ".", "data", ")", ")", "finite_px", "=", "np", ".", "c_", "[", "finite_px", "[", "0", "]", ",", "finite_px", "[", "1", "]", "]", "return", "finite_px" ]
Return an array of the finite pixels. Returns ------- :obj:`numpy.ndarray` Nx2 array of the finite pixels
[ "Return", "an", "array", "of", "the", "finite", "pixels", "." ]
python
train
28.545455
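A standalone demo of the numpy idiom in the record above: np.where over an isfinite mask yields parallel row/column index arrays, and np.c_ stacks them into the Nx2 pixel array:

    import numpy as np

    data = np.array([[1.0, np.nan],
                     [np.inf, 4.0]])
    rows, cols = np.where(np.isfinite(data))   # rows=[0 1], cols=[0 1]
    pixels = np.c_[rows, cols]                 # [[0 0], [1 1]]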
DigitalGlobe/gbdxtools
gbdxtools/catalog.py
https://github.com/DigitalGlobe/gbdxtools/blob/def62f8f2d77b168aa2bd115290aaa0f9a08a4bb/gbdxtools/catalog.py#L73-L97
def get_strip_metadata(self, catID):
        '''Retrieves the strip catalog metadata given a cat ID.

        Args:
            catID (str): The source catalog ID from the platform catalog.

        Returns:
            metadata (dict): A metadata dictionary.

            TODO: have this return a class object with interesting information exposed.
        '''
        self.logger.debug('Retrieving strip catalog metadata')
        url = '%(base_url)s/record/%(catID)s?includeRelationships=false' % {
            'base_url': self.base_url, 'catID': catID
        }
        r = self.gbdx_connection.get(url)
        if r.status_code == 200:
            return r.json()['properties']
        elif r.status_code == 404:
            self.logger.debug('Strip not found: %s' % catID)
            r.raise_for_status()
        else:
            self.logger.debug('There was a problem retrieving catid: %s' % catID)
            r.raise_for_status()
[ "def", "get_strip_metadata", "(", "self", ",", "catID", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Retrieving strip catalog metadata'", ")", "url", "=", "'%(base_url)s/record/%(catID)s?includeRelationships=false'", "%", "{", "'base_url'", ":", "self", ".", "base_url", ",", "'catID'", ":", "catID", "}", "r", "=", "self", ".", "gbdx_connection", ".", "get", "(", "url", ")", "if", "r", ".", "status_code", "==", "200", ":", "return", "r", ".", "json", "(", ")", "[", "'properties'", "]", "elif", "r", ".", "status_code", "==", "404", ":", "self", ".", "logger", ".", "debug", "(", "'Strip not found: %s'", "%", "catID", ")", "r", ".", "raise_for_status", "(", ")", "else", ":", "self", ".", "logger", ".", "debug", "(", "'There was a problem retrieving catid: %s'", "%", "catID", ")", "r", ".", "raise_for_status", "(", ")" ]
Retrieves the strip catalog metadata given a cat ID.

Args:
    catID (str): The source catalog ID from the platform catalog.

Returns:
    metadata (dict): A metadata dictionary.

    TODO: have this return a class object with interesting information exposed.
[ "Retrieves", "the", "strip", "catalog", "metadata", "given", "a", "cat", "ID", "." ]
python
valid
36.68
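A hedged usage sketch; Interface() needs valid GBDX credentials and the catalog ID below is a placeholder. It assumes only that the Catalog class above is exposed as gbdx.catalog, which is this library's usual convention:

    from gbdxtools import Interface

    gbdx = Interface()                                            # needs GBDX credentials
    props = gbdx.catalog.get_strip_metadata("10400100120FEA00")   # placeholder cat ID
    print(sorted(props))                                          # available metadata keys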
mathiasertl/django-ca
ca/django_ca/models.py
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L721-L739
def max_pathlen(self): """The maximum pathlen for any intermediate CAs signed by this CA. This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an ``int`` if any parent CA has the attribute. """ pathlen = self.pathlen if self.parent is None: return pathlen max_parent = self.parent.max_pathlen if max_parent is None: return pathlen elif pathlen is None: return max_parent - 1 else: return min(self.pathlen, max_parent - 1)
[ "def", "max_pathlen", "(", "self", ")", ":", "pathlen", "=", "self", ".", "pathlen", "if", "self", ".", "parent", "is", "None", ":", "return", "pathlen", "max_parent", "=", "self", ".", "parent", ".", "max_pathlen", "if", "max_parent", "is", "None", ":", "return", "pathlen", "elif", "pathlen", "is", "None", ":", "return", "max_parent", "-", "1", "else", ":", "return", "min", "(", "self", ".", "pathlen", ",", "max_parent", "-", "1", ")" ]
The maximum pathlen for any intermediate CAs signed by this CA. This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an ``int`` if any parent CA has the attribute.
[ "The", "maximum", "pathlen", "for", "any", "intermediate", "CAs", "signed", "by", "this", "CA", "." ]
python
train
30.473684
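A minimal standalone mirror of the recursion above, using plain objects instead of Django models, to show how a pathlen constraint propagates down a chain of CAs:

    class CA:
        def __init__(self, pathlen=None, parent=None):
            self.pathlen = pathlen
            self.parent = parent

        @property
        def max_pathlen(self):
            if self.parent is None:
                return self.pathlen
            max_parent = self.parent.max_pathlen
            if max_parent is None:
                return self.pathlen
            if self.pathlen is None:
                return max_parent - 1
            return min(self.pathlen, max_parent - 1)

    root = CA(pathlen=2)
    child = CA(parent=root)                    # no pathlen of its own
    print(child.max_pathlen)                   # 1 = inherited 2, minus one level
    grandchild = CA(pathlen=3, parent=child)
    print(grandchild.max_pathlen)              # 0 = min(3, 1 - 1)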
ThreatConnect-Inc/tcex
tcex/tcex_cache.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_cache.py#L36-L48
def add(self, rid, data, raise_on_error=True):
        """Write cache data to the data store.

        Args:
            rid (str): The record identifier.
            data (dict): The record data.
            raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.

        Returns:
            object : Python request response.
        """
        cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}
        return self.ds.post(rid, cache_data, raise_on_error)
[ "def", "add", "(", "self", ",", "rid", ",", "data", ",", "raise_on_error", "=", "True", ")", ":", "cache_data", "=", "{", "'cache-date'", ":", "self", ".", "_dt_to_epoch", "(", "datetime", ".", "now", "(", ")", ")", ",", "'cache-data'", ":", "data", "}", "return", "self", ".", "ds", ".", "post", "(", "rid", ",", "cache_data", ",", "raise_on_error", ")" ]
Write cache data to the data store.

Args:
    rid (str): The record identifier.
    data (dict): The record data.
    raise_on_error (bool): If True and not r.ok this method will raise a RuntimeError.

Returns:
    object : Python request response.
[ "Write", "cache", "data", "to", "the", "data", "store", "." ]
python
train
39
hydpy-dev/hydpy
hydpy/exe/servertools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/exe/servertools.py#L933-L940
def GET_close_server(self) -> None:
        """Stop and close the *HydPy* server."""
        def _close_server():
            self.server.shutdown()
            self.server.server_close()
        shutter = threading.Thread(target=_close_server)
        shutter.daemon = True
        shutter.start()
[ "def", "GET_close_server", "(", "self", ")", "->", "None", ":", "def", "_close_server", "(", ")", ":", "self", ".", "server", ".", "shutdown", "(", ")", "self", ".", "server", ".", "server_close", "(", ")", "shutter", "=", "threading", ".", "Thread", "(", "target", "=", "_close_server", ")", "shutter", ".", "deamon", "=", "True", "shutter", ".", "start", "(", ")" ]
Stop and close the *HydPy* server.
[ "Stop", "and", "close", "the", "*", "HydPy", "*", "server", "." ]
python
train
36.375
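A standalone sketch of the shutdown pattern above, using only the standard library (port and route are placeholders). socketserver's shutdown() deadlocks when called from the thread that is running serve_forever(), which is exactly why the record hands the call off to a helper thread:

    import threading
    from http.server import BaseHTTPRequestHandler, HTTPServer

    class Handler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.end_headers()
            if self.path == "/close_server":
                # shutdown() must run outside the serving thread.
                threading.Thread(target=self.server.shutdown, daemon=True).start()

    server = HTTPServer(("127.0.0.1", 8080), Handler)
    server.serve_forever()    # returns once /close_server has been requested
    server.server_close()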
saltstack/salt
salt/modules/smartos_vmadm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smartos_vmadm.py#L492-L532
def info(vm, info_type='all', key='uuid'): ''' Lookup info on running kvm vm : string vm to be targeted info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc] info type to return key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc salt '*' vmadm.info nacl key=alias salt '*' vmadm.info nacl vnc key=alias ''' ret = {} if info_type not in ['all', 'block', 'blockstats', 'chardev', 'cpus', 'kvm', 'pci', 'spice', 'version', 'vnc']: ret['Error'] = 'Requested info_type is not available' return ret if key not in ['uuid', 'alias', 'hostname']: ret['Error'] = 'Key must be either uuid, alias or hostname' return ret vm = lookup('{0}={1}'.format(key, vm), one=True) if 'Error' in vm: return vm # vmadm info <uuid> [type,...] cmd = 'vmadm info {uuid} {type}'.format( uuid=vm, type=info_type ) res = __salt__['cmd.run_all'](cmd) retcode = res['retcode'] if retcode != 0: ret['Error'] = res['stderr'] if 'stderr' in res else _exit_status(retcode) return ret return salt.utils.json.loads(res['stdout'])
[ "def", "info", "(", "vm", ",", "info_type", "=", "'all'", ",", "key", "=", "'uuid'", ")", ":", "ret", "=", "{", "}", "if", "info_type", "not", "in", "[", "'all'", ",", "'block'", ",", "'blockstats'", ",", "'chardev'", ",", "'cpus'", ",", "'kvm'", ",", "'pci'", ",", "'spice'", ",", "'version'", ",", "'vnc'", "]", ":", "ret", "[", "'Error'", "]", "=", "'Requested info_type is not available'", "return", "ret", "if", "key", "not", "in", "[", "'uuid'", ",", "'alias'", ",", "'hostname'", "]", ":", "ret", "[", "'Error'", "]", "=", "'Key must be either uuid, alias or hostname'", "return", "ret", "vm", "=", "lookup", "(", "'{0}={1}'", ".", "format", "(", "key", ",", "vm", ")", ",", "one", "=", "True", ")", "if", "'Error'", "in", "vm", ":", "return", "vm", "# vmadm info <uuid> [type,...]", "cmd", "=", "'vmadm info {uuid} {type}'", ".", "format", "(", "uuid", "=", "vm", ",", "type", "=", "info_type", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "retcode", "=", "res", "[", "'retcode'", "]", "if", "retcode", "!=", "0", ":", "ret", "[", "'Error'", "]", "=", "res", "[", "'stderr'", "]", "if", "'stderr'", "in", "res", "else", "_exit_status", "(", "retcode", ")", "return", "ret", "return", "salt", ".", "utils", ".", "json", ".", "loads", "(", "res", "[", "'stdout'", "]", ")" ]
Lookup info on running kvm vm : string vm to be targeted info_type : string [all|block|blockstats|chardev|cpus|kvm|pci|spice|version|vnc] info type to return key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 salt '*' vmadm.info 186da9ab-7392-4f55-91a5-b8f1fe770543 vnc salt '*' vmadm.info nacl key=alias salt '*' vmadm.info nacl vnc key=alias
[ "Lookup", "info", "on", "running", "kvm" ]
python
train
33.02439
viveksck/changepoint
changepoint/mean_shift_model.py
https://github.com/viveksck/changepoint/blob/001792cb148c991ec704463d3213997ebb7171af/changepoint/mean_shift_model.py#L20-L25
def get_ts_stats_significance(self, x, ts, stat_ts_func, null_ts_func, B=1000, permute_fast=False, label_ts=''): """ Returns the statistics, pvalues and the actual number of bootstrap samples. """ stats_ts, pvals, nums = ts_stats_significance( ts, stat_ts_func, null_ts_func, B=B, permute_fast=permute_fast) return stats_ts, pvals, nums
[ "def", "get_ts_stats_significance", "(", "self", ",", "x", ",", "ts", ",", "stat_ts_func", ",", "null_ts_func", ",", "B", "=", "1000", ",", "permute_fast", "=", "False", ",", "label_ts", "=", "''", ")", ":", "stats_ts", ",", "pvals", ",", "nums", "=", "ts_stats_significance", "(", "ts", ",", "stat_ts_func", ",", "null_ts_func", ",", "B", "=", "B", ",", "permute_fast", "=", "permute_fast", ")", "return", "stats_ts", ",", "pvals", ",", "nums" ]
Returns the statistics, pvalues and the actual number of bootstrap samples.
[ "Returns", "the", "statistics", "pvalues", "and", "the", "actual", "number", "of", "bootstrap", "samples", "." ]
python
train
63.166667
vertexproject/synapse
synapse/glob.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/glob.py#L82-L115
def synchelp(f):
    '''
    The synchelp decorator allows the transparent execution of
    a coroutine using the global loop from a thread other than
    the event loop.  In both use cases, the actual work is done
    by the global event loop.

    Examples:

        Use as a decorator::

            @s_glob.synchelp
            async def stuff(x, y):
                await dostuff()

        Calling the stuff function as regular async code using the standard await syntax::

            valu = await stuff(x, y)

        Calling the stuff function as regular sync code outside of the event loop thread::

            valu = stuff(x, y)

    '''
    def wrap(*args, **kwargs):
        coro = f(*args, **kwargs)

        if not iAmLoop():
            return sync(coro)

        return coro

    return wrap
[ "def", "synchelp", "(", "f", ")", ":", "def", "wrap", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "coro", "=", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "iAmLoop", "(", ")", ":", "return", "sync", "(", "coro", ")", "return", "coro", "return", "wrap" ]
The synchelp decorator allows the transparent execution of a coroutine using the global loop from a thread other than the event loop. In both use cases, the actual work is done by the global event loop.

Examples:

    Use as a decorator::

        @s_glob.synchelp
        async def stuff(x, y):
            await dostuff()

    Calling the stuff function as regular async code using the standard await syntax::

        valu = await stuff(x, y)

    Calling the stuff function as regular sync code outside of the event loop thread::

        valu = stuff(x, y)
[ "The", "synchelp", "decorator", "allows", "the", "transparent", "execution", "of", "a", "coroutine", "using", "the", "global", "loop", "from", "a", "thread", "other", "than", "the", "event", "loop", ".", "In", "both", "use", "cases", "teh", "actual", "work", "is", "done", "by", "the", "global", "event", "loop", "." ]
python
train
22.852941
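A minimal standalone sketch of the same dual sync/async call pattern. The loop detection below uses asyncio.get_running_loop() and asyncio.run() as stand-ins for synapse's global-loop helpers iAmLoop()/sync(), an assumption made only to keep the example self-contained:

    import asyncio

    def synchelp(f):
        def wrap(*args, **kwargs):
            coro = f(*args, **kwargs)
            try:
                asyncio.get_running_loop()   # already on the loop: hand back the coroutine
                return coro
            except RuntimeError:
                return asyncio.run(coro)     # called from sync code: run it to completion

        return wrap

    @synchelp
    async def stuff(x, y):
        await asyncio.sleep(0)
        return x + y

    print(stuff(1, 2))   # sync call outside any loop -> 3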
bokeh/bokeh
bokeh/server/views/ws.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/server/views/ws.py#L194-L230
def on_message(self, fragment): ''' Process an individual wire protocol fragment. The websocket RFC specifies opcodes for distinguishing text frames from binary frames. Tornado passes us either a text or binary string depending on that opcode, we have to look at the type of the fragment to see what we got. Args: fragment (unicode or bytes) : wire fragment to process ''' # We shouldn't throw exceptions from on_message because the caller is # just Tornado and it doesn't know what to do with them other than # report them as an unhandled Future try: message = yield self._receive(fragment) except Exception as e: # If you go look at self._receive, it's catching the # expected error types... here we have something weird. log.error("Unhandled exception receiving a message: %r: %r", e, fragment, exc_info=True) self._internal_error("server failed to parse a message") try: if message: if _message_test_port is not None: _message_test_port.received.append(message) work = yield self._handle(message) if work: yield self._schedule(work) except Exception as e: log.error("Handler or its work threw an exception: %r: %r", e, message, exc_info=True) self._internal_error("server failed to handle a message") raise gen.Return(None)
[ "def", "on_message", "(", "self", ",", "fragment", ")", ":", "# We shouldn't throw exceptions from on_message because the caller is", "# just Tornado and it doesn't know what to do with them other than", "# report them as an unhandled Future", "try", ":", "message", "=", "yield", "self", ".", "_receive", "(", "fragment", ")", "except", "Exception", "as", "e", ":", "# If you go look at self._receive, it's catching the", "# expected error types... here we have something weird.", "log", ".", "error", "(", "\"Unhandled exception receiving a message: %r: %r\"", ",", "e", ",", "fragment", ",", "exc_info", "=", "True", ")", "self", ".", "_internal_error", "(", "\"server failed to parse a message\"", ")", "try", ":", "if", "message", ":", "if", "_message_test_port", "is", "not", "None", ":", "_message_test_port", ".", "received", ".", "append", "(", "message", ")", "work", "=", "yield", "self", ".", "_handle", "(", "message", ")", "if", "work", ":", "yield", "self", ".", "_schedule", "(", "work", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "\"Handler or its work threw an exception: %r: %r\"", ",", "e", ",", "message", ",", "exc_info", "=", "True", ")", "self", ".", "_internal_error", "(", "\"server failed to handle a message\"", ")", "raise", "gen", ".", "Return", "(", "None", ")" ]
Process an individual wire protocol fragment. The websocket RFC specifies opcodes for distinguishing text frames from binary frames. Tornado passes us either a text or binary string depending on that opcode, we have to look at the type of the fragment to see what we got. Args: fragment (unicode or bytes) : wire fragment to process
[ "Process", "an", "individual", "wire", "protocol", "fragment", "." ]
python
train
40.918919
radujica/baloo
baloo/weld/weld_utils.py
https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/weld/weld_utils.py#L182-L211
def weld_combine_scalars(scalars, weld_type): """Combine column-wise aggregations (so resulting scalars) into a single array. Parameters ---------- scalars : tuple of WeldObjects WeldObjects to combine. weld_type : WeldType The Weld type of the result. Currently expecting scalars to be of the same type. Returns ------- WeldObject Representation of this computation. """ weld_obj = create_empty_weld_object() obj_ids = (get_weld_obj_id(weld_obj, scalar) for scalar in scalars) merges = '\n'.join(('let res = merge(res, {});'.format(obj_id) for obj_id in obj_ids)) weld_template = """let res = appender[{type}]; {merges} result(res) """ weld_obj.weld_code = weld_template.format(type=weld_type, merges=merges) return weld_obj
[ "def", "weld_combine_scalars", "(", "scalars", ",", "weld_type", ")", ":", "weld_obj", "=", "create_empty_weld_object", "(", ")", "obj_ids", "=", "(", "get_weld_obj_id", "(", "weld_obj", ",", "scalar", ")", "for", "scalar", "in", "scalars", ")", "merges", "=", "'\\n'", ".", "join", "(", "(", "'let res = merge(res, {});'", ".", "format", "(", "obj_id", ")", "for", "obj_id", "in", "obj_ids", ")", ")", "weld_template", "=", "\"\"\"let res = appender[{type}];\n{merges}\nresult(res)\n\"\"\"", "weld_obj", ".", "weld_code", "=", "weld_template", ".", "format", "(", "type", "=", "weld_type", ",", "merges", "=", "merges", ")", "return", "weld_obj" ]
Combine column-wise aggregations (so resulting scalars) into a single array. Parameters ---------- scalars : tuple of WeldObjects WeldObjects to combine. weld_type : WeldType The Weld type of the result. Currently expecting scalars to be of the same type. Returns ------- WeldObject Representation of this computation.
[ "Combine", "column", "-", "wise", "aggregations", "(", "so", "resulting", "scalars", ")", "into", "a", "single", "array", "." ]
python
train
27.766667
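A hypothetical call site for weld_combine_scalars, assuming min_obj and max_obj are WeldObjects produced by per-column f64 reductions (the names and types are illustrative, not verified against baloo):

# Fold two scalar aggregation results into a single vec[f64].
combined = weld_combine_scalars((min_obj, max_obj), 'f64')
# combined.weld_code now holds the appender/merge template shown above.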
Tanganelli/CoAPthon3
coapthon/layers/cachelayer.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/layers/cachelayer.py#L57-L70
def send_response(self, transaction): """ updates the cache with the response if there was a cache miss :param transaction: :return: """ if transaction.cacheHit is False: """ handling response based on the code """ logger.debug("handling response") self._handle_response(transaction) return transaction
[ "def", "send_response", "(", "self", ",", "transaction", ")", ":", "if", "transaction", ".", "cacheHit", "is", "False", ":", "\"\"\"\n handling response based on the code\n \"\"\"", "logger", ".", "debug", "(", "\"handling response\"", ")", "self", ".", "_handle_response", "(", "transaction", ")", "return", "transaction" ]
updates the cache with the response if there was a cache miss :param transaction: :return:
[ "updates", "the", "cache", "with", "the", "response", "if", "there", "was", "a", "cache", "miss" ]
python
train
29
angr/angr
angr/calling_conventions.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/calling_conventions.py#L467-L483
def arg(self, state, index, stack_base=None): """ Returns a bitvector expression representing the nth argument of a function. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless you've customized this CC. """ session = self.arg_session if self.args is None: arg_loc = [session.next_arg(False) for _ in range(index + 1)][-1] else: arg_loc = self.args[index] return arg_loc.get_value(state, stack_base=stack_base)
[ "def", "arg", "(", "self", ",", "state", ",", "index", ",", "stack_base", "=", "None", ")", ":", "session", "=", "self", ".", "arg_session", "if", "self", ".", "args", "is", "None", ":", "arg_loc", "=", "[", "session", ".", "next_arg", "(", "False", ")", "for", "_", "in", "range", "(", "index", "+", "1", ")", "]", "[", "-", "1", "]", "else", ":", "arg_loc", "=", "self", ".", "args", "[", "index", "]", "return", "arg_loc", ".", "get_value", "(", "state", ",", "stack_base", "=", "stack_base", ")" ]
Returns a bitvector expression representing the nth argument of a function. `stack_base` is an optional pointer to the top of the stack at the function start. If it is not specified, use the current stack pointer. WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless you've customized this CC.
[ "Returns", "a", "bitvector", "expression", "representing", "the", "nth", "argument", "of", "a", "function", "." ]
python
train
41.529412
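A sketch of reading arguments with the method above; cc (a calling-convention object), state, and frame_sp are assumed to already exist and are not taken from the record:

arg0 = cc.arg(state, 0)                       # resolved against the current sp
arg1 = cc.arg(state, 1, stack_base=frame_sp)  # resolved against a pinned base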
Azure/azure-cosmos-python
azure/cosmos/cosmos_client.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L1464-L1488
def UpsertStoredProcedure(self, collection_link, sproc, options=None): """Upserts a stored procedure in a collection. :param str collection_link: The link to the document collection. :param str sproc: :param dict options: The request options for the request. :return: The upserted Stored Procedure. :rtype: dict """ if options is None: options = {} collection_id, path, sproc = self._GetContainerIdWithPathForSproc(collection_link, sproc) return self.Upsert(sproc, path, 'sprocs', collection_id, None, options)
[ "def", "UpsertStoredProcedure", "(", "self", ",", "collection_link", ",", "sproc", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "options", "=", "{", "}", "collection_id", ",", "path", ",", "sproc", "=", "self", ".", "_GetContainerIdWithPathForSproc", "(", "collection_link", ",", "sproc", ")", "return", "self", ".", "Upsert", "(", "sproc", ",", "path", ",", "'sprocs'", ",", "collection_id", ",", "None", ",", "options", ")" ]
Upserts a stored procedure in a collection. :param str collection_link: The link to the document collection. :param str sproc: :param dict options: The request options for the request. :return: The upserted Stored Procedure. :rtype: dict
[ "Upserts", "a", "stored", "procedure", "in", "a", "collection", "." ]
python
train
30.6
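A hypothetical upsert against a collection link; the link format and the stored-procedure shape follow Cosmos DB REST conventions and are assumptions, not part of the record:

# client is assumed to be an initialised CosmosClient instance.
sproc = {
    'id': 'greetSproc',  # illustrative id
    'body': 'function () { getContext().getResponse().setBody("hello"); }',
}
created = client.UpsertStoredProcedure('dbs/mydb/colls/mycoll', sproc)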
google/identity-toolkit-python-client
identitytoolkit/gitkitclient.py
https://github.com/google/identity-toolkit-python-client/blob/4cfe3013569c21576daa5d22ad21f9f4f8b30c4d/identitytoolkit/gitkitclient.py#L343-L390
def GetOobResult(self, param, user_ip, gitkit_token=None):
    """Gets out-of-band code for ResetPassword/ChangeEmail request.

    Args:
      param: dict of HTTP POST params
      user_ip: string, end user's IP address
      gitkit_token: string, the gitkit token if user logged in

    Returns:
      A dict of {
        email: user email who initializes the request
        new_email: the requested new email, for ChangeEmail action only
        oob_link: the generated link to be sent to user's email
        oob_code: the one time out-of-band code
        action: OobAction
        response_body: the http body to be returned to Gitkit widget
      }
    """
    if 'action' in param:
      try:
        if param['action'] == GitkitClient.RESET_PASSWORD_ACTION:
          request = self._PasswordResetRequest(param, user_ip)
          oob_code, oob_link = self._BuildOobLink(request, param['action'])
          return {
              'action': GitkitClient.RESET_PASSWORD_ACTION,
              'email': param['email'],
              'oob_link': oob_link,
              'oob_code': oob_code,
              'response_body': simplejson.dumps({'success': True})
          }
        elif param['action'] == GitkitClient.CHANGE_EMAIL_ACTION:
          if not gitkit_token:
            return self._FailureOobResponse('login is required')
          request = self._ChangeEmailRequest(param, user_ip, gitkit_token)
          oob_code, oob_link = self._BuildOobLink(request, param['action'])
          return {
              'action': GitkitClient.CHANGE_EMAIL_ACTION,
              'email': param['oldEmail'],
              'new_email': param['newEmail'],
              'oob_link': oob_link,
              'oob_code': oob_code,
              'response_body': simplejson.dumps({'success': True})
          }
      except errors.GitkitClientError as error:
        return self._FailureOobResponse(error.value)
    return self._FailureOobResponse('unknown request type')
[ "def", "GetOobResult", "(", "self", ",", "param", ",", "user_ip", ",", "gitkit_token", "=", "None", ")", ":", "if", "'action'", "in", "param", ":", "try", ":", "if", "param", "[", "'action'", "]", "==", "GitkitClient", ".", "RESET_PASSWORD_ACTION", ":", "request", "=", "self", ".", "_PasswordResetRequest", "(", "param", ",", "user_ip", ")", "oob_code", ",", "oob_link", "=", "self", ".", "_BuildOobLink", "(", "request", ",", "param", "[", "'action'", "]", ")", "return", "{", "'action'", ":", "GitkitClient", ".", "RESET_PASSWORD_ACTION", ",", "'email'", ":", "param", "[", "'email'", "]", ",", "'oob_link'", ":", "oob_link", ",", "'oob_code'", ":", "oob_code", ",", "'response_body'", ":", "simplejson", ".", "dumps", "(", "{", "'success'", ":", "True", "}", ")", "}", "elif", "param", "[", "'action'", "]", "==", "GitkitClient", ".", "CHANGE_EMAIL_ACTION", ":", "if", "not", "gitkit_token", ":", "return", "self", ".", "_FailureOobResponse", "(", "'login is required'", ")", "request", "=", "self", ".", "_ChangeEmailRequest", "(", "param", ",", "user_ip", ",", "gitkit_token", ")", "oob_code", ",", "oob_link", "=", "self", ".", "_BuildOobLink", "(", "request", ",", "param", "[", "'action'", "]", ")", "return", "{", "'action'", ":", "GitkitClient", ".", "CHANGE_EMAIL_ACTION", ",", "'email'", ":", "param", "[", "'oldEmail'", "]", ",", "'new_email'", ":", "param", "[", "'newEmail'", "]", ",", "'oob_link'", ":", "oob_link", ",", "'oob_code'", ":", "oob_code", ",", "'response_body'", ":", "simplejson", ".", "dumps", "(", "{", "'success'", ":", "True", "}", ")", "}", "except", "errors", ".", "GitkitClientError", "as", "error", ":", "return", "self", ".", "_FailureOobResponse", "(", "error", ".", "value", ")", "return", "self", ".", "_FailureOobResponse", "(", "'unknown request type'", ")" ]
Gets out-of-band code for ResetPassword/ChangeEmail request.

    Args:
      param: dict of HTTP POST params
      user_ip: string, end user's IP address
      gitkit_token: string, the gitkit token if user logged in

    Returns:
      A dict of {
        email: user email who initializes the request
        new_email: the requested new email, for ChangeEmail action only
        oob_link: the generated link to be sent to user's email
        oob_code: the one time out-of-band code
        action: OobAction
        response_body: the http body to be returned to Gitkit widget
      }
[ "Gets", "out", "-", "of", "-", "band", "code", "for", "ResetPassword", "/", "ChangeEmail", "request", "." ]
python
train
42.041667
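A sketch of a password-reset callback using the method above; gitkit_client and send_email are assumed objects, and the param keys are read off the record's code rather than a documented API:

param = {'action': GitkitClient.RESET_PASSWORD_ACTION,
         'email': 'user@example.com'}
result = gitkit_client.GetOobResult(param, user_ip='203.0.113.7')
if result.get('oob_link'):
    send_email(result['email'], result['oob_link'])  # hypothetical mailer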
robotframework/Rammbock
src/Rammbock/core.py
https://github.com/robotframework/Rammbock/blob/c906058d055a6f7c68fe1a6096d78c2e3f642b1c/src/Rammbock/core.py#L136-L147
def reset_rammbock(self): """Closes all connections, deletes all servers, clients, and protocols. You should call this method before exiting your test run. This will close all the connections and the ports will therefore be available for reuse faster. """ for client in self._clients: client.close() for server in self._servers: server.close() self._init_caches()
[ "def", "reset_rammbock", "(", "self", ")", ":", "for", "client", "in", "self", ".", "_clients", ":", "client", ".", "close", "(", ")", "for", "server", "in", "self", ".", "_servers", ":", "server", ".", "close", "(", ")", "self", ".", "_init_caches", "(", ")" ]
Closes all connections, deletes all servers, clients, and protocols. You should call this method before exiting your test run. This will close all the connections and the ports will therefore be available for reuse faster.
[ "Closes", "all", "connections", "deletes", "all", "servers", "clients", "and", "protocols", "." ]
python
train
36.75
bcbio/bcbio-nextgen
bcbio/variation/vardict.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vardict.py#L295-L360
def _run_vardict_paired(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Detect variants with Vardict. This is used for paired tumor / normal samples. """ config = items[0]["config"] if out_file is None: out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not utils.file_exists(out_file): with file_transaction(items[0], out_file) as tx_out_file: vrs = bedutils.population_variant_regions(items) target = shared.subset_variant_regions(vrs, region, out_file, items=items, do_merge=True) paired = vcfutils.get_paired_bams(align_bams, items) if not _is_bed_file(target): vcfutils.write_empty_vcf(tx_out_file, config, samples=[x for x in [paired.tumor_name, paired.normal_name] if x]) else: if not paired.normal_bam: ann_file = _run_vardict_caller(align_bams, items, ref_file, assoc_files, region, out_file) return ann_file vardict = get_vardict_command(items[0]) vcfstreamsort = config_utils.get_program("vcfstreamsort", config) compress_cmd = "| bgzip -c" if out_file.endswith("gz") else "" freq = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0 # merge bed file regions as amplicon VarDict is only supported in single sample mode opts, var2vcf_opts = _vardict_options_from_config(items, config, out_file, target) fix_ambig_ref = vcfutils.fix_ambiguous_cl() fix_ambig_alt = vcfutils.fix_ambiguous_cl(5) remove_dup = vcfutils.remove_dup_cl() if any("vardict_somatic_filter" in tz.get_in(("config", "algorithm", "tools_off"), data, []) for data in items): somatic_filter = "" freq_filter = "" else: var2vcf_opts += " -M " # this makes VarDict soft filter non-differential variants somatic_filter = ("| sed 's/\\\\.*Somatic\\\\/Somatic/' " "| sed 's/REJECT,Description=\".*\">/REJECT,Description=\"Not Somatic via VarDict\">/' " """| %s -c 'from bcbio.variation import freebayes; """ """freebayes.call_somatic("%s", "%s")' """ % (sys.executable, paired.tumor_name, paired.normal_name)) freq_filter = ("| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \".*Somatic\"' 2> /dev/null " "| %s -x 'bcbio.variation.vardict.add_db_germline_flag(x)' " "| %s " "| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \"%s\")'" % (os.path.join(os.path.dirname(sys.executable), "py"), _lowfreq_linear_filter(0, True), os.path.join(os.path.dirname(sys.executable), "py"), 0, bam.aligner_from_header(paired.tumor_bam))) jvm_opts = _get_jvm_opts(items[0], tx_out_file) py_cl = os.path.join(utils.get_bcbio_bin(), "py") setup = ("%s && unset JAVA_HOME &&" % utils.get_R_exports()) contig_cl = vcfutils.add_contig_to_header_cl(ref_file, tx_out_file) cmd = ("{setup}{jvm_opts}{vardict} -G {ref_file} " "-N {paired.tumor_name} -b \"{paired.tumor_bam}|{paired.normal_bam}\" {opts} " "| awk 'NF>=48' | testsomatic.R " "| var2vcf_paired.pl -P 0.9 -m 4.25 {var2vcf_opts} " "-N \"{paired.tumor_name}|{paired.normal_name}\" " "| {contig_cl} {freq_filter} " "| bcftools filter -i 'QUAL >= 0' " "{somatic_filter} | {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} " "{compress_cmd} > {tx_out_file}") do.run(cmd.format(**locals()), "Genotyping with VarDict: Inference", {}) return out_file
[ "def", "_run_vardict_paired", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", "=", "None", ",", "out_file", "=", "None", ")", ":", "config", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "if", "out_file", "is", "None", ":", "out_file", "=", "\"%s-paired-variants.vcf.gz\"", "%", "os", ".", "path", ".", "splitext", "(", "align_bams", "[", "0", "]", ")", "[", "0", "]", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "items", "[", "0", "]", ",", "out_file", ")", "as", "tx_out_file", ":", "vrs", "=", "bedutils", ".", "population_variant_regions", "(", "items", ")", "target", "=", "shared", ".", "subset_variant_regions", "(", "vrs", ",", "region", ",", "out_file", ",", "items", "=", "items", ",", "do_merge", "=", "True", ")", "paired", "=", "vcfutils", ".", "get_paired_bams", "(", "align_bams", ",", "items", ")", "if", "not", "_is_bed_file", "(", "target", ")", ":", "vcfutils", ".", "write_empty_vcf", "(", "tx_out_file", ",", "config", ",", "samples", "=", "[", "x", "for", "x", "in", "[", "paired", ".", "tumor_name", ",", "paired", ".", "normal_name", "]", "if", "x", "]", ")", "else", ":", "if", "not", "paired", ".", "normal_bam", ":", "ann_file", "=", "_run_vardict_caller", "(", "align_bams", ",", "items", ",", "ref_file", ",", "assoc_files", ",", "region", ",", "out_file", ")", "return", "ann_file", "vardict", "=", "get_vardict_command", "(", "items", "[", "0", "]", ")", "vcfstreamsort", "=", "config_utils", ".", "get_program", "(", "\"vcfstreamsort\"", ",", "config", ")", "compress_cmd", "=", "\"| bgzip -c\"", "if", "out_file", ".", "endswith", "(", "\"gz\"", ")", "else", "\"\"", "freq", "=", "float", "(", "utils", ".", "get_in", "(", "config", ",", "(", "\"algorithm\"", ",", "\"min_allele_fraction\"", ")", ",", "10", ")", ")", "/", "100.0", "# merge bed file regions as amplicon VarDict is only supported in single sample mode", "opts", ",", "var2vcf_opts", "=", "_vardict_options_from_config", "(", "items", ",", "config", ",", "out_file", ",", "target", ")", "fix_ambig_ref", "=", "vcfutils", ".", "fix_ambiguous_cl", "(", ")", "fix_ambig_alt", "=", "vcfutils", ".", "fix_ambiguous_cl", "(", "5", ")", "remove_dup", "=", "vcfutils", ".", "remove_dup_cl", "(", ")", "if", "any", "(", "\"vardict_somatic_filter\"", "in", "tz", ".", "get_in", "(", "(", "\"config\"", ",", "\"algorithm\"", ",", "\"tools_off\"", ")", ",", "data", ",", "[", "]", ")", "for", "data", "in", "items", ")", ":", "somatic_filter", "=", "\"\"", "freq_filter", "=", "\"\"", "else", ":", "var2vcf_opts", "+=", "\" -M \"", "# this makes VarDict soft filter non-differential variants", "somatic_filter", "=", "(", "\"| sed 's/\\\\\\\\.*Somatic\\\\\\\\/Somatic/' \"", "\"| sed 's/REJECT,Description=\\\".*\\\">/REJECT,Description=\\\"Not Somatic via VarDict\\\">/' \"", "\"\"\"| %s -c 'from bcbio.variation import freebayes; \"\"\"", "\"\"\"freebayes.call_somatic(\"%s\", \"%s\")' \"\"\"", "%", "(", "sys", ".", "executable", ",", "paired", ".", "tumor_name", ",", "paired", ".", "normal_name", ")", ")", "freq_filter", "=", "(", "\"| bcftools filter -m '+' -s 'REJECT' -e 'STATUS !~ \\\".*Somatic\\\"' 2> /dev/null \"", "\"| %s -x 'bcbio.variation.vardict.add_db_germline_flag(x)' \"", "\"| %s \"", "\"| %s -x 'bcbio.variation.vardict.depth_freq_filter(x, %s, \\\"%s\\\")'\"", "%", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "sys", ".", "executable", ")", ",", "\"py\"", ")", ",", "_lowfreq_linear_filter", "(", "0", ",", "True", ")", ",", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "sys", ".", "executable", ")", ",", "\"py\"", ")", ",", "0", ",", "bam", ".", "aligner_from_header", "(", "paired", ".", "tumor_bam", ")", ")", ")", "jvm_opts", "=", "_get_jvm_opts", "(", "items", "[", "0", "]", ",", "tx_out_file", ")", "py_cl", "=", "os", ".", "path", ".", "join", "(", "utils", ".", "get_bcbio_bin", "(", ")", ",", "\"py\"", ")", "setup", "=", "(", "\"%s && unset JAVA_HOME &&\"", "%", "utils", ".", "get_R_exports", "(", ")", ")", "contig_cl", "=", "vcfutils", ".", "add_contig_to_header_cl", "(", "ref_file", ",", "tx_out_file", ")", "cmd", "=", "(", "\"{setup}{jvm_opts}{vardict} -G {ref_file} \"", "\"-N {paired.tumor_name} -b \\\"{paired.tumor_bam}|{paired.normal_bam}\\\" {opts} \"", "\"| awk 'NF>=48' | testsomatic.R \"", "\"| var2vcf_paired.pl -P 0.9 -m 4.25 {var2vcf_opts} \"", "\"-N \\\"{paired.tumor_name}|{paired.normal_name}\\\" \"", "\"| {contig_cl} {freq_filter} \"", "\"| bcftools filter -i 'QUAL >= 0' \"", "\"{somatic_filter} | {fix_ambig_ref} | {fix_ambig_alt} | {remove_dup} | {vcfstreamsort} \"", "\"{compress_cmd} > {tx_out_file}\"", ")", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Genotyping with VarDict: Inference\"", ",", "{", "}", ")", "return", "out_file" ]
Detect variants with Vardict. This is used for paired tumor / normal samples.
[ "Detect", "variants", "with", "Vardict", "." ]
python
train
66.984848
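One conversion in the record above is easy to check by hand: min_allele_fraction is configured as a percentage and divided by 100.0 before reaching VarDict, with 10 as the get_in() fallback default:

min_allele_fraction = 10            # percent, the documented fallback above
freq = float(min_allele_fraction) / 100.0
assert freq == 0.1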
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L109-L111
def locked_execute(self, sql, parameters = None, cursorClass = DictCursor, quiet = False): '''We are lock-happy here but SQL performance is not currently an issue daemon-side.''' return self.execute(sql, parameters, cursorClass, quiet = quiet, locked = True)
[ "def", "locked_execute", "(", "self", ",", "sql", ",", "parameters", "=", "None", ",", "cursorClass", "=", "DictCursor", ",", "quiet", "=", "False", ")", ":", "return", "self", ".", "execute", "(", "sql", ",", "parameters", ",", "cursorClass", ",", "quiet", "=", "quiet", ",", "locked", "=", "True", ")" ]
We are lock-happy here but SQL performance is not currently an issue daemon-side.
[ "We", "are", "lock", "-", "happy", "here", "but", "SQL", "performance", "is", "not", "currently", "an", "issue", "daemon", "-", "side", "." ]
python
train
87
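A sketch of the lock-happy call described above; db is assumed to be an instance of the class defining locked_execute, and the query is purely illustrative:

rows = db.locked_execute("SELECT job_id FROM jobs WHERE status=%s",
                         parameters=("queued",))
# rows come back as dicts because cursorClass defaults to DictCursor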
ome/omego
omego/upgrade.py
https://github.com/ome/omego/blob/2dadbf3c6342b6c995f9e0dceaf3c0b7fab030fb/omego/upgrade.py#L75-L132
def _handle_args(self, cmd, args): """ We need to support deprecated behaviour for now which makes this quite complicated Current behaviour: - install: Installs a new server, existing server causes an error - install --upgrade: Installs or upgrades a server - install --managedb: Automatically initialise or upgrade the db Deprecated: - install --upgradedb --initdb: Replaced by install --managedb - install --upgradedb: upgrade the db, must exist - install --initdb: initialise the db - upgrade: Upgrades a server, must already exist - upgrade --upgradedb: Automatically upgrade the db returns: - Modified args object, flag to indicate new/existing/auto install """ if cmd == 'install': if args.upgrade: # Current behaviour: install or upgrade if args.initdb or args.upgradedb: raise Stop(10, ( 'Deprecated --initdb --upgradedb flags ' 'are incompatible with --upgrade')) newinstall = None else: # Current behaviour: Server must not exist newinstall = True if args.managedb: # Current behaviour if args.initdb or args.upgradedb: raise Stop(10, ( 'Deprecated --initdb --upgradedb flags ' 'are incompatible with --managedb')) args.initdb = True args.upgradedb = True else: if args.initdb or args.upgradedb: log.warn('--initdb and --upgradedb are deprecated, ' 'use --managedb') elif cmd == 'upgrade': # Deprecated behaviour log.warn( '"omero upgrade" is deprecated, use "omego install --upgrade"') cmd = 'install' args.upgrade = True # Deprecated behaviour: Server must exist newinstall = False else: raise Exception('Unexpected command: %s' % cmd) return args, newinstall
[ "def", "_handle_args", "(", "self", ",", "cmd", ",", "args", ")", ":", "if", "cmd", "==", "'install'", ":", "if", "args", ".", "upgrade", ":", "# Current behaviour: install or upgrade", "if", "args", ".", "initdb", "or", "args", ".", "upgradedb", ":", "raise", "Stop", "(", "10", ",", "(", "'Deprecated --initdb --upgradedb flags '", "'are incompatible with --upgrade'", ")", ")", "newinstall", "=", "None", "else", ":", "# Current behaviour: Server must not exist", "newinstall", "=", "True", "if", "args", ".", "managedb", ":", "# Current behaviour", "if", "args", ".", "initdb", "or", "args", ".", "upgradedb", ":", "raise", "Stop", "(", "10", ",", "(", "'Deprecated --initdb --upgradedb flags '", "'are incompatible with --managedb'", ")", ")", "args", ".", "initdb", "=", "True", "args", ".", "upgradedb", "=", "True", "else", ":", "if", "args", ".", "initdb", "or", "args", ".", "upgradedb", ":", "log", ".", "warn", "(", "'--initdb and --upgradedb are deprecated, '", "'use --managedb'", ")", "elif", "cmd", "==", "'upgrade'", ":", "# Deprecated behaviour", "log", ".", "warn", "(", "'\"omero upgrade\" is deprecated, use \"omego install --upgrade\"'", ")", "cmd", "=", "'install'", "args", ".", "upgrade", "=", "True", "# Deprecated behaviour: Server must exist", "newinstall", "=", "False", "else", ":", "raise", "Exception", "(", "'Unexpected command: %s'", "%", "cmd", ")", "return", "args", ",", "newinstall" ]
We need to support deprecated behaviour for now which makes this quite complicated Current behaviour: - install: Installs a new server, existing server causes an error - install --upgrade: Installs or upgrades a server - install --managedb: Automatically initialise or upgrade the db Deprecated: - install --upgradedb --initdb: Replaced by install --managedb - install --upgradedb: upgrade the db, must exist - install --initdb: initialise the db - upgrade: Upgrades a server, must already exist - upgrade --upgradedb: Automatically upgrade the db returns: - Modified args object, flag to indicate new/existing/auto install
[ "We", "need", "to", "support", "deprecated", "behaviour", "for", "now", "which", "makes", "this", "quite", "complicated" ]
python
train
37.482759
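The current-behaviour path above can be pinned down with a small check; installer is an assumed instance of the class defining _handle_args:

import argparse

args = argparse.Namespace(upgrade=True, managedb=True,
                          initdb=False, upgradedb=False)
args, newinstall = installer._handle_args('install', args)
assert newinstall is None              # --upgrade: install or upgrade
assert args.initdb and args.upgradedb  # --managedb switches both db steps on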
pecan/pecan
pecan/commands/serve.py
https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/commands/serve.py#L213-L229
def log_message(self, format, *args):
        """
        overrides the ``log_message`` method from the wsgiref server
        so that normal logging works with whatever configuration
        the application has been set to.

        Levels are inferred from the HTTP status code: 4XX codes are
        treated as warnings, 5XX as errors, and everything else as INFO
        level.
        """
        code = args[1][0]
        levels = {
            '4': 'warning',
            '5': 'error'
        }

        log_handler = getattr(logger, levels.get(code, 'info'))
        log_handler(format % args)
[ "def", "log_message", "(", "self", ",", "format", ",", "*", "args", ")", ":", "code", "=", "args", "[", "1", "]", "[", "0", "]", "levels", "=", "{", "'4'", ":", "'warning'", ",", "'5'", ":", "'error'", "}", "log_handler", "=", "getattr", "(", "logger", ",", "levels", ".", "get", "(", "code", ",", "'info'", ")", ")", "log_handler", "(", "format", "%", "args", ")" ]
overrides the ``log_message`` method from the wsgiref server
        so that normal logging works with whatever configuration
        the application has been set to.

        Levels are inferred from the HTTP status code: 4XX codes are
        treated as warnings, 5XX as errors, and everything else as INFO
        level.
[ "overrides", "the", "log_message", "method", "from", "the", "wsgiref", "server", "so", "that", "normal", "logging", "works", "with", "whatever", "configuration", "the", "application", "has", "been", "set", "to", "." ]
python
train
33.882353
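The level inference above looks only at the first character of the status code, which the record reads via args[1][0]; the mapping is easy to verify standalone:

levels = {'4': 'warning', '5': 'error'}
for status, expected in (('404', 'warning'), ('500', 'error'), ('200', 'info')):
    assert levels.get(status[0], 'info') == expected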
ethereum/py-evm
eth/chains/base.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L671-L683
def get_transaction_result( self, transaction: BaseOrSpoofTransaction, at_header: BlockHeader) -> bytes: """ Return the result of running the given transaction. This is referred to as a `call()` in web3. """ with self.get_vm(at_header).state_in_temp_block() as state: computation = state.costless_execute_transaction(transaction) computation.raise_if_error() return computation.output
[ "def", "get_transaction_result", "(", "self", ",", "transaction", ":", "BaseOrSpoofTransaction", ",", "at_header", ":", "BlockHeader", ")", "->", "bytes", ":", "with", "self", ".", "get_vm", "(", "at_header", ")", ".", "state_in_temp_block", "(", ")", "as", "state", ":", "computation", "=", "state", ".", "costless_execute_transaction", "(", "transaction", ")", "computation", ".", "raise_if_error", "(", ")", "return", "computation", ".", "output" ]
Return the result of running the given transaction. This is referred to as a `call()` in web3.
[ "Return", "the", "result", "of", "running", "the", "given", "transaction", ".", "This", "is", "referred", "to", "as", "a", "call", "()", "in", "web3", "." ]
python
train
36.692308
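A sketch of the web3-style call() described above; chain and spoof_tx are assumed to be an initialised Chain and a spoofed read-only transaction:

output = chain.get_transaction_result(spoof_tx, chain.get_canonical_head())
# output is the raw return data; a failed computation raises instead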
log2timeline/plaso
plaso/cli/storage_media_tool.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/storage_media_tool.py#L945-L959
def _ScanFileSystem(self, scan_node, base_path_specs): """Scans a file system scan node for file systems. Args: scan_node (SourceScanNode): file system scan node. base_path_specs (list[PathSpec]): file system base path specifications. Raises: SourceScannerError: if the scan node is invalid. """ if not scan_node or not scan_node.path_spec: raise errors.SourceScannerError( 'Invalid or missing file system scan node.') base_path_specs.append(scan_node.path_spec)
[ "def", "_ScanFileSystem", "(", "self", ",", "scan_node", ",", "base_path_specs", ")", ":", "if", "not", "scan_node", "or", "not", "scan_node", ".", "path_spec", ":", "raise", "errors", ".", "SourceScannerError", "(", "'Invalid or missing file system scan node.'", ")", "base_path_specs", ".", "append", "(", "scan_node", ".", "path_spec", ")" ]
Scans a file system scan node for file systems. Args: scan_node (SourceScanNode): file system scan node. base_path_specs (list[PathSpec]): file system base path specifications. Raises: SourceScannerError: if the scan node is invalid.
[ "Scans", "a", "file", "system", "scan", "node", "for", "file", "systems", "." ]
python
train
33.933333
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/decoders/parser_csv.py
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/decoders/parser_csv.py#L45-L94
def decode(data): """ Handles decoding of the CSV `data`. Args: data (str): Data which will be decoded. Returns: dict: Dictionary with decoded data. """ # try to guess dialect of the csv file dialect = None try: dialect = csv.Sniffer().sniff(data) except Exception: pass # parse data with csv parser handler = None try: data = data.splitlines() # used later handler = csv.reader(data, dialect) except Exception, e: raise MetaParsingException("Can't parse your CSV data: %s" % e.message) # make sure, that data are meaningful decoded = [] for cnt, line in enumerate(handler): usable_data = filter(lambda x: x.strip(), line) if not usable_data: continue if len(usable_data) != 2: raise MetaParsingException( "Bad number of elements - line %d:\n\t%s\n" % (cnt, data[cnt]) ) # remove trailing spaces, decode to utf-8 usable_data = map(lambda x: x.strip().decode("utf-8"), usable_data) # remove quotes if the csv.Sniffer failed to decode right `dialect` usable_data = map(lambda x: _remove_quotes(x), usable_data) decoded.append(usable_data) # apply another checks to data decoded = validator.check_structure(decoded) return decoded
[ "def", "decode", "(", "data", ")", ":", "# try to guess dialect of the csv file", "dialect", "=", "None", "try", ":", "dialect", "=", "csv", ".", "Sniffer", "(", ")", ".", "sniff", "(", "data", ")", "except", "Exception", ":", "pass", "# parse data with csv parser", "handler", "=", "None", "try", ":", "data", "=", "data", ".", "splitlines", "(", ")", "# used later", "handler", "=", "csv", ".", "reader", "(", "data", ",", "dialect", ")", "except", "Exception", ",", "e", ":", "raise", "MetaParsingException", "(", "\"Can't parse your CSV data: %s\"", "%", "e", ".", "message", ")", "# make sure, that data are meaningful", "decoded", "=", "[", "]", "for", "cnt", ",", "line", "in", "enumerate", "(", "handler", ")", ":", "usable_data", "=", "filter", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", ",", "line", ")", "if", "not", "usable_data", ":", "continue", "if", "len", "(", "usable_data", ")", "!=", "2", ":", "raise", "MetaParsingException", "(", "\"Bad number of elements - line %d:\\n\\t%s\\n\"", "%", "(", "cnt", ",", "data", "[", "cnt", "]", ")", ")", "# remove trailing spaces, decode to utf-8", "usable_data", "=", "map", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", ",", "usable_data", ")", "# remove quotes if the csv.Sniffer failed to decode right `dialect`", "usable_data", "=", "map", "(", "lambda", "x", ":", "_remove_quotes", "(", "x", ")", ",", "usable_data", ")", "decoded", ".", "append", "(", "usable_data", ")", "# apply another checks to data", "decoded", "=", "validator", ".", "check_structure", "(", "decoded", ")", "return", "decoded" ]
Handles decoding of the CSV `data`. Args: data (str): Data which will be decoded. Returns: dict: Dictionary with decoded data.
[ "Handles", "decoding", "of", "the", "CSV", "data", "." ]
python
train
26.76
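A sketch of decoding a two-column file with the function above (Python 2, matching the record's syntax); the separator is whatever csv.Sniffer guesses, and the exact output shape also depends on validator.check_structure:

csv_data = "ISBN;978-80-87270-99-8\nAuthor;Example Author"
decoded = decode(csv_data)  # roughly [[u'ISBN', u'978-...'], [u'Author', ...]]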
nickjj/ansigenome
ansigenome/export.py
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/export.py#L258-L266
def dump(self): """ Dump the output to json. """ report_as_json_string = utils.dict_to_json(self.report) if self.out_file: utils.string_to_file(self.out_file, report_as_json_string) else: print report_as_json_string
[ "def", "dump", "(", "self", ")", ":", "report_as_json_string", "=", "utils", ".", "dict_to_json", "(", "self", ".", "report", ")", "if", "self", ".", "out_file", ":", "utils", ".", "string_to_file", "(", "self", ".", "out_file", ",", "report_as_json_string", ")", "else", ":", "print", "report_as_json_string" ]
Dump the output to json.
[ "Dump", "the", "output", "to", "json", "." ]
python
train
31
grangier/python-goose
goose/extractors/content.py
https://github.com/grangier/python-goose/blob/09023ec9f5ef26a628a2365616c0a7c864f0ecea/goose/extractors/content.py#L224-L250
def get_siblings_score(self, top_node): """\ we could have long articles that have tons of paragraphs so if we tried to calculate the base score against the total text score of those paragraphs it would be unfair. So we need to normalize the score based on the average scoring of the paragraphs within the top node. For example if our total score of 10 paragraphs was 1000 but each had an average value of 100 then 100 should be our base. """ base = 100000 paragraphs_number = 0 paragraphs_score = 0 nodes_to_check = self.parser.getElementsByTag(top_node, tag='p') for node in nodes_to_check: text_node = self.parser.getText(node) word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text_node) high_link_density = self.is_highlink_density(node) if word_stats.get_stopword_count() > 2 and not high_link_density: paragraphs_number += 1 paragraphs_score += word_stats.get_stopword_count() if paragraphs_number > 0: base = paragraphs_score / paragraphs_number return base
[ "def", "get_siblings_score", "(", "self", ",", "top_node", ")", ":", "base", "=", "100000", "paragraphs_number", "=", "0", "paragraphs_score", "=", "0", "nodes_to_check", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "top_node", ",", "tag", "=", "'p'", ")", "for", "node", "in", "nodes_to_check", ":", "text_node", "=", "self", ".", "parser", ".", "getText", "(", "node", ")", "word_stats", "=", "self", ".", "stopwords_class", "(", "language", "=", "self", ".", "get_language", "(", ")", ")", ".", "get_stopword_count", "(", "text_node", ")", "high_link_density", "=", "self", ".", "is_highlink_density", "(", "node", ")", "if", "word_stats", ".", "get_stopword_count", "(", ")", ">", "2", "and", "not", "high_link_density", ":", "paragraphs_number", "+=", "1", "paragraphs_score", "+=", "word_stats", ".", "get_stopword_count", "(", ")", "if", "paragraphs_number", ">", "0", ":", "base", "=", "paragraphs_score", "/", "paragraphs_number", "return", "base" ]
\ we could have long articles that have tons of paragraphs so if we tried to calculate the base score against the total text score of those paragraphs it would be unfair. So we need to normalize the score based on the average scoring of the paragraphs within the top node. For example if our total score of 10 paragraphs was 1000 but each had an average value of 100 then 100 should be our base.
[ "\\", "we", "could", "have", "long", "articles", "that", "have", "tons", "of", "paragraphs", "so", "if", "we", "tried", "to", "calculate", "the", "base", "score", "against", "the", "total", "text", "score", "of", "those", "paragraphs", "it", "would", "be", "unfair", ".", "So", "we", "need", "to", "normalize", "the", "score", "based", "on", "the", "average", "scoring", "of", "the", "paragraphs", "within", "the", "top", "node", ".", "For", "example", "if", "our", "total", "score", "of", "10", "paragraphs", "was", "1000", "but", "each", "had", "an", "average", "value", "of", "100", "then", "100", "should", "be", "our", "base", "." ]
python
train
44.259259
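The docstring's own numbers make the normalisation concrete: 10 paragraphs scoring 1000 in total yield a per-paragraph base of 100 (integer division under the record's Python 2):

paragraphs_score, paragraphs_number = 1000, 10
base = paragraphs_score / paragraphs_number
assert base == 100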
scanny/python-pptx
pptx/parts/presentation.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/parts/presentation.py#L131-L139
def _next_slide_partname(self): """ Return |PackURI| instance containing the partname for a slide to be appended to this slide collection, e.g. ``/ppt/slides/slide9.xml`` for a slide collection containing 8 slides. """ sldIdLst = self._element.get_or_add_sldIdLst() partname_str = '/ppt/slides/slide%d.xml' % (len(sldIdLst)+1) return PackURI(partname_str)
[ "def", "_next_slide_partname", "(", "self", ")", ":", "sldIdLst", "=", "self", ".", "_element", ".", "get_or_add_sldIdLst", "(", ")", "partname_str", "=", "'/ppt/slides/slide%d.xml'", "%", "(", "len", "(", "sldIdLst", ")", "+", "1", ")", "return", "PackURI", "(", "partname_str", ")" ]
Return |PackURI| instance containing the partname for a slide to be appended to this slide collection, e.g. ``/ppt/slides/slide9.xml`` for a slide collection containing 8 slides.
[ "Return", "|PackURI|", "instance", "containing", "the", "partname", "for", "a", "slide", "to", "be", "appended", "to", "this", "slide", "collection", "e", ".", "g", ".", "/", "ppt", "/", "slides", "/", "slide9", ".", "xml", "for", "a", "slide", "collection", "containing", "8", "slides", "." ]
python
train
45.666667
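The partname arithmetic above, worked for the docstring's example of a collection already holding 8 slides:

existing_slides = 8  # stands in for len(sldIdLst)
partname_str = '/ppt/slides/slide%d.xml' % (existing_slides + 1)
assert partname_str == '/ppt/slides/slide9.xml'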