Column           | Type          | Stats
-----------------|---------------|-----------
repo             | stringlengths | 7 - 54
path             | stringlengths | 4 - 192
url              | stringlengths | 87 - 284
code             | stringlengths | 78 - 104k
code_tokens      | list          |
docstring        | stringlengths | 1 - 46.9k
docstring_tokens | list          |
language         | stringclasses | 1 value
partition        | stringclasses | 3 values
paramiko/paramiko
tasks.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/tasks.py#L86-L91
def guard(ctx, opts=""):
    """
    Execute all tests and then watch for changes, re-running.
    """
    # TODO if coverage was run via pytest-cov, we could add coverage here too
    return test(ctx, include_slow=True, loop_on_fail=True, opts=opts)
[ "def", "guard", "(", "ctx", ",", "opts", "=", "\"\"", ")", ":", "# TODO if coverage was run via pytest-cov, we could add coverage here too", "return", "test", "(", "ctx", ",", "include_slow", "=", "True", ",", "loop_on_fail", "=", "True", ",", "opts", "=", "opts", ")" ]
Execute all tests and then watch for changes, re-running.
[ "Execute", "all", "tests", "and", "then", "watch", "for", "changes", "re", "-", "running", "." ]
python
train
bokeh/bokeh
bokeh/core/property/descriptors.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/descriptors.py#L620-L637
def trigger_if_changed(self, obj, old):
    ''' Send a change event notification if the property is set to a
    value is not equal to ``old``.

    Args:
        obj (HasProps)
            The object the property is being set on.

        old (obj) :
            The previous value of the property to compare

    Returns:
        None

    '''
    new_value = self.__get__(obj, obj.__class__)
    if not self.property.matches(old, new_value):
        self._trigger(obj, old, new_value)
[ "def", "trigger_if_changed", "(", "self", ",", "obj", ",", "old", ")", ":", "new_value", "=", "self", ".", "__get__", "(", "obj", ",", "obj", ".", "__class__", ")", "if", "not", "self", ".", "property", ".", "matches", "(", "old", ",", "new_value", ")", ":", "self", ".", "_trigger", "(", "obj", ",", "old", ",", "new_value", ")" ]
Send a change event notification if the property is set to a value is not equal to ``old``. Args: obj (HasProps) The object the property is being set on. old (obj) : The previous value of the property to compare Returns: None
[ "Send", "a", "change", "event", "notification", "if", "the", "property", "is", "set", "to", "a", "value", "is", "not", "equal", "to", "old", "." ]
python
train
hazelcast/hazelcast-python-client
hazelcast/client.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/client.py#L251-L264
def shutdown(self):
    """
    Shuts down this HazelcastClient.
    """
    if self.lifecycle.is_live:
        self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTTING_DOWN)
        self.near_cache_manager.destroy_all_near_caches()
        self.statistics.shutdown()
        self.partition_service.shutdown()
        self.heartbeat.shutdown()
        self.cluster.shutdown()
        self.reactor.shutdown()
        self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTDOWN)
        self.logger.info("Client shutdown.", extra=self._logger_extras)
[ "def", "shutdown", "(", "self", ")", ":", "if", "self", ".", "lifecycle", ".", "is_live", ":", "self", ".", "lifecycle", ".", "fire_lifecycle_event", "(", "LIFECYCLE_STATE_SHUTTING_DOWN", ")", "self", ".", "near_cache_manager", ".", "destroy_all_near_caches", "(", ")", "self", ".", "statistics", ".", "shutdown", "(", ")", "self", ".", "partition_service", ".", "shutdown", "(", ")", "self", ".", "heartbeat", ".", "shutdown", "(", ")", "self", ".", "cluster", ".", "shutdown", "(", ")", "self", ".", "reactor", ".", "shutdown", "(", ")", "self", ".", "lifecycle", ".", "fire_lifecycle_event", "(", "LIFECYCLE_STATE_SHUTDOWN", ")", "self", ".", "logger", ".", "info", "(", "\"Client shutdown.\"", ",", "extra", "=", "self", ".", "_logger_extras", ")" ]
Shuts down this HazelcastClient.
[ "Shuts", "down", "this", "HazelcastClient", "." ]
python
train
GoogleCloudPlatform/datastore-ndb-python
ndb/context.py
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/context.py#L103-L126
def _make_ctx_options(ctx_options, config_cls=ContextOptions):
    """Helper to construct a ContextOptions object from keyword arguments.

    Args:
      ctx_options: A dict of keyword arguments.
      config_cls: Optional Configuration class to use, default ContextOptions.

    Note that either 'options' or 'config' can be used to pass another
    Configuration object, but not both. If another Configuration object
    is given it provides default values.

    Returns:
      A Configuration object, or None if ctx_options is empty.
    """
    if not ctx_options:
        return None
    for key in list(ctx_options):
        translation = _OPTION_TRANSLATIONS.get(key)
        if translation:
            if translation in ctx_options:
                raise ValueError('Cannot specify %s and %s at the same time' %
                                 (key, translation))
            ctx_options[translation] = ctx_options.pop(key)
    return config_cls(**ctx_options)
[ "def", "_make_ctx_options", "(", "ctx_options", ",", "config_cls", "=", "ContextOptions", ")", ":", "if", "not", "ctx_options", ":", "return", "None", "for", "key", "in", "list", "(", "ctx_options", ")", ":", "translation", "=", "_OPTION_TRANSLATIONS", ".", "get", "(", "key", ")", "if", "translation", ":", "if", "translation", "in", "ctx_options", ":", "raise", "ValueError", "(", "'Cannot specify %s and %s at the same time'", "%", "(", "key", ",", "translation", ")", ")", "ctx_options", "[", "translation", "]", "=", "ctx_options", ".", "pop", "(", "key", ")", "return", "config_cls", "(", "*", "*", "ctx_options", ")" ]
Helper to construct a ContextOptions object from keyword arguments. Args: ctx_options: A dict of keyword arguments. config_cls: Optional Configuration class to use, default ContextOptions. Note that either 'options' or 'config' can be used to pass another Configuration object, but not both. If another Configuration object is given it provides default values. Returns: A Configuration object, or None if ctx_options is empty.
[ "Helper", "to", "construct", "a", "ContextOptions", "object", "from", "keyword", "arguments", "." ]
python
train
dvdotsenko/jsonrpc.py
jsonrpcparts/serializers.py
https://github.com/dvdotsenko/jsonrpc.py/blob/19673edd77a9518ac5655bd407f6b93ffbb2cafc/jsonrpcparts/serializers.py#L526-L553
def parse_response(cls, response_string):
    """JSONRPC allows for **batch** responses to be communicated
    as arrays of dicts. This method parses out each individual
    element in the batch and returns a list of tuples, each
    tuple a result of parsing of each item in the batch.

    :Returns:
        | tuple of (results, is_batch_mode_flag)
        | where:
        |  - results is a tuple describing the request
        |  - Is_batch_mode_flag is a Bool indicating if the
        |    request came in in batch mode (as array of requests) or not.

    :Raises:
        RPCParseError, RPCInvalidRequest
    """
    try:
        batch = cls.json_loads(response_string)
    except ValueError as err:
        raise errors.RPCParseError("No valid JSON. (%s)" % str(err))

    if isinstance(batch, (list, tuple)) and batch:
        # batch is true batch.
        # list of parsed request objects, is_batch_mode_flag
        return [cls._parse_single_response_trap_errors(response) for response in batch], True
    elif isinstance(batch, dict):
        # `batch` is actually single response object
        return [cls._parse_single_response_trap_errors(batch)], False

    raise errors.RPCParseError("Neither a batch array nor a single response object found in the response.")
[ "def", "parse_response", "(", "cls", ",", "response_string", ")", ":", "try", ":", "batch", "=", "cls", ".", "json_loads", "(", "response_string", ")", "except", "ValueError", "as", "err", ":", "raise", "errors", ".", "RPCParseError", "(", "\"No valid JSON. (%s)\"", "%", "str", "(", "err", ")", ")", "if", "isinstance", "(", "batch", ",", "(", "list", ",", "tuple", ")", ")", "and", "batch", ":", "# batch is true batch.", "# list of parsed request objects, is_batch_mode_flag", "return", "[", "cls", ".", "_parse_single_response_trap_errors", "(", "response", ")", "for", "response", "in", "batch", "]", ",", "True", "elif", "isinstance", "(", "batch", ",", "dict", ")", ":", "# `batch` is actually single response object", "return", "[", "cls", ".", "_parse_single_response_trap_errors", "(", "batch", ")", "]", ",", "False", "raise", "errors", ".", "RPCParseError", "(", "\"Neither a batch array nor a single response object found in the response.\"", ")" ]
JSONRPC allows for **batch** responses to be communicated as arrays of dicts. This method parses out each individual element in the batch and returns a list of tuples, each tuple a result of parsing of each item in the batch. :Returns: | tuple of (results, is_batch_mode_flag) | where: | - results is a tuple describing the request | - Is_batch_mode_flag is a Bool indicating if the | request came in in batch mode (as array of requests) or not. :Raises: RPCParseError, RPCInvalidRequest
[ "JSONRPC", "allows", "for", "**", "batch", "**", "responses", "to", "be", "communicated", "as", "arrays", "of", "dicts", ".", "This", "method", "parses", "out", "each", "individual", "element", "in", "the", "batch", "and", "returns", "a", "list", "of", "tuples", "each", "tuple", "a", "result", "of", "parsing", "of", "each", "item", "in", "the", "batch", "." ]
python
train
python-cmd2/cmd2
cmd2/argparse_completer.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/argparse_completer.py#L622-L634
def complete_command_help(self, tokens: List[str], text: str, line: str, begidx: int, endidx: int) -> List[str]:
    """Supports the completion of sub-commands for commands through the cmd2 help command."""
    for idx, token in enumerate(tokens):
        if idx >= self._token_start_index:
            if self._positional_completers:
                # For now argparse only allows 1 sub-command group per level
                # so this will only loop once.
                for completers in self._positional_completers.values():
                    if token in completers:
                        return completers[token].complete_command_help(tokens, text, line, begidx, endidx)
                    else:
                        return self._cmd2_app.basic_complete(text, line, begidx, endidx, completers.keys())
    return []
[ "def", "complete_command_help", "(", "self", ",", "tokens", ":", "List", "[", "str", "]", ",", "text", ":", "str", ",", "line", ":", "str", ",", "begidx", ":", "int", ",", "endidx", ":", "int", ")", "->", "List", "[", "str", "]", ":", "for", "idx", ",", "token", "in", "enumerate", "(", "tokens", ")", ":", "if", "idx", ">=", "self", ".", "_token_start_index", ":", "if", "self", ".", "_positional_completers", ":", "# For now argparse only allows 1 sub-command group per level", "# so this will only loop once.", "for", "completers", "in", "self", ".", "_positional_completers", ".", "values", "(", ")", ":", "if", "token", "in", "completers", ":", "return", "completers", "[", "token", "]", ".", "complete_command_help", "(", "tokens", ",", "text", ",", "line", ",", "begidx", ",", "endidx", ")", "else", ":", "return", "self", ".", "_cmd2_app", ".", "basic_complete", "(", "text", ",", "line", ",", "begidx", ",", "endidx", ",", "completers", ".", "keys", "(", ")", ")", "return", "[", "]" ]
Supports the completion of sub-commands for commands through the cmd2 help command.
[ "Supports", "the", "completion", "of", "sub", "-", "commands", "for", "commands", "through", "the", "cmd2", "help", "command", "." ]
python
train
gagneurlab/concise
concise/preprocessing/sequence.py
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L243-L261
def encodeAA(seq_vec, maxlen=None, seq_align="start", encode_type="one_hot"):
    """Convert the Amino-acid sequence into 1-hot-encoding numpy array

    # Arguments
        seq_vec: List of strings/amino-acid sequences
        maxlen: Maximum sequence length. See `pad_sequences` for more detail
        seq_align: How to align the sequences of variable lengths.
            See `pad_sequences` for more detail
        encode_type: can be `"one_hot"` or `token` for token encoding of
            codons (incremental integer ).

    # Returns
        numpy.ndarray of shape `(len(seq_vec), maxlen, 22)`
    """
    return encodeSequence(seq_vec,
                          vocab=AMINO_ACIDS,
                          neutral_vocab="_",
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="_",
                          encode_type=encode_type)
[ "def", "encodeAA", "(", "seq_vec", ",", "maxlen", "=", "None", ",", "seq_align", "=", "\"start\"", ",", "encode_type", "=", "\"one_hot\"", ")", ":", "return", "encodeSequence", "(", "seq_vec", ",", "vocab", "=", "AMINO_ACIDS", ",", "neutral_vocab", "=", "\"_\"", ",", "maxlen", "=", "maxlen", ",", "seq_align", "=", "seq_align", ",", "pad_value", "=", "\"_\"", ",", "encode_type", "=", "encode_type", ")" ]
Convert the Amino-acid sequence into 1-hot-encoding numpy array # Arguments seq_vec: List of strings/amino-acid sequences maxlen: Maximum sequence length. See `pad_sequences` for more detail seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail encode_type: can be `"one_hot"` or `token` for token encoding of codons (incremental integer ). # Returns numpy.ndarray of shape `(len(seq_vec), maxlen, 22)`
[ "Convert", "the", "Amino", "-", "acid", "sequence", "into", "1", "-", "hot", "-", "encoding", "numpy", "array" ]
python
train
idlesign/django-sitemessage
sitemessage/toolbox.py
https://github.com/idlesign/django-sitemessage/blob/25b179b798370354c5988042ec209e255d23793f/sitemessage/toolbox.py#L30-L54
def schedule_messages(messages, recipients=None, sender=None, priority=None):
    """Schedules a message or messages.

    :param MessageBase|str|list messages: str or MessageBase heir or list - use str to create PlainTextMessage.
    :param list|None recipients: recipients addresses or Django User model heir instances
        If `None` Dispatches should be created before send using `prepare_dispatches()`.
    :param User|None sender: User model heir instance
    :param int priority: number describing message priority. If set overrides priority provided with message type.
    :return: list of tuples - (message_model, dispatches_models)
    :rtype: list
    """
    if not is_iterable(messages):
        messages = (messages,)

    results = []
    for message in messages:
        if isinstance(message, six.string_types):
            message = PlainTextMessage(message)

        resulting_priority = message.priority
        if priority is not None:
            resulting_priority = priority

        results.append(message.schedule(sender=sender, recipients=recipients, priority=resulting_priority))

    return results
[ "def", "schedule_messages", "(", "messages", ",", "recipients", "=", "None", ",", "sender", "=", "None", ",", "priority", "=", "None", ")", ":", "if", "not", "is_iterable", "(", "messages", ")", ":", "messages", "=", "(", "messages", ",", ")", "results", "=", "[", "]", "for", "message", "in", "messages", ":", "if", "isinstance", "(", "message", ",", "six", ".", "string_types", ")", ":", "message", "=", "PlainTextMessage", "(", "message", ")", "resulting_priority", "=", "message", ".", "priority", "if", "priority", "is", "not", "None", ":", "resulting_priority", "=", "priority", "results", ".", "append", "(", "message", ".", "schedule", "(", "sender", "=", "sender", ",", "recipients", "=", "recipients", ",", "priority", "=", "resulting_priority", ")", ")", "return", "results" ]
Schedules a message or messages. :param MessageBase|str|list messages: str or MessageBase heir or list - use str to create PlainTextMessage. :param list|None recipients: recipients addresses or Django User model heir instances If `None` Dispatches should be created before send using `prepare_dispatches()`. :param User|None sender: User model heir instance :param int priority: number describing message priority. If set overrides priority provided with message type. :return: list of tuples - (message_model, dispatches_models) :rtype: list
[ "Schedules", "a", "message", "or", "messages", "." ]
python
train
quodlibet/mutagen
mutagen/id3/_frames.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/id3/_frames.py#L236-L292
def _fromData(cls, header, tflags, data):
    """Construct this ID3 frame from raw string data.

    Raises:

    ID3JunkFrameError in case parsing failed
    NotImplementedError in case parsing isn't implemented
    ID3EncryptionUnsupportedError in case the frame is encrypted.
    """

    if header.version >= header._V24:
        if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN):
            # The data length int is syncsafe in 2.4 (but not 2.3).
            # However, we don't actually need the data length int,
            # except to work around a QL 0.12 bug, and in that case
            # all we need are the raw bytes.
            datalen_bytes = data[:4]
            data = data[4:]
        if tflags & Frame.FLAG24_UNSYNCH or header.f_unsynch:
            try:
                data = unsynch.decode(data)
            except ValueError:
                # Some things write synch-unsafe data with either the frame
                # or global unsynch flag set. Try to load them as is.
                # https://github.com/quodlibet/mutagen/issues/210
                # https://github.com/quodlibet/mutagen/issues/223
                pass
        if tflags & Frame.FLAG24_ENCRYPT:
            raise ID3EncryptionUnsupportedError
        if tflags & Frame.FLAG24_COMPRESS:
            try:
                data = zlib.decompress(data)
            except zlib.error:
                # the initial mutagen that went out with QL 0.12 did not
                # write the 4 bytes of uncompressed size. Compensate.
                data = datalen_bytes + data
                try:
                    data = zlib.decompress(data)
                except zlib.error as err:
                    raise ID3JunkFrameError(
                        'zlib: %s: %r' % (err, data))
    elif header.version >= header._V23:
        if tflags & Frame.FLAG23_COMPRESS:
            usize, = unpack('>L', data[:4])
            data = data[4:]
        if tflags & Frame.FLAG23_ENCRYPT:
            raise ID3EncryptionUnsupportedError
        if tflags & Frame.FLAG23_COMPRESS:
            try:
                data = zlib.decompress(data)
            except zlib.error as err:
                raise ID3JunkFrameError('zlib: %s: %r' % (err, data))

    frame = cls()
    frame._readData(header, data)
    return frame
[ "def", "_fromData", "(", "cls", ",", "header", ",", "tflags", ",", "data", ")", ":", "if", "header", ".", "version", ">=", "header", ".", "_V24", ":", "if", "tflags", "&", "(", "Frame", ".", "FLAG24_COMPRESS", "|", "Frame", ".", "FLAG24_DATALEN", ")", ":", "# The data length int is syncsafe in 2.4 (but not 2.3).", "# However, we don't actually need the data length int,", "# except to work around a QL 0.12 bug, and in that case", "# all we need are the raw bytes.", "datalen_bytes", "=", "data", "[", ":", "4", "]", "data", "=", "data", "[", "4", ":", "]", "if", "tflags", "&", "Frame", ".", "FLAG24_UNSYNCH", "or", "header", ".", "f_unsynch", ":", "try", ":", "data", "=", "unsynch", ".", "decode", "(", "data", ")", "except", "ValueError", ":", "# Some things write synch-unsafe data with either the frame", "# or global unsynch flag set. Try to load them as is.", "# https://github.com/quodlibet/mutagen/issues/210", "# https://github.com/quodlibet/mutagen/issues/223", "pass", "if", "tflags", "&", "Frame", ".", "FLAG24_ENCRYPT", ":", "raise", "ID3EncryptionUnsupportedError", "if", "tflags", "&", "Frame", ".", "FLAG24_COMPRESS", ":", "try", ":", "data", "=", "zlib", ".", "decompress", "(", "data", ")", "except", "zlib", ".", "error", ":", "# the initial mutagen that went out with QL 0.12 did not", "# write the 4 bytes of uncompressed size. Compensate.", "data", "=", "datalen_bytes", "+", "data", "try", ":", "data", "=", "zlib", ".", "decompress", "(", "data", ")", "except", "zlib", ".", "error", "as", "err", ":", "raise", "ID3JunkFrameError", "(", "'zlib: %s: %r'", "%", "(", "err", ",", "data", ")", ")", "elif", "header", ".", "version", ">=", "header", ".", "_V23", ":", "if", "tflags", "&", "Frame", ".", "FLAG23_COMPRESS", ":", "usize", ",", "=", "unpack", "(", "'>L'", ",", "data", "[", ":", "4", "]", ")", "data", "=", "data", "[", "4", ":", "]", "if", "tflags", "&", "Frame", ".", "FLAG23_ENCRYPT", ":", "raise", "ID3EncryptionUnsupportedError", "if", "tflags", "&", "Frame", ".", "FLAG23_COMPRESS", ":", "try", ":", "data", "=", "zlib", ".", "decompress", "(", "data", ")", "except", "zlib", ".", "error", "as", "err", ":", "raise", "ID3JunkFrameError", "(", "'zlib: %s: %r'", "%", "(", "err", ",", "data", ")", ")", "frame", "=", "cls", "(", ")", "frame", ".", "_readData", "(", "header", ",", "data", ")", "return", "frame" ]
Construct this ID3 frame from raw string data. Raises: ID3JunkFrameError in case parsing failed NotImplementedError in case parsing isn't implemented ID3EncryptionUnsupportedError in case the frame is encrypted.
[ "Construct", "this", "ID3", "frame", "from", "raw", "string", "data", "." ]
python
train
Galarzaa90/tibia.py
tibiapy/utils.py
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/utils.py#L183-L207
def try_datetime(obj) -> Optional[datetime.datetime]:
    """Attempts to convert an object into a datetime.

    If the date format is known, it's recommended to use the corresponding function
    This is meant to be used in constructors.

    Parameters
    ----------
    obj: :class:`str`, :class:`dict`, :class:`datetime.datetime`
        The object to convert.

    Returns
    -------
    :class:`datetime.datetime`, optional
        The represented datetime, or ``None`` if conversion wasn't possible.
    """
    if obj is None:
        return None
    if isinstance(obj, datetime.datetime):
        return obj
    res = parse_tibia_datetime(obj)
    if res is not None:
        return res
    res = parse_tibiadata_datetime(obj)
    return res
[ "def", "try_datetime", "(", "obj", ")", "->", "Optional", "[", "datetime", ".", "datetime", "]", ":", "if", "obj", "is", "None", ":", "return", "None", "if", "isinstance", "(", "obj", ",", "datetime", ".", "datetime", ")", ":", "return", "obj", "res", "=", "parse_tibia_datetime", "(", "obj", ")", "if", "res", "is", "not", "None", ":", "return", "res", "res", "=", "parse_tibiadata_datetime", "(", "obj", ")", "return", "res" ]
Attempts to convert an object into a datetime. If the date format is known, it's recommended to use the corresponding function This is meant to be used in constructors. Parameters ---------- obj: :class:`str`, :class:`dict`, :class:`datetime.datetime` The object to convert. Returns ------- :class:`datetime.datetime`, optional The represented datetime, or ``None`` if conversion wasn't possible.
[ "Attempts", "to", "convert", "an", "object", "into", "a", "datetime", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/openpy.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/openpy.py#L163-L191
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
    """Read a Python file from a URL, using the encoding declared inside the file.

    Parameters
    ----------
    url : str
      The URL from which to fetch the file.
    errors : str
      How to handle decoding errors in the file. Options are the same as for
      bytes.decode(), but here 'replace' is the default.
    skip_encoding_cookie : bool
      If True (the default), and the encoding declaration is found in the first
      two lines, that line will be excluded from the output - compiling a
      unicode string with an encoding declaration is a SyntaxError in Python 2.

    Returns
    -------
    A unicode string containing the contents of the file.
    """
    response = urllib.urlopen(url)
    buffer = io.BytesIO(response.read())
    encoding, lines = detect_encoding(buffer.readline)
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if skip_encoding_cookie:
        return "".join(strip_encoding_cookie(text))
    else:
        return text.read()
[ "def", "read_py_url", "(", "url", ",", "errors", "=", "'replace'", ",", "skip_encoding_cookie", "=", "True", ")", ":", "response", "=", "urllib", ".", "urlopen", "(", "url", ")", "buffer", "=", "io", ".", "BytesIO", "(", "response", ".", "read", "(", ")", ")", "encoding", ",", "lines", "=", "detect_encoding", "(", "buffer", ".", "readline", ")", "buffer", ".", "seek", "(", "0", ")", "text", "=", "TextIOWrapper", "(", "buffer", ",", "encoding", ",", "errors", "=", "errors", ",", "line_buffering", "=", "True", ")", "text", ".", "mode", "=", "'r'", "if", "skip_encoding_cookie", ":", "return", "\"\"", ".", "join", "(", "strip_encoding_cookie", "(", "text", ")", ")", "else", ":", "return", "text", ".", "read", "(", ")" ]
Read a Python file from a URL, using the encoding declared inside the file. Parameters ---------- url : str The URL from which to fetch the file. errors : str How to handle decoding errors in the file. Options are the same as for bytes.decode(), but here 'replace' is the default. skip_encoding_cookie : bool If True (the default), and the encoding declaration is found in the first two lines, that line will be excluded from the output - compiling a unicode string with an encoding declaration is a SyntaxError in Python 2. Returns ------- A unicode string containing the contents of the file.
[ "Read", "a", "Python", "file", "from", "a", "URL", "using", "the", "encoding", "declared", "inside", "the", "file", ".", "Parameters", "----------", "url", ":", "str", "The", "URL", "from", "which", "to", "fetch", "the", "file", ".", "errors", ":", "str", "How", "to", "handle", "decoding", "errors", "in", "the", "file", ".", "Options", "are", "the", "same", "as", "for", "bytes", ".", "decode", "()", "but", "here", "replace", "is", "the", "default", ".", "skip_encoding_cookie", ":", "bool", "If", "True", "(", "the", "default", ")", "and", "the", "encoding", "declaration", "is", "found", "in", "the", "first", "two", "lines", "that", "line", "will", "be", "excluded", "from", "the", "output", "-", "compiling", "a", "unicode", "string", "with", "an", "encoding", "declaration", "is", "a", "SyntaxError", "in", "Python", "2", ".", "Returns", "-------", "A", "unicode", "string", "containing", "the", "contents", "of", "the", "file", "." ]
python
test
jcalogovic/lightning
stormstats/misc.py
https://github.com/jcalogovic/lightning/blob/f9e52731c9dd40cb302295ec36a444e0377d0570/stormstats/misc.py#L81-L131
def count_lightning(datain, time_step):
    """**Count lightning strikes detected within a defined time_step**

    Generate time intervals according to the time_step defined and count
    lightning strikes in these intervals. Statistics are also calculated for
    lightning detection errors and the number of stations and added to an
    output dataframe. Time stamps in output dataframe correspond to center
    of time periods in which lightning are counted.

    :paramter datain: dataframe (lightning data)
    :parameter time_step: integer (time step in minutes)

    :Example:

    >>> count_lightning(LN_data, time_step)
    """
    if(1440 % time_step == 0):  # check if time_step is multiple of 1 day
        i = 0
        # run for loop for all time steps in one day
        for time_interval in gen_time_intervals(extract_date(datain['datetime'].iloc[0]),
                                                (extract_date(datain['datetime'].iloc[0])+timedelta(days=1)),
                                                timedelta(minutes=time_step)):
            # select data in given time_interval
            tmp_LN_data = datain.loc[(datain['datetime'] >= time_interval) &
                                     (datain['datetime'] < time_interval +
                                      timedelta(minutes=time_step))]
            # calculate stats
            stats_err = gen_stats(tmp_LN_data['err'])
            stats_sta = gen_stats(tmp_LN_data['#sta'])
            d = {'count': stats_err['count'],
                 'err_mean': stats_err['mean'], 'err_std': stats_err['std'],
                 'err_min': stats_err['min'], 'err_max': stats_err['max'],
                 '#sta_mean': stats_sta['mean'], '#sta_std': stats_sta['std'],
                 '#sta_min': stats_sta['min'], '#sta_max': stats_sta['max']}
            col_names = [k for k in d.keys()]
            df_index = time_interval+timedelta(minutes=(time_step/2))
            temp_LN_count = pd.DataFrame(d, index=[df_index], columns=col_names)
            # add data to existing df
            if(i >= 1):
                LN_count = LN_count.append(temp_LN_count)
            else:
                LN_count = temp_LN_count
            i = i + 1
        return LN_count
    else:
        print("time_step {0} multiple of 1 day (1400 min)".format(time_step))
[ "def", "count_lightning", "(", "datain", ",", "time_step", ")", ":", "if", "(", "1440", "%", "time_step", "==", "0", ")", ":", "# check if time_step is multiple of 1 day", "i", "=", "0", "# run for loop for all time steps in one day", "for", "time_interval", "in", "gen_time_intervals", "(", "extract_date", "(", "datain", "[", "'datetime'", "]", ".", "iloc", "[", "0", "]", ")", ",", "(", "extract_date", "(", "datain", "[", "'datetime'", "]", ".", "iloc", "[", "0", "]", ")", "+", "timedelta", "(", "days", "=", "1", ")", ")", ",", "timedelta", "(", "minutes", "=", "time_step", ")", ")", ":", "# select data in given time_interval", "tmp_LN_data", "=", "datain", ".", "loc", "[", "(", "datain", "[", "'datetime'", "]", ">=", "time_interval", ")", "&", "(", "datain", "[", "'datetime'", "]", "<", "time_interval", "+", "timedelta", "(", "minutes", "=", "time_step", ")", ")", "]", "# calculate stats", "stats_err", "=", "gen_stats", "(", "tmp_LN_data", "[", "'err'", "]", ")", "stats_sta", "=", "gen_stats", "(", "tmp_LN_data", "[", "'#sta'", "]", ")", "d", "=", "{", "'count'", ":", "stats_err", "[", "'count'", "]", ",", "'err_mean'", ":", "stats_err", "[", "'mean'", "]", ",", "'err_std'", ":", "stats_err", "[", "'std'", "]", ",", "'err_min'", ":", "stats_err", "[", "'min'", "]", ",", "'err_max'", ":", "stats_err", "[", "'max'", "]", ",", "'#sta_mean'", ":", "stats_sta", "[", "'mean'", "]", ",", "'#sta_std'", ":", "stats_sta", "[", "'std'", "]", ",", "'#sta_min'", ":", "stats_sta", "[", "'min'", "]", ",", "'#sta_max'", ":", "stats_sta", "[", "'max'", "]", "}", "col_names", "=", "[", "k", "for", "k", "in", "d", ".", "keys", "(", ")", "]", "df_index", "=", "time_interval", "+", "timedelta", "(", "minutes", "=", "(", "time_step", "/", "2", ")", ")", "temp_LN_count", "=", "pd", ".", "DataFrame", "(", "d", ",", "index", "=", "[", "df_index", "]", ",", "columns", "=", "col_names", ")", "# add data to existing df", "if", "(", "i", ">=", "1", ")", ":", "LN_count", "=", "LN_count", ".", "append", "(", "temp_LN_count", ")", "else", ":", "LN_count", "=", "temp_LN_count", "i", "=", "i", "+", "1", "return", "LN_count", "else", ":", "print", "(", "\"time_step {0} multiple of 1 day (1400 min)\"", ".", "format", "(", "time_step", ")", ")" ]
**Count lightning strikes detected within a defined time_step** Generate time intervals according to the time_step defined and count lightning strikes in these intervals. Statistics are also calculated for lightning detection errors and the number of stations and added to an output dataframe. Time stamps in output dataframe correspond to center of time periods in which lightning are counted. :paramter datain: dataframe (lightning data) :parameter time_step: integer (time step in minutes) :Example: >>> count_lightning(LN_data, time_step)
[ "**", "Count", "lightning", "strikes", "detected", "within", "a", "defined", "time_step", "**" ]
python
train
elemoine/papyrus
papyrus/protocol.py
https://github.com/elemoine/papyrus/blob/764fb2326105df74fbd3dbcd7e58f4cb21956005/papyrus/protocol.py#L106-L141
def create_attr_filter(request, mapped_class):
    """Create an ``and_`` SQLAlchemy filter (a ClauseList object) based
    on the request params (``queryable``, ``eq``, ``ne``, ...).

    Arguments:

    request
        the request.

    mapped_class
        the SQLAlchemy mapped class.
    """

    mapping = {
        'eq': '__eq__',
        'ne': '__ne__',
        'lt': '__lt__',
        'lte': '__le__',
        'gt': '__gt__',
        'gte': '__ge__',
        'like': 'like',
        'ilike': 'ilike'
    }
    filters = []
    if 'queryable' in request.params:
        queryable = request.params['queryable'].split(',')
        for k in request.params:
            if len(request.params[k]) <= 0 or '__' not in k:
                continue
            col, op = k.split("__")
            if col not in queryable or op not in mapping:
                continue
            column = getattr(mapped_class, col)
            f = getattr(column, mapping[op])(request.params[k])
            filters.append(f)
    return and_(*filters) if len(filters) > 0 else None
[ "def", "create_attr_filter", "(", "request", ",", "mapped_class", ")", ":", "mapping", "=", "{", "'eq'", ":", "'__eq__'", ",", "'ne'", ":", "'__ne__'", ",", "'lt'", ":", "'__lt__'", ",", "'lte'", ":", "'__le__'", ",", "'gt'", ":", "'__gt__'", ",", "'gte'", ":", "'__ge__'", ",", "'like'", ":", "'like'", ",", "'ilike'", ":", "'ilike'", "}", "filters", "=", "[", "]", "if", "'queryable'", "in", "request", ".", "params", ":", "queryable", "=", "request", ".", "params", "[", "'queryable'", "]", ".", "split", "(", "','", ")", "for", "k", "in", "request", ".", "params", ":", "if", "len", "(", "request", ".", "params", "[", "k", "]", ")", "<=", "0", "or", "'__'", "not", "in", "k", ":", "continue", "col", ",", "op", "=", "k", ".", "split", "(", "\"__\"", ")", "if", "col", "not", "in", "queryable", "or", "op", "not", "in", "mapping", ":", "continue", "column", "=", "getattr", "(", "mapped_class", ",", "col", ")", "f", "=", "getattr", "(", "column", ",", "mapping", "[", "op", "]", ")", "(", "request", ".", "params", "[", "k", "]", ")", "filters", ".", "append", "(", "f", ")", "return", "and_", "(", "*", "filters", ")", "if", "len", "(", "filters", ")", ">", "0", "else", "None" ]
Create an ``and_`` SQLAlchemy filter (a ClauseList object) based on the request params (``queryable``, ``eq``, ``ne``, ...). Arguments: request the request. mapped_class the SQLAlchemy mapped class.
[ "Create", "an", "and_", "SQLAlchemy", "filter", "(", "a", "ClauseList", "object", ")", "based", "on", "the", "request", "params", "(", "queryable", "eq", "ne", "...", ")", "." ]
python
train
steffann/pylisp
pylisp/packet/ip/ipv6/base.py
https://github.com/steffann/pylisp/blob/907340f0c7ef2c4d4fe0c8e0a48df5be0d969407/pylisp/packet/ip/ipv6/base.py#L158-L190
def to_bytes(self):
    '''
    Create bytes from properties
    '''
    # Verify that the properties make sense
    self.sanitize()

    # Write the version
    bitstream = BitStream('uint:4=%d' % self.version)

    # Write the traffic class
    bitstream += BitStream('uint:8=%d' % self.traffic_class)

    # Write the flow label
    bitstream += BitStream('uint:20=%d' % self.flow_label)

    # Write the payload length
    payload_bytes = bytes(self.payload)
    payload_length = len(payload_bytes)
    bitstream += BitStream('uint:16=%d' % payload_length)

    # Write the next header type
    bitstream += BitStream('uint:8=%d' % self.next_header)

    # Write the hop limit
    bitstream += BitStream('uint:8=%d' % self.hop_limit)

    # Write the source and destination addresses
    bitstream += BitStream('uint:128=%d, '
                           'uint:128=%d' % (int(self.source),
                                            int(self.destination)))

    return bitstream.bytes + payload_bytes
[ "def", "to_bytes", "(", "self", ")", ":", "# Verify that the properties make sense", "self", ".", "sanitize", "(", ")", "# Write the version", "bitstream", "=", "BitStream", "(", "'uint:4=%d'", "%", "self", ".", "version", ")", "# Write the traffic class", "bitstream", "+=", "BitStream", "(", "'uint:8=%d'", "%", "self", ".", "traffic_class", ")", "# Write the flow label", "bitstream", "+=", "BitStream", "(", "'uint:20=%d'", "%", "self", ".", "flow_label", ")", "# Write the payload length", "payload_bytes", "=", "bytes", "(", "self", ".", "payload", ")", "payload_length", "=", "len", "(", "payload_bytes", ")", "bitstream", "+=", "BitStream", "(", "'uint:16=%d'", "%", "payload_length", ")", "# Write the next header type", "bitstream", "+=", "BitStream", "(", "'uint:8=%d'", "%", "self", ".", "next_header", ")", "# Write the hop limit", "bitstream", "+=", "BitStream", "(", "'uint:8=%d'", "%", "self", ".", "hop_limit", ")", "# Write the source and destination addresses", "bitstream", "+=", "BitStream", "(", "'uint:128=%d, '", "'uint:128=%d'", "%", "(", "int", "(", "self", ".", "source", ")", ",", "int", "(", "self", ".", "destination", ")", ")", ")", "return", "bitstream", ".", "bytes", "+", "payload_bytes" ]
Create bytes from properties
[ "Create", "bytes", "from", "properties" ]
python
train
tradenity/python-sdk
tradenity/resources/table_rate_shipping.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/table_rate_shipping.py#L660-L680
def get_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
    """Find TableRateShipping

    Return single instance of TableRateShipping by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str table_rate_shipping_id: ID of tableRateShipping to return (required)
    :return: TableRateShipping
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
    else:
        (data) = cls._get_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
        return data
[ "def", "get_table_rate_shipping_by_id", "(", "cls", ",", "table_rate_shipping_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_get_table_rate_shipping_by_id_with_http_info", "(", "table_rate_shipping_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_get_table_rate_shipping_by_id_with_http_info", "(", "table_rate_shipping_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Find TableRateShipping Return single instance of TableRateShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_table_rate_shipping_by_id(table_rate_shipping_id, async=True) >>> result = thread.get() :param async bool :param str table_rate_shipping_id: ID of tableRateShipping to return (required) :return: TableRateShipping If the method is called asynchronously, returns the request thread.
[ "Find", "TableRateShipping" ]
python
train
thespacedoctor/tastic
tastic/tastic.py
https://github.com/thespacedoctor/tastic/blob/a0a16cf329a50057906ac3f696bb60b6fcee25e0/tastic/tastic.py#L1117-L1195
def add_task(
        self,
        title,
        tags=None):
    """*Add a task to this taskpaper object*

    **Key Arguments:**
        - ``title`` -- the title for the task.
        - ``tags`` -- tag string (*'@one @two(data)'*) or list of tags (*['one', 'two(data)']*)

    **Return:**
        - ``task`` -- the new taskpaper task object

    **Usage:**

        To add a task to an object (document, project, or task) use:

        .. code-block:: python

            newTask = doc.add_task("this is a task I added", "@with @tags")
    """
    self.refresh
    task = title.strip()
    if task[:2] != "- ":
        task = "- " + task

    if tags:
        if isinstance(tags, list):
            if "@" not in tags[0]:
                tagString = (" @").join(tags)
                tagString = "@" + tagString
            else:
                tagString = (" ").join(tags)
        else:
            tagString = tags
        tagString = tagString.strip()
        task += " " + tagString

    newTask = self._get_object(
        regex=re.compile(
            r'((?<=\n)|(?<=^))(?P<title>- ((?! @).)*)( *(?P<tagString>( *?@[^(\s]+(\([^)]*\))?)+))?(?P<content>(\n(( |\t)+\S.*)|\n( |\t)*)*)', re.UNICODE),
        objectType="task",
        content=task
    )

    # ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO
    # THIS OBJECT
    oldContent = self.to_string(indentLevel=1)
    newContent = self.to_string(
        indentLevel=1, tasks=self.tasks + newTask)

    if self.parent:
        doc = self.parent._update_document_tree(
            oldContent=oldContent,
            newContent=newContent
        )

    self.content = self.content.replace(self.to_string(indentLevel=0, title=False), self.to_string(
        indentLevel=0, title=False, tasks=self.tasks + newTask))

    doc = self
    while doc.parent:
        doc = doc.parent
    doc.refresh

    if not self.parent:
        parent = self
    else:
        parent = doc.get_project(self.title)
        if not parent:
            parent = doc.get_task(self.title)

    thisTask = parent.get_task(title)

    self.refresh

    return thisTask
[ "def", "add_task", "(", "self", ",", "title", ",", "tags", "=", "None", ")", ":", "self", ".", "refresh", "task", "=", "title", ".", "strip", "(", ")", "if", "task", "[", ":", "2", "]", "!=", "\"- \"", ":", "task", "=", "\"- \"", "+", "task", "if", "tags", ":", "if", "isinstance", "(", "tags", ",", "list", ")", ":", "if", "\"@\"", "not", "in", "tags", "[", "0", "]", ":", "tagString", "=", "(", "\" @\"", ")", ".", "join", "(", "tags", ")", "tagString", "=", "\"@\"", "+", "tagString", "else", ":", "tagString", "=", "(", "\" \"", ")", ".", "join", "(", "tags", ")", "else", ":", "tagString", "=", "tags", "tagString", "=", "tagString", ".", "strip", "(", ")", "task", "+=", "\" \"", "+", "tagString", "newTask", "=", "self", ".", "_get_object", "(", "regex", "=", "re", ".", "compile", "(", "r'((?<=\\n)|(?<=^))(?P<title>- ((?! @).)*)( *(?P<tagString>( *?@[^(\\s]+(\\([^)]*\\))?)+))?(?P<content>(\\n(( |\\t)+\\S.*)|\\n( |\\t)*)*)'", ",", "re", ".", "UNICODE", ")", ",", "objectType", "=", "\"task\"", ",", "content", "=", "task", ")", "# ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO", "# THIS OBJECT", "oldContent", "=", "self", ".", "to_string", "(", "indentLevel", "=", "1", ")", "newContent", "=", "self", ".", "to_string", "(", "indentLevel", "=", "1", ",", "tasks", "=", "self", ".", "tasks", "+", "newTask", ")", "if", "self", ".", "parent", ":", "doc", "=", "self", ".", "parent", ".", "_update_document_tree", "(", "oldContent", "=", "oldContent", ",", "newContent", "=", "newContent", ")", "self", ".", "content", "=", "self", ".", "content", ".", "replace", "(", "self", ".", "to_string", "(", "indentLevel", "=", "0", ",", "title", "=", "False", ")", ",", "self", ".", "to_string", "(", "indentLevel", "=", "0", ",", "title", "=", "False", ",", "tasks", "=", "self", ".", "tasks", "+", "newTask", ")", ")", "doc", "=", "self", "while", "doc", ".", "parent", ":", "doc", "=", "doc", ".", "parent", "doc", ".", "refresh", "if", "not", "self", ".", "parent", ":", "parent", "=", "self", "else", ":", "parent", "=", "doc", ".", "get_project", "(", "self", ".", "title", ")", "if", "not", "parent", ":", "parent", "=", "doc", ".", "get_task", "(", "self", ".", "title", ")", "thisTask", "=", "parent", ".", "get_task", "(", "title", ")", "self", ".", "refresh", "return", "thisTask" ]
*Add a task to this taskpaper object* **Key Arguments:** - ``title`` -- the title for the task. - ``tags`` -- tag string (*'@one @two(data)'*) or list of tags (*['one', 'two(data)']*) **Return:** - ``task`` -- the new taskpaper task object **Usage:** To add a task to an object (document, project, or task) use: .. code-block:: python newTask = doc.add_task("this is a task I added", "@with @tags")
[ "*", "Add", "a", "task", "to", "this", "taskpaper", "object", "*" ]
python
train
fabioz/PyDev.Debugger
third_party/pep8/pycodestyle.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L859-L897
def whitespace_before_comment(logical_line, tokens):
    r"""Separate inline comments by at least two spaces.

    An inline comment is a comment on the same line as a statement.
    Inline comments should be separated by at least two spaces from the
    statement. They should start with a # and a single space.

    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).

    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  #  Increment x
    E265: #Block comment
    E266: ### Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            inline_comment = line[:start[1]].strip()
            if inline_comment:
                if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                    yield (prev_end,
                           "E261 at least two spaces before inline comment")
            symbol, sp, comment = text.partition(' ')
            bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#')
            if inline_comment:
                if bad_prefix or comment[:1] in WHITESPACE:
                    yield start, "E262 inline comment should start with '# '"
            elif bad_prefix and (bad_prefix != '!' or start[0] > 1):
                if bad_prefix != '#':
                    yield start, "E265 block comment should start with '# '"
                elif comment:
                    yield start, "E266 too many leading '#' for block comment"
        elif token_type != tokenize.NL:
            prev_end = end
[ "def", "whitespace_before_comment", "(", "logical_line", ",", "tokens", ")", ":", "prev_end", "=", "(", "0", ",", "0", ")", "for", "token_type", ",", "text", ",", "start", ",", "end", ",", "line", "in", "tokens", ":", "if", "token_type", "==", "tokenize", ".", "COMMENT", ":", "inline_comment", "=", "line", "[", ":", "start", "[", "1", "]", "]", ".", "strip", "(", ")", "if", "inline_comment", ":", "if", "prev_end", "[", "0", "]", "==", "start", "[", "0", "]", "and", "start", "[", "1", "]", "<", "prev_end", "[", "1", "]", "+", "2", ":", "yield", "(", "prev_end", ",", "\"E261 at least two spaces before inline comment\"", ")", "symbol", ",", "sp", ",", "comment", "=", "text", ".", "partition", "(", "' '", ")", "bad_prefix", "=", "symbol", "not", "in", "'#:'", "and", "(", "symbol", ".", "lstrip", "(", "'#'", ")", "[", ":", "1", "]", "or", "'#'", ")", "if", "inline_comment", ":", "if", "bad_prefix", "or", "comment", "[", ":", "1", "]", "in", "WHITESPACE", ":", "yield", "start", ",", "\"E262 inline comment should start with '# '\"", "elif", "bad_prefix", "and", "(", "bad_prefix", "!=", "'!'", "or", "start", "[", "0", "]", ">", "1", ")", ":", "if", "bad_prefix", "!=", "'#'", ":", "yield", "start", ",", "\"E265 block comment should start with '# '\"", "elif", "comment", ":", "yield", "start", ",", "\"E266 too many leading '#' for block comment\"", "elif", "token_type", "!=", "tokenize", ".", "NL", ":", "prev_end", "=", "end" ]
r"""Separate inline comments by at least two spaces. An inline comment is a comment on the same line as a statement. Inline comments should be separated by at least two spaces from the statement. They should start with a # and a single space. Each line of a block comment starts with a # and a single space (unless it is indented text inside the comment). Okay: x = x + 1 # Increment x Okay: x = x + 1 # Increment x Okay: # Block comment E261: x = x + 1 # Increment x E262: x = x + 1 #Increment x E262: x = x + 1 # Increment x E265: #Block comment E266: ### Block comment
[ "r", "Separate", "inline", "comments", "by", "at", "least", "two", "spaces", "." ]
python
train
tensorflow/cleverhans
scripts/make_confidence_report_bundled.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/scripts/make_confidence_report_bundled.py#L42-L56
def main(argv=None):
    """
    Make a confidence report and save it to disk.
    """
    try:
        _name_of_script, filepath = argv
    except ValueError:
        raise ValueError(argv)
    print(filepath)
    make_confidence_report_bundled(filepath=filepath,
                                   test_start=FLAGS.test_start,
                                   test_end=FLAGS.test_end,
                                   which_set=FLAGS.which_set,
                                   recipe=FLAGS.recipe,
                                   report_path=FLAGS.report_path,
                                   batch_size=FLAGS.batch_size)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "try", ":", "_name_of_script", ",", "filepath", "=", "argv", "except", "ValueError", ":", "raise", "ValueError", "(", "argv", ")", "print", "(", "filepath", ")", "make_confidence_report_bundled", "(", "filepath", "=", "filepath", ",", "test_start", "=", "FLAGS", ".", "test_start", ",", "test_end", "=", "FLAGS", ".", "test_end", ",", "which_set", "=", "FLAGS", ".", "which_set", ",", "recipe", "=", "FLAGS", ".", "recipe", ",", "report_path", "=", "FLAGS", ".", "report_path", ",", "batch_size", "=", "FLAGS", ".", "batch_size", ")" ]
Make a confidence report and save it to disk.
[ "Make", "a", "confidence", "report", "and", "save", "it", "to", "disk", "." ]
python
train
LogicalDash/LiSE
ELiDE/ELiDE/board/board.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/board.py#L203-L221
def on_touch_move(self, touch):
    """If an entity is selected, drag it."""
    if hasattr(self, '_lasttouch') and self._lasttouch == touch:
        return
    if self.app.selection in self.selection_candidates:
        self.selection_candidates.remove(self.app.selection)
    if self.app.selection:
        if not self.selection_candidates:
            self.keep_selection = True
        ret = super().on_touch_move(touch)
        return ret
    elif self.selection_candidates:
        for cand in self.selection_candidates:
            if cand.collide_point(*touch.pos):
                self.app.selection = cand
                cand.selected = True
                touch.grab(cand)
                ret = super().on_touch_move(touch)
                return ret
[ "def", "on_touch_move", "(", "self", ",", "touch", ")", ":", "if", "hasattr", "(", "self", ",", "'_lasttouch'", ")", "and", "self", ".", "_lasttouch", "==", "touch", ":", "return", "if", "self", ".", "app", ".", "selection", "in", "self", ".", "selection_candidates", ":", "self", ".", "selection_candidates", ".", "remove", "(", "self", ".", "app", ".", "selection", ")", "if", "self", ".", "app", ".", "selection", ":", "if", "not", "self", ".", "selection_candidates", ":", "self", ".", "keep_selection", "=", "True", "ret", "=", "super", "(", ")", ".", "on_touch_move", "(", "touch", ")", "return", "ret", "elif", "self", ".", "selection_candidates", ":", "for", "cand", "in", "self", ".", "selection_candidates", ":", "if", "cand", ".", "collide_point", "(", "*", "touch", ".", "pos", ")", ":", "self", ".", "app", ".", "selection", "=", "cand", "cand", ".", "selected", "=", "True", "touch", ".", "grab", "(", "cand", ")", "ret", "=", "super", "(", ")", ".", "on_touch_move", "(", "touch", ")", "return", "ret" ]
If an entity is selected, drag it.
[ "If", "an", "entity", "is", "selected", "drag", "it", "." ]
python
train
ternaris/marv
docs/tutorial/code/marv_tutorial/__init__.py
https://github.com/ternaris/marv/blob/c221354d912ff869bbdb4f714a86a70be30d823e/docs/tutorial/code/marv_tutorial/__init__.py#L124-L147
def gallery_section(images, title):
    """Create detail section with gallery.

    Args:
        title (str): Title to be displayed for detail section.
        images: stream of marv image files

    Returns
        One detail section.
    """
    # pull all images
    imgs = []
    while True:
        img = yield marv.pull(images)
        if img is None:
            break
        imgs.append({'src': img.relpath})
    if not imgs:
        return

    # create gallery widget and section containing it
    widget = {'title': images.title, 'gallery': {'images': imgs}}
    section = {'title': title, 'widgets': [widget]}
    yield marv.push(section)
[ "def", "gallery_section", "(", "images", ",", "title", ")", ":", "# pull all images", "imgs", "=", "[", "]", "while", "True", ":", "img", "=", "yield", "marv", ".", "pull", "(", "images", ")", "if", "img", "is", "None", ":", "break", "imgs", ".", "append", "(", "{", "'src'", ":", "img", ".", "relpath", "}", ")", "if", "not", "imgs", ":", "return", "# create gallery widget and section containing it", "widget", "=", "{", "'title'", ":", "images", ".", "title", ",", "'gallery'", ":", "{", "'images'", ":", "imgs", "}", "}", "section", "=", "{", "'title'", ":", "title", ",", "'widgets'", ":", "[", "widget", "]", "}", "yield", "marv", ".", "push", "(", "section", ")" ]
Create detail section with gallery. Args: title (str): Title to be displayed for detail section. images: stream of marv image files Returns One detail section.
[ "Create", "detail", "section", "with", "gallery", "." ]
python
train
mkoura/dump2polarion
dump2polarion/parselogs.py
https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/parselogs.py#L208-L214
def get_requirement_warn(self, line):
    """Gets name of test case that was not successfully imported."""
    res = self.REQ_WARN_SEARCH.search(line)
    try:
        return LogItem(res.group(1), None, None)
    except (AttributeError, IndexError):
        return None
[ "def", "get_requirement_warn", "(", "self", ",", "line", ")", ":", "res", "=", "self", ".", "REQ_WARN_SEARCH", ".", "search", "(", "line", ")", "try", ":", "return", "LogItem", "(", "res", ".", "group", "(", "1", ")", ",", "None", ",", "None", ")", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "return", "None" ]
Gets name of test case that was not successfully imported.
[ "Gets", "name", "of", "test", "case", "that", "was", "not", "successfully", "imported", "." ]
python
train
OSSOS/MOP
src/ossos/core/ossos/storage.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/storage.py#L1546-L1554
def get_header(uri):
    """
    Pull a FITS header from observation at the given URI

    @param uri: The URI of the image in VOSpace.
    """
    if uri not in astheaders:
        astheaders[uri] = get_hdu(uri, cutout="[1:1,1:1]")[0].header
    return astheaders[uri]
[ "def", "get_header", "(", "uri", ")", ":", "if", "uri", "not", "in", "astheaders", ":", "astheaders", "[", "uri", "]", "=", "get_hdu", "(", "uri", ",", "cutout", "=", "\"[1:1,1:1]\"", ")", "[", "0", "]", ".", "header", "return", "astheaders", "[", "uri", "]" ]
Pull a FITS header from observation at the given URI @param uri: The URI of the image in VOSpace.
[ "Pull", "a", "FITS", "header", "from", "observation", "at", "the", "given", "URI" ]
python
train
greenbone/ospd
ospd/misc.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L902-L917
def print_version(wrapper):
    """ Prints the server version and license information."""

    scanner_name = wrapper.get_scanner_name()
    server_version = wrapper.get_server_version()
    print("OSP Server for {0} version {1}".format(scanner_name,
                                                  server_version))
    protocol_version = wrapper.get_protocol_version()
    print("OSP Version: {0}".format(protocol_version))
    daemon_name = wrapper.get_daemon_name()
    daemon_version = wrapper.get_daemon_version()
    print("Using: {0} {1}".format(daemon_name, daemon_version))
    print("Copyright (C) 2014, 2015 Greenbone Networks GmbH\n"
          "License GPLv2+: GNU GPL version 2 or later\n"
          "This is free software: you are free to change"
          " and redistribute it.\n"
          "There is NO WARRANTY, to the extent permitted by law.")
[ "def", "print_version", "(", "wrapper", ")", ":", "scanner_name", "=", "wrapper", ".", "get_scanner_name", "(", ")", "server_version", "=", "wrapper", ".", "get_server_version", "(", ")", "print", "(", "\"OSP Server for {0} version {1}\"", ".", "format", "(", "scanner_name", ",", "server_version", ")", ")", "protocol_version", "=", "wrapper", ".", "get_protocol_version", "(", ")", "print", "(", "\"OSP Version: {0}\"", ".", "format", "(", "protocol_version", ")", ")", "daemon_name", "=", "wrapper", ".", "get_daemon_name", "(", ")", "daemon_version", "=", "wrapper", ".", "get_daemon_version", "(", ")", "print", "(", "\"Using: {0} {1}\"", ".", "format", "(", "daemon_name", ",", "daemon_version", ")", ")", "print", "(", "\"Copyright (C) 2014, 2015 Greenbone Networks GmbH\\n\"", "\"License GPLv2+: GNU GPL version 2 or later\\n\"", "\"This is free software: you are free to change\"", "\" and redistribute it.\\n\"", "\"There is NO WARRANTY, to the extent permitted by law.\"", ")" ]
Prints the server version and license information.
[ "Prints", "the", "server", "version", "and", "license", "information", "." ]
python
train
rigetti/quantumflow
quantumflow/ops.py
https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/ops.py#L337-L342
def choi(self) -> bk.BKTensor:
    """Return the Choi matrix representation of this super operator"""
    # Put superop axes in [ok, ib, ob, ik] and reshape to matrix
    N = self.qubit_nb
    return bk.reshape(self.sharp.tensor, [2**(N*2)] * 2)
[ "def", "choi", "(", "self", ")", "->", "bk", ".", "BKTensor", ":", "# Put superop axes in [ok, ib, ob, ik] and reshape to matrix", "N", "=", "self", ".", "qubit_nb", "return", "bk", ".", "reshape", "(", "self", ".", "sharp", ".", "tensor", ",", "[", "2", "**", "(", "N", "*", "2", ")", "]", "*", "2", ")" ]
Return the Choi matrix representation of this super operator
[ "Return", "the", "Choi", "matrix", "representation", "of", "this", "super", "operator" ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/flexible_shape_utils.py#L220-L229
def isFlexible(self):
    """
    Returns true if any one of the channel, height, or width ranges of this
    shape allow more than one input value.
    """
    for key, value in self.arrayShapeRange.items():
        if key in _CONSTRAINED_KEYS:
            if value.isFlexible:
                return True
    return False
[ "def", "isFlexible", "(", "self", ")", ":", "for", "key", ",", "value", "in", "self", ".", "arrayShapeRange", ".", "items", "(", ")", ":", "if", "key", "in", "_CONSTRAINED_KEYS", ":", "if", "value", ".", "isFlexible", ":", "return", "True", "return", "False" ]
Returns true if any one of the channel, height, or width ranges of this shape allow more than one input value.
[ "Returns", "true", "if", "any", "one", "of", "the", "channel", "height", "or", "width", "ranges", "of", "this", "shape", "allow", "more", "than", "one", "input", "value", "." ]
python
train
dcos/shakedown
shakedown/dcos/master.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/master.py#L93-L101
def get_all_masters():
    """ Returns the json object that represents each of the masters.
    """
    masters = []
    for master in __master_zk_nodes_keys():
        master_zk_str = get_zk_node_data(master)['str']
        masters.append(json.loads(master_zk_str))
    return masters
[ "def", "get_all_masters", "(", ")", ":", "masters", "=", "[", "]", "for", "master", "in", "__master_zk_nodes_keys", "(", ")", ":", "master_zk_str", "=", "get_zk_node_data", "(", "master", ")", "[", "'str'", "]", "masters", ".", "append", "(", "json", ".", "loads", "(", "master_zk_str", ")", ")", "return", "masters" ]
Returns the json object that represents each of the masters.
[ "Returns", "the", "json", "object", "that", "represents", "each", "of", "the", "masters", "." ]
python
train
dcoker/awsmfa
awsmfa/__main__.py
https://github.com/dcoker/awsmfa/blob/18a8216bfd3184c78b4067edf5198250f66e003d/awsmfa/__main__.py#L157-L170
def acquire_code(args, session, session3): """returns the user's token serial number, MFA token code, and an error code.""" serial_number = find_mfa_for_user(args.serial_number, session, session3) if not serial_number: print("There are no MFA devices associated with this user.", file=sys.stderr) return None, None, USER_RECOVERABLE_ERROR token_code = args.token_code if token_code is None: while token_code is None or len(token_code) != 6: token_code = getpass.getpass("MFA Token Code: ") return serial_number, token_code, OK
[ "def", "acquire_code", "(", "args", ",", "session", ",", "session3", ")", ":", "serial_number", "=", "find_mfa_for_user", "(", "args", ".", "serial_number", ",", "session", ",", "session3", ")", "if", "not", "serial_number", ":", "print", "(", "\"There are no MFA devices associated with this user.\"", ",", "file", "=", "sys", ".", "stderr", ")", "return", "None", ",", "None", ",", "USER_RECOVERABLE_ERROR", "token_code", "=", "args", ".", "token_code", "if", "token_code", "is", "None", ":", "while", "token_code", "is", "None", "or", "len", "(", "token_code", ")", "!=", "6", ":", "token_code", "=", "getpass", ".", "getpass", "(", "\"MFA Token Code: \"", ")", "return", "serial_number", ",", "token_code", ",", "OK" ]
returns the user's token serial number, MFA token code, and an error code.
[ "returns", "the", "user", "s", "token", "serial", "number", "MFA", "token", "code", "and", "an", "error", "code", "." ]
python
train
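For illustration, a minimal standalone sketch of the token-prompt loop used in acquire_code above; the serial-number lookup and the AWS session handling are omitted, and the 6-character length check mirrors the function:

    import getpass

    def prompt_token_code():
        # Keep asking until a 6-character MFA code is entered, as in acquire_code above.
        token_code = None
        while token_code is None or len(token_code) != 6:
            token_code = getpass.getpass("MFA Token Code: ")
        return token_code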
d0c-s4vage/pfp
pfp/interp.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2060-L2090
def _handle_for(self, node, scope, ctxt, stream): """Handle For nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ self._dlog("handling for") if node.init is not None: # perform the init self._handle_node(node.init, scope, ctxt, stream) while node.cond is None or self._handle_node(node.cond, scope, ctxt, stream): if node.stmt is not None: try: # do the for body self._handle_node(node.stmt, scope, ctxt, stream) except errors.InterpBreak as e: break # we still need to interpret the "next" statement, # so just pass except errors.InterpContinue as e: pass if node.next is not None: # do the next statement self._handle_node(node.next, scope, ctxt, stream)
[ "def", "_handle_for", "(", "self", ",", "node", ",", "scope", ",", "ctxt", ",", "stream", ")", ":", "self", ".", "_dlog", "(", "\"handling for\"", ")", "if", "node", ".", "init", "is", "not", "None", ":", "# perform the init", "self", ".", "_handle_node", "(", "node", ".", "init", ",", "scope", ",", "ctxt", ",", "stream", ")", "while", "node", ".", "cond", "is", "None", "or", "self", ".", "_handle_node", "(", "node", ".", "cond", ",", "scope", ",", "ctxt", ",", "stream", ")", ":", "if", "node", ".", "stmt", "is", "not", "None", ":", "try", ":", "# do the for body", "self", ".", "_handle_node", "(", "node", ".", "stmt", ",", "scope", ",", "ctxt", ",", "stream", ")", "except", "errors", ".", "InterpBreak", "as", "e", ":", "break", "# we still need to interpret the \"next\" statement,", "# so just pass", "except", "errors", ".", "InterpContinue", "as", "e", ":", "pass", "if", "node", ".", "next", "is", "not", "None", ":", "# do the next statement", "self", ".", "_handle_node", "(", "node", ".", "next", ",", "scope", ",", "ctxt", ",", "stream", ")" ]
Handle For nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO
[ "Handle", "For", "nodes" ]
python
train
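A rough standalone sketch of how a C-style for statement decomposes into init/cond/next handling as in _handle_for above; the exception classes and the dict-based environment are stand-ins, not the real pfp scope or context objects:

    class InterpBreak(Exception): pass
    class InterpContinue(Exception): pass

    def run_for(init, cond, nxt, body, env):
        # env is a plain dict standing in for the interpreter scope/context.
        if init is not None:
            init(env)
        while cond is None or cond(env):
            if body is not None:
                try:
                    body(env)
                except InterpBreak:
                    break
                except InterpContinue:
                    pass  # still fall through to the "next" statement
            if nxt is not None:
                nxt(env)

    # for (i = 0; i < 3; i++) total += i;
    env = {}
    run_for(lambda e: e.update(i=0, total=0),
            lambda e: e["i"] < 3,
            lambda e: e.update(i=e["i"] + 1),
            lambda e: e.update(total=e["total"] + e["i"]),
            env)
    print(env["total"])  # 3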
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L552-L562
def get_shutit_pexpect_sessions(self, note=None): """Returns all the shutit_pexpect_session keys for this object. @return: list of all shutit_pexpect_session keys (pexpect_session_ids) """ self.handle_note(note) sessions = [] for key in self.shutit_pexpect_sessions: sessions.append(shutit_object.shutit_pexpect_sessions[key]) self.handle_note_after(note) return sessions
[ "def", "get_shutit_pexpect_sessions", "(", "self", ",", "note", "=", "None", ")", ":", "self", ".", "handle_note", "(", "note", ")", "sessions", "=", "[", "]", "for", "key", "in", "self", ".", "shutit_pexpect_sessions", ":", "sessions", ".", "append", "(", "shutit_object", ".", "shutit_pexpect_sessions", "[", "key", "]", ")", "self", ".", "handle_note_after", "(", "note", ")", "return", "sessions" ]
Returns all the shutit_pexpect_session keys for this object. @return: list of all shutit_pexpect_session keys (pexpect_session_ids)
[ "Returns", "all", "the", "shutit_pexpect_session", "keys", "for", "this", "object", "." ]
python
train
lrgar/scope
scope/scope.py
https://github.com/lrgar/scope/blob/f1c5815b0efd6be75ce54370d69e9b7eca854844/scope/scope.py#L112-L116
def set_children(self, value, defined): """Set the children of the object.""" self.children = value self.children_defined = defined return self
[ "def", "set_children", "(", "self", ",", "value", ",", "defined", ")", ":", "self", ".", "children", "=", "value", "self", ".", "children_defined", "=", "defined", "return", "self" ]
Set the children of the object.
[ "Set", "the", "children", "of", "the", "object", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L1969-L1981
def connect_to_database_odbc_mysql(self, database: str, user: str, password: str, server: str = "localhost", port: int = 3306, driver: str = "{MySQL ODBC 5.1 Driver}", autocommit: bool = True) -> None: """Connects to a MySQL database via ODBC.""" self.connect(engine=ENGINE_MYSQL, interface=INTERFACE_ODBC, database=database, user=user, password=password, host=server, port=port, driver=driver, autocommit=autocommit)
[ "def", "connect_to_database_odbc_mysql", "(", "self", ",", "database", ":", "str", ",", "user", ":", "str", ",", "password", ":", "str", ",", "server", ":", "str", "=", "\"localhost\"", ",", "port", ":", "int", "=", "3306", ",", "driver", ":", "str", "=", "\"{MySQL ODBC 5.1 Driver}\"", ",", "autocommit", ":", "bool", "=", "True", ")", "->", "None", ":", "self", ".", "connect", "(", "engine", "=", "ENGINE_MYSQL", ",", "interface", "=", "INTERFACE_ODBC", ",", "database", "=", "database", ",", "user", "=", "user", ",", "password", "=", "password", ",", "host", "=", "server", ",", "port", "=", "port", ",", "driver", "=", "driver", ",", "autocommit", "=", "autocommit", ")" ]
Connects to a MySQL database via ODBC.
[ "Connects", "to", "a", "MySQL", "database", "via", "ODBC", "." ]
python
train
schapman1974/tinymongo
tinymongo/tinymongo.py
https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L764-L775
def hasNext(self): """ Returns True if the cursor has a next position, False if not :return: """ cursor_pos = self.cursorpos + 1 try: self.cursordat[cursor_pos] return True except IndexError: return False
[ "def", "hasNext", "(", "self", ")", ":", "cursor_pos", "=", "self", ".", "cursorpos", "+", "1", "try", ":", "self", ".", "cursordat", "[", "cursor_pos", "]", "return", "True", "except", "IndexError", ":", "return", "False" ]
Returns True if the cursor has a next position, False if not :return:
[ "Returns", "True", "if", "the", "cursor", "has", "a", "next", "position", "False", "if", "not", ":", "return", ":" ]
python
train
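The same look-ahead-by-index pattern in a self-contained form (a toy cursor over a list, not the actual TinyMongo classes):

    class ListCursor:
        def __init__(self, docs):
            self.docs = docs
            self.pos = -1

        def hasNext(self):
            # Probe the next index; IndexError means the cursor is at the end.
            try:
                self.docs[self.pos + 1]
                return True
            except IndexError:
                return False

        def next(self):
            self.pos += 1
            return self.docs[self.pos]

    cur = ListCursor([{"a": 1}, {"a": 2}])
    while cur.hasNext():
        print(cur.next())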
wookayin/gpustat
gpustat/__main__.py
https://github.com/wookayin/gpustat/blob/28299cdcf55dd627fdd9800cf344988b43188ee8/gpustat/__main__.py#L14-L37
def print_gpustat(json=False, debug=False, **kwargs): ''' Display the GPU query results into standard output. ''' try: gpu_stats = GPUStatCollection.new_query() except Exception as e: sys.stderr.write('Error on querying NVIDIA devices.' ' Use --debug flag for details\n') if debug: try: import traceback traceback.print_exc(file=sys.stderr) except Exception: # NVMLError can't be processed by traceback: # https://bugs.python.org/issue28603 # as a workaround, simply re-throw the exception raise e sys.exit(1) if json: gpu_stats.print_json(sys.stdout) else: gpu_stats.print_formatted(sys.stdout, **kwargs)
[ "def", "print_gpustat", "(", "json", "=", "False", ",", "debug", "=", "False", ",", "*", "*", "kwargs", ")", ":", "try", ":", "gpu_stats", "=", "GPUStatCollection", ".", "new_query", "(", ")", "except", "Exception", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "'Error on querying NVIDIA devices.'", "' Use --debug flag for details\\n'", ")", "if", "debug", ":", "try", ":", "import", "traceback", "traceback", ".", "print_exc", "(", "file", "=", "sys", ".", "stderr", ")", "except", "Exception", ":", "# NVMLError can't be processed by traceback:", "# https://bugs.python.org/issue28603", "# as a workaround, simply re-throw the exception", "raise", "e", "sys", ".", "exit", "(", "1", ")", "if", "json", ":", "gpu_stats", ".", "print_json", "(", "sys", ".", "stdout", ")", "else", ":", "gpu_stats", ".", "print_formatted", "(", "sys", ".", "stdout", ",", "*", "*", "kwargs", ")" ]
Display the GPU query results into standard output.
[ "Display", "the", "GPU", "query", "results", "into", "standard", "output", "." ]
python
train
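The error-reporting pattern above (terse message by default, full traceback only when a debug flag is set) in a generic, library-free sketch:

    import sys
    import traceback

    def run(query, debug=False):
        try:
            return query()
        except Exception:
            sys.stderr.write("Error on querying devices. Use --debug flag for details\n")
            if debug:
                traceback.print_exc(file=sys.stderr)
            sys.exit(1)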
jorisroovers/gitlint
gitlint/lint.py
https://github.com/jorisroovers/gitlint/blob/6248bd6cbc20c1be3bb6d196a5ec0425af99733b/gitlint/lint.py#L98-L108
def print_violations(self, violations): """ Print a given set of violations to the standard error output """ for v in violations: line_nr = v.line_nr if v.line_nr else "-" self.display.e(u"{0}: {1}".format(line_nr, v.rule_id), exact=True) self.display.ee(u"{0}: {1} {2}".format(line_nr, v.rule_id, v.message), exact=True) if v.content: self.display.eee(u"{0}: {1} {2}: \"{3}\"".format(line_nr, v.rule_id, v.message, v.content), exact=True) else: self.display.eee(u"{0}: {1} {2}".format(line_nr, v.rule_id, v.message), exact=True)
[ "def", "print_violations", "(", "self", ",", "violations", ")", ":", "for", "v", "in", "violations", ":", "line_nr", "=", "v", ".", "line_nr", "if", "v", ".", "line_nr", "else", "\"-\"", "self", ".", "display", ".", "e", "(", "u\"{0}: {1}\"", ".", "format", "(", "line_nr", ",", "v", ".", "rule_id", ")", ",", "exact", "=", "True", ")", "self", ".", "display", ".", "ee", "(", "u\"{0}: {1} {2}\"", ".", "format", "(", "line_nr", ",", "v", ".", "rule_id", ",", "v", ".", "message", ")", ",", "exact", "=", "True", ")", "if", "v", ".", "content", ":", "self", ".", "display", ".", "eee", "(", "u\"{0}: {1} {2}: \\\"{3}\\\"\"", ".", "format", "(", "line_nr", ",", "v", ".", "rule_id", ",", "v", ".", "message", ",", "v", ".", "content", ")", ",", "exact", "=", "True", ")", "else", ":", "self", ".", "display", ".", "eee", "(", "u\"{0}: {1} {2}\"", ".", "format", "(", "line_nr", ",", "v", ".", "rule_id", ",", "v", ".", "message", ")", ",", "exact", "=", "True", ")" ]
Print a given set of violations to the standard error output
[ "Print", "a", "given", "set", "of", "violations", "to", "the", "standard", "error", "output" ]
python
train
codeinthehole/django-async-messages
async_messages/middleware.py
https://github.com/codeinthehole/django-async-messages/blob/292cb2fc517521dabc67b90e7ca5b1617f59e214/async_messages/middleware.py#L8-L18
def process_response(self, request, response): """ Check for messages for this user and, if it exists, call the messages API with it """ if hasattr(request, "session") and hasattr(request, "user") and request.user.is_authenticated(): msgs = get_messages(request.user) if msgs: for msg, level in msgs: messages.add_message(request, level, msg) return response
[ "def", "process_response", "(", "self", ",", "request", ",", "response", ")", ":", "if", "hasattr", "(", "request", ",", "\"session\"", ")", "and", "hasattr", "(", "request", ",", "\"user\"", ")", "and", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "msgs", "=", "get_messages", "(", "request", ".", "user", ")", "if", "msgs", ":", "for", "msg", ",", "level", "in", "msgs", ":", "messages", ".", "add_message", "(", "request", ",", "level", ",", "msg", ")", "return", "response" ]
Check for messages for this user and, if it exists, call the messages API with it
[ "Check", "for", "messages", "for", "this", "user", "and", "if", "it", "exists", "call", "the", "messages", "API", "with", "it" ]
python
test
ejeschke/ginga
ginga/misc/Task.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L369-L395
def make_tasker(func): """make_tasker takes a callable (function, method, etc.) and returns a new factory function for generating tasks. Each factory function is designed to consume its arguments and return a task that, when executed, will call the function upon the arguments. TODO: deprecate this and just use FuncTask, which is easier to understand--must change a number of programs first. """ def anonFunc(*args, **kwdargs): class anonTask(Task): def execute(self): self.logger.debug("Executing fn %s" % func) try: val = func(*args, **kwdargs) self.logger.debug("Done executing fn %s" % func) return val except Exception as e: # Log error message and re-raise exception. self.logger.error("fn %s raised exception: %s" % ( func, str(e))) raise e return anonTask() return anonFunc
[ "def", "make_tasker", "(", "func", ")", ":", "def", "anonFunc", "(", "*", "args", ",", "*", "*", "kwdargs", ")", ":", "class", "anonTask", "(", "Task", ")", ":", "def", "execute", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Executing fn %s\"", "%", "func", ")", "try", ":", "val", "=", "func", "(", "*", "args", ",", "*", "*", "kwdargs", ")", "self", ".", "logger", ".", "debug", "(", "\"Done executing fn %s\"", "%", "func", ")", "return", "val", "except", "Exception", "as", "e", ":", "# Log error message and re-raise exception.", "self", ".", "logger", ".", "error", "(", "\"fn %s raised exception: %s\"", "%", "(", "func", ",", "str", "(", "e", ")", ")", ")", "raise", "e", "return", "anonTask", "(", ")", "return", "anonFunc" ]
make_tasker takes a callable (function, method, etc.) and returns a new factory function for generating tasks. Each factory function is designed to consume its arguments and return a task that, when executed, will call the function upon the arguments. TODO: deprecate this and just use FuncTask, which is easier to understand--must change a number of programs first.
[ "make_tasker", "takes", "a", "callable", "(", "function", "method", "etc", ".", ")", "and", "returns", "a", "new", "factory", "function", "for", "generating", "tasks", ".", "Each", "factory", "function", "is", "designed", "to", "consume", "its", "arguments", "and", "return", "a", "task", "that", "when", "executed", "will", "call", "the", "function", "upon", "the", "arguments", "." ]
python
train
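A stripped-down sketch of the same wrap-a-callable-into-a-task factory, without the ginga Task base class or its logging:

    def make_tasker(func):
        def factory(*args, **kwargs):
            class AnonTask:
                def execute(self):
                    # Call the captured function with the captured arguments.
                    return func(*args, **kwargs)
            return AnonTask()
        return factory

    add_task = make_tasker(lambda a, b: a + b)
    task = add_task(2, 3)
    print(task.execute())  # 5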
zimeon/iiif
iiif/static.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L214-L261
def generate(self, src=None, identifier=None): """Generate static files for one source image.""" self.src = src self.identifier = identifier # Get image details and calculate tiles im = self.manipulator_klass() im.srcfile = self.src im.set_max_image_pixels(self.max_image_pixels) im.do_first() width = im.width height = im.height scale_factors = im.scale_factors(self.tilesize) # Setup destination and IIIF identifier self.setup_destination() # Write out images for (region, size) in static_partial_tile_sizes(width, height, self.tilesize, scale_factors): self.generate_tile(region, size) sizes = [] for size in static_full_sizes(width, height, self.tilesize): # See https://github.com/zimeon/iiif/issues/9 sizes.append({'width': size[0], 'height': size[1]}) self.generate_tile('full', size) for request in self.extras: request.identifier = self.identifier if (request.is_scaled_full_image()): sizes.append({'width': request.size_wh[0], 'height': request.size_wh[1]}) self.generate_file(request) # Write info.json qualities = ['default'] if (self.api_version > '1.1') else ['native'] info = IIIFInfo(level=0, server_and_prefix=self.prefix, identifier=self.identifier, width=width, height=height, scale_factors=scale_factors, tile_width=self.tilesize, tile_height=self.tilesize, formats=['jpg'], qualities=qualities, sizes=sizes, api_version=self.api_version) json_file = os.path.join(self.dst, self.identifier, 'info.json') if (self.dryrun): self.logger.warning( "dryrun mode, would write the following files:") self.logger.warning("%s / %s/%s" % (self.dst, self.identifier, 'info.json')) else: with open(json_file, 'w') as f: f.write(info.as_json()) f.close() self.logger.info("%s / %s/%s" % (self.dst, self.identifier, 'info.json')) self.logger.debug("Written %s" % (json_file))
[ "def", "generate", "(", "self", ",", "src", "=", "None", ",", "identifier", "=", "None", ")", ":", "self", ".", "src", "=", "src", "self", ".", "identifier", "=", "identifier", "# Get image details and calculate tiles", "im", "=", "self", ".", "manipulator_klass", "(", ")", "im", ".", "srcfile", "=", "self", ".", "src", "im", ".", "set_max_image_pixels", "(", "self", ".", "max_image_pixels", ")", "im", ".", "do_first", "(", ")", "width", "=", "im", ".", "width", "height", "=", "im", ".", "height", "scale_factors", "=", "im", ".", "scale_factors", "(", "self", ".", "tilesize", ")", "# Setup destination and IIIF identifier", "self", ".", "setup_destination", "(", ")", "# Write out images", "for", "(", "region", ",", "size", ")", "in", "static_partial_tile_sizes", "(", "width", ",", "height", ",", "self", ".", "tilesize", ",", "scale_factors", ")", ":", "self", ".", "generate_tile", "(", "region", ",", "size", ")", "sizes", "=", "[", "]", "for", "size", "in", "static_full_sizes", "(", "width", ",", "height", ",", "self", ".", "tilesize", ")", ":", "# See https://github.com/zimeon/iiif/issues/9", "sizes", ".", "append", "(", "{", "'width'", ":", "size", "[", "0", "]", ",", "'height'", ":", "size", "[", "1", "]", "}", ")", "self", ".", "generate_tile", "(", "'full'", ",", "size", ")", "for", "request", "in", "self", ".", "extras", ":", "request", ".", "identifier", "=", "self", ".", "identifier", "if", "(", "request", ".", "is_scaled_full_image", "(", ")", ")", ":", "sizes", ".", "append", "(", "{", "'width'", ":", "request", ".", "size_wh", "[", "0", "]", ",", "'height'", ":", "request", ".", "size_wh", "[", "1", "]", "}", ")", "self", ".", "generate_file", "(", "request", ")", "# Write info.json", "qualities", "=", "[", "'default'", "]", "if", "(", "self", ".", "api_version", ">", "'1.1'", ")", "else", "[", "'native'", "]", "info", "=", "IIIFInfo", "(", "level", "=", "0", ",", "server_and_prefix", "=", "self", ".", "prefix", ",", "identifier", "=", "self", ".", "identifier", ",", "width", "=", "width", ",", "height", "=", "height", ",", "scale_factors", "=", "scale_factors", ",", "tile_width", "=", "self", ".", "tilesize", ",", "tile_height", "=", "self", ".", "tilesize", ",", "formats", "=", "[", "'jpg'", "]", ",", "qualities", "=", "qualities", ",", "sizes", "=", "sizes", ",", "api_version", "=", "self", ".", "api_version", ")", "json_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dst", ",", "self", ".", "identifier", ",", "'info.json'", ")", "if", "(", "self", ".", "dryrun", ")", ":", "self", ".", "logger", ".", "warning", "(", "\"dryrun mode, would write the following files:\"", ")", "self", ".", "logger", ".", "warning", "(", "\"%s / %s/%s\"", "%", "(", "self", ".", "dst", ",", "self", ".", "identifier", ",", "'info.json'", ")", ")", "else", ":", "with", "open", "(", "json_file", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "info", ".", "as_json", "(", ")", ")", "f", ".", "close", "(", ")", "self", ".", "logger", ".", "info", "(", "\"%s / %s/%s\"", "%", "(", "self", ".", "dst", ",", "self", ".", "identifier", ",", "'info.json'", ")", ")", "self", ".", "logger", ".", "debug", "(", "\"Written %s\"", "%", "(", "json_file", ")", ")" ]
Generate static files for one source image.
[ "Generate", "static", "files", "for", "one", "source", "image", "." ]
python
train
tradenity/python-sdk
tradenity/resources/zip_codes_geo_zone.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/zip_codes_geo_zone.py#L425-L445
def delete_zip_codes_geo_zone_by_id(cls, zip_codes_geo_zone_id, **kwargs): """Delete ZipCodesGeoZone Delete an instance of ZipCodesGeoZone by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_zip_codes_geo_zone_by_id(zip_codes_geo_zone_id, async=True) >>> result = thread.get() :param async bool :param str zip_codes_geo_zone_id: ID of zipCodesGeoZone to delete. (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, **kwargs) else: (data) = cls._delete_zip_codes_geo_zone_by_id_with_http_info(zip_codes_geo_zone_id, **kwargs) return data
[ "def", "delete_zip_codes_geo_zone_by_id", "(", "cls", ",", "zip_codes_geo_zone_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_delete_zip_codes_geo_zone_by_id_with_http_info", "(", "zip_codes_geo_zone_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_delete_zip_codes_geo_zone_by_id_with_http_info", "(", "zip_codes_geo_zone_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Delete ZipCodesGeoZone Delete an instance of ZipCodesGeoZone by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_zip_codes_geo_zone_by_id(zip_codes_geo_zone_id, async=True) >>> result = thread.get() :param async bool :param str zip_codes_geo_zone_id: ID of zipCodesGeoZone to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Delete", "ZipCodesGeoZone" ]
python
train
hydpy-dev/hydpy
hydpy/core/devicetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/devicetools.py#L520-L575
def keywords(self) -> Set[str]: """A set of all keywords of all handled devices. In addition to attribute access via device names, |Nodes| and |Elements| objects allow for attribute access via keywords, allowing for an efficient search of certain groups of devices. Let us use the example from above, where the nodes `na` and `nb` have no keywords, but each of the other three nodes both belongs to either `group_a` or `group_b` and `group_1` or `group_2`: >>> from hydpy import Node, Nodes >>> nodes = Nodes('na', ... Node('nb', variable='W'), ... Node('nc', keywords=('group_a', 'group_1')), ... Node('nd', keywords=('group_a', 'group_2')), ... Node('ne', keywords=('group_b', 'group_1'))) >>> nodes Nodes("na", "nb", "nc", "nd", "ne") >>> sorted(nodes.keywords) ['group_1', 'group_2', 'group_a', 'group_b'] If you are interested in inspecting all devices belonging to `group_a`, select them via this keyword: >>> subgroup = nodes.group_1 >>> subgroup Nodes("nc", "ne") You can further restrict the search by also selecting the devices belonging to `group_b`, which holds only for node "e", in the given example: >>> subsubgroup = subgroup.group_b >>> subsubgroup Node("ne", variable="Q", keywords=["group_1", "group_b"]) Note that the keywords already used for building a device subgroup are not informative anymore (as they hold for each device) and are thus not shown anymore: >>> sorted(subgroup.keywords) ['group_a', 'group_b'] The latter might be confusing if you intend to work with a device subgroup for a longer time. After copying the subgroup, all keywords of the contained devices are available again: >>> from copy import copy >>> newgroup = copy(subgroup) >>> sorted(newgroup.keywords) ['group_1', 'group_a', 'group_b'] """ return set(keyword for device in self for keyword in device.keywords if keyword not in self._shadowed_keywords)
[ "def", "keywords", "(", "self", ")", "->", "Set", "[", "str", "]", ":", "return", "set", "(", "keyword", "for", "device", "in", "self", "for", "keyword", "in", "device", ".", "keywords", "if", "keyword", "not", "in", "self", ".", "_shadowed_keywords", ")" ]
A set of all keywords of all handled devices. In addition to attribute access via device names, |Nodes| and |Elements| objects allow for attribute access via keywords, allowing for an efficient search of certain groups of devices. Let us use the example from above, where the nodes `na` and `nb` have no keywords, but each of the other three nodes both belongs to either `group_a` or `group_b` and `group_1` or `group_2`: >>> from hydpy import Node, Nodes >>> nodes = Nodes('na', ... Node('nb', variable='W'), ... Node('nc', keywords=('group_a', 'group_1')), ... Node('nd', keywords=('group_a', 'group_2')), ... Node('ne', keywords=('group_b', 'group_1'))) >>> nodes Nodes("na", "nb", "nc", "nd", "ne") >>> sorted(nodes.keywords) ['group_1', 'group_2', 'group_a', 'group_b'] If you are interested in inspecting all devices belonging to `group_a`, select them via this keyword: >>> subgroup = nodes.group_1 >>> subgroup Nodes("nc", "ne") You can further restrict the search by also selecting the devices belonging to `group_b`, which holds only for node "e", in the given example: >>> subsubgroup = subgroup.group_b >>> subsubgroup Node("ne", variable="Q", keywords=["group_1", "group_b"]) Note that the keywords already used for building a device subgroup are not informative anymore (as they hold for each device) and are thus not shown anymore: >>> sorted(subgroup.keywords) ['group_a', 'group_b'] The latter might be confusing if you intend to work with a device subgroup for a longer time. After copying the subgroup, all keywords of the contained devices are available again: >>> from copy import copy >>> newgroup = copy(subgroup) >>> sorted(newgroup.keywords) ['group_1', 'group_a', 'group_b']
[ "A", "set", "of", "all", "keywords", "of", "all", "handled", "devices", "." ]
python
train
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Parser.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L826-L837
def getMiniHTML(self): ''' getMiniHTML - Gets the HTML representation of this document without any pretty formatting and disregarding original whitespace beyond the functional. @return <str> - HTML with only functional whitespace present ''' from .Formatter import AdvancedHTMLMiniFormatter html = self.getHTML() formatter = AdvancedHTMLMiniFormatter(None) # Do not double-encode formatter.feed(html) return formatter.getHTML()
[ "def", "getMiniHTML", "(", "self", ")", ":", "from", ".", "Formatter", "import", "AdvancedHTMLMiniFormatter", "html", "=", "self", ".", "getHTML", "(", ")", "formatter", "=", "AdvancedHTMLMiniFormatter", "(", "None", ")", "# Do not double-encode", "formatter", ".", "feed", "(", "html", ")", "return", "formatter", ".", "getHTML", "(", ")" ]
getMiniHTML - Gets the HTML representation of this document without any pretty formatting and disregarding original whitespace beyond the functional. @return <str> - HTML with only functional whitespace present
[ "getMiniHTML", "-", "Gets", "the", "HTML", "representation", "of", "this", "document", "without", "any", "pretty", "formatting", "and", "disregarding", "original", "whitespace", "beyond", "the", "functional", "." ]
python
train
opennode/waldur-core
waldur_core/cost_tracking/handlers.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/cost_tracking/handlers.py#L94-L103
def _create_historical_estimates(resource, configuration): """ Create consumption details and price estimates for past months. Usually we need to update historical values on resource import. """ today = timezone.now() month_start = core_utils.month_start(today) while month_start > resource.created: month_start -= relativedelta(months=1) models.PriceEstimate.create_historical(resource, configuration, max(month_start, resource.created))
[ "def", "_create_historical_estimates", "(", "resource", ",", "configuration", ")", ":", "today", "=", "timezone", ".", "now", "(", ")", "month_start", "=", "core_utils", ".", "month_start", "(", "today", ")", "while", "month_start", ">", "resource", ".", "created", ":", "month_start", "-=", "relativedelta", "(", "months", "=", "1", ")", "models", ".", "PriceEstimate", ".", "create_historical", "(", "resource", ",", "configuration", ",", "max", "(", "month_start", ",", "resource", ".", "created", ")", ")" ]
Create consumption details and price estimates for past months. Usually we need to update historical values on resource import.
[ "Create", "consumption", "details", "and", "price", "estimates", "for", "past", "months", "." ]
python
train
twilio/twilio-python
twilio/rest/api/v2010/account/usage/record/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/record/__init__.py#L187-L196
def daily(self): """ Access the daily :returns: twilio.rest.api.v2010.account.usage.record.daily.DailyList :rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyList """ if self._daily is None: self._daily = DailyList(self._version, account_sid=self._solution['account_sid'], ) return self._daily
[ "def", "daily", "(", "self", ")", ":", "if", "self", ".", "_daily", "is", "None", ":", "self", ".", "_daily", "=", "DailyList", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")", "return", "self", ".", "_daily" ]
Access the daily :returns: twilio.rest.api.v2010.account.usage.record.daily.DailyList :rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyList
[ "Access", "the", "daily" ]
python
train
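The lazy-initialization pattern used by the accessor above (build the sub-resource on first access, then reuse it), shown as a standalone sketch with a placeholder payload and written as a property for brevity:

    class RecordList:
        def __init__(self):
            self._daily = None

        @property
        def daily(self):
            # Built once, on first access; later calls return the cached object.
            if self._daily is None:
                self._daily = ["placeholder daily records"]
            return self._daily

    records = RecordList()
    assert records.daily is records.daily  # same cached object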
mbakker7/timml
timml/aquifer.py
https://github.com/mbakker7/timml/blob/91e99ad573cb8a9ad8ac1fa041c3ca44520c2390/timml/aquifer.py#L85-L98
def findlayer(self, z): ''' Returns layer-number, layer-type and model-layer-number''' if z > self.z[0]: modellayer, ltype = -1, 'above' layernumber = None elif z < self.z[-1]: modellayer, ltype = len(self.layernumber), 'below' layernumber = None else: modellayer = np.argwhere((z <= self.z[:-1]) & (z >= self.z[1:]))[0, 0] layernumber = self.layernumber[modellayer] ltype = self.ltype[modellayer] return layernumber, ltype, modellayer
[ "def", "findlayer", "(", "self", ",", "z", ")", ":", "if", "z", ">", "self", ".", "z", "[", "0", "]", ":", "modellayer", ",", "ltype", "=", "-", "1", ",", "'above'", "layernumber", "=", "None", "elif", "z", "<", "self", ".", "z", "[", "-", "1", "]", ":", "modellayer", ",", "ltype", "=", "len", "(", "self", ".", "layernumber", ")", ",", "'below'", "layernumber", "=", "None", "else", ":", "modellayer", "=", "np", ".", "argwhere", "(", "(", "z", "<=", "self", ".", "z", "[", ":", "-", "1", "]", ")", "&", "(", "z", ">=", "self", ".", "z", "[", "1", ":", "]", ")", ")", "[", "0", ",", "0", "]", "layernumber", "=", "self", ".", "layernumber", "[", "modellayer", "]", "ltype", "=", "self", ".", "ltype", "[", "modellayer", "]", "return", "layernumber", ",", "ltype", ",", "modellayer" ]
Returns layer-number, layer-type and model-layer-number
[ "Returns", "layer", "-", "number", "layer", "-", "type", "and", "model", "-", "layer", "-", "number" ]
python
train
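A worked example of the interval test used in findlayer, assuming z holds interface elevations ordered from top to bottom:

    import numpy as np

    z = np.array([0.0, -5.0, -10.0, -20.0])  # layer interfaces, top to bottom
    zq = -7.0
    modellayer = np.argwhere((zq <= z[:-1]) & (zq >= z[1:]))[0, 0]
    print(modellayer)  # 1, i.e. the second layer (between -5 and -10)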
MacHu-GWU/angora-project
angora/filesystem/filesystem.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/filesystem/filesystem.py#L561-L569
def iterpaths(self): """Yield all WinFile's absolute path. """ try: for path in self.order: yield path except: for path in self.files: yield path
[ "def", "iterpaths", "(", "self", ")", ":", "try", ":", "for", "path", "in", "self", ".", "order", ":", "yield", "path", "except", ":", "for", "path", "in", "self", ".", "files", ":", "yield", "path" ]
Yield all WinFile's absolute path.
[ "Yield", "all", "WinFile", "s", "absolute", "path", "." ]
python
train
log2timeline/plaso
plaso/engine/processing_status.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/processing_status.py#L265-L306
def UpdateNumberOfWarnings( self, number_of_consumed_warnings, number_of_produced_warnings): """Updates the number of warnings. Args: number_of_consumed_warnings (int): total number of warnings consumed by the process. number_of_produced_warnings (int): total number of warnings produced by the process. Returns: bool: True if either number of warnings has increased. Raises: ValueError: if the consumed or produced number of warnings is smaller than the value of the previous update. """ consumed_warnings_delta = 0 if number_of_consumed_warnings is not None: if number_of_consumed_warnings < self.number_of_consumed_warnings: raise ValueError( 'Number of consumed warnings smaller than previous update.') consumed_warnings_delta = ( number_of_consumed_warnings - self.number_of_consumed_warnings) self.number_of_consumed_warnings = number_of_consumed_warnings self.number_of_consumed_warnings_delta = consumed_warnings_delta produced_warnings_delta = 0 if number_of_produced_warnings is not None: if number_of_produced_warnings < self.number_of_produced_warnings: raise ValueError( 'Number of produced warnings smaller than previous update.') produced_warnings_delta = ( number_of_produced_warnings - self.number_of_produced_warnings) self.number_of_produced_warnings = number_of_produced_warnings self.number_of_produced_warnings_delta = produced_warnings_delta return consumed_warnings_delta > 0 or produced_warnings_delta > 0
[ "def", "UpdateNumberOfWarnings", "(", "self", ",", "number_of_consumed_warnings", ",", "number_of_produced_warnings", ")", ":", "consumed_warnings_delta", "=", "0", "if", "number_of_consumed_warnings", "is", "not", "None", ":", "if", "number_of_consumed_warnings", "<", "self", ".", "number_of_consumed_warnings", ":", "raise", "ValueError", "(", "'Number of consumed warnings smaller than previous update.'", ")", "consumed_warnings_delta", "=", "(", "number_of_consumed_warnings", "-", "self", ".", "number_of_consumed_warnings", ")", "self", ".", "number_of_consumed_warnings", "=", "number_of_consumed_warnings", "self", ".", "number_of_consumed_warnings_delta", "=", "consumed_warnings_delta", "produced_warnings_delta", "=", "0", "if", "number_of_produced_warnings", "is", "not", "None", ":", "if", "number_of_produced_warnings", "<", "self", ".", "number_of_produced_warnings", ":", "raise", "ValueError", "(", "'Number of produced warnings smaller than previous update.'", ")", "produced_warnings_delta", "=", "(", "number_of_produced_warnings", "-", "self", ".", "number_of_produced_warnings", ")", "self", ".", "number_of_produced_warnings", "=", "number_of_produced_warnings", "self", ".", "number_of_produced_warnings_delta", "=", "produced_warnings_delta", "return", "consumed_warnings_delta", ">", "0", "or", "produced_warnings_delta", ">", "0" ]
Updates the number of warnings. Args: number_of_consumed_warnings (int): total number of warnings consumed by the process. number_of_produced_warnings (int): total number of warnings produced by the process. Returns: bool: True if either number of warnings has increased. Raises: ValueError: if the consumed or produced number of warnings is smaller than the value of the previous update.
[ "Updates", "the", "number", "of", "warnings", "." ]
python
train
sci-bots/pygtkhelpers
pygtkhelpers/ui/views/shapes_canvas_view.py
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/views/shapes_canvas_view.py#L236-L252
def parse_args(args=None): """Parses arguments, returns (options, args).""" import sys from argparse import ArgumentParser from path_helpers import path if args is None: args = sys.argv parser = ArgumentParser(description='Example app for drawing shapes from ' 'dataframe, scaled to fit to GTK canvas while ' 'preserving aspect ratio (a.k.a., aspect fit).') parser.add_argument('svg_filepath', type=path, default=None) parser.add_argument('-p', '--padding-fraction', type=float, default=0) args = parser.parse_args() return args
[ "def", "parse_args", "(", "args", "=", "None", ")", ":", "import", "sys", "from", "argparse", "import", "ArgumentParser", "from", "path_helpers", "import", "path", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "parser", "=", "ArgumentParser", "(", "description", "=", "'Example app for drawing shapes from '", "'dataframe, scaled to fit to GTK canvas while '", "'preserving aspect ratio (a.k.a., aspect fit).'", ")", "parser", ".", "add_argument", "(", "'svg_filepath'", ",", "type", "=", "path", ",", "default", "=", "None", ")", "parser", ".", "add_argument", "(", "'-p'", ",", "'--padding-fraction'", ",", "type", "=", "float", ",", "default", "=", "0", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "args" ]
Parses arguments, returns (options, args).
[ "Parses", "arguments", "returns", "(", "options", "args", ")", "." ]
python
train
pydata/xarray
xarray/core/common.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/common.py#L361-L382
def assign_attrs(self, *args, **kwargs): """Assign new attrs to this object. Returns a new object equivalent to self.attrs.update(*args, **kwargs). Parameters ---------- args : positional arguments passed into ``attrs.update``. kwargs : keyword arguments passed into ``attrs.update``. Returns ------- assigned : same type as caller A new object with the new attrs in addition to the existing data. See also -------- Dataset.assign """ out = self.copy(deep=False) out.attrs.update(*args, **kwargs) return out
[ "def", "assign_attrs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "out", "=", "self", ".", "copy", "(", "deep", "=", "False", ")", "out", ".", "attrs", ".", "update", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "out" ]
Assign new attrs to this object. Returns a new object equivalent to self.attrs.update(*args, **kwargs). Parameters ---------- args : positional arguments passed into ``attrs.update``. kwargs : keyword arguments passed into ``attrs.update``. Returns ------- assigned : same type as caller A new object with the new attrs in addition to the existing data. See also -------- Dataset.assign
[ "Assign", "new", "attrs", "to", "this", "object", "." ]
python
train
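Typical usage of assign_attrs on a DataArray; it returns a shallow copy carrying the extra attrs and leaves the original untouched:

    import xarray as xr

    da = xr.DataArray([1, 2, 3], dims="x")
    da2 = da.assign_attrs(units="m", description="toy data")
    print(da2.attrs)   # {'units': 'm', 'description': 'toy data'}
    print(da.attrs)    # {}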
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L2774-L2777
def add_path_to_sys_path(self): """Add Spyder path to sys.path""" for path in reversed(self.get_spyder_pythonpath()): sys.path.insert(1, path)
[ "def", "add_path_to_sys_path", "(", "self", ")", ":", "for", "path", "in", "reversed", "(", "self", ".", "get_spyder_pythonpath", "(", ")", ")", ":", "sys", ".", "path", ".", "insert", "(", "1", ",", "path", ")" ]
Add Spyder path to sys.path
[ "Add", "Spyder", "path", "to", "sys", ".", "path" ]
python
train
DreamLab/VmShepherd
src/vmshepherd/iaas/abstract.py
https://github.com/DreamLab/VmShepherd/blob/709a412c372b897d53808039c5c64a8b69c12c8d/src/vmshepherd/iaas/abstract.py#L26-L46
async def create_vm(self, preset_name: str, image: str, flavor: str, security_groups: List=None, userdata: Dict=None, key_name: str=None, availability_zone: str=None, subnets: List=None) -> Any: """ Create (boot) a new server. :arg string preset_name: Name of vm group where vm is created. :arg string image: Image name. :arg string flavor: Flavor (or instance_type in AWS) name. :arg list security_groups: A list of security group names. :arg dict userdata: A dict of arbitrary key/value metadata to store in grains. :arg string key_name: (optional extension) name of previously created keypair to inject into the instance. :arg string availability_zone: Name of the availability zone for instance placement. :arg string subnets: List of the subnets for instance placement. Returns Any vm_id. """ raise NotImplementedError
[ "async", "def", "create_vm", "(", "self", ",", "preset_name", ":", "str", ",", "image", ":", "str", ",", "flavor", ":", "str", ",", "security_groups", ":", "List", "=", "None", ",", "userdata", ":", "Dict", "=", "None", ",", "key_name", ":", "str", "=", "None", ",", "availability_zone", ":", "str", "=", "None", ",", "subnets", ":", "List", "=", "None", ")", "->", "Any", ":", "raise", "NotImplementedError" ]
Create (boot) a new server. :arg string preset_name: Name of vm group where vm is created. :arg string image: Image name. :arg string flavor: Flavor (or instance_type in AWS) name. :arg list security_groups: A list of security group names. :arg dict userdata: A dict of arbitrary key/value metadata to store in grains. :arg string key_name: (optional extension) name of previously created keypair to inject into the instance. :arg string availability_zone: Name of the availability zone for instance placement. :arg string subnets: List of the subnets for instance placement. Returns Any vm_id.
[ "Create", "(", "boot", ")", "a", "new", "server", "." ]
python
train
Fantomas42/django-blog-zinnia
zinnia/templatetags/zinnia.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templatetags/zinnia.py#L424-L431
def comment_admin_urlname(action): """ Return the admin URLs for the comment app used. """ comment = get_comment_model() return 'admin:%s_%s_%s' % ( comment._meta.app_label, comment._meta.model_name, action)
[ "def", "comment_admin_urlname", "(", "action", ")", ":", "comment", "=", "get_comment_model", "(", ")", "return", "'admin:%s_%s_%s'", "%", "(", "comment", ".", "_meta", ".", "app_label", ",", "comment", ".", "_meta", ".", "model_name", ",", "action", ")" ]
Return the admin URLs for the comment app used.
[ "Return", "the", "admin", "URLs", "for", "the", "comment", "app", "used", "." ]
python
train
michaelpb/omnic
omnic/utils/filesystem.py
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/utils/filesystem.py#L38-L47
def recursive_hardlink_dirs(source_d, destination_d): ''' Same as above, except creating hardlinks for all files ''' func = os.link if os.name == 'nt': func = shutil.copy if os.path.exists(destination_d): os.rmdir(destination_d) shutil.copytree(source_d, destination_d, copy_function=func)
[ "def", "recursive_hardlink_dirs", "(", "source_d", ",", "destination_d", ")", ":", "func", "=", "os", ".", "link", "if", "os", ".", "name", "==", "'nt'", ":", "func", "=", "shutil", ".", "copy", "if", "os", ".", "path", ".", "exists", "(", "destination_d", ")", ":", "os", ".", "rmdir", "(", "destination_d", ")", "shutil", ".", "copytree", "(", "source_d", ",", "destination_d", ",", "copy_function", "=", "func", ")" ]
Same as above, except creating hardlinks for all files
[ "Same", "as", "above", "except", "creating", "hardlinks", "for", "all", "files" ]
python
train
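A minimal sketch of the same idea using only the standard library: shutil.copytree accepts a copy_function, so passing os.link produces hard links instead of copies, with a fallback to a real copy on Windows as in the code above:

    import os
    import shutil

    def hardlink_tree(source_d, destination_d):
        # Hard-link files where supported; plain copy on Windows.
        link = shutil.copy if os.name == "nt" else os.link
        shutil.copytree(source_d, destination_d, copy_function=link)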
ludeeus/pycfdns
pycfdns/__init__.py
https://github.com/ludeeus/pycfdns/blob/0fd027be49d67250f85f2398d006a9409a7dae28/pycfdns/__init__.py#L26-L31
def get_zoneID(self, headers, zone): """Get the zone id for the zone.""" zoneIDurl = self.BASE_URL + '?name=' + zone zoneIDrequest = requests.get(zoneIDurl, headers=headers) zoneID = zoneIDrequest.json()['result'][0]['id'] return zoneID
[ "def", "get_zoneID", "(", "self", ",", "headers", ",", "zone", ")", ":", "zoneIDurl", "=", "self", ".", "BASE_URL", "+", "'?name='", "+", "zone", "zoneIDrequest", "=", "requests", ".", "get", "(", "zoneIDurl", ",", "headers", "=", "headers", ")", "zoneID", "=", "zoneIDrequest", ".", "json", "(", ")", "[", "'result'", "]", "[", "0", "]", "[", "'id'", "]", "return", "zoneID" ]
Get the zone id for the zone.
[ "Get", "the", "zone", "id", "for", "the", "zone", "." ]
python
train
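A hedged sketch of the same zone lookup with requests; the endpoint and the response shape (result[0]['id']) are assumptions carried over from the code above, and the token header is purely hypothetical:

    import requests

    BASE_URL = "https://api.cloudflare.com/client/v4/zones"
    headers = {"Authorization": "Bearer <api-token>"}  # hypothetical credentials

    def get_zone_id(zone):
        # Query the zone by name and return the first matching id.
        resp = requests.get(BASE_URL, params={"name": zone}, headers=headers)
        return resp.json()["result"][0]["id"]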
CalebBell/thermo
thermo/safety.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/safety.py#L968-L1019
def UFL_mixture(ys=None, UFLs=None, CASRNs=None, AvailableMethods=False, Method=None): # pragma: no cover '''Inert gases are ignored. This API is considered experimental, and is expected to be removed in a future release in favor of a more complete object-oriented interface. >>> UFL_mixture(ys=normalize([0.0024, 0.0061, 0.0015]), UFLs=[.075, .15, .32]) 0.12927551844869378 >>> LFL_mixture(LFLs=[None, None, None, None, None, None, None, None, None, None, None, None, None, None, 0.143, 0.36, 0.63, 0.1097, 0.072], ys=[0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.10, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05], CASRNs=['7440-37-1', '124-38-9', '7440-59-7', '7440-01-9', '7727-37-9', '7440-63-3', '10102-43-9', '7782-44-7', '132259-10-0', '7439-90-9', '10043-92-2', '7732-18-5', '7782-50-5', '7782-41-4', '67-64-1', '67-56-1', '75-52-5', '590-19-2', '277-10-1']) 0.14550641757359664 ''' def list_methods(): methods = [] if CASRNs: CASRNs2 = list(CASRNs) UFLs2 = list(UFLs) for i in inerts: if i in CASRNs2: ind = CASRNs.index(i) CASRNs2.remove(i) UFLs2.remove(UFLs[ind]) if none_and_length_check([UFLs2]): methods.append('Summed Inverse, inerts removed') if none_and_length_check([UFLs, ys]): methods.append('Summed Inverse') methods.append('None') return methods if AvailableMethods: return list_methods() if not Method: Method = list_methods()[0] # This is the calculate, given the method section # if not none_and_length_check([UFLs, ys]): # check same-length inputs # raise Exception('Function inputs are incorrect format') if Method == 'Summed Inverse': return fire_mixing(ys, UFLs) elif Method == 'Summed Inverse, inerts removed': CASRNs2 = list(CASRNs) UFLs2 = list(UFLs) ys2 = list(ys) for i in inerts: if i in CASRNs2: ind = CASRNs2.index(i) CASRNs2.remove(i) UFLs2.pop(ind) ys2.pop(ind) return fire_mixing(normalize(ys2), UFLs2) elif Method == 'None': return None else: raise Exception('Failure in in function')
[ "def", "UFL_mixture", "(", "ys", "=", "None", ",", "UFLs", "=", "None", ",", "CASRNs", "=", "None", ",", "AvailableMethods", "=", "False", ",", "Method", "=", "None", ")", ":", "# pragma: no cover", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "if", "CASRNs", ":", "CASRNs2", "=", "list", "(", "CASRNs", ")", "UFLs2", "=", "list", "(", "UFLs", ")", "for", "i", "in", "inerts", ":", "if", "i", "in", "CASRNs2", ":", "ind", "=", "CASRNs", ".", "index", "(", "i", ")", "CASRNs2", ".", "remove", "(", "i", ")", "UFLs2", ".", "remove", "(", "UFLs", "[", "ind", "]", ")", "if", "none_and_length_check", "(", "[", "UFLs2", "]", ")", ":", "methods", ".", "append", "(", "'Summed Inverse, inerts removed'", ")", "if", "none_and_length_check", "(", "[", "UFLs", ",", "ys", "]", ")", ":", "methods", ".", "append", "(", "'Summed Inverse'", ")", "methods", ".", "append", "(", "'None'", ")", "return", "methods", "if", "AvailableMethods", ":", "return", "list_methods", "(", ")", "if", "not", "Method", ":", "Method", "=", "list_methods", "(", ")", "[", "0", "]", "# This is the calculate, given the method section", "# if not none_and_length_check([UFLs, ys]): # check same-length inputs", "# raise Exception('Function inputs are incorrect format')", "if", "Method", "==", "'Summed Inverse'", ":", "return", "fire_mixing", "(", "ys", ",", "UFLs", ")", "elif", "Method", "==", "'Summed Inverse, inerts removed'", ":", "CASRNs2", "=", "list", "(", "CASRNs", ")", "UFLs2", "=", "list", "(", "UFLs", ")", "ys2", "=", "list", "(", "ys", ")", "for", "i", "in", "inerts", ":", "if", "i", "in", "CASRNs2", ":", "ind", "=", "CASRNs2", ".", "index", "(", "i", ")", "CASRNs2", ".", "remove", "(", "i", ")", "UFLs2", ".", "pop", "(", "ind", ")", "ys2", ".", "pop", "(", "ind", ")", "return", "fire_mixing", "(", "normalize", "(", "ys2", ")", ",", "UFLs2", ")", "elif", "Method", "==", "'None'", ":", "return", "None", "else", ":", "raise", "Exception", "(", "'Failure in in function'", ")" ]
Inert gases are ignored. This API is considered experimental, and is expected to be removed in a future release in favor of a more complete object-oriented interface. >>> UFL_mixture(ys=normalize([0.0024, 0.0061, 0.0015]), UFLs=[.075, .15, .32]) 0.12927551844869378 >>> LFL_mixture(LFLs=[None, None, None, None, None, None, None, None, None, None, None, None, None, None, 0.143, 0.36, 0.63, 0.1097, 0.072], ys=[0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.10, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05], CASRNs=['7440-37-1', '124-38-9', '7440-59-7', '7440-01-9', '7727-37-9', '7440-63-3', '10102-43-9', '7782-44-7', '132259-10-0', '7439-90-9', '10043-92-2', '7732-18-5', '7782-50-5', '7782-41-4', '67-64-1', '67-56-1', '75-52-5', '590-19-2', '277-10-1']) 0.14550641757359664
[ "Inert", "gases", "are", "ignored", "." ]
python
valid
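The 'Summed Inverse' method above appears to be Le Chatelier's mixing rule, 1 / sum(y_i / UFL_i) over normalized flammable fractions; a quick standalone check reproduces the first doctest value:

    def le_chatelier(ys, limits):
        # Le Chatelier's rule; ys are assumed to be normalized mole fractions.
        return 1.0 / sum(y / L for y, L in zip(ys, limits))

    ys = [0.0024, 0.0061, 0.0015]
    total = sum(ys)
    ys = [y / total for y in ys]
    print(le_chatelier(ys, [0.075, 0.15, 0.32]))  # ~0.12928, matching the doctest above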
flashingpumpkin/django-socialregistration
socialregistration/clients/oauth.py
https://github.com/flashingpumpkin/django-socialregistration/blob/9da9fb83c9bf79997ff81fe1378ab5ca3074b32b/socialregistration/clients/oauth.py#L241-L254
def get_redirect_url(self, state='', **kwargs): """ Assemble the URL to where we'll be redirecting the user to to request permissions. """ params = { 'response_type': 'code', 'client_id': self.client_id, 'redirect_uri': self.get_callback_url(**kwargs), 'scope': self.scope or '', 'state': state, } return '%s?%s' % (self.auth_url, urllib.urlencode(params))
[ "def", "get_redirect_url", "(", "self", ",", "state", "=", "''", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'response_type'", ":", "'code'", ",", "'client_id'", ":", "self", ".", "client_id", ",", "'redirect_uri'", ":", "self", ".", "get_callback_url", "(", "*", "*", "kwargs", ")", ",", "'scope'", ":", "self", ".", "scope", "or", "''", ",", "'state'", ":", "state", ",", "}", "return", "'%s?%s'", "%", "(", "self", ".", "auth_url", ",", "urllib", ".", "urlencode", "(", "params", ")", ")" ]
Assemble the URL to where we'll be redirecting the user to to request permissions.
[ "Assemble", "the", "URL", "to", "where", "we", "ll", "be", "redirecting", "the", "user", "to", "to", "request", "permissions", "." ]
python
train
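The same authorization-URL assembly with the Python 3 standard library (urllib.urlencode moved to urllib.parse); the auth URL, client id and callback below are placeholder values, not real credentials:

    from urllib.parse import urlencode

    auth_url = "https://provider.example/oauth2/authorize"
    params = {
        "response_type": "code",
        "client_id": "my-client-id",
        "redirect_uri": "https://app.example/callback",
        "scope": "email",
        "state": "opaque-state-token",
    }
    redirect_url = "%s?%s" % (auth_url, urlencode(params))
    print(redirect_url)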
tensorflow/tensor2tensor
tensor2tensor/data_generators/allen_brain.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/allen_brain.py#L192-L260
def _generator(tmp_dir, training, size=_BASE_EXAMPLE_IMAGE_SIZE, training_fraction=0.95): """Base problem example generator for Allen Brain Atlas problems. Args: tmp_dir: str, a directory where raw example input data has been stored. training: bool, whether the mode of operation is training (or, alternatively, evaluation), determining whether examples in tmp_dir prefixed with train or dev will be used. size: int, the image size to add to the example annotation. training_fraction: float, the fraction of the sub-image path list to consider as the basis for training examples. Yields: A dictionary representing the images with the following fields: * image/encoded: The string encoding the image as JPEG. * image/format: The string "jpeg" indicating the image format. * image/height: The integer indicating the image height. * image/width: The integer indicating the image height. """ maybe_download_image_dataset(_IMAGE_IDS, tmp_dir) image_files = _get_case_file_paths(tmp_dir=tmp_dir, case=training, training_fraction=training_fraction) image_obj = PIL_Image() tf.logging.info("Loaded case file paths (n=%s)" % len(image_files)) height = size width = size for input_path in image_files: img = image_obj.open(input_path) img = np.float32(img) shape = np.shape(img) for h_index in range(0, int(math.floor(shape[0]/size))): h_offset = h_index * size h_end = h_offset + size - 1 for v_index in range(0, int(math.floor(shape[1]/size))): v_offset = v_index * size v_end = v_offset + size - 1 # Extract a sub-image tile. subimage = np.uint8(img[h_offset:h_end, v_offset:v_end]) # pylint: disable=invalid-sequence-index # Filter images that are likely background (not tissue). if np.amax(subimage) < 230: continue subimage = image_obj.fromarray(subimage) buff = BytesIO() subimage.save(buff, format="JPEG") subimage_encoded = buff.getvalue() yield { "image/encoded": [subimage_encoded], "image/format": ["jpeg"], "image/height": [height], "image/width": [width] }
[ "def", "_generator", "(", "tmp_dir", ",", "training", ",", "size", "=", "_BASE_EXAMPLE_IMAGE_SIZE", ",", "training_fraction", "=", "0.95", ")", ":", "maybe_download_image_dataset", "(", "_IMAGE_IDS", ",", "tmp_dir", ")", "image_files", "=", "_get_case_file_paths", "(", "tmp_dir", "=", "tmp_dir", ",", "case", "=", "training", ",", "training_fraction", "=", "training_fraction", ")", "image_obj", "=", "PIL_Image", "(", ")", "tf", ".", "logging", ".", "info", "(", "\"Loaded case file paths (n=%s)\"", "%", "len", "(", "image_files", ")", ")", "height", "=", "size", "width", "=", "size", "for", "input_path", "in", "image_files", ":", "img", "=", "image_obj", ".", "open", "(", "input_path", ")", "img", "=", "np", ".", "float32", "(", "img", ")", "shape", "=", "np", ".", "shape", "(", "img", ")", "for", "h_index", "in", "range", "(", "0", ",", "int", "(", "math", ".", "floor", "(", "shape", "[", "0", "]", "/", "size", ")", ")", ")", ":", "h_offset", "=", "h_index", "*", "size", "h_end", "=", "h_offset", "+", "size", "-", "1", "for", "v_index", "in", "range", "(", "0", ",", "int", "(", "math", ".", "floor", "(", "shape", "[", "1", "]", "/", "size", ")", ")", ")", ":", "v_offset", "=", "v_index", "*", "size", "v_end", "=", "v_offset", "+", "size", "-", "1", "# Extract a sub-image tile.", "subimage", "=", "np", ".", "uint8", "(", "img", "[", "h_offset", ":", "h_end", ",", "v_offset", ":", "v_end", "]", ")", "# pylint: disable=invalid-sequence-index", "# Filter images that are likely background (not tissue).", "if", "np", ".", "amax", "(", "subimage", ")", "<", "230", ":", "continue", "subimage", "=", "image_obj", ".", "fromarray", "(", "subimage", ")", "buff", "=", "BytesIO", "(", ")", "subimage", ".", "save", "(", "buff", ",", "format", "=", "\"JPEG\"", ")", "subimage_encoded", "=", "buff", ".", "getvalue", "(", ")", "yield", "{", "\"image/encoded\"", ":", "[", "subimage_encoded", "]", ",", "\"image/format\"", ":", "[", "\"jpeg\"", "]", ",", "\"image/height\"", ":", "[", "height", "]", ",", "\"image/width\"", ":", "[", "width", "]", "}" ]
Base problem example generator for Allen Brain Atlas problems. Args: tmp_dir: str, a directory where raw example input data has been stored. training: bool, whether the mode of operation is training (or, alternatively, evaluation), determining whether examples in tmp_dir prefixed with train or dev will be used. size: int, the image size to add to the example annotation. training_fraction: float, the fraction of the sub-image path list to consider as the basis for training examples. Yields: A dictionary representing the images with the following fields: * image/encoded: The string encoding the image as JPEG. * image/format: The string "jpeg" indicating the image format. * image/height: The integer indicating the image height. * image/width: The integer indicating the image height.
[ "Base", "problem", "example", "generator", "for", "Allen", "Brain", "Atlas", "problems", "." ]
python
train
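The tiling loop at the heart of the generator, reduced to a numpy-only sketch (square tiles and the same brightness filter; JPEG encoding and the TF example fields are left out):

    import numpy as np

    img = np.random.randint(0, 256, size=(300, 400), dtype=np.uint8)
    size = 64
    tiles = []
    for h_index in range(img.shape[0] // size):
        for v_index in range(img.shape[1] // size):
            tile = img[h_index * size:(h_index + 1) * size,
                       v_index * size:(v_index + 1) * size]
            if tile.max() < 230:   # likely background, skip (same threshold as above)
                continue
            tiles.append(tile)
    print(len(tiles))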
coumbole/mailscanner
mailscanner/reader.py
https://github.com/coumbole/mailscanner/blob/ead19ac8c7dee27e507c1593032863232c13f636/mailscanner/reader.py#L35-L57
def get_body(self, msg): """ Extracts and returns the decoded body from an EmailMessage object""" body = "" charset = "" if msg.is_multipart(): for part in msg.walk(): ctype = part.get_content_type() cdispo = str(part.get('Content-Disposition')) # skip any text/plain (txt) attachments if ctype == 'text/plain' and 'attachment' not in cdispo: body = part.get_payload(decode=True) # decode charset = part.get_content_charset() break # not multipart - i.e. plain text, no attachments, keeping fingers crossed else: body = msg.get_payload(decode=True) charset = msg.get_content_charset() return body.decode(charset)
[ "def", "get_body", "(", "self", ",", "msg", ")", ":", "body", "=", "\"\"", "charset", "=", "\"\"", "if", "msg", ".", "is_multipart", "(", ")", ":", "for", "part", "in", "msg", ".", "walk", "(", ")", ":", "ctype", "=", "part", ".", "get_content_type", "(", ")", "cdispo", "=", "str", "(", "part", ".", "get", "(", "'Content-Disposition'", ")", ")", "# skip any text/plain (txt) attachments", "if", "ctype", "==", "'text/plain'", "and", "'attachment'", "not", "in", "cdispo", ":", "body", "=", "part", ".", "get_payload", "(", "decode", "=", "True", ")", "# decode", "charset", "=", "part", ".", "get_content_charset", "(", ")", "break", "# not multipart - i.e. plain text, no attachments, keeping fingers crossed", "else", ":", "body", "=", "msg", ".", "get_payload", "(", "decode", "=", "True", ")", "charset", "=", "msg", ".", "get_content_charset", "(", ")", "return", "body", ".", "decode", "(", "charset", ")" ]
Extracts and returns the decoded body from an EmailMessage object
[ "Extracts", "and", "returns", "the", "decoded", "body", "from", "an", "EmailMessage", "object" ]
python
train
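A hedged, standard-library-only sketch of the same body-extraction logic, using an email.message.EmailMessage built in memory rather than the mailscanner Reader class:

from email.message import EmailMessage

msg = EmailMessage()
msg["Subject"] = "demo"
msg.set_content("Hello, plain text body.")

if msg.is_multipart():
    body = charset = None
    for part in msg.walk():
        ctype = part.get_content_type()
        cdispo = str(part.get("Content-Disposition"))
        # skip any text/plain attachments, keep the first inline text part
        if ctype == "text/plain" and "attachment" not in cdispo:
            body = part.get_payload(decode=True)
            charset = part.get_content_charset()
            break
else:
    body = msg.get_payload(decode=True)
    charset = msg.get_content_charset()

print(body.decode(charset or "utf-8"))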
gem/oq-engine
openquake/hazardlib/gsim/boore_atkinson_2011.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/boore_atkinson_2011.py#L67-L81
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # get mean and std using the superclass mean, stddevs = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) A08 = self.A08_COEFFS[imt] f_ena = 10.0 ** (A08["c"] + A08["d"] * dists.rjb) return np.log(np.exp(mean)*f_ena), stddevs
[ "def", "get_mean_and_stddevs", "(", "self", ",", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", ":", "# get mean and std using the superclass", "mean", ",", "stddevs", "=", "super", "(", ")", ".", "get_mean_and_stddevs", "(", "sites", ",", "rup", ",", "dists", ",", "imt", ",", "stddev_types", ")", "A08", "=", "self", ".", "A08_COEFFS", "[", "imt", "]", "f_ena", "=", "10.0", "**", "(", "A08", "[", "\"c\"", "]", "+", "A08", "[", "\"d\"", "]", "*", "dists", ".", "rjb", ")", "return", "np", ".", "log", "(", "np", ".", "exp", "(", "mean", ")", "*", "f_ena", ")", ",", "stddevs" ]
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
[ "See", ":", "meth", ":", "superclass", "method", "<", ".", "base", ".", "GroundShakingIntensityModel", ".", "get_mean_and_stddevs", ">", "for", "spec", "of", "input", "and", "result", "values", "." ]
python
train
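An illustrative NumPy sketch of the adjustment applied above: the base GMPE mean (in natural-log units) is scaled by f_ena = 10 ** (c + d * rjb). The coefficients and ground-motion values below are placeholders, not the published A08 table entries.

import numpy as np

c, d = 0.30, -0.01                                  # hypothetical coefficients for one IMT
rjb = np.array([5.0, 20.0, 80.0])                   # Joyner-Boore distances in km
base_mean = np.log(np.array([0.20, 0.08, 0.01]))    # hypothetical ln(ground motion)

f_ena = 10.0 ** (c + d * rjb)                       # eastern North America factor
adjusted_mean = np.log(np.exp(base_mean) * f_ena)
print(adjusted_mean)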
seomoz/shovel
shovel/tasks.py
https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L165-L170
def make(cls, obj): '''Given a callable object, return a new callable object''' try: cls._cache.append(Task(obj)) except Exception: logger.exception('Unable to make task for %s' % repr(obj))
[ "def", "make", "(", "cls", ",", "obj", ")", ":", "try", ":", "cls", ".", "_cache", ".", "append", "(", "Task", "(", "obj", ")", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "'Unable to make task for %s'", "%", "repr", "(", "obj", ")", ")" ]
Given a callable object, return a new callable object
[ "Given", "a", "callable", "object", "return", "a", "new", "callable", "object" ]
python
train
Qiskit/qiskit-terra
qiskit/providers/basicaer/unitary_simulator.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/providers/basicaer/unitary_simulator.py#L144-L155
def _validate_initial_unitary(self): """Validate an initial unitary matrix""" # If initial unitary isn't set we don't need to validate if self._initial_unitary is None: return # Check unitary is correct length for number of qubits shape = np.shape(self._initial_unitary) required_shape = (2 ** self._number_of_qubits, 2 ** self._number_of_qubits) if shape != required_shape: raise BasicAerError('initial unitary is incorrect shape: ' + '{} != 2 ** {}'.format(shape, required_shape))
[ "def", "_validate_initial_unitary", "(", "self", ")", ":", "# If initial unitary isn't set we don't need to validate", "if", "self", ".", "_initial_unitary", "is", "None", ":", "return", "# Check unitary is correct length for number of qubits", "shape", "=", "np", ".", "shape", "(", "self", ".", "_initial_unitary", ")", "required_shape", "=", "(", "2", "**", "self", ".", "_number_of_qubits", ",", "2", "**", "self", ".", "_number_of_qubits", ")", "if", "shape", "!=", "required_shape", ":", "raise", "BasicAerError", "(", "'initial unitary is incorrect shape: '", "+", "'{} != 2 ** {}'", ".", "format", "(", "shape", ",", "required_shape", ")", ")" ]
Validate an initial unitary matrix
[ "Validate", "an", "initial", "unitary", "matrix" ]
python
test
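A small stand-alone sketch of the same shape check outside BasicAer; an n-qubit initial unitary must be a (2**n, 2**n) matrix.

import numpy as np

number_of_qubits = 2
initial_unitary = np.eye(4)  # candidate initial unitary

shape = np.shape(initial_unitary)
required_shape = (2 ** number_of_qubits, 2 ** number_of_qubits)
if shape != required_shape:
    raise ValueError("initial unitary is incorrect shape: "
                     "{} != {}".format(shape, required_shape))
print("shape ok:", shape)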
inveniosoftware/invenio-files-rest
invenio_files_rest/models.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/models.py#L832-L844
def copy_contents(self, fileinstance, progress_callback=None, chunk_size=None, **kwargs): """Copy this file instance into another file instance.""" if not fileinstance.readable: raise ValueError('Source file instance is not readable.') if not self.size == 0: raise ValueError('File instance has data.') self.set_uri( *self.storage(**kwargs).copy( fileinstance.storage(**kwargs), chunk_size=chunk_size, progress_callback=progress_callback))
[ "def", "copy_contents", "(", "self", ",", "fileinstance", ",", "progress_callback", "=", "None", ",", "chunk_size", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "fileinstance", ".", "readable", ":", "raise", "ValueError", "(", "'Source file instance is not readable.'", ")", "if", "not", "self", ".", "size", "==", "0", ":", "raise", "ValueError", "(", "'File instance has data.'", ")", "self", ".", "set_uri", "(", "*", "self", ".", "storage", "(", "*", "*", "kwargs", ")", ".", "copy", "(", "fileinstance", ".", "storage", "(", "*", "*", "kwargs", ")", ",", "chunk_size", "=", "chunk_size", ",", "progress_callback", "=", "progress_callback", ")", ")" ]
Copy this file instance into another file instance.
[ "Copy", "this", "file", "instance", "into", "another", "file", "instance", "." ]
python
train
numenta/htmresearch
htmresearch/frameworks/thalamus/thalamus.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/thalamus/thalamus.py#L282-L292
def relayIndextoCoord(self, i): """ Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate """ x = i % self.relayWidth y = i / self.relayWidth return x, y
[ "def", "relayIndextoCoord", "(", "self", ",", "i", ")", ":", "x", "=", "i", "%", "self", ".", "relayWidth", "y", "=", "i", "/", "self", ".", "relayWidth", "return", "x", ",", "y" ]
Map 1D cell index to a 2D coordinate :param i: integer 1D cell index :return: (x, y), a 2D coordinate
[ "Map", "1D", "cell", "index", "to", "a", "2D", "coordinate" ]
python
train
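A sketch of the index-to-coordinate mapping with a concrete relay width. Note that the original uses "/", which yields a float y under Python 3; integer division is used here so both coordinates stay integers.

relay_width = 5

def relay_index_to_coord(i, width=relay_width):
    # column is the remainder, row is how many full rows fit before index i
    x = i % width
    y = i // width
    return x, y

print(relay_index_to_coord(0))   # (0, 0)
print(relay_index_to_coord(7))   # (2, 1)
print(relay_index_to_coord(12))  # (2, 2)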
google/grumpy
third_party/stdlib/base64.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/base64.py#L293-L305
def encode(input, output): """Encode a file.""" while True: s = input.read(MAXBINSIZE) if not s: break while len(s) < MAXBINSIZE: ns = input.read(MAXBINSIZE-len(s)) if not ns: break s += ns line = binascii.b2a_base64(s) output.write(line)
[ "def", "encode", "(", "input", ",", "output", ")", ":", "while", "True", ":", "s", "=", "input", ".", "read", "(", "MAXBINSIZE", ")", "if", "not", "s", ":", "break", "while", "len", "(", "s", ")", "<", "MAXBINSIZE", ":", "ns", "=", "input", ".", "read", "(", "MAXBINSIZE", "-", "len", "(", "s", ")", ")", "if", "not", "ns", ":", "break", "s", "+=", "ns", "line", "=", "binascii", ".", "b2a_base64", "(", "s", ")", "output", ".", "write", "(", "line", ")" ]
Encode a file.
[ "Encode", "a", "file", "." ]
python
valid
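A usage sketch for the legacy file-to-file interface above, driven with in-memory byte streams instead of real files:

import base64
from io import BytesIO

src = BytesIO(b"hello world" * 10)  # any binary file-like object works
dst = BytesIO()
base64.encode(src, dst)             # reads src in chunks, writes base64 lines to dst
print(dst.getvalue())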
bgyori/pykqml
kqml/kqml_list.py
https://github.com/bgyori/pykqml/blob/c18b39868626215deb634567c6bd7c0838e443c0/kqml/kqml_list.py#L74-L97
def gets(self, keyword): """Return the element of the list after the given keyword as string. Parameters ---------- keyword : str The keyword parameter to find in the list. Putting a colon before the keyword is optional, if no colon is given, it is added automatically (e.g. "keyword" will be found as ":keyword" in the list). Returns ------- obj_str : str The string value corresponding to the keyword parameter Example: kl = KQMLList.from_string('(FAILURE :reason INVALID_PARAMETER)') kl.gets('reason') # 'INVALID_PARAMETER' """ param = self.get(keyword) if param is not None: return safe_decode(param.string_value()) return None
[ "def", "gets", "(", "self", ",", "keyword", ")", ":", "param", "=", "self", ".", "get", "(", "keyword", ")", "if", "param", "is", "not", "None", ":", "return", "safe_decode", "(", "param", ".", "string_value", "(", ")", ")", "return", "None" ]
Return the element of the list after the given keyword as string. Parameters ---------- keyword : str The keyword parameter to find in the list. Putting a colon before the keyword is optional, if no colon is given, it is added automatically (e.g. "keyword" will be found as ":keyword" in the list). Returns ------- obj_str : str The string value corresponding to the keyword parameter Example: kl = KQMLList.from_string('(FAILURE :reason INVALID_PARAMETER)') kl.gets('reason') # 'INVALID_PARAMETER'
[ "Return", "the", "element", "of", "the", "list", "after", "the", "given", "keyword", "as", "string", "." ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxproject.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxproject.py#L401-L417
def decrease_perms(self, member, level, **kwargs): """ :param member: Username (of the form "user-USERNAME") of the project member whose permissions will be decreased. :type member: string :param level: Permissions level that the member will have after this operation (None, "VIEW", "UPLOAD", or "CONTRIBUTE") :type level: string or None Decreases the permissions that the specified user has in the project. """ input_hash = {} input_hash[member] = level return dxpy.api.project_decrease_permissions(self._dxid, input_hash, **kwargs)
[ "def", "decrease_perms", "(", "self", ",", "member", ",", "level", ",", "*", "*", "kwargs", ")", ":", "input_hash", "=", "{", "}", "input_hash", "[", "member", "]", "=", "level", "return", "dxpy", ".", "api", ".", "project_decrease_permissions", "(", "self", ".", "_dxid", ",", "input_hash", ",", "*", "*", "kwargs", ")" ]
:param member: Username (of the form "user-USERNAME") of the project member whose permissions will be decreased. :type member: string :param level: Permissions level that the member will have after this operation (None, "VIEW", "UPLOAD", or "CONTRIBUTE") :type level: string or None Decreases the permissions that the specified user has in the project.
[ ":", "param", "member", ":", "Username", "(", "of", "the", "form", "user", "-", "USERNAME", ")", "of", "the", "project", "member", "whose", "permissions", "will", "be", "decreased", ".", ":", "type", "member", ":", "string", ":", "param", "level", ":", "Permissions", "level", "that", "the", "member", "will", "have", "after", "this", "operation", "(", "None", "VIEW", "UPLOAD", "or", "CONTRIBUTE", ")", ":", "type", "level", ":", "string", "or", "None" ]
python
train
happyleavesaoc/python-snapcast
snapcast/client/__init__.py
https://github.com/happyleavesaoc/python-snapcast/blob/9b3c483358677327c7fd6d0666bf474c19d87f19/snapcast/client/__init__.py#L71-L77
def _read_socket(self): """ Process incoming messages from socket. """ while True: base_bytes = self._socket.recv(BASE_SIZE) base = basemessage.parse(base_bytes) payload_bytes = self._socket.recv(base.payload_length) self._handle_message(packet.parse(base_bytes + payload_bytes))
[ "def", "_read_socket", "(", "self", ")", ":", "while", "True", ":", "base_bytes", "=", "self", ".", "_socket", ".", "recv", "(", "BASE_SIZE", ")", "base", "=", "basemessage", ".", "parse", "(", "base_bytes", ")", "payload_bytes", "=", "self", ".", "_socket", ".", "recv", "(", "base", ".", "payload_length", ")", "self", ".", "_handle_message", "(", "packet", ".", "parse", "(", "base_bytes", "+", "payload_bytes", ")", ")" ]
Process incoming messages from socket.
[ "Process", "incoming", "messages", "from", "socket", "." ]
python
train
alfred82santa/dirty-models
dirty_models/model_types.py
https://github.com/alfred82santa/dirty-models/blob/354becdb751b21f673515eae928c256c7e923c50/dirty_models/model_types.py#L300-L314
def export_original_data(self): """ Retrieves the original_data """ def export_field(value): """ Export item """ try: return value.export_original_data() except AttributeError: return value return [export_field(val) for val in self.__original_data__]
[ "def", "export_original_data", "(", "self", ")", ":", "def", "export_field", "(", "value", ")", ":", "\"\"\"\n Export item\n \"\"\"", "try", ":", "return", "value", ".", "export_original_data", "(", ")", "except", "AttributeError", ":", "return", "value", "return", "[", "export_field", "(", "val", ")", "for", "val", "in", "self", ".", "__original_data__", "]" ]
Retrieves the original_data
[ "Retrieves", "the", "original_data" ]
python
train
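A sketch of the same duck-typed export pattern on plain Python objects: items that expose export_original_data() are exported, anything else passes through unchanged. The Wrapped class is a stand-in, not part of dirty-models.

class Wrapped:
    def __init__(self, value):
        self.value = value

    def export_original_data(self):
        return {"original": self.value}

def export_field(value):
    # export model-like items, return plain values untouched
    try:
        return value.export_original_data()
    except AttributeError:
        return value

original_data = [Wrapped(1), 2, "three"]
print([export_field(val) for val in original_data])
# [{'original': 1}, 2, 'three']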
gwastro/pycbc
pycbc/workflow/jobsetup.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/jobsetup.py#L955-L996
def create_nodata_node(self, valid_seg, tags=None): """ A simplified version of create_node that creates a node that does not need to read in data. Parameters ----------- valid_seg : glue.segment The segment over which to declare the node valid. Usually this would be the duration of the analysis. Returns -------- node : pycbc.workflow.core.Node The instance corresponding to the created node. """ if tags is None: tags = [] node = Node(self) # Set the output file # Add the PSD file if needed if self.write_psd: node.new_output_file_opt(valid_seg, '.txt', '--psd-output', tags=tags+['PSD_FILE'], store_file=self.retain_files) node.new_output_file_opt(valid_seg, '.xml.gz', '--output-file', store_file=self.retain_files) if self.psd_files is not None: should_add = False # If any of the ifos for this job are in the set # of ifos for which a static psd was provided. for ifo in self.ifo_list: for psd_file in self.psd_files: if ifo in psd_file.ifo_list: should_add = True if should_add: node.add_input_opt('--psd-file', psd_file) return node
[ "def", "create_nodata_node", "(", "self", ",", "valid_seg", ",", "tags", "=", "None", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "[", "]", "node", "=", "Node", "(", "self", ")", "# Set the output file", "# Add the PSD file if needed", "if", "self", ".", "write_psd", ":", "node", ".", "new_output_file_opt", "(", "valid_seg", ",", "'.txt'", ",", "'--psd-output'", ",", "tags", "=", "tags", "+", "[", "'PSD_FILE'", "]", ",", "store_file", "=", "self", ".", "retain_files", ")", "node", ".", "new_output_file_opt", "(", "valid_seg", ",", "'.xml.gz'", ",", "'--output-file'", ",", "store_file", "=", "self", ".", "retain_files", ")", "if", "self", ".", "psd_files", "is", "not", "None", ":", "should_add", "=", "False", "# If any of the ifos for this job are in the set", "# of ifos for which a static psd was provided.", "for", "ifo", "in", "self", ".", "ifo_list", ":", "for", "psd_file", "in", "self", ".", "psd_files", ":", "if", "ifo", "in", "psd_file", ".", "ifo_list", ":", "should_add", "=", "True", "if", "should_add", ":", "node", ".", "add_input_opt", "(", "'--psd-file'", ",", "psd_file", ")", "return", "node" ]
A simplified version of create_node that creates a node that does not need to read in data. Parameters ----------- valid_seg : glue.segment The segment over which to declare the node valid. Usually this would be the duration of the analysis. Returns -------- node : pycbc.workflow.core.Node The instance corresponding to the created node.
[ "A", "simplified", "version", "of", "create_node", "that", "creates", "a", "node", "that", "does", "not", "need", "to", "read", "in", "data", "." ]
python
train
pytroll/satpy
satpy/demo/__init__.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/demo/__init__.py#L71-L98
def get_us_midlatitude_cyclone_abi(base_dir='.', method=None, force=False): """Get GOES-16 ABI (CONUS sector) data from 2019-03-14 00:00Z. Args: base_dir (str): Base directory for downloaded files. method (str): Force download method for the data if not already cached. Allowed options are: 'gcsfs'. Default of ``None`` will choose the best method based on environment settings. force (bool): Force re-download of data regardless of its existence on the local system. Warning: May delete non-demo files stored in download directory. Total size: ~110MB """ if method is None: method = 'gcsfs' if method not in ['gcsfs']: raise NotImplementedError("Demo data download method '{}' not " "implemented yet.".format(method)) from ._google_cloud_platform import get_bucket_files patterns = ['gs://gcp-public-data-goes-16/ABI-L1b-RadC/2019/073/00/*0002*.nc'] subdir = os.path.join(base_dir, 'abi_l1b', '20190314_us_midlatitude_cyclone') _makedirs(subdir, exist_ok=True) filenames = get_bucket_files(patterns, subdir, force=force) assert len(filenames) == 16, "Not all files could be downloaded" return filenames
[ "def", "get_us_midlatitude_cyclone_abi", "(", "base_dir", "=", "'.'", ",", "method", "=", "None", ",", "force", "=", "False", ")", ":", "if", "method", "is", "None", ":", "method", "=", "'gcsfs'", "if", "method", "not", "in", "[", "'gcsfs'", "]", ":", "raise", "NotImplementedError", "(", "\"Demo data download method '{}' not \"", "\"implemented yet.\"", ".", "format", "(", "method", ")", ")", "from", ".", "_google_cloud_platform", "import", "get_bucket_files", "patterns", "=", "[", "'gs://gcp-public-data-goes-16/ABI-L1b-RadC/2019/073/00/*0002*.nc'", "]", "subdir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "'abi_l1b'", ",", "'20190314_us_midlatitude_cyclone'", ")", "_makedirs", "(", "subdir", ",", "exist_ok", "=", "True", ")", "filenames", "=", "get_bucket_files", "(", "patterns", ",", "subdir", ",", "force", "=", "force", ")", "assert", "len", "(", "filenames", ")", "==", "16", ",", "\"Not all files could be downloaded\"", "return", "filenames" ]
Get GOES-16 ABI (CONUS sector) data from 2019-03-14 00:00Z. Args: base_dir (str): Base directory for downloaded files. method (str): Force download method for the data if not already cached. Allowed options are: 'gcsfs'. Default of ``None`` will choose the best method based on environment settings. force (bool): Force re-download of data regardless of its existence on the local system. Warning: May delete non-demo files stored in download directory. Total size: ~110MB
[ "Get", "GOES", "-", "16", "ABI", "(", "CONUS", "sector", ")", "data", "from", "2019", "-", "03", "-", "14", "00", ":", "00Z", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L1009-L1015
def bk_default(cls): "Make the current background color the default." wAttributes = cls._get_text_attributes() wAttributes &= ~win32.BACKGROUND_MASK #wAttributes |= win32.BACKGROUND_BLACK wAttributes &= ~win32.BACKGROUND_INTENSITY cls._set_text_attributes(wAttributes)
[ "def", "bk_default", "(", "cls", ")", ":", "wAttributes", "=", "cls", ".", "_get_text_attributes", "(", ")", "wAttributes", "&=", "~", "win32", ".", "BACKGROUND_MASK", "#wAttributes |= win32.BACKGROUND_BLACK", "wAttributes", "&=", "~", "win32", ".", "BACKGROUND_INTENSITY", "cls", ".", "_set_text_attributes", "(", "wAttributes", ")" ]
Make the current background color the default.
[ "Make", "the", "current", "background", "color", "the", "default", "." ]
python
train
linkhub-sdk/popbill.py
popbill/statementService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/statementService.py#L523-L541
def getFiles(self, CorpNum, ItemCode, MgtKey): """ 첨부파일 목록 확인 args CorpNum : 팝빌회원 사업자번호 ItemCode : 명세서 종류 코드 [121 - 거래명세서], [122 - 청구서], [123 - 견적서], [124 - 발주서], [125 - 입금표], [126 - 영수증] MgtKey : 파트너 문서관리번호 return 첨부파일 목록 as List raise PopbillException """ if MgtKey == None or MgtKey == "": raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.") if ItemCode == None or ItemCode == "": raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.") return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '/Files', CorpNum)
[ "def", "getFiles", "(", "self", ",", "CorpNum", ",", "ItemCode", ",", "MgtKey", ")", ":", "if", "MgtKey", "==", "None", "or", "MgtKey", "==", "\"\"", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"관리번호가 입력되지 않았습니다.\")\r", "", "if", "ItemCode", "==", "None", "or", "ItemCode", "==", "\"\"", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"명세서 종류 코드가 입력되지 않았습니다.\")\r", "", "return", "self", ".", "_httpget", "(", "'/Statement/'", "+", "str", "(", "ItemCode", ")", "+", "'/'", "+", "MgtKey", "+", "'/Files'", ",", "CorpNum", ")" ]
Check the list of attached files. args CorpNum : Popbill member business registration number ItemCode : statement type code [121 - transaction statement], [122 - bill], [123 - estimate], [124 - purchase order], [125 - deposit slip], [126 - receipt] MgtKey : partner document management number return list of attached files as List raise PopbillException
[ "Check", "the", "list", "of", "attached", "files", ".", "args", "CorpNum", ":", "Popbill", "member", "business", "registration", "number", "ItemCode", ":", "statement", "type", "code", "[", "121", "-", "transaction", "statement", "]", "[", "122", "-", "bill", "]", "[", "123", "-", "estimate", "]", "[", "124", "-", "purchase", "order", "]", "[", "125", "-", "deposit", "slip", "]", "[", "126", "-", "receipt", "]", "MgtKey", ":", "partner", "document", "management", "number", "return", "list", "of", "attached", "files", "as", "List", "raise", "PopbillException" ]
python
train
dancsalo/TensorBase
tensorbase/base.py
https://github.com/dancsalo/TensorBase/blob/3d42a326452bd03427034916ff2fb90730020204/tensorbase/base.py#L218-L230
def load_config_yaml(self, flags, config_dict): """ Load config dict and yaml dict and then override both with flags dict. """ if config_dict is None: print('Config File not specified. Using only input flags.') return flags try: config_yaml_dict = self.cfg_from_file(flags['YAML_FILE'], config_dict) except KeyError: print('Yaml File not specified. Using only input flags and config file.') return config_dict print('Using input flags, config file, and yaml file.') config_yaml_flags_dict = self._merge_a_into_b_simple(flags, config_yaml_dict) return config_yaml_flags_dict
[ "def", "load_config_yaml", "(", "self", ",", "flags", ",", "config_dict", ")", ":", "if", "config_dict", "is", "None", ":", "print", "(", "'Config File not specified. Using only input flags.'", ")", "return", "flags", "try", ":", "config_yaml_dict", "=", "self", ".", "cfg_from_file", "(", "flags", "[", "'YAML_FILE'", "]", ",", "config_dict", ")", "except", "KeyError", ":", "print", "(", "'Yaml File not specified. Using only input flags and config file.'", ")", "return", "config_dict", "print", "(", "'Using input flags, config file, and yaml file.'", ")", "config_yaml_flags_dict", "=", "self", ".", "_merge_a_into_b_simple", "(", "flags", ",", "config_yaml_dict", ")", "return", "config_yaml_flags_dict" ]
Load config dict and yaml dict and then override both with flags dict.
[ "Load", "config", "dict", "and", "yaml", "dict", "and", "then", "override", "both", "with", "flags", "dict", "." ]
python
train
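A plain-dictionary sketch of the precedence described above, assuming YAML values override the config file and input flags override both; the merge helper is a stand-in for the TensorBase internals.

def merge(overrides, base):
    # values in overrides win over values in base
    merged = dict(base)
    merged.update(overrides)
    return merged

flags = {"RUN_NUM": 3, "SEED": 123}
config_dict = {"SEED": 0, "LR": 0.01}
yaml_dict = {"LR": 0.001, "BATCH": 32}

config_yaml_dict = merge(yaml_dict, config_dict)
config_yaml_flags_dict = merge(flags, config_yaml_dict)
print(config_yaml_flags_dict)
# {'SEED': 123, 'LR': 0.001, 'BATCH': 32, 'RUN_NUM': 3}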
inasafe/inasafe
safe/impact_function/multi_exposure_wrapper.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/impact_function/multi_exposure_wrapper.py#L388-L395
def add_exposure(self, layer): """Add an exposure layer in the analysis. :param layer: An exposure layer to be used for the analysis. :type layer: QgsMapLayer """ self._exposures.append(layer) self._is_ready = False
[ "def", "add_exposure", "(", "self", ",", "layer", ")", ":", "self", ".", "_exposures", ".", "append", "(", "layer", ")", "self", ".", "_is_ready", "=", "False" ]
Add an exposure layer in the analysis. :param layer: An exposure layer to be used for the analysis. :type layer: QgsMapLayer
[ "Add", "an", "exposure", "layer", "in", "the", "analysis", "." ]
python
train
ray-project/ray
python/ray/services.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/services.py#L424-L435
def _autodetect_num_gpus(): """Attempt to detect the number of GPUs on this machine. TODO(rkn): This currently assumes Nvidia GPUs and Linux. Returns: The number of GPUs if any were detected, otherwise 0. """ proc_gpus_path = "/proc/driver/nvidia/gpus" if os.path.isdir(proc_gpus_path): return len(os.listdir(proc_gpus_path)) return 0
[ "def", "_autodetect_num_gpus", "(", ")", ":", "proc_gpus_path", "=", "\"/proc/driver/nvidia/gpus\"", "if", "os", ".", "path", ".", "isdir", "(", "proc_gpus_path", ")", ":", "return", "len", "(", "os", ".", "listdir", "(", "proc_gpus_path", ")", ")", "return", "0" ]
Attempt to detect the number of GPUs on this machine. TODO(rkn): This currently assumes Nvidia GPUs and Linux. Returns: The number of GPUs if any were detected, otherwise 0.
[ "Attempt", "to", "detect", "the", "number", "of", "GPUs", "on", "this", "machine", "." ]
python
train
allenai/allennlp
allennlp/tools/drop_eval.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/drop_eval.py#L167-L226
def evaluate_json(annotations: Dict[str, Any], predicted_answers: Dict[str, Any]) -> Tuple[float, float]: """ Takes gold annotations and predicted answers and evaluates the predictions for each question in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to match predictions to gold annotations (note that these are somewhat deep in the JSON for the gold annotations, but must be top-level keys in the predicted answers). The ``annotations`` are assumed to have the format of the dev set in the DROP data release. The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a string (or list of strings) that is the answer. """ instance_exact_match = [] instance_f1 = [] # for each type as well type_to_em: Dict[str, List[float]] = defaultdict(list) type_to_f1: Dict[str, List[float]] = defaultdict(list) for _, annotation in annotations.items(): for qa_pair in annotation["qa_pairs"]: query_id = qa_pair["query_id"] max_em_score = 0.0 max_f1_score = 0.0 max_type = None if query_id in predicted_answers: predicted = predicted_answers[query_id] candidate_answers = [qa_pair["answer"]] if "validated_answers" in qa_pair and qa_pair["validated_answers"]: candidate_answers += qa_pair["validated_answers"] for answer in candidate_answers: gold_answer, gold_type = answer_json_to_strings(answer) em_score, f1_score = get_metrics(predicted, gold_answer) if gold_answer[0].strip() != "": max_em_score = max(max_em_score, em_score) max_f1_score = max(max_f1_score, f1_score) if max_em_score == em_score or max_f1_score == f1_score: max_type = gold_type else: print("Missing prediction for question: {}".format(query_id)) if qa_pair and qa_pair["answer"]: max_type = answer_json_to_strings(qa_pair["answer"])[1] else: max_type = "number" max_em_score = 0.0 max_f1_score = 0.0 instance_exact_match.append(max_em_score) instance_f1.append(max_f1_score) type_to_em[max_type].append(max_em_score) type_to_f1[max_type].append(max_f1_score) global_em = np.mean(instance_exact_match) global_f1 = np.mean(instance_f1) print("Exact-match accuracy {0:.2f}".format(global_em * 100)) print("F1 score {0:.2f}".format(global_f1 * 100)) print("{0:.2f} & {1:.2f}".format(global_em * 100, global_f1 * 100)) print("----") total = np.sum([len(v) for v in type_to_em.values()]) for typ in sorted(type_to_em.keys()): print("{0}: {1} ({2:.2f}%)".format(typ, len(type_to_em[typ]), 100. * len(type_to_em[typ])/total)) print(" Exact-match accuracy {0:.3f}".format(100. * np.mean(type_to_em[typ]))) print(" F1 score {0:.3f}".format(100. * np.mean(type_to_f1[typ]))) return global_em, global_f1
[ "def", "evaluate_json", "(", "annotations", ":", "Dict", "[", "str", ",", "Any", "]", ",", "predicted_answers", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Tuple", "[", "float", ",", "float", "]", ":", "instance_exact_match", "=", "[", "]", "instance_f1", "=", "[", "]", "# for each type as well", "type_to_em", ":", "Dict", "[", "str", ",", "List", "[", "float", "]", "]", "=", "defaultdict", "(", "list", ")", "type_to_f1", ":", "Dict", "[", "str", ",", "List", "[", "float", "]", "]", "=", "defaultdict", "(", "list", ")", "for", "_", ",", "annotation", "in", "annotations", ".", "items", "(", ")", ":", "for", "qa_pair", "in", "annotation", "[", "\"qa_pairs\"", "]", ":", "query_id", "=", "qa_pair", "[", "\"query_id\"", "]", "max_em_score", "=", "0.0", "max_f1_score", "=", "0.0", "max_type", "=", "None", "if", "query_id", "in", "predicted_answers", ":", "predicted", "=", "predicted_answers", "[", "query_id", "]", "candidate_answers", "=", "[", "qa_pair", "[", "\"answer\"", "]", "]", "if", "\"validated_answers\"", "in", "qa_pair", "and", "qa_pair", "[", "\"validated_answers\"", "]", ":", "candidate_answers", "+=", "qa_pair", "[", "\"validated_answers\"", "]", "for", "answer", "in", "candidate_answers", ":", "gold_answer", ",", "gold_type", "=", "answer_json_to_strings", "(", "answer", ")", "em_score", ",", "f1_score", "=", "get_metrics", "(", "predicted", ",", "gold_answer", ")", "if", "gold_answer", "[", "0", "]", ".", "strip", "(", ")", "!=", "\"\"", ":", "max_em_score", "=", "max", "(", "max_em_score", ",", "em_score", ")", "max_f1_score", "=", "max", "(", "max_f1_score", ",", "f1_score", ")", "if", "max_em_score", "==", "em_score", "or", "max_f1_score", "==", "f1_score", ":", "max_type", "=", "gold_type", "else", ":", "print", "(", "\"Missing prediction for question: {}\"", ".", "format", "(", "query_id", ")", ")", "if", "qa_pair", "and", "qa_pair", "[", "\"answer\"", "]", ":", "max_type", "=", "answer_json_to_strings", "(", "qa_pair", "[", "\"answer\"", "]", ")", "[", "1", "]", "else", ":", "max_type", "=", "\"number\"", "max_em_score", "=", "0.0", "max_f1_score", "=", "0.0", "instance_exact_match", ".", "append", "(", "max_em_score", ")", "instance_f1", ".", "append", "(", "max_f1_score", ")", "type_to_em", "[", "max_type", "]", ".", "append", "(", "max_em_score", ")", "type_to_f1", "[", "max_type", "]", ".", "append", "(", "max_f1_score", ")", "global_em", "=", "np", ".", "mean", "(", "instance_exact_match", ")", "global_f1", "=", "np", ".", "mean", "(", "instance_f1", ")", "print", "(", "\"Exact-match accuracy {0:.2f}\"", ".", "format", "(", "global_em", "*", "100", ")", ")", "print", "(", "\"F1 score {0:.2f}\"", ".", "format", "(", "global_f1", "*", "100", ")", ")", "print", "(", "\"{0:.2f} & {1:.2f}\"", ".", "format", "(", "global_em", "*", "100", ",", "global_f1", "*", "100", ")", ")", "print", "(", "\"----\"", ")", "total", "=", "np", ".", "sum", "(", "[", "len", "(", "v", ")", "for", "v", "in", "type_to_em", ".", "values", "(", ")", "]", ")", "for", "typ", "in", "sorted", "(", "type_to_em", ".", "keys", "(", ")", ")", ":", "print", "(", "\"{0}: {1} ({2:.2f}%)\"", ".", "format", "(", "typ", ",", "len", "(", "type_to_em", "[", "typ", "]", ")", ",", "100.", "*", "len", "(", "type_to_em", "[", "typ", "]", ")", "/", "total", ")", ")", "print", "(", "\" Exact-match accuracy {0:.3f}\"", ".", "format", "(", "100.", "*", "np", ".", "mean", "(", "type_to_em", "[", "typ", "]", ")", ")", ")", "print", "(", "\" F1 score {0:.3f}\"", ".", "format", "(", "100.", "*", "np", ".", "mean", 
"(", "type_to_f1", "[", "typ", "]", ")", ")", ")", "return", "global_em", ",", "global_f1" ]
Takes gold annotations and predicted answers and evaluates the predictions for each question in the gold annotations. Both JSON dictionaries must have query_id keys, which are used to match predictions to gold annotations (note that these are somewhat deep in the JSON for the gold annotations, but must be top-level keys in the predicted answers). The ``annotations`` are assumed to have the format of the dev set in the DROP data release. The ``predicted_answers`` JSON must be a dictionary keyed by query id, where the value is a string (or list of strings) that is the answer.
[ "Takes", "gold", "annotations", "and", "predicted", "answers", "and", "evaluates", "the", "predictions", "for", "each", "question", "in", "the", "gold", "annotations", ".", "Both", "JSON", "dictionaries", "must", "have", "query_id", "keys", "which", "are", "used", "to", "match", "predictions", "to", "gold", "annotations", "(", "note", "that", "these", "are", "somewhat", "deep", "in", "the", "JSON", "for", "the", "gold", "annotations", "but", "must", "be", "top", "-", "level", "keys", "in", "the", "predicted", "answers", ")", "." ]
python
train
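A simplified sketch of the aggregation pattern above: for each question, take the best score over all candidate gold answers, then average over questions. A trivial exact-match metric stands in for the real DROP get_metrics(), and the flat gold_annotations dict is a stand-in for the nested DROP annotation format.

import numpy as np

def exact_match(predicted, gold):
    return float(predicted.strip().lower() == gold.strip().lower())

gold_annotations = {
    "q1": ["four", "4"],
    "q2": ["blue"],
}
predicted_answers = {"q1": "4", "q2": "red"}

per_question = []
for qid, candidates in gold_annotations.items():
    predicted = predicted_answers.get(qid, "")
    # best score over all candidate gold answers for this question
    per_question.append(max(exact_match(predicted, g) for g in candidates))

print("Exact-match accuracy {0:.2f}".format(np.mean(per_question) * 100))  # 50.00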
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/dir2.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/dir2.py#L34-L73
def dir2(obj): """dir2(obj) -> list of strings Extended version of the Python builtin dir(), which does a few extra checks, and supports common objects with unusual internals that confuse dir(), such as Traits and PyCrust. This version is guaranteed to return only a list of true strings, whereas dir() returns anything that objects inject into themselves, even if they are later not really valid for attribute access (many extension libraries have such bugs). """ # Start building the attribute list via dir(), and then complete it # with a few extra special-purpose calls. words = set(dir(obj)) if hasattr(obj, '__class__'): #words.add('__class__') words |= set(get_class_members(obj.__class__)) # for objects with Enthought's traits, add trait_names() list # for PyCrust-style, add _getAttributeNames() magic method list for attr in ('trait_names', '_getAttributeNames'): if hasattr(obj, attr): try: func = getattr(obj, attr) if callable(func): words |= set(func()) except: # TypeError: obj is class not instance pass # filter out non-string attributes which may be stuffed by dir() calls # and poor coding in third-party modules words = [w for w in words if isinstance(w, basestring)] return sorted(words)
[ "def", "dir2", "(", "obj", ")", ":", "# Start building the attribute list via dir(), and then complete it", "# with a few extra special-purpose calls.", "words", "=", "set", "(", "dir", "(", "obj", ")", ")", "if", "hasattr", "(", "obj", ",", "'__class__'", ")", ":", "#words.add('__class__')", "words", "|=", "set", "(", "get_class_members", "(", "obj", ".", "__class__", ")", ")", "# for objects with Enthought's traits, add trait_names() list", "# for PyCrust-style, add _getAttributeNames() magic method list", "for", "attr", "in", "(", "'trait_names'", ",", "'_getAttributeNames'", ")", ":", "if", "hasattr", "(", "obj", ",", "attr", ")", ":", "try", ":", "func", "=", "getattr", "(", "obj", ",", "attr", ")", "if", "callable", "(", "func", ")", ":", "words", "|=", "set", "(", "func", "(", ")", ")", "except", ":", "# TypeError: obj is class not instance", "pass", "# filter out non-string attributes which may be stuffed by dir() calls", "# and poor coding in third-party modules", "words", "=", "[", "w", "for", "w", "in", "words", "if", "isinstance", "(", "w", ",", "basestring", ")", "]", "return", "sorted", "(", "words", ")" ]
dir2(obj) -> list of strings Extended version of the Python builtin dir(), which does a few extra checks, and supports common objects with unusual internals that confuse dir(), such as Traits and PyCrust. This version is guaranteed to return only a list of true strings, whereas dir() returns anything that objects inject into themselves, even if they are later not really valid for attribute access (many extension libraries have such bugs).
[ "dir2", "(", "obj", ")", "-", ">", "list", "of", "strings" ]
python
test
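A Python 3 sketch of the extended dir() idea: merge dir(obj), the class members, and anything reported by a trait_names()-style hook, keeping only real strings.

class Widget:
    color = "red"

    def trait_names(self):
        return ["size", "shape"]

def dir2(obj):
    words = set(dir(obj))
    if hasattr(obj, "__class__"):
        words |= set(dir(obj.__class__))
    # honour Enthought-traits and PyCrust style attribute hooks if present
    for attr in ("trait_names", "_getAttributeNames"):
        func = getattr(obj, attr, None)
        if callable(func):
            try:
                words |= set(func())
            except TypeError:
                pass
    return sorted(w for w in words if isinstance(w, str))

names = dir2(Widget())
print("size" in names, "color" in names)  # True True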
volafiled/python-volapi
volapi/volapi.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L74-L89
def connect(self, username, checksum, password=None, key=None): """Connect to websocket through asyncio http interface""" ws_url = ( f"{BASE_WS_URL}?room={self.room.room_id}&cs={checksum}&nick={username}" f"&rn={random_id(6)}&t={int(time.time() * 1000)}&transport=websocket&EIO=3" ) if password: ws_url += f"&password={password}" elif key: ws_url += f"&key={key}" ARBITRATOR.create_connection( self.proto, ws_url, self.headers["User-Agent"], self.cookies ) self.__conn_barrier.wait()
[ "def", "connect", "(", "self", ",", "username", ",", "checksum", ",", "password", "=", "None", ",", "key", "=", "None", ")", ":", "ws_url", "=", "(", "f\"{BASE_WS_URL}?room={self.room.room_id}&cs={checksum}&nick={username}\"", "f\"&rn={random_id(6)}&t={int(time.time() * 1000)}&transport=websocket&EIO=3\"", ")", "if", "password", ":", "ws_url", "+=", "f\"&password={password}\"", "elif", "key", ":", "ws_url", "+=", "f\"&key={key}\"", "ARBITRATOR", ".", "create_connection", "(", "self", ".", "proto", ",", "ws_url", ",", "self", ".", "headers", "[", "\"User-Agent\"", "]", ",", "self", ".", "cookies", ")", "self", ".", "__conn_barrier", ".", "wait", "(", ")" ]
Connect to websocket through asyncio http interface
[ "Connect", "to", "websocket", "through", "asyncio", "http", "interface" ]
python
train
brmscheiner/ideogram
ideogram/polarfract/polarfract.py
https://github.com/brmscheiner/ideogram/blob/422bf566c51fd56f7bbb6e75b16d18d52b4c7568/ideogram/polarfract/polarfract.py#L23-L32
def calcPosition(self,parent_circle): ''' Position the circle tangent to the parent circle with the line connecting the centers of the two circles meeting the x axis at angle theta. ''' if r not in self: raise AttributeError("radius must be calculated before position.") if theta not in self: raise AttributeError("theta must be set before position can be calculated.") x_offset = math.cos(t_radians) * (parent_circle.r + self.r) y_offset = math.sin(t_radians) * (parent_circle.r + self.r) self.x = parent_circle.x + x_offset self.y = parent_circle.y + y_offset
[ "def", "calcPosition", "(", "self", ",", "parent_circle", ")", ":", "if", "r", "not", "in", "self", ":", "raise", "AttributeError", "(", "\"radius must be calculated before position.\"", ")", "if", "theta", "not", "in", "self", ":", "raise", "AttributeError", "(", "\"theta must be set before position can be calculated.\"", ")", "x_offset", "=", "math", ".", "cos", "(", "t_radians", ")", "*", "(", "parent_circle", ".", "r", "+", "self", ".", "r", ")", "y_offset", "=", "math", ".", "sin", "(", "t_radians", ")", "*", "(", "parent_circle", ".", "r", "+", "self", ".", "r", ")", "self", ".", "x", "=", "parent_circle", ".", "x", "+", "x_offset", "self", ".", "y", "=", "parent_circle", ".", "y", "+", "y_offset" ]
Position the circle tangent to the parent circle with the line connecting the centers of the two circles meeting the x axis at angle theta.
[ "Position", "the", "circle", "tangent", "to", "the", "parent", "circle", "with", "the", "line", "connecting", "the", "centers", "of", "the", "two", "circles", "meeting", "the", "x", "axis", "at", "angle", "theta", "." ]
python
train
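A sketch of the tangent-circle placement described in the docstring. The snippet above references r, theta and t_radians that are not defined in its body, so this version assumes the circle stores r and theta (in degrees) as attributes; the Circle class here is illustrative only.

import math

class Circle:
    def __init__(self, r, theta=0.0, x=0.0, y=0.0):
        self.r = r
        self.theta = theta
        self.x = x
        self.y = y

    def calc_position(self, parent):
        # place this circle tangent to parent, along the direction theta
        t_radians = math.radians(self.theta)
        self.x = parent.x + math.cos(t_radians) * (parent.r + self.r)
        self.y = parent.y + math.sin(t_radians) * (parent.r + self.r)

parent = Circle(r=2.0)
child = Circle(r=1.0, theta=90.0)
child.calc_position(parent)
print(round(child.x, 6), round(child.y, 6))  # 0.0 3.0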
combust/mleap
python/mleap/sklearn/preprocessing/data.py
https://github.com/combust/mleap/blob/dc6b79db03ec27a0ba08b289842551e73d517ab3/python/mleap/sklearn/preprocessing/data.py#L395-L415
def transform(self, y): """Transform labels to normalized encoding. Parameters ---------- y : array-like of shape [n_samples] Target values. Returns ------- y : array-like of shape [n_samples] """ check_is_fitted(self, 'classes_') y = column_or_1d(y, warn=True) classes = np.unique(y) _check_numpy_unicode_bug(classes) if len(np.intersect1d(classes, self.classes_)) < len(classes): diff = np.setdiff1d(classes, self.classes_) raise ValueError("y contains new labels: %s" % str(diff)) return np.searchsorted(self.classes_, y)
[ "def", "transform", "(", "self", ",", "y", ")", ":", "check_is_fitted", "(", "self", ",", "'classes_'", ")", "y", "=", "column_or_1d", "(", "y", ",", "warn", "=", "True", ")", "classes", "=", "np", ".", "unique", "(", "y", ")", "_check_numpy_unicode_bug", "(", "classes", ")", "if", "len", "(", "np", ".", "intersect1d", "(", "classes", ",", "self", ".", "classes_", ")", ")", "<", "len", "(", "classes", ")", ":", "diff", "=", "np", ".", "setdiff1d", "(", "classes", ",", "self", ".", "classes_", ")", "raise", "ValueError", "(", "\"y contains new labels: %s\"", "%", "str", "(", "diff", ")", ")", "return", "np", ".", "searchsorted", "(", "self", ".", "classes_", ",", "y", ")" ]
Transform labels to normalized encoding. Parameters ---------- y : array-like of shape [n_samples] Target values. Returns ------- y : array-like of shape [n_samples]
[ "Transform", "labels", "to", "normalized", "encoding", "." ]
python
train
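A NumPy-only sketch of the transform logic: classes_ plays the role of the vocabulary learned at fit time, and labels unseen at fit time raise a ValueError.

import numpy as np

classes_ = np.unique(np.array(["cat", "dog", "mouse"]))  # learned, sorted vocabulary

def transform(y):
    y = np.asarray(y)
    classes = np.unique(y)
    if len(np.intersect1d(classes, classes_)) < len(classes):
        diff = np.setdiff1d(classes, classes_)
        raise ValueError("y contains new labels: %s" % str(diff))
    # map each label to its index in the sorted vocabulary
    return np.searchsorted(classes_, y)

print(transform(["dog", "cat", "dog"]))  # [1 0 1]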
Jajcus/pyxmpp2
pyxmpp2/streambase.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/streambase.py#L239-L249
def event(self, event): # pylint: disable-msg=R0201 """Handle a stream event. Called when connection state is changed. Should not be called with self.lock acquired! """ event.stream = self logger.debug(u"Stream event: {0}".format(event)) self.settings["event_queue"].put(event) return False
[ "def", "event", "(", "self", ",", "event", ")", ":", "# pylint: disable-msg=R0201", "event", ".", "stream", "=", "self", "logger", ".", "debug", "(", "u\"Stream event: {0}\"", ".", "format", "(", "event", ")", ")", "self", ".", "settings", "[", "\"event_queue\"", "]", ".", "put", "(", "event", ")", "return", "False" ]
Handle a stream event. Called when connection state is changed. Should not be called with self.lock acquired!
[ "Handle", "a", "stream", "event", "." ]
python
valid
manns/pyspread
pyspread/src/gui/_grid.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L422-L435
def ForceRefresh(self, *args, **kwargs): """Refresh hook""" wx.grid.Grid.ForceRefresh(self, *args, **kwargs) for video_cell_key in self.grid_renderer.video_cells: if video_cell_key[2] == self.current_table: video_cell = self.grid_renderer.video_cells[video_cell_key] rect = self.CellToRect(video_cell_key[0], video_cell_key[1]) drawn_rect = self.grid_renderer._get_drawn_rect(self, video_cell_key, rect) video_cell.SetClientRect(drawn_rect) self._update_video_volume_cell_attributes(video_cell_key)
[ "def", "ForceRefresh", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "wx", ".", "grid", ".", "Grid", ".", "ForceRefresh", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "video_cell_key", "in", "self", ".", "grid_renderer", ".", "video_cells", ":", "if", "video_cell_key", "[", "2", "]", "==", "self", ".", "current_table", ":", "video_cell", "=", "self", ".", "grid_renderer", ".", "video_cells", "[", "video_cell_key", "]", "rect", "=", "self", ".", "CellToRect", "(", "video_cell_key", "[", "0", "]", ",", "video_cell_key", "[", "1", "]", ")", "drawn_rect", "=", "self", ".", "grid_renderer", ".", "_get_drawn_rect", "(", "self", ",", "video_cell_key", ",", "rect", ")", "video_cell", ".", "SetClientRect", "(", "drawn_rect", ")", "self", ".", "_update_video_volume_cell_attributes", "(", "video_cell_key", ")" ]
Refresh hook
[ "Refresh", "hook" ]
python
train
aiogram/aiogram
aiogram/utils/auth_widget.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/utils/auth_widget.py#L12-L24
def generate_hash(data: dict, token: str) -> str: """ Generate secret hash :param data: :param token: :return: """ secret = hashlib.sha256() secret.update(token.encode('utf-8')) sorted_params = collections.OrderedDict(sorted(data.items())) msg = '\n'.join("{}={}".format(k, v) for k, v in sorted_params.items() if k != 'hash') return hmac.new(secret.digest(), msg.encode('utf-8'), digestmod=hashlib.sha256).hexdigest()
[ "def", "generate_hash", "(", "data", ":", "dict", ",", "token", ":", "str", ")", "->", "str", ":", "secret", "=", "hashlib", ".", "sha256", "(", ")", "secret", ".", "update", "(", "token", ".", "encode", "(", "'utf-8'", ")", ")", "sorted_params", "=", "collections", ".", "OrderedDict", "(", "sorted", "(", "data", ".", "items", "(", ")", ")", ")", "msg", "=", "'\\n'", ".", "join", "(", "\"{}={}\"", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "sorted_params", ".", "items", "(", ")", "if", "k", "!=", "'hash'", ")", "return", "hmac", ".", "new", "(", "secret", ".", "digest", "(", ")", ",", "msg", ".", "encode", "(", "'utf-8'", ")", ",", "digestmod", "=", "hashlib", ".", "sha256", ")", ".", "hexdigest", "(", ")" ]
Generate secret hash :param data: :param token: :return:
[ "Generate", "secret", "hash" ]
python
train
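A self-contained sketch of verifying Telegram login-widget data with the helper above; the bot token and widget payload are fabricated placeholders.

import collections
import hashlib
import hmac

def generate_hash(data, token):
    # secret key is SHA-256 of the bot token; message is the sorted k=v lines
    secret = hashlib.sha256()
    secret.update(token.encode("utf-8"))
    sorted_params = collections.OrderedDict(sorted(data.items()))
    msg = "\n".join("{}={}".format(k, v)
                    for k, v in sorted_params.items() if k != "hash")
    return hmac.new(secret.digest(), msg.encode("utf-8"),
                    digestmod=hashlib.sha256).hexdigest()

token = "123456:TEST-TOKEN"
data = {"id": 42, "first_name": "Alice", "auth_date": 1700000000}
data["hash"] = generate_hash(data, token)

# An incoming payload is trusted only if the recomputed hash matches.
print(hmac.compare_digest(data["hash"], generate_hash(data, token)))  # True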
numba/llvmlite
llvmlite/binding/value.py
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/binding/value.py#L256-L266
def arguments(self): """ Return an iterator over this function's arguments. The iterator will yield a ValueRef for each argument. """ if not self.is_function: raise ValueError('expected function value, got %s' % (self._kind,)) it = ffi.lib.LLVMPY_FunctionArgumentsIter(self) parents = self._parents.copy() parents.update(function=self) return _ArgumentsIterator(it, parents)
[ "def", "arguments", "(", "self", ")", ":", "if", "not", "self", ".", "is_function", ":", "raise", "ValueError", "(", "'expected function value, got %s'", "%", "(", "self", ".", "_kind", ",", ")", ")", "it", "=", "ffi", ".", "lib", ".", "LLVMPY_FunctionArgumentsIter", "(", "self", ")", "parents", "=", "self", ".", "_parents", ".", "copy", "(", ")", "parents", ".", "update", "(", "function", "=", "self", ")", "return", "_ArgumentsIterator", "(", "it", ",", "parents", ")" ]
Return an iterator over this function's arguments. The iterator will yield a ValueRef for each argument.
[ "Return", "an", "iterator", "over", "this", "function", "s", "arguments", ".", "The", "iterator", "will", "yield", "a", "ValueRef", "for", "each", "argument", "." ]
python
train
funilrys/PyFunceble
PyFunceble/database.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/database.py#L816-L844
def get_expiration_date(self): """ Get the expiration date from the database. :return: The expiration date from the database. :rtype: str|None """ if self._authorization() and self.is_in_database() and not self.is_time_older(): # * We are authorized to work. # and # * The element we are testing is in the database. # and # * The expiration date is in the future. # We get the expiration date from the database. result = PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]][ PyFunceble.INTERN["to_test"] ]["expiration_date"] if result: # The expiration date from the database is not empty nor # equal to None. # We return it. return result # We return None, there is no data to work with. return None
[ "def", "get_expiration_date", "(", "self", ")", ":", "if", "self", ".", "_authorization", "(", ")", "and", "self", ".", "is_in_database", "(", ")", "and", "not", "self", ".", "is_time_older", "(", ")", ":", "# * We are authorized to work.", "# and", "# * The element we are testing is in the database.", "# and", "# * The expiration date is in the future.", "# We get the expiration date from the database.", "result", "=", "PyFunceble", ".", "INTERN", "[", "\"whois_db\"", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"file_to_test\"", "]", "]", "[", "PyFunceble", ".", "INTERN", "[", "\"to_test\"", "]", "]", "[", "\"expiration_date\"", "]", "if", "result", ":", "# The expiration date from the database is not empty nor", "# equal to None.", "# We return it.", "return", "result", "# We return None, there is no data to work with.", "return", "None" ]
Get the expiration date from the database. :return: The expiration date from the database. :rtype: str|None
[ "Get", "the", "expiration", "date", "from", "the", "database", "." ]
python
test
wonambi-python/wonambi
wonambi/widgets/analysis.py
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/analysis.py#L1911-L1982
def report_fooof(self, x, y, suffix): """Create FOOOF (fitting oscillations and 1/f) report. Parameters ---------- x : ndarray vector with frequencies y : ndarray vector with amplitudes """ filename = splitext(self.filename)[0] + '_' + suffix + '_fooof.csv' freq = self.frequency freq_range = [freq['fo_min_freq'].get_value(), freq['fo_max_freq'].get_value()] pk_thresh = freq['fo_pk_thresh'].get_value() pk_width = [freq['fo_pk_width_min'].get_value(), freq['fo_pk_width_max'].get_value()] max_n_pk = freq['fo_max_n_pk'].get_value() min_pk_amp = freq['fo_min_pk_amp'].get_value() bg_mode = freq['fo_bg_mode'].get_value() if max_n_pk == 0: max_n_pk = inf if FOOOF is None: lg.warning('"fooof" package is required for this function, run "pip install fooof"') return fm = FOOOF(peak_width_limits=pk_width, max_n_peaks=max_n_pk, min_peak_amplitude=min_pk_amp, peak_threshold=pk_thresh, background_mode=bg_mode) fm.fit(x, y, freq_range) with open(filename, 'w', newline='') as f: lg.info('Writing to ' + str(filename)) csv_file = writer(f) csv_file.writerow(['Wonambi v{}'.format(__version__)]) csv_file.writerow(['FOOOF - POWER SPECTRUM MODEL']) csv_file.writerow('') csv_file.writerow(['The model was run on the frequency range ' '{} - {} Hz'.format(int(floor(fm.freq_range[0])), int(ceil(fm.freq_range[1])))]) csv_file.writerow(['Frequency Resolution is {:1.2f} Hz'.format( fm.freq_res)]) csv_file.writerow('') csv_file.writerow(['Background Parameters (offset, ' + \ ('knee, ' if fm.background_mode == 'knee' else '') + \ 'slope): ' + ', '.join(['{:2.4f}'] * \ len(fm.background_params_)).format( *fm.background_params_)]) csv_file.writerow('') csv_file.writerow(['{} peaks were found:'.format( len(fm.peak_params_))]) csv_file.writerow('') csv_file.writerow(['Index', 'CF', 'Amp', 'BW']) for i, op in enumerate(fm.peak_params_): csv_file.writerow([i, op[0], op[1], op[2]]) csv_file.writerow('') csv_file.writerow(['Goodness of fit metrics:']) csv_file.writerow(['R^2 of model fit is {:5.4f}'.format( fm.r_squared_)]) csv_file.writerow(['Root mean squared error is {:5.4f}'.format( fm.error_)]) csv_file.writerow('') csv_file.writerow(['Haller M, Donoghue T, Peterson E, Varma P, ' 'Sebastian P, Gao R, Noto T, Knight RT, ' 'Shestyuk A, Voytek B (2018) Parameterizing ' 'Neural Power Spectra. bioRxiv, 299859. doi: ' 'https://doi.org/10.1101/299859'])
[ "def", "report_fooof", "(", "self", ",", "x", ",", "y", ",", "suffix", ")", ":", "filename", "=", "splitext", "(", "self", ".", "filename", ")", "[", "0", "]", "+", "'_'", "+", "suffix", "+", "'_fooof.csv'", "freq", "=", "self", ".", "frequency", "freq_range", "=", "[", "freq", "[", "'fo_min_freq'", "]", ".", "get_value", "(", ")", ",", "freq", "[", "'fo_max_freq'", "]", ".", "get_value", "(", ")", "]", "pk_thresh", "=", "freq", "[", "'fo_pk_thresh'", "]", ".", "get_value", "(", ")", "pk_width", "=", "[", "freq", "[", "'fo_pk_width_min'", "]", ".", "get_value", "(", ")", ",", "freq", "[", "'fo_pk_width_max'", "]", ".", "get_value", "(", ")", "]", "max_n_pk", "=", "freq", "[", "'fo_max_n_pk'", "]", ".", "get_value", "(", ")", "min_pk_amp", "=", "freq", "[", "'fo_min_pk_amp'", "]", ".", "get_value", "(", ")", "bg_mode", "=", "freq", "[", "'fo_bg_mode'", "]", ".", "get_value", "(", ")", "if", "max_n_pk", "==", "0", ":", "max_n_pk", "=", "inf", "if", "FOOOF", "is", "None", ":", "lg", ".", "warning", "(", "'\"fooof\" package is required for this function, run \"pip install fooof\"'", ")", "return", "fm", "=", "FOOOF", "(", "peak_width_limits", "=", "pk_width", ",", "max_n_peaks", "=", "max_n_pk", ",", "min_peak_amplitude", "=", "min_pk_amp", ",", "peak_threshold", "=", "pk_thresh", ",", "background_mode", "=", "bg_mode", ")", "fm", ".", "fit", "(", "x", ",", "y", ",", "freq_range", ")", "with", "open", "(", "filename", ",", "'w'", ",", "newline", "=", "''", ")", "as", "f", ":", "lg", ".", "info", "(", "'Writing to '", "+", "str", "(", "filename", ")", ")", "csv_file", "=", "writer", "(", "f", ")", "csv_file", ".", "writerow", "(", "[", "'Wonambi v{}'", ".", "format", "(", "__version__", ")", "]", ")", "csv_file", ".", "writerow", "(", "[", "'FOOOF - POWER SPECTRUM MODEL'", "]", ")", "csv_file", ".", "writerow", "(", "''", ")", "csv_file", ".", "writerow", "(", "[", "'The model was run on the frequency range '", "'{} - {} Hz'", ".", "format", "(", "int", "(", "floor", "(", "fm", ".", "freq_range", "[", "0", "]", ")", ")", ",", "int", "(", "ceil", "(", "fm", ".", "freq_range", "[", "1", "]", ")", ")", ")", "]", ")", "csv_file", ".", "writerow", "(", "[", "'Frequency Resolution is {:1.2f} Hz'", ".", "format", "(", "fm", ".", "freq_res", ")", "]", ")", "csv_file", ".", "writerow", "(", "''", ")", "csv_file", ".", "writerow", "(", "[", "'Background Parameters (offset, '", "+", "(", "'knee, '", "if", "fm", ".", "background_mode", "==", "'knee'", "else", "''", ")", "+", "'slope): '", "+", "', '", ".", "join", "(", "[", "'{:2.4f}'", "]", "*", "len", "(", "fm", ".", "background_params_", ")", ")", ".", "format", "(", "*", "fm", ".", "background_params_", ")", "]", ")", "csv_file", ".", "writerow", "(", "''", ")", "csv_file", ".", "writerow", "(", "[", "'{} peaks were found:'", ".", "format", "(", "len", "(", "fm", ".", "peak_params_", ")", ")", "]", ")", "csv_file", ".", "writerow", "(", "''", ")", "csv_file", ".", "writerow", "(", "[", "'Index'", ",", "'CF'", ",", "'Amp'", ",", "'BW'", "]", ")", "for", "i", ",", "op", "in", "enumerate", "(", "fm", ".", "peak_params_", ")", ":", "csv_file", ".", "writerow", "(", "[", "i", ",", "op", "[", "0", "]", ",", "op", "[", "1", "]", ",", "op", "[", "2", "]", "]", ")", "csv_file", ".", "writerow", "(", "''", ")", "csv_file", ".", "writerow", "(", "[", "'Goodness of fit metrics:'", "]", ")", "csv_file", ".", "writerow", "(", "[", "'R^2 of model fit is {:5.4f}'", ".", "format", "(", "fm", ".", "r_squared_", ")", "]", ")", "csv_file", ".", "writerow", "(", "[", 
"'Root mean squared error is {:5.4f}'", ".", "format", "(", "fm", ".", "error_", ")", "]", ")", "csv_file", ".", "writerow", "(", "''", ")", "csv_file", ".", "writerow", "(", "[", "'Haller M, Donoghue T, Peterson E, Varma P, '", "'Sebastian P, Gao R, Noto T, Knight RT, '", "'Shestyuk A, Voytek B (2018) Parameterizing '", "'Neural Power Spectra. bioRxiv, 299859. doi: '", "'https://doi.org/10.1101/299859'", "]", ")" ]
Create FOOOF (fitting oscillations and 1/f) report. Parameters ---------- x : ndarray vector with frequencies y : ndarray vector with amplitudes
[ "Create", "FOOOF", "(", "fitting", "oscillations", "and", "1", "/", "f", ")", "report", "." ]
python
train
cfhamlet/os-docid
src/os_docid/x.py
https://github.com/cfhamlet/os-docid/blob/d3730aa118182f903b540ea738cd47c83f6b5e89/src/os_docid/x.py#L172-L229
def docid(url, encoding='ascii'): """Get DocID from URL. DocID generation depends on bytes of the URL string. So, if non-ascii charactors in the URL, encoding should be considered properly. Args: url (str or bytes): Pre-encoded bytes or string will be encoded with the 'encoding' argument. encoding (str, optional): Defaults to 'ascii'. Used to encode url argument if it is not pre-encoded into bytes. Returns: DocID: The DocID object. Examples: >>> from os_docid import docid >>> docid('http://www.google.com/') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('1d5920f4b44b27a8ed646a3334ca891fff90821feeb2b02a33a6f9fc8e5f3fcd') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('abc') NotImplementedError: Not supported data format """ if not isinstance(url, bytes): url = url.encode(encoding) parser = _URL_PARSER idx = 0 for _c in url: if _c not in _HEX: if not (_c == _SYM_MINUS and (idx == _DOMAINID_LENGTH or idx == _HOSTID_LENGTH + 1)): return parser.parse(url, idx) idx += 1 if idx > 4: break _l = len(url) if _l == _DOCID_LENGTH: parser = _DOCID_PARSER elif _l == _READABLE_DOCID_LENGTH \ and url[_DOMAINID_LENGTH] == _SYM_MINUS \ and url[_HOSTID_LENGTH + 1] == _SYM_MINUS: parser = _R_DOCID_PARSER else: parser = _PARSER return parser.parse(url, idx)
[ "def", "docid", "(", "url", ",", "encoding", "=", "'ascii'", ")", ":", "if", "not", "isinstance", "(", "url", ",", "bytes", ")", ":", "url", "=", "url", ".", "encode", "(", "encoding", ")", "parser", "=", "_URL_PARSER", "idx", "=", "0", "for", "_c", "in", "url", ":", "if", "_c", "not", "in", "_HEX", ":", "if", "not", "(", "_c", "==", "_SYM_MINUS", "and", "(", "idx", "==", "_DOMAINID_LENGTH", "or", "idx", "==", "_HOSTID_LENGTH", "+", "1", ")", ")", ":", "return", "parser", ".", "parse", "(", "url", ",", "idx", ")", "idx", "+=", "1", "if", "idx", ">", "4", ":", "break", "_l", "=", "len", "(", "url", ")", "if", "_l", "==", "_DOCID_LENGTH", ":", "parser", "=", "_DOCID_PARSER", "elif", "_l", "==", "_READABLE_DOCID_LENGTH", "and", "url", "[", "_DOMAINID_LENGTH", "]", "==", "_SYM_MINUS", "and", "url", "[", "_HOSTID_LENGTH", "+", "1", "]", "==", "_SYM_MINUS", ":", "parser", "=", "_R_DOCID_PARSER", "else", ":", "parser", "=", "_PARSER", "return", "parser", ".", "parse", "(", "url", ",", "idx", ")" ]
Get DocID from URL. DocID generation depends on bytes of the URL string. So, if non-ascii charactors in the URL, encoding should be considered properly. Args: url (str or bytes): Pre-encoded bytes or string will be encoded with the 'encoding' argument. encoding (str, optional): Defaults to 'ascii'. Used to encode url argument if it is not pre-encoded into bytes. Returns: DocID: The DocID object. Examples: >>> from os_docid import docid >>> docid('http://www.google.com/') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('1d5920f4b44b27a8ed646a3334ca891fff90821feeb2b02a33a6f9fc8e5f3fcd') 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd >>> docid('abc') NotImplementedError: Not supported data format
[ "Get", "DocID", "from", "URL", "." ]
python
train
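A minimal usage sketch for the docid() entry above, taken directly from the examples in its own docstring; the expected hash output is the one the docstring itself lists.

from os_docid import docid

print(docid('http://www.google.com/'))
# expected, per the docstring above:
# 1d5920f4b44b27a8-ed646a3334ca891f-ff90821feeb2b02a33a6f9fc8e5f3fcd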
Fizzadar/pyinfra
pyinfra/modules/apt.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/apt.py#L188-L213
def update(state, host, cache_time=None, touch_periodic=False):
    '''
    Updates apt repos.

    + cache_time: cache updates for this many seconds
    + touch_periodic: touch ``/var/lib/apt/periodic/update-success-stamp`` after update
    '''

    # If cache_time check when apt was last updated, prevent updates if within time
    if cache_time:
        # Ubuntu provides this handy file
        cache_info = host.fact.file(APT_UPDATE_FILENAME)

        # Time on files is not tz-aware, and will be the same tz as the server's time,
        # so we can safely remove the tzinfo from host.fact.date before comparison.
        host_cache_time = host.fact.date.replace(tzinfo=None) - timedelta(seconds=cache_time)

        if cache_info and cache_info['mtime'] and cache_info['mtime'] > host_cache_time:
            return

    yield 'apt-get update'

    # Some apt systems (Debian) have the /var/lib/apt/periodic directory, but
    # don't bother touching anything in there - so pyinfra does it, enabling
    # cache_time to work.
    if cache_time:
        yield 'touch {0}'.format(APT_UPDATE_FILENAME)
[ "def", "update", "(", "state", ",", "host", ",", "cache_time", "=", "None", ",", "touch_periodic", "=", "False", ")", ":", "# If cache_time check when apt was last updated, prevent updates if within time", "if", "cache_time", ":", "# Ubuntu provides this handy file", "cache_info", "=", "host", ".", "fact", ".", "file", "(", "APT_UPDATE_FILENAME", ")", "# Time on files is not tz-aware, and will be the same tz as the server's time,", "# so we can safely remove the tzinfo from host.fact.date before comparison.", "host_cache_time", "=", "host", ".", "fact", ".", "date", ".", "replace", "(", "tzinfo", "=", "None", ")", "-", "timedelta", "(", "seconds", "=", "cache_time", ")", "if", "cache_info", "and", "cache_info", "[", "'mtime'", "]", "and", "cache_info", "[", "'mtime'", "]", ">", "host_cache_time", ":", "return", "yield", "'apt-get update'", "# Some apt systems (Debian) have the /var/lib/apt/periodic directory, but", "# don't bother touching anything in there - so pyinfra does it, enabling", "# cache_time to work.", "if", "cache_time", ":", "yield", "'touch {0}'", ".", "format", "(", "APT_UPDATE_FILENAME", ")" ]
Updates apt repos.

+ cache_time: cache updates for this many seconds
+ touch_periodic: touch ``/var/lib/apt/periodic/update-success-stamp`` after update
[ "Updates", "apt", "repos", "." ]
python
train
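A hedged sketch of invoking the apt.update operation above from a pyinfra deploy file; the file name and the module-level call style are assumptions (the exact invocation depends on the pyinfra version), not part of this entry.

# deploy.py (hypothetical) -- run with: pyinfra <inventory> deploy.py
from pyinfra.modules import apt

# Refresh the apt cache, but skip the update if it already ran within the last hour
apt.update(cache_time=3600, touch_periodic=True)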
jspricke/python-icstask
icstask.py
https://github.com/jspricke/python-icstask/blob/0802233cca569c2174bd96aed0682d04a2a63790/icstask.py#L191-L267
def to_task(self, vtodo, project=None, uuid=None):
    """Add or modify a task from vTodo to Taskwarrior

    vtodo -- the vTodo to add
    project -- the project to add (see get_filesnames() as well)
    uuid -- the UID of the task in Taskwarrior
    """
    task = {}

    if project and project != 'all_projects' and project != 'unaffiliated':
        task['project'] = project
    if uuid:
        task['uuid'] = uuid
    if hasattr(vtodo, 'dtstamp'):
        task['entry'] = self._tw_timestamp(vtodo.dtstamp.value)
    if hasattr(vtodo, 'last_modified'):
        task['modified'] = self._tw_timestamp(vtodo.last_modified.value)
    if hasattr(vtodo, 'dtstart'):
        task['start'] = self._tw_timestamp(vtodo.dtstart.value)
    if hasattr(vtodo, 'due'):
        task['due'] = self._tw_timestamp(vtodo.due.value)
    if hasattr(vtodo, 'completed'):
        task['end'] = self._tw_timestamp(vtodo.completed.value)

    task['description'] = vtodo.summary.value

    if hasattr(vtodo, 'categories'):
        task['tags'] = vtodo.categories.value

    if hasattr(vtodo, 'priority'):
        priority = int(vtodo.priority.value)
        if priority < 3:
            task['priority'] = 'H'
        elif 3 < priority < 7:
            task['priority'] = 'M'
        else:
            task['priority'] = 'L'

    if hasattr(vtodo, 'description'):
        task['annotations'] = []
        for delta, comment in enumerate(vtodo.description.value.split('\n')):
            # Hack because Taskwarrior import doesn't accept multiple annotations with the same timestamp
            stamp = self._tw_timestamp(vtodo.dtstamp.value + timedelta(seconds=delta))
            if uuid in self._tasks.get(project, {}) and 'annotations' in self._tasks[project][uuid]:
                for annotation in self._tasks[project][uuid]['annotations']:
                    if annotation['description'] == comment:
                        stamp = annotation['entry']
                        break
            task['annotations'].append({'entry': stamp, 'description': comment})

    if hasattr(vtodo, 'status'):
        if vtodo.status.value == 'IN-PROCESS':
            task['status'] = 'pending'
            if 'start' not in task:
                task['start'] = self._tw_timestamp(vtodo.dtstamp.value)
        elif vtodo.status.value == 'NEEDS-ACTION':
            task['status'] = 'pending'
        elif vtodo.status.value == 'COMPLETED':
            task['status'] = 'completed'
            if 'end' not in task:
                task['end'] = self._tw_timestamp(vtodo.dtstamp.value)
        elif vtodo.status.value == 'CANCELLED':
            task['status'] = 'deleted'
            if 'end' not in task:
                task['end'] = self._tw_timestamp(vtodo.dtstamp.value)

    json = dumps(task, separators=(',', ':'), sort_keys=True)
    with self._lock:
        p = run(['task', 'rc.verbose=nothing', 'rc.recurrence.confirmation=no',
                 'rc.data.location={self._data_location}'.format(**locals()), 'import', '-'],
                input=json, encoding='utf-8', stdout=PIPE)
        uuid = findall('(?:add|mod) ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}) ', p.stdout)[0]
        self._update()
    return self._gen_uid(uuid)
[ "def", "to_task", "(", "self", ",", "vtodo", ",", "project", "=", "None", ",", "uuid", "=", "None", ")", ":", "task", "=", "{", "}", "if", "project", "and", "project", "!=", "'all_projects'", "and", "project", "!=", "'unaffiliated'", ":", "task", "[", "'project'", "]", "=", "project", "if", "uuid", ":", "task", "[", "'uuid'", "]", "=", "uuid", "if", "hasattr", "(", "vtodo", ",", "'dtstamp'", ")", ":", "task", "[", "'entry'", "]", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", "dtstamp", ".", "value", ")", "if", "hasattr", "(", "vtodo", ",", "'last_modified'", ")", ":", "task", "[", "'modified'", "]", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", "last_modified", ".", "value", ")", "if", "hasattr", "(", "vtodo", ",", "'dtstart'", ")", ":", "task", "[", "'start'", "]", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", "dtstart", ".", "value", ")", "if", "hasattr", "(", "vtodo", ",", "'due'", ")", ":", "task", "[", "'due'", "]", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", "due", ".", "value", ")", "if", "hasattr", "(", "vtodo", ",", "'completed'", ")", ":", "task", "[", "'end'", "]", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", "completed", ".", "value", ")", "task", "[", "'description'", "]", "=", "vtodo", ".", "summary", ".", "value", "if", "hasattr", "(", "vtodo", ",", "'categories'", ")", ":", "task", "[", "'tags'", "]", "=", "vtodo", ".", "categories", ".", "value", "if", "hasattr", "(", "vtodo", ",", "'priority'", ")", ":", "priority", "=", "int", "(", "vtodo", ".", "priority", ".", "value", ")", "if", "priority", "<", "3", ":", "task", "[", "'priority'", "]", "=", "'H'", "elif", "3", "<", "priority", "<", "7", ":", "task", "[", "'priority'", "]", "=", "'M'", "else", ":", "task", "[", "'priority'", "]", "=", "'L'", "if", "hasattr", "(", "vtodo", ",", "'description'", ")", ":", "task", "[", "'annotations'", "]", "=", "[", "]", "for", "delta", ",", "comment", "in", "enumerate", "(", "vtodo", ".", "description", ".", "value", ".", "split", "(", "'\\n'", ")", ")", ":", "# Hack because Taskwarrior import doesn't accept multiple annotations with the same timestamp", "stamp", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", "dtstamp", ".", "value", "+", "timedelta", "(", "seconds", "=", "delta", ")", ")", "if", "uuid", "in", "self", ".", "_tasks", ".", "get", "(", "project", ",", "{", "}", ")", "and", "'annotations'", "in", "self", ".", "_tasks", "[", "project", "]", "[", "uuid", "]", ":", "for", "annotation", "in", "self", ".", "_tasks", "[", "project", "]", "[", "uuid", "]", "[", "'annotations'", "]", ":", "if", "annotation", "[", "'description'", "]", "==", "comment", ":", "stamp", "=", "annotation", "[", "'entry'", "]", "break", "task", "[", "'annotations'", "]", ".", "append", "(", "{", "'entry'", ":", "stamp", ",", "'description'", ":", "comment", "}", ")", "if", "hasattr", "(", "vtodo", ",", "'status'", ")", ":", "if", "vtodo", ".", "status", ".", "value", "==", "'IN-PROCESS'", ":", "task", "[", "'status'", "]", "=", "'pending'", "if", "'start'", "not", "in", "task", ":", "task", "[", "'start'", "]", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", "dtstamp", ".", "value", ")", "elif", "vtodo", ".", "status", ".", "value", "==", "'NEEDS-ACTION'", ":", "task", "[", "'status'", "]", "=", "'pending'", "elif", "vtodo", ".", "status", ".", "value", "==", "'COMPLETED'", ":", "task", "[", "'status'", "]", "=", "'completed'", "if", "'end'", "not", "in", "task", ":", "task", "[", "'end'", "]", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", 
"dtstamp", ".", "value", ")", "elif", "vtodo", ".", "status", ".", "value", "==", "'CANCELLED'", ":", "task", "[", "'status'", "]", "=", "'deleted'", "if", "'end'", "not", "in", "task", ":", "task", "[", "'end'", "]", "=", "self", ".", "_tw_timestamp", "(", "vtodo", ".", "dtstamp", ".", "value", ")", "json", "=", "dumps", "(", "task", ",", "separators", "=", "(", "','", ",", "':'", ")", ",", "sort_keys", "=", "True", ")", "with", "self", ".", "_lock", ":", "p", "=", "run", "(", "[", "'task'", ",", "'rc.verbose=nothing'", ",", "'rc.recurrence.confirmation=no'", ",", "'rc.data.location={self._data_location}'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "'import'", ",", "'-'", "]", ",", "input", "=", "json", ",", "encoding", "=", "'utf-8'", ",", "stdout", "=", "PIPE", ")", "uuid", "=", "findall", "(", "'(?:add|mod) ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}) '", ",", "p", ".", "stdout", ")", "[", "0", "]", "self", ".", "_update", "(", ")", "return", "self", ".", "_gen_uid", "(", "uuid", ")" ]
Add or modify a task from vTodo to Taskwarrior

vtodo -- the vTodo to add
project -- the project to add (see get_filesnames() as well)
uuid -- the UID of the task in Taskwarrior
[ "Add", "or", "modify", "a", "task", "from", "vTodo", "to", "Taskwarrior", "vtodo", "--", "the", "vTodo", "to", "add", "project", "--", "the", "project", "to", "add", "(", "see", "get_filesnames", "()", "as", "well", ")", "uuid", "--", "the", "UID", "of", "the", "task", "in", "Taskwarrior" ]
python
train
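A hedged driver sketch for the to_task() entry above: parse a VTODO with vobject and import it into Taskwarrior. The IcsTask class name, its default constructor, and the sample calendar text are assumptions, and running it requires a local Taskwarrior setup.

import vobject
from icstask import IcsTask  # class name assumed from the repo/module name

ics = ('BEGIN:VCALENDAR\r\nVERSION:2.0\r\nBEGIN:VTODO\r\n'
       'DTSTAMP:20190101T120000Z\r\nSUMMARY:Write report\r\n'
       'STATUS:NEEDS-ACTION\r\nEND:VTODO\r\nEND:VCALENDAR\r\n')
todo = vobject.readOne(ics).vtodo
uid = IcsTask().to_task(todo, project='work')
print(uid)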
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/fda3bb39181437b6b8a0aa0185f21ae5f14385dd/autopep8.py#L1789-L1806
def get_diff_text(old, new, filename):
    """Return text of unified diff between old and new."""
    newline = '\n'
    diff = difflib.unified_diff(
        old, new,
        'original/' + filename,
        'fixed/' + filename,
        lineterm=newline)

    text = ''
    for line in diff:
        text += line

        # Work around missing newline (http://bugs.python.org/issue2142).
        if text and not line.endswith(newline):
            text += newline + r'\ No newline at end of file' + newline

    return text
[ "def", "get_diff_text", "(", "old", ",", "new", ",", "filename", ")", ":", "newline", "=", "'\\n'", "diff", "=", "difflib", ".", "unified_diff", "(", "old", ",", "new", ",", "'original/'", "+", "filename", ",", "'fixed/'", "+", "filename", ",", "lineterm", "=", "newline", ")", "text", "=", "''", "for", "line", "in", "diff", ":", "text", "+=", "line", "# Work around missing newline (http://bugs.python.org/issue2142).", "if", "text", "and", "not", "line", ".", "endswith", "(", "newline", ")", ":", "text", "+=", "newline", "+", "r'\\ No newline at end of file'", "+", "newline", "return", "text" ]
Return text of unified diff between old and new.
[ "Return", "text", "of", "unified", "diff", "between", "old", "and", "new", "." ]
python
train
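A small self-contained illustration of get_diff_text() above; the inputs are lists of newline-terminated lines, as difflib expects.

from autopep8 import get_diff_text

old = ['x = 1\n', 'print( x )\n']
new = ['x = 1\n', 'print(x)\n']
print(get_diff_text(old, new, 'example.py'))  # unified diff labelled original/ and fixed/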
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_zone.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_zone.py#L41-L52
def zoning_defined_configuration_zone_zone_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    zoning = ET.SubElement(config, "zoning", xmlns="urn:brocade.com:mgmt:brocade-zone")
    defined_configuration = ET.SubElement(zoning, "defined-configuration")
    zone = ET.SubElement(defined_configuration, "zone")
    zone_name = ET.SubElement(zone, "zone-name")
    zone_name.text = kwargs.pop('zone_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "zoning_defined_configuration_zone_zone_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "zoning", "=", "ET", ".", "SubElement", "(", "config", ",", "\"zoning\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-zone\"", ")", "defined_configuration", "=", "ET", ".", "SubElement", "(", "zoning", ",", "\"defined-configuration\"", ")", "zone", "=", "ET", ".", "SubElement", "(", "defined_configuration", ",", "\"zone\"", ")", "zone_name", "=", "ET", ".", "SubElement", "(", "zone", ",", "\"zone-name\"", ")", "zone_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'zone_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
aio-libs/aiohttp
aiohttp/cookiejar.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/cookiejar.py#L113-L186
def update_cookies(self,
                   cookies: LooseCookies,
                   response_url: URL=URL()) -> None:
    """Update cookies."""
    hostname = response_url.raw_host

    if not self._unsafe and is_ip_address(hostname):
        # Don't accept cookies from IPs
        return

    if isinstance(cookies, Mapping):
        cookies = cookies.items()  # type: ignore

    for name, cookie in cookies:
        if not isinstance(cookie, Morsel):
            tmp = SimpleCookie()
            tmp[name] = cookie  # type: ignore
            cookie = tmp[name]

        domain = cookie["domain"]

        # ignore domains with trailing dots
        if domain.endswith('.'):
            domain = ""
            del cookie["domain"]

        if not domain and hostname is not None:
            # Set the cookie's domain to the response hostname
            # and set its host-only-flag
            self._host_only_cookies.add((hostname, name))
            domain = cookie["domain"] = hostname

        if domain.startswith("."):
            # Remove leading dot
            domain = domain[1:]
            cookie["domain"] = domain

        if hostname and not self._is_domain_match(domain, hostname):
            # Setting cookies for different domains is not allowed
            continue

        path = cookie["path"]
        if not path or not path.startswith("/"):
            # Set the cookie's path to the response path
            path = response_url.path
            if not path.startswith("/"):
                path = "/"
            else:
                # Cut everything from the last slash to the end
                path = "/" + path[1:path.rfind("/")]
            cookie["path"] = path

        max_age = cookie["max-age"]
        if max_age:
            try:
                delta_seconds = int(max_age)
                self._expire_cookie(self._loop.time() + delta_seconds,
                                    domain, name)
            except ValueError:
                cookie["max-age"] = ""

        else:
            expires = cookie["expires"]
            if expires:
                expire_time = self._parse_date(expires)
                if expire_time:
                    self._expire_cookie(expire_time.timestamp(),
                                        domain, name)
                else:
                    cookie["expires"] = ""

        self._cookies[domain][name] = cookie

    self._do_expiration()
[ "def", "update_cookies", "(", "self", ",", "cookies", ":", "LooseCookies", ",", "response_url", ":", "URL", "=", "URL", "(", ")", ")", "->", "None", ":", "hostname", "=", "response_url", ".", "raw_host", "if", "not", "self", ".", "_unsafe", "and", "is_ip_address", "(", "hostname", ")", ":", "# Don't accept cookies from IPs", "return", "if", "isinstance", "(", "cookies", ",", "Mapping", ")", ":", "cookies", "=", "cookies", ".", "items", "(", ")", "# type: ignore", "for", "name", ",", "cookie", "in", "cookies", ":", "if", "not", "isinstance", "(", "cookie", ",", "Morsel", ")", ":", "tmp", "=", "SimpleCookie", "(", ")", "tmp", "[", "name", "]", "=", "cookie", "# type: ignore", "cookie", "=", "tmp", "[", "name", "]", "domain", "=", "cookie", "[", "\"domain\"", "]", "# ignore domains with trailing dots", "if", "domain", ".", "endswith", "(", "'.'", ")", ":", "domain", "=", "\"\"", "del", "cookie", "[", "\"domain\"", "]", "if", "not", "domain", "and", "hostname", "is", "not", "None", ":", "# Set the cookie's domain to the response hostname", "# and set its host-only-flag", "self", ".", "_host_only_cookies", ".", "add", "(", "(", "hostname", ",", "name", ")", ")", "domain", "=", "cookie", "[", "\"domain\"", "]", "=", "hostname", "if", "domain", ".", "startswith", "(", "\".\"", ")", ":", "# Remove leading dot", "domain", "=", "domain", "[", "1", ":", "]", "cookie", "[", "\"domain\"", "]", "=", "domain", "if", "hostname", "and", "not", "self", ".", "_is_domain_match", "(", "domain", ",", "hostname", ")", ":", "# Setting cookies for different domains is not allowed", "continue", "path", "=", "cookie", "[", "\"path\"", "]", "if", "not", "path", "or", "not", "path", ".", "startswith", "(", "\"/\"", ")", ":", "# Set the cookie's path to the response path", "path", "=", "response_url", ".", "path", "if", "not", "path", ".", "startswith", "(", "\"/\"", ")", ":", "path", "=", "\"/\"", "else", ":", "# Cut everything from the last slash to the end", "path", "=", "\"/\"", "+", "path", "[", "1", ":", "path", ".", "rfind", "(", "\"/\"", ")", "]", "cookie", "[", "\"path\"", "]", "=", "path", "max_age", "=", "cookie", "[", "\"max-age\"", "]", "if", "max_age", ":", "try", ":", "delta_seconds", "=", "int", "(", "max_age", ")", "self", ".", "_expire_cookie", "(", "self", ".", "_loop", ".", "time", "(", ")", "+", "delta_seconds", ",", "domain", ",", "name", ")", "except", "ValueError", ":", "cookie", "[", "\"max-age\"", "]", "=", "\"\"", "else", ":", "expires", "=", "cookie", "[", "\"expires\"", "]", "if", "expires", ":", "expire_time", "=", "self", ".", "_parse_date", "(", "expires", ")", "if", "expire_time", ":", "self", ".", "_expire_cookie", "(", "expire_time", ".", "timestamp", "(", ")", ",", "domain", ",", "name", ")", "else", ":", "cookie", "[", "\"expires\"", "]", "=", "\"\"", "self", ".", "_cookies", "[", "domain", "]", "[", "name", "]", "=", "cookie", "self", ".", "_do_expiration", "(", ")" ]
Update cookies.
[ "Update", "cookies", "." ]
python
train
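A hedged sketch of exercising update_cookies() through aiohttp's public CookieJar; the example URL and cookie value are placeholders.

import asyncio
from aiohttp import CookieJar
from yarl import URL

async def main():
    jar = CookieJar()
    jar.update_cookies({'session': 'abc123'}, response_url=URL('http://example.com/login'))
    print(list(jar))  # stored Morsel objects, filtered per the rules above

asyncio.run(main())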
bachya/pyairvisual
pyairvisual/api.py
https://github.com/bachya/pyairvisual/blob/1d4809998d87f85d53bb281e1eb54d43acee06fa/pyairvisual/api.py#L40-L45
async def nearest_city(
        self,
        latitude: Union[float, str] = None,
        longitude: Union[float, str] = None) -> dict:
    """Return data from nearest city (IP or coordinates)."""
    return await self._nearest('city', latitude, longitude)
[ "async", "def", "nearest_city", "(", "self", ",", "latitude", ":", "Union", "[", "float", ",", "str", "]", "=", "None", ",", "longitude", ":", "Union", "[", "float", ",", "str", "]", "=", "None", ")", "->", "dict", ":", "return", "await", "self", ".", "_nearest", "(", "'city'", ",", "latitude", ",", "longitude", ")" ]
Return data from nearest city (IP or coordinates).
[ "Return", "data", "from", "nearest", "city", "(", "IP", "or", "coordinates", ")", "." ]
python
train
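A hedged call sketch for nearest_city() above, assuming `api` is an already-constructed pyairvisual API object exposing this coroutine (client construction and API-key handling are omitted and version-dependent).

data = await api.nearest_city(latitude=40.7128, longitude=-74.0060)
print(data)  # AirVisual 'nearest city' payload for the given coordinates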
SBRG/ssbio
ssbio/protein/sequence/utils/blast.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/utils/blast.py#L135-L177
def print_run_bidirectional_blast(reference, other_genome, dbtype, outdir):
    """Write torque submission files for running bidirectional blast on a server and print execution command.

    Args:
        reference (str): Path to "reference" genome, aka your "base strain"
        other_genome (str): Path to other genome which will be BLASTed to the reference
        dbtype (str): "nucl" or "prot" - what format your genome files are in
        outdir (str): Path to folder where Torque scripts should be placed

    """
    # TODO: add force_rerun option

    if dbtype == 'nucl':
        command = 'blastn'
    elif dbtype == 'prot':
        command = 'blastp'
    else:
        raise ValueError('dbtype must be "nucl" or "prot"')

    r_folder, r_name, r_ext = utils.split_folder_and_path(reference)
    g_folder, g_name, g_ext = utils.split_folder_and_path(other_genome)

    # Reference vs genome
    r_vs_g_name = r_name + '_vs_' + g_name
    r_vs_g = r_vs_g_name + '_blast.out'
    if op.exists(op.join(outdir, r_vs_g)) and os.stat(op.join(outdir, r_vs_g)).st_size != 0:
        log.debug('{} vs {} BLAST already run'.format(r_name, g_name))
    else:
        cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, reference, g_name, r_vs_g)
        utils.write_torque_script(command=cmd, err=r_vs_g_name, out=r_vs_g_name, name=r_vs_g_name,
                                  outfile=op.join(outdir, r_vs_g_name) + '.sh',
                                  walltime='00:15:00', queue='regular')

    # Genome vs reference
    g_vs_r_name = g_name + '_vs_' + r_name
    g_vs_r = g_vs_r_name + '_blast.out'
    if op.exists(op.join(outdir, g_vs_r)) and os.stat(op.join(outdir, g_vs_r)).st_size != 0:
        log.debug('{} vs {} BLAST already run'.format(g_name, r_name))
    else:
        cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, other_genome, r_name, g_vs_r)
        utils.write_torque_script(command=cmd, err=g_vs_r_name, out=g_vs_r_name, name=g_vs_r_name,
                                  outfile=op.join(outdir, g_vs_r_name) + '.sh',
                                  walltime='00:15:00', queue='regular')
[ "def", "print_run_bidirectional_blast", "(", "reference", ",", "other_genome", ",", "dbtype", ",", "outdir", ")", ":", "# TODO: add force_rerun option", "if", "dbtype", "==", "'nucl'", ":", "command", "=", "'blastn'", "elif", "dbtype", "==", "'prot'", ":", "command", "=", "'blastp'", "else", ":", "raise", "ValueError", "(", "'dbtype must be \"nucl\" or \"prot\"'", ")", "r_folder", ",", "r_name", ",", "r_ext", "=", "utils", ".", "split_folder_and_path", "(", "reference", ")", "g_folder", ",", "g_name", ",", "g_ext", "=", "utils", ".", "split_folder_and_path", "(", "other_genome", ")", "# Reference vs genome", "r_vs_g_name", "=", "r_name", "+", "'_vs_'", "+", "g_name", "r_vs_g", "=", "r_vs_g_name", "+", "'_blast.out'", "if", "op", ".", "exists", "(", "op", ".", "join", "(", "outdir", ",", "r_vs_g", ")", ")", "and", "os", ".", "stat", "(", "op", ".", "join", "(", "outdir", ",", "r_vs_g", ")", ")", ".", "st_size", "!=", "0", ":", "log", ".", "debug", "(", "'{} vs {} BLAST already run'", ".", "format", "(", "r_name", ",", "g_name", ")", ")", "else", ":", "cmd", "=", "'{} -query {} -db {} -outfmt 6 -out {}'", ".", "format", "(", "command", ",", "reference", ",", "g_name", ",", "r_vs_g", ")", "utils", ".", "write_torque_script", "(", "command", "=", "cmd", ",", "err", "=", "r_vs_g_name", ",", "out", "=", "r_vs_g_name", ",", "name", "=", "r_vs_g_name", ",", "outfile", "=", "op", ".", "join", "(", "outdir", ",", "r_vs_g_name", ")", "+", "'.sh'", ",", "walltime", "=", "'00:15:00'", ",", "queue", "=", "'regular'", ")", "# Genome vs reference", "g_vs_r_name", "=", "g_name", "+", "'_vs_'", "+", "r_name", "g_vs_r", "=", "g_vs_r_name", "+", "'_blast.out'", "if", "op", ".", "exists", "(", "op", ".", "join", "(", "outdir", ",", "g_vs_r", ")", ")", "and", "os", ".", "stat", "(", "op", ".", "join", "(", "outdir", ",", "g_vs_r", ")", ")", ".", "st_size", "!=", "0", ":", "log", ".", "debug", "(", "'{} vs {} BLAST already run'", ".", "format", "(", "g_name", ",", "r_name", ")", ")", "else", ":", "cmd", "=", "'{} -query {} -db {} -outfmt 6 -out {}'", ".", "format", "(", "command", ",", "other_genome", ",", "r_name", ",", "g_vs_r", ")", "utils", ".", "write_torque_script", "(", "command", "=", "cmd", ",", "err", "=", "g_vs_r_name", ",", "out", "=", "g_vs_r_name", ",", "name", "=", "g_vs_r_name", ",", "outfile", "=", "op", ".", "join", "(", "outdir", ",", "g_vs_r_name", ")", "+", "'.sh'", ",", "walltime", "=", "'00:15:00'", ",", "queue", "=", "'regular'", ")" ]
Write torque submission files for running bidirectional blast on a server and print execution command.

Args:
    reference (str): Path to "reference" genome, aka your "base strain"
    other_genome (str): Path to other genome which will be BLASTed to the reference
    dbtype (str): "nucl" or "prot" - what format your genome files are in
    outdir (str): Path to folder where Torque scripts should be placed
[ "Write", "torque", "submission", "files", "for", "running", "bidirectional", "blast", "on", "a", "server", "and", "print", "execution", "command", "." ]
python
train
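A hedged invocation sketch for print_run_bidirectional_blast() above; the FASTA file names and output directory are placeholders, and the ssbio utils helpers it calls must be importable.

print_run_bidirectional_blast(
    reference='ref_strain.faa',
    other_genome='other_strain.faa',
    dbtype='prot',
    outdir='/tmp/blast_scripts')
# Writes <name>.sh Torque submission scripts into outdir unless the
# corresponding *_blast.out files already exist and are non-empty.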
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L7427-L7461
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    This method contains a workaround for `Bugzilla #1202917`_.

    Most entities are uniquely identified by an ID. ``System`` is a bit
    different: it has both an ID and a UUID, and the UUID is used to
    uniquely identify a ``System``.

    Return a path in the format ``katello/api/v2/systems/<uuid>`` if a UUID
    is available and:

    * ``which is None``, or
    * ``which == 'this'``.

    .. _Bugzilla #1202917:
        https://bugzilla.redhat.com/show_bug.cgi?id=1202917

    Finally, return a path in the form
    ``katello/api/v2/systems/<uuid>/subscriptions`` if ``'subscriptions'``
    is passed in.

    """
    if which == 'subscriptions':
        return '{0}/{1}/{2}'.format(
            super(System, self).path('base'),
            self.uuid,  # pylint:disable=no-member
            which,
        )
    if hasattr(self, 'uuid') and (which is None or which == 'self'):
        return '{0}/{1}'.format(
            super(System, self).path('base'),
            self.uuid  # pylint:disable=no-member
        )
    return super(System, self).path(which)
[ "def", "path", "(", "self", ",", "which", "=", "None", ")", ":", "if", "which", "==", "'subscriptions'", ":", "return", "'{0}/{1}/{2}'", ".", "format", "(", "super", "(", "System", ",", "self", ")", ".", "path", "(", "'base'", ")", ",", "self", ".", "uuid", ",", "# pylint:disable=no-member", "which", ",", ")", "if", "hasattr", "(", "self", ",", "'uuid'", ")", "and", "(", "which", "is", "None", "or", "which", "==", "'self'", ")", ":", "return", "'{0}/{1}'", ".", "format", "(", "super", "(", "System", ",", "self", ")", ".", "path", "(", "'base'", ")", ",", "self", ".", "uuid", "# pylint:disable=no-member", ")", "return", "super", "(", "System", ",", "self", ")", ".", "path", "(", "which", ")" ]
Extend ``nailgun.entity_mixins.Entity.path``.

This method contains a workaround for `Bugzilla #1202917`_.

Most entities are uniquely identified by an ID. ``System`` is a bit
different: it has both an ID and a UUID, and the UUID is used to
uniquely identify a ``System``.

Return a path in the format ``katello/api/v2/systems/<uuid>`` if a UUID
is available and:

* ``which is None``, or
* ``which == 'this'``.

.. _Bugzilla #1202917:
    https://bugzilla.redhat.com/show_bug.cgi?id=1202917

Finally, return a path in the form
``katello/api/v2/systems/<uuid>/subscriptions`` if ``'subscriptions'``
is passed in.
[ "Extend", "nailgun", ".", "entity_mixins", ".", "Entity", ".", "path", "." ]
python
train
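An illustrative sketch of the path() override above; the UUID is a placeholder and a configured default nailgun server connection is assumed.

from nailgun.entities import System

system = System(uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee')
print(system.path())                 # .../katello/api/v2/systems/<uuid>
print(system.path('subscriptions'))  # .../katello/api/v2/systems/<uuid>/subscriptions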
econ-ark/HARK
HARK/utilities.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/utilities.py#L524-L552
def approxMeanOneLognormal(N, sigma=1.0, **kwargs):
    '''
    Calculate a discrete approximation to a mean one lognormal distribution.
    Based on function approxLognormal; see that function's documentation for
    further notes.

    Parameters
    ----------
    N : int
        Size of discrete space vector to be returned.
    sigma : float
        standard deviation associated with underlying normal probability distribution.

    Returns
    -------
    X : np.array
        Discrete points for discrete probability mass function.
    pmf : np.array
        Probability associated with each point in X.

    Written by Nathan M. Palmer
    Based on Matab function "setup_shocks.m," from Chris Carroll's
    [Solution Methods for Microeconomic Dynamic Optimization Problems]
    (http://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit.
    Latest update: 01 May 2015
    '''
    mu_adj = - 0.5*sigma**2;
    pmf,X = approxLognormal(N=N, mu=mu_adj, sigma=sigma, **kwargs)
    return [pmf,X]
[ "def", "approxMeanOneLognormal", "(", "N", ",", "sigma", "=", "1.0", ",", "*", "*", "kwargs", ")", ":", "mu_adj", "=", "-", "0.5", "*", "sigma", "**", "2", "pmf", ",", "X", "=", "approxLognormal", "(", "N", "=", "N", ",", "mu", "=", "mu_adj", ",", "sigma", "=", "sigma", ",", "*", "*", "kwargs", ")", "return", "[", "pmf", ",", "X", "]" ]
Calculate a discrete approximation to a mean one lognormal distribution.
Based on function approxLognormal; see that function's documentation for
further notes.

Parameters
----------
N : int
    Size of discrete space vector to be returned.
sigma : float
    standard deviation associated with underlying normal probability distribution.

Returns
-------
X : np.array
    Discrete points for discrete probability mass function.
pmf : np.array
    Probability associated with each point in X.

Written by Nathan M. Palmer
Based on Matab function "setup_shocks.m," from Chris Carroll's
[Solution Methods for Microeconomic Dynamic Optimization Problems]
(http://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit.
Latest update: 01 May 2015
[ "Calculate", "a", "discrete", "approximation", "to", "a", "mean", "one", "lognormal", "distribution", ".", "Based", "on", "function", "approxLognormal", ";", "see", "that", "function", "s", "documentation", "for", "further", "notes", "." ]
python
train
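A quick numerical check of the mean-one property of approxMeanOneLognormal() above; the import path is taken from this entry's file location.

import numpy as np
from HARK.utilities import approxMeanOneLognormal

pmf, X = approxMeanOneLognormal(N=7, sigma=0.1)
print(np.dot(pmf, X))  # should be approximately 1.0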
pudo/googlesheets
googlesheets/sheet.py
https://github.com/pudo/googlesheets/blob/c38725d79bfe048c0519a674019ba313dfc5bfb0/googlesheets/sheet.py#L61-L70
def headers(self):
    """ Return the name of all headers currently defined for the
    table. """
    if self._headers is None:
        query = CellQuery()
        query.max_row = '1'
        feed = self._service.GetCellsFeed(self._ss.id, self.id, query=query)
        self._headers = feed.entry
    return [normalize_header(h.cell.text) for h in self._headers]
[ "def", "headers", "(", "self", ")", ":", "if", "self", ".", "_headers", "is", "None", ":", "query", "=", "CellQuery", "(", ")", "query", ".", "max_row", "=", "'1'", "feed", "=", "self", ".", "_service", ".", "GetCellsFeed", "(", "self", ".", "_ss", ".", "id", ",", "self", ".", "id", ",", "query", "=", "query", ")", "self", ".", "_headers", "=", "feed", ".", "entry", "return", "[", "normalize_header", "(", "h", ".", "cell", ".", "text", ")", "for", "h", "in", "self", ".", "_headers", "]" ]
Return the name of all headers currently defined for the table.
[ "Return", "the", "name", "of", "all", "headers", "currently", "defined", "for", "the", "table", "." ]
python
train
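A hedged usage sketch for the headers entry above, assuming `sheet` is an authenticated worksheet object from this library and that headers is exposed as a property (the decorator is not visible in this entry).

for name in sheet.headers:
    print(name)  # normalized first-row header names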
tanghaibao/goatools
goatools/obo_parser.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/obo_parser.py#L461-L474
def query_term(self, term, verbose=False):
    """Given a GO ID, return GO object."""
    if term not in self:
        sys.stderr.write("Term %s not found!\n" % term)
        return

    rec = self[term]
    if verbose:
        print(rec)
        sys.stderr.write("all parents: {}\n".format(
            repr(rec.get_all_parents())))
        sys.stderr.write("all children: {}\n".format(
            repr(rec.get_all_children())))
    return rec
[ "def", "query_term", "(", "self", ",", "term", ",", "verbose", "=", "False", ")", ":", "if", "term", "not", "in", "self", ":", "sys", ".", "stderr", ".", "write", "(", "\"Term %s not found!\\n\"", "%", "term", ")", "return", "rec", "=", "self", "[", "term", "]", "if", "verbose", ":", "print", "(", "rec", ")", "sys", ".", "stderr", ".", "write", "(", "\"all parents: {}\\n\"", ".", "format", "(", "repr", "(", "rec", ".", "get_all_parents", "(", ")", ")", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"all children: {}\\n\"", ".", "format", "(", "repr", "(", "rec", ".", "get_all_children", "(", ")", ")", ")", ")", "return", "rec" ]
Given a GO ID, return GO object.
[ "Given", "a", "GO", "ID", "return", "GO", "object", "." ]
python
train
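A typical lookup against a loaded DAG using query_term() above; the OBO file path is a placeholder for a locally downloaded ontology.

from goatools.obo_parser import GODag

godag = GODag('go-basic.obo')
rec = godag.query_term('GO:0008150', verbose=True)
if rec is not None:
    print(rec.id, rec.name)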
LordSputnik/mutagen
mutagen/ogg.py
https://github.com/LordSputnik/mutagen/blob/38e62c8dc35c72b16554f5dbe7c0fde91acc3411/mutagen/ogg.py#L454-L469
def load(self, filename):
    """Load file information from a filename."""

    self.filename = filename
    fileobj = open(filename, "rb")
    try:
        try:
            self.info = self._Info(fileobj)
            self.tags = self._Tags(fileobj, self.info)
            self.info._post_tags(fileobj)
        except error as e:
            reraise(self._Error, e, sys.exc_info()[2])
        except EOFError:
            raise self._Error("no appropriate stream found")
    finally:
        fileobj.close()
[ "def", "load", "(", "self", ",", "filename", ")", ":", "self", ".", "filename", "=", "filename", "fileobj", "=", "open", "(", "filename", ",", "\"rb\"", ")", "try", ":", "try", ":", "self", ".", "info", "=", "self", ".", "_Info", "(", "fileobj", ")", "self", ".", "tags", "=", "self", ".", "_Tags", "(", "fileobj", ",", "self", ".", "info", ")", "self", ".", "info", ".", "_post_tags", "(", "fileobj", ")", "except", "error", "as", "e", ":", "reraise", "(", "self", ".", "_Error", ",", "e", ",", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "except", "EOFError", ":", "raise", "self", ".", "_Error", "(", "\"no appropriate stream found\"", ")", "finally", ":", "fileobj", ".", "close", "(", ")" ]
Load file information from a filename.
[ "Load", "file", "information", "from", "a", "filename", "." ]
python
test
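The load() method above backs mutagen's concrete Ogg classes; a hedged example via OggVorbis, with a placeholder file name.

from mutagen.oggvorbis import OggVorbis

audio = OggVorbis('song.ogg')  # the constructor ends up calling load()
print(audio.info.length, audio.tags)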
konstantint/matplotlib-venn
matplotlib_venn/_arc.py
https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_arc.py#L134-L151
def segment_area(self):
    '''Returns the area of the corresponding arc segment.

    >>> Arc((0,0), 2, 0, 360, True).segment_area()
    12.566...
    >>> Arc((0,0), 2, 0, 180, True).segment_area()
    6.283...
    >>> Arc((0,0), 2, 0, 90, True).segment_area()
    1.14159...
    >>> Arc((0,0), 2, 0, 90, False).segment_area()
    11.42477796...
    >>> Arc((0,0), 2, 0, 0, False).segment_area()
    0.0
    >>> Arc((0, 9), 1, 89.99, 90, False).segment_area()
    3.1415...
    '''
    theta = self.length_radians()
    return self.radius**2 / 2 * (theta - np.sin(theta))
[ "def", "segment_area", "(", "self", ")", ":", "theta", "=", "self", ".", "length_radians", "(", ")", "return", "self", ".", "radius", "**", "2", "/", "2", "*", "(", "theta", "-", "np", ".", "sin", "(", "theta", ")", ")" ]
Returns the area of the corresponding arc segment.

>>> Arc((0,0), 2, 0, 360, True).segment_area()
12.566...
>>> Arc((0,0), 2, 0, 180, True).segment_area()
6.283...
>>> Arc((0,0), 2, 0, 90, True).segment_area()
1.14159...
>>> Arc((0,0), 2, 0, 90, False).segment_area()
11.42477796...
>>> Arc((0,0), 2, 0, 0, False).segment_area()
0.0
>>> Arc((0, 9), 1, 89.99, 90, False).segment_area()
3.1415...
[ "Returns", "the", "area", "of", "the", "corresponding", "arc", "segment", ".", ">>>", "Arc", "((", "0", "0", ")", "2", "0", "360", "True", ")", ".", "segment_area", "()", "12", ".", "566", "...", ">>>", "Arc", "((", "0", "0", ")", "2", "0", "180", "True", ")", ".", "segment_area", "()", "6", ".", "283", "...", ">>>", "Arc", "((", "0", "0", ")", "2", "0", "90", "True", ")", ".", "segment_area", "()", "1", ".", "14159", "...", ">>>", "Arc", "((", "0", "0", ")", "2", "0", "90", "False", ")", ".", "segment_area", "()", "11", ".", "42477796", "...", ">>>", "Arc", "((", "0", "0", ")", "2", "0", "0", "False", ")", ".", "segment_area", "()", "0", ".", "0", ">>>", "Arc", "((", "0", "9", ")", "1", "89", ".", "99", "90", "False", ")", ".", "segment_area", "()", "3", ".", "1415", "..." ]
python
train
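Reproducing one of the doctest cases above (a half circle of radius 2); the import path follows this entry's file location.

from matplotlib_venn._arc import Arc

print(Arc((0, 0), 2, 0, 180, True).segment_area())  # ~6.283, i.e. pi * 2**2 / 2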
ellmetha/django-machina
machina/apps/forum_conversation/managers.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/managers.py#L13-L17
def get_queryset(self):
    """ Returns all the approved topics or posts. """
    qs = super().get_queryset()
    qs = qs.filter(approved=True)
    return qs
[ "def", "get_queryset", "(", "self", ")", ":", "qs", "=", "super", "(", ")", ".", "get_queryset", "(", ")", "qs", "=", "qs", ".", "filter", "(", "approved", "=", "True", ")", "return", "qs" ]
Returns all the approved topics or posts.
[ "Returns", "all", "the", "approved", "topics", "or", "posts", "." ]
python
train
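A hedged usage sketch for the approved-content manager above; the `approved_objects` attribute name is an assumption about how this manager is attached to the forum models.

approved_topics = Topic.approved_objects.all()  # only rows with approved=True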
brian-rose/climlab
climlab/radiation/rrtm/utils.py
https://github.com/brian-rose/climlab/blob/eae188a2ae9308229b8cbb8fe0b65f51b50ee1e6/climlab/radiation/rrtm/utils.py#L54-L85
def _climlab_to_rrtm(field):
    '''Prepare field with proper dimension order.
    RRTM code expects arrays with (ncol, nlay)
    and with pressure decreasing from surface at element 0

    climlab grid dimensions are any of:
        - (num_lev,) --> (1, num_lev)
        - (num_lat, num_lev) --> (num_lat, num_lev)
        - (num_lat, num_lon, num_lev) --> (num_lat*num_lon, num_lev)

        But lat-lon grids not yet supported here!

    Case single column
    '''
    # Make this work just with 1D (KM,) arrays
    #  (KM,) --> (1, nlay)
    try:
        # Flip along the last axis to reverse the pressure order
        field = field[..., ::-1]
    except:
        if np.isscalar(field):
            return field
        else:
            raise ValueError('field must be array_like or scalar.')
    shape = field.shape
    if len(shape) == 1:  # (num_lev)
        # Need to append an extra dimension for singleton horizontal ncol
        return field[np.newaxis, ...]
    elif len(shape) == 2:  # (num_lat, num_lev)
        return field
    elif len(shape) > 2:
        raise ValueError('lat-lon grids not yet supported here.')
[ "def", "_climlab_to_rrtm", "(", "field", ")", ":", "# Make this work just with 1D (KM,) arrays", "# (KM,) --> (1, nlay)", "try", ":", "# Flip along the last axis to reverse the pressure order", "field", "=", "field", "[", "...", ",", ":", ":", "-", "1", "]", "except", ":", "if", "np", ".", "isscalar", "(", "field", ")", ":", "return", "field", "else", ":", "raise", "ValueError", "(", "'field must be array_like or scalar.'", ")", "shape", "=", "field", ".", "shape", "if", "len", "(", "shape", ")", "==", "1", ":", "# (num_lev)", "# Need to append an extra dimension for singleton horizontal ncol", "return", "field", "[", "np", ".", "newaxis", ",", "...", "]", "elif", "len", "(", "shape", ")", "==", "2", ":", "# (num_lat, num_lev)", "return", "field", "elif", "len", "(", "shape", ")", ">", "2", ":", "raise", "ValueError", "(", "'lat-lon grids not yet supported here.'", ")" ]
Prepare field with proper dimension order. RRTM code expects arrays with (ncol, nlay) and with pressure decreasing from surface at element 0 climlab grid dimensions are any of: - (num_lev,) --> (1, num_lev) - (num_lat, num_lev) --> (num_lat, num_lev) - (num_lat, num_lon, num_lev) --> (num_lat*num_lon, num_lev) But lat-lon grids not yet supported here! Case single column
[ "Prepare", "field", "with", "proper", "dimension", "order", ".", "RRTM", "code", "expects", "arrays", "with", "(", "ncol", "nlay", ")", "and", "with", "pressure", "decreasing", "from", "surface", "at", "element", "0" ]
python
train
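A shape/orientation check for _climlab_to_rrtm() above using plain numpy; the import path follows this entry's file location (the leading underscore marks it as an internal helper).

import numpy as np
from climlab.radiation.rrtm.utils import _climlab_to_rrtm

lev = np.array([10., 500., 1000.])               # element 0 at the top of the atmosphere
print(_climlab_to_rrtm(lev))                     # [[1000. 500. 10.]] -- shape (1, num_lev), pressure order reversed
print(_climlab_to_rrtm(np.ones((4, 3))).shape)   # (4, 3): lat-by-lev arrays keep their shape, levels reversed
print(_climlab_to_rrtm(2.5))                     # scalars are returned unchanged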