id                int32     (values 0 to 252k)
repo              string    (length 7 to 55)
path              string    (length 4 to 127)
func_name         string    (length 1 to 88)
original_string   string    (length 75 to 19.8k)
language          string    (1 distinct value)
code              string    (length 75 to 19.8k)
code_tokens       sequence
docstring         string    (length 3 to 17.3k)
docstring_tokens  sequence
sha               string    (length 40 to 40)
url               string    (length 87 to 242)
1,000
ethereum/pyrlp
rlp/codec.py
decode
def decode(rlp, sedes=None, strict=True, recursive_cache=False, **kwargs):
    """Decode an RLP encoded object.

    If the deserialized result `obj` has an attribute :attr:`_cached_rlp`
    (e.g. if `sedes` is a subclass of :class:`rlp.Serializable`) it will be
    set to `rlp`, which will improve performance on subsequent
    :func:`rlp.encode` calls. Bear in mind however that `obj` needs to make
    sure that this value is updated whenever one of its fields changes or
    prevent such changes entirely (:class:`rlp.sedes.Serializable` does the
    latter).

    :param sedes: an object implementing a function ``deserialize(code)``
                  which will be applied after decoding, or ``None`` if no
                  deserialization should be performed
    :param \*\*kwargs: additional keyword arguments that will be passed to
                       the deserializer
    :param strict: if false inputs that are longer than necessary don't
                   cause an exception
    :returns: the decoded and maybe deserialized Python object
    :raises: :exc:`rlp.DecodingError` if the input string does not end after
             the root item and `strict` is true
    :raises: :exc:`rlp.DeserializationError` if the deserialization fails
    """
    if not is_bytes(rlp):
        raise DecodingError('Can only decode RLP bytes, got type %s'
                            % type(rlp).__name__, rlp)
    try:
        item, per_item_rlp, end = consume_item(rlp, 0)
    except IndexError:
        raise DecodingError('RLP string too short', rlp)
    if end != len(rlp) and strict:
        msg = 'RLP string ends with {} superfluous bytes'.format(len(rlp) - end)
        raise DecodingError(msg, rlp)
    if sedes:
        obj = sedes.deserialize(item, **kwargs)
        if is_sequence(obj) or hasattr(obj, '_cached_rlp'):
            _apply_rlp_cache(obj, per_item_rlp, recursive_cache)
        return obj
    else:
        return item
python
bb898f8056da3973204c699621350bf9565e43df
https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L209-L242
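A minimal round-trip sketch of this API; the encoded payload is invented, and `rlp.encode`/`rlp.decode` are the package's public entry points:

# Hedged usage sketch for rlp.decode (values are illustrative).
import rlp

encoded = rlp.encode([b'cat', [b'dog', b'mouse']])
# Without a sedes, decode returns the raw item: bytes and nested lists of bytes.
decoded = rlp.decode(encoded)
assert decoded == [b'cat', [b'dog', b'mouse']]

# strict=False tolerates trailing bytes instead of raising rlp.DecodingError.
decoded_loose = rlp.decode(encoded + b'\x00', strict=False)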
1,001
ethereum/pyrlp
rlp/codec.py
infer_sedes
def infer_sedes(obj):
    """Try to find a sedes object suitable for a given Python object.

    The sedes objects considered are `obj`'s class, `big_endian_int` and
    `binary`. If `obj` is a sequence, a :class:`rlp.sedes.List` will be
    constructed recursively.

    :param obj: the python object for which to find a sedes object
    :raises: :exc:`TypeError` if no appropriate sedes could be found
    """
    if is_sedes(obj.__class__):
        return obj.__class__
    elif not isinstance(obj, bool) and isinstance(obj, int) and obj >= 0:
        return big_endian_int
    elif BinaryClass.is_valid_type(obj):
        return binary
    elif not isinstance(obj, str) and isinstance(obj, collections.Sequence):
        return List(map(infer_sedes, obj))
    elif isinstance(obj, bool):
        return boolean
    elif isinstance(obj, str):
        return text
    msg = 'Did not find sedes handling type {}'.format(type(obj).__name__)
    raise TypeError(msg)
python
bb898f8056da3973204c699621350bf9565e43df
https://github.com/ethereum/pyrlp/blob/bb898f8056da3973204c699621350bf9565e43df/rlp/codec.py#L261-L284
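A sketch of what the inference returns for common inputs, assuming `infer_sedes` is imported from `rlp.codec` (the path above) and the sedes singletons from `rlp.sedes`:

# Hedged sketch: expected sedes inference for common Python values.
from rlp.codec import infer_sedes
from rlp.sedes import big_endian_int, binary

assert infer_sedes(42) is big_endian_int   # non-negative int
assert infer_sedes(b'abc') is binary       # byte strings
sedes = infer_sedes([1, b'x'])             # sequences become a List sedes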
1,002
graphite-project/carbonate
carbonate/config.py
Config.destinations
def destinations(self, cluster='main'):
    """Return a list of destinations for a cluster."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    destinations = self.config.get(cluster, 'destinations')
    return destinations.replace(' ', '').split(',')
python
b876a85b321fbd7c18a6721bed2e7807b79b4929
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L21-L27
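A hypothetical usage sketch; the config path and the `[main]` section contents below are assumptions for illustration, not taken from the source:

# Hedged sketch: a carbonate config with a 'main' cluster section might look like
#
#   [main]
#   destinations = 192.168.9.13:2004:a, 192.168.9.15:2004:b
#   replication_factor = 2
#
# and would be consumed roughly like this:
from carbonate.config import Config

config = Config('/opt/graphite/conf/carbonate.conf')  # path is an assumption
print(config.destinations('main'))
# -> ['192.168.9.13:2004:a', '192.168.9.15:2004:b']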
1,003
graphite-project/carbonate
carbonate/config.py
Config.replication_factor
def replication_factor(self, cluster='main'):
    """Return the replication factor for a cluster as an integer."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    return int(self.config.get(cluster, 'replication_factor'))
python
b876a85b321fbd7c18a6721bed2e7807b79b4929
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L29-L34
1,004
graphite-project/carbonate
carbonate/config.py
Config.ssh_user
def ssh_user(self, cluster='main'):
    """Return the ssh user for a cluster or current user if undefined."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    try:
        return self.config.get(cluster, 'ssh_user')
    except NoOptionError:
        return pwd.getpwuid(os.getuid()).pw_name
python
b876a85b321fbd7c18a6721bed2e7807b79b4929
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L36-L44
1,005
graphite-project/carbonate
carbonate/config.py
Config.whisper_lock_writes
def whisper_lock_writes(self, cluster='main'):
    """Lock whisper files during carbon-sync."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    try:
        return bool(self.config.get(cluster, 'whisper_lock_writes'))
    except NoOptionError:
        return False
python
b876a85b321fbd7c18a6721bed2e7807b79b4929
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L46-L54
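Note that `ConfigParser.get` returns raw strings, so any non-empty option value is truthy here, including the string 'false'; a stdlib sketch of the pitfall:

# Hedged sketch: bool() over a raw ConfigParser string is True for any
# non-empty value, including 'false'; only a missing option yields False.
assert bool('true') is True
assert bool('false') is True   # still True: non-empty string
assert bool('') is False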
1,006
graphite-project/carbonate
carbonate/config.py
Config.hashing_type
def hashing_type(self, cluster='main'):
    """Hashing type of cluster."""
    if not self.config.has_section(cluster):
        raise SystemExit("Cluster '%s' not defined in %s"
                         % (cluster, self.config_file))
    hashing_type = 'carbon_ch'
    try:
        return self.config.get(cluster, 'hashing_type')
    except NoOptionError:
        return hashing_type
python
b876a85b321fbd7c18a6721bed2e7807b79b4929
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/config.py#L56-L65
1,007
graphite-project/carbonate
carbonate/fill.py
fill_archives
def fill_archives(src, dst, startFrom, endAt=0, overwrite=False,
                  lock_writes=False):
    """
    Fills gaps in dst using data from src.

    src is the path as a string
    dst is the path as a string
    startFrom is the latest timestamp (archives are read backward)
    endAt is the earliest timestamp (archives are read backward).
          if absent, we take the earliest timestamp in the archive
    overwrite will write all non-null points from src to dst.
    lock using whisper lock if true
    """
    if lock_writes is False:
        whisper.LOCK = False
    elif whisper.CAN_LOCK and lock_writes is True:
        whisper.LOCK = True

    header = whisper.info(dst)
    archives = header['archives']
    archives = sorted(archives, key=lambda t: t['retention'])

    for archive in archives:
        fromTime = max(endAt, time.time() - archive['retention'])
        if fromTime >= startFrom:
            continue

        (timeInfo, values) = whisper.fetch(dst, fromTime, untilTime=startFrom)
        (start, end, step) = timeInfo
        gapstart = None
        for value in values:
            has_value = bool(value and not overwrite)
            if not has_value and not gapstart:
                gapstart = start
            elif has_value and gapstart:
                if (start - gapstart) >= archive['secondsPerPoint']:
                    fill(src, dst, gapstart - step, start)
                gapstart = None
            start += step
        # fill if this gap continues to the end
        if gapstart:
            fill(src, dst, gapstart - step, end - step)

        # The next archive only needs to be filled up to the latest point
        # in time we updated.
        startFrom = fromTime
python
b876a85b321fbd7c18a6721bed2e7807b79b4929
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/fill.py#L88-L133
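The gap-scan loop is the heart of this function; a standalone sketch of the same idea on invented data, simplified to track runs of None values:

# Hedged sketch of the gap-detection idea used above, on invented data.
def find_gaps(values, start, step):
    """Yield (gap_start, gap_end) ranges for runs of None values."""
    gapstart = None
    for value in values:
        if value is None and gapstart is None:
            gapstart = start
        elif value is not None and gapstart is not None:
            yield (gapstart, start)
            gapstart = None
        start += step
    if gapstart is not None:
        yield (gapstart, start)

print(list(find_gaps([1, None, None, 4, None], start=0, step=60)))
# -> [(60, 180), (240, 300)]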
1,008
graphite-project/carbonate
carbonate/stale.py
data
def data(path, hours, offset=0):
    """
    Does the metric at ``path`` have any whisper data newer than ``hours``?

    If ``offset`` is not None, view the ``hours`` prior to ``offset`` hours
    ago, instead of from right now.
    """
    now = time.time()
    end = now - _to_sec(offset)  # Will default to now
    start = end - _to_sec(hours)
    _data = whisper.fetch(path, start, end)
    return all(x is None for x in _data[-1])
python
b876a85b321fbd7c18a6721bed2e7807b79b4929
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/stale.py#L11-L22
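`_to_sec` is a private helper; assuming it converts hours to seconds, the fetch window reduces to this arithmetic:

# Hedged sketch of the fetch window computed above (assumes _to_sec(h) == h * 3600).
import time

hours, offset = 24, 0
end = time.time() - offset * 3600   # offset=0 -> "now"
start = end - hours * 3600          # look back `hours` from `end`
# whisper.fetch(path, start, end) would then return (timeInfo, values);
# the function reports staleness when every value in that window is None.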
1,009
graphite-project/carbonate
carbonate/stale.py
stat
def stat(path, hours, offset=None):
    """
    Has the metric file at ``path`` been modified since ``hours`` ago?

    .. note::
        ``offset`` is only for compatibility with ``data()`` and is ignored.
    """
    return os.stat(path).st_mtime < (time.time() - _to_sec(hours))
python
b876a85b321fbd7c18a6721bed2e7807b79b4929
https://github.com/graphite-project/carbonate/blob/b876a85b321fbd7c18a6721bed2e7807b79b4929/carbonate/stale.py#L25-L32
1,010
holgern/pyedflib
util/refguide_check.py
short_path
def short_path(path, cwd=None):
    """
    Return relative or absolute path name, whichever is shortest.
    """
    if not isinstance(path, str):
        return path
    if cwd is None:
        cwd = os.getcwd()
    abspath = os.path.abspath(path)
    relpath = os.path.relpath(path, cwd)
    if len(abspath) <= len(relpath):
        return abspath
    return relpath
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/util/refguide_check.py#L74-L86
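Since this helper is pure standard library, the choice it makes is easy to demo; the path is illustrative:

# Hedged demo of the idea: pick whichever spelling of a path is shorter.
import os

path = 'util/refguide_check.py'
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, os.getcwd())
print(abspath if len(abspath) <= len(relpath) else relpath)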
1,011
holgern/pyedflib
util/refguide_check.py
check_rest
def check_rest(module, names, dots=True):
    """
    Check reStructuredText formatting of docstrings

    Returns: [(name, success_flag, output), ...]
    """
    try:
        skip_types = (dict, str, unicode, float, int)
    except NameError:
        # python 3
        skip_types = (dict, str, float, int)

    results = []

    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]

    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)

        if obj is None:
            results.append((full_name, False, "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            continue

        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                text = str(get_doc_object(obj))
            except:
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue

        m = re.search("([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue

        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            src_file = None

        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name

        results.append((full_name,) +
                       validate_rst_syntax(text, file_full_name, dots=dots))

    return results
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/util/refguide_check.py#L310-L372
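The non-printable-character scan inside this function is self-contained enough to show alone; the sample docstring is invented:

# Hedged sketch: the regex flags every control character except newline
# (\x0a); an unescaped '\b' in a non-raw docstring survives as '\x08' and
# is exactly the kind of artifact this catches.
import re

text = "Matches a word \x08oundary"   # invented docstring with a stray backspace
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
    print("Docstring contains a non-printable character %r! "
          "Maybe forgot r\"\"\"?" % (m.group(1),))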
1,012
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.update_header
def update_header(self):
    """
    Updates header to edffile struct
    """
    set_technician(self.handle, du(self.technician))
    set_recording_additional(self.handle, du(self.recording_additional))
    set_patientname(self.handle, du(self.patient_name))
    set_patientcode(self.handle, du(self.patient_code))
    set_patient_additional(self.handle, du(self.patient_additional))
    set_equipment(self.handle, du(self.equipment))
    set_admincode(self.handle, du(self.admincode))
    if isinstance(self.gender, int):
        set_gender(self.handle, self.gender)
    elif self.gender == "Male":
        set_gender(self.handle, 0)
    elif self.gender == "Female":
        set_gender(self.handle, 1)

    set_datarecord_duration(self.handle, self.duration)
    set_number_of_annotation_signals(self.handle, self.number_of_annotations)
    set_startdatetime(self.handle, self.recording_start_time.year,
                      self.recording_start_time.month,
                      self.recording_start_time.day,
                      self.recording_start_time.hour,
                      self.recording_start_time.minute,
                      self.recording_start_time.second)
    if isstr(self.birthdate):
        if self.birthdate != '':
            birthday = datetime.strptime(self.birthdate, '%d %b %Y').date()
            set_birthdate(self.handle, birthday.year, birthday.month,
                          birthday.day)
    else:
        set_birthdate(self.handle, self.birthdate.year,
                      self.birthdate.month, self.birthdate.day)

    for i in np.arange(self.n_channels):
        set_samplefrequency(self.handle, i, self.channels[i]['sample_rate'])
        set_physical_maximum(self.handle, i, self.channels[i]['physical_max'])
        set_physical_minimum(self.handle, i, self.channels[i]['physical_min'])
        set_digital_maximum(self.handle, i, self.channels[i]['digital_max'])
        set_digital_minimum(self.handle, i, self.channels[i]['digital_min'])
        set_label(self.handle, i, du(self.channels[i]['label']))
        set_physical_dimension(self.handle, i, du(self.channels[i]['dimension']))
        set_transducer(self.handle, i, du(self.channels[i]['transducer']))
        set_prefilter(self.handle, i, du(self.channels[i]['prefilter']))
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L137-L175
1,013
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setHeader
def setHeader(self, fileHeader):
    """
    Sets the file header
    """
    self.technician = fileHeader["technician"]
    self.recording_additional = fileHeader["recording_additional"]
    self.patient_name = fileHeader["patientname"]
    self.patient_additional = fileHeader["patient_additional"]
    self.patient_code = fileHeader["patientcode"]
    self.equipment = fileHeader["equipment"]
    self.admincode = fileHeader["admincode"]
    self.gender = fileHeader["gender"]
    self.recording_start_time = fileHeader["startdate"]
    self.birthdate = fileHeader["birthdate"]
    self.update_header()
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L177-L191
1,014
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setSignalHeader
def setSignalHeader(self, edfsignal, channel_info):
    """
    Sets the parameter for signal edfsignal.

    channel_info should be a dict with these values:

        'label' : channel label (string, <= 16 characters, must be unique)
        'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
        'sample_rate' : sample frequency in hertz (int)
        'physical_max' : maximum physical value (float)
        'physical_min' : minimum physical value (float)
        'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
        'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
    """
    if edfsignal < 0 or edfsignal > self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal] = channel_info
    self.update_header()
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L193-L211
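A usage sketch assembled from the dict keys the docstring lists; the file name and signal values are invented, and the `EdfWriter` constructor arguments are assumed from pyedflib's usual API:

# Hedged sketch: one channel header with exactly the keys documented above.
import pyedflib

channel_info = {
    'label': 'EEG Fpz-Cz',      # <= 16 chars, unique
    'dimension': 'uV',          # <= 8 chars
    'sample_rate': 256,
    'physical_max': 250.0,
    'physical_min': -250.0,
    'digital_max': 32767,
    'digital_min': -32768,
}

writer = pyedflib.EdfWriter('test.edf', 1, file_type=pyedflib.FILETYPE_EDFPLUS)
writer.setSignalHeader(0, channel_info)
writer.close()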
1,015
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setSignalHeaders
def setSignalHeaders(self, signalHeaders):
    """
    Sets the parameter for all signals

    Parameters
    ----------
    signalHeaders : array_like
        containing dict with
            'label' : str
                channel label (string, <= 16 characters, must be unique)
            'dimension' : str
                physical dimension (e.g., mV) (string, <= 8 characters)
            'sample_rate' : int
                sample frequency in hertz
            'physical_max' : float
                maximum physical value
            'physical_min' : float
                minimum physical value
            'digital_max' : int
                maximum digital value (-2**15 <= x < 2**15)
            'digital_min' : int
                minimum digital value (-2**15 <= x < 2**15)
    """
    for edfsignal in np.arange(self.n_channels):
        self.channels[edfsignal] = signalHeaders[edfsignal]
    self.update_header()
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L213-L238
1,016
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.set_number_of_annotation_signals
def set_number_of_annotation_signals(self, number_of_annotations):
    """
    Sets the number of annotation signals. The default value is 1
    This function is optional and can be called only after opening a
    file in writemode and before the first sample write action
    Normally you don't need to change the default value. Only when the
    number of annotations you want to write is more than the number of
    seconds of the duration of the recording, you can use this function
    to increase the storage space for annotations
    Minimum is 1, maximum is 64

    Parameters
    ----------
    number_of_annotations : integer
        Sets the number of annotation signals
    """
    number_of_annotations = max((min((int(number_of_annotations), 64)), 1))
    self.number_of_annotations = number_of_annotations
    self.update_header()
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L364-L381
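The nested max/min is a clamp into [1, 64]; a quick check of the same expression:

# Hedged sketch: the max(min(...)) expression clamps into [1, 64].
def clamp(n):
    return max((min((int(n), 64)), 1))   # same expression as above

assert clamp(0) == 1
assert clamp(10) == 10
assert clamp(100) == 64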
1,017
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setStartdatetime
def setStartdatetime(self, recording_start_time):
    """
    Sets the recording start Time

    Parameters
    ----------
    recording_start_time: datetime object
        Sets the recording start Time
    """
    if isinstance(recording_start_time, datetime):
        self.recording_start_time = recording_start_time
    else:
        self.recording_start_time = datetime.strptime(
            recording_start_time, "%d %b %Y %H:%M:%S")
    self.update_header()
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L383-L396
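When a string is passed instead of a `datetime`, it must match `'%d %b %Y %H:%M:%S'`; an accepted value (the date is invented) looks like:

# Hedged sketch: the string form this setter parses.
from datetime import datetime

start = datetime.strptime("04 Mar 2020 12:30:00", "%d %b %Y %H:%M:%S")
print(start)  # 2020-03-04 12:30:00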
1,018
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setSamplefrequency
def setSamplefrequency(self, edfsignal, samplefrequency):
    """
    Sets the samplefrequency of signal edfsignal.

    Notes
    -----
    This function is required for every signal and can be called only
    after opening a file in writemode and before the first sample write
    action.
    """
    if edfsignal < 0 or edfsignal > self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['sample_rate'] = samplefrequency
    self.update_header()
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L421-L432
1,019
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setPhysicalMaximum
def setPhysicalMaximum(self, edfsignal, physical_maximum):
    """
    Sets the physical_maximum of signal edfsignal.

    Parameters
    ----------
    edfsignal: int
        signal number
    physical_maximum: float
        Sets the physical maximum

    Notes
    -----
    This function is required for every signal and can be called only
    after opening a file in writemode and before the first sample write
    action.
    """
    if edfsignal < 0 or edfsignal > self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['physical_max'] = physical_maximum
    self.update_header()
python
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L434-L452
1,020
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setPhysicalMinimum
def setPhysicalMinimum(self, edfsignal, physical_minimum):
    """
    Sets the physical_minimum of signal edfsignal.

    Parameters
    ----------
    edfsignal: int
        signal number
    physical_minimum: float
        Sets the physical minimum

    Notes
    -----
    This function is required for every signal and can be called only
    after opening a file in writemode and before the first sample write
    action.
    """
    if (edfsignal < 0 or edfsignal > self.n_channels):
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['physical_min'] = physical_minimum
    self.update_header()
python
def setPhysicalMinimum(self, edfsignal, physical_minimum): """ Sets the physical_minimum of signal edfsignal. Parameters ---------- edfsignal: int signal number physical_minimum: float Sets the physical minimum Notes ----- This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action. """ if (edfsignal < 0 or edfsignal > self.n_channels): raise ChannelDoesNotExist(edfsignal) self.channels[edfsignal]['physical_min'] = physical_minimum self.update_header()
[ "def", "setPhysicalMinimum", "(", "self", ",", "edfsignal", ",", "physical_minimum", ")", ":", "if", "(", "edfsignal", "<", "0", "or", "edfsignal", ">", "self", ".", "n_channels", ")", ":", "raise", "ChannelDoesNotExist", "(", "edfsignal", ")", "self", ".", "channels", "[", "edfsignal", "]", "[", "'physical_min'", "]", "=", "physical_minimum", "self", ".", "update_header", "(", ")" ]
Sets the physical_minimum of signal edfsignal.

Parameters
----------
edfsignal: int
    signal number
physical_minimum: float
    Sets the physical minimum

Notes
-----
This function is required for every signal and can be called only after
opening a file in writemode and before the first sample write action.
[ "Sets", "the", "physical_minimum", "of", "signal", "edfsignal", "." ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L454-L472
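The two setters above are halves of one calibration step: together with the digital limits they define the affine mapping the writer uses to quantize samples. A minimal, hedged sketch of how they are typically called on a fresh EdfWriter (the file name and signal are illustrative assumptions, not taken from these records):

import numpy as np
import pyedflib

# One-channel EDF+ file; all limit setters must run before the first write.
w = pyedflib.EdfWriter('sketch.edf', 1, file_type=pyedflib.FILETYPE_EDFPLUS)
w.setLabel(0, 'sine')
w.setSampleFrequency(0, 100)
w.setPhysicalMaximum(0, 100.0)
w.setPhysicalMinimum(0, -100.0)
w.setDigitalMaximum(0, 32767)
w.setDigitalMinimum(0, -32768)
# One data record: 100 samples at 100 Hz = 1 second of signal.
w.writePhysicalSamples(100.0 * np.sin(np.linspace(0.0, 2.0 * np.pi, 100)))
w.close()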
1,021
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setDigitalMaximum
def setDigitalMaximum(self, edfsignal, digital_maximum):
    """
    Sets the digital_maximum of signal edfsignal.
    Usually, the value 32767 is used for EDF+ and 8388607 for BDF+.

    Parameters
    ----------
    edfsignal : int
        signal number
    digital_maximum : int
        Sets the maximum digital value

    Notes
    -----
    This function is optional and can be called only after opening a
    file in writemode and before the first sample write action.
    """
    if (edfsignal < 0 or edfsignal > self.n_channels):
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['digital_max'] = digital_maximum
    self.update_header()
python
def setDigitalMaximum(self, edfsignal, digital_maximum):
    """
    Sets the digital_maximum of signal edfsignal.
    Usually, the value 32767 is used for EDF+ and 8388607 for BDF+.

    Parameters
    ----------
    edfsignal : int
        signal number
    digital_maximum : int
        Sets the maximum digital value

    Notes
    -----
    This function is optional and can be called only after opening a
    file in writemode and before the first sample write action.
    """
    if (edfsignal < 0 or edfsignal > self.n_channels):
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['digital_max'] = digital_maximum
    self.update_header()
[ "def", "setDigitalMaximum", "(", "self", ",", "edfsignal", ",", "digital_maximum", ")", ":", "if", "(", "edfsignal", "<", "0", "or", "edfsignal", ">", "self", ".", "n_channels", ")", ":", "raise", "ChannelDoesNotExist", "(", "edfsignal", ")", "self", ".", "channels", "[", "edfsignal", "]", "[", "'digital_max'", "]", "=", "digital_maximum", "self", ".", "update_header", "(", ")" ]
Sets the digital_maximum of signal edfsignal.
Usually, the value 32767 is used for EDF+ and 8388607 for BDF+.

Parameters
----------
edfsignal : int
    signal number
digital_maximum : int
    Sets the maximum digital value

Notes
-----
This function is optional and can be called only after opening a file in
writemode and before the first sample write action.
[ "Sets", "the", "samplefrequency", "of", "signal", "edfsignal", ".", "Usually", "the", "value", "32767", "is", "used", "for", "EDF", "+", "and", "8388607", "for", "BDF", "+", "." ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L474-L493
1,022
holgern/pyedflib
pyedflib/edfwriter.py
EdfWriter.setTransducer
def setTransducer(self, edfsignal, transducer):
    """
    Sets the transducer of signal edfsignal

    :param edfsignal: int
    :param transducer: str

    Notes
    -----
    This function is optional for every signal and can be called only
    after opening a file in writemode and before the first sample write
    action.
    """
    if (edfsignal < 0 or edfsignal > self.n_channels):
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['transducer'] = transducer
    self.update_header()
python
def setTransducer(self, edfsignal, transducer):
    """
    Sets the transducer of signal edfsignal

    :param edfsignal: int
    :param transducer: str

    Notes
    -----
    This function is optional for every signal and can be called only
    after opening a file in writemode and before the first sample write
    action.
    """
    if (edfsignal < 0 or edfsignal > self.n_channels):
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['transducer'] = transducer
    self.update_header()
[ "def", "setTransducer", "(", "self", ",", "edfsignal", ",", "transducer", ")", ":", "if", "(", "edfsignal", "<", "0", "or", "edfsignal", ">", "self", ".", "n_channels", ")", ":", "raise", "ChannelDoesNotExist", "(", "edfsignal", ")", "self", ".", "channels", "[", "edfsignal", "]", "[", "'transducer'", "]", "=", "transducer", "self", ".", "update_header", "(", ")" ]
Sets the transducer of signal edfsignal

:param edfsignal: int
:param transducer: str

Notes
-----
This function is optional for every signal and can be called only after
opening a file in writemode and before the first sample write action.
[ "Sets", "the", "transducer", "of", "signal", "edfsignal" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfwriter.py#L552-L566
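Rather than calling each setter individually, pyedflib's writer also exposes a setSignalHeader(edfsignal, dict) convenience whose keys mirror the getSignalHeader layout documented further down in this file. A hedged sketch (the key names and values here are assumptions based on that layout):

import numpy as np
import pyedflib

w = pyedflib.EdfWriter('sketch.edf', 1, file_type=pyedflib.FILETYPE_EDFPLUS)
w.setSignalHeader(0, {
    'label': 'EEG Fpz-Cz',
    'dimension': 'uV',
    'sample_rate': 100,
    'physical_max': 100.0,
    'physical_min': -100.0,
    'digital_max': 32767,          # 32767 for EDF+, 8388607 for BDF+
    'digital_min': -32768,
    'transducer': 'AgAgCl electrode',
    'prefilter': 'HP:0.1Hz LP:75Hz',
})
w.writePhysicalSamples(np.zeros(100))  # one 1-second data record
w.close()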
1,023
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.readAnnotations
def readAnnotations(self):
    """
    Annotations from an edf-file

    Parameters
    ----------
    None
    """
    annot = self.read_annotation()
    annot = np.array(annot)
    if (annot.shape[0] == 0):
        return np.array([]), np.array([]), np.array([])

    ann_time = self._get_float(annot[:, 0])
    ann_text = annot[:, 2]
    ann_text_out = ["" for x in range(len(annot[:, 1]))]
    for i in np.arange(len(annot[:, 1])):
        ann_text_out[i] = self._convert_string(ann_text[i])
        if annot[i, 1] == '':
            annot[i, 1] = '-1'
    ann_duration = self._get_float(annot[:, 1])
    return ann_time / 10000000, ann_duration, np.array(ann_text_out)
python
def readAnnotations(self):
    """
    Annotations from an edf-file

    Parameters
    ----------
    None
    """
    annot = self.read_annotation()
    annot = np.array(annot)
    if (annot.shape[0] == 0):
        return np.array([]), np.array([]), np.array([])

    ann_time = self._get_float(annot[:, 0])
    ann_text = annot[:, 2]
    ann_text_out = ["" for x in range(len(annot[:, 1]))]
    for i in np.arange(len(annot[:, 1])):
        ann_text_out[i] = self._convert_string(ann_text[i])
        if annot[i, 1] == '':
            annot[i, 1] = '-1'
    ann_duration = self._get_float(annot[:, 1])
    return ann_time / 10000000, ann_duration, np.array(ann_text_out)
[ "def", "readAnnotations", "(", "self", ")", ":", "annot", "=", "self", ".", "read_annotation", "(", ")", "annot", "=", "np", ".", "array", "(", "annot", ")", "if", "(", "annot", ".", "shape", "[", "0", "]", "==", "0", ")", ":", "return", "np", ".", "array", "(", "[", "]", ")", ",", "np", ".", "array", "(", "[", "]", ")", ",", "np", ".", "array", "(", "[", "]", ")", "ann_time", "=", "self", ".", "_get_float", "(", "annot", "[", ":", ",", "0", "]", ")", "ann_text", "=", "annot", "[", ":", ",", "2", "]", "ann_text_out", "=", "[", "\"\"", "for", "x", "in", "range", "(", "len", "(", "annot", "[", ":", ",", "1", "]", ")", ")", "]", "for", "i", "in", "np", ".", "arange", "(", "len", "(", "annot", "[", ":", ",", "1", "]", ")", ")", ":", "ann_text_out", "[", "i", "]", "=", "self", ".", "_convert_string", "(", "ann_text", "[", "i", "]", ")", "if", "annot", "[", "i", ",", "1", "]", "==", "''", ":", "annot", "[", "i", ",", "1", "]", "=", "'-1'", "ann_duration", "=", "self", ".", "_get_float", "(", "annot", "[", ":", ",", "1", "]", ")", "return", "ann_time", "/", "10000000", ",", "ann_duration", ",", "np", ".", "array", "(", "ann_text_out", ")" ]
Annotations from an edf-file

Parameters
----------
None
[ "Annotations", "from", "a", "edf", "-", "file" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L44-L64
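readAnnotations() returns three parallel arrays: onsets (already scaled down to seconds by the division above), durations, and texts. A small sketch over the test file bundled with pyedflib, following the f._close() convention the docstrings in this file use:

import pyedflib

f = pyedflib.data.test_generator()
onsets, durations, texts = f.readAnnotations()
for onset, duration, text in zip(onsets, durations, texts):
    print('%10.3f s  %8.3f s  %s' % (onset, duration, text))
f._close()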
1,024
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getHeader
def getHeader(self):
    """
    Returns the file header as dict

    Parameters
    ----------
    None
    """
    return {"technician": self.getTechnician(),
            "recording_additional": self.getRecordingAdditional(),
            "patientname": self.getPatientName(),
            "patient_additional": self.getPatientAdditional(),
            "patientcode": self.getPatientCode(),
            "equipment": self.getEquipment(),
            "admincode": self.getAdmincode(),
            "gender": self.getGender(),
            "startdate": self.getStartdatetime(),
            "birthdate": self.getBirthdate()}
python
def getHeader(self):
    """
    Returns the file header as dict

    Parameters
    ----------
    None
    """
    return {"technician": self.getTechnician(),
            "recording_additional": self.getRecordingAdditional(),
            "patientname": self.getPatientName(),
            "patient_additional": self.getPatientAdditional(),
            "patientcode": self.getPatientCode(),
            "equipment": self.getEquipment(),
            "admincode": self.getAdmincode(),
            "gender": self.getGender(),
            "startdate": self.getStartdatetime(),
            "birthdate": self.getBirthdate()}
[ "def", "getHeader", "(", "self", ")", ":", "return", "{", "\"technician\"", ":", "self", ".", "getTechnician", "(", ")", ",", "\"recording_additional\"", ":", "self", ".", "getRecordingAdditional", "(", ")", ",", "\"patientname\"", ":", "self", ".", "getPatientName", "(", ")", ",", "\"patient_additional\"", ":", "self", ".", "getPatientAdditional", "(", ")", ",", "\"patientcode\"", ":", "self", ".", "getPatientCode", "(", ")", ",", "\"equipment\"", ":", "self", ".", "getEquipment", "(", ")", ",", "\"admincode\"", ":", "self", ".", "getAdmincode", "(", ")", ",", "\"gender\"", ":", "self", ".", "getGender", "(", ")", ",", "\"startdate\"", ":", "self", ".", "getStartdatetime", "(", ")", ",", "\"birthdate\"", ":", "self", ".", "getBirthdate", "(", ")", "}" ]
Returns the file header as dict

Parameters
----------
None
[ "Returns", "the", "file", "header", "as", "dict" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L92-L104
1,025
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getSignalHeader
def getSignalHeader(self, chn):
    """
    Returns the header of one signal as a dict

    Parameters
    ----------
    chn : int
        channel number
    """
    return {'label': self.getLabel(chn),
            'dimension': self.getPhysicalDimension(chn),
            'sample_rate': self.getSampleFrequency(chn),
            'physical_max': self.getPhysicalMaximum(chn),
            'physical_min': self.getPhysicalMinimum(chn),
            'digital_max': self.getDigitalMaximum(chn),
            'digital_min': self.getDigitalMinimum(chn),
            'prefilter': self.getPrefilter(chn),
            'transducer': self.getTransducer(chn)}
python
def getSignalHeader(self, chn):
    """
    Returns the header of one signal as a dict

    Parameters
    ----------
    chn : int
        channel number
    """
    return {'label': self.getLabel(chn),
            'dimension': self.getPhysicalDimension(chn),
            'sample_rate': self.getSampleFrequency(chn),
            'physical_max': self.getPhysicalMaximum(chn),
            'physical_min': self.getPhysicalMinimum(chn),
            'digital_max': self.getDigitalMaximum(chn),
            'digital_min': self.getDigitalMinimum(chn),
            'prefilter': self.getPrefilter(chn),
            'transducer': self.getTransducer(chn)}
[ "def", "getSignalHeader", "(", "self", ",", "chn", ")", ":", "return", "{", "'label'", ":", "self", ".", "getLabel", "(", "chn", ")", ",", "'dimension'", ":", "self", ".", "getPhysicalDimension", "(", "chn", ")", ",", "'sample_rate'", ":", "self", ".", "getSampleFrequency", "(", "chn", ")", ",", "'physical_max'", ":", "self", ".", "getPhysicalMaximum", "(", "chn", ")", ",", "'physical_min'", ":", "self", ".", "getPhysicalMinimum", "(", "chn", ")", ",", "'digital_max'", ":", "self", ".", "getDigitalMaximum", "(", "chn", ")", ",", "'digital_min'", ":", "self", ".", "getDigitalMinimum", "(", "chn", ")", ",", "'prefilter'", ":", "self", ".", "getPrefilter", "(", "chn", ")", ",", "'transducer'", ":", "self", ".", "getTransducer", "(", "chn", ")", "}" ]
Returns the header of one signal as a dict

Parameters
----------
chn : int
    channel number
[ "Returns", "the", "header", "of", "one", "signal", "as", "dicts" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L106-L122
1,026
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getSignalHeaders
def getSignalHeaders(self):
    """
    Returns the header of all signals as array of dicts

    Parameters
    ----------
    None
    """
    signalHeader = []
    for chn in np.arange(self.signals_in_file):
        signalHeader.append(self.getSignalHeader(chn))
    return signalHeader
python
def getSignalHeaders(self):
    """
    Returns the header of all signals as array of dicts

    Parameters
    ----------
    None
    """
    signalHeader = []
    for chn in np.arange(self.signals_in_file):
        signalHeader.append(self.getSignalHeader(chn))
    return signalHeader
[ "def", "getSignalHeaders", "(", "self", ")", ":", "signalHeader", "=", "[", "]", "for", "chn", "in", "np", ".", "arange", "(", "self", ".", "signals_in_file", ")", ":", "signalHeader", ".", "append", "(", "self", ".", "getSignalHeader", "(", "chn", ")", ")", "return", "signalHeader" ]
Returns the header of all signals as array of dicts

Parameters
----------
None
[ "Returns", "the", "header", "of", "all", "signals", "as", "array", "of", "dicts" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L124-L135
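getHeader() and getSignalHeaders() together reconstruct everything needed to describe a file. A sketch that dumps both, using only key names built by the two methods above:

import pyedflib

f = pyedflib.data.test_generator()
print(f.getHeader()['startdate'])
for chn, hdr in enumerate(f.getSignalHeaders()):
    print(chn, hdr['label'], hdr['sample_rate'], hdr['dimension'])
f._close()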
1,027
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getStartdatetime
def getStartdatetime(self):
    """
    Returns the date and starttime as datetime object

    Parameters
    ----------
    None

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getStartdatetime()
    datetime.datetime(2011, 4, 4, 12, 57, 2)
    >>> f._close()
    >>> del f
    """
    return datetime(self.startdate_year, self.startdate_month,
                    self.startdate_day, self.starttime_hour,
                    self.starttime_minute, self.starttime_second)
python
def getStartdatetime(self):
    """
    Returns the date and starttime as datetime object

    Parameters
    ----------
    None

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getStartdatetime()
    datetime.datetime(2011, 4, 4, 12, 57, 2)
    >>> f._close()
    >>> del f
    """
    return datetime(self.startdate_year, self.startdate_month,
                    self.startdate_day, self.starttime_hour,
                    self.starttime_minute, self.starttime_second)
[ "def", "getStartdatetime", "(", "self", ")", ":", "return", "datetime", "(", "self", ".", "startdate_year", ",", "self", ".", "startdate_month", ",", "self", ".", "startdate_day", ",", "self", ".", "starttime_hour", ",", "self", ".", "starttime_minute", ",", "self", ".", "starttime_second", ")" ]
Returns the date and starttime as datetime object

Parameters
----------
None

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getStartdatetime()
datetime.datetime(2011, 4, 4, 12, 57, 2)
>>> f._close()
>>> del f
[ "Returns", "the", "date", "and", "starttime", "as", "datetime", "object" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L317-L336
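Combined with the file duration, the start datetime gives the recording's end time. A hedged sketch (it assumes the reader's file_duration attribute, in seconds, which pyedflib exposes alongside these getters):

from datetime import timedelta

import pyedflib

f = pyedflib.data.test_generator()
start = f.getStartdatetime()
end = start + timedelta(seconds=f.file_duration)
print('recording: %s -> %s' % (start, end))
f._close()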
1,028
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getBirthdate
def getBirthdate(self, string=True):
    """
    Returns the birthdate as a string or, if string=False, as a
    datetime object

    Parameters
    ----------
    string : bool
        if True (default), return the birthdate string as stored in the
        header; otherwise parse it into a datetime object

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getBirthdate()=='30 jun 1969'
    True
    >>> f._close()
    >>> del f
    """
    if string:
        return self._convert_string(self.birthdate.rstrip())
    else:
        return datetime.strptime(
            self._convert_string(self.birthdate.rstrip()), "%d %b %Y")
python
def getBirthdate(self, string=True):
    """
    Returns the birthdate as a string or, if string=False, as a
    datetime object

    Parameters
    ----------
    string : bool
        if True (default), return the birthdate string as stored in the
        header; otherwise parse it into a datetime object

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getBirthdate()=='30 jun 1969'
    True
    >>> f._close()
    >>> del f
    """
    if string:
        return self._convert_string(self.birthdate.rstrip())
    else:
        return datetime.strptime(
            self._convert_string(self.birthdate.rstrip()), "%d %b %Y")
[ "def", "getBirthdate", "(", "self", ",", "string", "=", "True", ")", ":", "if", "string", ":", "return", "self", ".", "_convert_string", "(", "self", ".", "birthdate", ".", "rstrip", "(", ")", ")", "else", ":", "return", "datetime", ".", "strptime", "(", "self", ".", "_convert_string", "(", "self", ".", "birthdate", ".", "rstrip", "(", ")", ")", ",", "\"%d %b %Y\"", ")" ]
Returns the birthdate as a string or, if string=False, as a datetime
object

Parameters
----------
string : bool
    if True (default), return the birthdate string as stored in the
    header; otherwise parse it into a datetime object

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getBirthdate()=='30 jun 1969'
True
>>> f._close()
>>> del f
[ "Returns", "the", "birthdate", "as", "string", "object" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L338-L360
1,029
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getSampleFrequencies
def getSampleFrequencies(self):
    """
    Returns samplefrequencies of all signals.

    Parameters
    ----------
    None

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> all(f.getSampleFrequencies()==200.0)
    True
    >>> f._close()
    >>> del f
    """
    return np.array([round(self.samplefrequency(chn))
                     for chn in np.arange(self.signals_in_file)])
python
def getSampleFrequencies(self):
    """
    Returns samplefrequencies of all signals.

    Parameters
    ----------
    None

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> all(f.getSampleFrequencies()==200.0)
    True
    >>> f._close()
    >>> del f
    """
    return np.array([round(self.samplefrequency(chn))
                     for chn in np.arange(self.signals_in_file)])
[ "def", "getSampleFrequencies", "(", "self", ")", ":", "return", "np", ".", "array", "(", "[", "round", "(", "self", ".", "samplefrequency", "(", "chn", ")", ")", "for", "chn", "in", "np", ".", "arange", "(", "self", ".", "signals_in_file", ")", "]", ")" ]
Returns samplefrequencies of all signals.

Parameters
----------
None

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> all(f.getSampleFrequencies()==200.0)
True
>>> f._close()
>>> del f
[ "Returns", "samplefrequencies", "of", "all", "signals", "." ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L362-L381
1,030
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getSampleFrequency
def getSampleFrequency(self, chn):
    """
    Returns the samplefrequency of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getSampleFrequency(0)==200.0
    True
    >>> f._close()
    >>> del f
    """
    if 0 <= chn < self.signals_in_file:
        return round(self.samplefrequency(chn))
    else:
        return 0
python
def getSampleFrequency(self, chn):
    """
    Returns the samplefrequency of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getSampleFrequency(0)==200.0
    True
    >>> f._close()
    >>> del f
    """
    if 0 <= chn < self.signals_in_file:
        return round(self.samplefrequency(chn))
    else:
        return 0
[ "def", "getSampleFrequency", "(", "self", ",", "chn", ")", ":", "if", "0", "<=", "chn", "<", "self", ".", "signals_in_file", ":", "return", "round", "(", "self", ".", "samplefrequency", "(", "chn", ")", ")", "else", ":", "return", "0" ]
Returns the samplefrequency of signal edfsignal.

Parameters
----------
chn : int
    channel number

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getSampleFrequency(0)==200.0
True
>>> f._close()
>>> del f
[ "Returns", "the", "samplefrequency", "of", "signal", "edfsignal", "." ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L383-L405
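The per-channel frequency is what turns sample indices into a time axis. Minimal sketch:

import numpy as np
import pyedflib

f = pyedflib.data.test_generator()
fs = f.getSampleFrequency(0)      # 200.0 for the bundled test file
x = f.readSignal(0)
t = np.arange(len(x)) / fs        # time axis in seconds
f._close()
print(t[-1])                      # last sample time of channel 0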
1,031
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getPhysicalMaximum
def getPhysicalMaximum(self, chn=None):
    """
    Returns the maximum physical value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getPhysicalMaximum(0)==1000.0
    True
    >>> f._close()
    >>> del f
    """
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.physical_max(chn)
        else:
            return 0
    else:
        physMax = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            physMax[i] = self.physical_max(i)
        return physMax
python
def getPhysicalMaximum(self, chn=None):
    """
    Returns the maximum physical value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getPhysicalMaximum(0)==1000.0
    True
    >>> f._close()
    >>> del f
    """
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.physical_max(chn)
        else:
            return 0
    else:
        physMax = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            physMax[i] = self.physical_max(i)
        return physMax
[ "def", "getPhysicalMaximum", "(", "self", ",", "chn", "=", "None", ")", ":", "if", "chn", "is", "not", "None", ":", "if", "0", "<=", "chn", "<", "self", ".", "signals_in_file", ":", "return", "self", ".", "physical_max", "(", "chn", ")", "else", ":", "return", "0", "else", ":", "physMax", "=", "np", ".", "zeros", "(", "self", ".", "signals_in_file", ")", "for", "i", "in", "np", ".", "arange", "(", "self", ".", "signals_in_file", ")", ":", "physMax", "[", "i", "]", "=", "self", ".", "physical_max", "(", "i", ")", "return", "physMax" ]
Returns the maximum physical value of signal edfsignal.

Parameters
----------
chn : int
    channel number

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMaximum(0)==1000.0
True
>>> f._close()
>>> del f
[ "Returns", "the", "maximum", "physical", "value", "of", "signal", "edfsignal", "." ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L476-L504
1,032
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getPhysicalMinimum
def getPhysicalMinimum(self, chn=None):
    """
    Returns the minimum physical value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getPhysicalMinimum(0)==-1000.0
    True
    >>> f._close()
    >>> del f
    """
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.physical_min(chn)
        else:
            return 0
    else:
        physMin = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            physMin[i] = self.physical_min(i)
        return physMin
python
def getPhysicalMinimum(self, chn=None):
    """
    Returns the minimum physical value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getPhysicalMinimum(0)==-1000.0
    True
    >>> f._close()
    >>> del f
    """
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.physical_min(chn)
        else:
            return 0
    else:
        physMin = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            physMin[i] = self.physical_min(i)
        return physMin
[ "def", "getPhysicalMinimum", "(", "self", ",", "chn", "=", "None", ")", ":", "if", "chn", "is", "not", "None", ":", "if", "0", "<=", "chn", "<", "self", ".", "signals_in_file", ":", "return", "self", ".", "physical_min", "(", "chn", ")", "else", ":", "return", "0", "else", ":", "physMin", "=", "np", ".", "zeros", "(", "self", ".", "signals_in_file", ")", "for", "i", "in", "np", ".", "arange", "(", "self", ".", "signals_in_file", ")", ":", "physMin", "[", "i", "]", "=", "self", ".", "physical_min", "(", "i", ")", "return", "physMin" ]
Returns the minimum physical value of signal edfsignal.

Parameters
----------
chn : int
    channel number

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getPhysicalMinimum(0)==-1000.0
True
>>> f._close()
>>> del f
[ "Returns", "the", "minimum", "physical", "value", "of", "signal", "edfsignal", "." ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L506-L534
1,033
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getDigitalMaximum
def getDigitalMaximum(self, chn=None):
    """
    Returns the maximum digital value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getDigitalMaximum(0)
    32767
    >>> f._close()
    >>> del f
    """
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.digital_max(chn)
        else:
            return 0
    else:
        digMax = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            digMax[i] = self.digital_max(i)
        return digMax
python
def getDigitalMaximum(self, chn=None):
    """
    Returns the maximum digital value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getDigitalMaximum(0)
    32767
    >>> f._close()
    >>> del f
    """
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.digital_max(chn)
        else:
            return 0
    else:
        digMax = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            digMax[i] = self.digital_max(i)
        return digMax
[ "def", "getDigitalMaximum", "(", "self", ",", "chn", "=", "None", ")", ":", "if", "chn", "is", "not", "None", ":", "if", "0", "<=", "chn", "<", "self", ".", "signals_in_file", ":", "return", "self", ".", "digital_max", "(", "chn", ")", "else", ":", "return", "0", "else", ":", "digMax", "=", "np", ".", "zeros", "(", "self", ".", "signals_in_file", ")", "for", "i", "in", "np", ".", "arange", "(", "self", ".", "signals_in_file", ")", ":", "digMax", "[", "i", "]", "=", "self", ".", "digital_max", "(", "i", ")", "return", "digMax" ]
Returns the maximum digital value of signal edfsignal.

Parameters
----------
chn : int
    channel number

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getDigitalMaximum(0)
32767
>>> f._close()
>>> del f
[ "Returns", "the", "maximum", "digital", "value", "of", "signal", "edfsignal", "." ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L536-L564
1,034
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.getDigitalMinimum
def getDigitalMinimum(self, chn=None):
    """
    Returns the minimum digital value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getDigitalMinimum(0)
    -32768
    >>> f._close()
    >>> del f
    """
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.digital_min(chn)
        else:
            return 0
    else:
        digMin = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            digMin[i] = self.digital_min(i)
        return digMin
python
def getDigitalMinimum(self, chn=None):
    """
    Returns the minimum digital value of signal edfsignal.

    Parameters
    ----------
    chn : int
        channel number

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> f.getDigitalMinimum(0)
    -32768
    >>> f._close()
    >>> del f
    """
    if chn is not None:
        if 0 <= chn < self.signals_in_file:
            return self.digital_min(chn)
        else:
            return 0
    else:
        digMin = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            digMin[i] = self.digital_min(i)
        return digMin
[ "def", "getDigitalMinimum", "(", "self", ",", "chn", "=", "None", ")", ":", "if", "chn", "is", "not", "None", ":", "if", "0", "<=", "chn", "<", "self", ".", "signals_in_file", ":", "return", "self", ".", "digital_min", "(", "chn", ")", "else", ":", "return", "0", "else", ":", "digMin", "=", "np", ".", "zeros", "(", "self", ".", "signals_in_file", ")", "for", "i", "in", "np", ".", "arange", "(", "self", ".", "signals_in_file", ")", ":", "digMin", "[", "i", "]", "=", "self", ".", "digital_min", "(", "i", ")", "return", "digMin" ]
Returns the minimum digital value of signal edfsignal.

Parameters
----------
chn : int
    channel number

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> f.getDigitalMinimum(0)
-32768
>>> f._close()
>>> del f
[ "Returns", "the", "minimum", "digital", "value", "of", "signal", "edfsignal", "." ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L566-L594
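The four limit getters above define the affine digital-to-physical mapping EDF readers apply to the raw 16-bit (or 24-bit, for BDF) integers. A sketch computing the implied gain and offset for one channel:

import pyedflib

f = pyedflib.data.test_generator()
chn = 0
gain = ((f.getPhysicalMaximum(chn) - f.getPhysicalMinimum(chn)) /
        (f.getDigitalMaximum(chn) - f.getDigitalMinimum(chn)))
offset = f.getPhysicalMaximum(chn) - gain * f.getDigitalMaximum(chn)
f._close()
print('physical = %g * digital + %g' % (gain, offset))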
1,035
holgern/pyedflib
pyedflib/edfreader.py
EdfReader.readSignal
def readSignal(self, chn, start=0, n=None):
    """
    Returns the physical data of signal chn. When start and n are set,
    a subset is returned

    Parameters
    ----------
    chn : int
        channel number
    start : int
        start pointer (default is 0)
    n : int
        length of data to read (default is None, by which the complete
        data of the channel are returned)

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> x = f.readSignal(0,0,1000)
    >>> int(x.shape[0])
    1000
    >>> x2 = f.readSignal(0)
    >>> int(x2.shape[0])
    120000
    >>> f._close()
    >>> del f
    """
    if start < 0:
        return np.array([])
    if n is not None and n < 0:
        return np.array([])
    nsamples = self.getNSamples()
    if chn < len(nsamples):
        if n is None:
            n = nsamples[chn]
        elif n > nsamples[chn]:
            return np.array([])
        x = np.zeros(n, dtype=np.float64)
        self.readsignal(chn, start, n, x)
        return x
    else:
        return np.array([])
python
def readSignal(self, chn, start=0, n=None):
    """
    Returns the physical data of signal chn. When start and n are set,
    a subset is returned

    Parameters
    ----------
    chn : int
        channel number
    start : int
        start pointer (default is 0)
    n : int
        length of data to read (default is None, by which the complete
        data of the channel are returned)

    Examples
    --------
    >>> import pyedflib
    >>> f = pyedflib.data.test_generator()
    >>> x = f.readSignal(0,0,1000)
    >>> int(x.shape[0])
    1000
    >>> x2 = f.readSignal(0)
    >>> int(x2.shape[0])
    120000
    >>> f._close()
    >>> del f
    """
    if start < 0:
        return np.array([])
    if n is not None and n < 0:
        return np.array([])
    nsamples = self.getNSamples()
    if chn < len(nsamples):
        if n is None:
            n = nsamples[chn]
        elif n > nsamples[chn]:
            return np.array([])
        x = np.zeros(n, dtype=np.float64)
        self.readsignal(chn, start, n, x)
        return x
    else:
        return np.array([])
[ "def", "readSignal", "(", "self", ",", "chn", ",", "start", "=", "0", ",", "n", "=", "None", ")", ":", "if", "start", "<", "0", ":", "return", "np", ".", "array", "(", "[", "]", ")", "if", "n", "is", "not", "None", "and", "n", "<", "0", ":", "return", "np", ".", "array", "(", "[", "]", ")", "nsamples", "=", "self", ".", "getNSamples", "(", ")", "if", "chn", "<", "len", "(", "nsamples", ")", ":", "if", "n", "is", "None", ":", "n", "=", "nsamples", "[", "chn", "]", "elif", "n", ">", "nsamples", "[", "chn", "]", ":", "return", "np", ".", "array", "(", "[", "]", ")", "x", "=", "np", ".", "zeros", "(", "n", ",", "dtype", "=", "np", ".", "float64", ")", "self", ".", "readsignal", "(", "chn", ",", "start", ",", "n", ",", "x", ")", "return", "x", "else", ":", "return", "np", ".", "array", "(", "[", "]", ")" ]
Returns the physical data of signal chn. When start and n are set,
a subset is returned

Parameters
----------
chn : int
    channel number
start : int
    start pointer (default is 0)
n : int
    length of data to read (default is None, by which the complete
    data of the channel are returned)

Examples
--------
>>> import pyedflib
>>> f = pyedflib.data.test_generator()
>>> x = f.readSignal(0,0,1000)
>>> int(x.shape[0])
1000
>>> x2 = f.readSignal(0)
>>> int(x2.shape[0])
120000
>>> f._close()
>>> del f
[ "Returns", "the", "physical", "data", "of", "signal", "chn", ".", "When", "start", "and", "n", "is", "set", "a", "subset", "is", "returned" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/pyedflib/edfreader.py#L644-L685
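Because start and n are in samples, window reads are usually derived from the sample frequency. Sketch of a 2-second window starting 10 seconds into channel 0:

import pyedflib

f = pyedflib.data.test_generator()
fs = int(f.getSampleFrequency(0))
window = f.readSignal(0, start=10 * fs, n=2 * fs)
print(window.shape)               # (2 * fs,)
f._close()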
1,036
holgern/pyedflib
demo/stacklineplot.py
stackplot
def stackplot(marray, seconds=None, start_time=None, ylabels=None):
    """
    will plot a stack of traces one above the other assuming
    marray.shape = numRows, numSamples
    """
    tarray = np.transpose(marray)
    stackplot_t(tarray, seconds=seconds, start_time=start_time,
                ylabels=ylabels)
    plt.show()
python
def stackplot(marray, seconds=None, start_time=None, ylabels=None):
    """
    will plot a stack of traces one above the other assuming
    marray.shape = numRows, numSamples
    """
    tarray = np.transpose(marray)
    stackplot_t(tarray, seconds=seconds, start_time=start_time,
                ylabels=ylabels)
    plt.show()
[ "def", "stackplot", "(", "marray", ",", "seconds", "=", "None", ",", "start_time", "=", "None", ",", "ylabels", "=", "None", ")", ":", "tarray", "=", "np", ".", "transpose", "(", "marray", ")", "stackplot_t", "(", "tarray", ",", "seconds", "=", "seconds", ",", "start_time", "=", "start_time", ",", "ylabels", "=", "ylabels", ")", "plt", ".", "show", "(", ")" ]
will plot a stack of traces one above the other assuming marray.shape = numRows, numSamples
[ "will", "plot", "a", "stack", "of", "traces", "one", "above", "the", "other", "assuming", "marray", ".", "shape", "=", "numRows", "numSamples" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/demo/stacklineplot.py#L10-L17
1,037
holgern/pyedflib
demo/stacklineplot.py
stackplot_t
def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
    """
    will plot a stack of traces one above the other assuming
    tarray.shape = numSamples, numRows
    """
    data = tarray
    numSamples, numRows = tarray.shape
    # data = np.random.randn(numSamples,numRows)  # test data
    # data.shape = numSamples, numRows
    if seconds:
        t = seconds * np.arange(numSamples, dtype=float) / numSamples
        if start_time:
            t = t + start_time
            xlm = (start_time, start_time + seconds)
        else:
            xlm = (0, seconds)
    else:
        t = np.arange(numSamples, dtype=float)
        xlm = (0, numSamples)

    ticklocs = []
    ax = plt.subplot(111)
    plt.xlim(*xlm)
    # xticks(np.linspace(xlm, 10))
    dmin = data.min()
    dmax = data.max()
    dr = (dmax - dmin) * 0.7  # Crowd them a bit.
    y0 = dmin
    y1 = (numRows - 1) * dr + dmax
    plt.ylim(y0, y1)

    segs = []
    for i in range(numRows):
        segs.append(np.hstack((t[:, np.newaxis], data[:, i, np.newaxis])))
        ticklocs.append(i * dr)

    offsets = np.zeros((numRows, 2), dtype=float)
    offsets[:, 1] = ticklocs

    lines = LineCollection(segs, offsets=offsets, transOffset=None)
    ax.add_collection(lines)

    # set the yticks to use axes coords on the y axis
    ax.set_yticks(ticklocs)
    # ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
    if not ylabels:
        ylabels = ["%d" % ii for ii in range(numRows)]
    ax.set_yticklabels(ylabels)

    plt.xlabel('time (s)')
python
def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
    """
    will plot a stack of traces one above the other assuming
    tarray.shape = numSamples, numRows
    """
    data = tarray
    numSamples, numRows = tarray.shape
    # data = np.random.randn(numSamples,numRows)  # test data
    # data.shape = numSamples, numRows
    if seconds:
        t = seconds * np.arange(numSamples, dtype=float) / numSamples
        if start_time:
            t = t + start_time
            xlm = (start_time, start_time + seconds)
        else:
            xlm = (0, seconds)
    else:
        t = np.arange(numSamples, dtype=float)
        xlm = (0, numSamples)

    ticklocs = []
    ax = plt.subplot(111)
    plt.xlim(*xlm)
    # xticks(np.linspace(xlm, 10))
    dmin = data.min()
    dmax = data.max()
    dr = (dmax - dmin) * 0.7  # Crowd them a bit.
    y0 = dmin
    y1 = (numRows - 1) * dr + dmax
    plt.ylim(y0, y1)

    segs = []
    for i in range(numRows):
        segs.append(np.hstack((t[:, np.newaxis], data[:, i, np.newaxis])))
        ticklocs.append(i * dr)

    offsets = np.zeros((numRows, 2), dtype=float)
    offsets[:, 1] = ticklocs

    lines = LineCollection(segs, offsets=offsets, transOffset=None)
    ax.add_collection(lines)

    # set the yticks to use axes coords on the y axis
    ax.set_yticks(ticklocs)
    # ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
    if not ylabels:
        ylabels = ["%d" % ii for ii in range(numRows)]
    ax.set_yticklabels(ylabels)

    plt.xlabel('time (s)')
[ "def", "stackplot_t", "(", "tarray", ",", "seconds", "=", "None", ",", "start_time", "=", "None", ",", "ylabels", "=", "None", ")", ":", "data", "=", "tarray", "numSamples", ",", "numRows", "=", "tarray", ".", "shape", "# data = np.random.randn(numSamples,numRows) # test data", "# data.shape = numSamples, numRows", "if", "seconds", ":", "t", "=", "seconds", "*", "np", ".", "arange", "(", "numSamples", ",", "dtype", "=", "float", ")", "/", "numSamples", "# import pdb", "# pdb.set_trace()", "if", "start_time", ":", "t", "=", "t", "+", "start_time", "xlm", "=", "(", "start_time", ",", "start_time", "+", "seconds", ")", "else", ":", "xlm", "=", "(", "0", ",", "seconds", ")", "else", ":", "t", "=", "np", ".", "arange", "(", "numSamples", ",", "dtype", "=", "float", ")", "xlm", "=", "(", "0", ",", "numSamples", ")", "ticklocs", "=", "[", "]", "ax", "=", "plt", ".", "subplot", "(", "111", ")", "plt", ".", "xlim", "(", "*", "xlm", ")", "# xticks(np.linspace(xlm, 10))", "dmin", "=", "data", ".", "min", "(", ")", "dmax", "=", "data", ".", "max", "(", ")", "dr", "=", "(", "dmax", "-", "dmin", ")", "*", "0.7", "# Crowd them a bit.", "y0", "=", "dmin", "y1", "=", "(", "numRows", "-", "1", ")", "*", "dr", "+", "dmax", "plt", ".", "ylim", "(", "y0", ",", "y1", ")", "segs", "=", "[", "]", "for", "i", "in", "range", "(", "numRows", ")", ":", "segs", ".", "append", "(", "np", ".", "hstack", "(", "(", "t", "[", ":", ",", "np", ".", "newaxis", "]", ",", "data", "[", ":", ",", "i", ",", "np", ".", "newaxis", "]", ")", ")", ")", "# print \"segs[-1].shape:\", segs[-1].shape", "ticklocs", ".", "append", "(", "i", "*", "dr", ")", "offsets", "=", "np", ".", "zeros", "(", "(", "numRows", ",", "2", ")", ",", "dtype", "=", "float", ")", "offsets", "[", ":", ",", "1", "]", "=", "ticklocs", "lines", "=", "LineCollection", "(", "segs", ",", "offsets", "=", "offsets", ",", "transOffset", "=", "None", ",", ")", "ax", ".", "add_collection", "(", "lines", ")", "# set the yticks to use axes coords on the y axis", "ax", ".", "set_yticks", "(", "ticklocs", ")", "# ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])", "# if not plt.ylabels:", "plt", ".", "ylabels", "=", "[", "\"%d\"", "%", "ii", "for", "ii", "in", "range", "(", "numRows", ")", "]", "ax", ".", "set_yticklabels", "(", "ylabels", ")", "plt", ".", "xlabel", "(", "'time (s)'", ")" ]
will plot a stack of traces one above the other assuming tarray.shape = numSamples, numRows
[ "will", "plot", "a", "stack", "of", "traces", "one", "above", "the", "other", "assuming", "tarray", ".", "shape", "=", "numSamples", "numRows" ]
0f787fc1202b84a6f30d098296acf72666eaeeb4
https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/demo/stacklineplot.py#L20-L76
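Tying the demo to the reader: a hedged sketch that pulls the first few channels of the test file into the numRows x numSamples layout stackplot expects (it assumes the demo module is importable, e.g. by running from the demo/ directory):

import numpy as np
import pyedflib
from stacklineplot import stackplot

f = pyedflib.data.test_generator()
n_chans = min(3, f.signals_in_file)
fs = int(f.getSampleFrequency(0))
# 5 seconds per channel, stacked row-wise as stackplot expects.
data = np.array([f.readSignal(chn, n=5 * fs) for chn in range(n_chans)])
labels = f.getSignalLabels()[:n_chans]
f._close()
stackplot(data, seconds=5, start_time=0, ylabels=labels)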
1,038
jrialland/python-astar
src/astar/__init__.py
find_path
def find_path(start, goal, neighbors_fnct, reversePath=False,
              heuristic_cost_estimate_fnct=lambda a, b: Infinite,
              distance_between_fnct=lambda a, b: 1.0,
              is_goal_reached_fnct=lambda a, b: a == b):
    """A non-class version of the path finding algorithm"""
    class FindPath(AStar):

        def heuristic_cost_estimate(self, current, goal):
            return heuristic_cost_estimate_fnct(current, goal)

        def distance_between(self, n1, n2):
            return distance_between_fnct(n1, n2)

        def neighbors(self, node):
            return neighbors_fnct(node)

        def is_goal_reached(self, current, goal):
            return is_goal_reached_fnct(current, goal)
    return FindPath().astar(start, goal, reversePath)
python
def find_path(start, goal, neighbors_fnct, reversePath=False,
              heuristic_cost_estimate_fnct=lambda a, b: Infinite,
              distance_between_fnct=lambda a, b: 1.0,
              is_goal_reached_fnct=lambda a, b: a == b):
    """A non-class version of the path finding algorithm"""
    class FindPath(AStar):

        def heuristic_cost_estimate(self, current, goal):
            return heuristic_cost_estimate_fnct(current, goal)

        def distance_between(self, n1, n2):
            return distance_between_fnct(n1, n2)

        def neighbors(self, node):
            return neighbors_fnct(node)

        def is_goal_reached(self, current, goal):
            return is_goal_reached_fnct(current, goal)
    return FindPath().astar(start, goal, reversePath)
[ "def", "find_path", "(", "start", ",", "goal", ",", "neighbors_fnct", ",", "reversePath", "=", "False", ",", "heuristic_cost_estimate_fnct", "=", "lambda", "a", ",", "b", ":", "Infinite", ",", "distance_between_fnct", "=", "lambda", "a", ",", "b", ":", "1.0", ",", "is_goal_reached_fnct", "=", "lambda", "a", ",", "b", ":", "a", "==", "b", ")", ":", "class", "FindPath", "(", "AStar", ")", ":", "def", "heuristic_cost_estimate", "(", "self", ",", "current", ",", "goal", ")", ":", "return", "heuristic_cost_estimate_fnct", "(", "current", ",", "goal", ")", "def", "distance_between", "(", "self", ",", "n1", ",", "n2", ")", ":", "return", "distance_between_fnct", "(", "n1", ",", "n2", ")", "def", "neighbors", "(", "self", ",", "node", ")", ":", "return", "neighbors_fnct", "(", "node", ")", "def", "is_goal_reached", "(", "self", ",", "current", ",", "goal", ")", ":", "return", "is_goal_reached_fnct", "(", "current", ",", "goal", ")", "return", "FindPath", "(", ")", ".", "astar", "(", "start", ",", "goal", ",", "reversePath", ")" ]
A non-class version of the path finding algorithm
[ "A", "non", "-", "class", "version", "of", "the", "path", "finding", "algorithm" ]
7a3f5b33bedd03bd09792fe0d5b6fe28d50f9514
https://github.com/jrialland/python-astar/blob/7a3f5b33bedd03bd09792fe0d5b6fe28d50f9514/src/astar/__init__.py#L109-L124
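Everything problem-specific is injected through the keyword callables, so a grid search needs only a neighbor generator and a heuristic. A sketch on a 4x4 grid (the maze and Manhattan heuristic below are illustrative assumptions, not part of the library):

from astar import find_path

WALLS = {(1, 0), (1, 1), (1, 2)}      # blocked cells

def neighbors(node):
    # 4-connected moves that stay on the grid and avoid walls
    x, y = node
    for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
        if 0 <= nx < 4 and 0 <= ny < 4 and (nx, ny) not in WALLS:
            yield (nx, ny)

def manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

path = find_path((0, 0), (3, 3), neighbors_fnct=neighbors,
                 heuristic_cost_estimate_fnct=manhattan)
print(list(path) if path is not None else 'no path')   # None when unreachable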
1,039
frictionlessdata/goodtables-py
goodtables/validate.py
validate
def validate(source, **options):
    """Validates a source file and returns a report.

    Args:
        source (Union[str, Dict, List[Dict], IO]): The source to be
            validated. It can be a local file path, URL, dict, list of
            dicts, or a file-like object. If it's a list of dicts and the
            `preset` is "nested", each of the dict keys will be used as
            if it was passed as a keyword argument to this method.

            The file can be a CSV, XLS, JSON, and any other format
            supported by `tabulator`_.

    Keyword Args:
        checks (List[str]): List of checks names to be enabled. They can
            be individual check names (e.g. `blank-headers`), or check
            types (e.g. `structure`).
        skip_checks (List[str]): List of checks names to be skipped. They
            can be individual check names (e.g. `blank-headers`), or
            check types (e.g. `structure`).
        infer_schema (bool): Infer schema if one wasn't passed as an
            argument.
        infer_fields (bool): Infer schema for columns not present in the
            received schema.
        order_fields (bool): Order source columns based on schema fields
            order. This is useful when you don't want to validate that
            the data columns' order is the same as the schema's.
        error_limit (int): Stop validation if the number of errors per
            table exceeds this value.
        table_limit (int): Maximum number of tables to validate.
        row_limit (int): Maximum number of rows to validate.
        preset (str): Dataset type could be `table` (default),
            `datapackage`, `nested` or custom. Usually, the preset can be
            inferred from the source, so you don't need to define it.
        Any (Any): Any additional arguments not defined here will be
            passed on, depending on the chosen `preset`. If the `preset`
            is `table`, the extra arguments will be passed on to
            `tabulator`_, if it is `datapackage`, they will be passed on
            to the `datapackage`_ constructor.

        # Table preset
        schema (Union[str, Dict, IO]): The Table Schema for the source.
        headers (Union[int, List[str]]): Either the row number that
            contains the headers, or a list with them. If the row number
            is given, ?????
        scheme (str): The scheme used to access the source (e.g. `file`,
            `http`). This is usually inferred correctly from the source.
            See the `tabulator`_ documentation for the list of supported
            schemes.
        format (str): Format of the source data (`csv`, `datapackage`,
            ...). This is usually inferred correctly from the source. See
            the `tabulator`_ documentation for the list of supported
            formats.
        encoding (str): Encoding of the source.
        skip_rows (Union[int, List[Union[int, str]]]): Row numbers or a
            string. Rows beginning with the string will be ignored (e.g.
            '#', '//').

    Raises:
        GoodtablesException: Raised on any non-tabular error.

    Returns:
        dict: The validation report.

    .. _tabulator: https://github.com/frictionlessdata/tabulator-py
    .. _tabulator_schemes: https://github.com/frictionlessdata/tabulator-py
    .. _datapackage: https://github.com/frictionlessdata/datapackage-py
    """
    source, options, inspector_settings = _parse_arguments(source, **options)

    # Validate
    inspector = Inspector(**inspector_settings)
    report = inspector.inspect(source, **options)

    return report
python
def validate(source, **options):
    """Validates a source file and returns a report.

    Args:
        source (Union[str, Dict, List[Dict], IO]): The source to be
            validated. It can be a local file path, URL, dict, list of
            dicts, or a file-like object. If it's a list of dicts and the
            `preset` is "nested", each of the dict keys will be used as
            if it was passed as a keyword argument to this method.

            The file can be a CSV, XLS, JSON, and any other format
            supported by `tabulator`_.

    Keyword Args:
        checks (List[str]): List of checks names to be enabled. They can
            be individual check names (e.g. `blank-headers`), or check
            types (e.g. `structure`).
        skip_checks (List[str]): List of checks names to be skipped. They
            can be individual check names (e.g. `blank-headers`), or
            check types (e.g. `structure`).
        infer_schema (bool): Infer schema if one wasn't passed as an
            argument.
        infer_fields (bool): Infer schema for columns not present in the
            received schema.
        order_fields (bool): Order source columns based on schema fields
            order. This is useful when you don't want to validate that
            the data columns' order is the same as the schema's.
        error_limit (int): Stop validation if the number of errors per
            table exceeds this value.
        table_limit (int): Maximum number of tables to validate.
        row_limit (int): Maximum number of rows to validate.
        preset (str): Dataset type could be `table` (default),
            `datapackage`, `nested` or custom. Usually, the preset can be
            inferred from the source, so you don't need to define it.
        Any (Any): Any additional arguments not defined here will be
            passed on, depending on the chosen `preset`. If the `preset`
            is `table`, the extra arguments will be passed on to
            `tabulator`_, if it is `datapackage`, they will be passed on
            to the `datapackage`_ constructor.

        # Table preset
        schema (Union[str, Dict, IO]): The Table Schema for the source.
        headers (Union[int, List[str]]): Either the row number that
            contains the headers, or a list with them. If the row number
            is given, ?????
        scheme (str): The scheme used to access the source (e.g. `file`,
            `http`). This is usually inferred correctly from the source.
            See the `tabulator`_ documentation for the list of supported
            schemes.
        format (str): Format of the source data (`csv`, `datapackage`,
            ...). This is usually inferred correctly from the source. See
            the `tabulator`_ documentation for the list of supported
            formats.
        encoding (str): Encoding of the source.
        skip_rows (Union[int, List[Union[int, str]]]): Row numbers or a
            string. Rows beginning with the string will be ignored (e.g.
            '#', '//').

    Raises:
        GoodtablesException: Raised on any non-tabular error.

    Returns:
        dict: The validation report.

    .. _tabulator: https://github.com/frictionlessdata/tabulator-py
    .. _tabulator_schemes: https://github.com/frictionlessdata/tabulator-py
    .. _datapackage: https://github.com/frictionlessdata/datapackage-py
    """
    source, options, inspector_settings = _parse_arguments(source, **options)

    # Validate
    inspector = Inspector(**inspector_settings)
    report = inspector.inspect(source, **options)

    return report
[ "def", "validate", "(", "source", ",", "*", "*", "options", ")", ":", "source", ",", "options", ",", "inspector_settings", "=", "_parse_arguments", "(", "source", ",", "*", "*", "options", ")", "# Validate", "inspector", "=", "Inspector", "(", "*", "*", "inspector_settings", ")", "report", "=", "inspector", ".", "inspect", "(", "source", ",", "*", "*", "options", ")", "return", "report" ]
Validates a source file and returns a report.

Args:
    source (Union[str, Dict, List[Dict], IO]): The source to be
        validated. It can be a local file path, URL, dict, list of
        dicts, or a file-like object. If it's a list of dicts and the
        `preset` is "nested", each of the dict keys will be used as if
        it was passed as a keyword argument to this method.

        The file can be a CSV, XLS, JSON, and any other format
        supported by `tabulator`_.

Keyword Args:
    checks (List[str]): List of checks names to be enabled. They can
        be individual check names (e.g. `blank-headers`), or check
        types (e.g. `structure`).
    skip_checks (List[str]): List of checks names to be skipped. They
        can be individual check names (e.g. `blank-headers`), or check
        types (e.g. `structure`).
    infer_schema (bool): Infer schema if one wasn't passed as an
        argument.
    infer_fields (bool): Infer schema for columns not present in the
        received schema.
    order_fields (bool): Order source columns based on schema fields
        order. This is useful when you don't want to validate that the
        data columns' order is the same as the schema's.
    error_limit (int): Stop validation if the number of errors per
        table exceeds this value.
    table_limit (int): Maximum number of tables to validate.
    row_limit (int): Maximum number of rows to validate.
    preset (str): Dataset type could be `table` (default),
        `datapackage`, `nested` or custom. Usually, the preset can be
        inferred from the source, so you don't need to define it.
    Any (Any): Any additional arguments not defined here will be
        passed on, depending on the chosen `preset`. If the `preset` is
        `table`, the extra arguments will be passed on to `tabulator`_,
        if it is `datapackage`, they will be passed on to the
        `datapackage`_ constructor.

    # Table preset
    schema (Union[str, Dict, IO]): The Table Schema for the source.
    headers (Union[int, List[str]]): Either the row number that
        contains the headers, or a list with them. If the row number is
        given, ?????
    scheme (str): The scheme used to access the source (e.g. `file`,
        `http`). This is usually inferred correctly from the source.
        See the `tabulator`_ documentation for the list of supported
        schemes.
    format (str): Format of the source data (`csv`, `datapackage`,
        ...). This is usually inferred correctly from the source. See
        the `tabulator`_ documentation for the list of supported
        formats.
    encoding (str): Encoding of the source.
    skip_rows (Union[int, List[Union[int, str]]]): Row numbers or a
        string. Rows beginning with the string will be ignored (e.g.
        '#', '//').

Raises:
    GoodtablesException: Raised on any non-tabular error.

Returns:
    dict: The validation report.

.. _tabulator: https://github.com/frictionlessdata/tabulator-py
.. _tabulator_schemes: https://github.com/frictionlessdata/tabulator-py
.. _datapackage: https://github.com/frictionlessdata/datapackage-py
[ "Validates", "a", "source", "file", "and", "returns", "a", "report", "." ]
3e7d6891d2f4e342dfafbe0e951e204ccc252a44
https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/validate.py#L13-L87
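Since tabulator also accepts inline data, the quickest way to see a report's shape is to validate a list of lists. A hedged sketch (the report keys shown are the usual goodtables ones):

from goodtables import validate

report = validate([['id', 'name'], [1, 'english'], [2, 'spanish']])
print(report['valid'])          # True
print(report['error-count'])    # 0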
1,040
frictionlessdata/goodtables-py
goodtables/validate.py
init_datapackage
def init_datapackage(resource_paths):
    """Create tabular data package with resources.

    It will also infer the tabular resources' schemas.

    Args:
        resource_paths (List[str]): Paths to the data package resources.

    Returns:
        datapackage.Package: The data package.
    """
    dp = datapackage.Package({
        'name': 'change-me',
        'schema': 'tabular-data-package',
    })

    for path in resource_paths:
        dp.infer(path)

    return dp
python
def init_datapackage(resource_paths):
    """Create tabular data package with resources.

    It will also infer the tabular resources' schemas.

    Args:
        resource_paths (List[str]): Paths to the data package resources.

    Returns:
        datapackage.Package: The data package.
    """
    dp = datapackage.Package({
        'name': 'change-me',
        'schema': 'tabular-data-package',
    })

    for path in resource_paths:
        dp.infer(path)

    return dp
[ "def", "init_datapackage", "(", "resource_paths", ")", ":", "dp", "=", "datapackage", ".", "Package", "(", "{", "'name'", ":", "'change-me'", ",", "'schema'", ":", "'tabular-data-package'", ",", "}", ")", "for", "path", "in", "resource_paths", ":", "dp", ".", "infer", "(", "path", ")", "return", "dp" ]
Create tabular data package with resources.

It will also infer the tabular resources' schemas.

Args:
    resource_paths (List[str]): Paths to the data package resources.

Returns:
    datapackage.Package: The data package.
[ "Create", "tabular", "data", "package", "with", "resources", "." ]
3e7d6891d2f4e342dfafbe0e951e204ccc252a44
https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/validate.py#L90-L109
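A sketch of the round trip the CLI record below builds on: infer a descriptor from local CSVs (the paths are placeholders), then inspect it. dp.valid and dp.descriptor are standard datapackage.Package attributes:

import goodtables

dp = goodtables.init_datapackage(['data/invoices.csv', 'data/clients.csv'])
print(dp.valid)                                   # descriptor validity
print(dp.descriptor['resources'][0]['schema'])    # inferred Table Schema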
1,041
frictionlessdata/goodtables-py
goodtables/cli.py
init
def init(paths, output, **kwargs):
    """Init data package from list of files.

    It will also infer tabular data's schemas from their contents.
    """
    dp = goodtables.init_datapackage(paths)

    click.secho(
        json_module.dumps(dp.descriptor, indent=4),
        file=output
    )

    exit(dp.valid)
python
def init(paths, output, **kwargs):
    """Init data package from list of files.

    It will also infer tabular data's schemas from their contents.
    """
    dp = goodtables.init_datapackage(paths)

    click.secho(
        json_module.dumps(dp.descriptor, indent=4),
        file=output
    )

    exit(dp.valid)
[ "def", "init", "(", "paths", ",", "output", ",", "*", "*", "kwargs", ")", ":", "dp", "=", "goodtables", ".", "init_datapackage", "(", "paths", ")", "click", ".", "secho", "(", "json_module", ".", "dumps", "(", "dp", ".", "descriptor", ",", "indent", "=", "4", ")", ",", "file", "=", "output", ")", "exit", "(", "dp", ".", "valid", ")" ]
Init data package from list of files.

It will also infer tabular data's schemas from their contents.
[ "Init", "data", "package", "from", "list", "of", "files", "." ]
3e7d6891d2f4e342dfafbe0e951e204ccc252a44
https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/cli.py#L121-L133
1,042
frictionlessdata/goodtables-py
goodtables/inspector.py
_clean_empty
def _clean_empty(d):
    """Remove None values from a dict."""
    if not isinstance(d, (dict, list)):
        return d
    if isinstance(d, list):
        return [v for v in (_clean_empty(v) for v in d) if v is not None]
    return {
        k: v
        for k, v in ((k, _clean_empty(v)) for k, v in d.items())
        if v is not None
    }
python
def _clean_empty(d):
    """Remove None values from a dict."""
    if not isinstance(d, (dict, list)):
        return d
    if isinstance(d, list):
        return [v for v in (_clean_empty(v) for v in d) if v is not None]
    return {
        k: v
        for k, v in ((k, _clean_empty(v)) for k, v in d.items())
        if v is not None
    }
[ "def", "_clean_empty", "(", "d", ")", ":", "if", "not", "isinstance", "(", "d", ",", "(", "dict", ",", "list", ")", ")", ":", "return", "d", "if", "isinstance", "(", "d", ",", "list", ")", ":", "return", "[", "v", "for", "v", "in", "(", "_clean_empty", "(", "v", ")", "for", "v", "in", "d", ")", "if", "v", "is", "not", "None", "]", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "(", "(", "k", ",", "_clean_empty", "(", "v", ")", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ")", "if", "v", "is", "not", "None", "}" ]
Remove None values from a dict.
[ "Remove", "None", "values", "from", "a", "dict", "." ]
3e7d6891d2f4e342dfafbe0e951e204ccc252a44
https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/inspector.py#L330-L340
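Despite the one-line docstring, the helper recurses through lists too, and only None is dropped; other falsy values ('', 0, []) survive. A quick demonstration (the import path is the module documented above, though the helper is private):

from goodtables.inspector import _clean_empty

messy = {'valid': True,
         'warnings': None,
         'tables': [{'errors': [], 'source': None}, None]}
print(_clean_empty(messy))
# {'valid': True, 'tables': [{'errors': []}]}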
1,043
frictionlessdata/goodtables-py
goodtables/cells.py
create_cells
def create_cells(headers, schema_fields, values=None, row_number=None):
    """Create list of cells from headers, fields and values.

    Args:
        headers (List[str]): The headers values.
        schema_fields (List[tableschema.field.Field]): The tableschema
            fields.
        values (List[Any], optional): The cells values. If not
            specified, the created cells will have the same values as
            their corresponding headers. This is useful for specifying
            headers cells.
            If the list has any `None` values, as is the case on empty
            cells, the resulting Cell will have an empty string value.
            If the `values` list has a different length than the
            `headers`, the resulting Cell will have value `None`.
        row_number (int, optional): The row number.

    Returns:
        List[dict]: List of cells.
    """
    fillvalue = '_fillvalue'
    is_header_row = (values is None)
    cells = []

    iterator = zip_longest(headers, schema_fields, values or [],
                           fillvalue=fillvalue)
    for column_number, (header, field, value) in enumerate(iterator, start=1):
        if header == fillvalue:
            header = None
        elif is_header_row:
            value = header
        if field == fillvalue:
            field = None
        if value == fillvalue:
            value = None
        elif value is None:
            value = ''

        cell = create_cell(header, value, field, column_number, row_number)
        cells.append(cell)

    return cells
python
def create_cells(headers, schema_fields, values=None, row_number=None):
    """Create list of cells from headers, fields and values.

    Args:
        headers (List[str]): The headers values.
        schema_fields (List[tableschema.field.Field]): The tableschema
            fields.
        values (List[Any], optional): The cells values. If not
            specified, the created cells will have the same values as
            their corresponding headers. This is useful for specifying
            headers cells.
            If the list has any `None` values, as is the case on empty
            cells, the resulting Cell will have an empty string value.
            If the `values` list has a different length than the
            `headers`, the resulting Cell will have value `None`.
        row_number (int, optional): The row number.

    Returns:
        List[dict]: List of cells.
    """
    fillvalue = '_fillvalue'
    is_header_row = (values is None)
    cells = []

    iterator = zip_longest(headers, schema_fields, values or [],
                           fillvalue=fillvalue)
    for column_number, (header, field, value) in enumerate(iterator, start=1):
        if header == fillvalue:
            header = None
        elif is_header_row:
            value = header
        if field == fillvalue:
            field = None
        if value == fillvalue:
            value = None
        elif value is None:
            value = ''

        cell = create_cell(header, value, field, column_number, row_number)
        cells.append(cell)

    return cells
[ "def", "create_cells", "(", "headers", ",", "schema_fields", ",", "values", "=", "None", ",", "row_number", "=", "None", ")", ":", "fillvalue", "=", "'_fillvalue'", "is_header_row", "=", "(", "values", "is", "None", ")", "cells", "=", "[", "]", "iterator", "=", "zip_longest", "(", "headers", ",", "schema_fields", ",", "values", "or", "[", "]", ",", "fillvalue", "=", "fillvalue", ")", "for", "column_number", ",", "(", "header", ",", "field", ",", "value", ")", "in", "enumerate", "(", "iterator", ",", "start", "=", "1", ")", ":", "if", "header", "==", "fillvalue", ":", "header", "=", "None", "elif", "is_header_row", ":", "value", "=", "header", "if", "field", "==", "fillvalue", ":", "field", "=", "None", "if", "value", "==", "fillvalue", ":", "value", "=", "None", "elif", "value", "is", "None", ":", "value", "=", "''", "cell", "=", "create_cell", "(", "header", ",", "value", ",", "field", ",", "column_number", ",", "row_number", ")", "cells", ".", "append", "(", "cell", ")", "return", "cells" ]
Create list of cells from headers, fields and values. Args: headers (List[str]): The header values. schema_fields (List[tableschema.field.Field]): The tableschema fields. values (List[Any], optional): The cell values. If not specified, the created cells will have the same values as their corresponding headers. This is useful for specifying header cells. If the list has any `None` values, as is the case with empty cells, the resulting Cell will have an empty string value. If the `values` list has a different length than the `headers`, the resulting Cell will have value `None`. row_number (int, optional): The row number. Returns: List[dict]: List of cells.
[ "Create", "list", "of", "cells", "from", "headers", "fields", "and", "values", "." ]
3e7d6891d2f4e342dfafbe0e951e204ccc252a44
https://github.com/frictionlessdata/goodtables-py/blob/3e7d6891d2f4e342dfafbe0e951e204ccc252a44/goodtables/cells.py#L4-L44
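The padding behaviour in create_cells above comes from itertools.zip_longest; a minimal sketch of just that part, reusing the same '_fillvalue' sentinel the record uses (no schema fields, one value missing):

    from itertools import zip_longest

    headers = ['name', 'age']
    values = ['Alice']  # one value short
    fill = '_fillvalue'
    list(zip_longest(headers, [], values, fillvalue=fill))
    # -> [('name', '_fillvalue', 'Alice'), ('age', '_fillvalue', '_fillvalue')]

The function then maps the sentinel back: the missing field becomes None, and the missing value becomes None per the docstring.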
1,044
unixfreak0037/officeparser
officeparser.py
CompoundBinaryFile.__impl_read_chain
def __impl_read_chain(self, start, read_sector_f, read_fat_f): """Returns the entire contents of a chain starting at the given sector.""" sector = start check = [ sector ] # keep a list of sectors we've already read buffer = StringIO() while sector != ENDOFCHAIN: buffer.write(read_sector_f(sector)) next = read_fat_f(sector) if next in check: logging.error('infinite loop detected at {0} to {1} starting at {2}'.format( sector, next, start)) return buffer.getvalue() check.append(next) sector = next return buffer.getvalue()
python
def __impl_read_chain(self, start, read_sector_f, read_fat_f): """Returns the entire contents of a chain starting at the given sector.""" sector = start check = [ sector ] # keep a list of sectors we've already read buffer = StringIO() while sector != ENDOFCHAIN: buffer.write(read_sector_f(sector)) next = read_fat_f(sector) if next in check: logging.error('infinite loop detected at {0} to {1} starting at {2}'.format( sector, next, start)) return buffer.getvalue() check.append(next) sector = next return buffer.getvalue()
[ "def", "__impl_read_chain", "(", "self", ",", "start", ",", "read_sector_f", ",", "read_fat_f", ")", ":", "sector", "=", "start", "check", "=", "[", "sector", "]", "# keep a list of sectors we've already read", "buffer", "=", "StringIO", "(", ")", "while", "sector", "!=", "ENDOFCHAIN", ":", "buffer", ".", "write", "(", "read_sector_f", "(", "sector", ")", ")", "next", "=", "read_fat_f", "(", "sector", ")", "if", "next", "in", "check", ":", "logging", ".", "error", "(", "'infinite loop detected at {0} to {1} starting at {2}'", ".", "format", "(", "sector", ",", "next", ",", "start", ")", ")", "return", "buffer", ".", "getvalue", "(", ")", "check", ".", "append", "(", "next", ")", "sector", "=", "next", "return", "buffer", ".", "getvalue", "(", ")" ]
Returns the entire contents of a chain starting at the given sector.
[ "Returns", "the", "entire", "contents", "of", "a", "chain", "starting", "at", "the", "given", "sector", "." ]
42c2d40372fe271f2039ca1adc145d2aef8c9545
https://github.com/unixfreak0037/officeparser/blob/42c2d40372fe271f2039ca1adc145d2aef8c9545/officeparser.py#L247-L261
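The fixed __impl_read_chain above walks a FAT-style linked list of sectors. A self-contained sketch of the same idea over an in-memory FAT (the fat and sectors mappings are hypothetical stand-ins), using a set for O(1) cycle checks instead of the list used upstream:

    ENDOFCHAIN = 0xFFFFFFFE  # terminator sentinel, as in the compound-file spec

    fat = {0: 3, 3: 5, 5: ENDOFCHAIN}            # sector -> next sector
    sectors = {0: b'AAA', 3: b'BBB', 5: b'CCC'}  # sector -> payload

    def read_chain(start):
        seen = set()
        out = bytearray()
        sector = start
        while sector != ENDOFCHAIN:
            if sector in seen:  # a cycle in the FAT would otherwise loop forever
                raise ValueError('FAT cycle detected at sector %d' % sector)
            seen.add(sector)
            out += sectors[sector]
            sector = fat[sector]
        return bytes(out)

    assert read_chain(0) == b'AAABBBCCC'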
1,045
billy-yoyo/RainbowSixSiege-Python-API
r6sapi/r6sapi.py
Rank.get_charm_url
def get_charm_url(self): """Get charm URL for the bracket this rank is in Returns ------- :class:`str` the URL for the charm """ if self.rank_id <= 4: return self.RANK_CHARMS[0] if self.rank_id <= 8: return self.RANK_CHARMS[1] if self.rank_id <= 12: return self.RANK_CHARMS[2] if self.rank_id <= 16: return self.RANK_CHARMS[3] if self.rank_id <= 19: return self.RANK_CHARMS[4] return self.RANK_CHARMS[5]
python
def get_charm_url(self): """Get charm URL for the bracket this rank is in Returns ------- :class:`str` the URL for the charm """ if self.rank_id <= 4: return self.RANK_CHARMS[0] if self.rank_id <= 8: return self.RANK_CHARMS[1] if self.rank_id <= 12: return self.RANK_CHARMS[2] if self.rank_id <= 16: return self.RANK_CHARMS[3] if self.rank_id <= 19: return self.RANK_CHARMS[4] return self.RANK_CHARMS[5]
[ "def", "get_charm_url", "(", "self", ")", ":", "if", "self", ".", "rank_id", "<=", "4", ":", "return", "self", ".", "RANK_CHARMS", "[", "0", "]", "if", "self", ".", "rank_id", "<=", "8", ":", "return", "self", ".", "RANK_CHARMS", "[", "1", "]", "if", "self", ".", "rank_id", "<=", "12", ":", "return", "self", ".", "RANK_CHARMS", "[", "2", "]", "if", "self", ".", "rank_id", "<=", "16", ":", "return", "self", ".", "RANK_CHARMS", "[", "3", "]", "if", "self", ".", "rank_id", "<=", "19", ":", "return", "self", ".", "RANK_CHARMS", "[", "4", "]", "return", "self", ".", "RANK_CHARMS", "[", "5", "]" ]
Get charm URL for the bracket this rank is in Returns ------- :class:`str` the URL for the charm
[ "Get", "charm", "URL", "for", "the", "bracket", "this", "rank", "is", "in" ]
9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0
https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L808-L822
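The threshold ladder in get_charm_url above can also be expressed with bisect; a sketch under the assumption that only the five upper bounds (4, 8, 12, 16, 19) matter:

    import bisect

    _BRACKET_UPPER_BOUNDS = (4, 8, 12, 16, 19)

    def charm_index(rank_id):
        # first bracket whose upper bound is >= rank_id; ids above 19
        # fall through to index 5, matching the final return above
        return bisect.bisect_left(_BRACKET_UPPER_BOUNDS, rank_id)

    assert charm_index(4) == 0 and charm_index(17) == 4 and charm_index(25) == 5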
1,046
billy-yoyo/RainbowSixSiege-Python-API
r6sapi/r6sapi.py
Player.load_rank
def load_rank(self, region, season=-1): """|coro| Loads the player's rank for this region and season Parameters ---------- region : str the name of the region you want to get the rank for season : Optional[int] the season you want to get the rank for (defaults to -1, latest season) Returns ------- :class:`Rank` the player's rank for this region and season""" data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s&region_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season)) if "players" in data and self.id in data["players"]: regionkey = "%s:%s" % (region, season) self.ranks[regionkey] = Rank(data["players"][self.id]) return self.ranks[regionkey] else: raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
python
def load_rank(self, region, season=-1): """|coro| Loads the player's rank for this region and season Parameters ---------- region : str the name of the region you want to get the rank for season : Optional[int] the season you want to get the rank for (defaults to -1, latest season) Returns ------- :class:`Rank` the player's rank for this region and season""" data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s&region_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season)) if "players" in data and self.id in data["players"]: regionkey = "%s:%s" % (region, season) self.ranks[regionkey] = Rank(data["players"][self.id]) return self.ranks[regionkey] else: raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
[ "def", "load_rank", "(", "self", ",", "region", ",", "season", "=", "-", "1", ")", ":", "data", "=", "yield", "from", "self", ".", "auth", ".", "get", "(", "\"https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s&region_id=%s&season_id=%s\"", "%", "(", "self", ".", "spaceid", ",", "self", ".", "platform_url", ",", "self", ".", "id", ",", "region", ",", "season", ")", ")", "if", "\"players\"", "in", "data", "and", "self", ".", "id", "in", "data", "[", "\"players\"", "]", ":", "regionkey", "=", "\"%s:%s\"", "%", "(", "region", ",", "season", ")", "self", ".", "ranks", "[", "regionkey", "]", "=", "Rank", "(", "data", "[", "\"players\"", "]", "[", "self", ".", "id", "]", ")", "return", "self", ".", "ranks", "[", "regionkey", "]", "else", ":", "raise", "InvalidRequest", "(", "\"Missing players key in returned JSON object %s\"", "%", "str", "(", "data", ")", ")" ]
|coro| Loads the player's rank for this region and season Parameters ---------- region : str the name of the region you want to get the rank for season : Optional[int] the season you want to get the rank for (defaults to -1, latest season) Returns ------- :class:`Rank` the player's rank for this region and season
[ "|coro|", "Loads", "the", "player's", "rank", "for", "this", "region", "and", "season" ]
9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0
https://github.com/billy-yoyo/RainbowSixSiege-Python-API/blob/9860fdfd9a78aabd977eaa71b0a4ab4ed69e94d0/r6sapi/r6sapi.py#L1114-L1136
1,047
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/wrapper.py
libdmtx_function
def libdmtx_function(fname, restype, *args): """Returns a foreign function exported by `libdmtx`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: A ctypes foreign function wrapping the exported function. """ prototype = CFUNCTYPE(restype, *args) return prototype((fname, load_libdmtx()))
python
def libdmtx_function(fname, restype, *args): """Returns a foreign function exported by `libdmtx`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: A ctypes foreign function wrapping the exported function. """ prototype = CFUNCTYPE(restype, *args) return prototype((fname, load_libdmtx()))
[ "def", "libdmtx_function", "(", "fname", ",", "restype", ",", "*", "args", ")", ":", "prototype", "=", "CFUNCTYPE", "(", "restype", ",", "*", "args", ")", "return", "prototype", "(", "(", "fname", ",", "load_libdmtx", "(", ")", ")", ")" ]
Returns a foreign function exported by `libdmtx`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: A ctypes foreign function wrapping the exported function.
[ "Returns", "a", "foreign", "function", "exported", "by", "libdmtx", "." ]
a425ec36050500af4875bf94eda02feb26ea62ad
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/wrapper.py#L46-L59
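A usage sketch for libdmtx_function above; dmtxVersion is a real libdmtx export that takes no arguments and returns a C string, but treat the exact binding as an assumption:

    from ctypes import c_char_p

    dmtxVersion = libdmtx_function('dmtxVersion', c_char_p)
    print(dmtxVersion())  # e.g. b'0.7.5'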
1,048
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/pylibdmtx.py
_image
@contextmanager def _image(pixels, width, height, pack): """A context manager for `DmtxImage`, created and destroyed by `dmtxImageCreate` and `dmtxImageDestroy`. Args: pixels (:obj:): width (int): height (int): pack (int): Yields: DmtxImage: The created image Raises: PyLibDMTXError: If the image could not be created. """ image = dmtxImageCreate(pixels, width, height, pack) if not image: raise PyLibDMTXError('Could not create image') else: try: yield image finally: dmtxImageDestroy(byref(image))
python
@contextmanager def _image(pixels, width, height, pack): """A context manager for `DmtxImage`, created and destroyed by `dmtxImageCreate` and `dmtxImageDestroy`. Args: pixels (:obj:): width (int): height (int): pack (int): Yields: DmtxImage: The created image Raises: PyLibDMTXError: If the image could not be created. """ image = dmtxImageCreate(pixels, width, height, pack) if not image: raise PyLibDMTXError('Could not create image') else: try: yield image finally: dmtxImageDestroy(byref(image))
[ "@", "contextmanager", "def", "_image", "(", "pixels", ",", "width", ",", "height", ",", "pack", ")", ":", "image", "=", "dmtxImageCreate", "(", "pixels", ",", "width", ",", "height", ",", "pack", ")", "if", "not", "image", ":", "raise", "PyLibDMTXError", "(", "'Could not create image'", ")", "else", ":", "try", ":", "yield", "image", "finally", ":", "dmtxImageDestroy", "(", "byref", "(", "image", ")", ")" ]
A context manager for `DmtxImage`, created and destroyed by `dmtxImageCreate` and `dmtxImageDestroy`. Args: pixels (:obj:): width (int): height (int): pack (int): Yields: DmtxImage: The created image Raises: PyLibDMTXError: If the image could not be created.
[ "A", "context", "manager", "for", "DmtxImage", "created", "and", "destroyed", "by", "dmtxImageCreate", "and", "dmtxImageDestroy", "." ]
a425ec36050500af4875bf94eda02feb26ea62ad
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/pylibdmtx.py#L57-L80
1,049
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/pylibdmtx.py
_decoder
@contextmanager def _decoder(image, shrink): """A context manager for `DmtxDecode`, created and destroyed by `dmtxDecodeCreate` and `dmtxDecodeDestroy`. Args: image (POINTER(DmtxImage)): shrink (int): Yields: POINTER(DmtxDecode): The created decoder Raises: PyLibDMTXError: If the decoder could not be created. """ decoder = dmtxDecodeCreate(image, shrink) if not decoder: raise PyLibDMTXError('Could not create decoder') else: try: yield decoder finally: dmtxDecodeDestroy(byref(decoder))
python
@contextmanager def _decoder(image, shrink): """A context manager for `DmtxDecode`, created and destroyed by `dmtxDecodeCreate` and `dmtxDecodeDestroy`. Args: image (POINTER(DmtxImage)): shrink (int): Yields: POINTER(DmtxDecode): The created decoder Raises: PyLibDMTXError: If the decoder could not be created. """ decoder = dmtxDecodeCreate(image, shrink) if not decoder: raise PyLibDMTXError('Could not create decoder') else: try: yield decoder finally: dmtxDecodeDestroy(byref(decoder))
[ "@", "contextmanager", "def", "_decoder", "(", "image", ",", "shrink", ")", ":", "decoder", "=", "dmtxDecodeCreate", "(", "image", ",", "shrink", ")", "if", "not", "decoder", ":", "raise", "PyLibDMTXError", "(", "'Could not create decoder'", ")", "else", ":", "try", ":", "yield", "decoder", "finally", ":", "dmtxDecodeDestroy", "(", "byref", "(", "decoder", ")", ")" ]
A context manager for `DmtxDecode`, created and destroyed by `dmtxDecodeCreate` and `dmtxDecodeDestroy`. Args: image (POINTER(DmtxImage)): shrink (int): Yields: POINTER(DmtxDecode): The created decoder Raises: PyLibDMTXError: If the decoder could not be created.
[ "A", "context", "manager", "for", "DmtxDecode", "created", "and", "destroyed", "by", "dmtxDecodeCreate", "and", "dmtxDecodeDestroy", "." ]
a425ec36050500af4875bf94eda02feb26ea62ad
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/pylibdmtx.py#L84-L105
1,050
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/pylibdmtx.py
_region
@contextmanager def _region(decoder, timeout): """A context manager for `DmtxRegion`, created and destroyed by `dmtxRegionFindNext` and `dmtxRegionDestroy`. Args: decoder (POINTER(DmtxDecode)): timeout (int or None): Yields: DmtxRegion: The next region or None, if all regions have been found. """ region = dmtxRegionFindNext(decoder, timeout) try: yield region finally: if region: dmtxRegionDestroy(byref(region))
python
@contextmanager def _region(decoder, timeout): """A context manager for `DmtxRegion`, created and destroyed by `dmtxRegionFindNext` and `dmtxRegionDestroy`. Args: decoder (POINTER(DmtxDecode)): timeout (int or None): Yields: DmtxRegion: The next region or None, if all regions have been found. """ region = dmtxRegionFindNext(decoder, timeout) try: yield region finally: if region: dmtxRegionDestroy(byref(region))
[ "@", "contextmanager", "def", "_region", "(", "decoder", ",", "timeout", ")", ":", "region", "=", "dmtxRegionFindNext", "(", "decoder", ",", "timeout", ")", "try", ":", "yield", "region", "finally", ":", "if", "region", ":", "dmtxRegionDestroy", "(", "byref", "(", "region", ")", ")" ]
A context manager for `DmtxRegion`, created and destroyed by `dmtxRegionFindNext` and `dmtxRegionDestroy`. Args: decoder (POINTER(DmtxDecode)): timeout (int or None): Yields: DmtxRegion: The next region or None, if all regions have been found.
[ "A", "context", "manager", "for", "DmtxRegion", "created", "and", "destroyed", "by", "dmtxRegionFindNext", "and", "dmtxRegionDestroy", "." ]
a425ec36050500af4875bf94eda02feb26ea62ad
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/pylibdmtx.py#L109-L125
1,051
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/pylibdmtx.py
_decoded_matrix_region
@contextmanager def _decoded_matrix_region(decoder, region, corrections): """A context manager for `DmtxMessage`, created and destroyed by `dmtxDecodeMatrixRegion` and `dmtxMessageDestroy`. Args: decoder (POINTER(DmtxDecode)): region (POINTER(DmtxRegion)): corrections (int): Yields: DmtxMessage: The message. """ message = dmtxDecodeMatrixRegion(decoder, region, corrections) try: yield message finally: if message: dmtxMessageDestroy(byref(message))
python
@contextmanager def _decoded_matrix_region(decoder, region, corrections): """A context manager for `DmtxMessage`, created and destroyed by `dmtxDecodeMatrixRegion` and `dmtxMessageDestroy`. Args: decoder (POINTER(DmtxDecode)): region (POINTER(DmtxRegion)): corrections (int): Yields: DmtxMessage: The message. """ message = dmtxDecodeMatrixRegion(decoder, region, corrections) try: yield message finally: if message: dmtxMessageDestroy(byref(message))
[ "@", "contextmanager", "def", "_decoded_matrix_region", "(", "decoder", ",", "region", ",", "corrections", ")", ":", "message", "=", "dmtxDecodeMatrixRegion", "(", "decoder", ",", "region", ",", "corrections", ")", "try", ":", "yield", "message", "finally", ":", "if", "message", ":", "dmtxMessageDestroy", "(", "byref", "(", "message", ")", ")" ]
A context manager for `DmtxMessage`, created and destroyed by `dmtxDecodeMatrixRegion` and `dmtxMessageDestroy`. Args: decoder (POINTER(DmtxDecode)): region (POINTER(DmtxRegion)): corrections (int): Yields: DmtxMessage: The message.
[ "A", "context", "manager", "for", "DmtxMessage", "created", "and", "destroyed", "by", "dmtxDecodeMatrixRegion", "and", "dmtxMessageDestroy", "." ]
a425ec36050500af4875bf94eda02feb26ea62ad
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/pylibdmtx.py#L129-L146
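The four context managers above (_image, _decoder, _region, _decoded_matrix_region) are designed to nest; a sketch of the decode loop they support, with the pixels buffer, width, height and pack values left as assumptions:

    results = []
    with _image(pixels, width, height, pack) as img:
        with _decoder(img, shrink=1) as decoder:
            while True:
                with _region(decoder, timeout=None) as region:
                    if not region:
                        break  # no more regions in the image
                    res = _decode_region(decoder, region, corrections=0, shrink=1)
                    if res:
                        results.append(res)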
1,052
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/pylibdmtx.py
_decode_region
def _decode_region(decoder, region, corrections, shrink): """Decodes and returns the value in a region. Args: region (DmtxRegion): Returns: Decoded or None: The decoded value. """ with _decoded_matrix_region(decoder, region, corrections) as msg: if msg: # Coordinates p00 = DmtxVector2() p11 = DmtxVector2(1.0, 1.0) dmtxMatrix3VMultiplyBy( p00, region.contents.fit2raw ) dmtxMatrix3VMultiplyBy(p11, region.contents.fit2raw) x0 = int((shrink * p00.X) + 0.5) y0 = int((shrink * p00.Y) + 0.5) x1 = int((shrink * p11.X) + 0.5) y1 = int((shrink * p11.Y) + 0.5) return Decoded( string_at(msg.contents.output), Rect(x0, y0, x1 - x0, y1 - y0) ) else: return None
python
def _decode_region(decoder, region, corrections, shrink): """Decodes and returns the value in a region. Args: region (DmtxRegion): Returns: Decoded or None: The decoded value. """ with _decoded_matrix_region(decoder, region, corrections) as msg: if msg: # Coordinates p00 = DmtxVector2() p11 = DmtxVector2(1.0, 1.0) dmtxMatrix3VMultiplyBy( p00, region.contents.fit2raw ) dmtxMatrix3VMultiplyBy(p11, region.contents.fit2raw) x0 = int((shrink * p00.X) + 0.5) y0 = int((shrink * p00.Y) + 0.5) x1 = int((shrink * p11.X) + 0.5) y1 = int((shrink * p11.Y) + 0.5) return Decoded( string_at(msg.contents.output), Rect(x0, y0, x1 - x0, y1 - y0) ) else: return None
[ "def", "_decode_region", "(", "decoder", ",", "region", ",", "corrections", ",", "shrink", ")", ":", "with", "_decoded_matrix_region", "(", "decoder", ",", "region", ",", "corrections", ")", "as", "msg", ":", "if", "msg", ":", "# Coordinates", "p00", "=", "DmtxVector2", "(", ")", "p11", "=", "DmtxVector2", "(", "1.0", ",", "1.0", ")", "dmtxMatrix3VMultiplyBy", "(", "p00", ",", "region", ".", "contents", ".", "fit2raw", ")", "dmtxMatrix3VMultiplyBy", "(", "p11", ",", "region", ".", "contents", ".", "fit2raw", ")", "x0", "=", "int", "(", "(", "shrink", "*", "p00", ".", "X", ")", "+", "0.5", ")", "y0", "=", "int", "(", "(", "shrink", "*", "p00", ".", "Y", ")", "+", "0.5", ")", "x1", "=", "int", "(", "(", "shrink", "*", "p11", ".", "X", ")", "+", "0.5", ")", "y1", "=", "int", "(", "(", "shrink", "*", "p11", ".", "Y", ")", "+", "0.5", ")", "return", "Decoded", "(", "string_at", "(", "msg", ".", "contents", ".", "output", ")", ",", "Rect", "(", "x0", ",", "y0", ",", "x1", "-", "x0", ",", "y1", "-", "y0", ")", ")", "else", ":", "return", "None" ]
Decodes and returns the value in a region. Args: region (DmtxRegion): Returns: Decoded or None: The decoded value.
[ "Decodes", "and", "returns", "the", "value", "in", "a", "region", "." ]
a425ec36050500af4875bf94eda02feb26ea62ad
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/pylibdmtx.py#L149-L177
1,053
NaturalHistoryMuseum/pylibdmtx
pylibdmtx/pylibdmtx.py
encode
def encode(data, scheme=None, size=None): """ Encodes `data` in a DataMatrix image. For now bpp is the libdmtx default which is 24 Args: data: bytes instance scheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`. If `None`, defaults to 'Ascii'. size: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`. If `None`, defaults to 'ShapeAuto'. Returns: Encoded: with properties `(width, height, bpp, pixels)`. You can use that result to build a PIL image: Image.frombytes('RGB', (width, height), pixels) """ size = size if size else 'ShapeAuto' size_name = '{0}{1}'.format(ENCODING_SIZE_PREFIX, size) if not hasattr(DmtxSymbolSize, size_name): raise PyLibDMTXError( 'Invalid size [{0}]: should be one of {1}'.format( size, ENCODING_SIZE_NAMES ) ) size = getattr(DmtxSymbolSize, size_name) scheme = scheme if scheme else 'Ascii' scheme_name = '{0}{1}'.format( ENCODING_SCHEME_PREFIX, scheme.capitalize() ) if not hasattr(DmtxScheme, scheme_name): raise PyLibDMTXError( 'Invalid scheme [{0}]: should be one of {1}'.format( scheme, ENCODING_SCHEME_NAMES ) ) scheme = getattr(DmtxScheme, scheme_name) with _encoder() as encoder: dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropScheme, scheme) dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropSizeRequest, size) if dmtxEncodeDataMatrix(encoder, len(data), cast(data, c_ubyte_p)) == 0: raise PyLibDMTXError( 'Could not encode data, possibly because the image is not ' 'large enough to contain the data' ) w, h, bpp = map( partial(dmtxImageGetProp, encoder[0].image), ( DmtxProperty.DmtxPropWidth, DmtxProperty.DmtxPropHeight, DmtxProperty.DmtxPropBitsPerPixel ) ) size = w * h * bpp // 8 pixels = cast( encoder[0].image[0].pxl, ctypes.POINTER(ctypes.c_ubyte * size) ) return Encoded( width=w, height=h, bpp=bpp, pixels=ctypes.string_at(pixels, size) )
python
def encode(data, scheme=None, size=None): """ Encodes `data` in a DataMatrix image. For now bpp is the libdmtx default which is 24 Args: data: bytes instance scheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`. If `None`, defaults to 'Ascii'. size: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`. If `None`, defaults to 'ShapeAuto'. Returns: Encoded: with properties `(width, height, bpp, pixels)`. You can use that result to build a PIL image: Image.frombytes('RGB', (width, height), pixels) """ size = size if size else 'ShapeAuto' size_name = '{0}{1}'.format(ENCODING_SIZE_PREFIX, size) if not hasattr(DmtxSymbolSize, size_name): raise PyLibDMTXError( 'Invalid size [{0}]: should be one of {1}'.format( size, ENCODING_SIZE_NAMES ) ) size = getattr(DmtxSymbolSize, size_name) scheme = scheme if scheme else 'Ascii' scheme_name = '{0}{1}'.format( ENCODING_SCHEME_PREFIX, scheme.capitalize() ) if not hasattr(DmtxScheme, scheme_name): raise PyLibDMTXError( 'Invalid scheme [{0}]: should be one of {1}'.format( scheme, ENCODING_SCHEME_NAMES ) ) scheme = getattr(DmtxScheme, scheme_name) with _encoder() as encoder: dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropScheme, scheme) dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropSizeRequest, size) if dmtxEncodeDataMatrix(encoder, len(data), cast(data, c_ubyte_p)) == 0: raise PyLibDMTXError( 'Could not encode data, possibly because the image is not ' 'large enough to contain the data' ) w, h, bpp = map( partial(dmtxImageGetProp, encoder[0].image), ( DmtxProperty.DmtxPropWidth, DmtxProperty.DmtxPropHeight, DmtxProperty.DmtxPropBitsPerPixel ) ) size = w * h * bpp // 8 pixels = cast( encoder[0].image[0].pxl, ctypes.POINTER(ctypes.c_ubyte * size) ) return Encoded( width=w, height=h, bpp=bpp, pixels=ctypes.string_at(pixels, size) )
[ "def", "encode", "(", "data", ",", "scheme", "=", "None", ",", "size", "=", "None", ")", ":", "size", "=", "size", "if", "size", "else", "'ShapeAuto'", "size_name", "=", "'{0}{1}'", ".", "format", "(", "ENCODING_SIZE_PREFIX", ",", "size", ")", "if", "not", "hasattr", "(", "DmtxSymbolSize", ",", "size_name", ")", ":", "raise", "PyLibDMTXError", "(", "'Invalid size [{0}]: should be one of {1}'", ".", "format", "(", "size", ",", "ENCODING_SIZE_NAMES", ")", ")", "size", "=", "getattr", "(", "DmtxSymbolSize", ",", "size_name", ")", "scheme", "=", "scheme", "if", "scheme", "else", "'Ascii'", "scheme_name", "=", "'{0}{1}'", ".", "format", "(", "ENCODING_SCHEME_PREFIX", ",", "scheme", ".", "capitalize", "(", ")", ")", "if", "not", "hasattr", "(", "DmtxScheme", ",", "scheme_name", ")", ":", "raise", "PyLibDMTXError", "(", "'Invalid scheme [{0}]: should be one of {1}'", ".", "format", "(", "scheme", ",", "ENCODING_SCHEME_NAMES", ")", ")", "scheme", "=", "getattr", "(", "DmtxScheme", ",", "scheme_name", ")", "with", "_encoder", "(", ")", "as", "encoder", ":", "dmtxEncodeSetProp", "(", "encoder", ",", "DmtxProperty", ".", "DmtxPropScheme", ",", "scheme", ")", "dmtxEncodeSetProp", "(", "encoder", ",", "DmtxProperty", ".", "DmtxPropSizeRequest", ",", "size", ")", "if", "dmtxEncodeDataMatrix", "(", "encoder", ",", "len", "(", "data", ")", ",", "cast", "(", "data", ",", "c_ubyte_p", ")", ")", "==", "0", ":", "raise", "PyLibDMTXError", "(", "'Could not encode data, possibly because the image is not '", "'large enough to contain the data'", ")", "w", ",", "h", ",", "bpp", "=", "map", "(", "partial", "(", "dmtxImageGetProp", ",", "encoder", "[", "0", "]", ".", "image", ")", ",", "(", "DmtxProperty", ".", "DmtxPropWidth", ",", "DmtxProperty", ".", "DmtxPropHeight", ",", "DmtxProperty", ".", "DmtxPropBitsPerPixel", ")", ")", "size", "=", "w", "*", "h", "*", "bpp", "//", "8", "pixels", "=", "cast", "(", "encoder", "[", "0", "]", ".", "image", "[", "0", "]", ".", "pxl", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_ubyte", "*", "size", ")", ")", "return", "Encoded", "(", "width", "=", "w", ",", "height", "=", "h", ",", "bpp", "=", "bpp", ",", "pixels", "=", "ctypes", ".", "string_at", "(", "pixels", ",", "size", ")", ")" ]
Encodes `data` in a DataMatrix image. For now bpp is the libdmtx default which is 24 Args: data: bytes instance scheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`. If `None`, defaults to 'Ascii'. size: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`. If `None`, defaults to 'ShapeAuto'. Returns: Encoded: with properties `(width, height, bpp, pixels)`. You can use that result to build a PIL image: Image.frombytes('RGB', (width, height), pixels)
[ "Encodes", "data", "in", "a", "DataMatrix", "image", "." ]
a425ec36050500af4875bf94eda02feb26ea62ad
https://github.com/NaturalHistoryMuseum/pylibdmtx/blob/a425ec36050500af4875bf94eda02feb26ea62ad/pylibdmtx/pylibdmtx.py#L312-L378
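Putting the Returns note in the encode docstring above into a runnable sketch (PIL/Pillow assumed installed):

    from PIL import Image
    from pylibdmtx.pylibdmtx import encode

    encoded = encode(b'hello world')
    img = Image.frombytes('RGB', (encoded.width, encoded.height), encoded.pixels)
    img.save('dmtx.png')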
1,054
GeoPyTool/GeoPyTool
Experimental/Alpah_Shape_2D.py
add_edge
def add_edge(edges, edge_points, coords, i, j): """ Add a line between the i-th and j-th points, if not in the list already """ if (i, j) in edges or (j, i) in edges: # already added return edges.add((i, j)) edge_points.append(coords[[i, j]])
python
def add_edge(edges, edge_points, coords, i, j): """ Add a line between the i-th and j-th points, if not in the list already """ if (i, j) in edges or (j, i) in edges: # already added return edges.add((i, j)) edge_points.append(coords[[i, j]])
[ "def", "add_edge", "(", "edges", ",", "edge_points", ",", "coords", ",", "i", ",", "j", ")", ":", "if", "(", "i", ",", "j", ")", "in", "edges", "or", "(", "j", ",", "i", ")", "in", "edges", ":", "# already added", "return", "edges", ".", "add", "(", "(", "i", ",", "j", ")", ")", "edge_points", ".", "append", "(", "coords", "[", "[", "i", ",", "j", "]", "]", ")" ]
Add a line between the i-th and j-th points, if not in the list already
[ "Add", "a", "line", "between", "the", "i", "-", "th", "and", "j", "-", "th", "points", "if", "not", "in", "the", "list", "already" ]
8c198aa42e4fbdf62fac05d40cbf4d1086328da3
https://github.com/GeoPyTool/GeoPyTool/blob/8c198aa42e4fbdf62fac05d40cbf4d1086328da3/Experimental/Alpah_Shape_2D.py#L28-L35
1,055
GeoPyTool/GeoPyTool
geopytool/CustomClass.py
Line.sequence
def sequence(self): ''' sort the points in the line with given option ''' if (len(self.Points[0]) == 2): if (self.Sort == 'X' or self.Sort == 'x'): self.Points.sort(key=lambda x: x[0]) self.order(self.Points) elif (self.Sort == 'Y' or self.Sort == 'y'): self.Points.sort(key=lambda x: x[1]) self.order(self.Points) else: self.order(self.Points) if (len(self.Points[0]) == 3): if (self.Sort == 'X' or self.Sort == 'x'): self.Points.sort(key=lambda x: x[0]) self.order(self.Points) elif (self.Sort == 'Y' or self.Sort == 'y'): self.Points.sort(key=lambda x: x[1]) self.order(self.Points) elif (self.Sort == 'Z' or self.Sort == 'z'): self.Points.sort(key=lambda x: x[2]) self.order(self.Points) else: self.order(self.Points)
python
def sequence(self): ''' sort the points in the line with given option ''' if (len(self.Points[0]) == 2): if (self.Sort == 'X' or self.Sort == 'x'): self.Points.sort(key=lambda x: x[0]) self.order(self.Points) elif (self.Sort == 'Y' or self.Sort == 'y'): self.Points.sort(key=lambda x: x[1]) self.order(self.Points) else: self.order(self.Points) if (len(self.Points[0]) == 3): if (self.Sort == 'X' or self.Sort == 'x'): self.Points.sort(key=lambda x: x[0]) self.order(self.Points) elif (self.Sort == 'Y' or self.Sort == 'y'): self.Points.sort(key=lambda x: x[1]) self.order(self.Points) elif (self.Sort == 'Z' or self.Sort == 'z'): self.Points.sort(key=lambda x: x[2]) self.order(self.Points) else: self.order(self.Points)
[ "def", "sequence", "(", "self", ")", ":", "if", "(", "len", "(", "self", ".", "Points", "[", "0", "]", ")", "==", "2", ")", ":", "if", "(", "self", ".", "Sort", "==", "'X'", "or", "self", ".", "Sort", "==", "'x'", ")", ":", "self", ".", "Points", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "self", ".", "order", "(", "self", ".", "Points", ")", "elif", "(", "self", ".", "Sort", "==", "'Y'", "or", "self", ".", "Sort", "==", "'y'", ")", ":", "self", ".", "Points", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "self", ".", "order", "(", "self", ".", "Points", ")", "else", ":", "self", ".", "order", "(", "self", ".", "Points", ")", "if", "(", "len", "(", "self", ".", "Points", "[", "0", "]", ")", "==", "3", ")", ":", "if", "(", "self", ".", "Sort", "==", "'X'", "or", "self", ".", "Sort", "==", "'x'", ")", ":", "self", ".", "Points", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "self", ".", "order", "(", "self", ".", "Points", ")", "elif", "(", "self", ".", "Sort", "==", "'Y'", "or", "self", ".", "Sort", "==", "'y'", ")", ":", "self", ".", "Points", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "self", ".", "order", "(", "self", ".", "Points", ")", "elif", "(", "self", ".", "Sort", "==", "'Z'", "or", "self", ".", "Sort", "==", "'z'", ")", ":", "self", ".", "Points", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "2", "]", ")", "self", ".", "order", "(", "self", ".", "Points", ")", "else", ":", "self", ".", "order", "(", "self", ".", "Points", ")" ]
sort the points in the line with given option
[ "sort", "the", "points", "in", "the", "line", "with", "given", "option" ]
8c198aa42e4fbdf62fac05d40cbf4d1086328da3
https://github.com/GeoPyTool/GeoPyTool/blob/8c198aa42e4fbdf62fac05d40cbf4d1086328da3/geopytool/CustomClass.py#L367-L394
1,056
HDI-Project/MLPrimitives
mlprimitives/adapters/pandas.py
resample
def resample(df, rule, time_index, groupby=None, aggregation='mean'): """pd.DataFrame.resample adapter. Call the `df.resample` method on the given time_index and afterwards call the indicated aggregation. Optionally group the dataframe by the indicated columns before performing the resampling. If the groupby option is used, the result is a multi-index dataframe. Args: df (pandas.DataFrame): DataFrame to resample. rule (str): The offset string or object representing target conversion. groupby (list): Optional list of columns to group by. time_index (str): Name of the column to use as the time index. aggregation (str): Name of the aggregation function to use. Returns: pandas.DataFrame: resampled dataframe """ if groupby: df = df.groupby(groupby) df = df.resample(rule, on=time_index) df = getattr(df, aggregation)() for column in groupby or []: del df[column] return df
python
def resample(df, rule, time_index, groupby=None, aggregation='mean'): """pd.DataFrame.resample adapter. Call the `df.resample` method on the given time_index and afterwards call the indicated aggregation. Optionally group the dataframe by the indicated columns before performing the resampling. If the groupby option is used, the result is a multi-index dataframe. Args: df (pandas.DataFrame): DataFrame to resample. rule (str): The offset string or object representing target conversion. groupby (list): Optional list of columns to group by. time_index (str): Name of the column to use as the time index. aggregation (str): Name of the aggregation function to use. Returns: pandas.DataFrame: resampled dataframe """ if groupby: df = df.groupby(groupby) df = df.resample(rule, on=time_index) df = getattr(df, aggregation)() for column in groupby or []: del df[column] return df
[ "def", "resample", "(", "df", ",", "rule", ",", "time_index", ",", "groupby", "=", "None", ",", "aggregation", "=", "'mean'", ")", ":", "if", "groupby", ":", "df", "=", "df", ".", "groupby", "(", "groupby", ")", "df", "=", "df", ".", "resample", "(", "rule", ",", "on", "=", "time_index", ")", "df", "=", "getattr", "(", "df", ",", "aggregation", ")", "(", ")", "for", "column", "in", "groupby", "or", "[", "]", ":", "del", "df", "[", "column", "]", "return", "df" ]
pd.DataFrame.resample adapter. Call the `df.resample` method on the given time_index and afterwards call the indicated aggregation. Optionally group the dataframe by the indicated columns before performing the resampling. If the groupby option is used, the result is a multi-index dataframe. Args: df (pandas.DataFrame): DataFrame to resample. rule (str): The offset string or object representing target conversion. groupby (list): Optional list of columns to group by. time_index (str): Name of the column to use as the time index. aggregation (str): Name of the aggregation function to use. Returns: pandas.DataFrame: resampled dataframe
[ "pd", ".", "DataFrame", ".", "resample", "adapter", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/adapters/pandas.py#L1-L30
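A minimal usage sketch for the (fixed) resample adapter above, without the groupby option; exact output columns depend on your pandas version:

    import pandas as pd

    df = pd.DataFrame({
        'ts': pd.date_range('2020-01-01', periods=6, freq='12h'),
        'value': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
    })
    out = resample(df, '1D', time_index='ts')
    # daily means: 2020-01-01 -> 1.5, 2020-01-02 -> 3.5, 2020-01-03 -> 5.5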
1,057
HDI-Project/MLPrimitives
mlprimitives/adapters/pandas.py
_join_names
def _join_names(names): """Join the names of a multi-level index with an underscore.""" levels = (str(name) for name in names if name != '') return '_'.join(levels)
python
def _join_names(names): """Join the names of a multi-level index with an underscore.""" levels = (str(name) for name in names if name != '') return '_'.join(levels)
[ "def", "_join_names", "(", "names", ")", ":", "levels", "=", "(", "str", "(", "name", ")", "for", "name", "in", "names", "if", "name", "!=", "''", ")", "return", "'_'", ".", "join", "(", "levels", ")" ]
Join the names of a multi-level index with an underscore.
[ "Join", "the", "names", "of", "a", "multi", "-", "level", "index", "with", "an", "underscore", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/adapters/pandas.py#L33-L37
1,058
HDI-Project/MLPrimitives
mlprimitives/adapters/pandas.py
unstack
def unstack(df, level=-1, reset_index=True): """pd.DataFrame.unstack adapter. Call the `df.unstack` method using the indicated level and afterwards join the column names using an underscore. Args: df (pandas.DataFrame): DataFrame to unstack. level (str, int or list): Level(s) of index to unstack, can pass level name reset_index (bool): Whether to reset the index after unstacking Returns: pandas.DataFrame: unstacked dataframe """ df = df.unstack(level=level) if reset_index: df = df.reset_index() df.columns = df.columns.map(_join_names) return df
python
def unstack(df, level=-1, reset_index=True): """pd.DataFrame.unstack adapter. Call the `df.unstack` method using the indicated level and afterwards join the column names using an underscore. Args: df (pandas.DataFrame): DataFrame to unstack. level (str, int or list): Level(s) of index to unstack, can pass level name reset_index (bool): Whether to reset the index after unstacking Returns: pandas.DataFrame: unstacked dataframe """ df = df.unstack(level=level) if reset_index: df = df.reset_index() df.columns = df.columns.map(_join_names) return df
[ "def", "unstack", "(", "df", ",", "level", "=", "-", "1", ",", "reset_index", "=", "True", ")", ":", "df", "=", "df", ".", "unstack", "(", "level", "=", "level", ")", "if", "reset_index", ":", "df", "=", "df", ".", "reset_index", "(", ")", "df", ".", "columns", "=", "df", ".", "columns", ".", "map", "(", "_join_names", ")", "return", "df" ]
pd.DataFrame.unstack adapter. Call the `df.unstack` method using the indicated level and afterwards join the column names using an underscore. Args: df (pandas.DataFrame): DataFrame to unstack. level (str, int or list): Level(s) of index to unstack, can pass level name reset_index (bool): Whether to reset the index after unstacking Returns: pandas.DataFrame: unstacked dataframe
[ "pd", ".", "DataFrame", ".", "unstack", "adapter", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/adapters/pandas.py#L40-L59
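A usage sketch for unstack combined with _join_names above; the multi-level columns produced by a groupby aggregation are flattened with underscores:

    import pandas as pd

    df = pd.DataFrame({'g': ['a', 'a', 'b'], 'h': ['x', 'y', 'x'], 'v': [1, 2, 3]})
    stacked = df.groupby(['g', 'h']).sum()
    flat = unstack(stacked, level='h')
    # columns: ['g', 'v_x', 'v_y']; row b has NaN under v_y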
1,059
HDI-Project/MLPrimitives
mlprimitives/datasets.py
load_boston_multitask
def load_boston_multitask(): """Boston House Prices Dataset with a synthetic multitask output. The multitask output is obtained by applying a linear transformation to the original y and adding it as a second output column. """ dataset = datasets.load_boston() y = dataset.target target = np.column_stack([y, 2 * y + 5]) return Dataset(load_boston.__doc__, dataset.data, target, r2_score)
python
def load_boston_multitask(): """Boston House Prices Dataset with a synthetic multitask output. The multitask output is obtained by applying a linear transformation to the original y and adding it as a second output column. """ dataset = datasets.load_boston() y = dataset.target target = np.column_stack([y, 2 * y + 5]) return Dataset(load_boston.__doc__, dataset.data, target, r2_score)
[ "def", "load_boston_multitask", "(", ")", ":", "dataset", "=", "datasets", ".", "load_boston", "(", ")", "y", "=", "dataset", ".", "target", "target", "=", "np", ".", "column_stack", "(", "[", "y", ",", "2", "*", "y", "+", "5", "]", ")", "return", "Dataset", "(", "load_boston", ".", "__doc__", ",", "dataset", ".", "data", ",", "target", ",", "r2_score", ")" ]
Boston House Prices Dataset with a synthetic multitask output. The multitask output is obtained by applying a linear transformation to the original y and adding it as a second output column.
[ "Boston", "House", "Prices", "Dataset", "with", "a", "synthetic", "multitask", "output", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/datasets.py#L482-L491
1,060
HDI-Project/MLPrimitives
mlprimitives/candidates/audio_featurization.py
energy
def energy(data): """Computes signal energy of data""" data = np.mean(data, axis=1) return np.sum(data ** 2) / np.float64(len(data))
python
def energy(data): """Computes signal energy of data""" data = np.mean(data, axis=1) return np.sum(data ** 2) / np.float64(len(data))
[ "def", "energy", "(", "data", ")", ":", "data", "=", "np", ".", "mean", "(", "data", ",", "axis", "=", "1", ")", "return", "np", ".", "sum", "(", "data", "**", "2", ")", "/", "np", ".", "float64", "(", "len", "(", "data", ")", ")" ]
Computes signal energy of data
[ "Computes", "signal", "energy", "of", "data" ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/candidates/audio_featurization.py#L10-L13
1,061
HDI-Project/MLPrimitives
mlprimitives/candidates/audio_featurization.py
zcr
def zcr(data): """Computes zero crossing rate of segment""" data = np.mean(data, axis=1) count = len(data) countZ = np.sum(np.abs(np.diff(np.sign(data)))) / 2 return (np.float64(countZ) / np.float64(count - 1.0))
python
def zcr(data): """Computes zero crossing rate of segment""" data = np.mean(data, axis=1) count = len(data) countZ = np.sum(np.abs(np.diff(np.sign(data)))) / 2 return (np.float64(countZ) / np.float64(count - 1.0))
[ "def", "zcr", "(", "data", ")", ":", "data", "=", "np", ".", "mean", "(", "data", ",", "axis", "=", "1", ")", "count", "=", "len", "(", "data", ")", "countZ", "=", "np", ".", "sum", "(", "np", ".", "abs", "(", "np", ".", "diff", "(", "np", ".", "sign", "(", "data", ")", ")", ")", ")", "/", "2", "return", "(", "np", ".", "float64", "(", "countZ", ")", "/", "np", ".", "float64", "(", "count", "-", "1.0", ")", ")" ]
Computes zero crossing rate of segment
[ "Computes", "zero", "crossing", "rate", "of", "segment" ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/candidates/audio_featurization.py#L51-L57
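A quick numeric check for zcr above; data is shaped (n_samples, n_channels) and is averaged across channels first, and a signal that flips sign every sample crosses zero at every step:

    import numpy as np

    data = np.array([[1.0], [-1.0], [1.0], [-1.0]])  # 4 samples, 1 channel
    zcr(data)  # -> 3 sign changes over 3 intervals == 1.0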
1,062
HDI-Project/MLPrimitives
mlprimitives/candidates/audio_featurization.py
spectral_flux
def spectral_flux(d0, d1): """ Computes the spectral flux feature of the current frame """ # compute the spectral flux as the sum of square distances: d0 = np.mean(d0, axis=1) d1 = np.mean(d1, axis=1) nFFT = min(len(d0) // 2, len(d1) // 2) X = FFT(d0, nFFT) Xprev = FFT(d1, nFFT) # L = min(len(X), len(Xprev)) sumX = np.sum(X + EPSILON) sumPrevX = np.sum(Xprev + EPSILON) return np.sum((X / sumX - Xprev / sumPrevX) ** 2)
python
def spectral_flux(d0, d1): """ Computes the spectral flux feature of the current frame """ # compute the spectral flux as the sum of square distances: d0 = np.mean(d0, axis=1) d1 = np.mean(d1, axis=1) nFFT = min(len(d0) // 2, len(d1) // 2) X = FFT(d0, nFFT) Xprev = FFT(d1, nFFT) # L = min(len(X), len(Xprev)) sumX = np.sum(X + EPSILON) sumPrevX = np.sum(Xprev + EPSILON) return np.sum((X / sumX - Xprev / sumPrevX) ** 2)
[ "def", "spectral_flux", "(", "d0", ",", "d1", ")", ":", "# compute the spectral flux as the sum of square distances:", "d0", "=", "np", ".", "mean", "(", "d0", ",", "axis", "=", "1", ")", "d1", "=", "np", ".", "mean", "(", "d1", ",", "axis", "=", "1", ")", "nFFT", "=", "min", "(", "len", "(", "d0", ")", "//", "2", ",", "len", "(", "d1", ")", "//", "2", ")", "X", "=", "FFT", "(", "d0", ",", "nFFT", ")", "Xprev", "=", "FFT", "(", "d1", ",", "nFFT", ")", "# L = min(len(X), len(Xprev))", "sumX", "=", "np", ".", "sum", "(", "X", "+", "EPSILON", ")", "sumPrevX", "=", "np", ".", "sum", "(", "Xprev", "+", "EPSILON", ")", "return", "np", ".", "sum", "(", "(", "X", "/", "sumX", "-", "Xprev", "/", "sumPrevX", ")", "**", "2", ")" ]
Computes the spectral flux feature of the current frame
[ "Computes", "the", "spectral", "flux", "feature", "of", "the", "current", "frame" ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/candidates/audio_featurization.py#L60-L75
1,063
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_preprocessing.py
rolling_window_sequences
def rolling_window_sequences(X, index, window_size, target_size, target_column): """Create rolling window sequences out of timeseries data.""" out_X = list() out_y = list() X_index = list() y_index = list() target = X[:, target_column] for start in range(len(X) - window_size - target_size + 1): end = start + window_size out_X.append(X[start:end]) out_y.append(target[end:end + target_size]) X_index.append(index[start]) y_index.append(index[end]) return np.asarray(out_X), np.asarray(out_y), np.asarray(X_index), np.asarray(y_index)
python
def rolling_window_sequences(X, index, window_size, target_size, target_column): """Create rolling window sequences out of timeseries data.""" out_X = list() out_y = list() X_index = list() y_index = list() target = X[:, target_column] for start in range(len(X) - window_size - target_size + 1): end = start + window_size out_X.append(X[start:end]) out_y.append(target[end:end + target_size]) X_index.append(index[start]) y_index.append(index[end]) return np.asarray(out_X), np.asarray(out_y), np.asarray(X_index), np.asarray(y_index)
[ "def", "rolling_window_sequences", "(", "X", ",", "index", ",", "window_size", ",", "target_size", ",", "target_column", ")", ":", "out_X", "=", "list", "(", ")", "out_y", "=", "list", "(", ")", "X_index", "=", "list", "(", ")", "y_index", "=", "list", "(", ")", "target", "=", "X", "[", ":", ",", "target_column", "]", "for", "start", "in", "range", "(", "len", "(", "X", ")", "-", "window_size", "-", "target_size", "+", "1", ")", ":", "end", "=", "start", "+", "window_size", "out_X", ".", "append", "(", "X", "[", "start", ":", "end", "]", ")", "out_y", ".", "append", "(", "target", "[", "end", ":", "end", "+", "target_size", "]", ")", "X_index", ".", "append", "(", "index", "[", "start", "]", ")", "y_index", ".", "append", "(", "index", "[", "end", "]", ")", "return", "np", ".", "asarray", "(", "out_X", ")", ",", "np", ".", "asarray", "(", "out_y", ")", ",", "np", ".", "asarray", "(", "X_index", ")", ",", "np", ".", "asarray", "(", "y_index", ")" ]
Create rolling window sequences out of timeseries data.
[ "Create", "rolling", "window", "sequences", "out", "of", "timeseries", "data", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_preprocessing.py#L7-L23
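A shape check for rolling_window_sequences above on a toy univariate series:

    import numpy as np

    X = np.arange(10, dtype=float).reshape(-1, 1)   # one feature column
    index = np.arange(10)                           # timestamps
    out_X, out_y, X_index, y_index = rolling_window_sequences(
        X, index, window_size=3, target_size=1, target_column=0)
    # out_X.shape == (7, 3, 1); out_y[0] == [3.]; X_index[0] == 0; y_index[0] == 3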
1,064
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_preprocessing.py
time_segments_average
def time_segments_average(X, interval, time_column): """Compute average of values over fixed length time segments.""" warnings.warn(_TIME_SEGMENTS_AVERAGE_DEPRECATION_WARNING, DeprecationWarning) if isinstance(X, np.ndarray): X = pd.DataFrame(X) X = X.sort_values(time_column).set_index(time_column) start_ts = X.index.values[0] max_ts = X.index.values[-1] values = list() index = list() while start_ts <= max_ts: end_ts = start_ts + interval subset = X.loc[start_ts:end_ts - 1] means = subset.mean(skipna=True).values values.append(means) index.append(start_ts) start_ts = end_ts return np.asarray(values), np.asarray(index)
python
def time_segments_average(X, interval, time_column): """Compute average of values over fixed length time segments.""" warnings.warn(_TIME_SEGMENTS_AVERAGE_DEPRECATION_WARNING, DeprecationWarning) if isinstance(X, np.ndarray): X = pd.DataFrame(X) X = X.sort_values(time_column).set_index(time_column) start_ts = X.index.values[0] max_ts = X.index.values[-1] values = list() index = list() while start_ts <= max_ts: end_ts = start_ts + interval subset = X.loc[start_ts:end_ts - 1] means = subset.mean(skipna=True).values values.append(means) index.append(start_ts) start_ts = end_ts return np.asarray(values), np.asarray(index)
[ "def", "time_segments_average", "(", "X", ",", "interval", ",", "time_column", ")", ":", "warnings", ".", "warn", "(", "_TIME_SEGMENTS_AVERAGE_DEPRECATION_WARNING", ",", "DeprecationWarning", ")", "if", "isinstance", "(", "X", ",", "np", ".", "ndarray", ")", ":", "X", "=", "pd", ".", "DataFrame", "(", "X", ")", "X", "=", "X", ".", "sort_values", "(", "time_column", ")", ".", "set_index", "(", "time_column", ")", "start_ts", "=", "X", ".", "index", ".", "values", "[", "0", "]", "max_ts", "=", "X", ".", "index", ".", "values", "[", "-", "1", "]", "values", "=", "list", "(", ")", "index", "=", "list", "(", ")", "while", "start_ts", "<=", "max_ts", ":", "end_ts", "=", "start_ts", "+", "interval", "subset", "=", "X", ".", "loc", "[", "start_ts", ":", "end_ts", "-", "1", "]", "means", "=", "subset", ".", "mean", "(", "skipna", "=", "True", ")", ".", "values", "values", ".", "append", "(", "means", ")", "index", ".", "append", "(", "start_ts", ")", "start_ts", "=", "end_ts", "return", "np", ".", "asarray", "(", "values", ")", ",", "np", ".", "asarray", "(", "index", ")" ]
Compute average of values over fixed length time segments.
[ "Compute", "average", "of", "values", "over", "fixed", "length", "time", "segments", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_preprocessing.py#L33-L55
1,065
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_preprocessing.py
time_segments_aggregate
def time_segments_aggregate(X, interval, time_column, method=['mean']): """Aggregate values over fixed length time segments.""" if isinstance(X, np.ndarray): X = pd.DataFrame(X) X = X.sort_values(time_column).set_index(time_column) if isinstance(method, str): method = [method] start_ts = X.index.values[0] max_ts = X.index.values[-1] values = list() index = list() while start_ts <= max_ts: end_ts = start_ts + interval subset = X.loc[start_ts:end_ts - 1] aggregated = [ getattr(subset, agg)(skipna=True).values for agg in method ] values.append(np.concatenate(aggregated)) index.append(start_ts) start_ts = end_ts return np.asarray(values), np.asarray(index)
python
def time_segments_aggregate(X, interval, time_column, method=['mean']): """Aggregate values over fixed length time segments.""" if isinstance(X, np.ndarray): X = pd.DataFrame(X) X = X.sort_values(time_column).set_index(time_column) if isinstance(method, str): method = [method] start_ts = X.index.values[0] max_ts = X.index.values[-1] values = list() index = list() while start_ts <= max_ts: end_ts = start_ts + interval subset = X.loc[start_ts:end_ts - 1] aggregated = [ getattr(subset, agg)(skipna=True).values for agg in method ] values.append(np.concatenate(aggregated)) index.append(start_ts) start_ts = end_ts return np.asarray(values), np.asarray(index)
[ "def", "time_segments_aggregate", "(", "X", ",", "interval", ",", "time_column", ",", "method", "=", "[", "'mean'", "]", ")", ":", "if", "isinstance", "(", "X", ",", "np", ".", "ndarray", ")", ":", "X", "=", "pd", ".", "DataFrame", "(", "X", ")", "X", "=", "X", ".", "sort_values", "(", "time_column", ")", ".", "set_index", "(", "time_column", ")", "if", "isinstance", "(", "method", ",", "str", ")", ":", "method", "=", "[", "method", "]", "start_ts", "=", "X", ".", "index", ".", "values", "[", "0", "]", "max_ts", "=", "X", ".", "index", ".", "values", "[", "-", "1", "]", "values", "=", "list", "(", ")", "index", "=", "list", "(", ")", "while", "start_ts", "<=", "max_ts", ":", "end_ts", "=", "start_ts", "+", "interval", "subset", "=", "X", ".", "loc", "[", "start_ts", ":", "end_ts", "-", "1", "]", "aggregated", "=", "[", "getattr", "(", "subset", ",", "agg", ")", "(", "skipna", "=", "True", ")", ".", "values", "for", "agg", "in", "method", "]", "values", ".", "append", "(", "np", ".", "concatenate", "(", "aggregated", ")", ")", "index", ".", "append", "(", "start_ts", ")", "start_ts", "=", "end_ts", "return", "np", ".", "asarray", "(", "values", ")", ",", "np", ".", "asarray", "(", "index", ")" ]
Aggregate values over fixed length time segments.
[ "Aggregate", "values", "over", "fixed", "length", "time", "segments", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_preprocessing.py#L58-L84
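A usage sketch for time_segments_aggregate above (which supersedes the deprecated time_segments_average); each output row concatenates one value per aggregation method:

    import numpy as np
    import pandas as pd

    X = pd.DataFrame({'time': np.arange(10), 'value': np.arange(10) * 2.0})
    values, index = time_segments_aggregate(
        X, interval=5, time_column='time', method=['mean', 'max'])
    # index -> [0, 5]; values[0] -> [mean, max] of the first segment == [4., 8.]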
1,066
HDI-Project/MLPrimitives
mlprimitives/utils.py
image_transform
def image_transform(X, function, reshape_before=False, reshape_after=False, width=None, height=None, **kwargs): """Apply a function image by image. Args: reshape_before: whether 1d array needs to be reshaped to a 2d image reshape_after: whether the returned values need to be reshaped back to a 1d array width: image width used to rebuild the 2d images. Required if the image is not square. height: image height used to rebuild the 2d images. Required if the image is not square. """ if not callable(function): function = import_object(function) if not callable(function): raise ValueError("function must be a str or a callable") flat_image = len(X[0].shape) == 1 if reshape_before and flat_image: if not (width and height): side_length = math.sqrt(X.shape[1]) if side_length.is_integer(): side_length = int(side_length) width = side_length height = side_length else: raise ValueError("Image sizes must be given for non-square images") else: reshape_before = False new_X = [] for image in X: if reshape_before: image = image.reshape((width, height)) features = function( image, **kwargs ) if reshape_after: features = np.reshape(features, X.shape[1]) new_X.append(features) return np.array(new_X)
python
def image_transform(X, function, reshape_before=False, reshape_after=False, width=None, height=None, **kwargs): """Apply a function image by image. Args: reshape_before: whether 1d array needs to be reshaped to a 2d image reshape_after: whether the returned values need to be reshaped back to a 1d array width: image width used to rebuild the 2d images. Required if the image is not square. height: image height used to rebuild the 2d images. Required if the image is not square. """ if not callable(function): function = import_object(function) if not callable(function): raise ValueError("function must be a str or a callable") flat_image = len(X[0].shape) == 1 if reshape_before and flat_image: if not (width and height): side_length = math.sqrt(X.shape[1]) if side_length.is_integer(): side_length = int(side_length) width = side_length height = side_length else: raise ValueError("Image sizes must be given for non-square images") else: reshape_before = False new_X = [] for image in X: if reshape_before: image = image.reshape((width, height)) features = function( image, **kwargs ) if reshape_after: features = np.reshape(features, X.shape[1]) new_X.append(features) return np.array(new_X)
[ "def", "image_transform", "(", "X", ",", "function", ",", "reshape_before", "=", "False", ",", "reshape_after", "=", "False", ",", "width", "=", "None", ",", "height", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "callable", "(", "function", ")", ":", "function", "=", "import_object", "(", "function", ")", "if", "not", "callable", "(", "function", ")", ":", "raise", "ValueError", "(", "\"function must be a str or a callable\"", ")", "flat_image", "=", "len", "(", "X", "[", "0", "]", ".", "shape", ")", "==", "1", "if", "reshape_before", "and", "flat_image", ":", "if", "not", "(", "width", "and", "height", ")", ":", "side_length", "=", "math", ".", "sqrt", "(", "X", ".", "shape", "[", "1", "]", ")", "if", "side_length", ".", "is_integer", "(", ")", ":", "side_length", "=", "int", "(", "side_length", ")", "width", "=", "side_length", "height", "=", "side_length", "else", ":", "raise", "ValueError", "(", "\"Image sizes must be given for non-square images\"", ")", "else", ":", "reshape_before", "=", "False", "new_X", "=", "[", "]", "for", "image", "in", "X", ":", "if", "reshape_before", ":", "image", "=", "image", ".", "reshape", "(", "(", "width", ",", "height", ")", ")", "features", "=", "function", "(", "image", ",", "*", "*", "kwargs", ")", "if", "reshape_after", ":", "features", "=", "np", ".", "reshape", "(", "features", ",", "X", ".", "shape", "[", "1", "]", ")", "new_X", ".", "append", "(", "features", ")", "return", "np", ".", "array", "(", "new_X", ")" ]
Apply a function image by image.

    Args:
        reshape_before: whether the 1d array needs to be reshaped to a 2d image
        reshape_after: whether the returned values need to be reshaped back to a 1d array
        width: image width used to rebuild the 2d images. Required if the image is not square.
        height: image height used to rebuild the 2d images. Required if the image is not square.
[ "Apply", "a", "function", "image", "by", "image", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/utils.py#L18-L65
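A minimal usage sketch for the image_transform entry above. The sample data and the scipy.ndimage.gaussian_filter transform are illustrative assumptions; only image_transform and import_object come from mlprimitives.utils:

import numpy as np
from mlprimitives.utils import image_transform

# ten flattened 8x8 "images"; 64 is a perfect square, so width/height are inferred
X = np.random.random((10, 64))
blurred = image_transform(
    X,
    'scipy.ndimage.gaussian_filter',   # a str is resolved through import_object
    reshape_before=True,
    reshape_after=True,
    sigma=1.0,
)
print(blurred.shape)   # (10, 64)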
1,067
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_anomalies.py
regression_errors
def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):
    """Compute an array of absolute errors comparing predictions and expected output.

    If smooth is True, apply EWMA to the resulting array of errors.

    Args:
        y (array): Ground truth.
        y_hat (array): Predictions array.
        smoothing_window (float): Size of the smoothing window, expressed as a proportion
            of the total length of y.
        smooth (bool): whether the returned errors should be smoothed with EWMA.

    Returns:
        (array): errors
    """
    errors = np.abs(y - y_hat)[:, 0]

    if not smooth:
        return errors

    smoothing_window = int(smoothing_window * len(y))

    return pd.Series(errors).ewm(span=smoothing_window).mean().values
python
def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):
    """Compute an array of absolute errors comparing predictions and expected output.

    If smooth is True, apply EWMA to the resulting array of errors.

    Args:
        y (array): Ground truth.
        y_hat (array): Predictions array.
        smoothing_window (float): Size of the smoothing window, expressed as a proportion
            of the total length of y.
        smooth (bool): whether the returned errors should be smoothed with EWMA.

    Returns:
        (array): errors
    """
    errors = np.abs(y - y_hat)[:, 0]

    if not smooth:
        return errors

    smoothing_window = int(smoothing_window * len(y))

    return pd.Series(errors).ewm(span=smoothing_window).mean().values
[ "def", "regression_errors", "(", "y", ",", "y_hat", ",", "smoothing_window", "=", "0.01", ",", "smooth", "=", "True", ")", ":", "errors", "=", "np", ".", "abs", "(", "y", "-", "y_hat", ")", "[", ":", ",", "0", "]", "if", "not", "smooth", ":", "return", "errors", "smoothing_window", "=", "int", "(", "smoothing_window", "*", "len", "(", "y", ")", ")", "return", "pd", ".", "Series", "(", "errors", ")", ".", "ewm", "(", "span", "=", "smoothing_window", ")", ".", "mean", "(", ")", ".", "values" ]
Compute an array of absolute errors comparing predictions and expected output.

    If smooth is True, apply EWMA to the resulting array of errors.

    Args:
        y (array): Ground truth.
        y_hat (array): Predictions array.
        smoothing_window (float): Size of the smoothing window, expressed as a proportion
            of the total length of y.
        smooth (bool): whether the returned errors should be smoothed with EWMA.

    Returns:
        (array): errors
[ "Compute", "an", "array", "of", "absolute", "errors", "comparing", "predictions", "and", "expected", "output", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_anomalies.py#L11-L33
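A small worked example for regression_errors (arrays are illustrative; the [:, 0] slice expects column vectors of shape (n, 1)):

import numpy as np
import pandas as pd

y = np.array([[0.0], [1.0], [2.0], [3.0]])
y_hat = np.array([[0.1], [0.9], [2.5], [2.2]])

regression_errors(y, y_hat, smooth=False)
# -> array([0.1, 0.1, 0.5, 0.8])
regression_errors(y, y_hat, smoothing_window=0.5)
# EWMA over a window of int(0.5 * 4) = 2 samples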
1,068
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_anomalies.py
deltas
def deltas(errors, epsilon, mean, std):
    """Compute mean and std deltas.

    delta_mean = mean(errors) - mean(all errors below epsilon)
    delta_std = std(errors) - std(all errors below epsilon)
    """
    below = errors[errors <= epsilon]
    if not len(below):
        return 0, 0

    return mean - below.mean(), std - below.std()
python
def deltas(errors, epsilon, mean, std):
    """Compute mean and std deltas.

    delta_mean = mean(errors) - mean(all errors below epsilon)
    delta_std = std(errors) - std(all errors below epsilon)
    """
    below = errors[errors <= epsilon]
    if not len(below):
        return 0, 0

    return mean - below.mean(), std - below.std()
[ "def", "deltas", "(", "errors", ",", "epsilon", ",", "mean", ",", "std", ")", ":", "below", "=", "errors", "[", "errors", "<=", "epsilon", "]", "if", "not", "len", "(", "below", ")", ":", "return", "0", ",", "0", "return", "mean", "-", "below", ".", "mean", "(", ")", ",", "std", "-", "below", ".", "std", "(", ")" ]
Compute mean and std deltas.

    delta_mean = mean(errors) - mean(all errors below epsilon)
    delta_std = std(errors) - std(all errors below epsilon)
[ "Compute", "mean", "and", "std", "deltas", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_anomalies.py#L36-L46
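A quick numeric illustration of deltas (values are made up):

import numpy as np

errors = np.array([1.0, 2.0, 3.0, 10.0])
mean, std = errors.mean(), errors.std()   # 4.0, ~3.54
deltas(errors, epsilon=5.0, mean=mean, std=std)
# -> (~2.0, ~2.72): dropping the outlier above epsilon shrinks both statistics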
1,069
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_anomalies.py
count_above
def count_above(errors, epsilon):
    """Count number of errors and continuous sequences above epsilon.

    Continuous sequences are counted by shifting and counting the number
    of positions where there was a change and the original value was true,
    which means that a sequence started at that position.
    """
    above = errors > epsilon
    total_above = len(errors[above])

    above = pd.Series(above)
    shift = above.shift(1)
    change = above != shift

    total_consecutive = sum(above & change)

    return total_above, total_consecutive
python
def count_above(errors, epsilon):
    """Count number of errors and continuous sequences above epsilon.

    Continuous sequences are counted by shifting and counting the number
    of positions where there was a change and the original value was true,
    which means that a sequence started at that position.
    """
    above = errors > epsilon
    total_above = len(errors[above])

    above = pd.Series(above)
    shift = above.shift(1)
    change = above != shift

    total_consecutive = sum(above & change)

    return total_above, total_consecutive
[ "def", "count_above", "(", "errors", ",", "epsilon", ")", ":", "above", "=", "errors", ">", "epsilon", "total_above", "=", "len", "(", "errors", "[", "above", "]", ")", "above", "=", "pd", ".", "Series", "(", "above", ")", "shift", "=", "above", ".", "shift", "(", "1", ")", "change", "=", "above", "!=", "shift", "total_consecutive", "=", "sum", "(", "above", "&", "change", ")", "return", "total_above", ",", "total_consecutive" ]
Count number of errors and continuous sequences above epsilon.

    Continuous sequences are counted by shifting and counting the number
    of positions where there was a change and the original value was true,
    which means that a sequence started at that position.
[ "Count", "number", "of", "errors", "and", "continuous", "sequences", "above", "epsilon", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_anomalies.py#L49-L65
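A short check of count_above on a toy array (illustrative):

import numpy as np
import pandas as pd

errors = np.array([0.1, 0.9, 0.8, 0.1, 0.7, 0.1])
count_above(errors, epsilon=0.5)
# -> (3, 2): three errors above epsilon, grouped in two consecutive runs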
1,070
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_anomalies.py
z_cost
def z_cost(z, errors, mean, std):
    """Compute how bad a z value is.

    The original formula is::

                     (delta_mean/mean) + (delta_std/std)
        ------------------------------------------------------
        number of errors above + (number of sequences above)^2

    which computes the "goodness" of `z`, meaning that the higher the value
    the better the `z`.

    In this case, we return this value inverted (we make it negative), to convert
    it into a cost function, as later on we will use scipy to minimize it.
    """
    epsilon = mean + z * std

    delta_mean, delta_std = deltas(errors, epsilon, mean, std)
    above, consecutive = count_above(errors, epsilon)

    numerator = -(delta_mean / mean + delta_std / std)
    denominator = above + consecutive ** 2

    if denominator == 0:
        return np.inf

    return numerator / denominator
python
def z_cost(z, errors, mean, std):
    """Compute how bad a z value is.

    The original formula is::

                     (delta_mean/mean) + (delta_std/std)
        ------------------------------------------------------
        number of errors above + (number of sequences above)^2

    which computes the "goodness" of `z`, meaning that the higher the value
    the better the `z`.

    In this case, we return this value inverted (we make it negative), to convert
    it into a cost function, as later on we will use scipy to minimize it.
    """
    epsilon = mean + z * std

    delta_mean, delta_std = deltas(errors, epsilon, mean, std)
    above, consecutive = count_above(errors, epsilon)

    numerator = -(delta_mean / mean + delta_std / std)
    denominator = above + consecutive ** 2

    if denominator == 0:
        return np.inf

    return numerator / denominator
[ "def", "z_cost", "(", "z", ",", "errors", ",", "mean", ",", "std", ")", ":", "epsilon", "=", "mean", "+", "z", "*", "std", "delta_mean", ",", "delta_std", "=", "deltas", "(", "errors", ",", "epsilon", ",", "mean", ",", "std", ")", "above", ",", "consecutive", "=", "count_above", "(", "errors", ",", "epsilon", ")", "numerator", "=", "-", "(", "delta_mean", "/", "mean", "+", "delta_std", "/", "std", ")", "denominator", "=", "above", "+", "consecutive", "**", "2", "if", "denominator", "==", "0", ":", "return", "np", ".", "inf", "return", "numerator", "/", "denominator" ]
Compute how bad a z value is.

    The original formula is::

                     (delta_mean/mean) + (delta_std/std)
        ------------------------------------------------------
        number of errors above + (number of sequences above)^2

    which computes the "goodness" of `z`, meaning that the higher the value
    the better the `z`.

    In this case, we return this value inverted (we make it negative), to convert
    it into a cost function, as later on we will use scipy to minimize it.
[ "Compute", "how", "bad", "a", "z", "value", "is", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_anomalies.py#L68-L95
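An illustrative scan over candidate z values (the random errors are a stand-in for real model residuals):

import numpy as np

errors = np.abs(np.random.randn(1000))
mean, std = errors.mean(), errors.std()
for z in (1, 2, 3, 4):
    print(z, z_cost(z, errors, mean, std))
# the most negative cost marks the best epsilon = mean + z * std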
1,071
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_anomalies.py
find_threshold
def find_threshold(errors, z_range=(0, 10)):
    """Find the ideal threshold.

    The ideal threshold is the one that minimizes the z_cost function.
    """
    mean = errors.mean()
    std = errors.std()

    min_z, max_z = z_range
    best_z = min_z
    best_cost = np.inf
    for z in range(min_z, max_z):
        best = fmin(z_cost, z, args=(errors, mean, std), full_output=True, disp=False)
        z, cost = best[0:2]
        if cost < best_cost:
            best_z = z[0]
            best_cost = cost   # keep the running minimum, not just the last candidate

    return mean + best_z * std
python
def find_threshold(errors, z_range=(0, 10)):
    """Find the ideal threshold.

    The ideal threshold is the one that minimizes the z_cost function.
    """
    mean = errors.mean()
    std = errors.std()

    min_z, max_z = z_range
    best_z = min_z
    best_cost = np.inf
    for z in range(min_z, max_z):
        best = fmin(z_cost, z, args=(errors, mean, std), full_output=True, disp=False)
        z, cost = best[0:2]
        if cost < best_cost:
            best_z = z[0]
            best_cost = cost   # keep the running minimum, not just the last candidate

    return mean + best_z * std
[ "def", "find_threshold", "(", "errors", ",", "z_range", "=", "(", "0", ",", "10", ")", ")", ":", "mean", "=", "errors", ".", "mean", "(", ")", "std", "=", "errors", ".", "std", "(", ")", "min_z", ",", "max_z", "=", "z_range", "best_z", "=", "min_z", "best_cost", "=", "np", ".", "inf", "for", "z", "in", "range", "(", "min_z", ",", "max_z", ")", ":", "best", "=", "fmin", "(", "z_cost", ",", "z", ",", "args", "=", "(", "errors", ",", "mean", ",", "std", ")", ",", "full_output", "=", "True", ",", "disp", "=", "False", ")", "z", ",", "cost", "=", "best", "[", "0", ":", "2", "]", "if", "cost", "<", "best_cost", ":", "best_z", "=", "z", "[", "0", "]", "return", "mean", "+", "best_z", "*", "std" ]
Find the ideal threshold.

    The ideal threshold is the one that minimizes the z_cost function.
[ "Find", "the", "ideal", "threshold", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_anomalies.py#L98-L116
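A usage sketch for find_threshold (synthetic errors; fmin comes from scipy.optimize, which the module presumably imports):

import numpy as np
from scipy.optimize import fmin

errors = np.abs(np.random.randn(2000))
errors[500:510] += 5.0               # hypothetical anomalous burst
threshold = find_threshold(errors)   # starts fmin from each integer z in [0, 10)
print(threshold > errors.mean())     # typically True: threshold sits above the mean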
1,072
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_anomalies.py
find_sequences
def find_sequences(errors, epsilon):
    """Find sequences of values that are above epsilon.

    This is done following these steps:

        * create a boolean mask that indicates which values are above epsilon.
        * shift this mask by one place, filling the empty gap with a False.
        * compare the shifted mask with the original one to see if there are changes.
        * consider as a sequence start any point which was true and has changed.
        * consider as a sequence end any point which was false and has changed.
    """
    above = pd.Series(errors > epsilon)
    shift = above.shift(1).fillna(False)
    change = above != shift

    index = above.index
    starts = index[above & change].tolist()
    ends = (index[~above & change] - 1).tolist()

    if len(ends) == len(starts) - 1:
        ends.append(len(above) - 1)

    return list(zip(starts, ends))
python
def find_sequences(errors, epsilon):
    """Find sequences of values that are above epsilon.

    This is done following these steps:

        * create a boolean mask that indicates which values are above epsilon.
        * shift this mask by one place, filling the empty gap with a False.
        * compare the shifted mask with the original one to see if there are changes.
        * consider as a sequence start any point which was true and has changed.
        * consider as a sequence end any point which was false and has changed.
    """
    above = pd.Series(errors > epsilon)
    shift = above.shift(1).fillna(False)
    change = above != shift

    index = above.index
    starts = index[above & change].tolist()
    ends = (index[~above & change] - 1).tolist()

    if len(ends) == len(starts) - 1:
        ends.append(len(above) - 1)

    return list(zip(starts, ends))
[ "def", "find_sequences", "(", "errors", ",", "epsilon", ")", ":", "above", "=", "pd", ".", "Series", "(", "errors", ">", "epsilon", ")", "shift", "=", "above", ".", "shift", "(", "1", ")", ".", "fillna", "(", "False", ")", "change", "=", "above", "!=", "shift", "index", "=", "above", ".", "index", "starts", "=", "index", "[", "above", "&", "change", "]", ".", "tolist", "(", ")", "ends", "=", "(", "index", "[", "~", "above", "&", "change", "]", "-", "1", ")", ".", "tolist", "(", ")", "if", "len", "(", "ends", ")", "==", "len", "(", "starts", ")", "-", "1", ":", "ends", ".", "append", "(", "len", "(", "above", ")", "-", "1", ")", "return", "list", "(", "zip", "(", "starts", ",", "ends", ")", ")" ]
Find sequences of values that are above epsilon.

    This is done following these steps:

        * create a boolean mask that indicates which values are above epsilon.
        * shift this mask by one place, filling the empty gap with a False.
        * compare the shifted mask with the original one to see if there are changes.
        * consider as a sequence start any point which was true and has changed.
        * consider as a sequence end any point which was false and has changed.
[ "Find", "sequences", "of", "values", "that", "are", "above", "epsilon", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_anomalies.py#L119-L140
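A worked example of find_sequences (toy values):

import numpy as np
import pandas as pd

errors = np.array([0.1, 0.9, 0.8, 0.1, 0.7])
find_sequences(errors, epsilon=0.5)
# -> [(1, 2), (4, 4)]: the trailing open run is closed at the last index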
1,073
HDI-Project/MLPrimitives
mlprimitives/custom/timeseries_anomalies.py
find_anomalies
def find_anomalies(errors, index, z_range=(0, 10)):
    """Find sequences of values that are anomalous.

    We first find the ideal threshold for the set of errors that we have,
    and then find the sequences of values that are above this threshold.

    Lastly, we compute a score proportional to the maximum error in the
    sequence, and finally return the index pairs that correspond to each
    sequence, along with its score.
    """
    threshold = find_threshold(errors, z_range)
    sequences = find_sequences(errors, threshold)

    anomalies = list()
    denominator = errors.mean() + errors.std()

    for start, stop in sequences:
        max_error = errors[start:stop + 1].max()
        score = (max_error - threshold) / denominator
        anomalies.append([index[start], index[stop], score])

    return np.asarray(anomalies)
python
def find_anomalies(errors, index, z_range=(0, 10)):
    """Find sequences of values that are anomalous.

    We first find the ideal threshold for the set of errors that we have,
    and then find the sequences of values that are above this threshold.

    Lastly, we compute a score proportional to the maximum error in the
    sequence, and finally return the index pairs that correspond to each
    sequence, along with its score.
    """
    threshold = find_threshold(errors, z_range)
    sequences = find_sequences(errors, threshold)

    anomalies = list()
    denominator = errors.mean() + errors.std()

    for start, stop in sequences:
        max_error = errors[start:stop + 1].max()
        score = (max_error - threshold) / denominator
        anomalies.append([index[start], index[stop], score])

    return np.asarray(anomalies)
[ "def", "find_anomalies", "(", "errors", ",", "index", ",", "z_range", "=", "(", "0", ",", "10", ")", ")", ":", "threshold", "=", "find_threshold", "(", "errors", ",", "z_range", ")", "sequences", "=", "find_sequences", "(", "errors", ",", "threshold", ")", "anomalies", "=", "list", "(", ")", "denominator", "=", "errors", ".", "mean", "(", ")", "+", "errors", ".", "std", "(", ")", "for", "start", ",", "stop", "in", "sequences", ":", "max_error", "=", "errors", "[", "start", ":", "stop", "+", "1", "]", ".", "max", "(", ")", "score", "=", "(", "max_error", "-", "threshold", ")", "/", "denominator", "anomalies", ".", "append", "(", "[", "index", "[", "start", "]", ",", "index", "[", "stop", "]", ",", "score", "]", ")", "return", "np", ".", "asarray", "(", "anomalies", ")" ]
Find sequences of values that are anomalous.

    We first find the ideal threshold for the set of errors that we have,
    and then find the sequences of values that are above this threshold.

    Lastly, we compute a score proportional to the maximum error in the
    sequence, and finally return the index pairs that correspond to each
    sequence, along with its score.
[ "Find", "sequences", "of", "values", "that", "are", "anomalous", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/custom/timeseries_anomalies.py#L143-L164
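An end-to-end sketch tying the helpers above together (synthetic data; exact scores depend on the random draw):

import numpy as np

errors = np.abs(np.random.randn(1000))
errors[300:310] += 6.0            # inject an anomalous stretch
index = np.arange(len(errors))    # stand-in for real timestamps
find_anomalies(errors, index)
# -> roughly array([[300., 309., <score>]])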
1,074
HDI-Project/MLPrimitives
mlprimitives/adapters/cv2.py
GaussianBlur
def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):
    """Apply Gaussian blur to the given data.

    Args:
        X: data to blur
        ksize_width: Gaussian kernel width
        ksize_height: Gaussian kernel height
        sigma_x: Gaussian kernel standard deviation in the X direction
        sigma_y: Gaussian kernel standard deviation in the Y direction
    """
    return image_transform(
        X,
        cv2.GaussianBlur,
        ksize=(ksize_width, ksize_height),
        sigmaX=sigma_x,
        sigmaY=sigma_y
    )
python
def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):
    """Apply Gaussian blur to the given data.

    Args:
        X: data to blur
        ksize_width: Gaussian kernel width
        ksize_height: Gaussian kernel height
        sigma_x: Gaussian kernel standard deviation in the X direction
        sigma_y: Gaussian kernel standard deviation in the Y direction
    """
    return image_transform(
        X,
        cv2.GaussianBlur,
        ksize=(ksize_width, ksize_height),
        sigmaX=sigma_x,
        sigmaY=sigma_y
    )
[ "def", "GaussianBlur", "(", "X", ",", "ksize_width", ",", "ksize_height", ",", "sigma_x", ",", "sigma_y", ")", ":", "return", "image_transform", "(", "X", ",", "cv2", ".", "GaussianBlur", ",", "ksize", "=", "(", "ksize_width", ",", "ksize_height", ")", ",", "sigmaX", "=", "sigma_x", ",", "sigmaY", "=", "sigma_y", ")" ]
Apply Gaussian blur to the given data.

    Args:
        X: data to blur
        ksize_width: Gaussian kernel width
        ksize_height: Gaussian kernel height
        sigma_x: Gaussian kernel standard deviation in the X direction
        sigma_y: Gaussian kernel standard deviation in the Y direction
[ "Apply", "Gaussian", "blur", "to", "the", "given", "data", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/adapters/cv2.py#L8-L22
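A usage sketch for the cv2 adapter (sample images are random; kernel dimensions must be odd, per OpenCV's GaussianBlur requirements):

import cv2
import numpy as np

X = np.random.randint(0, 255, (5, 28, 28), dtype=np.uint8)   # five 28x28 images
blurred = GaussianBlur(X, ksize_width=5, ksize_height=5, sigma_x=1.0, sigma_y=1.0)
print(blurred.shape)   # (5, 28, 28)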
1,075
HDI-Project/MLPrimitives
mlprimitives/candidates/timeseries_errors.py
get_anomalies
def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):
    """Helper method to get anomalies."""
    mu = np.mean(smoothed_errors)
    sigma = np.std(smoothed_errors)

    epsilon = mu + (z * sigma)

    # compare to epsilon
    errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(
        smoothed_errors,
        epsilon,
        y_true,
        error_buffer,
        window,
        all_anomalies
    )

    if len(errors_seq) > 0:
        anomaly_indices = prune_anomalies(
            errors_seq,
            smoothed_errors,
            max_error_below_e,
            anomaly_indices
        )

    return anomaly_indices
python
def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):
    """Helper method to get anomalies."""
    mu = np.mean(smoothed_errors)
    sigma = np.std(smoothed_errors)

    epsilon = mu + (z * sigma)

    # compare to epsilon
    errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(
        smoothed_errors,
        epsilon,
        y_true,
        error_buffer,
        window,
        all_anomalies
    )

    if len(errors_seq) > 0:
        anomaly_indices = prune_anomalies(
            errors_seq,
            smoothed_errors,
            max_error_below_e,
            anomaly_indices
        )

    return anomaly_indices
[ "def", "get_anomalies", "(", "smoothed_errors", ",", "y_true", ",", "z", ",", "window", ",", "all_anomalies", ",", "error_buffer", ")", ":", "mu", "=", "np", ".", "mean", "(", "smoothed_errors", ")", "sigma", "=", "np", ".", "std", "(", "smoothed_errors", ")", "epsilon", "=", "mu", "+", "(", "z", "*", "sigma", ")", "# compare to epsilon", "errors_seq", ",", "anomaly_indices", ",", "max_error_below_e", "=", "group_consecutive_anomalies", "(", "smoothed_errors", ",", "epsilon", ",", "y_true", ",", "error_buffer", ",", "window", ",", "all_anomalies", ")", "if", "len", "(", "errors_seq", ")", ">", "0", ":", "anomaly_indices", "=", "prune_anomalies", "(", "errors_seq", ",", "smoothed_errors", ",", "max_error_below_e", ",", "anomaly_indices", ")", "return", "anomaly_indices" ]
Helper method to get anomalies.
[ "Helper", "method", "to", "get", "anomalies", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/candidates/timeseries_errors.py#L186-L214
1,076
HDI-Project/MLPrimitives
mlprimitives/candidates/timeseries_errors.py
prune_anomalies
def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):
    """Helper method that removes anomalies which don't meet
    a minimum separation from the next anomaly.
    """
    # minimum accepted percent decrease between max errors in anomalous sequences
    MIN_PERCENT_DECREASE = 0.05
    e_seq_max, smoothed_errors_max = [], []

    for error_seq in e_seq:
        if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:
            sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]
            e_seq_max.append(max(sliced_errors))
            smoothed_errors_max.append(max(sliced_errors))

    smoothed_errors_max.sort(reverse=True)

    if max_error_below_e > 0:
        smoothed_errors_max.append(max_error_below_e)

    indices_remove = []

    for i in range(len(smoothed_errors_max)):
        if i < len(smoothed_errors_max) - 1:
            delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]
            perc_change = delta / smoothed_errors_max[i]
            if perc_change < MIN_PERCENT_DECREASE:
                indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))

    for index in sorted(indices_remove, reverse=True):
        del e_seq[index]

    pruned_indices = []

    for i in anomaly_indices:
        for error_seq in e_seq:
            if i >= error_seq[0] and i <= error_seq[1]:
                pruned_indices.append(i)

    return pruned_indices
python
def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):
    """Helper method that removes anomalies which don't meet
    a minimum separation from the next anomaly.
    """
    # minimum accepted percent decrease between max errors in anomalous sequences
    MIN_PERCENT_DECREASE = 0.05
    e_seq_max, smoothed_errors_max = [], []

    for error_seq in e_seq:
        if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:
            sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]
            e_seq_max.append(max(sliced_errors))
            smoothed_errors_max.append(max(sliced_errors))

    smoothed_errors_max.sort(reverse=True)

    if max_error_below_e > 0:
        smoothed_errors_max.append(max_error_below_e)

    indices_remove = []

    for i in range(len(smoothed_errors_max)):
        if i < len(smoothed_errors_max) - 1:
            delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]
            perc_change = delta / smoothed_errors_max[i]
            if perc_change < MIN_PERCENT_DECREASE:
                indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))

    for index in sorted(indices_remove, reverse=True):
        del e_seq[index]

    pruned_indices = []

    for i in anomaly_indices:
        for error_seq in e_seq:
            if i >= error_seq[0] and i <= error_seq[1]:
                pruned_indices.append(i)

    return pruned_indices
[ "def", "prune_anomalies", "(", "e_seq", ",", "smoothed_errors", ",", "max_error_below_e", ",", "anomaly_indices", ")", ":", "# min accepted perc decrease btwn max errors in anomalous sequences", "MIN_PERCENT_DECREASE", "=", "0.05", "e_seq_max", ",", "smoothed_errors_max", "=", "[", "]", ",", "[", "]", "for", "error_seq", "in", "e_seq", ":", "if", "len", "(", "smoothed_errors", "[", "error_seq", "[", "0", "]", ":", "error_seq", "[", "1", "]", "]", ")", ">", "0", ":", "sliced_errors", "=", "smoothed_errors", "[", "error_seq", "[", "0", "]", ":", "error_seq", "[", "1", "]", "]", "e_seq_max", ".", "append", "(", "max", "(", "sliced_errors", ")", ")", "smoothed_errors_max", ".", "append", "(", "max", "(", "sliced_errors", ")", ")", "smoothed_errors_max", ".", "sort", "(", "reverse", "=", "True", ")", "if", "max_error_below_e", ">", "0", ":", "smoothed_errors_max", ".", "append", "(", "max_error_below_e", ")", "indices_remove", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "smoothed_errors_max", ")", ")", ":", "if", "i", "<", "len", "(", "smoothed_errors_max", ")", "-", "1", ":", "delta", "=", "smoothed_errors_max", "[", "i", "]", "-", "smoothed_errors_max", "[", "i", "+", "1", "]", "perc_change", "=", "delta", "/", "smoothed_errors_max", "[", "i", "]", "if", "perc_change", "<", "MIN_PERCENT_DECREASE", ":", "indices_remove", ".", "append", "(", "e_seq_max", ".", "index", "(", "smoothed_errors_max", "[", "i", "]", ")", ")", "for", "index", "in", "sorted", "(", "indices_remove", ",", "reverse", "=", "True", ")", ":", "del", "e_seq", "[", "index", "]", "pruned_indices", "=", "[", "]", "for", "i", "in", "anomaly_indices", ":", "for", "error_seq", "in", "e_seq", ":", "if", "i", ">=", "error_seq", "[", "0", "]", "and", "i", "<=", "error_seq", "[", "1", "]", ":", "pruned_indices", ".", "append", "(", "i", ")", "return", "pruned_indices" ]
Helper method that removes anomalies which don't meet
    a minimum separation from the next anomaly.
[ "Helper", "method", "that", "removes", "anomalies", "which", "don", "t", "meet", "a", "minimum", "separation", "from", "next", "anomaly", "." ]
bf415f9f751724ff545a1156ddfd7524e320f469
https://github.com/HDI-Project/MLPrimitives/blob/bf415f9f751724ff545a1156ddfd7524e320f469/mlprimitives/candidates/timeseries_errors.py#L262-L299
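A toy call showing the pruning interface (all values illustrative; in the module this is driven by get_anomalies above):

smoothed_errors = [0.1, 0.9, 0.8, 0.1, 0.1, 0.85, 0.1]
e_seq = [(1, 3), (5, 7)]   # candidate anomalous slices
prune_anomalies(e_seq, smoothed_errors, max_error_below_e=0.1,
                anomaly_indices=[1, 2, 5, 6])
# sequences whose peak is not ~5% above the next peak are dropped,
# and only indices inside surviving sequences are returned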
1,077
ultrabug/uhashring
uhashring/ring.py
HashRing._configure_nodes
def _configure_nodes(self, nodes):
    """Parse and set up the given nodes.

    :param nodes: nodes used to create the continuum (see doc for format).
    """
    if isinstance(nodes, str):
        nodes = [nodes]
    elif not isinstance(nodes, (dict, list)):
        raise ValueError(
            'nodes configuration should be a list or a dict,'
            ' got {}'.format(type(nodes)))

    conf_changed = False
    for node in nodes:
        conf = {
            'hostname': node,
            'instance': None,
            'nodename': node,
            'port': None,
            'vnodes': self._default_vnodes,
            'weight': 1
        }
        current_conf = self.runtime._nodes.get(node, {})
        nodename = node
        # new node, trigger a ring update
        if not current_conf:
            conf_changed = True
        # complex config
        if isinstance(nodes, dict):
            node_conf = nodes[node]
            if isinstance(node_conf, int):
                conf['weight'] = node_conf
            elif isinstance(node_conf, dict):
                for k, v in node_conf.items():
                    if k in conf:
                        conf[k] = v
                        # changing one of these settings triggers a ring update
                        if k in ['nodename', 'vnodes', 'weight']:
                            if current_conf.get(k) != v:
                                conf_changed = True
            else:
                raise ValueError(
                    'node configuration should be a dict or an int,'
                    ' got {}'.format(type(node_conf)))
        if self._weight_fn:
            conf['weight'] = self._weight_fn(**conf)
        # changing the weight of a node triggers a ring update
        if current_conf.get('weight') != conf['weight']:
            conf_changed = True
        self.runtime._nodes[nodename] = conf
    return conf_changed
python
def _configure_nodes(self, nodes):
    """Parse and set up the given nodes.

    :param nodes: nodes used to create the continuum (see doc for format).
    """
    if isinstance(nodes, str):
        nodes = [nodes]
    elif not isinstance(nodes, (dict, list)):
        raise ValueError(
            'nodes configuration should be a list or a dict,'
            ' got {}'.format(type(nodes)))

    conf_changed = False
    for node in nodes:
        conf = {
            'hostname': node,
            'instance': None,
            'nodename': node,
            'port': None,
            'vnodes': self._default_vnodes,
            'weight': 1
        }
        current_conf = self.runtime._nodes.get(node, {})
        nodename = node
        # new node, trigger a ring update
        if not current_conf:
            conf_changed = True
        # complex config
        if isinstance(nodes, dict):
            node_conf = nodes[node]
            if isinstance(node_conf, int):
                conf['weight'] = node_conf
            elif isinstance(node_conf, dict):
                for k, v in node_conf.items():
                    if k in conf:
                        conf[k] = v
                        # changing one of these settings triggers a ring update
                        if k in ['nodename', 'vnodes', 'weight']:
                            if current_conf.get(k) != v:
                                conf_changed = True
            else:
                raise ValueError(
                    'node configuration should be a dict or an int,'
                    ' got {}'.format(type(node_conf)))
        if self._weight_fn:
            conf['weight'] = self._weight_fn(**conf)
        # changing the weight of a node triggers a ring update
        if current_conf.get('weight') != conf['weight']:
            conf_changed = True
        self.runtime._nodes[nodename] = conf
    return conf_changed
[ "def", "_configure_nodes", "(", "self", ",", "nodes", ")", ":", "if", "isinstance", "(", "nodes", ",", "str", ")", ":", "nodes", "=", "[", "nodes", "]", "elif", "not", "isinstance", "(", "nodes", ",", "(", "dict", ",", "list", ")", ")", ":", "raise", "ValueError", "(", "'nodes configuration should be a list or a dict,'", "' got {}'", ".", "format", "(", "type", "(", "nodes", ")", ")", ")", "conf_changed", "=", "False", "for", "node", "in", "nodes", ":", "conf", "=", "{", "'hostname'", ":", "node", ",", "'instance'", ":", "None", ",", "'nodename'", ":", "node", ",", "'port'", ":", "None", ",", "'vnodes'", ":", "self", ".", "_default_vnodes", ",", "'weight'", ":", "1", "}", "current_conf", "=", "self", ".", "runtime", ".", "_nodes", ".", "get", "(", "node", ",", "{", "}", ")", "nodename", "=", "node", "# new node, trigger a ring update", "if", "not", "current_conf", ":", "conf_changed", "=", "True", "# complex config", "if", "isinstance", "(", "nodes", ",", "dict", ")", ":", "node_conf", "=", "nodes", "[", "node", "]", "if", "isinstance", "(", "node_conf", ",", "int", ")", ":", "conf", "[", "'weight'", "]", "=", "node_conf", "elif", "isinstance", "(", "node_conf", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "node_conf", ".", "items", "(", ")", ":", "if", "k", "in", "conf", ":", "conf", "[", "k", "]", "=", "v", "# changing those config trigger a ring update", "if", "k", "in", "[", "'nodename'", ",", "'vnodes'", ",", "'weight'", "]", ":", "if", "current_conf", ".", "get", "(", "k", ")", "!=", "v", ":", "conf_changed", "=", "True", "else", ":", "raise", "ValueError", "(", "'node configuration should be a dict or an int,'", "' got {}'", ".", "format", "(", "type", "(", "node_conf", ")", ")", ")", "if", "self", ".", "_weight_fn", ":", "conf", "[", "'weight'", "]", "=", "self", ".", "_weight_fn", "(", "*", "*", "conf", ")", "# changing the weight of a node trigger a ring update", "if", "current_conf", ".", "get", "(", "'weight'", ")", "!=", "conf", "[", "'weight'", "]", ":", "conf_changed", "=", "True", "self", ".", "runtime", ".", "_nodes", "[", "nodename", "]", "=", "conf", "return", "conf_changed" ]
Parse and set up the given nodes.

    :param nodes: nodes used to create the continuum (see doc for format).
[ "Parse", "and", "set", "up", "the", "given", "nodes", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L44-L94
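The accepted nodes formats, shown as a hedged sketch (hostnames are placeholders; HashRing is the public entry point that calls this method):

from uhashring import HashRing

ring = HashRing(nodes='node1.example.com')                         # single str
ring = HashRing(nodes=['node1.example.com', 'node2.example.com'])  # list
ring = HashRing(nodes={
    'node1.example.com': 1,                                # int -> weight
    'node2.example.com': {'weight': 2, 'vnodes': 80},      # dict -> full conf
})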
1,078
ultrabug/uhashring
uhashring/ring.py
HashRing._get_pos
def _get_pos(self, key):
    """Get the index of the given key in the sorted key list.

    We return the position with the nearest hash based on
    the provided key unless we reach the end of the continuum/ring
    in which case we return the 0 (beginning) index position.

    :param key: the key to hash and look for.
    """
    p = bisect(self.runtime._keys, self.hashi(key))
    if p == len(self.runtime._keys):
        return 0
    else:
        return p
python
def _get_pos(self, key):
    """Get the index of the given key in the sorted key list.

    We return the position with the nearest hash based on
    the provided key unless we reach the end of the continuum/ring
    in which case we return the 0 (beginning) index position.

    :param key: the key to hash and look for.
    """
    p = bisect(self.runtime._keys, self.hashi(key))
    if p == len(self.runtime._keys):
        return 0
    else:
        return p
[ "def", "_get_pos", "(", "self", ",", "key", ")", ":", "p", "=", "bisect", "(", "self", ".", "runtime", ".", "_keys", ",", "self", ".", "hashi", "(", "key", ")", ")", "if", "p", "==", "len", "(", "self", ".", "runtime", ".", "_keys", ")", ":", "return", "0", "else", ":", "return", "p" ]
Get the index of the given key in the sorted key list.

    We return the position with the nearest hash based on
    the provided key unless we reach the end of the continuum/ring
    in which case we return the 0 (beginning) index position.

    :param key: the key to hash and look for.
[ "Get", "the", "index", "of", "the", "given", "key", "in", "the", "sorted", "key", "list", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L125-L138
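The wrap-around behaviour in _get_pos, reduced to plain bisect (ring positions are illustrative):

from bisect import bisect

keys = [10, 20, 30]   # sorted vnode hashes on the ring
for h in (5, 25, 35):
    p = bisect(keys, h)
    print(h, 0 if p == len(keys) else p)
# 5 -> 0, 25 -> 2, 35 -> 0 (past the last point, wrap to the beginning)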
1,079
ultrabug/uhashring
uhashring/ring.py
HashRing._get
def _get(self, key, what):
    """Generic getter magic method.

    The node with the nearest but not less hash value is returned.

    :param key: the key to look for.
    :param what: the information to look for, allowed values:

        - instance (default): associated node instance
        - nodename: node name
        - pos: index of the given key in the ring
        - tuple: ketama compatible (pos, name) tuple
        - weight: node weight
    """
    if not self.runtime._ring:
        return None

    pos = self._get_pos(key)
    if what == 'pos':
        return pos

    nodename = self.runtime._ring[self.runtime._keys[pos]]
    if what in ['hostname', 'instance', 'port', 'weight']:
        return self.runtime._nodes[nodename][what]
    elif what == 'dict':
        return self.runtime._nodes[nodename]
    elif what == 'nodename':
        return nodename
    elif what == 'tuple':
        return (self.runtime._keys[pos], nodename)
python
def _get(self, key, what):
    """Generic getter magic method.

    The node with the nearest but not less hash value is returned.

    :param key: the key to look for.
    :param what: the information to look for, allowed values:

        - instance (default): associated node instance
        - nodename: node name
        - pos: index of the given key in the ring
        - tuple: ketama compatible (pos, name) tuple
        - weight: node weight
    """
    if not self.runtime._ring:
        return None

    pos = self._get_pos(key)
    if what == 'pos':
        return pos

    nodename = self.runtime._ring[self.runtime._keys[pos]]
    if what in ['hostname', 'instance', 'port', 'weight']:
        return self.runtime._nodes[nodename][what]
    elif what == 'dict':
        return self.runtime._nodes[nodename]
    elif what == 'nodename':
        return nodename
    elif what == 'tuple':
        return (self.runtime._keys[pos], nodename)
[ "def", "_get", "(", "self", ",", "key", ",", "what", ")", ":", "if", "not", "self", ".", "runtime", ".", "_ring", ":", "return", "None", "pos", "=", "self", ".", "_get_pos", "(", "key", ")", "if", "what", "==", "'pos'", ":", "return", "pos", "nodename", "=", "self", ".", "runtime", ".", "_ring", "[", "self", ".", "runtime", ".", "_keys", "[", "pos", "]", "]", "if", "what", "in", "[", "'hostname'", ",", "'instance'", ",", "'port'", ",", "'weight'", "]", ":", "return", "self", ".", "runtime", ".", "_nodes", "[", "nodename", "]", "[", "what", "]", "elif", "what", "==", "'dict'", ":", "return", "self", ".", "runtime", ".", "_nodes", "[", "nodename", "]", "elif", "what", "==", "'nodename'", ":", "return", "nodename", "elif", "what", "==", "'tuple'", ":", "return", "(", "self", ".", "runtime", ".", "_keys", "[", "pos", "]", ",", "nodename", ")" ]
Generic getter magic method.

    The node with the nearest but not less hash value is returned.

    :param key: the key to look for.
    :param what: the information to look for, allowed values:

        - instance (default): associated node instance
        - nodename: node name
        - pos: index of the given key in the ring
        - tuple: ketama compatible (pos, name) tuple
        - weight: node weight
[ "Generic", "getter", "magic", "method", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L140-L168
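Illustrative calls showing the `what` selector (assumes a configured `ring` as in the _configure_nodes sketch above; in normal use this is reached through the public getters):

ring._get('my-key', 'pos')        # index in the sorted key list
ring._get('my-key', 'nodename')   # e.g. 'node2.example.com'
ring._get('my-key', 'tuple')      # ketama style (point, nodename)
ring._get('my-key', 'weight')     # per-node conf lookup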
1,080
ultrabug/uhashring
uhashring/ring.py
HashRing.get_instances
def get_instances(self):
    """Returns a list of the instances of all the configured nodes."""
    return [c.get('instance') for c in self.runtime._nodes.values()
            if c.get('instance')]
python
def get_instances(self):
    """Returns a list of the instances of all the configured nodes."""
    return [c.get('instance') for c in self.runtime._nodes.values()
            if c.get('instance')]
[ "def", "get_instances", "(", "self", ")", ":", "return", "[", "c", ".", "get", "(", "'instance'", ")", "for", "c", "in", "self", ".", "runtime", ".", "_nodes", ".", "values", "(", ")", "if", "c", ".", "get", "(", "'instance'", ")", "]" ]
Returns a list of the instances of all the configured nodes.
[ "Returns", "a", "list", "of", "the", "instances", "of", "all", "the", "configured", "nodes", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L177-L181
1,081
ultrabug/uhashring
uhashring/ring.py
HashRing.iterate_nodes
def iterate_nodes(self, key, distinct=True):
    """hash_ring compatibility implementation.

    Given a string key it returns the nodes as a generator that
    can hold the key.

    The generator iterates one time through the ring
    starting at the correct position.

    If `distinct` is set, then the nodes returned will be unique,
    i.e. no virtual copies will be returned.
    """
    if not self.runtime._ring:
        yield None
    else:
        for node in self.range(key, unique=distinct):
            yield node['nodename']
python
def iterate_nodes(self, key, distinct=True):
    """hash_ring compatibility implementation.

    Given a string key it returns the nodes as a generator that
    can hold the key.

    The generator iterates one time through the ring
    starting at the correct position.

    If `distinct` is set, then the nodes returned will be unique,
    i.e. no virtual copies will be returned.
    """
    if not self.runtime._ring:
        yield None
    else:
        for node in self.range(key, unique=distinct):
            yield node['nodename']
[ "def", "iterate_nodes", "(", "self", ",", "key", ",", "distinct", "=", "True", ")", ":", "if", "not", "self", ".", "runtime", ".", "_ring", ":", "yield", "None", "else", ":", "for", "node", "in", "self", ".", "range", "(", "key", ",", "unique", "=", "distinct", ")", ":", "yield", "node", "[", "'nodename'", "]" ]
hash_ring compatibility implementation.

    Given a string key it returns the nodes as a generator that
    can hold the key.

    The generator iterates one time through the ring
    starting at the correct position.

    If `distinct` is set, then the nodes returned will be unique,
    i.e. no virtual copies will be returned.
[ "hash_ring", "compatibility", "implementation", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L244-L258
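A hedged usage sketch (same hypothetical `ring` as above):

for name in ring.iterate_nodes('my-key'):
    print(name)
# yields each distinct node once, walking the ring from the key's position,
# so the order doubles as a natural failover sequence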
1,082
ultrabug/uhashring
uhashring/ring.py
HashRing.print_continuum
def print_continuum(self):
    """Prints a ketama compatible continuum report."""
    numpoints = len(self.runtime._keys)
    if numpoints:
        print('Numpoints in continuum: {}'.format(numpoints))
    else:
        print('Continuum empty')
    for p in self.get_points():
        point, node = p
        print('{} ({})'.format(node, point))
python
def print_continuum(self):
    """Prints a ketama compatible continuum report."""
    numpoints = len(self.runtime._keys)
    if numpoints:
        print('Numpoints in continuum: {}'.format(numpoints))
    else:
        print('Continuum empty')
    for p in self.get_points():
        point, node = p
        print('{} ({})'.format(node, point))
[ "def", "print_continuum", "(", "self", ")", ":", "numpoints", "=", "len", "(", "self", ".", "runtime", ".", "_keys", ")", "if", "numpoints", ":", "print", "(", "'Numpoints in continuum: {}'", ".", "format", "(", "numpoints", ")", ")", "else", ":", "print", "(", "'Continuum empty'", ")", "for", "p", "in", "self", ".", "get_points", "(", ")", ":", "point", ",", "node", "=", "p", "print", "(", "'{} ({})'", ".", "format", "(", "node", ",", "point", ")", ")" ]
Prints a ketama compatible continuum report.
[ "Prints", "a", "ketama", "compatible", "continuum", "report", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L260-L270
1,083
ultrabug/uhashring
uhashring/monkey.py
patch_memcache
def patch_memcache():
    """Monkey patch python-memcached to implement our consistent hashring
    in its node selection and operations.
    """
    def _init(self, servers, *k, **kw):
        self._old_init(servers, *k, **kw)
        nodes = {}
        for server in self.servers:
            conf = {
                'hostname': server.ip,
                'instance': server,
                'port': server.port,
                'weight': server.weight
            }
            nodes[server.ip] = conf
        self.uhashring = HashRing(nodes)

    def _get_server(self, key):
        if isinstance(key, tuple):
            return self._old_get_server(key)
        for i in range(self._SERVER_RETRIES):
            for node in self.uhashring.range(key):
                if node['instance'].connect():
                    return node['instance'], key
        return None, None

    memcache = __import__('memcache')
    memcache.Client._old_get_server = memcache.Client._get_server
    memcache.Client._old_init = memcache.Client.__init__
    memcache.Client.__init__ = _init
    memcache.Client._get_server = _get_server
python
def patch_memcache():
    """Monkey patch python-memcached to implement our consistent hashring
    in its node selection and operations.
    """
    def _init(self, servers, *k, **kw):
        self._old_init(servers, *k, **kw)
        nodes = {}
        for server in self.servers:
            conf = {
                'hostname': server.ip,
                'instance': server,
                'port': server.port,
                'weight': server.weight
            }
            nodes[server.ip] = conf
        self.uhashring = HashRing(nodes)

    def _get_server(self, key):
        if isinstance(key, tuple):
            return self._old_get_server(key)
        for i in range(self._SERVER_RETRIES):
            for node in self.uhashring.range(key):
                if node['instance'].connect():
                    return node['instance'], key
        return None, None

    memcache = __import__('memcache')
    memcache.Client._old_get_server = memcache.Client._get_server
    memcache.Client._old_init = memcache.Client.__init__
    memcache.Client.__init__ = _init
    memcache.Client._get_server = _get_server
[ "def", "patch_memcache", "(", ")", ":", "def", "_init", "(", "self", ",", "servers", ",", "*", "k", ",", "*", "*", "kw", ")", ":", "self", ".", "_old_init", "(", "servers", ",", "*", "k", ",", "*", "*", "kw", ")", "nodes", "=", "{", "}", "for", "server", "in", "self", ".", "servers", ":", "conf", "=", "{", "'hostname'", ":", "server", ".", "ip", ",", "'instance'", ":", "server", ",", "'port'", ":", "server", ".", "port", ",", "'weight'", ":", "server", ".", "weight", "}", "nodes", "[", "server", ".", "ip", "]", "=", "conf", "self", ".", "uhashring", "=", "HashRing", "(", "nodes", ")", "def", "_get_server", "(", "self", ",", "key", ")", ":", "if", "isinstance", "(", "key", ",", "tuple", ")", ":", "return", "self", ".", "_old_get_server", "(", "key", ")", "for", "i", "in", "range", "(", "self", ".", "_SERVER_RETRIES", ")", ":", "for", "node", "in", "self", ".", "uhashring", ".", "range", "(", "key", ")", ":", "if", "node", "[", "'instance'", "]", ".", "connect", "(", ")", ":", "return", "node", "[", "'instance'", "]", ",", "key", "return", "None", ",", "None", "memcache", "=", "__import__", "(", "'memcache'", ")", "memcache", ".", "Client", ".", "_old_get_server", "=", "memcache", ".", "Client", ".", "_get_server", "memcache", ".", "Client", ".", "_old_init", "=", "memcache", ".", "Client", ".", "__init__", "memcache", ".", "Client", ".", "__init__", "=", "_init", "memcache", ".", "Client", ".", "_get_server", "=", "_get_server" ]
Monkey patch python-memcached to implement our consistent hashring in its node selection and operations.
[ "Monkey", "patch", "python", "-", "memcached", "to", "implement", "our", "consistent", "hashring", "in", "its", "node", "selection", "and", "operations", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/monkey.py#L8-L42
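A usage sketch, assuming python-memcached is installed (server addresses are placeholders):

from uhashring.monkey import patch_memcache

patch_memcache()                 # must run before clients are created
import memcache

client = memcache.Client(['127.0.0.1:11211', '127.0.0.2:11211'])
client.set('some_key', 'value')  # node now chosen via the uhashring continuum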
1,084
ultrabug/uhashring
uhashring/ring_ketama.py
KetamaRing.hashi
def hashi(self, key, replica=0):
    """Returns a ketama compatible hash from the given key."""
    dh = self._listbytes(md5(str(key).encode('utf-8')).digest())
    rd = replica * 4
    return (
        (dh[3 + rd] << 24) | (dh[2 + rd] << 16) |
        (dh[1 + rd] << 8) | dh[0 + rd])
python
def hashi(self, key, replica=0):
    """Returns a ketama compatible hash from the given key."""
    dh = self._listbytes(md5(str(key).encode('utf-8')).digest())
    rd = replica * 4
    return (
        (dh[3 + rd] << 24) | (dh[2 + rd] << 16) |
        (dh[1 + rd] << 8) | dh[0 + rd])
[ "def", "hashi", "(", "self", ",", "key", ",", "replica", "=", "0", ")", ":", "dh", "=", "self", ".", "_listbytes", "(", "md5", "(", "str", "(", "key", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "digest", "(", ")", ")", "rd", "=", "replica", "*", "4", "return", "(", "(", "dh", "[", "3", "+", "rd", "]", "<<", "24", ")", "|", "(", "dh", "[", "2", "+", "rd", "]", "<<", "16", ")", "|", "(", "dh", "[", "1", "+", "rd", "]", "<<", "8", ")", "|", "dh", "[", "0", "+", "rd", "]", ")" ]
Returns a ketama compatible hash from the given key.
[ "Returns", "a", "ketama", "compatible", "hash", "from", "the", "given", "key", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring_ketama.py#L24-L31
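The byte layout can be checked directly against hashlib (a sketch; `ring` stands for any KetamaRing instance):

from hashlib import md5

key = 'node1-0'
d = md5(key.encode('utf-8')).digest()
expected = (d[3] << 24) | (d[2] << 16) | (d[1] << 8) | d[0]
assert ring.hashi(key) == expected   # little-endian read of digest bytes 0..3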
1,085
ultrabug/uhashring
uhashring/ring_ketama.py
KetamaRing._hashi_weight_generator
def _hashi_weight_generator(self, node_name, node_conf):
    """Calculate the weight factor of the given node and
    yield its hash key for every configured replica.

    :param node_name: the node name.
    """
    ks = (node_conf['vnodes'] * len(self._nodes) *
          node_conf['weight']) // self._weight_sum
    for w in range(0, ks):
        w_node_name = '%s-%s' % (node_name, w)
        for i in range(0, self._replicas):
            yield self.hashi(w_node_name, replica=i)
python
def _hashi_weight_generator(self, node_name, node_conf):
    """Calculate the weight factor of the given node and
    yield its hash key for every configured replica.

    :param node_name: the node name.
    """
    ks = (node_conf['vnodes'] * len(self._nodes) *
          node_conf['weight']) // self._weight_sum
    for w in range(0, ks):
        w_node_name = '%s-%s' % (node_name, w)
        for i in range(0, self._replicas):
            yield self.hashi(w_node_name, replica=i)
[ "def", "_hashi_weight_generator", "(", "self", ",", "node_name", ",", "node_conf", ")", ":", "ks", "=", "(", "node_conf", "[", "'vnodes'", "]", "*", "len", "(", "self", ".", "_nodes", ")", "*", "node_conf", "[", "'weight'", "]", ")", "//", "self", ".", "_weight_sum", "for", "w", "in", "range", "(", "0", ",", "ks", ")", ":", "w_node_name", "=", "'%s-%s'", "%", "(", "node_name", ",", "w", ")", "for", "i", "in", "range", "(", "0", ",", "self", ".", "_replicas", ")", ":", "yield", "self", ".", "hashi", "(", "w_node_name", ",", "replica", "=", "i", ")" ]
Calculate the weight factor of the given node and
    yield its hash key for every configured replica.

    :param node_name: the node name.
[ "Calculate", "the", "weight", "factor", "of", "the", "given", "node", "and", "yield", "its", "hash", "key", "for", "every", "configured", "replica", "." ]
2297471a392e28ed913b3276c2f48d0c01523375
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring_ketama.py#L33-L44
1,086
gatagat/lap
lap/lapmod.py
lapmod
def lapmod(n, cc, ii, kk, fast=True, return_cost=True, fp_version=FP_DYNAMIC):
    """Solve sparse linear assignment problem using Jonker-Volgenant algorithm.

    n: number of rows of the assignment cost matrix
    cc: 1D array of all finite elements of the assignment cost matrix
    ii: 1D array of indices of the row starts in cc. The following must hold:
            ii[0] = 0 and ii[n] = len(cc).
    kk: 1D array of the column indices so that:
            cost[i, kk[ii[i] + k]] == cc[ii[i] + k].
        Indices within one row must be sorted.
    return_cost: whether or not to return the assignment cost

    Returns (opt, x, y) where:
      opt: cost of the assignment
      x: vector of columns assigned to rows
      y: vector of rows assigned to columns
    or (x, y) if return_cost is not True.
    """
    # log = logging.getLogger('lapmod')

    check_cost(n, cc, ii, kk)

    if fast is True:
        # log.debug('[----CR & RT & ARR & augmentation ----]')
        x, y = _lapmod(n, cc, ii, kk, fp_version=fp_version)
    else:
        cc = np.ascontiguousarray(cc, dtype=np.float64)
        ii = np.ascontiguousarray(ii, dtype=np.int32)
        kk = np.ascontiguousarray(kk, dtype=np.int32)

        x = np.empty((n,), dtype=np.int32)
        y = np.empty((n,), dtype=np.int32)
        v = np.empty((n,), dtype=np.float64)
        free_rows = np.empty((n,), dtype=np.int32)
        # log.debug('[----Column reduction & reduction transfer----]')
        n_free_rows = _pycrrt(n, cc, ii, kk, free_rows, x, y, v)
        # log.debug(
        #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
        if n_free_rows == 0:
            # log.info('Reduction solved it.')
            if return_cost is True:
                return get_cost(n, cc, ii, kk, x), x, y
            else:
                return x, y
        for it in range(2):
            # log.debug('[---Augmenting row reduction (iteration: %d)---]', it)
            n_free_rows = _pyarr(
                n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
            # log.debug(
            #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
            if n_free_rows == 0:
                # log.info('Augmenting row reduction solved it.')
                if return_cost is True:
                    return get_cost(n, cc, ii, kk, x), x, y
                else:
                    return x, y
        # log.info('[----Augmentation----]')
        _pya(n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
        # log.debug('x, y, v: %s %s %s', x, y, v)
    if return_cost is True:
        return get_cost(n, cc, ii, kk, x), x, y
    else:
        return x, y
python
def lapmod(n, cc, ii, kk, fast=True, return_cost=True, fp_version=FP_DYNAMIC):
    """Solve sparse linear assignment problem using Jonker-Volgenant algorithm.

    n: number of rows of the assignment cost matrix
    cc: 1D array of all finite elements of the assignment cost matrix
    ii: 1D array of indices of the row starts in cc. The following must hold:
            ii[0] = 0 and ii[n] = len(cc).
    kk: 1D array of the column indices so that:
            cost[i, kk[ii[i] + k]] == cc[ii[i] + k].
        Indices within one row must be sorted.
    return_cost: whether or not to return the assignment cost

    Returns (opt, x, y) where:
      opt: cost of the assignment
      x: vector of columns assigned to rows
      y: vector of rows assigned to columns
    or (x, y) if return_cost is not True.
    """
    # log = logging.getLogger('lapmod')

    check_cost(n, cc, ii, kk)

    if fast is True:
        # log.debug('[----CR & RT & ARR & augmentation ----]')
        x, y = _lapmod(n, cc, ii, kk, fp_version=fp_version)
    else:
        cc = np.ascontiguousarray(cc, dtype=np.float64)
        ii = np.ascontiguousarray(ii, dtype=np.int32)
        kk = np.ascontiguousarray(kk, dtype=np.int32)

        x = np.empty((n,), dtype=np.int32)
        y = np.empty((n,), dtype=np.int32)
        v = np.empty((n,), dtype=np.float64)
        free_rows = np.empty((n,), dtype=np.int32)
        # log.debug('[----Column reduction & reduction transfer----]')
        n_free_rows = _pycrrt(n, cc, ii, kk, free_rows, x, y, v)
        # log.debug(
        #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
        if n_free_rows == 0:
            # log.info('Reduction solved it.')
            if return_cost is True:
                return get_cost(n, cc, ii, kk, x), x, y
            else:
                return x, y
        for it in range(2):
            # log.debug('[---Augmenting row reduction (iteration: %d)---]', it)
            n_free_rows = _pyarr(
                n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
            # log.debug(
            #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
            if n_free_rows == 0:
                # log.info('Augmenting row reduction solved it.')
                if return_cost is True:
                    return get_cost(n, cc, ii, kk, x), x, y
                else:
                    return x, y
        # log.info('[----Augmentation----]')
        _pya(n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
        # log.debug('x, y, v: %s %s %s', x, y, v)
    if return_cost is True:
        return get_cost(n, cc, ii, kk, x), x, y
    else:
        return x, y
[ "def", "lapmod", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "fast", "=", "True", ",", "return_cost", "=", "True", ",", "fp_version", "=", "FP_DYNAMIC", ")", ":", "# log = logging.getLogger('lapmod')", "check_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ")", "if", "fast", "is", "True", ":", "# log.debug('[----CR & RT & ARR & augmentation ----]')", "x", ",", "y", "=", "_lapmod", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "fp_version", "=", "fp_version", ")", "else", ":", "cc", "=", "np", ".", "ascontiguousarray", "(", "cc", ",", "dtype", "=", "np", ".", "float64", ")", "ii", "=", "np", ".", "ascontiguousarray", "(", "ii", ",", "dtype", "=", "np", ".", "int32", ")", "kk", "=", "np", ".", "ascontiguousarray", "(", "kk", ",", "dtype", "=", "np", ".", "int32", ")", "x", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "y", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "v", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "float64", ")", "free_rows", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "# log.debug('[----Column reduction & reduction transfer----]')", "n_free_rows", "=", "_pycrrt", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug(", "# 'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)", "if", "n_free_rows", "==", "0", ":", "# log.info('Reduction solved it.')", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y", "for", "it", "in", "range", "(", "2", ")", ":", "# log.debug('[---Augmenting row reduction (iteration: %d)---]', it)", "n_free_rows", "=", "_pyarr", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "n_free_rows", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug(", "# 'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)", "if", "n_free_rows", "==", "0", ":", "# log.info('Augmenting row reduction solved it.')", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y", "# log.info('[----Augmentation----]')", "_pya", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "n_free_rows", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug('x, y, v: %s %s %s', x, y, v)", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y" ]
Solve sparse linear assignment problem using Jonker-Volgenant algorithm.

    n: number of rows of the assignment cost matrix
    cc: 1D array of all finite elements of the assignment cost matrix
    ii: 1D array of indices of the row starts in cc. The following must hold:
            ii[0] = 0 and ii[n] = len(cc).
    kk: 1D array of the column indices so that:
            cost[i, kk[ii[i] + k]] == cc[ii[i] + k].
        Indices within one row must be sorted.
    return_cost: whether or not to return the assignment cost

    Returns (opt, x, y) where:
      opt: cost of the assignment
      x: vector of columns assigned to rows
      y: vector of rows assigned to columns
    or (x, y) if return_cost is not True.
[ "Solve", "sparse", "linear", "assignment", "problem", "using", "Jonker", "-", "Volgenant", "algorithm", "." ]
c2b6309ba246d18205a71228cdaea67210e1a039
https://github.com/gatagat/lap/blob/c2b6309ba246d18205a71228cdaea67210e1a039/lap/lapmod.py#L273-L341
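A small dense problem flattened into the (cc, ii, kk) sparse layout the docstring describes (values illustrative; fast=False exercises the pure-Python path shown above):

import numpy as np

cost = np.array([[4., 1., 3.],
                 [2., 0., 5.],
                 [3., 2., 2.]])
n = cost.shape[0]
cc = cost.ravel()               # all finite entries, row by row
ii = np.array([0, 3, 6, 9])     # row start offsets, ii[n] == len(cc)
kk = np.tile(np.arange(n), n)   # column index of every entry
opt, x, y = lapmod(n, cc, ii, kk, fast=False)
print(opt, x, y)                # 5.0 [1 0 2] [1 0 2]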
1,087
simonvh/genomepy
genomepy/provider.py
ProviderBase.register_provider
def register_provider(cls, provider):
    """Register method to keep list of providers."""
    def decorator(subclass):
        """Register as decorator function."""
        cls._providers[provider] = subclass
        subclass.name = provider
        return subclass
    return decorator
python
def register_provider(cls, provider):
    """Register method to keep list of providers."""
    def decorator(subclass):
        """Register as decorator function."""
        cls._providers[provider] = subclass
        subclass.name = provider
        return subclass
    return decorator
[ "def", "register_provider", "(", "cls", ",", "provider", ")", ":", "def", "decorator", "(", "subclass", ")", ":", "\"\"\"Register as decorator function.\"\"\"", "cls", ".", "_providers", "[", "provider", "]", "=", "subclass", "subclass", ".", "name", "=", "provider", "return", "subclass", "return", "decorator" ]
Register method to keep list of providers.
[ "Register", "method", "to", "keep", "list", "of", "providers", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/provider.py#L73-L80
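A sketch of how this decorator-based registry is used; the provider name and subclass here are hypothetical stand-ins, not genomepy's real providers:

class ProviderBase(object):
    """Stripped-down stand-in for the class in the record above."""
    _providers = {}

    @classmethod
    def register_provider(cls, provider):
        def decorator(subclass):
            # Map the provider name to the class and stamp the name on it.
            cls._providers[provider] = subclass
            subclass.name = provider
            return subclass
        return decorator


@ProviderBase.register_provider("example")
class ExampleProvider(ProviderBase):
    pass


print(ProviderBase._providers["example"] is ExampleProvider)  # True
print(ExampleProvider.name)  # 'example'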
1,088
simonvh/genomepy
genomepy/provider.py
ProviderBase.tar_to_bigfile
def tar_to_bigfile(self, fname, outfile): """Convert tar of multiple FASTAs to one file.""" fnames = [] tmpdir = mkdtemp() # Extract files to temporary directory with tarfile.open(fname) as tar: tar.extractall(path=tmpdir) for root, _, files in os.walk(tmpdir): fnames += [os.path.join(root, fname) for fname in files] # Concatenate with open(outfile, "w") as out: for infile in fnames: for line in open(infile): out.write(line) os.unlink(infile) # Remove temp dir shutil.rmtree(tmpdir)
python
def tar_to_bigfile(self, fname, outfile): """Convert tar of multiple FASTAs to one file.""" fnames = [] tmpdir = mkdtemp() # Extract files to temporary directory with tarfile.open(fname) as tar: tar.extractall(path=tmpdir) for root, _, files in os.walk(tmpdir): fnames += [os.path.join(root, fname) for fname in files] # Concatenate with open(outfile, "w") as out: for infile in fnames: for line in open(infile): out.write(line) os.unlink(infile) # Remove temp dir shutil.rmtree(tmpdir)
[ "def", "tar_to_bigfile", "(", "self", ",", "fname", ",", "outfile", ")", ":", "fnames", "=", "[", "]", "tmpdir", "=", "mkdtemp", "(", ")", "# Extract files to temporary directory", "with", "tarfile", ".", "open", "(", "fname", ")", "as", "tar", ":", "tar", ".", "extractall", "(", "path", "=", "tmpdir", ")", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "tmpdir", ")", ":", "fnames", "+=", "[", "os", ".", "path", ".", "join", "(", "root", ",", "fname", ")", "for", "fname", "in", "files", "]", "# Concatenate", "with", "open", "(", "outfile", ",", "\"w\"", ")", "as", "out", ":", "for", "infile", "in", "fnames", ":", "for", "line", "in", "open", "(", "infile", ")", ":", "out", ".", "write", "(", "line", ")", "os", ".", "unlink", "(", "infile", ")", "# Remove temp dir", "shutil", ".", "rmtree", "(", "tmpdir", ")" ]
Convert tar of multiple FASTAs to one file.
[ "Convert", "tar", "of", "multiple", "FASTAs", "to", "one", "file", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/provider.py#L90-L109
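The method above is normally reached through a provider instance; a minimal sketch, assuming genomepy is installed and using hypothetical file names:

from genomepy.provider import ProviderBase

p = ProviderBase.create("UCSC")  # provider name assumed; see the search record below for create()
# Extracts every FASTA from the tar(.gz) and concatenates them into one file.
p.tar_to_bigfile("chromFa.tar.gz", "genome.fa")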
1,089
simonvh/genomepy
genomepy/plugin.py
find_plugins
def find_plugins(): """Locate and initialize all available plugins. """ plugin_dir = os.path.dirname(os.path.realpath(__file__)) plugin_dir = os.path.join(plugin_dir, "plugins") plugin_files = [x[:-3] for x in os.listdir(plugin_dir) if x.endswith(".py")] sys.path.insert(0, plugin_dir) for plugin in plugin_files: __import__(plugin)
python
def find_plugins(): """Locate and initialize all available plugins. """ plugin_dir = os.path.dirname(os.path.realpath(__file__)) plugin_dir = os.path.join(plugin_dir, "plugins") plugin_files = [x[:-3] for x in os.listdir(plugin_dir) if x.endswith(".py")] sys.path.insert(0, plugin_dir) for plugin in plugin_files: __import__(plugin)
[ "def", "find_plugins", "(", ")", ":", "plugin_dir", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "plugin_dir", "=", "os", ".", "path", ".", "join", "(", "plugin_dir", ",", "\"plugins\"", ")", "plugin_files", "=", "[", "x", "[", ":", "-", "3", "]", "for", "x", "in", "os", ".", "listdir", "(", "plugin_dir", ")", "if", "x", ".", "endswith", "(", "\".py\"", ")", "]", "sys", ".", "path", ".", "insert", "(", "0", ",", "plugin_dir", ")", "for", "plugin", "in", "plugin_files", ":", "__import__", "(", "plugin", ")" ]
Locate and initialize all available plugins.
[ "Locate", "and", "initialize", "all", "available", "plugins", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L30-L38
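A sketch of what the discovery step enables, assuming genomepy is installed: once the plugin modules are imported, Python itself tracks the Plugin subclasses they define:

from genomepy.plugin import find_plugins, Plugin

find_plugins()  # imports every .py file in genomepy's plugins directory
for cls in Plugin.__subclasses__():
    print(cls.__name__)  # one class per discovered plugin module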
1,090
simonvh/genomepy
genomepy/plugin.py
convert
def convert(name): """Convert CamelCase to underscore Parameters ---------- name : str Camelcase string Returns ------- name : str Converted name """ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
python
def convert(name): """Convert CamelCase to underscore Parameters ---------- name : str Camelcase string Returns ------- name : str Converted name """ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
[ "def", "convert", "(", "name", ")", ":", "s1", "=", "re", ".", "sub", "(", "'(.)([A-Z][a-z]+)'", ",", "r'\\1_\\2'", ",", "name", ")", "return", "re", ".", "sub", "(", "'([a-z0-9])([A-Z])'", ",", "r'\\1_\\2'", ",", "s1", ")", ".", "lower", "(", ")" ]
Convert CamelCase to underscore Parameters ---------- name : str Camelcase string Returns ------- name : str Converted name
[ "Convert", "CamelCase", "to", "underscore" ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L40-L54
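The two substitutions split on case boundaries before lowercasing; a few worked examples (self-contained copy of the function above):

import re

def convert(name):
    # Insert '_' before an uppercase letter that starts a new lowercase word...
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # ...and between a lowercase letter or digit and an uppercase letter.
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

print(convert("BlacklistPlugin"))  # blacklist_plugin
print(convert("UCSCProvider"))     # ucsc_provider
print(convert("Bowtie2Plugin"))    # bowtie2_plugin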
1,091
simonvh/genomepy
genomepy/plugin.py
init_plugins
def init_plugins(): """Return dictionary of available plugins Returns ------- plugins : dictionary key is plugin name, value Plugin object """ find_plugins() d = {} for c in Plugin.__subclasses__(): ins = c() if ins.name() in config.get("plugin", []): ins.activate() d[ins.name()] = ins return d
python
def init_plugins(): """Return dictionary of available plugins Returns ------- plugins : dictionary key is plugin name, value Plugin object """ find_plugins() d = {} for c in Plugin.__subclasses__(): ins = c() if ins.name() in config.get("plugin", []): ins.activate() d[ins.name()] = ins return d
[ "def", "init_plugins", "(", ")", ":", "find_plugins", "(", ")", "d", "=", "{", "}", "for", "c", "in", "Plugin", ".", "__subclasses__", "(", ")", ":", "ins", "=", "c", "(", ")", "if", "ins", ".", "name", "(", ")", "in", "config", ".", "get", "(", "\"plugin\"", ",", "[", "]", ")", ":", "ins", ".", "activate", "(", ")", "d", "[", "ins", ".", "name", "(", ")", "]", "=", "ins", "return", "d" ]
Return dictionary of available plugins Returns ------- plugins : dictionary key is plugin name, value Plugin object
[ "Return", "dictionary", "of", "available", "plugins" ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L56-L74
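A usage sketch, assuming genomepy is installed; which plugins appear, and which are active, depends on the local installation and config:

from genomepy.plugin import init_plugins

plugins = init_plugins()
for name, plugin in plugins.items():
    print(name, plugin)  # plugin name -> Plugin instance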
1,092
simonvh/genomepy
genomepy/plugin.py
activate
def activate(name): """Activate plugin. Parameters ---------- name : str Plugin name. """ if name in plugins: plugins[name].activate() else: raise Exception("plugin {} not found".format(name))
python
def activate(name): """Activate plugin. Parameters ---------- name : str Plugin name. """ if name in plugins: plugins[name].activate() else: raise Exception("plugin {} not found".format(name))
[ "def", "activate", "(", "name", ")", ":", "if", "name", "in", "plugins", ":", "plugins", "[", "name", "]", ".", "activate", "(", ")", "else", ":", "raise", "Exception", "(", "\"plugin {} not found\"", ".", "format", "(", "name", ")", ")" ]
Activate plugin. Parameters ---------- name : str Plugin name.
[ "Activate", "plugin", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L76-L87
1,093
simonvh/genomepy
genomepy/plugin.py
deactivate
def deactivate(name): """Deactivate plugin. Parameters ---------- name : str Plugin name. """ if name in plugins: plugins[name].deactivate() else: raise Exception("plugin {} not found".format(name))
python
def deactivate(name): """Deactivate plugin. Parameters ---------- name : str Plugin name. """ if name in plugins: plugins[name].deactivate() else: raise Exception("plugin {} not found".format(name))
[ "def", "deactivate", "(", "name", ")", ":", "if", "name", "in", "plugins", ":", "plugins", "[", "name", "]", ".", "deactivate", "(", ")", "else", ":", "raise", "Exception", "(", "\"plugin {} not found\"", ".", "format", "(", "name", ")", ")" ]
Deactivate plugin. Parameters ---------- name : str Plugin name.
[ "Deactivate", "plugin", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L89-L100
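activate and deactivate are symmetric wrappers around the module-level plugins dict; a sketch, assuming genomepy is installed and using 'blacklist' as an illustrative plugin name:

from genomepy import plugin

plugin.activate("blacklist")    # raises if no plugin by that name was found
plugin.deactivate("blacklist")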
1,094
simonvh/genomepy
genomepy/functions.py
manage_config
def manage_config(cmd, *args): """Manage genomepy config file.""" if cmd == "file": print(config.config_file) elif cmd == "show": with open(config.config_file) as f: print(f.read()) elif cmd == "generate": fname = os.path.join( user_config_dir("genomepy"), "{}.yaml".format("genomepy") ) if not os.path.exists(user_config_dir("genomepy")): os.makedirs(user_config_dir("genomepy")) with open(fname, "w") as fout: with open(config.config_file) as fin: fout.write(fin.read()) print("Created config file {}".format(fname))
python
def manage_config(cmd, *args): """Manage genomepy config file.""" if cmd == "file": print(config.config_file) elif cmd == "show": with open(config.config_file) as f: print(f.read()) elif cmd == "generate": fname = os.path.join( user_config_dir("genomepy"), "{}.yaml".format("genomepy") ) if not os.path.exists(user_config_dir("genomepy")): os.makedirs(user_config_dir("genomepy")) with open(fname, "w") as fout: with open(config.config_file) as fin: fout.write(fin.read()) print("Created config file {}".format(fname))
[ "def", "manage_config", "(", "cmd", ",", "*", "args", ")", ":", "if", "cmd", "==", "\"file\"", ":", "print", "(", "config", ".", "config_file", ")", "elif", "cmd", "==", "\"show\"", ":", "with", "open", "(", "config", ".", "config_file", ")", "as", "f", ":", "print", "(", "f", ".", "read", "(", ")", ")", "elif", "cmd", "==", "\"generate\"", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "user_config_dir", "(", "\"genomepy\"", ")", ",", "\"{}.yaml\"", ".", "format", "(", "\"genomepy\"", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "user_config_dir", "(", "\"genomepy\"", ")", ")", ":", "os", ".", "makedirs", "(", "user_config_dir", "(", "\"genomepy\"", ")", ")", "with", "open", "(", "fname", ",", "\"w\"", ")", "as", "fout", ":", "with", "open", "(", "config", ".", "config_file", ")", "as", "fin", ":", "fout", ".", "write", "(", "fin", ".", "read", "(", ")", ")", "print", "(", "\"Created config file {}\"", ".", "format", "(", "fname", ")", ")" ]
Manage genomepy config file.
[ "Manage", "genomepy", "config", "file", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L26-L44
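A sketch of the three subcommands, assuming genomepy is installed:

from genomepy.functions import manage_config

manage_config("file")      # print the path of the active config file
manage_config("show")      # print its contents
manage_config("generate")  # copy the current config into the user config dir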
1,095
simonvh/genomepy
genomepy/functions.py
search
def search(term, provider=None): """ Search for a genome. If provider is specified, search only that specific provider, else search all providers. Both the name and description are used for the search. The search term is case-insensitive. Parameters ---------- term : str Search term, case-insensitive. provider : str , optional Provider name Yields ------ tuple genome information (name/identifier and description) """ if provider: providers = [ProviderBase.create(provider)] else: # if provider is not specified search all providers providers = [ProviderBase.create(p) for p in ProviderBase.list_providers()] for p in providers: for row in p.search(term): yield [x.encode('latin-1') for x in [p.name] + list(row)]
python
def search(term, provider=None): """ Search for a genome. If provider is specified, search only that specific provider, else search all providers. Both the name and description are used for the search. The search term is case-insensitive. Parameters ---------- term : str Search term, case-insensitive. provider : str , optional Provider name Yields ------ tuple genome information (name/identifier and description) """ if provider: providers = [ProviderBase.create(provider)] else: # if provider is not specified search all providers providers = [ProviderBase.create(p) for p in ProviderBase.list_providers()] for p in providers: for row in p.search(term): yield [x.encode('latin-1') for x in [p.name] + list(row)]
[ "def", "search", "(", "term", ",", "provider", "=", "None", ")", ":", "if", "provider", ":", "providers", "=", "[", "ProviderBase", ".", "create", "(", "provider", ")", "]", "else", ":", "# if provider is not specified search all providers", "providers", "=", "[", "ProviderBase", ".", "create", "(", "p", ")", "for", "p", "in", "ProviderBase", ".", "list_providers", "(", ")", "]", "for", "p", "in", "providers", ":", "for", "row", "in", "p", ".", "search", "(", "term", ")", ":", "yield", "[", "x", ".", "encode", "(", "'latin-1'", ")", "for", "x", "in", "[", "p", ".", "name", "]", "+", "list", "(", "row", ")", "]" ]
Search for a genome. If provider is specified, search only that specific provider, else search all providers. Both the name and description are used for the search. The search term is case-insensitive. Parameters ---------- term : str Search term, case-insensitive. provider : str , optional Provider name Yields ------ tuple genome information (name/identifier and description)
[ "Search", "for", "a", "genome", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L117-L147
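A usage sketch; the generator yields lists of latin-1-encoded byte strings, so decode them for display (search term and provider name are illustrative):

from genomepy.functions import search

for row in search("zebrafish", provider="UCSC"):
    provider_name, name, *rest = [x.decode('latin-1') for x in row]
    print(provider_name, name)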
1,096
simonvh/genomepy
genomepy/functions.py
install_genome
def install_genome(name, provider, version=None, genome_dir=None, localname=None, mask="soft", regex=None, invert_match=False, annotation=False): """ Install a genome. Parameters ---------- name : str Genome name provider : str Provider name version : str Version (only for Ensembl) genome_dir : str , optional Where to store the fasta files localname : str , optional Custom name for this genome. mask : str , optional Default is 'soft', specify 'hard' for hard masking. regex : str , optional Regular expression to select specific chromosome / scaffold names. invert_match : bool , optional Set to True to select all chromosomes that don't match the regex. annotation : bool , optional If set to True, download gene annotation in BED and GTF format. """ if not genome_dir: genome_dir = config.get("genome_dir", None) if not genome_dir: raise norns.exceptions.ConfigError("Please provide or configure a genome_dir") genome_dir = os.path.expanduser(genome_dir) localname = get_localname(name, localname) # Download genome from provider p = ProviderBase.create(provider) p.download_genome( name, genome_dir, version=version, mask=mask, localname=localname, regex=regex, invert_match=invert_match) if annotation: # Download annotation from provider p.download_annotation(name, genome_dir, localname=localname, version=version) g = Genome(localname, genome_dir=genome_dir) for plugin in get_active_plugins(): plugin.after_genome_download(g) generate_env()
python
def install_genome(name, provider, version=None, genome_dir=None, localname=None, mask="soft", regex=None, invert_match=False, annotation=False): """ Install a genome. Parameters ---------- name : str Genome name provider : str Provider name version : str Version (only for Ensembl) genome_dir : str , optional Where to store the fasta files localname : str , optional Custom name for this genome. mask : str , optional Default is 'soft', specify 'hard' for hard masking. regex : str , optional Regular expression to select specific chromosome / scaffold names. invert_match : bool , optional Set to True to select all chromosomes that don't match the regex. annotation : bool , optional If set to True, download gene annotation in BED and GTF format. """ if not genome_dir: genome_dir = config.get("genome_dir", None) if not genome_dir: raise norns.exceptions.ConfigError("Please provide or configure a genome_dir") genome_dir = os.path.expanduser(genome_dir) localname = get_localname(name, localname) # Download genome from provider p = ProviderBase.create(provider) p.download_genome( name, genome_dir, version=version, mask=mask, localname=localname, regex=regex, invert_match=invert_match) if annotation: # Download annotation from provider p.download_annotation(name, genome_dir, localname=localname, version=version) g = Genome(localname, genome_dir=genome_dir) for plugin in get_active_plugins(): plugin.after_genome_download(g) generate_env()
[ "def", "install_genome", "(", "name", ",", "provider", ",", "version", "=", "None", ",", "genome_dir", "=", "None", ",", "localname", "=", "None", ",", "mask", "=", "\"soft\"", ",", "regex", "=", "None", ",", "invert_match", "=", "False", ",", "annotation", "=", "False", ")", ":", "if", "not", "genome_dir", ":", "genome_dir", "=", "config", ".", "get", "(", "\"genome_dir\"", ",", "None", ")", "if", "not", "genome_dir", ":", "raise", "norns", ".", "exceptions", ".", "ConfigError", "(", "\"Please provide or configure a genome_dir\"", ")", "genome_dir", "=", "os", ".", "path", ".", "expanduser", "(", "genome_dir", ")", "localname", "=", "get_localname", "(", "name", ",", "localname", ")", "# Download genome from provider", "p", "=", "ProviderBase", ".", "create", "(", "provider", ")", "p", ".", "download_genome", "(", "name", ",", "genome_dir", ",", "version", "=", "version", ",", "mask", "=", "mask", ",", "localname", "=", "localname", ",", "regex", "=", "regex", ",", "invert_match", "=", "invert_match", ")", "if", "annotation", ":", "# Download annotation from provider", "p", ".", "download_annotation", "(", "name", ",", "genome_dir", ",", "localname", "=", "localname", ",", "version", "=", "version", ")", "g", "=", "Genome", "(", "localname", ",", "genome_dir", "=", "genome_dir", ")", "for", "plugin", "in", "get_active_plugins", "(", ")", ":", "plugin", ".", "after_genome_download", "(", "g", ")", "generate_env", "(", ")" ]
Install a genome. Parameters ---------- name : str Genome name provider : str Provider name version : str Version (only for Ensembl) genome_dir : str , optional Where to store the fasta files localname : str , optional Custom name for this genome. mask : str , optional Default is 'soft', specify 'hard' for hard masking. regex : str , optional Regular expression to select specific chromosome / scaffold names. invert_match : bool , optional Set to True to select all chromosomes that don't match the regex. annotation : bool , optional If set to True, download gene annotation in BED and GTF format.
[ "Install", "a", "genome", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L149-L209
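A usage sketch; genome and provider names are illustrative, and the call performs a real download:

from genomepy.functions import install_genome

# Fetch the soft-masked assembly plus annotation into the configured genome_dir,
# then let any active plugins post-process the downloaded genome.
install_genome("hg38", "UCSC", annotation=True)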
1,097
simonvh/genomepy
genomepy/functions.py
generate_exports
def generate_exports(): """Return export commands for setting environment variables. """ env = [] for name in list_installed_genomes(): try: g = Genome(name) env_name = re.sub(r'[^\w]+', "_", name).upper() env.append("export {}={}".format(env_name, g.filename)) except Exception: pass return env
python
def generate_exports(): """Return export commands for setting environment variables. """ env = [] for name in list_installed_genomes(): try: g = Genome(name) env_name = re.sub(r'[^\w]+', "_", name).upper() env.append("export {}={}".format(env_name, g.filename)) except Exception: pass return env
[ "def", "generate_exports", "(", ")", ":", "env", "=", "[", "]", "for", "name", "in", "list_installed_genomes", "(", ")", ":", "try", ":", "g", "=", "Genome", "(", "name", ")", "env_name", "=", "re", ".", "sub", "(", "r'[^\\w]+'", ",", "\"_\"", ",", "name", ")", ".", "upper", "(", ")", "env", ".", "append", "(", "\"export {}={}\"", ".", "format", "(", "env_name", ",", "g", ".", "filename", ")", ")", "except", ":", "pass", "return", "env" ]
Return export commands for setting environment variables.
[ "Print", "export", "commands", "for", "setting", "environment", "variables", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L238-L249
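A sketch of the output format; the environment variable names are derived from the locally installed genomes, so the example value is illustrative:

from genomepy.functions import generate_exports

for line in generate_exports():
    print(line)  # e.g. export HG38=/data/genomes/hg38/hg38.fa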
1,098
simonvh/genomepy
genomepy/functions.py
generate_env
def generate_env(fname=None): """Generate file with exports. By default this is in .config/genomepy/exports.txt. Parameters ---------- fname : str, optional Name of the output file. """ if fname is None: config_dir = user_config_dir("genomepy") if not os.path.exists(config_dir): os.makedirs(config_dir) fname = os.path.join(config_dir, "exports.txt") with open(fname, "w") as fout: for env in generate_exports(): fout.write("{}\n".format(env))
python
def generate_env(fname=None): """Generate file with exports. By default this is in .config/genomepy/exports.txt. Parameters ---------- fname : str, optional Name of the output file. """ if fname is None: config_dir = user_config_dir("genomepy") if not os.path.exists(config_dir): os.makedirs(config_dir) fname = os.path.join(config_dir, "exports.txt") with open(fname, "w") as fout: for env in generate_exports(): fout.write("{}\n".format(env))
[ "def", "generate_env", "(", "fname", "=", "None", ")", ":", "config_dir", "=", "user_config_dir", "(", "\"genomepy\"", ")", "if", "os", ".", "path", ".", "exists", "(", "config_dir", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "config_dir", ",", "\"exports.txt\"", ")", "with", "open", "(", "fname", ",", "\"w\"", ")", "as", "fout", ":", "for", "env", "in", "generate_exports", "(", ")", ":", "fout", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "env", ")", ")" ]
Generate file with exports. By default this is in .config/genomepy/exports.txt. Parameters ---------- fname : str, optional Name of the output file.
[ "Generate", "file", "with", "exports", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L251-L266
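A minimal sketch; with no argument the export file lands in the user config dir:

from genomepy.functions import generate_env

generate_env()  # writes exports.txt, e.g. ~/.config/genomepy/exports.txt
# The file can then be sourced from a shell profile to expose the variables.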
1,099
simonvh/genomepy
genomepy/functions.py
manage_plugins
def manage_plugins(command, plugin_names=None): """Enable or disable plugins. """ if plugin_names is None: plugin_names = [] active_plugins = config.get("plugin", []) plugins = init_plugins() if command == "enable": for name in plugin_names: if name not in plugins: raise ValueError("Unknown plugin: {}".format(name)) if name not in active_plugins: active_plugins.append(name) elif command == "disable": for name in plugin_names: if name in active_plugins: active_plugins.remove(name) elif command == "list": print("{:20}{}".format("plugin", "enabled")) for plugin in sorted(plugins): print("{:20}{}".format(plugin, {False:"", True:"*"}[plugin in active_plugins])) else: raise ValueError("Invalid plugin command") config["plugin"] = active_plugins config.save() if command in ["enable", "disable"]: print("Enabled plugins: {}".format(", ".join(sorted(active_plugins))))
python
def manage_plugins(command, plugin_names=None): """Enable or disable plugins. """ if plugin_names is None: plugin_names = [] active_plugins = config.get("plugin", []) plugins = init_plugins() if command == "enable": for name in plugin_names: if name not in plugins: raise ValueError("Unknown plugin: {}".format(name)) if name not in active_plugins: active_plugins.append(name) elif command == "disable": for name in plugin_names: if name in active_plugins: active_plugins.remove(name) elif command == "list": print("{:20}{}".format("plugin", "enabled")) for plugin in sorted(plugins): print("{:20}{}".format(plugin, {False:"", True:"*"}[plugin in active_plugins])) else: raise ValueError("Invalid plugin command") config["plugin"] = active_plugins config.save() if command in ["enable", "disable"]: print("Enabled plugins: {}".format(", ".join(sorted(active_plugins))))
[ "def", "manage_plugins", "(", "command", ",", "plugin_names", "=", "None", ")", ":", "if", "plugin_names", "is", "None", ":", "plugin_names", "=", "[", "]", "active_plugins", "=", "config", ".", "get", "(", "\"plugin\"", ",", "[", "]", ")", "plugins", "=", "init_plugins", "(", ")", "if", "command", "==", "\"enable\"", ":", "for", "name", "in", "plugin_names", ":", "if", "name", "not", "in", "plugins", ":", "raise", "ValueError", "(", "\"Unknown plugin: {}\"", ".", "format", "(", "name", ")", ")", "if", "name", "not", "in", "active_plugins", ":", "active_plugins", ".", "append", "(", "name", ")", "elif", "command", "==", "\"disable\"", ":", "for", "name", "in", "plugin_names", ":", "if", "name", "in", "active_plugins", ":", "active_plugins", ".", "remove", "(", "name", ")", "elif", "command", "==", "\"list\"", ":", "print", "(", "\"{:20}{}\"", ".", "format", "(", "\"plugin\"", ",", "\"enabled\"", ")", ")", "for", "plugin", "in", "sorted", "(", "plugins", ")", ":", "print", "(", "\"{:20}{}\"", ".", "format", "(", "plugin", ",", "{", "False", ":", "\"\"", ",", "True", ":", "\"*\"", "}", "[", "plugin", "in", "active_plugins", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid plugin command\"", ")", "config", "[", "\"plugin\"", "]", "=", "active_plugins", "config", ".", "save", "(", ")", "if", "command", "in", "[", "\"enable\"", ",", "\"disable\"", "]", ":", "print", "(", "\"Enabled plugins: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "sorted", "(", "active_plugins", ")", ")", ")", ")" ]
Enable or disable plugins.
[ "Enable", "or", "disable", "plugins", "." ]
abace2366511dbe855fe1430b1f7d9ec4cbf6d29
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L514-L541
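A sketch of the three commands; the plugin name is illustrative:

from genomepy.functions import manage_plugins

manage_plugins("list")                   # print each plugin and whether it is enabled
manage_plugins("enable", ["blacklist"])  # persist the plugin in the config
manage_plugins("disable", ["blacklist"])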