id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
sequence
docstring
stringlengths
3
17.3k
docstring_tokens
sequence
sha
stringlengths
40
40
url
stringlengths
87
242
2,300
ergoithz/browsepy
browsepy/file.py
generic_filename
def generic_filename(path):
    '''
    Extract the filename component of the given path in an
    OS-independent way, handling every known path separator.

    :param path: path
    :return: filename
    :rtype: str or unicode (depending on given path)
    '''
    for separator in common_path_separators:
        if separator in path:
            # Keep only what follows the last occurrence of this separator.
            path = path.rsplit(separator, 1)[-1]
    return path
python
def generic_filename(path): ''' Extract filename of given path os-indepently, taking care of known path separators. :param path: path :return: filename :rtype: str or unicode (depending on given path) ''' for sep in common_path_separators: if sep in path: _, path = path.rsplit(sep, 1) return path
[ "def", "generic_filename", "(", "path", ")", ":", "for", "sep", "in", "common_path_separators", ":", "if", "sep", "in", "path", ":", "_", ",", "path", "=", "path", ".", "rsplit", "(", "sep", ",", "1", ")", "return", "path" ]
Extract filename of given path os-indepently, taking care of known path separators. :param path: path :return: filename :rtype: str or unicode (depending on given path)
[ "Extract", "filename", "of", "given", "path", "os", "-", "indepently", "taking", "care", "of", "known", "path", "separators", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L799-L812
2,301
ergoithz/browsepy
browsepy/file.py
clean_restricted_chars
def clean_restricted_chars(path, restricted_chars=restricted_chars):
    '''
    Return the given path with every restricted character replaced by
    an underscore.

    :param path: path
    :param restricted_chars: iterable of characters to replace
    :return: path without restricted characters
    :rtype: str or unicode (depending on given path)
    '''
    for forbidden in restricted_chars:
        if forbidden in path:
            path = path.replace(forbidden, '_')
    return path
python
def clean_restricted_chars(path, restricted_chars=restricted_chars): ''' Get path without restricted characters. :param path: path :return: path without restricted characters :rtype: str or unicode (depending on given path) ''' for character in restricted_chars: path = path.replace(character, '_') return path
[ "def", "clean_restricted_chars", "(", "path", ",", "restricted_chars", "=", "restricted_chars", ")", ":", "for", "character", "in", "restricted_chars", ":", "path", "=", "path", ".", "replace", "(", "character", ",", "'_'", ")", "return", "path" ]
Get path without restricted characters. :param path: path :return: path without restricted characters :rtype: str or unicode (depending on given path)
[ "Get", "path", "without", "restricted", "characters", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L815-L825
2,302
ergoithz/browsepy
browsepy/file.py
check_forbidden_filename
def check_forbidden_filename(filename, destiny_os=os.name, restricted_names=restricted_names):
    '''
    Tell whether the given filename is forbidden for the destination OS
    or filesystem.

    On Windows ('nt'), reserved device names (CON, NUL, ...) are
    forbidden even when the filename carries an extension, and the
    comparison is case-insensitive.

    :param filename: filename to check
    :param destiny_os: destination operating system, defaults to os.name
    :param restricted_names: iterable of always-forbidden names
    :return: whether filename is forbidden on given OS (or filesystem)
    :rtype: bool
    '''
    if filename in restricted_names:
        return True
    if destiny_os != 'nt':
        return False
    # NT reserves device names regardless of any extension after the dot.
    return filename.split('.', 1)[0].upper() in nt_device_names
python
def check_forbidden_filename(filename, destiny_os=os.name, restricted_names=restricted_names): ''' Get if given filename is forbidden for current OS or filesystem. :param filename: :param destiny_os: destination operative system :param fs_encoding: destination filesystem filename encoding :return: wether is forbidden on given OS (or filesystem) or not :rtype: bool ''' return ( filename in restricted_names or destiny_os == 'nt' and filename.split('.', 1)[0].upper() in nt_device_names )
[ "def", "check_forbidden_filename", "(", "filename", ",", "destiny_os", "=", "os", ".", "name", ",", "restricted_names", "=", "restricted_names", ")", ":", "return", "(", "filename", "in", "restricted_names", "or", "destiny_os", "==", "'nt'", "and", "filename", ".", "split", "(", "'.'", ",", "1", ")", "[", "0", "]", ".", "upper", "(", ")", "in", "nt_device_names", ")" ]
Get if given filename is forbidden for current OS or filesystem. :param filename: :param destiny_os: destination operative system :param fs_encoding: destination filesystem filename encoding :return: wether is forbidden on given OS (or filesystem) or not :rtype: bool
[ "Get", "if", "given", "filename", "is", "forbidden", "for", "current", "OS", "or", "filesystem", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L828-L844
2,303
ergoithz/browsepy
browsepy/file.py
check_path
def check_path(path, base, os_sep=os.sep):
    '''
    Tell whether two absolute paths refer to the same location,
    normalizing case via os.path.normcase and ignoring a single
    trailing separator on base.

    :param path: absolute path
    :type path: str
    :param base: absolute base path
    :type base: str
    :param os_sep: path separator, defaults to os.sep
    :type os_sep: str
    :return: whether the two paths are equal or not
    :rtype: bool
    '''
    if base.endswith(os_sep):
        # Drop exactly one trailing separator so '/a/b/' equals '/a/b'.
        base = base[:-len(os_sep)]
    return os.path.normcase(path) == os.path.normcase(base)
python
def check_path(path, base, os_sep=os.sep): ''' Check if both given paths are equal. :param path: absolute path :type path: str :param base: absolute base path :type base: str :param os_sep: path separator, defaults to os.sep :type base: str :return: wether two path are equal or not :rtype: bool ''' base = base[:-len(os_sep)] if base.endswith(os_sep) else base return os.path.normcase(path) == os.path.normcase(base)
[ "def", "check_path", "(", "path", ",", "base", ",", "os_sep", "=", "os", ".", "sep", ")", ":", "base", "=", "base", "[", ":", "-", "len", "(", "os_sep", ")", "]", "if", "base", ".", "endswith", "(", "os_sep", ")", "else", "base", "return", "os", ".", "path", ".", "normcase", "(", "path", ")", "==", "os", ".", "path", ".", "normcase", "(", "base", ")" ]
Check if both given paths are equal. :param path: absolute path :type path: str :param base: absolute base path :type base: str :param os_sep: path separator, defaults to os.sep :type base: str :return: wether two path are equal or not :rtype: bool
[ "Check", "if", "both", "given", "paths", "are", "equal", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L847-L861
2,304
ergoithz/browsepy
browsepy/file.py
check_base
def check_base(path, base, os_sep=os.sep):
    '''
    Tell whether the given absolute path is equal to, or located under,
    the given base path.

    :param path: absolute path
    :type path: str
    :param base: absolute base path
    :type base: str
    :param os_sep: path separator, defaults to os.sep
    :return: whether path is equal to or under the given base
    :rtype: bool
    '''
    if check_path(path, base, os_sep):
        return True
    return check_under_base(path, base, os_sep)
python
def check_base(path, base, os_sep=os.sep): ''' Check if given absolute path is under or given base. :param path: absolute path :type path: str :param base: absolute base path :type base: str :param os_sep: path separator, defaults to os.sep :return: wether path is under given base or not :rtype: bool ''' return ( check_path(path, base, os_sep) or check_under_base(path, base, os_sep) )
[ "def", "check_base", "(", "path", ",", "base", ",", "os_sep", "=", "os", ".", "sep", ")", ":", "return", "(", "check_path", "(", "path", ",", "base", ",", "os_sep", ")", "or", "check_under_base", "(", "path", ",", "base", ",", "os_sep", ")", ")" ]
Check if given absolute path is under or given base. :param path: absolute path :type path: str :param base: absolute base path :type base: str :param os_sep: path separator, defaults to os.sep :return: wether path is under given base or not :rtype: bool
[ "Check", "if", "given", "absolute", "path", "is", "under", "or", "given", "base", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L864-L879
2,305
ergoithz/browsepy
browsepy/file.py
check_under_base
def check_under_base(path, base, os_sep=os.sep):
    '''
    Tell whether the given absolute path lies strictly under the given
    base path (the base itself does not count as being under it).

    :param path: absolute path
    :type path: str
    :param base: absolute base path
    :type base: str
    :param os_sep: path separator, defaults to os.sep
    :return: whether path is under the given base or not
    :rtype: bool
    '''
    # Ensure base ends with exactly one separator before prefix-matching,
    # so '/a/bc' is not mistaken for a child of '/a/b'.
    prefix = base + os_sep if not base.endswith(os_sep) else base
    return os.path.normcase(path).startswith(os.path.normcase(prefix))
python
def check_under_base(path, base, os_sep=os.sep): ''' Check if given absolute path is under given base. :param path: absolute path :type path: str :param base: absolute base path :type base: str :param os_sep: path separator, defaults to os.sep :return: wether file is under given base or not :rtype: bool ''' prefix = base if base.endswith(os_sep) else base + os_sep return os.path.normcase(path).startswith(os.path.normcase(prefix))
[ "def", "check_under_base", "(", "path", ",", "base", ",", "os_sep", "=", "os", ".", "sep", ")", ":", "prefix", "=", "base", "if", "base", ".", "endswith", "(", "os_sep", ")", "else", "base", "+", "os_sep", "return", "os", ".", "path", ".", "normcase", "(", "path", ")", ".", "startswith", "(", "os", ".", "path", ".", "normcase", "(", "prefix", ")", ")" ]
Check if given absolute path is under given base. :param path: absolute path :type path: str :param base: absolute base path :type base: str :param os_sep: path separator, defaults to os.sep :return: wether file is under given base or not :rtype: bool
[ "Check", "if", "given", "absolute", "path", "is", "under", "given", "base", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L882-L895
2,306
ergoithz/browsepy
browsepy/file.py
secure_filename
def secure_filename(path, destiny_os=os.name, fs_encoding=compat.FS_ENCODING):
    '''
    Strip parent path components and special filenames from the given
    unsafe path, producing a filename safe for the destination OS.

    Returns an empty string when the resulting name is invalid or
    protected on the destination OS.

    :param path: unsafe path, only basename will be used
    :type path: str
    :param destiny_os: destination operating system (defaults to os.name)
    :type destiny_os: str
    :param fs_encoding: filesystem path encoding (defaults to detected)
    :type fs_encoding: str
    :return: filename or empty string
    :rtype: str
    '''
    chars = nt_restricted_chars if destiny_os == 'nt' else restricted_chars
    path = clean_restricted_chars(generic_filename(path), restricted_chars=chars)
    # Leading/trailing spaces and dots are invalid on NT and undesirable
    # on other systems.
    path = path.strip(' .')
    if check_forbidden_filename(path, destiny_os=destiny_os):
        return ''
    if isinstance(path, bytes):
        path = path.decode('latin-1', errors=underscore_replace)
    # Round-trip through the filesystem encoding so characters it cannot
    # represent are stripped/replaced instead of leaking into the filesystem.
    codec_kwargs = {
        'os_name': destiny_os,
        'fs_encoding': fs_encoding,
        'errors': underscore_replace,
        }
    encoded = compat.fsencode(path, **codec_kwargs)
    return compat.fsdecode(encoded, **codec_kwargs)
python
def secure_filename(path, destiny_os=os.name, fs_encoding=compat.FS_ENCODING): ''' Get rid of parent path components and special filenames. If path is invalid or protected, return empty string. :param path: unsafe path, only basename will be used :type: str :param destiny_os: destination operative system (defaults to os.name) :type destiny_os: str :param fs_encoding: fs path encoding (defaults to detected) :type fs_encoding: str :return: filename or empty string :rtype: str ''' path = generic_filename(path) path = clean_restricted_chars( path, restricted_chars=( nt_restricted_chars if destiny_os == 'nt' else restricted_chars )) path = path.strip(' .') # required by nt, recommended for others if check_forbidden_filename(path, destiny_os=destiny_os): return '' if isinstance(path, bytes): path = path.decode('latin-1', errors=underscore_replace) # Decode and recover from filesystem encoding in order to strip unwanted # characters out kwargs = { 'os_name': destiny_os, 'fs_encoding': fs_encoding, 'errors': underscore_replace, } fs_encoded_path = compat.fsencode(path, **kwargs) fs_decoded_path = compat.fsdecode(fs_encoded_path, **kwargs) return fs_decoded_path
[ "def", "secure_filename", "(", "path", ",", "destiny_os", "=", "os", ".", "name", ",", "fs_encoding", "=", "compat", ".", "FS_ENCODING", ")", ":", "path", "=", "generic_filename", "(", "path", ")", "path", "=", "clean_restricted_chars", "(", "path", ",", "restricted_chars", "=", "(", "nt_restricted_chars", "if", "destiny_os", "==", "'nt'", "else", "restricted_chars", ")", ")", "path", "=", "path", ".", "strip", "(", "' .'", ")", "# required by nt, recommended for others", "if", "check_forbidden_filename", "(", "path", ",", "destiny_os", "=", "destiny_os", ")", ":", "return", "''", "if", "isinstance", "(", "path", ",", "bytes", ")", ":", "path", "=", "path", ".", "decode", "(", "'latin-1'", ",", "errors", "=", "underscore_replace", ")", "# Decode and recover from filesystem encoding in order to strip unwanted", "# characters out", "kwargs", "=", "{", "'os_name'", ":", "destiny_os", ",", "'fs_encoding'", ":", "fs_encoding", ",", "'errors'", ":", "underscore_replace", ",", "}", "fs_encoded_path", "=", "compat", ".", "fsencode", "(", "path", ",", "*", "*", "kwargs", ")", "fs_decoded_path", "=", "compat", ".", "fsdecode", "(", "fs_encoded_path", ",", "*", "*", "kwargs", ")", "return", "fs_decoded_path" ]
Get rid of parent path components and special filenames. If path is invalid or protected, return empty string. :param path: unsafe path, only basename will be used :type: str :param destiny_os: destination operative system (defaults to os.name) :type destiny_os: str :param fs_encoding: fs path encoding (defaults to detected) :type fs_encoding: str :return: filename or empty string :rtype: str
[ "Get", "rid", "of", "parent", "path", "components", "and", "special", "filenames", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L898-L938
2,307
ergoithz/browsepy
browsepy/file.py
alternative_filename
def alternative_filename(filename, attempt=None):
    '''
    Generate an alternative version of the given filename.

    When a numeric attempt is given it is embedded in the new name as
    " (N)"; otherwise a random 8-character suffix is used.

    :param filename: original filename
    :param attempt: optional attempt number, defaults to None
    :return: new filename
    :rtype: str or unicode
    '''
    # Keep up to two extension components (e.g. '.tar.gz') attached.
    parts = filename.rsplit(u'.', 2)
    stem = parts[0]
    suffix = ''.join(u'.%s' % part for part in parts[1:])
    if attempt is None:
        token = ''.join(random.choice(fs_safe_characters) for _ in range(8))
        marker = u' %s' % token
    else:
        marker = u' (%d)' % attempt
    return u'%s%s%s' % (stem, marker, suffix)
python
def alternative_filename(filename, attempt=None): ''' Generates an alternative version of given filename. If an number attempt parameter is given, will be used on the alternative name, a random value will be used otherwise. :param filename: original filename :param attempt: optional attempt number, defaults to null :return: new filename :rtype: str or unicode ''' filename_parts = filename.rsplit(u'.', 2) name = filename_parts[0] ext = ''.join(u'.%s' % ext for ext in filename_parts[1:]) if attempt is None: choose = random.choice extra = u' %s' % ''.join(choose(fs_safe_characters) for i in range(8)) else: extra = u' (%d)' % attempt return u'%s%s%s' % (name, extra, ext)
[ "def", "alternative_filename", "(", "filename", ",", "attempt", "=", "None", ")", ":", "filename_parts", "=", "filename", ".", "rsplit", "(", "u'.'", ",", "2", ")", "name", "=", "filename_parts", "[", "0", "]", "ext", "=", "''", ".", "join", "(", "u'.%s'", "%", "ext", "for", "ext", "in", "filename_parts", "[", "1", ":", "]", ")", "if", "attempt", "is", "None", ":", "choose", "=", "random", ".", "choice", "extra", "=", "u' %s'", "%", "''", ".", "join", "(", "choose", "(", "fs_safe_characters", ")", "for", "i", "in", "range", "(", "8", ")", ")", "else", ":", "extra", "=", "u' (%d)'", "%", "attempt", "return", "u'%s%s%s'", "%", "(", "name", ",", "extra", ",", "ext", ")" ]
Generates an alternative version of given filename. If an number attempt parameter is given, will be used on the alternative name, a random value will be used otherwise. :param filename: original filename :param attempt: optional attempt number, defaults to null :return: new filename :rtype: str or unicode
[ "Generates", "an", "alternative", "version", "of", "given", "filename", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L941-L961
2,308
ergoithz/browsepy
browsepy/file.py
scandir
def scandir(path, app=None):
    '''
    Config-aware scandir, filtering entries through the application's
    ``exclude_fnc`` config option when one is set.

    :param path: absolute path
    :type path: str
    :param app: flask application
    :type app: flask.Flask or None
    :returns: filtered scandir entries
    :rtype: iterator
    '''
    exclude = app.config.get('exclude_fnc') if app else None
    if not exclude:
        return compat.scandir(path)
    return (
        entry
        for entry in compat.scandir(path)
        if not exclude(entry.path)
        )
python
def scandir(path, app=None): ''' Config-aware scandir. Currently, only aware of ``exclude_fnc``. :param path: absolute path :type path: str :param app: flask application :type app: flask.Flask or None :returns: filtered scandir entries :rtype: iterator ''' exclude = app and app.config.get('exclude_fnc') if exclude: return ( item for item in compat.scandir(path) if not exclude(item.path) ) return compat.scandir(path)
[ "def", "scandir", "(", "path", ",", "app", "=", "None", ")", ":", "exclude", "=", "app", "and", "app", ".", "config", ".", "get", "(", "'exclude_fnc'", ")", "if", "exclude", ":", "return", "(", "item", "for", "item", "in", "compat", ".", "scandir", "(", "path", ")", "if", "not", "exclude", "(", "item", ".", "path", ")", ")", "return", "compat", ".", "scandir", "(", "path", ")" ]
Config-aware scandir. Currently, only aware of ``exclude_fnc``. :param path: absolute path :type path: str :param app: flask application :type app: flask.Flask or None :returns: filtered scandir entries :rtype: iterator
[ "Config", "-", "aware", "scandir", ".", "Currently", "only", "aware", "of", "exclude_fnc", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L964-L982
2,309
ergoithz/browsepy
browsepy/file.py
Node.link
def link(self):
    '''
    Get the last widget whose place is "entry-link".

    :returns: widget on entry-link (ideally a link one), None if absent
    :rtype: namedtuple instance or None
    '''
    # Later widgets override earlier ones, so keep the last match.
    matches = [
        widget
        for widget in self.widgets
        if widget.place == 'entry-link'
        ]
    return matches[-1] if matches else None
python
def link(self): ''' Get last widget with place "entry-link". :returns: widget on entry-link (ideally a link one) :rtype: namedtuple instance ''' link = None for widget in self.widgets: if widget.place == 'entry-link': link = widget return link
[ "def", "link", "(", "self", ")", ":", "link", "=", "None", "for", "widget", "in", "self", ".", "widgets", ":", "if", "widget", ".", "place", "==", "'entry-link'", ":", "link", "=", "widget", "return", "link" ]
Get last widget with place "entry-link". :returns: widget on entry-link (ideally a link one) :rtype: namedtuple instance
[ "Get", "last", "widget", "with", "place", "entry", "-", "link", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L111-L122
2,310
ergoithz/browsepy
browsepy/file.py
Node.can_remove
def can_remove(self):
    '''
    Tell whether this node may be removed, based on the application's
    ``directory_remove`` config option.

    :returns: True if current node can be removed, False otherwise.
    :rtype: bool
    '''
    removal_base = self.app.config["directory_remove"]
    if not removal_base:
        # Removal is disabled when no base directory is configured.
        return False
    return check_under_base(self.path, removal_base)
python
def can_remove(self): ''' Get if current node can be removed based on app config's directory_remove. :returns: True if current node can be removed, False otherwise. :rtype: bool ''' dirbase = self.app.config["directory_remove"] return bool(dirbase and check_under_base(self.path, dirbase))
[ "def", "can_remove", "(", "self", ")", ":", "dirbase", "=", "self", ".", "app", ".", "config", "[", "\"directory_remove\"", "]", "return", "bool", "(", "dirbase", "and", "check_under_base", "(", "self", ".", "path", ",", "dirbase", ")", ")" ]
Get if current node can be removed based on app config's directory_remove. :returns: True if current node can be removed, False otherwise. :rtype: bool
[ "Get", "if", "current", "node", "can", "be", "removed", "based", "on", "app", "config", "s", "directory_remove", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L125-L134
2,311
ergoithz/browsepy
browsepy/file.py
Node.parent
def parent(self):
    '''
    Get the parent node when available, based on the application's
    ``directory_base`` config option.

    Returns None at the directory base itself and when no parent path
    can be derived.

    :returns: parent object if available
    :rtype: Node instance or None
    '''
    if check_path(self.path, self.app.config['directory_base']):
        return None
    parent_path = os.path.dirname(self.path) if self.path else None
    if not parent_path:
        return None
    return self.directory_class(parent_path, self.app)
python
def parent(self): ''' Get parent node if available based on app config's directory_base. :returns: parent object if available :rtype: Node instance or None ''' if check_path(self.path, self.app.config['directory_base']): return None parent = os.path.dirname(self.path) if self.path else None return self.directory_class(parent, self.app) if parent else None
[ "def", "parent", "(", "self", ")", ":", "if", "check_path", "(", "self", ".", "path", ",", "self", ".", "app", ".", "config", "[", "'directory_base'", "]", ")", ":", "return", "None", "parent", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "path", ")", "if", "self", ".", "path", "else", "None", "return", "self", ".", "directory_class", "(", "parent", ",", "self", ".", "app", ")", "if", "parent", "else", "None" ]
Get parent node if available based on app config's directory_base. :returns: parent object if available :rtype: Node instance or None
[ "Get", "parent", "node", "if", "available", "based", "on", "app", "config", "s", "directory_base", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L158-L168
2,312
ergoithz/browsepy
browsepy/file.py
Node.ancestors
def ancestors(self):
    '''
    Get the chain of ancestor nodes, nearest first, by walking
    ``parent`` links until the directory base (a node with no parent)
    is reached.

    :returns: list of ancestors starting from nearest.
    :rtype: list of Node objects
    '''
    chain = []
    node = self.parent
    while node:
        chain.append(node)
        node = node.parent
    return chain
python
def ancestors(self): ''' Get list of ancestors until app config's directory_base is reached. :returns: list of ancestors starting from nearest. :rtype: list of Node objects ''' ancestors = [] parent = self.parent while parent: ancestors.append(parent) parent = parent.parent return ancestors
[ "def", "ancestors", "(", "self", ")", ":", "ancestors", "=", "[", "]", "parent", "=", "self", ".", "parent", "while", "parent", ":", "ancestors", ".", "append", "(", "parent", ")", "parent", "=", "parent", ".", "parent", "return", "ancestors" ]
Get list of ancestors until app config's directory_base is reached. :returns: list of ancestors starting from nearest. :rtype: list of Node objects
[ "Get", "list", "of", "ancestors", "until", "app", "config", "s", "directory_base", "is", "reached", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L171-L183
2,313
ergoithz/browsepy
browsepy/file.py
Node.modified
def modified(self): ''' Get human-readable last modification date-time. :returns: iso9008-like date-time string (without timezone) :rtype: str ''' try: dt = datetime.datetime.fromtimestamp(self.stats.st_mtime) return dt.strftime('%Y.%m.%d %H:%M:%S') except OSError: return None
python
def modified(self): ''' Get human-readable last modification date-time. :returns: iso9008-like date-time string (without timezone) :rtype: str ''' try: dt = datetime.datetime.fromtimestamp(self.stats.st_mtime) return dt.strftime('%Y.%m.%d %H:%M:%S') except OSError: return None
[ "def", "modified", "(", "self", ")", ":", "try", ":", "dt", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "self", ".", "stats", ".", "st_mtime", ")", "return", "dt", ".", "strftime", "(", "'%Y.%m.%d %H:%M:%S'", ")", "except", "OSError", ":", "return", "None" ]
Get human-readable last modification date-time. :returns: iso9008-like date-time string (without timezone) :rtype: str
[ "Get", "human", "-", "readable", "last", "modification", "date", "-", "time", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L186-L197
2,314
ergoithz/browsepy
browsepy/file.py
Node.from_urlpath
def from_urlpath(cls, path, app=None):
    '''
    Alternative constructor which accepts a path as taken from URL and
    uses the given app or the current app config to get the real path.

    If the class has attribute `generic` set to True, `directory_class`
    or `file_class` is used as the concrete type depending on whether
    the resolved path is a directory.

    :param path: relative path as from URL
    :param app: optional, flask application
    :return: file object pointing to path
    :rtype: File
    '''
    app = app or current_app
    absolute = urlpath_to_abspath(path, app.config['directory_base'])
    if not cls.generic:
        node_class = cls
    elif os.path.isdir(absolute):
        node_class = cls.directory_class
    else:
        node_class = cls.file_class
    return node_class(path=absolute, app=app)
python
def from_urlpath(cls, path, app=None): ''' Alternative constructor which accepts a path as taken from URL and uses the given app or the current app config to get the real path. If class has attribute `generic` set to True, `directory_class` or `file_class` will be used as type. :param path: relative path as from URL :param app: optional, flask application :return: file object pointing to path :rtype: File ''' app = app or current_app base = app.config['directory_base'] path = urlpath_to_abspath(path, base) if not cls.generic: kls = cls elif os.path.isdir(path): kls = cls.directory_class else: kls = cls.file_class return kls(path=path, app=app)
[ "def", "from_urlpath", "(", "cls", ",", "path", ",", "app", "=", "None", ")", ":", "app", "=", "app", "or", "current_app", "base", "=", "app", ".", "config", "[", "'directory_base'", "]", "path", "=", "urlpath_to_abspath", "(", "path", ",", "base", ")", "if", "not", "cls", ".", "generic", ":", "kls", "=", "cls", "elif", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "kls", "=", "cls", ".", "directory_class", "else", ":", "kls", "=", "cls", ".", "file_class", "return", "kls", "(", "path", "=", "path", ",", "app", "=", "app", ")" ]
Alternative constructor which accepts a path as taken from URL and uses the given app or the current app config to get the real path. If class has attribute `generic` set to True, `directory_class` or `file_class` will be used as type. :param path: relative path as from URL :param app: optional, flask application :return: file object pointing to path :rtype: File
[ "Alternative", "constructor", "which", "accepts", "a", "path", "as", "taken", "from", "URL", "and", "uses", "the", "given", "app", "or", "the", "current", "app", "config", "to", "get", "the", "real", "path", "." ]
1612a930ef220fae507e1b152c531707e555bd92
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L274-L296
2,315
brutasse/graphite-api
graphite_api/finders/whisper.py
WhisperFinder._find_paths
def _find_paths(self, current_dir, patterns):
    """Recursively generate absolute paths whose components underneath
    current_dir match the corresponding pattern in patterns."""
    pattern, remaining = patterns[0], patterns[1:]
    has_wildcard = is_pattern(pattern)
    using_globstar = pattern == "**"

    # Only enumerate the directory when the pattern actually needs
    # matching; this avoids os.listdir() for performance.
    if has_wildcard:
        entries = [entry.name for entry in scandir(current_dir)]
    else:
        entries = [pattern]

    if using_globstar:
        # '**' matches every directory in the subtree of current_dir.
        matching_subdirs = map(lambda item: item[0], walk(current_dir))
    else:
        subdirs = [entry for entry in entries
                   if os.path.isdir(os.path.join(current_dir, entry))]
        matching_subdirs = match_entries(subdirs, pattern)

    # A terminal globstar implies matching every file in the subtrees.
    if using_globstar and not remaining:
        remaining = ['*']

    if remaining:
        # More pattern components left: recurse into matching subdirs.
        for subdir in matching_subdirs:
            child_dir = os.path.join(current_dir, subdir)
            for match in self._find_paths(child_dir, remaining):
                yield match
    else:
        # Last pattern component: match whisper data files here.
        if not has_wildcard:
            entries = [pattern + '.wsp', pattern + '.wsp.gz']
        files = [entry for entry in entries
                 if os.path.isfile(os.path.join(current_dir, entry))]
        matching_files = match_entries(files, pattern + '.*')
        for basename in matching_files + matching_subdirs:
            yield os.path.join(current_dir, basename)
python
def _find_paths(self, current_dir, patterns): pattern = patterns[0] patterns = patterns[1:] has_wildcard = is_pattern(pattern) using_globstar = pattern == "**" # This avoids os.listdir() for performance if has_wildcard: entries = [x.name for x in scandir(current_dir)] else: entries = [pattern] if using_globstar: matching_subdirs = map(lambda x: x[0], walk(current_dir)) else: subdirs = [e for e in entries if os.path.isdir(os.path.join(current_dir, e))] matching_subdirs = match_entries(subdirs, pattern) # For terminal globstar, add a pattern for all files in subdirs if using_globstar and not patterns: patterns = ['*'] if patterns: # we've still got more directories to traverse for subdir in matching_subdirs: absolute_path = os.path.join(current_dir, subdir) for match in self._find_paths(absolute_path, patterns): yield match else: # we've got the last pattern if not has_wildcard: entries = [pattern + '.wsp', pattern + '.wsp.gz'] files = [e for e in entries if os.path.isfile(os.path.join(current_dir, e))] matching_files = match_entries(files, pattern + '.*') for _basename in matching_files + matching_subdirs: yield os.path.join(current_dir, _basename)
[ "def", "_find_paths", "(", "self", ",", "current_dir", ",", "patterns", ")", ":", "pattern", "=", "patterns", "[", "0", "]", "patterns", "=", "patterns", "[", "1", ":", "]", "has_wildcard", "=", "is_pattern", "(", "pattern", ")", "using_globstar", "=", "pattern", "==", "\"**\"", "# This avoids os.listdir() for performance", "if", "has_wildcard", ":", "entries", "=", "[", "x", ".", "name", "for", "x", "in", "scandir", "(", "current_dir", ")", "]", "else", ":", "entries", "=", "[", "pattern", "]", "if", "using_globstar", ":", "matching_subdirs", "=", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "walk", "(", "current_dir", ")", ")", "else", ":", "subdirs", "=", "[", "e", "for", "e", "in", "entries", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "current_dir", ",", "e", ")", ")", "]", "matching_subdirs", "=", "match_entries", "(", "subdirs", ",", "pattern", ")", "# For terminal globstar, add a pattern for all files in subdirs", "if", "using_globstar", "and", "not", "patterns", ":", "patterns", "=", "[", "'*'", "]", "if", "patterns", ":", "# we've still got more directories to traverse", "for", "subdir", "in", "matching_subdirs", ":", "absolute_path", "=", "os", ".", "path", ".", "join", "(", "current_dir", ",", "subdir", ")", "for", "match", "in", "self", ".", "_find_paths", "(", "absolute_path", ",", "patterns", ")", ":", "yield", "match", "else", ":", "# we've got the last pattern", "if", "not", "has_wildcard", ":", "entries", "=", "[", "pattern", "+", "'.wsp'", ",", "pattern", "+", "'.wsp.gz'", "]", "files", "=", "[", "e", "for", "e", "in", "entries", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "current_dir", ",", "e", ")", ")", "]", "matching_files", "=", "match_entries", "(", "files", ",", "pattern", "+", "'.*'", ")", "for", "_basename", "in", "matching_files", "+", "matching_subdirs", ":", "yield", "os", ".", "path", ".", "join", "(", "current_dir", ",", "_basename", ")" ]
Recursively generates absolute paths whose components underneath current_dir match the corresponding pattern in patterns
[ "Recursively", "generates", "absolute", "paths", "whose", "components", "underneath", "current_dir", "match", "the", "corresponding", "pattern", "in", "patterns" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/finders/whisper.py#L73-L113
2,316
brutasse/graphite-api
graphite_api/intervals.py
union_overlapping
def union_overlapping(intervals): """Union any overlapping intervals in the given set.""" disjoint_intervals = [] for interval in intervals: if disjoint_intervals and disjoint_intervals[-1].overlaps(interval): disjoint_intervals[-1] = disjoint_intervals[-1].union(interval) else: disjoint_intervals.append(interval) return disjoint_intervals
python
def union_overlapping(intervals): disjoint_intervals = [] for interval in intervals: if disjoint_intervals and disjoint_intervals[-1].overlaps(interval): disjoint_intervals[-1] = disjoint_intervals[-1].union(interval) else: disjoint_intervals.append(interval) return disjoint_intervals
[ "def", "union_overlapping", "(", "intervals", ")", ":", "disjoint_intervals", "=", "[", "]", "for", "interval", "in", "intervals", ":", "if", "disjoint_intervals", "and", "disjoint_intervals", "[", "-", "1", "]", ".", "overlaps", "(", "interval", ")", ":", "disjoint_intervals", "[", "-", "1", "]", "=", "disjoint_intervals", "[", "-", "1", "]", ".", "union", "(", "interval", ")", "else", ":", "disjoint_intervals", ".", "append", "(", "interval", ")", "return", "disjoint_intervals" ]
Union any overlapping intervals in the given set.
[ "Union", "any", "overlapping", "intervals", "in", "the", "given", "set", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/intervals.py#L128-L138
2,317
brutasse/graphite-api
graphite_api/app.py
recurse
def recurse(query, index): """ Recursively walk across paths, adding leaves to the index as they're found. """ for node in app.store.find(query): if node.is_leaf: index.add(node.path) else: recurse('{0}.*'.format(node.path), index)
python
def recurse(query, index): for node in app.store.find(query): if node.is_leaf: index.add(node.path) else: recurse('{0}.*'.format(node.path), index)
[ "def", "recurse", "(", "query", ",", "index", ")", ":", "for", "node", "in", "app", ".", "store", ".", "find", "(", "query", ")", ":", "if", "node", ".", "is_leaf", ":", "index", ".", "add", "(", "node", ".", "path", ")", "else", ":", "recurse", "(", "'{0}.*'", ".", "format", "(", "node", ".", "path", ")", ",", "index", ")" ]
Recursively walk across paths, adding leaves to the index as they're found.
[ "Recursively", "walk", "across", "paths", "adding", "leaves", "to", "the", "index", "as", "they", "re", "found", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/app.py#L204-L212
2,318
brutasse/graphite-api
graphite_api/_vendor/whisper.py
__archive_fetch
def __archive_fetch(fh, archive, fromTime, untilTime): """ Fetch data from a single archive. Note that checks for validity of the time period requested happen above this level so it's possible to wrap around the archive on a read and request data older than the archive's retention """ fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint'] untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint'] if fromInterval == untilInterval: # Zero-length time range: always include the next point untilInterval += archive['secondsPerPoint'] fh.seek(archive['offset']) packedPoint = fh.read(pointSize) (baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint) if baseInterval == 0: step = archive['secondsPerPoint'] points = (untilInterval - fromInterval) // step timeInfo = (fromInterval,untilInterval,step) valueList = [None] * points return (timeInfo,valueList) #Determine fromOffset timeDistance = fromInterval - baseInterval pointDistance = timeDistance // archive['secondsPerPoint'] byteDistance = pointDistance * pointSize fromOffset = archive['offset'] + (byteDistance % archive['size']) #Determine untilOffset timeDistance = untilInterval - baseInterval pointDistance = timeDistance // archive['secondsPerPoint'] byteDistance = pointDistance * pointSize untilOffset = archive['offset'] + (byteDistance % archive['size']) #Read all the points in the interval fh.seek(fromOffset) if fromOffset < untilOffset: #If we don't wrap around the archive seriesString = fh.read(untilOffset - fromOffset) else: #We do wrap around the archive, so we need two reads archiveEnd = archive['offset'] + archive['size'] seriesString = fh.read(archiveEnd - fromOffset) fh.seek(archive['offset']) seriesString += fh.read(untilOffset - archive['offset']) #Now we unpack the series data we just read (anything faster than unpack?) 
byteOrder,pointTypes = pointFormat[0],pointFormat[1:] points = len(seriesString) // pointSize seriesFormat = byteOrder + (pointTypes * points) unpackedSeries = struct.unpack(seriesFormat, seriesString) #And finally we construct a list of values (optimize this!) valueList = [None] * points #pre-allocate entire list for speed currentInterval = fromInterval step = archive['secondsPerPoint'] for i in xrange(0,len(unpackedSeries),2): pointTime = unpackedSeries[i] if pointTime == currentInterval: pointValue = unpackedSeries[i+1] valueList[i//2] = pointValue #in-place reassignment is faster than append() currentInterval += step timeInfo = (fromInterval,untilInterval,step) return (timeInfo,valueList)
python
def __archive_fetch(fh, archive, fromTime, untilTime): fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint'] untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) ) + archive['secondsPerPoint'] if fromInterval == untilInterval: # Zero-length time range: always include the next point untilInterval += archive['secondsPerPoint'] fh.seek(archive['offset']) packedPoint = fh.read(pointSize) (baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint) if baseInterval == 0: step = archive['secondsPerPoint'] points = (untilInterval - fromInterval) // step timeInfo = (fromInterval,untilInterval,step) valueList = [None] * points return (timeInfo,valueList) #Determine fromOffset timeDistance = fromInterval - baseInterval pointDistance = timeDistance // archive['secondsPerPoint'] byteDistance = pointDistance * pointSize fromOffset = archive['offset'] + (byteDistance % archive['size']) #Determine untilOffset timeDistance = untilInterval - baseInterval pointDistance = timeDistance // archive['secondsPerPoint'] byteDistance = pointDistance * pointSize untilOffset = archive['offset'] + (byteDistance % archive['size']) #Read all the points in the interval fh.seek(fromOffset) if fromOffset < untilOffset: #If we don't wrap around the archive seriesString = fh.read(untilOffset - fromOffset) else: #We do wrap around the archive, so we need two reads archiveEnd = archive['offset'] + archive['size'] seriesString = fh.read(archiveEnd - fromOffset) fh.seek(archive['offset']) seriesString += fh.read(untilOffset - archive['offset']) #Now we unpack the series data we just read (anything faster than unpack?) byteOrder,pointTypes = pointFormat[0],pointFormat[1:] points = len(seriesString) // pointSize seriesFormat = byteOrder + (pointTypes * points) unpackedSeries = struct.unpack(seriesFormat, seriesString) #And finally we construct a list of values (optimize this!) 
valueList = [None] * points #pre-allocate entire list for speed currentInterval = fromInterval step = archive['secondsPerPoint'] for i in xrange(0,len(unpackedSeries),2): pointTime = unpackedSeries[i] if pointTime == currentInterval: pointValue = unpackedSeries[i+1] valueList[i//2] = pointValue #in-place reassignment is faster than append() currentInterval += step timeInfo = (fromInterval,untilInterval,step) return (timeInfo,valueList)
[ "def", "__archive_fetch", "(", "fh", ",", "archive", ",", "fromTime", ",", "untilTime", ")", ":", "fromInterval", "=", "int", "(", "fromTime", "-", "(", "fromTime", "%", "archive", "[", "'secondsPerPoint'", "]", ")", ")", "+", "archive", "[", "'secondsPerPoint'", "]", "untilInterval", "=", "int", "(", "untilTime", "-", "(", "untilTime", "%", "archive", "[", "'secondsPerPoint'", "]", ")", ")", "+", "archive", "[", "'secondsPerPoint'", "]", "if", "fromInterval", "==", "untilInterval", ":", "# Zero-length time range: always include the next point", "untilInterval", "+=", "archive", "[", "'secondsPerPoint'", "]", "fh", ".", "seek", "(", "archive", "[", "'offset'", "]", ")", "packedPoint", "=", "fh", ".", "read", "(", "pointSize", ")", "(", "baseInterval", ",", "baseValue", ")", "=", "struct", ".", "unpack", "(", "pointFormat", ",", "packedPoint", ")", "if", "baseInterval", "==", "0", ":", "step", "=", "archive", "[", "'secondsPerPoint'", "]", "points", "=", "(", "untilInterval", "-", "fromInterval", ")", "//", "step", "timeInfo", "=", "(", "fromInterval", ",", "untilInterval", ",", "step", ")", "valueList", "=", "[", "None", "]", "*", "points", "return", "(", "timeInfo", ",", "valueList", ")", "#Determine fromOffset", "timeDistance", "=", "fromInterval", "-", "baseInterval", "pointDistance", "=", "timeDistance", "//", "archive", "[", "'secondsPerPoint'", "]", "byteDistance", "=", "pointDistance", "*", "pointSize", "fromOffset", "=", "archive", "[", "'offset'", "]", "+", "(", "byteDistance", "%", "archive", "[", "'size'", "]", ")", "#Determine untilOffset", "timeDistance", "=", "untilInterval", "-", "baseInterval", "pointDistance", "=", "timeDistance", "//", "archive", "[", "'secondsPerPoint'", "]", "byteDistance", "=", "pointDistance", "*", "pointSize", "untilOffset", "=", "archive", "[", "'offset'", "]", "+", "(", "byteDistance", "%", "archive", "[", "'size'", "]", ")", "#Read all the points in the interval", "fh", ".", "seek", "(", "fromOffset", ")", "if", 
"fromOffset", "<", "untilOffset", ":", "#If we don't wrap around the archive", "seriesString", "=", "fh", ".", "read", "(", "untilOffset", "-", "fromOffset", ")", "else", ":", "#We do wrap around the archive, so we need two reads", "archiveEnd", "=", "archive", "[", "'offset'", "]", "+", "archive", "[", "'size'", "]", "seriesString", "=", "fh", ".", "read", "(", "archiveEnd", "-", "fromOffset", ")", "fh", ".", "seek", "(", "archive", "[", "'offset'", "]", ")", "seriesString", "+=", "fh", ".", "read", "(", "untilOffset", "-", "archive", "[", "'offset'", "]", ")", "#Now we unpack the series data we just read (anything faster than unpack?)", "byteOrder", ",", "pointTypes", "=", "pointFormat", "[", "0", "]", ",", "pointFormat", "[", "1", ":", "]", "points", "=", "len", "(", "seriesString", ")", "//", "pointSize", "seriesFormat", "=", "byteOrder", "+", "(", "pointTypes", "*", "points", ")", "unpackedSeries", "=", "struct", ".", "unpack", "(", "seriesFormat", ",", "seriesString", ")", "#And finally we construct a list of values (optimize this!)", "valueList", "=", "[", "None", "]", "*", "points", "#pre-allocate entire list for speed", "currentInterval", "=", "fromInterval", "step", "=", "archive", "[", "'secondsPerPoint'", "]", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "unpackedSeries", ")", ",", "2", ")", ":", "pointTime", "=", "unpackedSeries", "[", "i", "]", "if", "pointTime", "==", "currentInterval", ":", "pointValue", "=", "unpackedSeries", "[", "i", "+", "1", "]", "valueList", "[", "i", "//", "2", "]", "=", "pointValue", "#in-place reassignment is faster than append()", "currentInterval", "+=", "step", "timeInfo", "=", "(", "fromInterval", ",", "untilInterval", ",", "step", ")", "return", "(", "timeInfo", ",", "valueList", ")" ]
Fetch data from a single archive. Note that checks for validity of the time period requested happen above this level so it's possible to wrap around the archive on a read and request data older than the archive's retention
[ "Fetch", "data", "from", "a", "single", "archive", ".", "Note", "that", "checks", "for", "validity", "of", "the", "time", "period", "requested", "happen", "above", "this", "level", "so", "it", "s", "possible", "to", "wrap", "around", "the", "archive", "on", "a", "read", "and", "request", "data", "older", "than", "the", "archive", "s", "retention" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/_vendor/whisper.py#L797-L860
2,319
brutasse/graphite-api
graphite_api/_vendor/whisper.py
merge
def merge(path_from, path_to): """ Merges the data from one whisper file into another. Each file must have the same archive configuration """ fh_from = open(path_from, 'rb') fh_to = open(path_to, 'rb+') return file_merge(fh_from, fh_to)
python
def merge(path_from, path_to): fh_from = open(path_from, 'rb') fh_to = open(path_to, 'rb+') return file_merge(fh_from, fh_to)
[ "def", "merge", "(", "path_from", ",", "path_to", ")", ":", "fh_from", "=", "open", "(", "path_from", ",", "'rb'", ")", "fh_to", "=", "open", "(", "path_to", ",", "'rb+'", ")", "return", "file_merge", "(", "fh_from", ",", "fh_to", ")" ]
Merges the data from one whisper file into another. Each file must have the same archive configuration
[ "Merges", "the", "data", "from", "one", "whisper", "file", "into", "another", ".", "Each", "file", "must", "have", "the", "same", "archive", "configuration" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/_vendor/whisper.py#L862-L868
2,320
brutasse/graphite-api
graphite_api/_vendor/whisper.py
diff
def diff(path_from, path_to, ignore_empty = False): """ Compare two whisper databases. Each file must have the same archive configuration """ fh_from = open(path_from, 'rb') fh_to = open(path_to, 'rb') diffs = file_diff(fh_from, fh_to, ignore_empty) fh_to.close() fh_from.close() return diffs
python
def diff(path_from, path_to, ignore_empty = False): fh_from = open(path_from, 'rb') fh_to = open(path_to, 'rb') diffs = file_diff(fh_from, fh_to, ignore_empty) fh_to.close() fh_from.close() return diffs
[ "def", "diff", "(", "path_from", ",", "path_to", ",", "ignore_empty", "=", "False", ")", ":", "fh_from", "=", "open", "(", "path_from", ",", "'rb'", ")", "fh_to", "=", "open", "(", "path_to", ",", "'rb'", ")", "diffs", "=", "file_diff", "(", "fh_from", ",", "fh_to", ",", "ignore_empty", ")", "fh_to", ".", "close", "(", ")", "fh_from", ".", "close", "(", ")", "return", "diffs" ]
Compare two whisper databases. Each file must have the same archive configuration
[ "Compare", "two", "whisper", "databases", ".", "Each", "file", "must", "have", "the", "same", "archive", "configuration" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/_vendor/whisper.py#L895-L902
2,321
brutasse/graphite-api
graphite_api/render/datalib.py
DataStore.add_data
def add_data(self, path, time_info, data, exprs): """ Stores data before it can be put into a time series """ # Dont add if empty if not nonempty(data): for d in self.data[path]: if nonempty(d['values']): return # Add data to path for expr in exprs: self.paths[expr].add(path) self.data[path].append({ 'time_info': time_info, 'values': data })
python
def add_data(self, path, time_info, data, exprs): # Dont add if empty if not nonempty(data): for d in self.data[path]: if nonempty(d['values']): return # Add data to path for expr in exprs: self.paths[expr].add(path) self.data[path].append({ 'time_info': time_info, 'values': data })
[ "def", "add_data", "(", "self", ",", "path", ",", "time_info", ",", "data", ",", "exprs", ")", ":", "# Dont add if empty", "if", "not", "nonempty", "(", "data", ")", ":", "for", "d", "in", "self", ".", "data", "[", "path", "]", ":", "if", "nonempty", "(", "d", "[", "'values'", "]", ")", ":", "return", "# Add data to path", "for", "expr", "in", "exprs", ":", "self", ".", "paths", "[", "expr", "]", ".", "add", "(", "path", ")", "self", ".", "data", "[", "path", "]", ".", "append", "(", "{", "'time_info'", ":", "time_info", ",", "'values'", ":", "data", "}", ")" ]
Stores data before it can be put into a time series
[ "Stores", "data", "before", "it", "can", "be", "put", "into", "a", "time", "series" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/datalib.py#L117-L133
2,322
brutasse/graphite-api
graphite_api/carbonlink.py
CarbonLinkPool.select_host
def select_host(self, metric): """ Returns the carbon host that has data for the given metric. """ key = self.keyfunc(metric) nodes = [] servers = set() for node in self.hash_ring.get_nodes(key): server, instance = node if server in servers: continue servers.add(server) nodes.append(node) if len(servers) >= self.replication_factor: break available = [n for n in nodes if self.is_available(n)] return random.choice(available or nodes)
python
def select_host(self, metric): key = self.keyfunc(metric) nodes = [] servers = set() for node in self.hash_ring.get_nodes(key): server, instance = node if server in servers: continue servers.add(server) nodes.append(node) if len(servers) >= self.replication_factor: break available = [n for n in nodes if self.is_available(n)] return random.choice(available or nodes)
[ "def", "select_host", "(", "self", ",", "metric", ")", ":", "key", "=", "self", ".", "keyfunc", "(", "metric", ")", "nodes", "=", "[", "]", "servers", "=", "set", "(", ")", "for", "node", "in", "self", ".", "hash_ring", ".", "get_nodes", "(", "key", ")", ":", "server", ",", "instance", "=", "node", "if", "server", "in", "servers", ":", "continue", "servers", ".", "add", "(", "server", ")", "nodes", ".", "append", "(", "node", ")", "if", "len", "(", "servers", ")", ">=", "self", ".", "replication_factor", ":", "break", "available", "=", "[", "n", "for", "n", "in", "nodes", "if", "self", ".", "is_available", "(", "n", ")", "]", "return", "random", ".", "choice", "(", "available", "or", "nodes", ")" ]
Returns the carbon host that has data for the given metric.
[ "Returns", "the", "carbon", "host", "that", "has", "data", "for", "the", "given", "metric", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/carbonlink.py#L180-L196
2,323
brutasse/graphite-api
graphite_api/render/glyph.py
safeArgs
def safeArgs(args): """Iterate over valid, finite values in an iterable. Skip any items that are None, NaN, or infinite. """ return (arg for arg in args if arg is not None and not math.isnan(arg) and not math.isinf(arg))
python
def safeArgs(args): return (arg for arg in args if arg is not None and not math.isnan(arg) and not math.isinf(arg))
[ "def", "safeArgs", "(", "args", ")", ":", "return", "(", "arg", "for", "arg", "in", "args", "if", "arg", "is", "not", "None", "and", "not", "math", ".", "isnan", "(", "arg", ")", "and", "not", "math", ".", "isinf", "(", "arg", ")", ")" ]
Iterate over valid, finite values in an iterable. Skip any items that are None, NaN, or infinite.
[ "Iterate", "over", "valid", "finite", "values", "in", "an", "iterable", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L2147-L2153
2,324
brutasse/graphite-api
graphite_api/render/glyph.py
format_units
def format_units(v, step=None, system="si", units=None): """Format the given value in standardized units. ``system`` is either 'binary' or 'si' For more info, see: http://en.wikipedia.org/wiki/SI_prefix http://en.wikipedia.org/wiki/Binary_prefix """ if v is None: return 0, '' for prefix, size in UnitSystems[system]: if condition(v, size, step): v2 = v / size if v2 - math.floor(v2) < 0.00000000001 and v > 1: v2 = float(math.floor(v2)) if units: prefix = "%s%s" % (prefix, units) return v2, prefix if v - math.floor(v) < 0.00000000001 and v > 1: v = float(math.floor(v)) if units: prefix = units else: prefix = '' return v, prefix
python
def format_units(v, step=None, system="si", units=None): if v is None: return 0, '' for prefix, size in UnitSystems[system]: if condition(v, size, step): v2 = v / size if v2 - math.floor(v2) < 0.00000000001 and v > 1: v2 = float(math.floor(v2)) if units: prefix = "%s%s" % (prefix, units) return v2, prefix if v - math.floor(v) < 0.00000000001 and v > 1: v = float(math.floor(v)) if units: prefix = units else: prefix = '' return v, prefix
[ "def", "format_units", "(", "v", ",", "step", "=", "None", ",", "system", "=", "\"si\"", ",", "units", "=", "None", ")", ":", "if", "v", "is", "None", ":", "return", "0", ",", "''", "for", "prefix", ",", "size", "in", "UnitSystems", "[", "system", "]", ":", "if", "condition", "(", "v", ",", "size", ",", "step", ")", ":", "v2", "=", "v", "/", "size", "if", "v2", "-", "math", ".", "floor", "(", "v2", ")", "<", "0.00000000001", "and", "v", ">", "1", ":", "v2", "=", "float", "(", "math", ".", "floor", "(", "v2", ")", ")", "if", "units", ":", "prefix", "=", "\"%s%s\"", "%", "(", "prefix", ",", "units", ")", "return", "v2", ",", "prefix", "if", "v", "-", "math", ".", "floor", "(", "v", ")", "<", "0.00000000001", "and", "v", ">", "1", ":", "v", "=", "float", "(", "math", ".", "floor", "(", "v", ")", ")", "if", "units", ":", "prefix", "=", "units", "else", ":", "prefix", "=", "''", "return", "v", ",", "prefix" ]
Format the given value in standardized units. ``system`` is either 'binary' or 'si' For more info, see: http://en.wikipedia.org/wiki/SI_prefix http://en.wikipedia.org/wiki/Binary_prefix
[ "Format", "the", "given", "value", "in", "standardized", "units", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L2219-L2246
2,325
brutasse/graphite-api
graphite_api/render/glyph.py
_AxisTics.checkFinite
def checkFinite(value, name='value'): """Check that value is a finite number. If it is, return it. If not, raise GraphError describing the problem, using name in the error message. """ if math.isnan(value): raise GraphError('Encountered NaN %s' % (name,)) elif math.isinf(value): raise GraphError('Encountered infinite %s' % (name,)) return value
python
def checkFinite(value, name='value'): if math.isnan(value): raise GraphError('Encountered NaN %s' % (name,)) elif math.isinf(value): raise GraphError('Encountered infinite %s' % (name,)) return value
[ "def", "checkFinite", "(", "value", ",", "name", "=", "'value'", ")", ":", "if", "math", ".", "isnan", "(", "value", ")", ":", "raise", "GraphError", "(", "'Encountered NaN %s'", "%", "(", "name", ",", ")", ")", "elif", "math", ".", "isinf", "(", "value", ")", ":", "raise", "GraphError", "(", "'Encountered infinite %s'", "%", "(", "name", ",", ")", ")", "return", "value" ]
Check that value is a finite number. If it is, return it. If not, raise GraphError describing the problem, using name in the error message.
[ "Check", "that", "value", "is", "a", "finite", "number", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L347-L357
2,326
brutasse/graphite-api
graphite_api/render/glyph.py
_AxisTics.reconcileLimits
def reconcileLimits(self): """If self.minValue is not less than self.maxValue, fix the problem. If self.minValue is not less than self.maxValue, adjust self.minValue and/or self.maxValue (depending on which was not specified explicitly by the user) to make self.minValue < self.maxValue. If the user specified both limits explicitly, then raise GraphError. """ if self.minValue < self.maxValue: # The limits are already OK. return minFixed = (self.minValueSource in ['min']) maxFixed = (self.maxValueSource in ['max', 'limit']) if minFixed and maxFixed: raise GraphError('The %s must be less than the %s' % (self.minValueSource, self.maxValueSource)) elif minFixed: self.maxValue = self.minValue + self.chooseDelta(self.minValue) elif maxFixed: self.minValue = self.maxValue - self.chooseDelta(self.maxValue) else: delta = self.chooseDelta(max(abs(self.minValue), abs(self.maxValue))) average = (self.minValue + self.maxValue) / 2.0 self.minValue = average - delta self.maxValue = average + delta
python
def reconcileLimits(self): if self.minValue < self.maxValue: # The limits are already OK. return minFixed = (self.minValueSource in ['min']) maxFixed = (self.maxValueSource in ['max', 'limit']) if minFixed and maxFixed: raise GraphError('The %s must be less than the %s' % (self.minValueSource, self.maxValueSource)) elif minFixed: self.maxValue = self.minValue + self.chooseDelta(self.minValue) elif maxFixed: self.minValue = self.maxValue - self.chooseDelta(self.maxValue) else: delta = self.chooseDelta(max(abs(self.minValue), abs(self.maxValue))) average = (self.minValue + self.maxValue) / 2.0 self.minValue = average - delta self.maxValue = average + delta
[ "def", "reconcileLimits", "(", "self", ")", ":", "if", "self", ".", "minValue", "<", "self", ".", "maxValue", ":", "# The limits are already OK.", "return", "minFixed", "=", "(", "self", ".", "minValueSource", "in", "[", "'min'", "]", ")", "maxFixed", "=", "(", "self", ".", "maxValueSource", "in", "[", "'max'", ",", "'limit'", "]", ")", "if", "minFixed", "and", "maxFixed", ":", "raise", "GraphError", "(", "'The %s must be less than the %s'", "%", "(", "self", ".", "minValueSource", ",", "self", ".", "maxValueSource", ")", ")", "elif", "minFixed", ":", "self", ".", "maxValue", "=", "self", ".", "minValue", "+", "self", ".", "chooseDelta", "(", "self", ".", "minValue", ")", "elif", "maxFixed", ":", "self", ".", "minValue", "=", "self", ".", "maxValue", "-", "self", ".", "chooseDelta", "(", "self", ".", "maxValue", ")", "else", ":", "delta", "=", "self", ".", "chooseDelta", "(", "max", "(", "abs", "(", "self", ".", "minValue", ")", ",", "abs", "(", "self", ".", "maxValue", ")", ")", ")", "average", "=", "(", "self", ".", "minValue", "+", "self", ".", "maxValue", ")", "/", "2.0", "self", ".", "minValue", "=", "average", "-", "delta", "self", ".", "maxValue", "=", "average", "+", "delta" ]
If self.minValue is not less than self.maxValue, fix the problem. If self.minValue is not less than self.maxValue, adjust self.minValue and/or self.maxValue (depending on which was not specified explicitly by the user) to make self.minValue < self.maxValue. If the user specified both limits explicitly, then raise GraphError.
[ "If", "self", ".", "minValue", "is", "not", "less", "than", "self", ".", "maxValue", "fix", "the", "problem", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L371-L399
2,327
brutasse/graphite-api
graphite_api/render/glyph.py
_AxisTics.applySettings
def applySettings(self, axisMin=None, axisMax=None, axisLimit=None): """Apply the specified settings to this axis. Set self.minValue, self.minValueSource, self.maxValue, self.maxValueSource, and self.axisLimit reasonably based on the parameters provided. Arguments: axisMin -- a finite number, or None to choose a round minimum limit that includes all of the data. axisMax -- a finite number, 'max' to use the maximum value contained in the data, or None to choose a round maximum limit that includes all of the data. axisLimit -- a finite number to use as an upper limit on maxValue, or None to impose no upper limit. """ if axisMin is not None and not math.isnan(axisMin): self.minValueSource = 'min' self.minValue = self.checkFinite(axisMin, 'axis min') if axisMax == 'max': self.maxValueSource = 'extremum' elif axisMax is not None and not math.isnan(axisMax): self.maxValueSource = 'max' self.maxValue = self.checkFinite(axisMax, 'axis max') if axisLimit is None or math.isnan(axisLimit): self.axisLimit = None elif axisLimit < self.maxValue: self.maxValue = self.checkFinite(axisLimit, 'axis limit') self.maxValueSource = 'limit' # The limit has already been imposed, so there is no need to # remember it: self.axisLimit = None elif math.isinf(axisLimit): # It must be positive infinity, which is the same as no limit: self.axisLimit = None else: # We still need to remember axisLimit to avoid rounding top to # a value larger than axisLimit: self.axisLimit = axisLimit self.reconcileLimits()
python
def applySettings(self, axisMin=None, axisMax=None, axisLimit=None): if axisMin is not None and not math.isnan(axisMin): self.minValueSource = 'min' self.minValue = self.checkFinite(axisMin, 'axis min') if axisMax == 'max': self.maxValueSource = 'extremum' elif axisMax is not None and not math.isnan(axisMax): self.maxValueSource = 'max' self.maxValue = self.checkFinite(axisMax, 'axis max') if axisLimit is None or math.isnan(axisLimit): self.axisLimit = None elif axisLimit < self.maxValue: self.maxValue = self.checkFinite(axisLimit, 'axis limit') self.maxValueSource = 'limit' # The limit has already been imposed, so there is no need to # remember it: self.axisLimit = None elif math.isinf(axisLimit): # It must be positive infinity, which is the same as no limit: self.axisLimit = None else: # We still need to remember axisLimit to avoid rounding top to # a value larger than axisLimit: self.axisLimit = axisLimit self.reconcileLimits()
[ "def", "applySettings", "(", "self", ",", "axisMin", "=", "None", ",", "axisMax", "=", "None", ",", "axisLimit", "=", "None", ")", ":", "if", "axisMin", "is", "not", "None", "and", "not", "math", ".", "isnan", "(", "axisMin", ")", ":", "self", ".", "minValueSource", "=", "'min'", "self", ".", "minValue", "=", "self", ".", "checkFinite", "(", "axisMin", ",", "'axis min'", ")", "if", "axisMax", "==", "'max'", ":", "self", ".", "maxValueSource", "=", "'extremum'", "elif", "axisMax", "is", "not", "None", "and", "not", "math", ".", "isnan", "(", "axisMax", ")", ":", "self", ".", "maxValueSource", "=", "'max'", "self", ".", "maxValue", "=", "self", ".", "checkFinite", "(", "axisMax", ",", "'axis max'", ")", "if", "axisLimit", "is", "None", "or", "math", ".", "isnan", "(", "axisLimit", ")", ":", "self", ".", "axisLimit", "=", "None", "elif", "axisLimit", "<", "self", ".", "maxValue", ":", "self", ".", "maxValue", "=", "self", ".", "checkFinite", "(", "axisLimit", ",", "'axis limit'", ")", "self", ".", "maxValueSource", "=", "'limit'", "# The limit has already been imposed, so there is no need to", "# remember it:", "self", ".", "axisLimit", "=", "None", "elif", "math", ".", "isinf", "(", "axisLimit", ")", ":", "# It must be positive infinity, which is the same as no limit:", "self", ".", "axisLimit", "=", "None", "else", ":", "# We still need to remember axisLimit to avoid rounding top to", "# a value larger than axisLimit:", "self", ".", "axisLimit", "=", "axisLimit", "self", ".", "reconcileLimits", "(", ")" ]
Apply the specified settings to this axis. Set self.minValue, self.minValueSource, self.maxValue, self.maxValueSource, and self.axisLimit reasonably based on the parameters provided. Arguments: axisMin -- a finite number, or None to choose a round minimum limit that includes all of the data. axisMax -- a finite number, 'max' to use the maximum value contained in the data, or None to choose a round maximum limit that includes all of the data. axisLimit -- a finite number to use as an upper limit on maxValue, or None to impose no upper limit.
[ "Apply", "the", "specified", "settings", "to", "this", "axis", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L401-L446
2,328
brutasse/graphite-api
graphite_api/render/glyph.py
_AxisTics.makeLabel
def makeLabel(self, value): """Create a label for the specified value. Create a label string containing the value and its units (if any), based on the values of self.step, self.span, and self.unitSystem. """ value, prefix = format_units(value, self.step, system=self.unitSystem) span, spanPrefix = format_units(self.span, self.step, system=self.unitSystem) if prefix: prefix += " " if value < 0.1: return "%g %s" % (float(value), prefix) elif value < 1.0: return "%.2f %s" % (float(value), prefix) if span > 10 or spanPrefix != prefix: if type(value) is float: return "%.1f %s" % (value, prefix) else: return "%d %s" % (int(value), prefix) elif span > 3: return "%.1f %s" % (float(value), prefix) elif span > 0.1: return "%.2f %s" % (float(value), prefix) else: return "%g %s" % (float(value), prefix)
python
def makeLabel(self, value): value, prefix = format_units(value, self.step, system=self.unitSystem) span, spanPrefix = format_units(self.span, self.step, system=self.unitSystem) if prefix: prefix += " " if value < 0.1: return "%g %s" % (float(value), prefix) elif value < 1.0: return "%.2f %s" % (float(value), prefix) if span > 10 or spanPrefix != prefix: if type(value) is float: return "%.1f %s" % (value, prefix) else: return "%d %s" % (int(value), prefix) elif span > 3: return "%.1f %s" % (float(value), prefix) elif span > 0.1: return "%.2f %s" % (float(value), prefix) else: return "%g %s" % (float(value), prefix)
[ "def", "makeLabel", "(", "self", ",", "value", ")", ":", "value", ",", "prefix", "=", "format_units", "(", "value", ",", "self", ".", "step", ",", "system", "=", "self", ".", "unitSystem", ")", "span", ",", "spanPrefix", "=", "format_units", "(", "self", ".", "span", ",", "self", ".", "step", ",", "system", "=", "self", ".", "unitSystem", ")", "if", "prefix", ":", "prefix", "+=", "\" \"", "if", "value", "<", "0.1", ":", "return", "\"%g %s\"", "%", "(", "float", "(", "value", ")", ",", "prefix", ")", "elif", "value", "<", "1.0", ":", "return", "\"%.2f %s\"", "%", "(", "float", "(", "value", ")", ",", "prefix", ")", "if", "span", ">", "10", "or", "spanPrefix", "!=", "prefix", ":", "if", "type", "(", "value", ")", "is", "float", ":", "return", "\"%.1f %s\"", "%", "(", "value", ",", "prefix", ")", "else", ":", "return", "\"%d %s\"", "%", "(", "int", "(", "value", ")", ",", "prefix", ")", "elif", "span", ">", "3", ":", "return", "\"%.1f %s\"", "%", "(", "float", "(", "value", ")", ",", "prefix", ")", "elif", "span", ">", "0.1", ":", "return", "\"%.2f %s\"", "%", "(", "float", "(", "value", ")", ",", "prefix", ")", "else", ":", "return", "\"%g %s\"", "%", "(", "float", "(", "value", ")", ",", "prefix", ")" ]
Create a label for the specified value. Create a label string containing the value and its units (if any), based on the values of self.step, self.span, and self.unitSystem.
[ "Create", "a", "label", "for", "the", "specified", "value", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L448-L474
2,329
brutasse/graphite-api
graphite_api/render/glyph.py
_LinearAxisTics.generateSteps
def generateSteps(self, minStep): """Generate allowed steps with step >= minStep in increasing order.""" self.checkFinite(minStep) if self.binary: base = 2.0 mantissas = [1.0] exponent = math.floor(math.log(minStep, 2) - EPSILON) else: base = 10.0 mantissas = [1.0, 2.0, 5.0] exponent = math.floor(math.log10(minStep) - EPSILON) while True: multiplier = base ** exponent for mantissa in mantissas: value = mantissa * multiplier if value >= minStep * (1.0 - EPSILON): yield value exponent += 1
python
def generateSteps(self, minStep): self.checkFinite(minStep) if self.binary: base = 2.0 mantissas = [1.0] exponent = math.floor(math.log(minStep, 2) - EPSILON) else: base = 10.0 mantissas = [1.0, 2.0, 5.0] exponent = math.floor(math.log10(minStep) - EPSILON) while True: multiplier = base ** exponent for mantissa in mantissas: value = mantissa * multiplier if value >= minStep * (1.0 - EPSILON): yield value exponent += 1
[ "def", "generateSteps", "(", "self", ",", "minStep", ")", ":", "self", ".", "checkFinite", "(", "minStep", ")", "if", "self", ".", "binary", ":", "base", "=", "2.0", "mantissas", "=", "[", "1.0", "]", "exponent", "=", "math", ".", "floor", "(", "math", ".", "log", "(", "minStep", ",", "2", ")", "-", "EPSILON", ")", "else", ":", "base", "=", "10.0", "mantissas", "=", "[", "1.0", ",", "2.0", ",", "5.0", "]", "exponent", "=", "math", ".", "floor", "(", "math", ".", "log10", "(", "minStep", ")", "-", "EPSILON", ")", "while", "True", ":", "multiplier", "=", "base", "**", "exponent", "for", "mantissa", "in", "mantissas", ":", "value", "=", "mantissa", "*", "multiplier", "if", "value", ">=", "minStep", "*", "(", "1.0", "-", "EPSILON", ")", ":", "yield", "value", "exponent", "+=", "1" ]
Generate allowed steps with step >= minStep in increasing order.
[ "Generate", "allowed", "steps", "with", "step", ">", "=", "minStep", "in", "increasing", "order", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L490-L509
2,330
brutasse/graphite-api
graphite_api/render/glyph.py
_LinearAxisTics.computeSlop
def computeSlop(self, step, divisor): """Compute the slop that would result from step and divisor. Return the slop, or None if this combination can't cover the full range. See chooseStep() for the definition of "slop". """ bottom = step * math.floor(self.minValue / float(step) + EPSILON) top = bottom + step * divisor if top >= self.maxValue - EPSILON * step: return max(top - self.maxValue, self.minValue - bottom) else: return None
python
def computeSlop(self, step, divisor): bottom = step * math.floor(self.minValue / float(step) + EPSILON) top = bottom + step * divisor if top >= self.maxValue - EPSILON * step: return max(top - self.maxValue, self.minValue - bottom) else: return None
[ "def", "computeSlop", "(", "self", ",", "step", ",", "divisor", ")", ":", "bottom", "=", "step", "*", "math", ".", "floor", "(", "self", ".", "minValue", "/", "float", "(", "step", ")", "+", "EPSILON", ")", "top", "=", "bottom", "+", "step", "*", "divisor", "if", "top", ">=", "self", ".", "maxValue", "-", "EPSILON", "*", "step", ":", "return", "max", "(", "top", "-", "self", ".", "maxValue", ",", "self", ".", "minValue", "-", "bottom", ")", "else", ":", "return", "None" ]
Compute the slop that would result from step and divisor. Return the slop, or None if this combination can't cover the full range. See chooseStep() for the definition of "slop".
[ "Compute", "the", "slop", "that", "would", "result", "from", "step", "and", "divisor", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L511-L524
2,331
brutasse/graphite-api
graphite_api/render/glyph.py
_LinearAxisTics.chooseStep
def chooseStep(self, divisors=None, binary=False): """Choose a nice, pretty size for the steps between axis labels. Our main constraint is that the number of divisions must be taken from the divisors list. We pick a number of divisions and a step size that minimizes the amount of whitespace ("slop") that would need to be included outside of the range [self.minValue, self.maxValue] if we were to push out the axis values to the next larger multiples of the step size. The minimum step that could possibly cover the variance satisfies minStep * max(divisors) >= variance or minStep = variance / max(divisors) It's not necessarily possible to cover the variance with a step that size, but we know that any smaller step definitely *cannot* cover it. So we can start there. For a sufficiently large step size, it is definitely possible to cover the variance, but at some point the slop will start growing. Let's define the slop to be slop = max(minValue - bottom, top - maxValue) Then for a given, step size, we know that slop >= (1/2) * (step * min(divisors) - variance) (the factor of 1/2 is for the best-case scenario that the slop is distributed equally on the two sides of the range). So suppose we already have a choice that yields bestSlop. Then there is no need to choose steps so large that the slop is guaranteed to be larger than bestSlop. 
Therefore, the maximum step size that we need to consider is maxStep = (2 * bestSlop + variance) / min(divisors) """ self.binary = binary if divisors is None: divisors = [4, 5, 6] else: for divisor in divisors: self.checkFinite(divisor, 'divisor') if divisor < 1: raise GraphError('Divisors must be greater than or equal ' 'to one') if self.minValue == self.maxValue: if self.minValue == 0.0: self.maxValue = 1.0 elif self.minValue < 0.0: self.minValue *= 1.1 self.maxValue *= 0.9 else: self.minValue *= 0.9 self.maxValue *= 1.1 variance = self.maxValue - self.minValue bestSlop = None bestStep = None for step in self.generateSteps(variance / float(max(divisors))): if ( bestSlop is not None and step * min(divisors) >= 2 * bestSlop + variance ): break for divisor in divisors: slop = self.computeSlop(step, divisor) if slop is not None and (bestSlop is None or slop < bestSlop): bestSlop = slop bestStep = step self.step = bestStep
python
def chooseStep(self, divisors=None, binary=False): self.binary = binary if divisors is None: divisors = [4, 5, 6] else: for divisor in divisors: self.checkFinite(divisor, 'divisor') if divisor < 1: raise GraphError('Divisors must be greater than or equal ' 'to one') if self.minValue == self.maxValue: if self.minValue == 0.0: self.maxValue = 1.0 elif self.minValue < 0.0: self.minValue *= 1.1 self.maxValue *= 0.9 else: self.minValue *= 0.9 self.maxValue *= 1.1 variance = self.maxValue - self.minValue bestSlop = None bestStep = None for step in self.generateSteps(variance / float(max(divisors))): if ( bestSlop is not None and step * min(divisors) >= 2 * bestSlop + variance ): break for divisor in divisors: slop = self.computeSlop(step, divisor) if slop is not None and (bestSlop is None or slop < bestSlop): bestSlop = slop bestStep = step self.step = bestStep
[ "def", "chooseStep", "(", "self", ",", "divisors", "=", "None", ",", "binary", "=", "False", ")", ":", "self", ".", "binary", "=", "binary", "if", "divisors", "is", "None", ":", "divisors", "=", "[", "4", ",", "5", ",", "6", "]", "else", ":", "for", "divisor", "in", "divisors", ":", "self", ".", "checkFinite", "(", "divisor", ",", "'divisor'", ")", "if", "divisor", "<", "1", ":", "raise", "GraphError", "(", "'Divisors must be greater than or equal '", "'to one'", ")", "if", "self", ".", "minValue", "==", "self", ".", "maxValue", ":", "if", "self", ".", "minValue", "==", "0.0", ":", "self", ".", "maxValue", "=", "1.0", "elif", "self", ".", "minValue", "<", "0.0", ":", "self", ".", "minValue", "*=", "1.1", "self", ".", "maxValue", "*=", "0.9", "else", ":", "self", ".", "minValue", "*=", "0.9", "self", ".", "maxValue", "*=", "1.1", "variance", "=", "self", ".", "maxValue", "-", "self", ".", "minValue", "bestSlop", "=", "None", "bestStep", "=", "None", "for", "step", "in", "self", ".", "generateSteps", "(", "variance", "/", "float", "(", "max", "(", "divisors", ")", ")", ")", ":", "if", "(", "bestSlop", "is", "not", "None", "and", "step", "*", "min", "(", "divisors", ")", ">=", "2", "*", "bestSlop", "+", "variance", ")", ":", "break", "for", "divisor", "in", "divisors", ":", "slop", "=", "self", ".", "computeSlop", "(", "step", ",", "divisor", ")", "if", "slop", "is", "not", "None", "and", "(", "bestSlop", "is", "None", "or", "slop", "<", "bestSlop", ")", ":", "bestSlop", "=", "slop", "bestStep", "=", "step", "self", ".", "step", "=", "bestStep" ]
Choose a nice, pretty size for the steps between axis labels. Our main constraint is that the number of divisions must be taken from the divisors list. We pick a number of divisions and a step size that minimizes the amount of whitespace ("slop") that would need to be included outside of the range [self.minValue, self.maxValue] if we were to push out the axis values to the next larger multiples of the step size. The minimum step that could possibly cover the variance satisfies minStep * max(divisors) >= variance or minStep = variance / max(divisors) It's not necessarily possible to cover the variance with a step that size, but we know that any smaller step definitely *cannot* cover it. So we can start there. For a sufficiently large step size, it is definitely possible to cover the variance, but at some point the slop will start growing. Let's define the slop to be slop = max(minValue - bottom, top - maxValue) Then for a given, step size, we know that slop >= (1/2) * (step * min(divisors) - variance) (the factor of 1/2 is for the best-case scenario that the slop is distributed equally on the two sides of the range). So suppose we already have a choice that yields bestSlop. Then there is no need to choose steps so large that the slop is guaranteed to be larger than bestSlop. Therefore, the maximum step size that we need to consider is maxStep = (2 * bestSlop + variance) / min(divisors)
[ "Choose", "a", "nice", "pretty", "size", "for", "the", "steps", "between", "axis", "labels", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/render/glyph.py#L526-L604
2,332
brutasse/graphite-api
graphite_api/functions.py
formatPathExpressions
def formatPathExpressions(seriesList): """ Returns a comma-separated list of unique path expressions. """ pathExpressions = sorted(set([s.pathExpression for s in seriesList])) return ','.join(pathExpressions)
python
def formatPathExpressions(seriesList): pathExpressions = sorted(set([s.pathExpression for s in seriesList])) return ','.join(pathExpressions)
[ "def", "formatPathExpressions", "(", "seriesList", ")", ":", "pathExpressions", "=", "sorted", "(", "set", "(", "[", "s", ".", "pathExpression", "for", "s", "in", "seriesList", "]", ")", ")", "return", "','", ".", "join", "(", "pathExpressions", ")" ]
Returns a comma-separated list of unique path expressions.
[ "Returns", "a", "comma", "-", "separated", "list", "of", "unique", "path", "expressions", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L185-L190
2,333
brutasse/graphite-api
graphite_api/functions.py
rangeOfSeries
def rangeOfSeries(requestContext, *seriesLists): """ Takes a wildcard seriesList. Distills down a set of inputs into the range of the series Example:: &target=rangeOfSeries(Server*.connections.total) """ if not seriesLists or not any(seriesLists): return [] seriesList, start, end, step = normalize(seriesLists) name = "rangeOfSeries(%s)" % formatPathExpressions(seriesList) values = (safeSubtract(max(row), min(row)) for row in zip_longest(*seriesList)) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series]
python
def rangeOfSeries(requestContext, *seriesLists): if not seriesLists or not any(seriesLists): return [] seriesList, start, end, step = normalize(seriesLists) name = "rangeOfSeries(%s)" % formatPathExpressions(seriesList) values = (safeSubtract(max(row), min(row)) for row in zip_longest(*seriesList)) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series]
[ "def", "rangeOfSeries", "(", "requestContext", ",", "*", "seriesLists", ")", ":", "if", "not", "seriesLists", "or", "not", "any", "(", "seriesLists", ")", ":", "return", "[", "]", "seriesList", ",", "start", ",", "end", ",", "step", "=", "normalize", "(", "seriesLists", ")", "name", "=", "\"rangeOfSeries(%s)\"", "%", "formatPathExpressions", "(", "seriesList", ")", "values", "=", "(", "safeSubtract", "(", "max", "(", "row", ")", ",", "min", "(", "row", ")", ")", "for", "row", "in", "zip_longest", "(", "*", "seriesList", ")", ")", "series", "=", "TimeSeries", "(", "name", ",", "start", ",", "end", ",", "step", ",", "values", ")", "series", ".", "pathExpression", "=", "name", "return", "[", "series", "]" ]
Takes a wildcard seriesList. Distills down a set of inputs into the range of the series Example:: &target=rangeOfSeries(Server*.connections.total)
[ "Takes", "a", "wildcard", "seriesList", ".", "Distills", "down", "a", "set", "of", "inputs", "into", "the", "range", "of", "the", "series" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L434-L452
2,334
brutasse/graphite-api
graphite_api/functions.py
percentileOfSeries
def percentileOfSeries(requestContext, seriesList, n, interpolate=False): """ percentileOfSeries returns a single series which is composed of the n-percentile values taken across a wildcard series at each point. Unless `interpolate` is set to True, percentile values are actual values contained in one of the supplied series. """ if n <= 0: raise ValueError( 'The requested percent is required to be greater than 0') if not seriesList: return [] name = 'percentileOfSeries(%s,%g)' % (seriesList[0].pathExpression, n) start, end, step = normalize([seriesList])[1:] values = [_getPercentile(row, n, interpolate) for row in zip_longest(*seriesList)] resultSeries = TimeSeries(name, start, end, step, values) resultSeries.pathExpression = name return [resultSeries]
python
def percentileOfSeries(requestContext, seriesList, n, interpolate=False): if n <= 0: raise ValueError( 'The requested percent is required to be greater than 0') if not seriesList: return [] name = 'percentileOfSeries(%s,%g)' % (seriesList[0].pathExpression, n) start, end, step = normalize([seriesList])[1:] values = [_getPercentile(row, n, interpolate) for row in zip_longest(*seriesList)] resultSeries = TimeSeries(name, start, end, step, values) resultSeries.pathExpression = name return [resultSeries]
[ "def", "percentileOfSeries", "(", "requestContext", ",", "seriesList", ",", "n", ",", "interpolate", "=", "False", ")", ":", "if", "n", "<=", "0", ":", "raise", "ValueError", "(", "'The requested percent is required to be greater than 0'", ")", "if", "not", "seriesList", ":", "return", "[", "]", "name", "=", "'percentileOfSeries(%s,%g)'", "%", "(", "seriesList", "[", "0", "]", ".", "pathExpression", ",", "n", ")", "start", ",", "end", ",", "step", "=", "normalize", "(", "[", "seriesList", "]", ")", "[", "1", ":", "]", "values", "=", "[", "_getPercentile", "(", "row", ",", "n", ",", "interpolate", ")", "for", "row", "in", "zip_longest", "(", "*", "seriesList", ")", "]", "resultSeries", "=", "TimeSeries", "(", "name", ",", "start", ",", "end", ",", "step", ",", "values", ")", "resultSeries", ".", "pathExpression", "=", "name", "return", "[", "resultSeries", "]" ]
percentileOfSeries returns a single series which is composed of the n-percentile values taken across a wildcard series at each point. Unless `interpolate` is set to True, percentile values are actual values contained in one of the supplied series.
[ "percentileOfSeries", "returns", "a", "single", "series", "which", "is", "composed", "of", "the", "n", "-", "percentile", "values", "taken", "across", "a", "wildcard", "series", "at", "each", "point", ".", "Unless", "interpolate", "is", "set", "to", "True", "percentile", "values", "are", "actual", "values", "contained", "in", "one", "of", "the", "supplied", "series", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L455-L474
2,335
brutasse/graphite-api
graphite_api/functions.py
weightedAverage
def weightedAverage(requestContext, seriesListAvg, seriesListWeight, *nodes): """ Takes a series of average values and a series of weights and produces a weighted average for all values. The corresponding values should share one or more zero-indexed nodes. Example:: &target=weightedAverage(*.transactions.mean,*.transactions.count,0) &target=weightedAverage(*.transactions.mean,*.transactions.count,1,3,4) """ if isinstance(nodes, int): nodes = [nodes] sortedSeries = {} for seriesAvg, seriesWeight in zip_longest( seriesListAvg, seriesListWeight): key = '' for node in nodes: key += seriesAvg.name.split(".")[node] sortedSeries.setdefault(key, {}) sortedSeries[key]['avg'] = seriesAvg key = '' for node in nodes: key += seriesWeight.name.split(".")[node] sortedSeries.setdefault(key, {}) sortedSeries[key]['weight'] = seriesWeight productList = [] for key in sortedSeries: if 'weight' not in sortedSeries[key]: continue if 'avg' not in sortedSeries[key]: continue seriesWeight = sortedSeries[key]['weight'] seriesAvg = sortedSeries[key]['avg'] productValues = [safeMul(val1, val2) for val1, val2 in zip_longest(seriesAvg, seriesWeight)] name = 'product(%s,%s)' % (seriesWeight.name, seriesAvg.name) productSeries = TimeSeries(name, seriesAvg.start, seriesAvg.end, seriesAvg.step, productValues) productSeries.pathExpression = name productList.append(productSeries) if not productList: return [] [sumProducts] = sumSeries(requestContext, productList) [sumWeights] = sumSeries(requestContext, seriesListWeight) resultValues = [safeDiv(val1, val2) for val1, val2 in zip_longest(sumProducts, sumWeights)] name = "weightedAverage(%s, %s, %s)" % ( ','.join(sorted(set(s.pathExpression for s in seriesListAvg))), ','.join(sorted(set(s.pathExpression for s in seriesListWeight))), ','.join(map(str, nodes))) resultSeries = TimeSeries(name, sumProducts.start, sumProducts.end, sumProducts.step, resultValues) resultSeries.pathExpression = name return resultSeries
python
def weightedAverage(requestContext, seriesListAvg, seriesListWeight, *nodes): if isinstance(nodes, int): nodes = [nodes] sortedSeries = {} for seriesAvg, seriesWeight in zip_longest( seriesListAvg, seriesListWeight): key = '' for node in nodes: key += seriesAvg.name.split(".")[node] sortedSeries.setdefault(key, {}) sortedSeries[key]['avg'] = seriesAvg key = '' for node in nodes: key += seriesWeight.name.split(".")[node] sortedSeries.setdefault(key, {}) sortedSeries[key]['weight'] = seriesWeight productList = [] for key in sortedSeries: if 'weight' not in sortedSeries[key]: continue if 'avg' not in sortedSeries[key]: continue seriesWeight = sortedSeries[key]['weight'] seriesAvg = sortedSeries[key]['avg'] productValues = [safeMul(val1, val2) for val1, val2 in zip_longest(seriesAvg, seriesWeight)] name = 'product(%s,%s)' % (seriesWeight.name, seriesAvg.name) productSeries = TimeSeries(name, seriesAvg.start, seriesAvg.end, seriesAvg.step, productValues) productSeries.pathExpression = name productList.append(productSeries) if not productList: return [] [sumProducts] = sumSeries(requestContext, productList) [sumWeights] = sumSeries(requestContext, seriesListWeight) resultValues = [safeDiv(val1, val2) for val1, val2 in zip_longest(sumProducts, sumWeights)] name = "weightedAverage(%s, %s, %s)" % ( ','.join(sorted(set(s.pathExpression for s in seriesListAvg))), ','.join(sorted(set(s.pathExpression for s in seriesListWeight))), ','.join(map(str, nodes))) resultSeries = TimeSeries(name, sumProducts.start, sumProducts.end, sumProducts.step, resultValues) resultSeries.pathExpression = name return resultSeries
[ "def", "weightedAverage", "(", "requestContext", ",", "seriesListAvg", ",", "seriesListWeight", ",", "*", "nodes", ")", ":", "if", "isinstance", "(", "nodes", ",", "int", ")", ":", "nodes", "=", "[", "nodes", "]", "sortedSeries", "=", "{", "}", "for", "seriesAvg", ",", "seriesWeight", "in", "zip_longest", "(", "seriesListAvg", ",", "seriesListWeight", ")", ":", "key", "=", "''", "for", "node", "in", "nodes", ":", "key", "+=", "seriesAvg", ".", "name", ".", "split", "(", "\".\"", ")", "[", "node", "]", "sortedSeries", ".", "setdefault", "(", "key", ",", "{", "}", ")", "sortedSeries", "[", "key", "]", "[", "'avg'", "]", "=", "seriesAvg", "key", "=", "''", "for", "node", "in", "nodes", ":", "key", "+=", "seriesWeight", ".", "name", ".", "split", "(", "\".\"", ")", "[", "node", "]", "sortedSeries", ".", "setdefault", "(", "key", ",", "{", "}", ")", "sortedSeries", "[", "key", "]", "[", "'weight'", "]", "=", "seriesWeight", "productList", "=", "[", "]", "for", "key", "in", "sortedSeries", ":", "if", "'weight'", "not", "in", "sortedSeries", "[", "key", "]", ":", "continue", "if", "'avg'", "not", "in", "sortedSeries", "[", "key", "]", ":", "continue", "seriesWeight", "=", "sortedSeries", "[", "key", "]", "[", "'weight'", "]", "seriesAvg", "=", "sortedSeries", "[", "key", "]", "[", "'avg'", "]", "productValues", "=", "[", "safeMul", "(", "val1", ",", "val2", ")", "for", "val1", ",", "val2", "in", "zip_longest", "(", "seriesAvg", ",", "seriesWeight", ")", "]", "name", "=", "'product(%s,%s)'", "%", "(", "seriesWeight", ".", "name", ",", "seriesAvg", ".", "name", ")", "productSeries", "=", "TimeSeries", "(", "name", ",", "seriesAvg", ".", "start", ",", "seriesAvg", ".", "end", ",", "seriesAvg", ".", "step", ",", "productValues", ")", "productSeries", ".", "pathExpression", "=", "name", "productList", ".", "append", "(", "productSeries", ")", "if", "not", "productList", ":", "return", "[", "]", "[", "sumProducts", "]", "=", "sumSeries", "(", "requestContext", ",", 
"productList", ")", "[", "sumWeights", "]", "=", "sumSeries", "(", "requestContext", ",", "seriesListWeight", ")", "resultValues", "=", "[", "safeDiv", "(", "val1", ",", "val2", ")", "for", "val1", ",", "val2", "in", "zip_longest", "(", "sumProducts", ",", "sumWeights", ")", "]", "name", "=", "\"weightedAverage(%s, %s, %s)\"", "%", "(", "','", ".", "join", "(", "sorted", "(", "set", "(", "s", ".", "pathExpression", "for", "s", "in", "seriesListAvg", ")", ")", ")", ",", "','", ".", "join", "(", "sorted", "(", "set", "(", "s", ".", "pathExpression", "for", "s", "in", "seriesListWeight", ")", ")", ")", ",", "','", ".", "join", "(", "map", "(", "str", ",", "nodes", ")", ")", ")", "resultSeries", "=", "TimeSeries", "(", "name", ",", "sumProducts", ".", "start", ",", "sumProducts", ".", "end", ",", "sumProducts", ".", "step", ",", "resultValues", ")", "resultSeries", ".", "pathExpression", "=", "name", "return", "resultSeries" ]
Takes a series of average values and a series of weights and produces a weighted average for all values. The corresponding values should share one or more zero-indexed nodes. Example:: &target=weightedAverage(*.transactions.mean,*.transactions.count,0) &target=weightedAverage(*.transactions.mean,*.transactions.count,1,3,4)
[ "Takes", "a", "series", "of", "average", "values", "and", "a", "series", "of", "weights", "and", "produces", "a", "weighted", "average", "for", "all", "values", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L773-L843
2,336
brutasse/graphite-api
graphite_api/functions.py
scale
def scale(requestContext, seriesList, factor): """ Takes one metric or a wildcard seriesList followed by a constant, and multiplies the datapoint by the constant provided at each point. Example:: &target=scale(Server.instance01.threads.busy,10) &target=scale(Server.instance*.threads.busy,10) """ for series in seriesList: series.name = "scale(%s,%g)" % (series.name, float(factor)) series.pathExpression = series.name for i, value in enumerate(series): series[i] = safeMul(value, factor) return seriesList
python
def scale(requestContext, seriesList, factor): for series in seriesList: series.name = "scale(%s,%g)" % (series.name, float(factor)) series.pathExpression = series.name for i, value in enumerate(series): series[i] = safeMul(value, factor) return seriesList
[ "def", "scale", "(", "requestContext", ",", "seriesList", ",", "factor", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"scale(%s,%g)\"", "%", "(", "series", ".", "name", ",", "float", "(", "factor", ")", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "series", "[", "i", "]", "=", "safeMul", "(", "value", ",", "factor", ")", "return", "seriesList" ]
Takes one metric or a wildcard seriesList followed by a constant, and multiplies the datapoint by the constant provided at each point. Example:: &target=scale(Server.instance01.threads.busy,10) &target=scale(Server.instance*.threads.busy,10)
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "a", "constant", "and", "multiplies", "the", "datapoint", "by", "the", "constant", "provided", "at", "each", "point", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L994-L1010
2,337
brutasse/graphite-api
graphite_api/functions.py
scaleToSeconds
def scaleToSeconds(requestContext, seriesList, seconds): """ Takes one metric or a wildcard seriesList and returns "value per seconds" where seconds is a last argument to this functions. Useful in conjunction with derivative or integral function if you want to normalize its result to a known resolution for arbitrary retentions """ for series in seriesList: series.name = "scaleToSeconds(%s,%d)" % (series.name, seconds) series.pathExpression = series.name factor = seconds * 1.0 / series.step for i, value in enumerate(series): series[i] = safeMul(value, factor) return seriesList
python
def scaleToSeconds(requestContext, seriesList, seconds): for series in seriesList: series.name = "scaleToSeconds(%s,%d)" % (series.name, seconds) series.pathExpression = series.name factor = seconds * 1.0 / series.step for i, value in enumerate(series): series[i] = safeMul(value, factor) return seriesList
[ "def", "scaleToSeconds", "(", "requestContext", ",", "seriesList", ",", "seconds", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"scaleToSeconds(%s,%d)\"", "%", "(", "series", ".", "name", ",", "seconds", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "factor", "=", "seconds", "*", "1.0", "/", "series", ".", "step", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "series", "[", "i", "]", "=", "safeMul", "(", "value", ",", "factor", ")", "return", "seriesList" ]
Takes one metric or a wildcard seriesList and returns "value per seconds" where seconds is a last argument to this functions. Useful in conjunction with derivative or integral function if you want to normalize its result to a known resolution for arbitrary retentions
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "and", "returns", "value", "per", "seconds", "where", "seconds", "is", "a", "last", "argument", "to", "this", "functions", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1013-L1028
2,338
brutasse/graphite-api
graphite_api/functions.py
pow
def pow(requestContext, seriesList, factor): """ Takes one metric or a wildcard seriesList followed by a constant, and raises the datapoint by the power of the constant provided at each point. Example:: &target=pow(Server.instance01.threads.busy,10) &target=pow(Server.instance*.threads.busy,10) """ for series in seriesList: series.name = "pow(%s,%g)" % (series.name, float(factor)) series.pathExpression = series.name for i, value in enumerate(series): series[i] = safePow(value, factor) return seriesList
python
def pow(requestContext, seriesList, factor): for series in seriesList: series.name = "pow(%s,%g)" % (series.name, float(factor)) series.pathExpression = series.name for i, value in enumerate(series): series[i] = safePow(value, factor) return seriesList
[ "def", "pow", "(", "requestContext", ",", "seriesList", ",", "factor", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"pow(%s,%g)\"", "%", "(", "series", ".", "name", ",", "float", "(", "factor", ")", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "series", "[", "i", "]", "=", "safePow", "(", "value", ",", "factor", ")", "return", "seriesList" ]
Takes one metric or a wildcard seriesList followed by a constant, and raises the datapoint by the power of the constant provided at each point. Example:: &target=pow(Server.instance01.threads.busy,10) &target=pow(Server.instance*.threads.busy,10)
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "a", "constant", "and", "raises", "the", "datapoint", "by", "the", "power", "of", "the", "constant", "provided", "at", "each", "point", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1031-L1047
2,339
brutasse/graphite-api
graphite_api/functions.py
powSeries
def powSeries(requestContext, *seriesLists): """ Takes two or more series and pows their points. A constant line may be used. Example:: &target=powSeries(Server.instance01.app.requests, Server.instance01.app.replies) """ if not seriesLists or not any(seriesLists): return [] seriesList, start, end, step = normalize(seriesLists) name = "powSeries(%s)" % ','.join([s.name for s in seriesList]) values = [] for row in zip_longest(*seriesList): first = True tmpVal = None for element in row: # If it is a first iteration - tmpVal needs to be element if first: tmpVal = element first = False else: tmpVal = safePow(tmpVal, element) values.append(tmpVal) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series]
python
def powSeries(requestContext, *seriesLists): if not seriesLists or not any(seriesLists): return [] seriesList, start, end, step = normalize(seriesLists) name = "powSeries(%s)" % ','.join([s.name for s in seriesList]) values = [] for row in zip_longest(*seriesList): first = True tmpVal = None for element in row: # If it is a first iteration - tmpVal needs to be element if first: tmpVal = element first = False else: tmpVal = safePow(tmpVal, element) values.append(tmpVal) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series]
[ "def", "powSeries", "(", "requestContext", ",", "*", "seriesLists", ")", ":", "if", "not", "seriesLists", "or", "not", "any", "(", "seriesLists", ")", ":", "return", "[", "]", "seriesList", ",", "start", ",", "end", ",", "step", "=", "normalize", "(", "seriesLists", ")", "name", "=", "\"powSeries(%s)\"", "%", "','", ".", "join", "(", "[", "s", ".", "name", "for", "s", "in", "seriesList", "]", ")", "values", "=", "[", "]", "for", "row", "in", "zip_longest", "(", "*", "seriesList", ")", ":", "first", "=", "True", "tmpVal", "=", "None", "for", "element", "in", "row", ":", "# If it is a first iteration - tmpVal needs to be element", "if", "first", ":", "tmpVal", "=", "element", "first", "=", "False", "else", ":", "tmpVal", "=", "safePow", "(", "tmpVal", ",", "element", ")", "values", ".", "append", "(", "tmpVal", ")", "series", "=", "TimeSeries", "(", "name", ",", "start", ",", "end", ",", "step", ",", "values", ")", "series", ".", "pathExpression", "=", "name", "return", "[", "series", "]" ]
Takes two or more series and pows their points. A constant line may be used. Example:: &target=powSeries(Server.instance01.app.requests, Server.instance01.app.replies)
[ "Takes", "two", "or", "more", "series", "and", "pows", "their", "points", ".", "A", "constant", "line", "may", "be", "used", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1050-L1079
2,340
brutasse/graphite-api
graphite_api/functions.py
squareRoot
def squareRoot(requestContext, seriesList): """ Takes one metric or a wildcard seriesList, and computes the square root of each datapoint. Example:: &target=squareRoot(Server.instance01.threads.busy) """ for series in seriesList: series.name = "squareRoot(%s)" % (series.name) for i, value in enumerate(series): series[i] = safePow(value, 0.5) return seriesList
python
def squareRoot(requestContext, seriesList): for series in seriesList: series.name = "squareRoot(%s)" % (series.name) for i, value in enumerate(series): series[i] = safePow(value, 0.5) return seriesList
[ "def", "squareRoot", "(", "requestContext", ",", "seriesList", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"squareRoot(%s)\"", "%", "(", "series", ".", "name", ")", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "series", "[", "i", "]", "=", "safePow", "(", "value", ",", "0.5", ")", "return", "seriesList" ]
Takes one metric or a wildcard seriesList, and computes the square root of each datapoint. Example:: &target=squareRoot(Server.instance01.threads.busy)
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "and", "computes", "the", "square", "root", "of", "each", "datapoint", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1082-L1096
2,341
brutasse/graphite-api
graphite_api/functions.py
absolute
def absolute(requestContext, seriesList): """ Takes one metric or a wildcard seriesList and applies the mathematical abs function to each datapoint transforming it to its absolute value. Example:: &target=absolute(Server.instance01.threads.busy) &target=absolute(Server.instance*.threads.busy) """ for series in seriesList: series.name = "absolute(%s)" % (series.name) series.pathExpression = series.name for i, value in enumerate(series): series[i] = safeAbs(value) return seriesList
python
def absolute(requestContext, seriesList): for series in seriesList: series.name = "absolute(%s)" % (series.name) series.pathExpression = series.name for i, value in enumerate(series): series[i] = safeAbs(value) return seriesList
[ "def", "absolute", "(", "requestContext", ",", "seriesList", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"absolute(%s)\"", "%", "(", "series", ".", "name", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "series", "[", "i", "]", "=", "safeAbs", "(", "value", ")", "return", "seriesList" ]
Takes one metric or a wildcard seriesList and applies the mathematical abs function to each datapoint transforming it to its absolute value. Example:: &target=absolute(Server.instance01.threads.busy) &target=absolute(Server.instance*.threads.busy)
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "and", "applies", "the", "mathematical", "abs", "function", "to", "each", "datapoint", "transforming", "it", "to", "its", "absolute", "value", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1116-L1131
2,342
brutasse/graphite-api
graphite_api/functions.py
offset
def offset(requestContext, seriesList, factor): """ Takes one metric or a wildcard seriesList followed by a constant, and adds the constant to each datapoint. Example:: &target=offset(Server.instance01.threads.busy,10) """ for series in seriesList: series.name = "offset(%s,%g)" % (series.name, float(factor)) series.pathExpression = series.name for i, value in enumerate(series): if value is not None: series[i] = value + factor return seriesList
python
def offset(requestContext, seriesList, factor): for series in seriesList: series.name = "offset(%s,%g)" % (series.name, float(factor)) series.pathExpression = series.name for i, value in enumerate(series): if value is not None: series[i] = value + factor return seriesList
[ "def", "offset", "(", "requestContext", ",", "seriesList", ",", "factor", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"offset(%s,%g)\"", "%", "(", "series", ".", "name", ",", "float", "(", "factor", ")", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "if", "value", "is", "not", "None", ":", "series", "[", "i", "]", "=", "value", "+", "factor", "return", "seriesList" ]
Takes one metric or a wildcard seriesList followed by a constant, and adds the constant to each datapoint. Example:: &target=offset(Server.instance01.threads.busy,10)
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "a", "constant", "and", "adds", "the", "constant", "to", "each", "datapoint", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1134-L1150
2,343
brutasse/graphite-api
graphite_api/functions.py
offsetToZero
def offsetToZero(requestContext, seriesList): """ Offsets a metric or wildcard seriesList by subtracting the minimum value in the series from each datapoint. Useful to compare different series where the values in each series may be higher or lower on average but you're only interested in the relative difference. An example use case is for comparing different round trip time results. When measuring RTT (like pinging a server), different devices may come back with consistently different results due to network latency which will be different depending on how many network hops between the probe and the device. To compare different devices in the same graph, the network latency to each has to be factored out of the results. This is a shortcut that takes the fastest response (lowest number in the series) and sets that to zero and then offsets all of the other datapoints in that series by that amount. This makes the assumption that the lowest response is the fastest the device can respond, of course the more datapoints that are in the series the more accurate this assumption is. Example:: &target=offsetToZero(Server.instance01.responseTime) &target=offsetToZero(Server.instance*.responseTime) """ for series in seriesList: series.name = "offsetToZero(%s)" % (series.name) minimum = safeMin(series) for i, value in enumerate(series): if value is not None: series[i] = value - minimum return seriesList
python
def offsetToZero(requestContext, seriesList): for series in seriesList: series.name = "offsetToZero(%s)" % (series.name) minimum = safeMin(series) for i, value in enumerate(series): if value is not None: series[i] = value - minimum return seriesList
[ "def", "offsetToZero", "(", "requestContext", ",", "seriesList", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "\"offsetToZero(%s)\"", "%", "(", "series", ".", "name", ")", "minimum", "=", "safeMin", "(", "series", ")", "for", "i", ",", "value", "in", "enumerate", "(", "series", ")", ":", "if", "value", "is", "not", "None", ":", "series", "[", "i", "]", "=", "value", "-", "minimum", "return", "seriesList" ]
Offsets a metric or wildcard seriesList by subtracting the minimum value in the series from each datapoint. Useful to compare different series where the values in each series may be higher or lower on average but you're only interested in the relative difference. An example use case is for comparing different round trip time results. When measuring RTT (like pinging a server), different devices may come back with consistently different results due to network latency which will be different depending on how many network hops between the probe and the device. To compare different devices in the same graph, the network latency to each has to be factored out of the results. This is a shortcut that takes the fastest response (lowest number in the series) and sets that to zero and then offsets all of the other datapoints in that series by that amount. This makes the assumption that the lowest response is the fastest the device can respond, of course the more datapoints that are in the series the more accurate this assumption is. Example:: &target=offsetToZero(Server.instance01.responseTime) &target=offsetToZero(Server.instance*.responseTime)
[ "Offsets", "a", "metric", "or", "wildcard", "seriesList", "by", "subtracting", "the", "minimum", "value", "in", "the", "series", "from", "each", "datapoint", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1153-L1187
2,344
brutasse/graphite-api
graphite_api/functions.py
consolidateBy
def consolidateBy(requestContext, seriesList, consolidationFunc): """ Takes one metric or a wildcard seriesList and a consolidation function name. Valid function names are 'sum', 'average', 'min', and 'max'. When a graph is drawn where width of the graph size in pixels is smaller than the number of datapoints to be graphed, Graphite consolidates the values to to prevent line overlap. The consolidateBy() function changes the consolidation function from the default of 'average' to one of 'sum', 'max', or 'min'. This is especially useful in sales graphs, where fractional values make no sense and a 'sum' of consolidated values is appropriate. Example:: &target=consolidateBy(Sales.widgets.largeBlue, 'sum') &target=consolidateBy(Servers.web01.sda1.free_space, 'max') """ for series in seriesList: # datalib will throw an exception, so it's not necessary to validate # here series.consolidationFunc = consolidationFunc series.name = 'consolidateBy(%s,"%s")' % (series.name, series.consolidationFunc) series.pathExpression = series.name return seriesList
python
def consolidateBy(requestContext, seriesList, consolidationFunc): for series in seriesList: # datalib will throw an exception, so it's not necessary to validate # here series.consolidationFunc = consolidationFunc series.name = 'consolidateBy(%s,"%s")' % (series.name, series.consolidationFunc) series.pathExpression = series.name return seriesList
[ "def", "consolidateBy", "(", "requestContext", ",", "seriesList", ",", "consolidationFunc", ")", ":", "for", "series", "in", "seriesList", ":", "# datalib will throw an exception, so it's not necessary to validate", "# here", "series", ".", "consolidationFunc", "=", "consolidationFunc", "series", ".", "name", "=", "'consolidateBy(%s,\"%s\")'", "%", "(", "series", ".", "name", ",", "series", ".", "consolidationFunc", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "return", "seriesList" ]
Takes one metric or a wildcard seriesList and a consolidation function name. Valid function names are 'sum', 'average', 'min', and 'max'. When a graph is drawn where width of the graph size in pixels is smaller than the number of datapoints to be graphed, Graphite consolidates the values to to prevent line overlap. The consolidateBy() function changes the consolidation function from the default of 'average' to one of 'sum', 'max', or 'min'. This is especially useful in sales graphs, where fractional values make no sense and a 'sum' of consolidated values is appropriate. Example:: &target=consolidateBy(Sales.widgets.largeBlue, 'sum') &target=consolidateBy(Servers.web01.sda1.free_space, 'max')
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "and", "a", "consolidation", "function", "name", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1464-L1492
2,345
brutasse/graphite-api
graphite_api/functions.py
integral
def integral(requestContext, seriesList): """ This will show the sum over time, sort of like a continuous addition function. Useful for finding totals or trends in metrics that are collected per minute. Example:: &target=integral(company.sales.perMinute) This would start at zero on the left side of the graph, adding the sales each minute, and show the total sales for the time period selected at the right side, (time now, or the time specified by '&until='). """ results = [] for series in seriesList: newValues = [] current = 0.0 for val in series: if val is None: newValues.append(None) else: current += val newValues.append(current) newName = "integral(%s)" % series.name newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results
python
def integral(requestContext, seriesList): results = [] for series in seriesList: newValues = [] current = 0.0 for val in series: if val is None: newValues.append(None) else: current += val newValues.append(current) newName = "integral(%s)" % series.name newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results
[ "def", "integral", "(", "requestContext", ",", "seriesList", ")", ":", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "newValues", "=", "[", "]", "current", "=", "0.0", "for", "val", "in", "series", ":", "if", "val", "is", "None", ":", "newValues", ".", "append", "(", "None", ")", "else", ":", "current", "+=", "val", "newValues", ".", "append", "(", "current", ")", "newName", "=", "\"integral(%s)\"", "%", "series", ".", "name", "newSeries", "=", "TimeSeries", "(", "newName", ",", "series", ".", "start", ",", "series", ".", "end", ",", "series", ".", "step", ",", "newValues", ")", "newSeries", ".", "pathExpression", "=", "newName", "results", ".", "append", "(", "newSeries", ")", "return", "results" ]
This will show the sum over time, sort of like a continuous addition function. Useful for finding totals or trends in metrics that are collected per minute. Example:: &target=integral(company.sales.perMinute) This would start at zero on the left side of the graph, adding the sales each minute, and show the total sales for the time period selected at the right side, (time now, or the time specified by '&until=').
[ "This", "will", "show", "the", "sum", "over", "time", "sort", "of", "like", "a", "continuous", "addition", "function", ".", "Useful", "for", "finding", "totals", "or", "trends", "in", "metrics", "that", "are", "collected", "per", "minute", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1617-L1646
2,346
brutasse/graphite-api
graphite_api/functions.py
areaBetween
def areaBetween(requestContext, *seriesLists): """ Draws the vertical area in between the two series in seriesList. Useful for visualizing a range such as the minimum and maximum latency for a service. areaBetween expects **exactly one argument** that results in exactly two series (see example below). The order of the lower and higher values series does not matter. The visualization only works when used in conjunction with ``areaMode=stacked``. Most likely use case is to provide a band within which another metric should move. In such case applying an ``alpha()``, as in the second example, gives best visual results. Example:: &target=areaBetween(service.latency.{min,max})&areaMode=stacked &target=alpha(areaBetween(service.latency.{min,max}),0.3)&areaMode=stacked If for instance, you need to build a seriesList, you should use the ``group`` function, like so:: &target=areaBetween(group(minSeries(a.*.min),maxSeries(a.*.max))) """ if len(seriesLists) == 1: [seriesLists] = seriesLists assert len(seriesLists) == 2, ("areaBetween series argument must " "reference *exactly* 2 series") lower, upper = seriesLists if len(lower) == 1: [lower] = lower if len(upper) == 1: [upper] = upper lower.options['stacked'] = True lower.options['invisible'] = True upper.options['stacked'] = True lower.name = upper.name = "areaBetween(%s)" % upper.pathExpression return [lower, upper]
python
def areaBetween(requestContext, *seriesLists): if len(seriesLists) == 1: [seriesLists] = seriesLists assert len(seriesLists) == 2, ("areaBetween series argument must " "reference *exactly* 2 series") lower, upper = seriesLists if len(lower) == 1: [lower] = lower if len(upper) == 1: [upper] = upper lower.options['stacked'] = True lower.options['invisible'] = True upper.options['stacked'] = True lower.name = upper.name = "areaBetween(%s)" % upper.pathExpression return [lower, upper]
[ "def", "areaBetween", "(", "requestContext", ",", "*", "seriesLists", ")", ":", "if", "len", "(", "seriesLists", ")", "==", "1", ":", "[", "seriesLists", "]", "=", "seriesLists", "assert", "len", "(", "seriesLists", ")", "==", "2", ",", "(", "\"areaBetween series argument must \"", "\"reference *exactly* 2 series\"", ")", "lower", ",", "upper", "=", "seriesLists", "if", "len", "(", "lower", ")", "==", "1", ":", "[", "lower", "]", "=", "lower", "if", "len", "(", "upper", ")", "==", "1", ":", "[", "upper", "]", "=", "upper", "lower", ".", "options", "[", "'stacked'", "]", "=", "True", "lower", ".", "options", "[", "'invisible'", "]", "=", "True", "upper", ".", "options", "[", "'stacked'", "]", "=", "True", "lower", ".", "name", "=", "upper", ".", "name", "=", "\"areaBetween(%s)\"", "%", "upper", ".", "pathExpression", "return", "[", "lower", ",", "upper", "]" ]
Draws the vertical area in between the two series in seriesList. Useful for visualizing a range such as the minimum and maximum latency for a service. areaBetween expects **exactly one argument** that results in exactly two series (see example below). The order of the lower and higher values series does not matter. The visualization only works when used in conjunction with ``areaMode=stacked``. Most likely use case is to provide a band within which another metric should move. In such case applying an ``alpha()``, as in the second example, gives best visual results. Example:: &target=areaBetween(service.latency.{min,max})&areaMode=stacked &target=alpha(areaBetween(service.latency.{min,max}),0.3)&areaMode=stacked If for instance, you need to build a seriesList, you should use the ``group`` function, like so:: &target=areaBetween(group(minSeries(a.*.min),maxSeries(a.*.max)))
[ "Draws", "the", "vertical", "area", "in", "between", "the", "two", "series", "in", "seriesList", ".", "Useful", "for", "visualizing", "a", "range", "such", "as", "the", "minimum", "and", "maximum", "latency", "for", "a", "service", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1789-L1828
2,347
brutasse/graphite-api
graphite_api/functions.py
alias
def alias(requestContext, seriesList, newName): """ Takes one metric or a wildcard seriesList and a string in quotes. Prints the string instead of the metric name in the legend. Example:: &target=alias(Sales.widgets.largeBlue,"Large Blue Widgets") """ try: seriesList.name = newName except AttributeError: for series in seriesList: series.name = newName return seriesList
python
def alias(requestContext, seriesList, newName): try: seriesList.name = newName except AttributeError: for series in seriesList: series.name = newName return seriesList
[ "def", "alias", "(", "requestContext", ",", "seriesList", ",", "newName", ")", ":", "try", ":", "seriesList", ".", "name", "=", "newName", "except", "AttributeError", ":", "for", "series", "in", "seriesList", ":", "series", ".", "name", "=", "newName", "return", "seriesList" ]
Takes one metric or a wildcard seriesList and a string in quotes. Prints the string instead of the metric name in the legend. Example:: &target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "and", "a", "string", "in", "quotes", ".", "Prints", "the", "string", "instead", "of", "the", "metric", "name", "in", "the", "legend", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1847-L1862
2,348
brutasse/graphite-api
graphite_api/functions.py
_getFirstPathExpression
def _getFirstPathExpression(name): """Returns the first metric path in an expression.""" tokens = grammar.parseString(name) pathExpression = None while pathExpression is None: if tokens.pathExpression: pathExpression = tokens.pathExpression elif tokens.expression: tokens = tokens.expression elif tokens.call: tokens = tokens.call.args[0] else: break return pathExpression
python
def _getFirstPathExpression(name): tokens = grammar.parseString(name) pathExpression = None while pathExpression is None: if tokens.pathExpression: pathExpression = tokens.pathExpression elif tokens.expression: tokens = tokens.expression elif tokens.call: tokens = tokens.call.args[0] else: break return pathExpression
[ "def", "_getFirstPathExpression", "(", "name", ")", ":", "tokens", "=", "grammar", ".", "parseString", "(", "name", ")", "pathExpression", "=", "None", "while", "pathExpression", "is", "None", ":", "if", "tokens", ".", "pathExpression", ":", "pathExpression", "=", "tokens", ".", "pathExpression", "elif", "tokens", ".", "expression", ":", "tokens", "=", "tokens", ".", "expression", "elif", "tokens", ".", "call", ":", "tokens", "=", "tokens", ".", "call", ".", "args", "[", "0", "]", "else", ":", "break", "return", "pathExpression" ]
Returns the first metric path in an expression.
[ "Returns", "the", "first", "metric", "path", "in", "an", "expression", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L1921-L1934
2,349
brutasse/graphite-api
graphite_api/functions.py
alpha
def alpha(requestContext, seriesList, alpha): """ Assigns the given alpha transparency setting to the series. Takes a float value between 0 and 1. """ for series in seriesList: series.options['alpha'] = alpha return seriesList
python
def alpha(requestContext, seriesList, alpha): for series in seriesList: series.options['alpha'] = alpha return seriesList
[ "def", "alpha", "(", "requestContext", ",", "seriesList", ",", "alpha", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "options", "[", "'alpha'", "]", "=", "alpha", "return", "seriesList" ]
Assigns the given alpha transparency setting to the series. Takes a float value between 0 and 1.
[ "Assigns", "the", "given", "alpha", "transparency", "setting", "to", "the", "series", ".", "Takes", "a", "float", "value", "between", "0", "and", "1", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2006-L2013
2,350
brutasse/graphite-api
graphite_api/functions.py
color
def color(requestContext, seriesList, theColor): """ Assigns the given color to the seriesList Example:: &target=color(collectd.hostname.cpu.0.user, 'green') &target=color(collectd.hostname.cpu.0.system, 'ff0000') &target=color(collectd.hostname.cpu.0.idle, 'gray') &target=color(collectd.hostname.cpu.0.idle, '6464ffaa') """ for series in seriesList: series.color = theColor return seriesList
python
def color(requestContext, seriesList, theColor): for series in seriesList: series.color = theColor return seriesList
[ "def", "color", "(", "requestContext", ",", "seriesList", ",", "theColor", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "color", "=", "theColor", "return", "seriesList" ]
Assigns the given color to the seriesList Example:: &target=color(collectd.hostname.cpu.0.user, 'green') &target=color(collectd.hostname.cpu.0.system, 'ff0000') &target=color(collectd.hostname.cpu.0.idle, 'gray') &target=color(collectd.hostname.cpu.0.idle, '6464ffaa')
[ "Assigns", "the", "given", "color", "to", "the", "seriesList" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2016-L2030
2,351
brutasse/graphite-api
graphite_api/functions.py
logarithm
def logarithm(requestContext, seriesList, base=10): """ Takes one metric or a wildcard seriesList, a base, and draws the y-axis in logarithmic format. If base is omitted, the function defaults to base 10. Example:: &target=log(carbon.agents.hostname.avgUpdateTime,2) """ results = [] for series in seriesList: newValues = [] for val in series: if val is None: newValues.append(None) elif val <= 0: newValues.append(None) else: newValues.append(math.log(val, base)) newName = "log(%s, %s)" % (series.name, base) newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results
python
def logarithm(requestContext, seriesList, base=10): results = [] for series in seriesList: newValues = [] for val in series: if val is None: newValues.append(None) elif val <= 0: newValues.append(None) else: newValues.append(math.log(val, base)) newName = "log(%s, %s)" % (series.name, base) newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues) newSeries.pathExpression = newName results.append(newSeries) return results
[ "def", "logarithm", "(", "requestContext", ",", "seriesList", ",", "base", "=", "10", ")", ":", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "newValues", "=", "[", "]", "for", "val", "in", "series", ":", "if", "val", "is", "None", ":", "newValues", ".", "append", "(", "None", ")", "elif", "val", "<=", "0", ":", "newValues", ".", "append", "(", "None", ")", "else", ":", "newValues", ".", "append", "(", "math", ".", "log", "(", "val", ",", "base", ")", ")", "newName", "=", "\"log(%s, %s)\"", "%", "(", "series", ".", "name", ",", "base", ")", "newSeries", "=", "TimeSeries", "(", "newName", ",", "series", ".", "start", ",", "series", ".", "end", ",", "series", ".", "step", ",", "newValues", ")", "newSeries", ".", "pathExpression", "=", "newName", "results", ".", "append", "(", "newSeries", ")", "return", "results" ]
Takes one metric or a wildcard seriesList, a base, and draws the y-axis in logarithmic format. If base is omitted, the function defaults to base 10. Example:: &target=log(carbon.agents.hostname.avgUpdateTime,2)
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "a", "base", "and", "draws", "the", "y", "-", "axis", "in", "logarithmic", "format", ".", "If", "base", "is", "omitted", "the", "function", "defaults", "to", "base", "10", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2065-L2090
2,352
brutasse/graphite-api
graphite_api/functions.py
maximumBelow
def maximumBelow(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a maximum value below n. Example:: &target=maximumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which always sent less than 1000 packets/min. """ results = [] for series in seriesList: val = safeMax(series) if val is None or val <= n: results.append(series) return results
python
def maximumBelow(requestContext, seriesList, n): results = [] for series in seriesList: val = safeMax(series) if val is None or val <= n: results.append(series) return results
[ "def", "maximumBelow", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "val", "=", "safeMax", "(", "series", ")", "if", "val", "is", "None", "or", "val", "<=", "n", ":", "results", ".", "append", "(", "series", ")", "return", "results" ]
Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a maximum value below n. Example:: &target=maximumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which always sent less than 1000 packets/min.
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "a", "constant", "n", ".", "Draws", "only", "the", "metrics", "with", "a", "maximum", "value", "below", "n", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2133-L2150
2,353
brutasse/graphite-api
graphite_api/functions.py
minimumBelow
def minimumBelow(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a minimum value below n. Example:: &target=minimumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which sent at one point less than 1000 packets/min. """ results = [] for series in seriesList: val = safeMin(series) if val is None or val <= n: results.append(series) return results
python
def minimumBelow(requestContext, seriesList, n): results = [] for series in seriesList: val = safeMin(series) if val is None or val <= n: results.append(series) return results
[ "def", "minimumBelow", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "val", "=", "safeMin", "(", "series", ")", "if", "val", "is", "None", "or", "val", "<=", "n", ":", "results", ".", "append", "(", "series", ")", "return", "results" ]
Takes one metric or a wildcard seriesList followed by a constant n. Draws only the metrics with a minimum value below n. Example:: &target=minimumBelow(system.interface.eth*.packetsSent,1000) This would only display interfaces which sent at one point less than 1000 packets/min.
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "a", "constant", "n", ".", "Draws", "only", "the", "metrics", "with", "a", "minimum", "value", "below", "n", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2153-L2170
2,354
brutasse/graphite-api
graphite_api/functions.py
highestMax
def highestMax(requestContext, seriesList, n=1): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the N metrics with the highest maximum value in the time period specified. Example:: &target=highestMax(server*.instance*.threads.busy,5) Draws the top 5 servers who have had the most busy threads during the time period specified. """ result_list = sorted(seriesList, key=lambda s: safeMax(s))[-n:] return sorted(result_list, key=lambda s: max(s), reverse=True)
python
def highestMax(requestContext, seriesList, n=1): result_list = sorted(seriesList, key=lambda s: safeMax(s))[-n:] return sorted(result_list, key=lambda s: max(s), reverse=True)
[ "def", "highestMax", "(", "requestContext", ",", "seriesList", ",", "n", "=", "1", ")", ":", "result_list", "=", "sorted", "(", "seriesList", ",", "key", "=", "lambda", "s", ":", "safeMax", "(", "s", ")", ")", "[", "-", "n", ":", "]", "return", "sorted", "(", "result_list", ",", "key", "=", "lambda", "s", ":", "max", "(", "s", ")", ",", "reverse", "=", "True", ")" ]
Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the N metrics with the highest maximum value in the time period specified. Example:: &target=highestMax(server*.instance*.threads.busy,5) Draws the top 5 servers who have had the most busy threads during the time period specified.
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "an", "integer", "N", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2189-L2205
2,355
brutasse/graphite-api
graphite_api/functions.py
currentAbove
def currentAbove(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics whose value is above N at the end of the time period specified. Example:: &target=currentAbove(server*.instance*.threads.busy,50) Draws the servers with more than 50 busy threads. """ results = [] for series in seriesList: val = safeLast(series) if val is not None and val >= n: results.append(series) return results
python
def currentAbove(requestContext, seriesList, n): results = [] for series in seriesList: val = safeLast(series) if val is not None and val >= n: results.append(series) return results
[ "def", "currentAbove", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "val", "=", "safeLast", "(", "series", ")", "if", "val", "is", "not", "None", "and", "val", ">=", "n", ":", "results", ".", "append", "(", "series", ")", "return", "results" ]
Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics whose value is above N at the end of the time period specified. Example:: &target=currentAbove(server*.instance*.threads.busy,50) Draws the servers with more than 50 busy threads.
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "an", "integer", "N", ".", "Out", "of", "all", "metrics", "passed", "draws", "only", "the", "metrics", "whose", "value", "is", "above", "N", "at", "the", "end", "of", "the", "time", "period", "specified", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2224-L2242
2,356
brutasse/graphite-api
graphite_api/functions.py
averageAbove
def averageAbove(requestContext, seriesList, n): """ Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics with an average value above N for the time period specified. Example:: &target=averageAbove(server*.instance*.threads.busy,25) Draws the servers with average values above 25. """ results = [] for series in seriesList: val = safeAvg(series) if val is not None and val >= n: results.append(series) return results
python
def averageAbove(requestContext, seriesList, n): results = [] for series in seriesList: val = safeAvg(series) if val is not None and val >= n: results.append(series) return results
[ "def", "averageAbove", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "val", "=", "safeAvg", "(", "series", ")", "if", "val", "is", "not", "None", "and", "val", ">=", "n", ":", "results", ".", "append", "(", "series", ")", "return", "results" ]
Takes one metric or a wildcard seriesList followed by an integer N. Out of all metrics passed, draws only the metrics with an average value above N for the time period specified. Example:: &target=averageAbove(server*.instance*.threads.busy,25) Draws the servers with average values above 25.
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "an", "integer", "N", ".", "Out", "of", "all", "metrics", "passed", "draws", "only", "the", "metrics", "with", "an", "average", "value", "above", "N", "for", "the", "time", "period", "specified", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2298-L2316
2,357
brutasse/graphite-api
graphite_api/functions.py
nPercentile
def nPercentile(requestContext, seriesList, n): """Returns n-percent of each series in the seriesList.""" assert n, 'The requested percent is required to be greater than 0' results = [] for s in seriesList: # Create a sorted copy of the TimeSeries excluding None values in the # values list. s_copy = TimeSeries(s.name, s.start, s.end, s.step, sorted(not_none(s))) if not s_copy: continue # Skip this series because it is empty. perc_val = _getPercentile(s_copy, n) if perc_val is not None: name = 'nPercentile(%s, %g)' % (s_copy.name, n) point_count = int((s.end - s.start)/s.step) perc_series = TimeSeries(name, s_copy.start, s_copy.end, s_copy.step, [perc_val] * point_count) perc_series.pathExpression = name results.append(perc_series) return results
python
def nPercentile(requestContext, seriesList, n): assert n, 'The requested percent is required to be greater than 0' results = [] for s in seriesList: # Create a sorted copy of the TimeSeries excluding None values in the # values list. s_copy = TimeSeries(s.name, s.start, s.end, s.step, sorted(not_none(s))) if not s_copy: continue # Skip this series because it is empty. perc_val = _getPercentile(s_copy, n) if perc_val is not None: name = 'nPercentile(%s, %g)' % (s_copy.name, n) point_count = int((s.end - s.start)/s.step) perc_series = TimeSeries(name, s_copy.start, s_copy.end, s_copy.step, [perc_val] * point_count) perc_series.pathExpression = name results.append(perc_series) return results
[ "def", "nPercentile", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "assert", "n", ",", "'The requested percent is required to be greater than 0'", "results", "=", "[", "]", "for", "s", "in", "seriesList", ":", "# Create a sorted copy of the TimeSeries excluding None values in the", "# values list.", "s_copy", "=", "TimeSeries", "(", "s", ".", "name", ",", "s", ".", "start", ",", "s", ".", "end", ",", "s", ".", "step", ",", "sorted", "(", "not_none", "(", "s", ")", ")", ")", "if", "not", "s_copy", ":", "continue", "# Skip this series because it is empty.", "perc_val", "=", "_getPercentile", "(", "s_copy", ",", "n", ")", "if", "perc_val", "is", "not", "None", ":", "name", "=", "'nPercentile(%s, %g)'", "%", "(", "s_copy", ".", "name", ",", "n", ")", "point_count", "=", "int", "(", "(", "s", ".", "end", "-", "s", ".", "start", ")", "/", "s", ".", "step", ")", "perc_series", "=", "TimeSeries", "(", "name", ",", "s_copy", ".", "start", ",", "s_copy", ".", "end", ",", "s_copy", ".", "step", ",", "[", "perc_val", "]", "*", "point_count", ")", "perc_series", ".", "pathExpression", "=", "name", "results", ".", "append", "(", "perc_series", ")", "return", "results" ]
Returns n-percent of each series in the seriesList.
[ "Returns", "n", "-", "percent", "of", "each", "series", "in", "the", "seriesList", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2371-L2392
2,358
brutasse/graphite-api
graphite_api/functions.py
averageOutsidePercentile
def averageOutsidePercentile(requestContext, seriesList, n): """ Removes functions lying inside an average percentile interval """ averages = [safeAvg(s) for s in seriesList] if n < 50: n = 100 - n lowPercentile = _getPercentile(averages, 100 - n) highPercentile = _getPercentile(averages, n) return [s for s in seriesList if not lowPercentile < safeAvg(s) < highPercentile]
python
def averageOutsidePercentile(requestContext, seriesList, n): averages = [safeAvg(s) for s in seriesList] if n < 50: n = 100 - n lowPercentile = _getPercentile(averages, 100 - n) highPercentile = _getPercentile(averages, n) return [s for s in seriesList if not lowPercentile < safeAvg(s) < highPercentile]
[ "def", "averageOutsidePercentile", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "averages", "=", "[", "safeAvg", "(", "s", ")", "for", "s", "in", "seriesList", "]", "if", "n", "<", "50", ":", "n", "=", "100", "-", "n", "lowPercentile", "=", "_getPercentile", "(", "averages", ",", "100", "-", "n", ")", "highPercentile", "=", "_getPercentile", "(", "averages", ",", "n", ")", "return", "[", "s", "for", "s", "in", "seriesList", "if", "not", "lowPercentile", "<", "safeAvg", "(", "s", ")", "<", "highPercentile", "]" ]
Removes functions lying inside an average percentile interval
[ "Removes", "functions", "lying", "inside", "an", "average", "percentile", "interval" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2395-L2408
2,359
brutasse/graphite-api
graphite_api/functions.py
removeBetweenPercentile
def removeBetweenPercentile(requestContext, seriesList, n): """ Removes lines who do not have an value lying in the x-percentile of all the values at a moment """ if n < 50: n = 100 - n transposed = list(zip_longest(*seriesList)) lowPercentiles = [_getPercentile(col, 100-n) for col in transposed] highPercentiles = [_getPercentile(col, n) for col in transposed] return [l for l in seriesList if sum([not lowPercentiles[index] < val < highPercentiles[index] for index, val in enumerate(l)]) > 0]
python
def removeBetweenPercentile(requestContext, seriesList, n): if n < 50: n = 100 - n transposed = list(zip_longest(*seriesList)) lowPercentiles = [_getPercentile(col, 100-n) for col in transposed] highPercentiles = [_getPercentile(col, n) for col in transposed] return [l for l in seriesList if sum([not lowPercentiles[index] < val < highPercentiles[index] for index, val in enumerate(l)]) > 0]
[ "def", "removeBetweenPercentile", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "if", "n", "<", "50", ":", "n", "=", "100", "-", "n", "transposed", "=", "list", "(", "zip_longest", "(", "*", "seriesList", ")", ")", "lowPercentiles", "=", "[", "_getPercentile", "(", "col", ",", "100", "-", "n", ")", "for", "col", "in", "transposed", "]", "highPercentiles", "=", "[", "_getPercentile", "(", "col", ",", "n", ")", "for", "col", "in", "transposed", "]", "return", "[", "l", "for", "l", "in", "seriesList", "if", "sum", "(", "[", "not", "lowPercentiles", "[", "index", "]", "<", "val", "<", "highPercentiles", "[", "index", "]", "for", "index", ",", "val", "in", "enumerate", "(", "l", ")", "]", ")", ">", "0", "]" ]
Removes lines who do not have an value lying in the x-percentile of all the values at a moment
[ "Removes", "lines", "who", "do", "not", "have", "an", "value", "lying", "in", "the", "x", "-", "percentile", "of", "all", "the", "values", "at", "a", "moment" ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2411-L2426
2,360
brutasse/graphite-api
graphite_api/functions.py
removeAboveValue
def removeAboveValue(requestContext, seriesList, n): """ Removes data above the given threshold from the series or list of series provided. Values above this threshold are assigned a value of None. """ for s in seriesList: s.name = 'removeAboveValue(%s, %g)' % (s.name, n) s.pathExpression = s.name for (index, val) in enumerate(s): if val is None: continue if val > n: s[index] = None return seriesList
python
def removeAboveValue(requestContext, seriesList, n): for s in seriesList: s.name = 'removeAboveValue(%s, %g)' % (s.name, n) s.pathExpression = s.name for (index, val) in enumerate(s): if val is None: continue if val > n: s[index] = None return seriesList
[ "def", "removeAboveValue", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "for", "s", "in", "seriesList", ":", "s", ".", "name", "=", "'removeAboveValue(%s, %g)'", "%", "(", "s", ".", "name", ",", "n", ")", "s", ".", "pathExpression", "=", "s", ".", "name", "for", "(", "index", ",", "val", ")", "in", "enumerate", "(", "s", ")", ":", "if", "val", "is", "None", ":", "continue", "if", "val", ">", "n", ":", "s", "[", "index", "]", "=", "None", "return", "seriesList" ]
Removes data above the given threshold from the series or list of series provided. Values above this threshold are assigned a value of None.
[ "Removes", "data", "above", "the", "given", "threshold", "from", "the", "series", "or", "list", "of", "series", "provided", ".", "Values", "above", "this", "threshold", "are", "assigned", "a", "value", "of", "None", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2450-L2464
2,361
brutasse/graphite-api
graphite_api/functions.py
removeBelowPercentile
def removeBelowPercentile(requestContext, seriesList, n): """ Removes data below the nth percentile from the series or list of series provided. Values below this percentile are assigned a value of None. """ for s in seriesList: s.name = 'removeBelowPercentile(%s, %g)' % (s.name, n) s.pathExpression = s.name try: percentile = nPercentile(requestContext, [s], n)[0][0] except IndexError: continue for (index, val) in enumerate(s): if val is None: continue if val < percentile: s[index] = None return seriesList
python
def removeBelowPercentile(requestContext, seriesList, n): for s in seriesList: s.name = 'removeBelowPercentile(%s, %g)' % (s.name, n) s.pathExpression = s.name try: percentile = nPercentile(requestContext, [s], n)[0][0] except IndexError: continue for (index, val) in enumerate(s): if val is None: continue if val < percentile: s[index] = None return seriesList
[ "def", "removeBelowPercentile", "(", "requestContext", ",", "seriesList", ",", "n", ")", ":", "for", "s", "in", "seriesList", ":", "s", ".", "name", "=", "'removeBelowPercentile(%s, %g)'", "%", "(", "s", ".", "name", ",", "n", ")", "s", ".", "pathExpression", "=", "s", ".", "name", "try", ":", "percentile", "=", "nPercentile", "(", "requestContext", ",", "[", "s", "]", ",", "n", ")", "[", "0", "]", "[", "0", "]", "except", "IndexError", ":", "continue", "for", "(", "index", ",", "val", ")", "in", "enumerate", "(", "s", ")", ":", "if", "val", "is", "None", ":", "continue", "if", "val", "<", "percentile", ":", "s", "[", "index", "]", "=", "None", "return", "seriesList" ]
Removes data below the nth percentile from the series or list of series provided. Values below this percentile are assigned a value of None.
[ "Removes", "data", "below", "the", "nth", "percentile", "from", "the", "series", "or", "list", "of", "series", "provided", ".", "Values", "below", "this", "percentile", "are", "assigned", "a", "value", "of", "None", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2467-L2485
2,362
brutasse/graphite-api
graphite_api/functions.py
useSeriesAbove
def useSeriesAbove(requestContext, seriesList, value, search, replace): """ Compares the maximum of each series against the given `value`. If the series maximum is greater than `value`, the regular expression search and replace is applied against the series name to plot a related metric. e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'), the response time metric will be plotted only when the maximum value of the corresponding request/s metric is > 10 Example:: &target=useSeriesAbove(ganglia.metric1.reqs,10,"reqs","time") """ newSeries = [] for series in seriesList: newname = re.sub(search, replace, series.name) if safeMax(series) > value: n = evaluateTarget(requestContext, newname) if n is not None and len(n) > 0: newSeries.append(n[0]) return newSeries
python
def useSeriesAbove(requestContext, seriesList, value, search, replace): newSeries = [] for series in seriesList: newname = re.sub(search, replace, series.name) if safeMax(series) > value: n = evaluateTarget(requestContext, newname) if n is not None and len(n) > 0: newSeries.append(n[0]) return newSeries
[ "def", "useSeriesAbove", "(", "requestContext", ",", "seriesList", ",", "value", ",", "search", ",", "replace", ")", ":", "newSeries", "=", "[", "]", "for", "series", "in", "seriesList", ":", "newname", "=", "re", ".", "sub", "(", "search", ",", "replace", ",", "series", ".", "name", ")", "if", "safeMax", "(", "series", ")", ">", "value", ":", "n", "=", "evaluateTarget", "(", "requestContext", ",", "newname", ")", "if", "n", "is", "not", "None", "and", "len", "(", "n", ")", ">", "0", ":", "newSeries", ".", "append", "(", "n", "[", "0", "]", ")", "return", "newSeries" ]
Compares the maximum of each series against the given `value`. If the series maximum is greater than `value`, the regular expression search and replace is applied against the series name to plot a related metric. e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'), the response time metric will be plotted only when the maximum value of the corresponding request/s metric is > 10 Example:: &target=useSeriesAbove(ganglia.metric1.reqs,10,"reqs","time")
[ "Compares", "the", "maximum", "of", "each", "series", "against", "the", "given", "value", ".", "If", "the", "series", "maximum", "is", "greater", "than", "value", "the", "regular", "expression", "search", "and", "replace", "is", "applied", "against", "the", "series", "name", "to", "plot", "a", "related", "metric", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2585-L2608
2,363
brutasse/graphite-api
graphite_api/functions.py
secondYAxis
def secondYAxis(requestContext, seriesList): """ Graph the series on the secondary Y axis. """ for series in seriesList: series.options['secondYAxis'] = True series.name = 'secondYAxis(%s)' % series.name return seriesList
python
def secondYAxis(requestContext, seriesList): for series in seriesList: series.options['secondYAxis'] = True series.name = 'secondYAxis(%s)' % series.name return seriesList
[ "def", "secondYAxis", "(", "requestContext", ",", "seriesList", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "options", "[", "'secondYAxis'", "]", "=", "True", "series", ".", "name", "=", "'secondYAxis(%s)'", "%", "series", ".", "name", "return", "seriesList" ]
Graph the series on the secondary Y axis.
[ "Graph", "the", "series", "on", "the", "secondary", "Y", "axis", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2731-L2738
2,364
brutasse/graphite-api
graphite_api/functions.py
holtWintersForecast
def holtWintersForecast(requestContext, seriesList): """ Performs a Holt-Winters forecast using the series as input data. Data from one week previous to the series is used to bootstrap the initial forecast. """ previewSeconds = 7 * 86400 # 7 days # ignore original data and pull new, including our preview newContext = requestContext.copy() newContext['startTime'] = (requestContext['startTime'] - timedelta(seconds=previewSeconds)) previewList = evaluateTokens(newContext, requestContext['args'][0]) results = [] for series in previewList: analysis = holtWintersAnalysis(series) predictions = analysis['predictions'] windowPoints = previewSeconds // predictions.step result = TimeSeries("holtWintersForecast(%s)" % series.name, predictions.start + previewSeconds, predictions.end, predictions.step, predictions[windowPoints:]) result.pathExpression = result.name results.append(result) return results
python
def holtWintersForecast(requestContext, seriesList): previewSeconds = 7 * 86400 # 7 days # ignore original data and pull new, including our preview newContext = requestContext.copy() newContext['startTime'] = (requestContext['startTime'] - timedelta(seconds=previewSeconds)) previewList = evaluateTokens(newContext, requestContext['args'][0]) results = [] for series in previewList: analysis = holtWintersAnalysis(series) predictions = analysis['predictions'] windowPoints = previewSeconds // predictions.step result = TimeSeries("holtWintersForecast(%s)" % series.name, predictions.start + previewSeconds, predictions.end, predictions.step, predictions[windowPoints:]) result.pathExpression = result.name results.append(result) return results
[ "def", "holtWintersForecast", "(", "requestContext", ",", "seriesList", ")", ":", "previewSeconds", "=", "7", "*", "86400", "# 7 days", "# ignore original data and pull new, including our preview", "newContext", "=", "requestContext", ".", "copy", "(", ")", "newContext", "[", "'startTime'", "]", "=", "(", "requestContext", "[", "'startTime'", "]", "-", "timedelta", "(", "seconds", "=", "previewSeconds", ")", ")", "previewList", "=", "evaluateTokens", "(", "newContext", ",", "requestContext", "[", "'args'", "]", "[", "0", "]", ")", "results", "=", "[", "]", "for", "series", "in", "previewList", ":", "analysis", "=", "holtWintersAnalysis", "(", "series", ")", "predictions", "=", "analysis", "[", "'predictions'", "]", "windowPoints", "=", "previewSeconds", "//", "predictions", ".", "step", "result", "=", "TimeSeries", "(", "\"holtWintersForecast(%s)\"", "%", "series", ".", "name", ",", "predictions", ".", "start", "+", "previewSeconds", ",", "predictions", ".", "end", ",", "predictions", ".", "step", ",", "predictions", "[", "windowPoints", ":", "]", ")", "result", ".", "pathExpression", "=", "result", ".", "name", "results", ".", "append", "(", "result", ")", "return", "results" ]
Performs a Holt-Winters forecast using the series as input data. Data from one week previous to the series is used to bootstrap the initial forecast.
[ "Performs", "a", "Holt", "-", "Winters", "forecast", "using", "the", "series", "as", "input", "data", ".", "Data", "from", "one", "week", "previous", "to", "the", "series", "is", "used", "to", "bootstrap", "the", "initial", "forecast", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2854-L2876
2,365
brutasse/graphite-api
graphite_api/functions.py
holtWintersConfidenceBands
def holtWintersConfidenceBands(requestContext, seriesList, delta=3): """ Performs a Holt-Winters forecast using the series as input data and plots upper and lower bands with the predicted forecast deviations. """ previewSeconds = 7 * 86400 # 7 days # ignore original data and pull new, including our preview newContext = requestContext.copy() newContext['startTime'] = (requestContext['startTime'] - timedelta(seconds=previewSeconds)) previewList = evaluateTokens(newContext, requestContext['args'][0]) results = [] for series in previewList: analysis = holtWintersAnalysis(series) data = analysis['predictions'] windowPoints = previewSeconds // data.step forecast = TimeSeries(data.name, data.start + previewSeconds, data.end, data.step, data[windowPoints:]) forecast.pathExpression = data.pathExpression data = analysis['deviations'] windowPoints = previewSeconds // data.step deviation = TimeSeries(data.name, data.start + previewSeconds, data.end, data.step, data[windowPoints:]) deviation.pathExpression = data.pathExpression seriesLength = len(forecast) i = 0 upperBand = list() lowerBand = list() while i < seriesLength: forecast_item = forecast[i] deviation_item = deviation[i] i = i + 1 if forecast_item is None or deviation_item is None: upperBand.append(None) lowerBand.append(None) else: scaled_deviation = delta * deviation_item upperBand.append(forecast_item + scaled_deviation) lowerBand.append(forecast_item - scaled_deviation) upperName = "holtWintersConfidenceUpper(%s)" % series.name lowerName = "holtWintersConfidenceLower(%s)" % series.name upperSeries = TimeSeries(upperName, forecast.start, forecast.end, forecast.step, upperBand) lowerSeries = TimeSeries(lowerName, forecast.start, forecast.end, forecast.step, lowerBand) upperSeries.pathExpression = series.pathExpression lowerSeries.pathExpression = series.pathExpression results.append(lowerSeries) results.append(upperSeries) return results
python
def holtWintersConfidenceBands(requestContext, seriesList, delta=3): previewSeconds = 7 * 86400 # 7 days # ignore original data and pull new, including our preview newContext = requestContext.copy() newContext['startTime'] = (requestContext['startTime'] - timedelta(seconds=previewSeconds)) previewList = evaluateTokens(newContext, requestContext['args'][0]) results = [] for series in previewList: analysis = holtWintersAnalysis(series) data = analysis['predictions'] windowPoints = previewSeconds // data.step forecast = TimeSeries(data.name, data.start + previewSeconds, data.end, data.step, data[windowPoints:]) forecast.pathExpression = data.pathExpression data = analysis['deviations'] windowPoints = previewSeconds // data.step deviation = TimeSeries(data.name, data.start + previewSeconds, data.end, data.step, data[windowPoints:]) deviation.pathExpression = data.pathExpression seriesLength = len(forecast) i = 0 upperBand = list() lowerBand = list() while i < seriesLength: forecast_item = forecast[i] deviation_item = deviation[i] i = i + 1 if forecast_item is None or deviation_item is None: upperBand.append(None) lowerBand.append(None) else: scaled_deviation = delta * deviation_item upperBand.append(forecast_item + scaled_deviation) lowerBand.append(forecast_item - scaled_deviation) upperName = "holtWintersConfidenceUpper(%s)" % series.name lowerName = "holtWintersConfidenceLower(%s)" % series.name upperSeries = TimeSeries(upperName, forecast.start, forecast.end, forecast.step, upperBand) lowerSeries = TimeSeries(lowerName, forecast.start, forecast.end, forecast.step, lowerBand) upperSeries.pathExpression = series.pathExpression lowerSeries.pathExpression = series.pathExpression results.append(lowerSeries) results.append(upperSeries) return results
[ "def", "holtWintersConfidenceBands", "(", "requestContext", ",", "seriesList", ",", "delta", "=", "3", ")", ":", "previewSeconds", "=", "7", "*", "86400", "# 7 days", "# ignore original data and pull new, including our preview", "newContext", "=", "requestContext", ".", "copy", "(", ")", "newContext", "[", "'startTime'", "]", "=", "(", "requestContext", "[", "'startTime'", "]", "-", "timedelta", "(", "seconds", "=", "previewSeconds", ")", ")", "previewList", "=", "evaluateTokens", "(", "newContext", ",", "requestContext", "[", "'args'", "]", "[", "0", "]", ")", "results", "=", "[", "]", "for", "series", "in", "previewList", ":", "analysis", "=", "holtWintersAnalysis", "(", "series", ")", "data", "=", "analysis", "[", "'predictions'", "]", "windowPoints", "=", "previewSeconds", "//", "data", ".", "step", "forecast", "=", "TimeSeries", "(", "data", ".", "name", ",", "data", ".", "start", "+", "previewSeconds", ",", "data", ".", "end", ",", "data", ".", "step", ",", "data", "[", "windowPoints", ":", "]", ")", "forecast", ".", "pathExpression", "=", "data", ".", "pathExpression", "data", "=", "analysis", "[", "'deviations'", "]", "windowPoints", "=", "previewSeconds", "//", "data", ".", "step", "deviation", "=", "TimeSeries", "(", "data", ".", "name", ",", "data", ".", "start", "+", "previewSeconds", ",", "data", ".", "end", ",", "data", ".", "step", ",", "data", "[", "windowPoints", ":", "]", ")", "deviation", ".", "pathExpression", "=", "data", ".", "pathExpression", "seriesLength", "=", "len", "(", "forecast", ")", "i", "=", "0", "upperBand", "=", "list", "(", ")", "lowerBand", "=", "list", "(", ")", "while", "i", "<", "seriesLength", ":", "forecast_item", "=", "forecast", "[", "i", "]", "deviation_item", "=", "deviation", "[", "i", "]", "i", "=", "i", "+", "1", "if", "forecast_item", "is", "None", "or", "deviation_item", "is", "None", ":", "upperBand", ".", "append", "(", "None", ")", "lowerBand", ".", "append", "(", "None", ")", "else", ":", "scaled_deviation", 
"=", "delta", "*", "deviation_item", "upperBand", ".", "append", "(", "forecast_item", "+", "scaled_deviation", ")", "lowerBand", ".", "append", "(", "forecast_item", "-", "scaled_deviation", ")", "upperName", "=", "\"holtWintersConfidenceUpper(%s)\"", "%", "series", ".", "name", "lowerName", "=", "\"holtWintersConfidenceLower(%s)\"", "%", "series", ".", "name", "upperSeries", "=", "TimeSeries", "(", "upperName", ",", "forecast", ".", "start", ",", "forecast", ".", "end", ",", "forecast", ".", "step", ",", "upperBand", ")", "lowerSeries", "=", "TimeSeries", "(", "lowerName", ",", "forecast", ".", "start", ",", "forecast", ".", "end", ",", "forecast", ".", "step", ",", "lowerBand", ")", "upperSeries", ".", "pathExpression", "=", "series", ".", "pathExpression", "lowerSeries", ".", "pathExpression", "=", "series", ".", "pathExpression", "results", ".", "append", "(", "lowerSeries", ")", "results", ".", "append", "(", "upperSeries", ")", "return", "results" ]
Performs a Holt-Winters forecast using the series as input data and plots upper and lower bands with the predicted forecast deviations.
[ "Performs", "a", "Holt", "-", "Winters", "forecast", "using", "the", "series", "as", "input", "data", "and", "plots", "upper", "and", "lower", "bands", "with", "the", "predicted", "forecast", "deviations", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2879-L2932
2,366
brutasse/graphite-api
graphite_api/functions.py
holtWintersAberration
def holtWintersAberration(requestContext, seriesList, delta=3): """ Performs a Holt-Winters forecast using the series as input data and plots the positive or negative deviation of the series data from the forecast. """ results = [] for series in seriesList: confidenceBands = holtWintersConfidenceBands(requestContext, [series], delta) lowerBand = confidenceBands[0] upperBand = confidenceBands[1] aberration = list() for i, actual in enumerate(series): if actual is None: aberration.append(0) elif upperBand[i] is not None and actual > upperBand[i]: aberration.append(actual - upperBand[i]) elif lowerBand[i] is not None and actual < lowerBand[i]: aberration.append(actual - lowerBand[i]) else: aberration.append(0) newName = "holtWintersAberration(%s)" % series.name results.append(TimeSeries(newName, series.start, series.end, series.step, aberration)) return results
python
def holtWintersAberration(requestContext, seriesList, delta=3): results = [] for series in seriesList: confidenceBands = holtWintersConfidenceBands(requestContext, [series], delta) lowerBand = confidenceBands[0] upperBand = confidenceBands[1] aberration = list() for i, actual in enumerate(series): if actual is None: aberration.append(0) elif upperBand[i] is not None and actual > upperBand[i]: aberration.append(actual - upperBand[i]) elif lowerBand[i] is not None and actual < lowerBand[i]: aberration.append(actual - lowerBand[i]) else: aberration.append(0) newName = "holtWintersAberration(%s)" % series.name results.append(TimeSeries(newName, series.start, series.end, series.step, aberration)) return results
[ "def", "holtWintersAberration", "(", "requestContext", ",", "seriesList", ",", "delta", "=", "3", ")", ":", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "confidenceBands", "=", "holtWintersConfidenceBands", "(", "requestContext", ",", "[", "series", "]", ",", "delta", ")", "lowerBand", "=", "confidenceBands", "[", "0", "]", "upperBand", "=", "confidenceBands", "[", "1", "]", "aberration", "=", "list", "(", ")", "for", "i", ",", "actual", "in", "enumerate", "(", "series", ")", ":", "if", "actual", "is", "None", ":", "aberration", ".", "append", "(", "0", ")", "elif", "upperBand", "[", "i", "]", "is", "not", "None", "and", "actual", ">", "upperBand", "[", "i", "]", ":", "aberration", ".", "append", "(", "actual", "-", "upperBand", "[", "i", "]", ")", "elif", "lowerBand", "[", "i", "]", "is", "not", "None", "and", "actual", "<", "lowerBand", "[", "i", "]", ":", "aberration", ".", "append", "(", "actual", "-", "lowerBand", "[", "i", "]", ")", "else", ":", "aberration", ".", "append", "(", "0", ")", "newName", "=", "\"holtWintersAberration(%s)\"", "%", "series", ".", "name", "results", ".", "append", "(", "TimeSeries", "(", "newName", ",", "series", ".", "start", ",", "series", ".", "end", ",", "series", ".", "step", ",", "aberration", ")", ")", "return", "results" ]
Performs a Holt-Winters forecast using the series as input data and plots the positive or negative deviation of the series data from the forecast.
[ "Performs", "a", "Holt", "-", "Winters", "forecast", "using", "the", "series", "as", "input", "data", "and", "plots", "the", "positive", "or", "negative", "deviation", "of", "the", "series", "data", "from", "the", "forecast", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2935-L2960
2,367
brutasse/graphite-api
graphite_api/functions.py
holtWintersConfidenceArea
def holtWintersConfidenceArea(requestContext, seriesList, delta=3): """ Performs a Holt-Winters forecast using the series as input data and plots the area between the upper and lower bands of the predicted forecast deviations. """ bands = holtWintersConfidenceBands(requestContext, seriesList, delta) results = areaBetween(requestContext, bands) for series in results: series.name = series.name.replace('areaBetween', 'holtWintersConfidenceArea') return results
python
def holtWintersConfidenceArea(requestContext, seriesList, delta=3): bands = holtWintersConfidenceBands(requestContext, seriesList, delta) results = areaBetween(requestContext, bands) for series in results: series.name = series.name.replace('areaBetween', 'holtWintersConfidenceArea') return results
[ "def", "holtWintersConfidenceArea", "(", "requestContext", ",", "seriesList", ",", "delta", "=", "3", ")", ":", "bands", "=", "holtWintersConfidenceBands", "(", "requestContext", ",", "seriesList", ",", "delta", ")", "results", "=", "areaBetween", "(", "requestContext", ",", "bands", ")", "for", "series", "in", "results", ":", "series", ".", "name", "=", "series", ".", "name", ".", "replace", "(", "'areaBetween'", ",", "'holtWintersConfidenceArea'", ")", "return", "results" ]
Performs a Holt-Winters forecast using the series as input data and plots the area between the upper and lower bands of the predicted forecast deviations.
[ "Performs", "a", "Holt", "-", "Winters", "forecast", "using", "the", "series", "as", "input", "data", "and", "plots", "the", "area", "between", "the", "upper", "and", "lower", "bands", "of", "the", "predicted", "forecast", "deviations", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2963-L2974
2,368
brutasse/graphite-api
graphite_api/functions.py
linearRegressionAnalysis
def linearRegressionAnalysis(series): """ Returns factor and offset of linear regression function by least squares method. """ n = safeLen(series) sumI = sum([i for i, v in enumerate(series) if v is not None]) sumV = sum([v for i, v in enumerate(series) if v is not None]) sumII = sum([i * i for i, v in enumerate(series) if v is not None]) sumIV = sum([i * v for i, v in enumerate(series) if v is not None]) denominator = float(n * sumII - sumI * sumI) if denominator == 0: return None else: factor = (n * sumIV - sumI * sumV) / denominator / series.step offset = sumII * sumV - sumIV * sumI offset = offset / denominator - factor * series.start return factor, offset
python
def linearRegressionAnalysis(series): n = safeLen(series) sumI = sum([i for i, v in enumerate(series) if v is not None]) sumV = sum([v for i, v in enumerate(series) if v is not None]) sumII = sum([i * i for i, v in enumerate(series) if v is not None]) sumIV = sum([i * v for i, v in enumerate(series) if v is not None]) denominator = float(n * sumII - sumI * sumI) if denominator == 0: return None else: factor = (n * sumIV - sumI * sumV) / denominator / series.step offset = sumII * sumV - sumIV * sumI offset = offset / denominator - factor * series.start return factor, offset
[ "def", "linearRegressionAnalysis", "(", "series", ")", ":", "n", "=", "safeLen", "(", "series", ")", "sumI", "=", "sum", "(", "[", "i", "for", "i", ",", "v", "in", "enumerate", "(", "series", ")", "if", "v", "is", "not", "None", "]", ")", "sumV", "=", "sum", "(", "[", "v", "for", "i", ",", "v", "in", "enumerate", "(", "series", ")", "if", "v", "is", "not", "None", "]", ")", "sumII", "=", "sum", "(", "[", "i", "*", "i", "for", "i", ",", "v", "in", "enumerate", "(", "series", ")", "if", "v", "is", "not", "None", "]", ")", "sumIV", "=", "sum", "(", "[", "i", "*", "v", "for", "i", ",", "v", "in", "enumerate", "(", "series", ")", "if", "v", "is", "not", "None", "]", ")", "denominator", "=", "float", "(", "n", "*", "sumII", "-", "sumI", "*", "sumI", ")", "if", "denominator", "==", "0", ":", "return", "None", "else", ":", "factor", "=", "(", "n", "*", "sumIV", "-", "sumI", "*", "sumV", ")", "/", "denominator", "/", "series", ".", "step", "offset", "=", "sumII", "*", "sumV", "-", "sumIV", "*", "sumI", "offset", "=", "offset", "/", "denominator", "-", "factor", "*", "series", ".", "start", "return", "factor", ",", "offset" ]
Returns factor and offset of linear regression function by least squares method.
[ "Returns", "factor", "and", "offset", "of", "linear", "regression", "function", "by", "least", "squares", "method", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2977-L2995
2,369
brutasse/graphite-api
graphite_api/functions.py
linearRegression
def linearRegression(requestContext, seriesList, startSourceAt=None, endSourceAt=None): """ Graphs the liner regression function by least squares method. Takes one metric or a wildcard seriesList, followed by a quoted string with the time to start the line and another quoted string with the time to end the line. The start and end times are inclusive (default range is from to until). See ``from / until`` in the render\_api_ for examples of time formats. Datapoints in the range is used to regression. Example:: &target=linearRegression(Server.instance01.threads.busy,'-1d') &target=linearRegression(Server.instance*.threads.busy, "00:00 20140101","11:59 20140630") """ from .app import evaluateTarget results = [] sourceContext = requestContext.copy() if startSourceAt is not None: sourceContext['startTime'] = parseATTime(startSourceAt) if endSourceAt is not None: sourceContext['endTime'] = parseATTime(endSourceAt) sourceList = [] for series in seriesList: source = evaluateTarget(sourceContext, series.pathExpression) sourceList.extend(source) for source, series in zip(sourceList, seriesList): newName = 'linearRegression(%s, %s, %s)' % ( series.name, int(epoch(sourceContext['startTime'])), int(epoch(sourceContext['endTime']))) forecast = linearRegressionAnalysis(source) if forecast is None: continue factor, offset = forecast values = [offset + (series.start + i * series.step) * factor for i in range(len(series))] newSeries = TimeSeries(newName, series.start, series.end, series.step, values) newSeries.pathExpression = newSeries.name results.append(newSeries) return results
python
def linearRegression(requestContext, seriesList, startSourceAt=None, endSourceAt=None): from .app import evaluateTarget results = [] sourceContext = requestContext.copy() if startSourceAt is not None: sourceContext['startTime'] = parseATTime(startSourceAt) if endSourceAt is not None: sourceContext['endTime'] = parseATTime(endSourceAt) sourceList = [] for series in seriesList: source = evaluateTarget(sourceContext, series.pathExpression) sourceList.extend(source) for source, series in zip(sourceList, seriesList): newName = 'linearRegression(%s, %s, %s)' % ( series.name, int(epoch(sourceContext['startTime'])), int(epoch(sourceContext['endTime']))) forecast = linearRegressionAnalysis(source) if forecast is None: continue factor, offset = forecast values = [offset + (series.start + i * series.step) * factor for i in range(len(series))] newSeries = TimeSeries(newName, series.start, series.end, series.step, values) newSeries.pathExpression = newSeries.name results.append(newSeries) return results
[ "def", "linearRegression", "(", "requestContext", ",", "seriesList", ",", "startSourceAt", "=", "None", ",", "endSourceAt", "=", "None", ")", ":", "from", ".", "app", "import", "evaluateTarget", "results", "=", "[", "]", "sourceContext", "=", "requestContext", ".", "copy", "(", ")", "if", "startSourceAt", "is", "not", "None", ":", "sourceContext", "[", "'startTime'", "]", "=", "parseATTime", "(", "startSourceAt", ")", "if", "endSourceAt", "is", "not", "None", ":", "sourceContext", "[", "'endTime'", "]", "=", "parseATTime", "(", "endSourceAt", ")", "sourceList", "=", "[", "]", "for", "series", "in", "seriesList", ":", "source", "=", "evaluateTarget", "(", "sourceContext", ",", "series", ".", "pathExpression", ")", "sourceList", ".", "extend", "(", "source", ")", "for", "source", ",", "series", "in", "zip", "(", "sourceList", ",", "seriesList", ")", ":", "newName", "=", "'linearRegression(%s, %s, %s)'", "%", "(", "series", ".", "name", ",", "int", "(", "epoch", "(", "sourceContext", "[", "'startTime'", "]", ")", ")", ",", "int", "(", "epoch", "(", "sourceContext", "[", "'endTime'", "]", ")", ")", ")", "forecast", "=", "linearRegressionAnalysis", "(", "source", ")", "if", "forecast", "is", "None", ":", "continue", "factor", ",", "offset", "=", "forecast", "values", "=", "[", "offset", "+", "(", "series", ".", "start", "+", "i", "*", "series", ".", "step", ")", "*", "factor", "for", "i", "in", "range", "(", "len", "(", "series", ")", ")", "]", "newSeries", "=", "TimeSeries", "(", "newName", ",", "series", ".", "start", ",", "series", ".", "end", ",", "series", ".", "step", ",", "values", ")", "newSeries", ".", "pathExpression", "=", "newSeries", ".", "name", "results", ".", "append", "(", "newSeries", ")", "return", "results" ]
Graphs the liner regression function by least squares method. Takes one metric or a wildcard seriesList, followed by a quoted string with the time to start the line and another quoted string with the time to end the line. The start and end times are inclusive (default range is from to until). See ``from / until`` in the render\_api_ for examples of time formats. Datapoints in the range is used to regression. Example:: &target=linearRegression(Server.instance01.threads.busy,'-1d') &target=linearRegression(Server.instance*.threads.busy, "00:00 20140101","11:59 20140630")
[ "Graphs", "the", "liner", "regression", "function", "by", "least", "squares", "method", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2998-L3044
2,370
brutasse/graphite-api
graphite_api/functions.py
drawAsInfinite
def drawAsInfinite(requestContext, seriesList): """ Takes one metric or a wildcard seriesList. If the value is zero, draw the line at 0. If the value is above zero, draw the line at infinity. If the value is null or less than zero, do not draw the line. Useful for displaying on/off metrics, such as exit codes. (0 = success, anything else = failure.) Example:: drawAsInfinite(Testing.script.exitCode) """ for series in seriesList: series.options['drawAsInfinite'] = True series.name = 'drawAsInfinite(%s)' % series.name return seriesList
python
def drawAsInfinite(requestContext, seriesList): for series in seriesList: series.options['drawAsInfinite'] = True series.name = 'drawAsInfinite(%s)' % series.name return seriesList
[ "def", "drawAsInfinite", "(", "requestContext", ",", "seriesList", ")", ":", "for", "series", "in", "seriesList", ":", "series", ".", "options", "[", "'drawAsInfinite'", "]", "=", "True", "series", ".", "name", "=", "'drawAsInfinite(%s)'", "%", "series", ".", "name", "return", "seriesList" ]
Takes one metric or a wildcard seriesList. If the value is zero, draw the line at 0. If the value is above zero, draw the line at infinity. If the value is null or less than zero, do not draw the line. Useful for displaying on/off metrics, such as exit codes. (0 = success, anything else = failure.) Example:: drawAsInfinite(Testing.script.exitCode)
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", ".", "If", "the", "value", "is", "zero", "draw", "the", "line", "at", "0", ".", "If", "the", "value", "is", "above", "zero", "draw", "the", "line", "at", "infinity", ".", "If", "the", "value", "is", "null", "or", "less", "than", "zero", "do", "not", "draw", "the", "line", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3047-L3065
2,371
brutasse/graphite-api
graphite_api/functions.py
constantLine
def constantLine(requestContext, value): """ Takes a float F. Draws a horizontal line at value F across the graph. Example:: &target=constantLine(123.456) """ name = "constantLine(%s)" % str(value) start = int(epoch(requestContext['startTime'])) end = int(epoch(requestContext['endTime'])) step = int((end - start) / 2.0) series = TimeSeries(str(value), start, end, step, [value, value, value]) series.pathExpression = name return [series]
python
def constantLine(requestContext, value): name = "constantLine(%s)" % str(value) start = int(epoch(requestContext['startTime'])) end = int(epoch(requestContext['endTime'])) step = int((end - start) / 2.0) series = TimeSeries(str(value), start, end, step, [value, value, value]) series.pathExpression = name return [series]
[ "def", "constantLine", "(", "requestContext", ",", "value", ")", ":", "name", "=", "\"constantLine(%s)\"", "%", "str", "(", "value", ")", "start", "=", "int", "(", "epoch", "(", "requestContext", "[", "'startTime'", "]", ")", ")", "end", "=", "int", "(", "epoch", "(", "requestContext", "[", "'endTime'", "]", ")", ")", "step", "=", "int", "(", "(", "end", "-", "start", ")", "/", "2.0", ")", "series", "=", "TimeSeries", "(", "str", "(", "value", ")", ",", "start", ",", "end", ",", "step", ",", "[", "value", ",", "value", ",", "value", "]", ")", "series", ".", "pathExpression", "=", "name", "return", "[", "series", "]" ]
Takes a float F. Draws a horizontal line at value F across the graph. Example:: &target=constantLine(123.456)
[ "Takes", "a", "float", "F", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3278-L3295
2,372
brutasse/graphite-api
graphite_api/functions.py
aggregateLine
def aggregateLine(requestContext, seriesList, func='avg'): """ Takes a metric or wildcard seriesList and draws a horizontal line based on the function applied to each series. Note: By default, the graphite renderer consolidates data points by averaging data points over time. If you are using the 'min' or 'max' function for aggregateLine, this can cause an unusual gap in the line drawn by this function and the data itself. To fix this, you should use the consolidateBy() function with the same function argument you are using for aggregateLine. This will ensure that the proper data points are retained and the graph should line up correctly. Example:: &target=aggregateLine(server01.connections.total, 'avg') &target=aggregateLine(server*.connections.total, 'avg') """ t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax} if func not in t_funcs: raise ValueError("Invalid function %s" % func) results = [] for series in seriesList: value = t_funcs[func](series) if value is not None: name = 'aggregateLine(%s, %g)' % (series.name, value) else: name = 'aggregateLine(%s, None)' % (series.name) [series] = constantLine(requestContext, value) series.name = name series.pathExpression = series.name results.append(series) return results
python
def aggregateLine(requestContext, seriesList, func='avg'): t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax} if func not in t_funcs: raise ValueError("Invalid function %s" % func) results = [] for series in seriesList: value = t_funcs[func](series) if value is not None: name = 'aggregateLine(%s, %g)' % (series.name, value) else: name = 'aggregateLine(%s, None)' % (series.name) [series] = constantLine(requestContext, value) series.name = name series.pathExpression = series.name results.append(series) return results
[ "def", "aggregateLine", "(", "requestContext", ",", "seriesList", ",", "func", "=", "'avg'", ")", ":", "t_funcs", "=", "{", "'avg'", ":", "safeAvg", ",", "'min'", ":", "safeMin", ",", "'max'", ":", "safeMax", "}", "if", "func", "not", "in", "t_funcs", ":", "raise", "ValueError", "(", "\"Invalid function %s\"", "%", "func", ")", "results", "=", "[", "]", "for", "series", "in", "seriesList", ":", "value", "=", "t_funcs", "[", "func", "]", "(", "series", ")", "if", "value", "is", "not", "None", ":", "name", "=", "'aggregateLine(%s, %g)'", "%", "(", "series", ".", "name", ",", "value", ")", "else", ":", "name", "=", "'aggregateLine(%s, None)'", "%", "(", "series", ".", "name", ")", "[", "series", "]", "=", "constantLine", "(", "requestContext", ",", "value", ")", "series", ".", "name", "=", "name", "series", ".", "pathExpression", "=", "series", ".", "name", "results", ".", "append", "(", "series", ")", "return", "results" ]
Takes a metric or wildcard seriesList and draws a horizontal line based on the function applied to each series. Note: By default, the graphite renderer consolidates data points by averaging data points over time. If you are using the 'min' or 'max' function for aggregateLine, this can cause an unusual gap in the line drawn by this function and the data itself. To fix this, you should use the consolidateBy() function with the same function argument you are using for aggregateLine. This will ensure that the proper data points are retained and the graph should line up correctly. Example:: &target=aggregateLine(server01.connections.total, 'avg') &target=aggregateLine(server*.connections.total, 'avg')
[ "Takes", "a", "metric", "or", "wildcard", "seriesList", "and", "draws", "a", "horizontal", "line", "based", "on", "the", "function", "applied", "to", "each", "series", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3298-L3335
2,373
brutasse/graphite-api
graphite_api/functions.py
verticalLine
def verticalLine(requestContext, ts, label=None, color=None): """ Takes a timestamp string ts. Draws a vertical line at the designated timestamp with optional 'label' and 'color'. Supported timestamp formats include both relative (e.g. -3h) and absolute (e.g. 16:00_20110501) strings, such as those used with ``from`` and ``until`` parameters. When set, the 'label' will appear in the graph legend. Note: Any timestamps defined outside the requested range will raise a 'ValueError' exception. Example:: &target=verticalLine("12:3420131108","event","blue") &target=verticalLine("16:00_20110501","event") &target=verticalLine("-5mins") """ ts = int(epoch(parseATTime(ts, requestContext['tzinfo']))) start = int(epoch(requestContext['startTime'])) end = int(epoch(requestContext['endTime'])) if ts < start: raise ValueError("verticalLine(): timestamp %s exists " "before start of range" % ts) elif ts > end: raise ValueError("verticalLine(): timestamp %s exists " "after end of range" % ts) start = end = ts step = 1.0 series = TimeSeries(label, start, end, step, [1.0, 1.0]) series.options['drawAsInfinite'] = True if color: series.color = color return [series]
python
def verticalLine(requestContext, ts, label=None, color=None): ts = int(epoch(parseATTime(ts, requestContext['tzinfo']))) start = int(epoch(requestContext['startTime'])) end = int(epoch(requestContext['endTime'])) if ts < start: raise ValueError("verticalLine(): timestamp %s exists " "before start of range" % ts) elif ts > end: raise ValueError("verticalLine(): timestamp %s exists " "after end of range" % ts) start = end = ts step = 1.0 series = TimeSeries(label, start, end, step, [1.0, 1.0]) series.options['drawAsInfinite'] = True if color: series.color = color return [series]
[ "def", "verticalLine", "(", "requestContext", ",", "ts", ",", "label", "=", "None", ",", "color", "=", "None", ")", ":", "ts", "=", "int", "(", "epoch", "(", "parseATTime", "(", "ts", ",", "requestContext", "[", "'tzinfo'", "]", ")", ")", ")", "start", "=", "int", "(", "epoch", "(", "requestContext", "[", "'startTime'", "]", ")", ")", "end", "=", "int", "(", "epoch", "(", "requestContext", "[", "'endTime'", "]", ")", ")", "if", "ts", "<", "start", ":", "raise", "ValueError", "(", "\"verticalLine(): timestamp %s exists \"", "\"before start of range\"", "%", "ts", ")", "elif", "ts", ">", "end", ":", "raise", "ValueError", "(", "\"verticalLine(): timestamp %s exists \"", "\"after end of range\"", "%", "ts", ")", "start", "=", "end", "=", "ts", "step", "=", "1.0", "series", "=", "TimeSeries", "(", "label", ",", "start", ",", "end", ",", "step", ",", "[", "1.0", ",", "1.0", "]", ")", "series", ".", "options", "[", "'drawAsInfinite'", "]", "=", "True", "if", "color", ":", "series", ".", "color", "=", "color", "return", "[", "series", "]" ]
Takes a timestamp string ts. Draws a vertical line at the designated timestamp with optional 'label' and 'color'. Supported timestamp formats include both relative (e.g. -3h) and absolute (e.g. 16:00_20110501) strings, such as those used with ``from`` and ``until`` parameters. When set, the 'label' will appear in the graph legend. Note: Any timestamps defined outside the requested range will raise a 'ValueError' exception. Example:: &target=verticalLine("12:3420131108","event","blue") &target=verticalLine("16:00_20110501","event") &target=verticalLine("-5mins")
[ "Takes", "a", "timestamp", "string", "ts", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3338-L3373
2,374
brutasse/graphite-api
graphite_api/functions.py
transformNull
def transformNull(requestContext, seriesList, default=0, referenceSeries=None): """ Takes a metric or wildcard seriesList and replaces null values with the value specified by `default`. The value 0 used if not specified. The optional referenceSeries, if specified, is a metric or wildcard series list that governs which time intervals nulls should be replaced. If specified, nulls are replaced only in intervals where a non-null is found for the same interval in any of referenceSeries. This method compliments the drawNullAsZero function in graphical mode, but also works in text-only mode. Example:: &target=transformNull(webapp.pages.*.views,-1) This would take any page that didn't have values and supply negative 1 as a default. Any other numeric value may be used as well. """ def transform(v, d): if v is None: return d else: return v if referenceSeries: defaults = [default if any(v is not None for v in x) else None for x in zip_longest(*referenceSeries)] else: defaults = None for series in seriesList: if referenceSeries: series.name = "transformNull(%s,%g,referenceSeries)" % ( series.name, default) else: series.name = "transformNull(%s,%g)" % (series.name, default) series.pathExpression = series.name if defaults: values = [transform(v, d) for v, d in zip_longest(series, defaults)] else: values = [transform(v, default) for v in series] series.extend(values) del series[:len(values)] return seriesList
python
def transformNull(requestContext, seriesList, default=0, referenceSeries=None): def transform(v, d): if v is None: return d else: return v if referenceSeries: defaults = [default if any(v is not None for v in x) else None for x in zip_longest(*referenceSeries)] else: defaults = None for series in seriesList: if referenceSeries: series.name = "transformNull(%s,%g,referenceSeries)" % ( series.name, default) else: series.name = "transformNull(%s,%g)" % (series.name, default) series.pathExpression = series.name if defaults: values = [transform(v, d) for v, d in zip_longest(series, defaults)] else: values = [transform(v, default) for v in series] series.extend(values) del series[:len(values)] return seriesList
[ "def", "transformNull", "(", "requestContext", ",", "seriesList", ",", "default", "=", "0", ",", "referenceSeries", "=", "None", ")", ":", "def", "transform", "(", "v", ",", "d", ")", ":", "if", "v", "is", "None", ":", "return", "d", "else", ":", "return", "v", "if", "referenceSeries", ":", "defaults", "=", "[", "default", "if", "any", "(", "v", "is", "not", "None", "for", "v", "in", "x", ")", "else", "None", "for", "x", "in", "zip_longest", "(", "*", "referenceSeries", ")", "]", "else", ":", "defaults", "=", "None", "for", "series", "in", "seriesList", ":", "if", "referenceSeries", ":", "series", ".", "name", "=", "\"transformNull(%s,%g,referenceSeries)\"", "%", "(", "series", ".", "name", ",", "default", ")", "else", ":", "series", ".", "name", "=", "\"transformNull(%s,%g)\"", "%", "(", "series", ".", "name", ",", "default", ")", "series", ".", "pathExpression", "=", "series", ".", "name", "if", "defaults", ":", "values", "=", "[", "transform", "(", "v", ",", "d", ")", "for", "v", ",", "d", "in", "zip_longest", "(", "series", ",", "defaults", ")", "]", "else", ":", "values", "=", "[", "transform", "(", "v", ",", "default", ")", "for", "v", "in", "series", "]", "series", ".", "extend", "(", "values", ")", "del", "series", "[", ":", "len", "(", "values", ")", "]", "return", "seriesList" ]
Takes a metric or wildcard seriesList and replaces null values with the value specified by `default`. The value 0 used if not specified. The optional referenceSeries, if specified, is a metric or wildcard series list that governs which time intervals nulls should be replaced. If specified, nulls are replaced only in intervals where a non-null is found for the same interval in any of referenceSeries. This method compliments the drawNullAsZero function in graphical mode, but also works in text-only mode. Example:: &target=transformNull(webapp.pages.*.views,-1) This would take any page that didn't have values and supply negative 1 as a default. Any other numeric value may be used as well.
[ "Takes", "a", "metric", "or", "wildcard", "seriesList", "and", "replaces", "null", "values", "with", "the", "value", "specified", "by", "default", ".", "The", "value", "0", "used", "if", "not", "specified", ".", "The", "optional", "referenceSeries", "if", "specified", "is", "a", "metric", "or", "wildcard", "series", "list", "that", "governs", "which", "time", "intervals", "nulls", "should", "be", "replaced", ".", "If", "specified", "nulls", "are", "replaced", "only", "in", "intervals", "where", "a", "non", "-", "null", "is", "found", "for", "the", "same", "interval", "in", "any", "of", "referenceSeries", ".", "This", "method", "compliments", "the", "drawNullAsZero", "function", "in", "graphical", "mode", "but", "also", "works", "in", "text", "-", "only", "mode", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3396-L3440
2,375
brutasse/graphite-api
graphite_api/functions.py
countSeries
def countSeries(requestContext, *seriesLists): """ Draws a horizontal line representing the number of nodes found in the seriesList. Example:: &target=countSeries(carbon.agents.*.*) """ if not seriesLists or not any(seriesLists): series = constantLine(requestContext, 0).pop() series.pathExpression = "countSeries()" else: seriesList, start, end, step = normalize(seriesLists) name = "countSeries(%s)" % formatPathExpressions(seriesList) values = (int(len(row)) for row in zip_longest(*seriesList)) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series]
python
def countSeries(requestContext, *seriesLists): if not seriesLists or not any(seriesLists): series = constantLine(requestContext, 0).pop() series.pathExpression = "countSeries()" else: seriesList, start, end, step = normalize(seriesLists) name = "countSeries(%s)" % formatPathExpressions(seriesList) values = (int(len(row)) for row in zip_longest(*seriesList)) series = TimeSeries(name, start, end, step, values) series.pathExpression = name return [series]
[ "def", "countSeries", "(", "requestContext", ",", "*", "seriesLists", ")", ":", "if", "not", "seriesLists", "or", "not", "any", "(", "seriesLists", ")", ":", "series", "=", "constantLine", "(", "requestContext", ",", "0", ")", ".", "pop", "(", ")", "series", ".", "pathExpression", "=", "\"countSeries()\"", "else", ":", "seriesList", ",", "start", ",", "end", ",", "step", "=", "normalize", "(", "seriesLists", ")", "name", "=", "\"countSeries(%s)\"", "%", "formatPathExpressions", "(", "seriesList", ")", "values", "=", "(", "int", "(", "len", "(", "row", ")", ")", "for", "row", "in", "zip_longest", "(", "*", "seriesList", ")", ")", "series", "=", "TimeSeries", "(", "name", ",", "start", ",", "end", ",", "step", ",", "values", ")", "series", ".", "pathExpression", "=", "name", "return", "[", "series", "]" ]
Draws a horizontal line representing the number of nodes found in the seriesList. Example:: &target=countSeries(carbon.agents.*.*)
[ "Draws", "a", "horizontal", "line", "representing", "the", "number", "of", "nodes", "found", "in", "the", "seriesList", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3499-L3519
2,376
brutasse/graphite-api
graphite_api/functions.py
group
def group(requestContext, *seriesLists): """ Takes an arbitrary number of seriesLists and adds them to a single seriesList. This is used to pass multiple seriesLists to a function which only takes one. """ seriesGroup = [] for s in seriesLists: seriesGroup.extend(s) return seriesGroup
python
def group(requestContext, *seriesLists): seriesGroup = [] for s in seriesLists: seriesGroup.extend(s) return seriesGroup
[ "def", "group", "(", "requestContext", ",", "*", "seriesLists", ")", ":", "seriesGroup", "=", "[", "]", "for", "s", "in", "seriesLists", ":", "seriesGroup", ".", "extend", "(", "s", ")", "return", "seriesGroup" ]
Takes an arbitrary number of seriesLists and adds them to a single seriesList. This is used to pass multiple seriesLists to a function which only takes one.
[ "Takes", "an", "arbitrary", "number", "of", "seriesLists", "and", "adds", "them", "to", "a", "single", "seriesList", ".", "This", "is", "used", "to", "pass", "multiple", "seriesLists", "to", "a", "function", "which", "only", "takes", "one", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3522-L3532
2,377
brutasse/graphite-api
graphite_api/functions.py
groupByNode
def groupByNode(requestContext, seriesList, nodeNum, callback): """ Takes a serieslist and maps a callback to subgroups within as defined by a common node. Example:: &target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries") Would return multiple series which are each the result of applying the "sumSeries" function to groups joined on the second node (0 indexed) resulting in a list of targets like:: sumSeries(ganglia.by-function.server1.*.cpu.load5), sumSeries(ganglia.by-function.server2.*.cpu.load5),... """ return groupByNodes(requestContext, seriesList, callback, nodeNum)
python
def groupByNode(requestContext, seriesList, nodeNum, callback): return groupByNodes(requestContext, seriesList, callback, nodeNum)
[ "def", "groupByNode", "(", "requestContext", ",", "seriesList", ",", "nodeNum", ",", "callback", ")", ":", "return", "groupByNodes", "(", "requestContext", ",", "seriesList", ",", "callback", ",", "nodeNum", ")" ]
Takes a serieslist and maps a callback to subgroups within as defined by a common node. Example:: &target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries") Would return multiple series which are each the result of applying the "sumSeries" function to groups joined on the second node (0 indexed) resulting in a list of targets like:: sumSeries(ganglia.by-function.server1.*.cpu.load5), sumSeries(ganglia.by-function.server2.*.cpu.load5),...
[ "Takes", "a", "serieslist", "and", "maps", "a", "callback", "to", "subgroups", "within", "as", "defined", "by", "a", "common", "node", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3680-L3697
2,378
brutasse/graphite-api
graphite_api/functions.py
groupByNodes
def groupByNodes(requestContext, seriesList, callback, *nodes): """ Takes a serieslist and maps a callback to subgroups within as defined by multiple nodes. Example:: &target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4) Would return multiple series which are each the result of applying the "sumSeries" function to groups joined on the nodes' list (0 indexed) resulting in a list of targets like:: sumSeries(ganglia.server1.*.cpu.load5), sumSeries(ganglia.server1.*.cpu.load10), sumSeries(ganglia.server1.*.cpu.load15), sumSeries(ganglia.server2.*.cpu.load5), sumSeries(ganglia.server2.*.cpu.load10), sumSeries(ganglia.server2.*.cpu.load15), ... """ from .app import app metaSeries = {} keys = [] if isinstance(nodes, int): nodes = [nodes] for series in seriesList: key = '.'.join(series.name.split(".")[n] for n in nodes) if key not in metaSeries: metaSeries[key] = [series] keys.append(key) else: metaSeries[key].append(series) for key in metaSeries: metaSeries[key] = app.functions[callback](requestContext, metaSeries[key])[0] metaSeries[key].name = key return [metaSeries[key] for key in keys]
python
def groupByNodes(requestContext, seriesList, callback, *nodes): from .app import app metaSeries = {} keys = [] if isinstance(nodes, int): nodes = [nodes] for series in seriesList: key = '.'.join(series.name.split(".")[n] for n in nodes) if key not in metaSeries: metaSeries[key] = [series] keys.append(key) else: metaSeries[key].append(series) for key in metaSeries: metaSeries[key] = app.functions[callback](requestContext, metaSeries[key])[0] metaSeries[key].name = key return [metaSeries[key] for key in keys]
[ "def", "groupByNodes", "(", "requestContext", ",", "seriesList", ",", "callback", ",", "*", "nodes", ")", ":", "from", ".", "app", "import", "app", "metaSeries", "=", "{", "}", "keys", "=", "[", "]", "if", "isinstance", "(", "nodes", ",", "int", ")", ":", "nodes", "=", "[", "nodes", "]", "for", "series", "in", "seriesList", ":", "key", "=", "'.'", ".", "join", "(", "series", ".", "name", ".", "split", "(", "\".\"", ")", "[", "n", "]", "for", "n", "in", "nodes", ")", "if", "key", "not", "in", "metaSeries", ":", "metaSeries", "[", "key", "]", "=", "[", "series", "]", "keys", ".", "append", "(", "key", ")", "else", ":", "metaSeries", "[", "key", "]", ".", "append", "(", "series", ")", "for", "key", "in", "metaSeries", ":", "metaSeries", "[", "key", "]", "=", "app", ".", "functions", "[", "callback", "]", "(", "requestContext", ",", "metaSeries", "[", "key", "]", ")", "[", "0", "]", "metaSeries", "[", "key", "]", ".", "name", "=", "key", "return", "[", "metaSeries", "[", "key", "]", "for", "key", "in", "keys", "]" ]
Takes a serieslist and maps a callback to subgroups within as defined by multiple nodes. Example:: &target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4) Would return multiple series which are each the result of applying the "sumSeries" function to groups joined on the nodes' list (0 indexed) resulting in a list of targets like:: sumSeries(ganglia.server1.*.cpu.load5), sumSeries(ganglia.server1.*.cpu.load10), sumSeries(ganglia.server1.*.cpu.load15), sumSeries(ganglia.server2.*.cpu.load5), sumSeries(ganglia.server2.*.cpu.load10), sumSeries(ganglia.server2.*.cpu.load15), ...
[ "Takes", "a", "serieslist", "and", "maps", "a", "callback", "to", "subgroups", "within", "as", "defined", "by", "multiple", "nodes", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3700-L3737
2,379
brutasse/graphite-api
graphite_api/functions.py
exclude
def exclude(requestContext, seriesList, pattern): """ Takes a metric or a wildcard seriesList, followed by a regular expression in double quotes. Excludes metrics that match the regular expression. Example:: &target=exclude(servers*.instance*.threads.busy,"server02") """ regex = re.compile(pattern) return [s for s in seriesList if not regex.search(s.name)]
python
def exclude(requestContext, seriesList, pattern): regex = re.compile(pattern) return [s for s in seriesList if not regex.search(s.name)]
[ "def", "exclude", "(", "requestContext", ",", "seriesList", ",", "pattern", ")", ":", "regex", "=", "re", ".", "compile", "(", "pattern", ")", "return", "[", "s", "for", "s", "in", "seriesList", "if", "not", "regex", ".", "search", "(", "s", ".", "name", ")", "]" ]
Takes a metric or a wildcard seriesList, followed by a regular expression in double quotes. Excludes metrics that match the regular expression. Example:: &target=exclude(servers*.instance*.threads.busy,"server02")
[ "Takes", "a", "metric", "or", "a", "wildcard", "seriesList", "followed", "by", "a", "regular", "expression", "in", "double", "quotes", ".", "Excludes", "metrics", "that", "match", "the", "regular", "expression", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3740-L3750
2,380
brutasse/graphite-api
graphite_api/functions.py
summarize
def summarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False): """ Summarize the data into interval buckets of a certain size. By default, the contents of each interval bucket are summed together. This is useful for counters where each increment represents a discrete event and retrieving a "per X" value requires summing all the events in that interval. Specifying 'avg' instead will return the mean for each bucket, which can be more useful when the value is a gauge that represents a certain value in time. 'max', 'min' or 'last' can also be specified. By default, buckets are calculated by rounding to the nearest interval. This works well for intervals smaller than a day. For example, 22:32 will end up in the bucket 22:00-23:00 when the interval=1hour. Passing alignToFrom=true will instead create buckets starting at the from time. In this case, the bucket for 22:32 depends on the from time. If from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30. Example:: # total errors per hour &target=summarize(counter.errors, "1hour") # new users per week &target=summarize(nonNegativeDerivative(gauge.num_users), "1week") # average queue size per hour &target=summarize(queue.size, "1hour", "avg") # maximum queue size during each hour &target=summarize(queue.size, "1hour", "max") # 2010 Q1-4 &target=summarize(metric, "13week", "avg", true)&from=midnight+20100101 """ results = [] delta = parseTimeOffset(intervalString) interval = to_seconds(delta) for series in seriesList: buckets = {} timestamps = range(int(series.start), int(series.end) + 1, int(series.step)) datapoints = zip_longest(timestamps, series) for timestamp, value in datapoints: if timestamp is None: continue if alignToFrom: bucketInterval = int((timestamp - series.start) / interval) else: bucketInterval = timestamp - (timestamp % interval) if bucketInterval not in buckets: buckets[bucketInterval] = [] if value is not None: buckets[bucketInterval].append(value) if alignToFrom: newStart = 
series.start newEnd = series.end else: newStart = series.start - (series.start % interval) newEnd = series.end - (series.end % interval) + interval newValues = [] for timestamp in range(newStart, newEnd, interval): if alignToFrom: newEnd = timestamp bucketInterval = int((timestamp - series.start) / interval) else: bucketInterval = timestamp - (timestamp % interval) bucket = buckets.get(bucketInterval, []) if bucket: if func == 'avg': newValues.append(float(sum(bucket)) / float(len(bucket))) elif func == 'last': newValues.append(bucket[len(bucket)-1]) elif func == 'max': newValues.append(max(bucket)) elif func == 'min': newValues.append(min(bucket)) else: newValues.append(sum(bucket)) else: newValues.append(None) if alignToFrom: newEnd += interval newName = "summarize(%s, \"%s\", \"%s\"%s)" % ( series.name, intervalString, func, alignToFrom and ", true" or "") newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues) newSeries.pathExpression = newName results.append(newSeries) return results
python
def summarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False): results = [] delta = parseTimeOffset(intervalString) interval = to_seconds(delta) for series in seriesList: buckets = {} timestamps = range(int(series.start), int(series.end) + 1, int(series.step)) datapoints = zip_longest(timestamps, series) for timestamp, value in datapoints: if timestamp is None: continue if alignToFrom: bucketInterval = int((timestamp - series.start) / interval) else: bucketInterval = timestamp - (timestamp % interval) if bucketInterval not in buckets: buckets[bucketInterval] = [] if value is not None: buckets[bucketInterval].append(value) if alignToFrom: newStart = series.start newEnd = series.end else: newStart = series.start - (series.start % interval) newEnd = series.end - (series.end % interval) + interval newValues = [] for timestamp in range(newStart, newEnd, interval): if alignToFrom: newEnd = timestamp bucketInterval = int((timestamp - series.start) / interval) else: bucketInterval = timestamp - (timestamp % interval) bucket = buckets.get(bucketInterval, []) if bucket: if func == 'avg': newValues.append(float(sum(bucket)) / float(len(bucket))) elif func == 'last': newValues.append(bucket[len(bucket)-1]) elif func == 'max': newValues.append(max(bucket)) elif func == 'min': newValues.append(min(bucket)) else: newValues.append(sum(bucket)) else: newValues.append(None) if alignToFrom: newEnd += interval newName = "summarize(%s, \"%s\", \"%s\"%s)" % ( series.name, intervalString, func, alignToFrom and ", true" or "") newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues) newSeries.pathExpression = newName results.append(newSeries) return results
[ "def", "summarize", "(", "requestContext", ",", "seriesList", ",", "intervalString", ",", "func", "=", "'sum'", ",", "alignToFrom", "=", "False", ")", ":", "results", "=", "[", "]", "delta", "=", "parseTimeOffset", "(", "intervalString", ")", "interval", "=", "to_seconds", "(", "delta", ")", "for", "series", "in", "seriesList", ":", "buckets", "=", "{", "}", "timestamps", "=", "range", "(", "int", "(", "series", ".", "start", ")", ",", "int", "(", "series", ".", "end", ")", "+", "1", ",", "int", "(", "series", ".", "step", ")", ")", "datapoints", "=", "zip_longest", "(", "timestamps", ",", "series", ")", "for", "timestamp", ",", "value", "in", "datapoints", ":", "if", "timestamp", "is", "None", ":", "continue", "if", "alignToFrom", ":", "bucketInterval", "=", "int", "(", "(", "timestamp", "-", "series", ".", "start", ")", "/", "interval", ")", "else", ":", "bucketInterval", "=", "timestamp", "-", "(", "timestamp", "%", "interval", ")", "if", "bucketInterval", "not", "in", "buckets", ":", "buckets", "[", "bucketInterval", "]", "=", "[", "]", "if", "value", "is", "not", "None", ":", "buckets", "[", "bucketInterval", "]", ".", "append", "(", "value", ")", "if", "alignToFrom", ":", "newStart", "=", "series", ".", "start", "newEnd", "=", "series", ".", "end", "else", ":", "newStart", "=", "series", ".", "start", "-", "(", "series", ".", "start", "%", "interval", ")", "newEnd", "=", "series", ".", "end", "-", "(", "series", ".", "end", "%", "interval", ")", "+", "interval", "newValues", "=", "[", "]", "for", "timestamp", "in", "range", "(", "newStart", ",", "newEnd", ",", "interval", ")", ":", "if", "alignToFrom", ":", "newEnd", "=", "timestamp", "bucketInterval", "=", "int", "(", "(", "timestamp", "-", "series", ".", "start", ")", "/", "interval", ")", "else", ":", "bucketInterval", "=", "timestamp", "-", "(", "timestamp", "%", "interval", ")", "bucket", "=", "buckets", ".", "get", "(", "bucketInterval", ",", "[", "]", ")", "if", "bucket", ":", "if", "func", "==", 
"'avg'", ":", "newValues", ".", "append", "(", "float", "(", "sum", "(", "bucket", ")", ")", "/", "float", "(", "len", "(", "bucket", ")", ")", ")", "elif", "func", "==", "'last'", ":", "newValues", ".", "append", "(", "bucket", "[", "len", "(", "bucket", ")", "-", "1", "]", ")", "elif", "func", "==", "'max'", ":", "newValues", ".", "append", "(", "max", "(", "bucket", ")", ")", "elif", "func", "==", "'min'", ":", "newValues", ".", "append", "(", "min", "(", "bucket", ")", ")", "else", ":", "newValues", ".", "append", "(", "sum", "(", "bucket", ")", ")", "else", ":", "newValues", ".", "append", "(", "None", ")", "if", "alignToFrom", ":", "newEnd", "+=", "interval", "newName", "=", "\"summarize(%s, \\\"%s\\\", \\\"%s\\\"%s)\"", "%", "(", "series", ".", "name", ",", "intervalString", ",", "func", ",", "alignToFrom", "and", "\", true\"", "or", "\"\"", ")", "newSeries", "=", "TimeSeries", "(", "newName", ",", "newStart", ",", "newEnd", ",", "interval", ",", "newValues", ")", "newSeries", ".", "pathExpression", "=", "newName", "results", ".", "append", "(", "newSeries", ")", "return", "results" ]
Summarize the data into interval buckets of a certain size. By default, the contents of each interval bucket are summed together. This is useful for counters where each increment represents a discrete event and retrieving a "per X" value requires summing all the events in that interval. Specifying 'avg' instead will return the mean for each bucket, which can be more useful when the value is a gauge that represents a certain value in time. 'max', 'min' or 'last' can also be specified. By default, buckets are calculated by rounding to the nearest interval. This works well for intervals smaller than a day. For example, 22:32 will end up in the bucket 22:00-23:00 when the interval=1hour. Passing alignToFrom=true will instead create buckets starting at the from time. In this case, the bucket for 22:32 depends on the from time. If from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30. Example:: # total errors per hour &target=summarize(counter.errors, "1hour") # new users per week &target=summarize(nonNegativeDerivative(gauge.num_users), "1week") # average queue size per hour &target=summarize(queue.size, "1hour", "avg") # maximum queue size during each hour &target=summarize(queue.size, "1hour", "max") # 2010 Q1-4 &target=summarize(metric, "13week", "avg", true)&from=midnight+20100101
[ "Summarize", "the", "data", "into", "interval", "buckets", "of", "a", "certain", "size", "." ]
0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L3857-L3963
2,381
opencobra/memote
memote/experimental/medium.py
Medium.apply
def apply(self, model): """Set the defined medium on the given model.""" model.medium = {row.exchange: row.uptake for row in self.data.itertuples(index=False)}
python
def apply(self, model): model.medium = {row.exchange: row.uptake for row in self.data.itertuples(index=False)}
[ "def", "apply", "(", "self", ",", "model", ")", ":", "model", ".", "medium", "=", "{", "row", ".", "exchange", ":", "row", ".", "uptake", "for", "row", "in", "self", ".", "data", ".", "itertuples", "(", "index", "=", "False", ")", "}" ]
Set the defined medium on the given model.
[ "Set", "the", "defined", "medium", "on", "the", "given", "model", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/medium.py#L56-L59
2,382
opencobra/memote
memote/suite/results/result.py
MemoteResult.add_environment_information
def add_environment_information(meta): """Record environment information.""" meta["timestamp"] = datetime.utcnow().isoformat(" ") meta["platform"] = platform.system() meta["release"] = platform.release() meta["python"] = platform.python_version() meta["packages"] = get_pkg_info("memote")
python
def add_environment_information(meta): meta["timestamp"] = datetime.utcnow().isoformat(" ") meta["platform"] = platform.system() meta["release"] = platform.release() meta["python"] = platform.python_version() meta["packages"] = get_pkg_info("memote")
[ "def", "add_environment_information", "(", "meta", ")", ":", "meta", "[", "\"timestamp\"", "]", "=", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", "\" \"", ")", "meta", "[", "\"platform\"", "]", "=", "platform", ".", "system", "(", ")", "meta", "[", "\"release\"", "]", "=", "platform", ".", "release", "(", ")", "meta", "[", "\"python\"", "]", "=", "platform", ".", "python_version", "(", ")", "meta", "[", "\"packages\"", "]", "=", "get_pkg_info", "(", "\"memote\"", ")" ]
Record environment information.
[ "Record", "environment", "information", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/results/result.py#L46-L52
2,383
opencobra/memote
memote/support/helpers.py
find_transported_elements
def find_transported_elements(rxn): """ Return a dictionary showing the amount of transported elements of a rxn. Collects the elements for each metabolite participating in a reaction, multiplies the amount by the metabolite's stoichiometry in the reaction and bins the result according to the compartment that metabolite is in. This produces a dictionary of dictionaries such as this ``{'p': {'C': -1, 'H': -4}, c: {'C': 1, 'H': 4}}`` which shows the transported entities. This dictionary is then simplified to only include the non-zero elements of one single compartment i.e. showing the precise elements that are transported. Parameters ---------- rxn : cobra.Reaction Any cobra.Reaction containing metabolites. """ element_dist = defaultdict() # Collecting elements for each metabolite. for met in rxn.metabolites: if met.compartment not in element_dist: # Multiplication by the metabolite stoichiometry. element_dist[met.compartment] = \ {k: v * rxn.metabolites[met] for (k, v) in iteritems(met.elements)} else: x = {k: v * rxn.metabolites[met] for (k, v) in iteritems(met.elements)} y = element_dist[met.compartment] element_dist[met.compartment] = \ {k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)} delta_dict = defaultdict() # Simplification of the resulting dictionary of dictionaries. for elements in itervalues(element_dist): delta_dict.update(elements) # Only non-zero values get included in the returned delta-dict. delta_dict = {k: abs(v) for (k, v) in iteritems(delta_dict) if v != 0} return delta_dict
python
def find_transported_elements(rxn): element_dist = defaultdict() # Collecting elements for each metabolite. for met in rxn.metabolites: if met.compartment not in element_dist: # Multiplication by the metabolite stoichiometry. element_dist[met.compartment] = \ {k: v * rxn.metabolites[met] for (k, v) in iteritems(met.elements)} else: x = {k: v * rxn.metabolites[met] for (k, v) in iteritems(met.elements)} y = element_dist[met.compartment] element_dist[met.compartment] = \ {k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y)} delta_dict = defaultdict() # Simplification of the resulting dictionary of dictionaries. for elements in itervalues(element_dist): delta_dict.update(elements) # Only non-zero values get included in the returned delta-dict. delta_dict = {k: abs(v) for (k, v) in iteritems(delta_dict) if v != 0} return delta_dict
[ "def", "find_transported_elements", "(", "rxn", ")", ":", "element_dist", "=", "defaultdict", "(", ")", "# Collecting elements for each metabolite.", "for", "met", "in", "rxn", ".", "metabolites", ":", "if", "met", ".", "compartment", "not", "in", "element_dist", ":", "# Multiplication by the metabolite stoichiometry.", "element_dist", "[", "met", ".", "compartment", "]", "=", "{", "k", ":", "v", "*", "rxn", ".", "metabolites", "[", "met", "]", "for", "(", "k", ",", "v", ")", "in", "iteritems", "(", "met", ".", "elements", ")", "}", "else", ":", "x", "=", "{", "k", ":", "v", "*", "rxn", ".", "metabolites", "[", "met", "]", "for", "(", "k", ",", "v", ")", "in", "iteritems", "(", "met", ".", "elements", ")", "}", "y", "=", "element_dist", "[", "met", ".", "compartment", "]", "element_dist", "[", "met", ".", "compartment", "]", "=", "{", "k", ":", "x", ".", "get", "(", "k", ",", "0", ")", "+", "y", ".", "get", "(", "k", ",", "0", ")", "for", "k", "in", "set", "(", "x", ")", "|", "set", "(", "y", ")", "}", "delta_dict", "=", "defaultdict", "(", ")", "# Simplification of the resulting dictionary of dictionaries.", "for", "elements", "in", "itervalues", "(", "element_dist", ")", ":", "delta_dict", ".", "update", "(", "elements", ")", "# Only non-zero values get included in the returned delta-dict.", "delta_dict", "=", "{", "k", ":", "abs", "(", "v", ")", "for", "(", "k", ",", "v", ")", "in", "iteritems", "(", "delta_dict", ")", "if", "v", "!=", "0", "}", "return", "delta_dict" ]
Return a dictionary showing the amount of transported elements of a rxn. Collects the elements for each metabolite participating in a reaction, multiplies the amount by the metabolite's stoichiometry in the reaction and bins the result according to the compartment that metabolite is in. This produces a dictionary of dictionaries such as this ``{'p': {'C': -1, 'H': -4}, c: {'C': 1, 'H': 4}}`` which shows the transported entities. This dictionary is then simplified to only include the non-zero elements of one single compartment i.e. showing the precise elements that are transported. Parameters ---------- rxn : cobra.Reaction Any cobra.Reaction containing metabolites.
[ "Return", "a", "dictionary", "showing", "the", "amount", "of", "transported", "elements", "of", "a", "rxn", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L81-L120
2,384
opencobra/memote
memote/support/helpers.py
find_transport_reactions
def find_transport_reactions(model): """ Return a list of all transport reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- A transport reaction is defined as follows: 1. It contains metabolites from at least 2 compartments and 2. at least 1 metabolite undergoes no chemical reaction, i.e., the formula and/or annotation stays the same on both sides of the equation. A notable exception is transport via PTS, which also contains the following restriction: 3. The transported metabolite(s) are transported into a compartment through the exchange of a phosphate group. An example of transport via PTS would be pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c) Reactions similar to transport via PTS (referred to as "modified transport reactions") follow a similar pattern: A(x) + B-R(y) -> A-R(y) + B(y) Such modified transport reactions can be detected, but only when a formula field exists for all metabolites in a particular reaction. If this is not the case, transport reactions are identified through annotations, which cannot detect modified transport reactions. 
""" transport_reactions = [] transport_rxn_candidates = set(model.reactions) - set(model.boundary) \ - set(find_biomass_reaction(model)) transport_rxn_candidates = set( [rxn for rxn in transport_rxn_candidates if len(rxn.compartments) >= 2] ) # Add all labeled transport reactions sbo_matches = set([rxn for rxn in transport_rxn_candidates if rxn.annotation is not None and 'sbo' in rxn.annotation and rxn.annotation['sbo'] in TRANSPORT_RXN_SBO_TERMS]) if len(sbo_matches) > 0: transport_reactions += list(sbo_matches) # Find unlabeled transport reactions via formula or annotation checks for rxn in transport_rxn_candidates: # Check if metabolites have formula field rxn_mets = set([met.formula for met in rxn.metabolites]) if (None not in rxn_mets) and (len(rxn_mets) != 0): if is_transport_reaction_formulae(rxn): transport_reactions.append(rxn) elif is_transport_reaction_annotations(rxn): transport_reactions.append(rxn) return set(transport_reactions)
python
def find_transport_reactions(model): transport_reactions = [] transport_rxn_candidates = set(model.reactions) - set(model.boundary) \ - set(find_biomass_reaction(model)) transport_rxn_candidates = set( [rxn for rxn in transport_rxn_candidates if len(rxn.compartments) >= 2] ) # Add all labeled transport reactions sbo_matches = set([rxn for rxn in transport_rxn_candidates if rxn.annotation is not None and 'sbo' in rxn.annotation and rxn.annotation['sbo'] in TRANSPORT_RXN_SBO_TERMS]) if len(sbo_matches) > 0: transport_reactions += list(sbo_matches) # Find unlabeled transport reactions via formula or annotation checks for rxn in transport_rxn_candidates: # Check if metabolites have formula field rxn_mets = set([met.formula for met in rxn.metabolites]) if (None not in rxn_mets) and (len(rxn_mets) != 0): if is_transport_reaction_formulae(rxn): transport_reactions.append(rxn) elif is_transport_reaction_annotations(rxn): transport_reactions.append(rxn) return set(transport_reactions)
[ "def", "find_transport_reactions", "(", "model", ")", ":", "transport_reactions", "=", "[", "]", "transport_rxn_candidates", "=", "set", "(", "model", ".", "reactions", ")", "-", "set", "(", "model", ".", "boundary", ")", "-", "set", "(", "find_biomass_reaction", "(", "model", ")", ")", "transport_rxn_candidates", "=", "set", "(", "[", "rxn", "for", "rxn", "in", "transport_rxn_candidates", "if", "len", "(", "rxn", ".", "compartments", ")", ">=", "2", "]", ")", "# Add all labeled transport reactions", "sbo_matches", "=", "set", "(", "[", "rxn", "for", "rxn", "in", "transport_rxn_candidates", "if", "rxn", ".", "annotation", "is", "not", "None", "and", "'sbo'", "in", "rxn", ".", "annotation", "and", "rxn", ".", "annotation", "[", "'sbo'", "]", "in", "TRANSPORT_RXN_SBO_TERMS", "]", ")", "if", "len", "(", "sbo_matches", ")", ">", "0", ":", "transport_reactions", "+=", "list", "(", "sbo_matches", ")", "# Find unlabeled transport reactions via formula or annotation checks", "for", "rxn", "in", "transport_rxn_candidates", ":", "# Check if metabolites have formula field", "rxn_mets", "=", "set", "(", "[", "met", ".", "formula", "for", "met", "in", "rxn", ".", "metabolites", "]", ")", "if", "(", "None", "not", "in", "rxn_mets", ")", "and", "(", "len", "(", "rxn_mets", ")", "!=", "0", ")", ":", "if", "is_transport_reaction_formulae", "(", "rxn", ")", ":", "transport_reactions", ".", "append", "(", "rxn", ")", "elif", "is_transport_reaction_annotations", "(", "rxn", ")", ":", "transport_reactions", ".", "append", "(", "rxn", ")", "return", "set", "(", "transport_reactions", ")" ]
Return a list of all transport reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- A transport reaction is defined as follows: 1. It contains metabolites from at least 2 compartments and 2. at least 1 metabolite undergoes no chemical reaction, i.e., the formula and/or annotation stays the same on both sides of the equation. A notable exception is transport via PTS, which also contains the following restriction: 3. The transported metabolite(s) are transported into a compartment through the exchange of a phosphate group. An example of transport via PTS would be pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c) Reactions similar to transport via PTS (referred to as "modified transport reactions") follow a similar pattern: A(x) + B-R(y) -> A-R(y) + B(y) Such modified transport reactions can be detected, but only when a formula field exists for all metabolites in a particular reaction. If this is not the case, transport reactions are identified through annotations, which cannot detect modified transport reactions.
[ "Return", "a", "list", "of", "all", "transport", "reactions", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L124-L181
2,385
opencobra/memote
memote/support/helpers.py
find_converting_reactions
def find_converting_reactions(model, pair): """ Find all reactions which convert a given metabolite pair. Parameters ---------- model : cobra.Model The metabolic model under investigation. pair: tuple or list A pair of metabolite identifiers without compartment suffix. Returns ------- frozenset The set of reactions that have one of the pair on their left-hand side and the other on the right-hand side. """ first = set(find_met_in_model(model, pair[0])) second = set(find_met_in_model(model, pair[1])) hits = list() for rxn in model.reactions: # FIXME: Use `set.issubset` much more idiomatic. if len(first & set(rxn.reactants)) > 0 and len( second & set(rxn.products)) > 0: hits.append(rxn) elif len(first & set(rxn.products)) > 0 and len( second & set(rxn.reactants)) > 0: hits.append(rxn) return frozenset(hits)
python
def find_converting_reactions(model, pair): first = set(find_met_in_model(model, pair[0])) second = set(find_met_in_model(model, pair[1])) hits = list() for rxn in model.reactions: # FIXME: Use `set.issubset` much more idiomatic. if len(first & set(rxn.reactants)) > 0 and len( second & set(rxn.products)) > 0: hits.append(rxn) elif len(first & set(rxn.products)) > 0 and len( second & set(rxn.reactants)) > 0: hits.append(rxn) return frozenset(hits)
[ "def", "find_converting_reactions", "(", "model", ",", "pair", ")", ":", "first", "=", "set", "(", "find_met_in_model", "(", "model", ",", "pair", "[", "0", "]", ")", ")", "second", "=", "set", "(", "find_met_in_model", "(", "model", ",", "pair", "[", "1", "]", ")", ")", "hits", "=", "list", "(", ")", "for", "rxn", "in", "model", ".", "reactions", ":", "# FIXME: Use `set.issubset` much more idiomatic.", "if", "len", "(", "first", "&", "set", "(", "rxn", ".", "reactants", ")", ")", ">", "0", "and", "len", "(", "second", "&", "set", "(", "rxn", ".", "products", ")", ")", ">", "0", ":", "hits", ".", "append", "(", "rxn", ")", "elif", "len", "(", "first", "&", "set", "(", "rxn", ".", "products", ")", ")", ">", "0", "and", "len", "(", "second", "&", "set", "(", "rxn", ".", "reactants", ")", ")", ">", "0", ":", "hits", ".", "append", "(", "rxn", ")", "return", "frozenset", "(", "hits", ")" ]
Find all reactions which convert a given metabolite pair. Parameters ---------- model : cobra.Model The metabolic model under investigation. pair: tuple or list A pair of metabolite identifiers without compartment suffix. Returns ------- frozenset The set of reactions that have one of the pair on their left-hand side and the other on the right-hand side.
[ "Find", "all", "reactions", "which", "convert", "a", "given", "metabolite", "pair", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L249-L278
2,386
opencobra/memote
memote/support/helpers.py
find_demand_reactions
def find_demand_reactions(model): u""" Return a list of demand reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines demand reactions as: -- 'unbalanced network reactions that allow the accumulation of a compound' -- reactions that are chiefly added during the gap-filling process -- as a means of dealing with 'compounds that are known to be produced by the organism [..] (i) for which no information is available about their fractional distribution to the biomass or (ii) which may only be produced in some environmental conditions -- reactions with a formula such as: 'met_c -> ' Demand reactions differ from exchange reactions in that the metabolites are not removed from the extracellular environment, but from any of the organism's compartments. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203 """ try: extracellular = find_compartment_id_in_model(model, 'e') except KeyError: extracellular = None return find_boundary_types(model, 'demand', extracellular)
python
def find_demand_reactions(model): u""" Return a list of demand reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines demand reactions as: -- 'unbalanced network reactions that allow the accumulation of a compound' -- reactions that are chiefly added during the gap-filling process -- as a means of dealing with 'compounds that are known to be produced by the organism [..] (i) for which no information is available about their fractional distribution to the biomass or (ii) which may only be produced in some environmental conditions -- reactions with a formula such as: 'met_c -> ' Demand reactions differ from exchange reactions in that the metabolites are not removed from the extracellular environment, but from any of the organism's compartments. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203 """ try: extracellular = find_compartment_id_in_model(model, 'e') except KeyError: extracellular = None return find_boundary_types(model, 'demand', extracellular)
[ "def", "find_demand_reactions", "(", "model", ")", ":", "try", ":", "extracellular", "=", "find_compartment_id_in_model", "(", "model", ",", "'e'", ")", "except", "KeyError", ":", "extracellular", "=", "None", "return", "find_boundary_types", "(", "model", ",", "'demand'", ",", "extracellular", ")" ]
u""" Return a list of demand reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines demand reactions as: -- 'unbalanced network reactions that allow the accumulation of a compound' -- reactions that are chiefly added during the gap-filling process -- as a means of dealing with 'compounds that are known to be produced by the organism [..] (i) for which no information is available about their fractional distribution to the biomass or (ii) which may only be produced in some environmental conditions -- reactions with a formula such as: 'met_c -> ' Demand reactions differ from exchange reactions in that the metabolites are not removed from the extracellular environment, but from any of the organism's compartments. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203
[ "u", "Return", "a", "list", "of", "demand", "reactions", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L337-L373
2,387
opencobra/memote
memote/support/helpers.py
find_sink_reactions
def find_sink_reactions(model): u""" Return a list of sink reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines sink reactions as: -- 'similar to demand reactions' but reversible, thus able to supply the model with metabolites -- reactions that are chiefly added during the gap-filling process -- as a means of dealing with 'compounds that are produced by nonmetabolic cellular processes but that need to be metabolized' -- reactions with a formula such as: 'met_c <-> ' Sink reactions differ from exchange reactions in that the metabolites are not removed from the extracellular environment, but from any of the organism's compartments. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203 """ try: extracellular = find_compartment_id_in_model(model, 'e') except KeyError: extracellular = None return find_boundary_types(model, 'sink', extracellular)
python
def find_sink_reactions(model): u""" Return a list of sink reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines sink reactions as: -- 'similar to demand reactions' but reversible, thus able to supply the model with metabolites -- reactions that are chiefly added during the gap-filling process -- as a means of dealing with 'compounds that are produced by nonmetabolic cellular processes but that need to be metabolized' -- reactions with a formula such as: 'met_c <-> ' Sink reactions differ from exchange reactions in that the metabolites are not removed from the extracellular environment, but from any of the organism's compartments. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203 """ try: extracellular = find_compartment_id_in_model(model, 'e') except KeyError: extracellular = None return find_boundary_types(model, 'sink', extracellular)
[ "def", "find_sink_reactions", "(", "model", ")", ":", "try", ":", "extracellular", "=", "find_compartment_id_in_model", "(", "model", ",", "'e'", ")", "except", "KeyError", ":", "extracellular", "=", "None", "return", "find_boundary_types", "(", "model", ",", "'sink'", ",", "extracellular", ")" ]
u""" Return a list of sink reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines sink reactions as: -- 'similar to demand reactions' but reversible, thus able to supply the model with metabolites -- reactions that are chiefly added during the gap-filling process -- as a means of dealing with 'compounds that are produced by nonmetabolic cellular processes but that need to be metabolized' -- reactions with a formula such as: 'met_c <-> ' Sink reactions differ from exchange reactions in that the metabolites are not removed from the extracellular environment, but from any of the organism's compartments. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203
[ "u", "Return", "a", "list", "of", "sink", "reactions", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L377-L412
2,388
opencobra/memote
memote/support/helpers.py
find_exchange_rxns
def find_exchange_rxns(model): u""" Return a list of exchange reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines exchange reactions as: -- reactions that 'define the extracellular environment' -- 'unbalanced, extra-organism reactions that represent the supply to or removal of metabolites from the extra-organism "space"' -- reactions with a formula such as: 'met_e -> ' or ' -> met_e' or 'met_e <=> ' Exchange reactions differ from demand reactions in that the metabolites are removed from or added to the extracellular environment only. With this the uptake or secretion of a metabolite is modeled, respectively. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203 """ try: extracellular = find_compartment_id_in_model(model, 'e') except KeyError: extracellular = None return find_boundary_types(model, 'exchange', extracellular)
python
def find_exchange_rxns(model): u""" Return a list of exchange reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines exchange reactions as: -- reactions that 'define the extracellular environment' -- 'unbalanced, extra-organism reactions that represent the supply to or removal of metabolites from the extra-organism "space"' -- reactions with a formula such as: 'met_e -> ' or ' -> met_e' or 'met_e <=> ' Exchange reactions differ from demand reactions in that the metabolites are removed from or added to the extracellular environment only. With this the uptake or secretion of a metabolite is modeled, respectively. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203 """ try: extracellular = find_compartment_id_in_model(model, 'e') except KeyError: extracellular = None return find_boundary_types(model, 'exchange', extracellular)
[ "def", "find_exchange_rxns", "(", "model", ")", ":", "try", ":", "extracellular", "=", "find_compartment_id_in_model", "(", "model", ",", "'e'", ")", "except", "KeyError", ":", "extracellular", "=", "None", "return", "find_boundary_types", "(", "model", ",", "'exchange'", ",", "extracellular", ")" ]
u""" Return a list of exchange reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- [1] defines exchange reactions as: -- reactions that 'define the extracellular environment' -- 'unbalanced, extra-organism reactions that represent the supply to or removal of metabolites from the extra-organism "space"' -- reactions with a formula such as: 'met_e -> ' or ' -> met_e' or 'met_e <=> ' Exchange reactions differ from demand reactions in that the metabolites are removed from or added to the extracellular environment only. With this the uptake or secretion of a metabolite is modeled, respectively. References ---------- .. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for generating a high-quality genome-scale metabolic reconstruction. Nature protocols. Nature Publishing Group. http://doi.org/10.1038/nprot.2009.203
[ "u", "Return", "a", "list", "of", "exchange", "reactions", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L416-L450
2,389
opencobra/memote
memote/support/helpers.py
find_interchange_biomass_reactions
def find_interchange_biomass_reactions(model, biomass=None): """ Return the set of all transport, boundary, and biomass reactions. These reactions are either pseudo-reactions, or incorporated to allow metabolites to pass between compartments. Some tests focus on purely metabolic reactions and hence exclude this set. Parameters ---------- model : cobra.Model The metabolic model under investigation. biomass : list or cobra.Reaction, optional A list of cobrapy biomass reactions. """ boundary = set(model.boundary) transporters = find_transport_reactions(model) if biomass is None: biomass = set(find_biomass_reaction(model)) return boundary | transporters | biomass
python
def find_interchange_biomass_reactions(model, biomass=None): boundary = set(model.boundary) transporters = find_transport_reactions(model) if biomass is None: biomass = set(find_biomass_reaction(model)) return boundary | transporters | biomass
[ "def", "find_interchange_biomass_reactions", "(", "model", ",", "biomass", "=", "None", ")", ":", "boundary", "=", "set", "(", "model", ".", "boundary", ")", "transporters", "=", "find_transport_reactions", "(", "model", ")", "if", "biomass", "is", "None", ":", "biomass", "=", "set", "(", "find_biomass_reaction", "(", "model", ")", ")", "return", "boundary", "|", "transporters", "|", "biomass" ]
Return the set of all transport, boundary, and biomass reactions. These reactions are either pseudo-reactions, or incorporated to allow metabolites to pass between compartments. Some tests focus on purely metabolic reactions and hence exclude this set. Parameters ---------- model : cobra.Model The metabolic model under investigation. biomass : list or cobra.Reaction, optional A list of cobrapy biomass reactions.
[ "Return", "the", "set", "of", "all", "transport", "boundary", "and", "biomass", "reactions", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L453-L473
2,390
opencobra/memote
memote/support/helpers.py
run_fba
def run_fba(model, rxn_id, direction="max", single_value=True): """ Return the solution of an FBA to a set objective function. Parameters ---------- model : cobra.Model The metabolic model under investigation. rxn_id : string A string containing the reaction ID of the desired FBA objective. direction: string A string containing either "max" or "min" to specify the direction of the desired FBA objective function. single_value: boolean Indicates whether the results for all reactions are gathered from the solver, or only the result for the objective value. Returns ------- cobra.solution The cobra solution object for the corresponding FBA problem. """ model.objective = model.reactions.get_by_id(rxn_id) model.objective_direction = direction if single_value: try: return model.slim_optimize() except Infeasible: return np.nan else: try: solution = model.optimize() return solution except Infeasible: return np.nan
python
def run_fba(model, rxn_id, direction="max", single_value=True): model.objective = model.reactions.get_by_id(rxn_id) model.objective_direction = direction if single_value: try: return model.slim_optimize() except Infeasible: return np.nan else: try: solution = model.optimize() return solution except Infeasible: return np.nan
[ "def", "run_fba", "(", "model", ",", "rxn_id", ",", "direction", "=", "\"max\"", ",", "single_value", "=", "True", ")", ":", "model", ".", "objective", "=", "model", ".", "reactions", ".", "get_by_id", "(", "rxn_id", ")", "model", ".", "objective_direction", "=", "direction", "if", "single_value", ":", "try", ":", "return", "model", ".", "slim_optimize", "(", ")", "except", "Infeasible", ":", "return", "np", ".", "nan", "else", ":", "try", ":", "solution", "=", "model", ".", "optimize", "(", ")", "return", "solution", "except", "Infeasible", ":", "return", "np", ".", "nan" ]
Return the solution of an FBA to a set objective function. Parameters ---------- model : cobra.Model The metabolic model under investigation. rxn_id : string A string containing the reaction ID of the desired FBA objective. direction: string A string containing either "max" or "min" to specify the direction of the desired FBA objective function. single_value: boolean Indicates whether the results for all reactions are gathered from the solver, or only the result for the objective value. Returns ------- cobra.solution The cobra solution object for the corresponding FBA problem.
[ "Return", "the", "solution", "of", "an", "FBA", "to", "a", "set", "objective", "function", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L476-L511
2,391
opencobra/memote
memote/support/helpers.py
close_boundaries_sensibly
def close_boundaries_sensibly(model): """ Return a cobra model with all boundaries closed and changed constraints. In the returned model previously fixed reactions are no longer constrained as such. Instead reactions are constrained according to their reversibility. This is to prevent the FBA from becoming infeasible when trying to solve a model with closed exchanges and one fixed reaction. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- cobra.Model A cobra model with all boundary reactions closed and the constraints of each reaction set according to their reversibility. """ for rxn in model.reactions: if rxn.reversibility: rxn.bounds = -1, 1 else: rxn.bounds = 0, 1 for boundary in model.boundary: boundary.bounds = (0, 0)
python
def close_boundaries_sensibly(model): for rxn in model.reactions: if rxn.reversibility: rxn.bounds = -1, 1 else: rxn.bounds = 0, 1 for boundary in model.boundary: boundary.bounds = (0, 0)
[ "def", "close_boundaries_sensibly", "(", "model", ")", ":", "for", "rxn", "in", "model", ".", "reactions", ":", "if", "rxn", ".", "reversibility", ":", "rxn", ".", "bounds", "=", "-", "1", ",", "1", "else", ":", "rxn", ".", "bounds", "=", "0", ",", "1", "for", "boundary", "in", "model", ".", "boundary", ":", "boundary", ".", "bounds", "=", "(", "0", ",", "0", ")" ]
Return a cobra model with all boundaries closed and changed constraints. In the returned model previously fixed reactions are no longer constrained as such. Instead reactions are constrained according to their reversibility. This is to prevent the FBA from becoming infeasible when trying to solve a model with closed exchanges and one fixed reaction. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- cobra.Model A cobra model with all boundary reactions closed and the constraints of each reaction set according to their reversibility.
[ "Return", "a", "cobra", "model", "with", "all", "boundaries", "closed", "and", "changed", "constraints", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L514-L541
2,392
opencobra/memote
memote/support/helpers.py
metabolites_per_compartment
def metabolites_per_compartment(model, compartment_id): """ Identify all metabolites that belong to a given compartment. Parameters ---------- model : cobra.Model The metabolic model under investigation. compartment_id : string Model specific compartment identifier. Returns ------- list List of metabolites belonging to a given compartment. """ return [met for met in model.metabolites if met.compartment == compartment_id]
python
def metabolites_per_compartment(model, compartment_id): return [met for met in model.metabolites if met.compartment == compartment_id]
[ "def", "metabolites_per_compartment", "(", "model", ",", "compartment_id", ")", ":", "return", "[", "met", "for", "met", "in", "model", ".", "metabolites", "if", "met", ".", "compartment", "==", "compartment_id", "]" ]
Identify all metabolites that belong to a given compartment. Parameters ---------- model : cobra.Model The metabolic model under investigation. compartment_id : string Model specific compartment identifier. Returns ------- list List of metabolites belonging to a given compartment.
[ "Identify", "all", "metabolites", "that", "belong", "to", "a", "given", "compartment", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L572-L590
2,393
opencobra/memote
memote/support/helpers.py
largest_compartment_id_met
def largest_compartment_id_met(model): """ Return the ID of the compartment with the most metabolites. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- string Compartment ID of the compartment with the most metabolites. """ # Sort compartments by decreasing size and extract the largest two. candidate, second = sorted( ((c, len(metabolites_per_compartment(model, c))) for c in model.compartments), reverse=True, key=itemgetter(1))[:2] # Compare the size of the compartments. if candidate[1] == second[1]: raise RuntimeError("There is a tie for the largest compartment. " "Compartment {} and {} have equal amounts of " "metabolites.".format(candidate[0], second[0])) else: return candidate[0]
python
def largest_compartment_id_met(model): # Sort compartments by decreasing size and extract the largest two. candidate, second = sorted( ((c, len(metabolites_per_compartment(model, c))) for c in model.compartments), reverse=True, key=itemgetter(1))[:2] # Compare the size of the compartments. if candidate[1] == second[1]: raise RuntimeError("There is a tie for the largest compartment. " "Compartment {} and {} have equal amounts of " "metabolites.".format(candidate[0], second[0])) else: return candidate[0]
[ "def", "largest_compartment_id_met", "(", "model", ")", ":", "# Sort compartments by decreasing size and extract the largest two.", "candidate", ",", "second", "=", "sorted", "(", "(", "(", "c", ",", "len", "(", "metabolites_per_compartment", "(", "model", ",", "c", ")", ")", ")", "for", "c", "in", "model", ".", "compartments", ")", ",", "reverse", "=", "True", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "[", ":", "2", "]", "# Compare the size of the compartments.", "if", "candidate", "[", "1", "]", "==", "second", "[", "1", "]", ":", "raise", "RuntimeError", "(", "\"There is a tie for the largest compartment. \"", "\"Compartment {} and {} have equal amounts of \"", "\"metabolites.\"", ".", "format", "(", "candidate", "[", "0", "]", ",", "second", "[", "0", "]", ")", ")", "else", ":", "return", "candidate", "[", "0", "]" ]
Return the ID of the compartment with the most metabolites. Parameters ---------- model : cobra.Model The metabolic model under investigation. Returns ------- string Compartment ID of the compartment with the most metabolites.
[ "Return", "the", "ID", "of", "the", "compartment", "with", "the", "most", "metabolites", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L593-L618
2,394
opencobra/memote
memote/support/helpers.py
find_compartment_id_in_model
def find_compartment_id_in_model(model, compartment_id): """ Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST. Parameters ---------- model : cobra.Model The metabolic model under investigation. compartment_id : string Memote internal compartment identifier used to access compartment name shortlist to look up potential compartment names. Returns ------- string Compartment identifier in the model corresponding to compartment_id. """ if compartment_id not in COMPARTMENT_SHORTLIST.keys(): raise KeyError("{} is not in the COMPARTMENT_SHORTLIST! Make sure " "you typed the ID correctly, if yes, update the " "shortlist manually.".format(compartment_id)) if len(model.compartments) == 0: raise KeyError( "It was not possible to identify the " "compartment {}, since the " "model has no compartments at " "all.".format(COMPARTMENT_SHORTLIST[compartment_id][0]) ) if compartment_id in model.compartments.keys(): return compartment_id for name in COMPARTMENT_SHORTLIST[compartment_id]: for c_id, c_name in model.compartments.items(): if c_name.lower() == name: return c_id if compartment_id == 'c': return largest_compartment_id_met(model)
python
def find_compartment_id_in_model(model, compartment_id): if compartment_id not in COMPARTMENT_SHORTLIST.keys(): raise KeyError("{} is not in the COMPARTMENT_SHORTLIST! Make sure " "you typed the ID correctly, if yes, update the " "shortlist manually.".format(compartment_id)) if len(model.compartments) == 0: raise KeyError( "It was not possible to identify the " "compartment {}, since the " "model has no compartments at " "all.".format(COMPARTMENT_SHORTLIST[compartment_id][0]) ) if compartment_id in model.compartments.keys(): return compartment_id for name in COMPARTMENT_SHORTLIST[compartment_id]: for c_id, c_name in model.compartments.items(): if c_name.lower() == name: return c_id if compartment_id == 'c': return largest_compartment_id_met(model)
[ "def", "find_compartment_id_in_model", "(", "model", ",", "compartment_id", ")", ":", "if", "compartment_id", "not", "in", "COMPARTMENT_SHORTLIST", ".", "keys", "(", ")", ":", "raise", "KeyError", "(", "\"{} is not in the COMPARTMENT_SHORTLIST! Make sure \"", "\"you typed the ID correctly, if yes, update the \"", "\"shortlist manually.\"", ".", "format", "(", "compartment_id", ")", ")", "if", "len", "(", "model", ".", "compartments", ")", "==", "0", ":", "raise", "KeyError", "(", "\"It was not possible to identify the \"", "\"compartment {}, since the \"", "\"model has no compartments at \"", "\"all.\"", ".", "format", "(", "COMPARTMENT_SHORTLIST", "[", "compartment_id", "]", "[", "0", "]", ")", ")", "if", "compartment_id", "in", "model", ".", "compartments", ".", "keys", "(", ")", ":", "return", "compartment_id", "for", "name", "in", "COMPARTMENT_SHORTLIST", "[", "compartment_id", "]", ":", "for", "c_id", ",", "c_name", "in", "model", ".", "compartments", ".", "items", "(", ")", ":", "if", "c_name", ".", "lower", "(", ")", "==", "name", ":", "return", "c_id", "if", "compartment_id", "==", "'c'", ":", "return", "largest_compartment_id_met", "(", "model", ")" ]
Identify a model compartment by looking up names in COMPARTMENT_SHORTLIST. Parameters ---------- model : cobra.Model The metabolic model under investigation. compartment_id : string Memote internal compartment identifier used to access compartment name shortlist to look up potential compartment names. Returns ------- string Compartment identifier in the model corresponding to compartment_id.
[ "Identify", "a", "model", "compartment", "by", "looking", "up", "names", "in", "COMPARTMENT_SHORTLIST", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L621-L661
2,395
opencobra/memote
memote/support/helpers.py
find_met_in_model
def find_met_in_model(model, mnx_id, compartment_id=None): """ Return specific metabolites by looking up IDs in METANETX_SHORTLIST. Parameters ---------- model : cobra.Model The metabolic model under investigation. mnx_id : string Memote internal MetaNetX metabolite identifier used to map between cross-references in the METANETX_SHORTLIST. compartment_id : string, optional ID of the specific compartment where the metabolites should be found. Defaults to returning matching metabolites from all compartments. Returns ------- list cobra.Metabolite(s) matching the mnx_id. """ def compare_annotation(annotation): """ Return annotation IDs that match to METANETX_SHORTLIST references. Compares the set of METANETX_SHORTLIST references for a given mnx_id and the annotation IDs stored in a given annotation dictionary. """ query_values = set(utils.flatten(annotation.values())) ref_values = set(utils.flatten(METANETX_SHORTLIST[mnx_id])) return query_values & ref_values # Make sure that the MNX ID we're looking up exists in the metabolite # shortlist. if mnx_id not in METANETX_SHORTLIST.columns: raise ValueError( "{} is not in the MetaNetX Shortlist! Make sure " "you typed the ID correctly, if yes, update the " "shortlist by updating and re-running the script " "generate_mnx_shortlists.py.".format(mnx_id) ) candidates = [] # The MNX ID used in the model may or may not be tagged with a compartment # tag e.g. `MNXM23141_c` vs. `MNXM23141`, which is tested with the # following regex. # If the MNX ID itself cannot be found as an ID, we try all other # identifiers that are provided by our shortlist of MetaNetX' mapping # table. 
regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(mnx_id)) if model.metabolites.query(regex): candidates = model.metabolites.query(regex) elif model.metabolites.query(compare_annotation, attribute='annotation'): candidates = model.metabolites.query( compare_annotation, attribute='annotation' ) else: for value in METANETX_SHORTLIST[mnx_id]: if value: for ident in value: regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(ident)) if model.metabolites.query(regex, attribute='id'): candidates.extend( model.metabolites.query(regex, attribute='id')) # Return a list of all possible candidates if no specific compartment ID # is provided. # Otherwise, just return the candidate in one specific compartment. Raise # an exception if there are more than one possible candidates for a given # compartment. if compartment_id is None: print("compartment_id = None?") return candidates else: candidates_in_compartment = \ [cand for cand in candidates if cand.compartment == compartment_id] if len(candidates_in_compartment) == 0: raise RuntimeError("It was not possible to identify " "any metabolite in compartment {} corresponding to " "the following MetaNetX identifier: {}." "Make sure that a cross-reference to this ID in " "the MetaNetX Database exists for your " "identifier " "namespace.".format(compartment_id, mnx_id)) elif len(candidates_in_compartment) > 1: raise RuntimeError("It was not possible to uniquely identify " "a single metabolite in compartment {} that " "corresponds to the following MetaNetX " "identifier: {}." "Instead these candidates were found: {}." "Check that metabolite compartment tags are " "correct. Consider switching to a namespace scheme " "where identifiers are truly " "unique.".format(compartment_id, mnx_id, utils.get_ids( candidates_in_compartment )) ) else: return candidates_in_compartment
python
def find_met_in_model(model, mnx_id, compartment_id=None): def compare_annotation(annotation): """ Return annotation IDs that match to METANETX_SHORTLIST references. Compares the set of METANETX_SHORTLIST references for a given mnx_id and the annotation IDs stored in a given annotation dictionary. """ query_values = set(utils.flatten(annotation.values())) ref_values = set(utils.flatten(METANETX_SHORTLIST[mnx_id])) return query_values & ref_values # Make sure that the MNX ID we're looking up exists in the metabolite # shortlist. if mnx_id not in METANETX_SHORTLIST.columns: raise ValueError( "{} is not in the MetaNetX Shortlist! Make sure " "you typed the ID correctly, if yes, update the " "shortlist by updating and re-running the script " "generate_mnx_shortlists.py.".format(mnx_id) ) candidates = [] # The MNX ID used in the model may or may not be tagged with a compartment # tag e.g. `MNXM23141_c` vs. `MNXM23141`, which is tested with the # following regex. # If the MNX ID itself cannot be found as an ID, we try all other # identifiers that are provided by our shortlist of MetaNetX' mapping # table. regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(mnx_id)) if model.metabolites.query(regex): candidates = model.metabolites.query(regex) elif model.metabolites.query(compare_annotation, attribute='annotation'): candidates = model.metabolites.query( compare_annotation, attribute='annotation' ) else: for value in METANETX_SHORTLIST[mnx_id]: if value: for ident in value: regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(ident)) if model.metabolites.query(regex, attribute='id'): candidates.extend( model.metabolites.query(regex, attribute='id')) # Return a list of all possible candidates if no specific compartment ID # is provided. # Otherwise, just return the candidate in one specific compartment. Raise # an exception if there are more than one possible candidates for a given # compartment. 
if compartment_id is None: print("compartment_id = None?") return candidates else: candidates_in_compartment = \ [cand for cand in candidates if cand.compartment == compartment_id] if len(candidates_in_compartment) == 0: raise RuntimeError("It was not possible to identify " "any metabolite in compartment {} corresponding to " "the following MetaNetX identifier: {}." "Make sure that a cross-reference to this ID in " "the MetaNetX Database exists for your " "identifier " "namespace.".format(compartment_id, mnx_id)) elif len(candidates_in_compartment) > 1: raise RuntimeError("It was not possible to uniquely identify " "a single metabolite in compartment {} that " "corresponds to the following MetaNetX " "identifier: {}." "Instead these candidates were found: {}." "Check that metabolite compartment tags are " "correct. Consider switching to a namespace scheme " "where identifiers are truly " "unique.".format(compartment_id, mnx_id, utils.get_ids( candidates_in_compartment )) ) else: return candidates_in_compartment
[ "def", "find_met_in_model", "(", "model", ",", "mnx_id", ",", "compartment_id", "=", "None", ")", ":", "def", "compare_annotation", "(", "annotation", ")", ":", "\"\"\"\n Return annotation IDs that match to METANETX_SHORTLIST references.\n\n Compares the set of METANETX_SHORTLIST references for a given mnx_id\n and the annotation IDs stored in a given annotation dictionary.\n \"\"\"", "query_values", "=", "set", "(", "utils", ".", "flatten", "(", "annotation", ".", "values", "(", ")", ")", ")", "ref_values", "=", "set", "(", "utils", ".", "flatten", "(", "METANETX_SHORTLIST", "[", "mnx_id", "]", ")", ")", "return", "query_values", "&", "ref_values", "# Make sure that the MNX ID we're looking up exists in the metabolite", "# shortlist.", "if", "mnx_id", "not", "in", "METANETX_SHORTLIST", ".", "columns", ":", "raise", "ValueError", "(", "\"{} is not in the MetaNetX Shortlist! Make sure \"", "\"you typed the ID correctly, if yes, update the \"", "\"shortlist by updating and re-running the script \"", "\"generate_mnx_shortlists.py.\"", ".", "format", "(", "mnx_id", ")", ")", "candidates", "=", "[", "]", "# The MNX ID used in the model may or may not be tagged with a compartment", "# tag e.g. `MNXM23141_c` vs. 
`MNXM23141`, which is tested with the", "# following regex.", "# If the MNX ID itself cannot be found as an ID, we try all other", "# identifiers that are provided by our shortlist of MetaNetX' mapping", "# table.", "regex", "=", "re", ".", "compile", "(", "'^{}(_[a-zA-Z0-9]+)?$'", ".", "format", "(", "mnx_id", ")", ")", "if", "model", ".", "metabolites", ".", "query", "(", "regex", ")", ":", "candidates", "=", "model", ".", "metabolites", ".", "query", "(", "regex", ")", "elif", "model", ".", "metabolites", ".", "query", "(", "compare_annotation", ",", "attribute", "=", "'annotation'", ")", ":", "candidates", "=", "model", ".", "metabolites", ".", "query", "(", "compare_annotation", ",", "attribute", "=", "'annotation'", ")", "else", ":", "for", "value", "in", "METANETX_SHORTLIST", "[", "mnx_id", "]", ":", "if", "value", ":", "for", "ident", "in", "value", ":", "regex", "=", "re", ".", "compile", "(", "'^{}(_[a-zA-Z0-9]+)?$'", ".", "format", "(", "ident", ")", ")", "if", "model", ".", "metabolites", ".", "query", "(", "regex", ",", "attribute", "=", "'id'", ")", ":", "candidates", ".", "extend", "(", "model", ".", "metabolites", ".", "query", "(", "regex", ",", "attribute", "=", "'id'", ")", ")", "# Return a list of all possible candidates if no specific compartment ID", "# is provided.", "# Otherwise, just return the candidate in one specific compartment. 
Raise", "# an exception if there are more than one possible candidates for a given", "# compartment.", "if", "compartment_id", "is", "None", ":", "print", "(", "\"compartment_id = None?\"", ")", "return", "candidates", "else", ":", "candidates_in_compartment", "=", "[", "cand", "for", "cand", "in", "candidates", "if", "cand", ".", "compartment", "==", "compartment_id", "]", "if", "len", "(", "candidates_in_compartment", ")", "==", "0", ":", "raise", "RuntimeError", "(", "\"It was not possible to identify \"", "\"any metabolite in compartment {} corresponding to \"", "\"the following MetaNetX identifier: {}.\"", "\"Make sure that a cross-reference to this ID in \"", "\"the MetaNetX Database exists for your \"", "\"identifier \"", "\"namespace.\"", ".", "format", "(", "compartment_id", ",", "mnx_id", ")", ")", "elif", "len", "(", "candidates_in_compartment", ")", ">", "1", ":", "raise", "RuntimeError", "(", "\"It was not possible to uniquely identify \"", "\"a single metabolite in compartment {} that \"", "\"corresponds to the following MetaNetX \"", "\"identifier: {}.\"", "\"Instead these candidates were found: {}.\"", "\"Check that metabolite compartment tags are \"", "\"correct. Consider switching to a namespace scheme \"", "\"where identifiers are truly \"", "\"unique.\"", ".", "format", "(", "compartment_id", ",", "mnx_id", ",", "utils", ".", "get_ids", "(", "candidates_in_compartment", ")", ")", ")", "else", ":", "return", "candidates_in_compartment" ]
Return specific metabolites by looking up IDs in METANETX_SHORTLIST. Parameters ---------- model : cobra.Model The metabolic model under investigation. mnx_id : string Memote internal MetaNetX metabolite identifier used to map between cross-references in the METANETX_SHORTLIST. compartment_id : string, optional ID of the specific compartment where the metabolites should be found. Defaults to returning matching metabolites from all compartments. Returns ------- list cobra.Metabolite(s) matching the mnx_id.
[ "Return", "specific", "metabolites", "by", "looking", "up", "IDs", "in", "METANETX_SHORTLIST", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L664-L764
2,396
opencobra/memote
memote/support/helpers.py
find_bounds
def find_bounds(model): """ Return the median upper and lower bound of the metabolic model. Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but this may not be the case for merged or autogenerated models. In these cases, this function is used to iterate over all the bounds of all the reactions and find the median bound values in the model, which are then used as the 'most common' bounds. Parameters ---------- model : cobra.Model The metabolic model under investigation. """ lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions], dtype=float) upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions], dtype=float) lower_bound = np.nanmedian(lower_bounds[lower_bounds != 0.0]) upper_bound = np.nanmedian(upper_bounds[upper_bounds != 0.0]) if np.isnan(lower_bound): LOGGER.warning("Could not identify a median lower bound.") lower_bound = -1000.0 if np.isnan(upper_bound): LOGGER.warning("Could not identify a median upper bound.") upper_bound = 1000.0 return lower_bound, upper_bound
python
def find_bounds(model): lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions], dtype=float) upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions], dtype=float) lower_bound = np.nanmedian(lower_bounds[lower_bounds != 0.0]) upper_bound = np.nanmedian(upper_bounds[upper_bounds != 0.0]) if np.isnan(lower_bound): LOGGER.warning("Could not identify a median lower bound.") lower_bound = -1000.0 if np.isnan(upper_bound): LOGGER.warning("Could not identify a median upper bound.") upper_bound = 1000.0 return lower_bound, upper_bound
[ "def", "find_bounds", "(", "model", ")", ":", "lower_bounds", "=", "np", ".", "asarray", "(", "[", "rxn", ".", "lower_bound", "for", "rxn", "in", "model", ".", "reactions", "]", ",", "dtype", "=", "float", ")", "upper_bounds", "=", "np", ".", "asarray", "(", "[", "rxn", ".", "upper_bound", "for", "rxn", "in", "model", ".", "reactions", "]", ",", "dtype", "=", "float", ")", "lower_bound", "=", "np", ".", "nanmedian", "(", "lower_bounds", "[", "lower_bounds", "!=", "0.0", "]", ")", "upper_bound", "=", "np", ".", "nanmedian", "(", "upper_bounds", "[", "upper_bounds", "!=", "0.0", "]", ")", "if", "np", ".", "isnan", "(", "lower_bound", ")", ":", "LOGGER", ".", "warning", "(", "\"Could not identify a median lower bound.\"", ")", "lower_bound", "=", "-", "1000.0", "if", "np", ".", "isnan", "(", "upper_bound", ")", ":", "LOGGER", ".", "warning", "(", "\"Could not identify a median upper bound.\"", ")", "upper_bound", "=", "1000.0", "return", "lower_bound", ",", "upper_bound" ]
Return the median upper and lower bound of the metabolic model. Bounds can vary from model to model. Cobrapy defaults to (-1000, 1000) but this may not be the case for merged or autogenerated models. In these cases, this function is used to iterate over all the bounds of all the reactions and find the median bound values in the model, which are then used as the 'most common' bounds. Parameters ---------- model : cobra.Model The metabolic model under investigation.
[ "Return", "the", "median", "upper", "and", "lower", "bound", "of", "the", "metabolic", "model", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/helpers.py#L781-L809
2,397
opencobra/memote
memote/suite/reporting/report.py
Report.render_html
def render_html(self): """Render an HTML report.""" return self._template.safe_substitute( report_type=self._report_type, results=self.render_json() )
python
def render_html(self): return self._template.safe_substitute( report_type=self._report_type, results=self.render_json() )
[ "def", "render_html", "(", "self", ")", ":", "return", "self", ".", "_template", ".", "safe_substitute", "(", "report_type", "=", "self", ".", "_report_type", ",", "results", "=", "self", ".", "render_json", "(", ")", ")" ]
Render an HTML report.
[ "Render", "an", "HTML", "report", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/reporting/report.py#L80-L85
2,398
opencobra/memote
memote/suite/reporting/report.py
Report.compute_score
def compute_score(self): """Calculate the overall test score using the configuration.""" # LOGGER.info("Begin scoring") cases = self.get_configured_tests() | set(self.result.cases) scores = DataFrame({"score": 0.0, "max": 1.0}, index=sorted(cases)) self.result.setdefault("score", dict()) self.result["score"]["sections"] = list() # Calculate the scores for each test individually. for test, result in iteritems(self.result.cases): # LOGGER.info("Calculate score for test: '%s'.", test) # Test metric may be a dictionary for a parametrized test. metric = result["metric"] if hasattr(metric, "items"): result["score"] = test_score = dict() total = 0.0 for key, value in iteritems(metric): value = 1.0 - value total += value test_score[key] = value # For some reason there are parametrized tests without cases. if len(metric) == 0: metric = 0.0 else: metric = total / len(metric) else: metric = 1.0 - metric scores.at[test, "score"] = metric scores.loc[test, :] *= self.config["weights"].get(test, 1.0) score = 0.0 maximum = 0.0 # Calculate the scores for each section considering the individual test # case scores. for section_id, card in iteritems( self.config['cards']['scored']['sections'] ): # LOGGER.info("Calculate score for section: '%s'.", section_id) cases = card.get("cases", None) if cases is None: continue card_score = scores.loc[cases, "score"].sum() card_total = scores.loc[cases, "max"].sum() # Format results nicely to work immediately with Vega Bar Chart. section_score = {"section": section_id, "score": card_score / card_total} self.result["score"]["sections"].append(section_score) # Calculate the final score for the entire model. weight = card.get("weight", 1.0) score += card_score * weight maximum += card_total * weight self.result["score"]["total_score"] = score / maximum
python
def compute_score(self): # LOGGER.info("Begin scoring") cases = self.get_configured_tests() | set(self.result.cases) scores = DataFrame({"score": 0.0, "max": 1.0}, index=sorted(cases)) self.result.setdefault("score", dict()) self.result["score"]["sections"] = list() # Calculate the scores for each test individually. for test, result in iteritems(self.result.cases): # LOGGER.info("Calculate score for test: '%s'.", test) # Test metric may be a dictionary for a parametrized test. metric = result["metric"] if hasattr(metric, "items"): result["score"] = test_score = dict() total = 0.0 for key, value in iteritems(metric): value = 1.0 - value total += value test_score[key] = value # For some reason there are parametrized tests without cases. if len(metric) == 0: metric = 0.0 else: metric = total / len(metric) else: metric = 1.0 - metric scores.at[test, "score"] = metric scores.loc[test, :] *= self.config["weights"].get(test, 1.0) score = 0.0 maximum = 0.0 # Calculate the scores for each section considering the individual test # case scores. for section_id, card in iteritems( self.config['cards']['scored']['sections'] ): # LOGGER.info("Calculate score for section: '%s'.", section_id) cases = card.get("cases", None) if cases is None: continue card_score = scores.loc[cases, "score"].sum() card_total = scores.loc[cases, "max"].sum() # Format results nicely to work immediately with Vega Bar Chart. section_score = {"section": section_id, "score": card_score / card_total} self.result["score"]["sections"].append(section_score) # Calculate the final score for the entire model. weight = card.get("weight", 1.0) score += card_score * weight maximum += card_total * weight self.result["score"]["total_score"] = score / maximum
[ "def", "compute_score", "(", "self", ")", ":", "# LOGGER.info(\"Begin scoring\")", "cases", "=", "self", ".", "get_configured_tests", "(", ")", "|", "set", "(", "self", ".", "result", ".", "cases", ")", "scores", "=", "DataFrame", "(", "{", "\"score\"", ":", "0.0", ",", "\"max\"", ":", "1.0", "}", ",", "index", "=", "sorted", "(", "cases", ")", ")", "self", ".", "result", ".", "setdefault", "(", "\"score\"", ",", "dict", "(", ")", ")", "self", ".", "result", "[", "\"score\"", "]", "[", "\"sections\"", "]", "=", "list", "(", ")", "# Calculate the scores for each test individually.", "for", "test", ",", "result", "in", "iteritems", "(", "self", ".", "result", ".", "cases", ")", ":", "# LOGGER.info(\"Calculate score for test: '%s'.\", test)", "# Test metric may be a dictionary for a parametrized test.", "metric", "=", "result", "[", "\"metric\"", "]", "if", "hasattr", "(", "metric", ",", "\"items\"", ")", ":", "result", "[", "\"score\"", "]", "=", "test_score", "=", "dict", "(", ")", "total", "=", "0.0", "for", "key", ",", "value", "in", "iteritems", "(", "metric", ")", ":", "value", "=", "1.0", "-", "value", "total", "+=", "value", "test_score", "[", "key", "]", "=", "value", "# For some reason there are parametrized tests without cases.", "if", "len", "(", "metric", ")", "==", "0", ":", "metric", "=", "0.0", "else", ":", "metric", "=", "total", "/", "len", "(", "metric", ")", "else", ":", "metric", "=", "1.0", "-", "metric", "scores", ".", "at", "[", "test", ",", "\"score\"", "]", "=", "metric", "scores", ".", "loc", "[", "test", ",", ":", "]", "*=", "self", ".", "config", "[", "\"weights\"", "]", ".", "get", "(", "test", ",", "1.0", ")", "score", "=", "0.0", "maximum", "=", "0.0", "# Calculate the scores for each section considering the individual test", "# case scores.", "for", "section_id", ",", "card", "in", "iteritems", "(", "self", ".", "config", "[", "'cards'", "]", "[", "'scored'", "]", "[", "'sections'", "]", ")", ":", "# LOGGER.info(\"Calculate score for 
section: '%s'.\", section_id)", "cases", "=", "card", ".", "get", "(", "\"cases\"", ",", "None", ")", "if", "cases", "is", "None", ":", "continue", "card_score", "=", "scores", ".", "loc", "[", "cases", ",", "\"score\"", "]", ".", "sum", "(", ")", "card_total", "=", "scores", ".", "loc", "[", "cases", ",", "\"max\"", "]", ".", "sum", "(", ")", "# Format results nicely to work immediately with Vega Bar Chart.", "section_score", "=", "{", "\"section\"", ":", "section_id", ",", "\"score\"", ":", "card_score", "/", "card_total", "}", "self", ".", "result", "[", "\"score\"", "]", "[", "\"sections\"", "]", ".", "append", "(", "section_score", ")", "# Calculate the final score for the entire model.", "weight", "=", "card", ".", "get", "(", "\"weight\"", ",", "1.0", ")", "score", "+=", "card_score", "*", "weight", "maximum", "+=", "card_total", "*", "weight", "self", ".", "result", "[", "\"score\"", "]", "[", "\"total_score\"", "]", "=", "score", "/", "maximum" ]
Calculate the overall test score using the configuration.
[ "Calculate", "the", "overall", "test", "score", "using", "the", "configuration", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/suite/reporting/report.py#L114-L164
2,399
opencobra/memote
memote/support/sbo.py
find_components_without_sbo_terms
def find_components_without_sbo_terms(model, components): """ Find model components that are not annotated with any SBO terms. Parameters ---------- model : cobra.Model The metabolic model under investigation. components : {"metabolites", "reactions", "genes"} A string denoting `cobra.Model` components. Returns ------- list The components without any SBO term annotation. """ return [elem for elem in getattr(model, components) if elem.annotation is None or 'sbo' not in elem.annotation]
python
def find_components_without_sbo_terms(model, components): return [elem for elem in getattr(model, components) if elem.annotation is None or 'sbo' not in elem.annotation]
[ "def", "find_components_without_sbo_terms", "(", "model", ",", "components", ")", ":", "return", "[", "elem", "for", "elem", "in", "getattr", "(", "model", ",", "components", ")", "if", "elem", ".", "annotation", "is", "None", "or", "'sbo'", "not", "in", "elem", ".", "annotation", "]" ]
Find model components that are not annotated with any SBO terms. Parameters ---------- model : cobra.Model The metabolic model under investigation. components : {"metabolites", "reactions", "genes"} A string denoting `cobra.Model` components. Returns ------- list The components without any SBO term annotation.
[ "Find", "model", "components", "that", "are", "not", "annotated", "with", "any", "SBO", "terms", "." ]
276630fcd4449fb7b914186edfd38c239e7052df
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/sbo.py#L27-L45