Dataset preview

Schema:

    column            type    notes
    ----------------  ------  -----------------
    _id               string  lengths 2-7
    title             string  lengths 1-88
    partition         string  3 values
    text              string  lengths 31-13.1k
    language          string  1 value
    meta_information  dict

Every row in this preview has partition "train", language "python", and
meta_information { "resource": "" }, so those constant fields are stated once
here. Each record below gives its _id and title, followed by its text field:
a Python snippet that the preview truncates mid-code; truncation points are
left as they appear in the data.
q2300 · generic_filename

    def generic_filename(path):
        '''
        Extract filename of given path os-independently, taking care of
        known path separators.

        :param path: path
q2301 · clean_restricted_chars

    def clean_restricted_chars(path, restricted_chars=restricted_chars):
        '''
        Get path without restricted characters.

        :param path: path
        :return: path without restricted characters
        :rtype: str or unicode (depending
q2302 · check_forbidden_filename

    def check_forbidden_filename(filename, destiny_os=os.name,
                                 restricted_names=restricted_names):
        '''
        Get if given filename is forbidden for current OS or filesystem.

        :param filename:
q2303 · check_path

    def check_path(path, base, os_sep=os.sep):
        '''
        Check if both given paths are equal.

        :param path: absolute path
        :type path: str
        :param base: absolute base path
        :type base: str
        :param os_sep: path separator, defaults to os.sep
        :type os_sep: str
        :return: whether the two paths are equal or not
q2304 · check_base

    def check_base(path, base, os_sep=os.sep):
        '''
        Check if given absolute path is under, or is, the given base.

        :param path: absolute path
        :type path: str
        :param base: absolute base path
q2305 · check_under_base

    def check_under_base(path, base, os_sep=os.sep):
        '''
        Check if given absolute path is under given base.

        :param path: absolute path
        :type path: str
        :param base: absolute base path
        :type base: str
        :param os_sep: path separator, defaults to os.sep
        :return: whether file is under given base or not
q2306 · secure_filename

    def secure_filename(path, destiny_os=os.name,
                        fs_encoding=compat.FS_ENCODING):
        '''
        Get rid of parent path components and special filenames.

        If path is invalid or protected, return empty string.

        :param path: unsafe path, only basename will be used
        :type path: str
        :param destiny_os: destination operating system (defaults to os.name)
        :type destiny_os: str
        :param fs_encoding: fs path encoding (defaults to detected)
        :type fs_encoding: str
        :return: filename or empty string
        :rtype: str
        '''
        path = generic_filename(path)
        path = clean_restricted_chars(
q2307 · alternative_filename

    def alternative_filename(filename, attempt=None):
        '''
        Generates an alternative version of given filename.

        If a number attempt parameter is given, it will be used in the
        alternative name; a random value will be used otherwise.

        :param filename: original filename
        :param attempt: optional attempt number, defaults to None
        :return: new filename
        :rtype: str or unicode
        '''
        filename_parts = filename.rsplit(u'.', 2)
q2308 · scandir

    def scandir(path, app=None):
        '''
        Config-aware scandir. Currently, only aware of ``exclude_fnc``.

        :param path: absolute path
        :type path: str
        :param app: flask application
        :type app: flask.Flask or None
        :returns: filtered scandir entries
        :rtype: iterator
        '''
        exclude = app and app.config.get('exclude_fnc')
        if exclude:
q2309 · Node.link

    def link(self):
        '''
        Get last widget with place "entry-link".

        :returns: widget on entry-link (ideally a link one)
        :rtype: namedtuple instance
        '''
        link = None
q2310 · Node.can_remove

    def can_remove(self):
        '''
        Get if current node can be removed based on app config's
        directory_remove.

        :returns: True if current node can be removed, False otherwise.
        :rtype: bool
        '''
q2311 · Node.parent

    def parent(self):
        '''
        Get parent node if available based on app config's directory_base.

        :returns: parent object if available
        :rtype: Node instance or None
        '''
        if check_path(self.path, self.app.config['directory_base']):
            return None
q2312 · Node.ancestors

    def ancestors(self):
        '''
        Get list of ancestors until app config's directory_base is reached.

        :returns: list of ancestors starting from nearest.
        :rtype: list of Node objects
        '''
        ancestors = []
q2313 · Node.modified

    def modified(self):
        '''
        Get human-readable last modification date-time.

        :returns: iso8601-like date-time string (without timezone)
        :rtype: str
        '''
        try:
q2314 · Node.from_urlpath

    def from_urlpath(cls, path, app=None):
        '''
        Alternative constructor which accepts a path as taken from URL and
        uses the given app or the current app config to get the real path.

        If class has attribute `generic` set to True, `directory_class` or
        `file_class` will be used as type.

        :param path: relative path as from URL
        :param app: optional, flask application
        :return: file object pointing to path
        :rtype: File
        '''
        app = app or current_app
q2315 · WhisperFinder._find_paths

    def _find_paths(self, current_dir, patterns):
        """Recursively generates absolute paths whose components underneath
        current_dir match the corresponding pattern in patterns"""
        pattern = patterns[0]
        patterns = patterns[1:]
        has_wildcard = is_pattern(pattern)
        using_globstar = pattern == "**"

        # This avoids os.listdir() for performance
        if has_wildcard:
            entries = [x.name for x in scandir(current_dir)]
        else:
            entries = [pattern]

        if using_globstar:
            matching_subdirs = map(lambda x: x[0], walk(current_dir))
        else:
            subdirs = [e for e in entries
                       if os.path.isdir(os.path.join(current_dir, e))]
            matching_subdirs = match_entries(subdirs, pattern)

        # For terminal globstar, add a pattern for all files in subdirs
        if using_globstar and not patterns:
            patterns = ['*']

        if patterns:  # we've still got more directories to traverse
            for subdir in matching_subdirs:
                absolute_path = os.path.join(current_dir,
q2316 · union_overlapping

    def union_overlapping(intervals):
        """Union any overlapping intervals in the given set."""
        disjoint_intervals = []
        for interval in intervals:
            if disjoint_intervals and disjoint_intervals[-1].overlaps(interval):
q2317 · recurse

    def recurse(query, index):
        """
        Recursively walk across paths, adding leaves to the index as
        they're found.
q2318 · __archive_fetch

    def __archive_fetch(fh, archive, fromTime, untilTime):
        """
        Fetch data from a single archive. Note that checks for validity of
        the time period requested happen above this level so it's possible
        to wrap around the archive on a read and request data older than
        the archive's retention
        """
        fromInterval = int(fromTime - (fromTime % archive['secondsPerPoint'])) \
            + archive['secondsPerPoint']
        untilInterval = int(untilTime - (untilTime % archive['secondsPerPoint'])) \
            + archive['secondsPerPoint']
        if fromInterval == untilInterval:
            # Zero-length time range: always include the next point
            untilInterval += archive['secondsPerPoint']

        fh.seek(archive['offset'])
        packedPoint = fh.read(pointSize)
        (baseInterval, baseValue) = struct.unpack(pointFormat, packedPoint)

        if baseInterval == 0:
            step = archive['secondsPerPoint']
            points = (untilInterval - fromInterval) // step
            timeInfo = (fromInterval, untilInterval, step)
            valueList = [None] * points
            return (timeInfo, valueList)

        # Determine fromOffset
        timeDistance = fromInterval - baseInterval
        pointDistance = timeDistance // archive['secondsPerPoint']
        byteDistance = pointDistance *
q2319 · merge

    def merge(path_from, path_to):
        """ Merges the data from one whisper file into another. Each file
        must have the same archive configuration
        """
q2320 · diff

    def diff(path_from, path_to, ignore_empty=False):
        """ Compare two whisper databases. Each file must have the same
        archive configuration
q2321 · DataStore.add_data

    def add_data(self, path, time_info, data, exprs):
        """
        Stores data before it can be put into a time series
        """
        # Don't add if empty
        if not nonempty(data):
q2322 · CarbonLinkPool.select_host

    def select_host(self, metric):
        """
        Returns the carbon host that has data for the given metric.
        """
        key = self.keyfunc(metric)
        nodes = []
        servers = set()
        for node in self.hash_ring.get_nodes(key):
            server, instance = node
            if server in servers:
                continue
            servers.add(server)
q2323 · safeArgs

    def safeArgs(args):
        """Iterate over valid, finite values in an iterable.

        Skip any items that are None, NaN, or infinite.
        """
        return (arg for arg
q2324 · format_units

    def format_units(v, step=None, system="si", units=None):
        """Format the given value in standardized units.

        ``system`` is either 'binary' or 'si'

        For more info, see:
          http://en.wikipedia.org/wiki/SI_prefix
          http://en.wikipedia.org/wiki/Binary_prefix
        """
        if v is None:
            return 0, ''

        for prefix, size in UnitSystems[system]:
            if condition(v, size, step):
                v2 = v / size
                if v2 - math.floor(v2) < 0.00000000001 and v > 1:
q2325 · _AxisTics.checkFinite

    def checkFinite(value, name='value'):
        """Check that value is a finite number.

        If it is, return it. If not, raise GraphError describing the
        problem, using name in the error message.
        """
        if math.isnan(value):
q2326 · _AxisTics.reconcileLimits

    def reconcileLimits(self):
        """If self.minValue is not less than self.maxValue, fix the problem.

        If self.minValue is not less than self.maxValue, adjust
        self.minValue and/or self.maxValue (depending on which was not
        specified explicitly by the user) to make
        self.minValue < self.maxValue. If the user specified both limits
        explicitly, then raise GraphError.
        """
        if self.minValue < self.maxValue:
            # The limits are already OK.
            return

        minFixed = (self.minValueSource in ['min'])
        maxFixed = (self.maxValueSource in ['max', 'limit'])

        if minFixed and maxFixed:
            raise GraphError('The %s must be less than the %s' %
q2327 · _AxisTics.applySettings

    def applySettings(self, axisMin=None, axisMax=None, axisLimit=None):
        """Apply the specified settings to this axis.

        Set self.minValue, self.minValueSource, self.maxValue,
        self.maxValueSource, and self.axisLimit reasonably based on the
        parameters provided.

        Arguments:

        axisMin -- a finite number, or None to choose a round minimum
            limit that includes all of the data.

        axisMax -- a finite number, 'max' to use the maximum value
            contained in the data, or None to choose a round maximum limit
            that includes all of the data.

        axisLimit -- a finite number to use as an upper limit on maxValue,
            or None to impose no upper limit.
        """
        if axisMin is not None and not math.isnan(axisMin):
            self.minValueSource = 'min'
            self.minValue = self.checkFinite(axisMin, 'axis min')

        if axisMax == 'max':
            self.maxValueSource = 'extremum'
        elif axisMax is not None and not math.isnan(axisMax):
            self.maxValueSource = 'max'
            self.maxValue = self.checkFinite(axisMax, 'axis max')

        if axisLimit is None or math.isnan(axisLimit):
q2328 · _AxisTics.makeLabel

    def makeLabel(self, value):
        """Create a label for the specified value.

        Create a label string containing the value and its units (if any),
        based on the values of self.step, self.span, and self.unitSystem.
        """
        value, prefix = format_units(value, self.step, system=self.unitSystem)
        span, spanPrefix = format_units(self.span, self.step,
q2329 · _LinearAxisTics.generateSteps

    def generateSteps(self, minStep):
        """Generate allowed steps with step >= minStep in increasing order."""
        self.checkFinite(minStep)

        if self.binary:
            base = 2.0
            mantissas = [1.0]
            exponent = math.floor(math.log(minStep, 2) - EPSILON)
        else:
            base = 10.0
            mantissas = [1.0, 2.0, 5.0]
            exponent = math.floor(math.log10(minStep) - EPSILON)

        while True:
q2330 · _LinearAxisTics.computeSlop

    def computeSlop(self, step, divisor):
        """Compute the slop that would result from step and divisor.

        Return the slop, or None if this combination can't cover the full
        range. See chooseStep() for the definition of "slop".
        """
        bottom = step * math.floor(self.minValue / float(step) + EPSILON)
q2331 · _LinearAxisTics.chooseStep

    def chooseStep(self, divisors=None, binary=False):
        """Choose a nice, pretty size for the steps between axis labels.

        Our main constraint is that the number of divisions must be taken
        from the divisors list. We pick a number of divisions and a step
        size that minimizes the amount of whitespace ("slop") that would
        need to be included outside of the range
        [self.minValue, self.maxValue] if we were to push out the axis
        values to the next larger multiples of the step size.

        The minimum step that could possibly cover the variance satisfies

            minStep * max(divisors) >= variance

        or

            minStep = variance / max(divisors)

        It's not necessarily possible to cover the variance with a step
        that size, but we know that any smaller step definitely *cannot*
        cover it. So we can start there.

        For a sufficiently large step size, it is definitely possible to
        cover the variance, but at some point the slop will start growing.
        Let's define the slop to be

            slop = max(minValue - bottom, top - maxValue)

        Then for a given step size, we know that

            slop >= (1/2) * (step * min(divisors) - variance)

        (the factor of 1/2 is for the best-case scenario that the slop is
        distributed equally on the two sides of the range). So suppose we
        already have a choice that yields bestSlop. Then there is no need
        to choose steps so large that the slop is guaranteed to be larger
        than bestSlop. Therefore, the maximum step size that we need to
        consider is

            maxStep = (2 * bestSlop + variance) / min(divisors)
        """
        self.binary = binary
        if divisors is None:
            divisors = [4, 5, 6]
        else:
            for divisor in divisors:
                self.checkFinite(divisor, 'divisor')
                if divisor < 1:
                    raise GraphError('Divisors must be greater than or equal '
q2332 · formatPathExpressions

    def formatPathExpressions(seriesList):
        """
        Returns a comma-separated list of unique path expressions.
        """
q2333 · rangeOfSeries

    def rangeOfSeries(requestContext, *seriesLists):
        """
        Takes a wildcard seriesList. Distills down a set of inputs into the
        range of the series.

        Example::

            &target=rangeOfSeries(Server*.connections.total)

        """
        if not seriesLists or not any(seriesLists):
            return []
        seriesList, start, end, step = normalize(seriesLists)
        name = "rangeOfSeries(%s)" % formatPathExpressions(seriesList)
        values
q2334 · percentileOfSeries

    def percentileOfSeries(requestContext, seriesList, n, interpolate=False):
        """
        percentileOfSeries returns a single series which is composed of the
        n-percentile values taken across a wildcard series at each point.
        Unless `interpolate` is set to True, percentile values are actual
        values contained in one of the supplied series.
        """
        if n <= 0:
            raise ValueError(
                'The requested percent is required to be greater than 0')

        if not seriesList:
            return []
        name = 'percentileOfSeries(%s,%g)' % (seriesList[0].pathExpression, n)
q2335 · weightedAverage

    def weightedAverage(requestContext, seriesListAvg, seriesListWeight,
                        *nodes):
        """
        Takes a series of average values and a series of weights and
        produces a weighted average for all values. The corresponding
        values should share one or more zero-indexed nodes.

        Example::

            &target=weightedAverage(*.transactions.mean,*.transactions.count,0)
            &target=weightedAverage(*.transactions.mean,*.transactions.count,1,3,4)

        """
        if isinstance(nodes, int):
            nodes = [nodes]

        sortedSeries = {}

        for seriesAvg, seriesWeight in zip_longest(
                seriesListAvg, seriesListWeight):
            key = ''
            for node in nodes:
                key += seriesAvg.name.split(".")[node]

            sortedSeries.setdefault(key, {})
            sortedSeries[key]['avg'] = seriesAvg

            key = ''
            for node in nodes:
                key += seriesWeight.name.split(".")[node]

            sortedSeries.setdefault(key, {})
            sortedSeries[key]['weight'] = seriesWeight

        productList = []

        for key in sortedSeries:
            if 'weight' not in sortedSeries[key]:
                continue
            if 'avg' not in sortedSeries[key]:
                continue

            seriesWeight = sortedSeries[key]['weight']
            seriesAvg = sortedSeries[key]['avg']

            productValues = [safeMul(val1, val2) for val1, val2
q2336 · scale

    def scale(requestContext, seriesList, factor):
        """
        Takes one metric or a wildcard seriesList followed by a constant,
        and multiplies the datapoint by the constant provided at each point.

        Example::

            &target=scale(Server.instance01.threads.busy,10)
q2337 · scaleToSeconds

    def scaleToSeconds(requestContext, seriesList, seconds):
        """
        Takes one metric or a wildcard seriesList and returns "value per
        seconds" where seconds is the last argument to this function.

        Useful in conjunction with derivative or integral function if you
        want to normalize its result to a known resolution for arbitrary
        retentions
        """
        for series in seriesList:
q2338 · pow

    def pow(requestContext, seriesList, factor):
        """
        Takes one metric or a wildcard seriesList followed by a constant,
        and raises the datapoint by the power of the constant provided at
        each point.

        Example::

            &target=pow(Server.instance01.threads.busy,10)
q2339 · powSeries

    def powSeries(requestContext, *seriesLists):
        """
        Takes two or more series and pows their points. A constant line may
        be used.

        Example::

            &target=powSeries(Server.instance01.app.requests,
                              Server.instance01.app.replies)

        """
        if not seriesLists or not any(seriesLists):
            return []
        seriesList, start, end, step = normalize(seriesLists)
        name = "powSeries(%s)" % ','.join([s.name for s in seriesList])
        values = []
        for row in zip_longest(*seriesList):
            first = True
            tmpVal = None
            for element in row:
q2340 · squareRoot

    def squareRoot(requestContext, seriesList):
        """
        Takes one metric or a wildcard seriesList, and computes the square
        root of each datapoint.

        Example::

            &target=squareRoot(Server.instance01.threads.busy)

        """
        for series in seriesList:
q2341 · absolute

    def absolute(requestContext, seriesList):
        """
        Takes one metric or a wildcard seriesList and applies the
        mathematical abs function to each datapoint transforming it to its
        absolute value.

        Example::

            &target=absolute(Server.instance01.threads.busy)
q2342 · offset

    def offset(requestContext, seriesList, factor):
        """
        Takes one metric or a wildcard seriesList followed by a constant,
        and adds the constant to each datapoint.

        Example::

            &target=offset(Server.instance01.threads.busy,10)

        """
        for series in seriesList:
            series.name = "offset(%s,%g)" % (series.name, float(factor))
q2343 · offsetToZero

    def offsetToZero(requestContext, seriesList):
        """
        Offsets a metric or wildcard seriesList by subtracting the minimum
        value in the series from each datapoint.

        Useful to compare different series where the values in each series
        may be higher or lower on average but you're only interested in the
        relative difference.

        An example use case is for comparing different round trip time
        results. When measuring RTT (like pinging a server), different
        devices may come back with consistently different results due to
        network latency which will be different depending on how many
        network hops between the probe and the device. To compare different
        devices in the same graph, the network latency to each has to be
        factored out of the results. This is a shortcut that takes the
        fastest response (lowest number in the series) and sets that to
        zero and then offsets all of the other datapoints in that series by
q2344 · consolidateBy

    def consolidateBy(requestContext, seriesList, consolidationFunc):
        """
        Takes one metric or a wildcard seriesList and a consolidation
        function name.

        Valid function names are 'sum', 'average', 'min', and 'max'.

        When a graph is drawn where width of the graph size in pixels is
        smaller than the number of datapoints to be graphed, Graphite
        consolidates the values to prevent line overlap. The
        consolidateBy() function changes the consolidation function from
        the default of 'average' to one of 'sum', 'max', or 'min'. This is
        especially useful in sales graphs, where fractional values make no
        sense and a 'sum' of consolidated values is appropriate.

        Example::

            &target=consolidateBy(Sales.widgets.largeBlue, 'sum')
            &target=consolidateBy(Servers.web01.sda1.free_space, 'max')
q2345 · integral

    def integral(requestContext, seriesList):
        """
        This will show the sum over time, sort of like a continuous
        addition function. Useful for finding totals or trends in metrics
        that are collected per minute.

        Example::

            &target=integral(company.sales.perMinute)

        This would start at zero on the left side of the graph, adding the
        sales each minute, and show the total sales for the time period
        selected at the right
q2346 · areaBetween

    def areaBetween(requestContext, *seriesLists):
        """
        Draws the vertical area in between the two series in seriesList.
        Useful for visualizing a range such as the minimum and maximum
        latency for a service.

        areaBetween expects **exactly one argument** that results in
        exactly two series (see example below). The order of the lower and
        higher values series does not matter. The visualization only works
        when used in conjunction with ``areaMode=stacked``.

        Most likely use case is to provide a band within which another
        metric should move. In such case applying an ``alpha()``, as in the
        second example, gives best visual results.

        Example::

            &target=areaBetween(service.latency.{min,max})&areaMode=stacked
            &target=alpha(areaBetween(service.latency.{min,max}),0.3)&areaMode=stacked

        If for instance, you need to build a seriesList, you should use the
        ``group`` function, like so::

            &target=areaBetween(group(minSeries(a.*.min),maxSeries(a.*.max)))

        """
q2347 · alias

    def alias(requestContext, seriesList, newName):
        """
        Takes one metric or a wildcard seriesList and a string in quotes.
        Prints the string instead of the metric name in the legend.

        Example::

            &target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")

        """
        try:
q2348 · _getFirstPathExpression

    def _getFirstPathExpression(name):
        """Returns the first metric path in an expression."""
        tokens = grammar.parseString(name)
        pathExpression = None
        while pathExpression is None:
            if tokens.pathExpression:
                pathExpression = tokens.pathExpression
            elif tokens.expression:
q2349 · alpha

    def alpha(requestContext, seriesList, alpha):
        """
        Assigns the given alpha transparency setting to the series. Takes a
        float value between 0 and 1.
q2350 · color

    def color(requestContext, seriesList, theColor):
        """
        Assigns the given color to the seriesList

        Example::

            &target=color(collectd.hostname.cpu.0.user, 'green')
            &target=color(collectd.hostname.cpu.0.system, 'ff0000')
            &target=color(collectd.hostname.cpu.0.idle, 'gray')
q2351 · logarithm

    def logarithm(requestContext, seriesList, base=10):
        """
        Takes one metric or a wildcard seriesList, a base, and draws the
        y-axis in logarithmic format. If base is omitted, the function
        defaults to base 10.

        Example::

            &target=log(carbon.agents.hostname.avgUpdateTime,2)

        """
        results = []
        for series in seriesList:
            newValues
q2352 · maximumBelow

    def maximumBelow(requestContext, seriesList, n):
        """
        Takes one metric or a wildcard seriesList followed by a constant n.
        Draws only the metrics with a maximum value below n.

        Example::

            &target=maximumBelow(system.interface.eth*.packetsSent,1000)

        This would only display interfaces which always sent less than 1000
q2353 · minimumBelow

    def minimumBelow(requestContext, seriesList, n):
        """
        Takes one metric or a wildcard seriesList followed by a constant n.
        Draws only the metrics with a minimum value below n.

        Example::

            &target=minimumBelow(system.interface.eth*.packetsSent,1000)
q2354 · highestMax

    def highestMax(requestContext, seriesList, n=1):
        """
        Takes one metric or a wildcard seriesList followed by an integer N.

        Out of all metrics passed, draws only the N metrics with the
        highest maximum value in the time period specified.

        Example::

            &target=highestMax(server*.instance*.threads.busy,5)

        Draws the top 5 servers who have had the most busy threads during
q2355 · currentAbove

    def currentAbove(requestContext, seriesList, n):
        """
        Takes one metric or a wildcard seriesList followed by an integer N.
        Out of all metrics passed, draws only the metrics whose value is
        above N at the end of the time period specified.

        Example::

            &target=currentAbove(server*.instance*.threads.busy,50)

        Draws the servers with more than 50 busy threads.
q2356 · averageAbove

    def averageAbove(requestContext, seriesList, n):
        """
        Takes one metric or a wildcard seriesList followed by an integer N.
        Out of all metrics passed, draws only the metrics with an average
        value above N for the time period specified.

        Example::

            &target=averageAbove(server*.instance*.threads.busy,25)

        Draws the servers with average values above 25.
q2357 · nPercentile

    def nPercentile(requestContext, seriesList, n):
        """Returns n-percent of each series in the seriesList."""
        assert n, 'The requested percent is required to be greater than 0'
        results = []
        for s in seriesList:
            # Create a sorted copy of the TimeSeries excluding None values
            # in the values list.
            s_copy = TimeSeries(s.name, s.start, s.end, s.step,
                                sorted(not_none(s)))
            if not s_copy:
                continue  # Skip this series because it is empty.

            perc_val =
q2358 · averageOutsidePercentile

    def averageOutsidePercentile(requestContext, seriesList, n):
        """
        Removes functions lying inside an average percentile interval
        """
        averages = [safeAvg(s) for s in seriesList]

        if n < 50:
            n = 100 - n

        lowPercentile = _getPercentile(averages, 100 - n)
q2359 · removeBetweenPercentile

    def removeBetweenPercentile(requestContext, seriesList, n):
        """
        Removes series that do not have a value lying in the x-percentile
        of all the values at a moment
        """
        if n < 50:
            n = 100 - n

        transposed = list(zip_longest(*seriesList))

        lowPercentiles = [_getPercentile(col, 100 - n) for col in transposed]
q2360 · removeAboveValue

    def removeAboveValue(requestContext, seriesList, n):
        """
        Removes data above the given threshold from the series or list of
        series provided. Values above this threshold are assigned a value
        of None.
        """
        for s in seriesList:
            s.name = 'removeAboveValue(%s, %g)' % (s.name, n)
q2361 · removeBelowPercentile

    def removeBelowPercentile(requestContext, seriesList, n):
        """
        Removes data below the nth percentile from the series or list of
        series provided. Values below this percentile are assigned a value
        of None.
        """
        for s in seriesList:
            s.name = 'removeBelowPercentile(%s, %g)' % (s.name, n)
            s.pathExpression = s.name
            try:
q2362 · useSeriesAbove

    def useSeriesAbove(requestContext, seriesList, value, search, replace):
        """
        Compares the maximum of each series against the given `value`. If
        the series maximum is greater than `value`, the regular expression
        search and replace is applied against the series name to plot a
        related metric.

        e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'),
        the response time metric will be plotted only when the maximum
        value of the corresponding request/s metric is > 10

        Example::
q2363 · secondYAxis

    def secondYAxis(requestContext, seriesList):
        """
        Graph the series on the secondary Y axis.
        """
        for series in seriesList:
            series.options['secondYAxis']
q2364 · holtWintersForecast

    def holtWintersForecast(requestContext, seriesList):
        """
        Performs a Holt-Winters forecast using the series as input data.
        Data from one week previous to the series is used to bootstrap the
        initial forecast.
        """
        previewSeconds = 7 * 86400  # 7 days
        # ignore original data and pull new, including our preview
        newContext = requestContext.copy()
        newContext['startTime'] = (requestContext['startTime'] -
                                   timedelta(seconds=previewSeconds))
        previewList = evaluateTokens(newContext, requestContext['args'][0])
        results = []
        for series in previewList:
            analysis = holtWintersAnalysis(series)
            predictions = analysis['predictions']
            windowPoints = previewSeconds
q2365 · holtWintersConfidenceBands

    def holtWintersConfidenceBands(requestContext, seriesList, delta=3):
        """
        Performs a Holt-Winters forecast using the series as input data and
        plots upper and lower bands with the predicted forecast deviations.
        """
        previewSeconds = 7 * 86400  # 7 days
        # ignore original data and pull new, including our preview
        newContext = requestContext.copy()
        newContext['startTime'] = (requestContext['startTime'] -
                                   timedelta(seconds=previewSeconds))
        previewList = evaluateTokens(newContext, requestContext['args'][0])
        results = []
        for series in previewList:
            analysis = holtWintersAnalysis(series)

            data = analysis['predictions']
            windowPoints = previewSeconds // data.step
            forecast = TimeSeries(data.name, data.start + previewSeconds,
                                  data.end, data.step, data[windowPoints:])
            forecast.pathExpression = data.pathExpression

            data = analysis['deviations']
            windowPoints = previewSeconds // data.step
q2366 · holtWintersAberration

    def holtWintersAberration(requestContext, seriesList, delta=3):
        """
        Performs a Holt-Winters forecast using the series as input data and
        plots the positive or negative deviation of the series data from
        the forecast.
        """
        results = []
        for series in seriesList:
            confidenceBands = holtWintersConfidenceBands(requestContext,
                                                         [series], delta)
            lowerBand = confidenceBands[0]
            upperBand = confidenceBands[1]
            aberration = list()
            for i, actual in enumerate(series):
                if actual is None:
                    aberration.append(0)
q2367 · holtWintersConfidenceArea

    def holtWintersConfidenceArea(requestContext, seriesList, delta=3):
        """
        Performs a Holt-Winters forecast using the series as input data and
        plots the area between the upper and lower bands of the predicted
        forecast
q2368 · linearRegressionAnalysis

    def linearRegressionAnalysis(series):
        """
        Returns factor and offset of linear regression function by least
        squares method.
        """
        n = safeLen(series)
        sumI = sum([i for i, v in enumerate(series) if v is not None])
        sumV = sum([v for i, v in enumerate(series) if v is not None])
        sumII
q2369 · linearRegression

    def linearRegression(requestContext, seriesList, startSourceAt=None,
                         endSourceAt=None):
        """
        Graphs the linear regression function by least squares method.

        Takes one metric or a wildcard seriesList, followed by a quoted
        string with the time to start the line and another quoted string
        with the time to end the line. The start and end times are
        inclusive (default range is from to until). See ``from / until``
        in the render\_api_ for examples of time formats. Datapoints in the
        range are used for the regression.

        Example::

            &target=linearRegression(Server.instance01.threads.busy,'-1d')
            &target=linearRegression(Server.instance*.threads.busy,
                                     "00:00 20140101","11:59 20140630")

        """
q2370 · drawAsInfinite

    def drawAsInfinite(requestContext, seriesList):
        """
        Takes one metric or a wildcard seriesList.

        If the value is zero, draw the line at 0. If the value is above
        zero, draw the line at infinity. If the value is null or less than
        zero, do not draw the line.

        Useful for displaying on/off metrics, such as exit codes.
        (0 = success, anything else = failure.)
q2371 · constantLine

    def constantLine(requestContext, value):
        """
        Takes a float F.

        Draws a horizontal line at value F across the graph.

        Example::

            &target=constantLine(123.456)

        """
        name = "constantLine(%s)" % str(value)
        start = int(epoch(requestContext['startTime']))
        end = int(epoch(requestContext['endTime']))
q2372 · aggregateLine

    def aggregateLine(requestContext, seriesList, func='avg'):
        """
        Takes a metric or wildcard seriesList and draws a horizontal line
        based on the function applied to each series.

        Note: By default, the graphite renderer consolidates data points by
        averaging data points over time. If you are using the 'min' or
        'max' function for aggregateLine, this can cause an unusual gap in
        the line drawn by this function and the data itself. To fix this,
        you should use the consolidateBy() function with the same function
        argument you are using for aggregateLine. This will ensure that the
        proper data points are retained and the graph should line up
        correctly.

        Example::

            &target=aggregateLine(server01.connections.total, 'avg')
            &target=aggregateLine(server*.connections.total, 'avg')

        """
        t_funcs = {'avg': safeAvg, 'min': safeMin, 'max': safeMax}
q2373 · verticalLine

    def verticalLine(requestContext, ts, label=None, color=None):
        """
        Takes a timestamp string ts.

        Draws a vertical line at the designated timestamp with optional
        'label' and 'color'. Supported timestamp formats include both
        relative (e.g. -3h) and absolute (e.g. 16:00_20110501) strings,
        such as those used with ``from`` and ``until`` parameters. When
        set, the 'label' will appear in the graph legend.

        Note: Any timestamps defined outside the requested range will raise
        a 'ValueError' exception.

        Example::

            &target=verticalLine("12:3420131108","event","blue")
            &target=verticalLine("16:00_20110501","event")
            &target=verticalLine("-5mins")

        """
        ts = int(epoch(parseATTime(ts, requestContext['tzinfo'])))
        start = int(epoch(requestContext['startTime']))
        end = int(epoch(requestContext['endTime']))
        if ts < start:
            raise
q2374 · transformNull

    def transformNull(requestContext, seriesList, default=0,
                      referenceSeries=None):
        """
        Takes a metric or wildcard seriesList and replaces null values with
        the value specified by `default`. The value 0 is used if not
        specified. The optional referenceSeries, if specified, is a metric
        or wildcard series list that governs which time intervals nulls
        should be replaced. If specified, nulls are replaced only in
        intervals where a non-null is found for the same interval in any of
        referenceSeries. This method complements the drawNullAsZero
        function in graphical mode, but also works in text-only mode.

        Example::

            &target=transformNull(webapp.pages.*.views,-1)

        This would take any page that didn't have values and supply
        negative 1 as a default. Any other numeric value may be used as
        well.
        """
        def transform(v, d):
            if v is None:
                return d
            else:
                return v

        if referenceSeries:
            defaults = [default if any(v is not None for v in x) else None
                        for x in zip_longest(*referenceSeries)]
q2375 · countSeries

    def countSeries(requestContext, *seriesLists):
        """
        Draws a horizontal line representing the number of nodes found in
        the seriesList.

        Example::

            &target=countSeries(carbon.agents.*.*)

        """
        if not seriesLists or not any(seriesLists):
            series = constantLine(requestContext, 0).pop()
            series.pathExpression = "countSeries()"
        else:
            seriesList, start, end, step = normalize(seriesLists)
q2376 · group

    def group(requestContext, *seriesLists):
        """
        Takes an arbitrary number of seriesLists and adds them to a single
        seriesList. This is used to pass multiple seriesLists to a function
        which only takes one.
q2377 · groupByNode

    def groupByNode(requestContext, seriesList, nodeNum, callback):
        """
        Takes a seriesList and maps a callback to subgroups within as
        defined by a common node.

        Example::

            &target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries")

        Would return multiple series which are each the result of applying
        the "sumSeries" function to groups joined on the second node
        (0 indexed) resulting in
q2378 · groupByNodes

    def groupByNodes(requestContext, seriesList, callback, *nodes):
        """
        Takes a seriesList and maps a callback to subgroups within as
        defined by multiple nodes.

        Example::

            &target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4)

        Would return multiple series which are each the result of applying
        the "sumSeries" function to groups joined on the nodes' list
        (0 indexed) resulting in a list of targets like::

            sumSeries(ganglia.server1.*.cpu.load5),
            sumSeries(ganglia.server1.*.cpu.load10),
            sumSeries(ganglia.server1.*.cpu.load15),
            sumSeries(ganglia.server2.*.cpu.load5),
            sumSeries(ganglia.server2.*.cpu.load10),
            sumSeries(ganglia.server2.*.cpu.load15), ...

        """
        from .app import app
        metaSeries = {}
        keys = []
        if isinstance(nodes, int):
q2379 · exclude

    def exclude(requestContext, seriesList, pattern):
        """
        Takes a metric or a wildcard seriesList, followed by a regular
        expression in double quotes. Excludes metrics that match the
        regular expression.

        Example::
q2380 · summarize

    def summarize(requestContext, seriesList, intervalString, func='sum',
                  alignToFrom=False):
        """
        Summarize the data into interval buckets of a certain size.

        By default, the contents of each interval bucket are summed
        together. This is useful for counters where each increment
        represents a discrete event and retrieving a "per X" value requires
        summing all the events in that interval.

        Specifying 'avg' instead will return the mean for each bucket,
        which can be more useful when the value is a gauge that represents
        a certain value in time.

        'max', 'min' or 'last' can also be specified.

        By default, buckets are calculated by rounding to the nearest
        interval. This works well for intervals smaller than a day. For
        example, 22:32 will end up in the bucket 22:00-23:00 when the
        interval=1hour.

        Passing alignToFrom=true will instead create buckets starting at
        the from time. In this case, the bucket for 22:32 depends on the
        from time. If from=6:30 then the 1hour bucket for 22:32 is
        22:30-23:30.

        Example::

            # total errors per hour
            &target=summarize(counter.errors, "1hour")

            # new users per week
            &target=summarize(nonNegativeDerivative(gauge.num_users), "1week")

            # average queue size per hour
            &target=summarize(queue.size, "1hour", "avg")

            # maximum queue size during each hour
            &target=summarize(queue.size, "1hour", "max")

            # 2010 Q1-4
            &target=summarize(metric, "13week", "avg", true)&from=midnight+20100101

        """
        results = []
        delta = parseTimeOffset(intervalString)
        interval = to_seconds(delta)

        for series in seriesList:
            buckets = {}
            timestamps = range(int(series.start), int(series.end) + 1,
                               int(series.step))
            datapoints = zip_longest(timestamps, series)

            for timestamp, value in datapoints:
                if timestamp is None:
                    continue
                if alignToFrom:
                    bucketInterval = int((timestamp - series.start) / interval)
                else:
                    bucketInterval = timestamp - (timestamp % interval)
                if bucketInterval not in buckets:
                    buckets[bucketInterval] = []
                if value is not None:
q2381 · Medium.apply

    def apply(self, model):
        """Set the defined medium on the given model."""
        model.medium = {row.exchange: row.uptake
q2382 · MemoteResult.add_environment_information

    def add_environment_information(meta):
        """Record environment information."""
        meta["timestamp"] = datetime.utcnow().isoformat(" ")
        meta["platform"] = platform.system()
q2383 · find_transported_elements

    def find_transported_elements(rxn):
        """
        Return a dictionary showing the amount of transported elements of a
        rxn.

        Collects the elements for each metabolite participating in a
        reaction, multiplies the amount by the metabolite's stoichiometry
        in the reaction and bins the result according to the compartment
        that metabolite is in. This produces a dictionary of dictionaries
        such as this ``{'p': {'C': -1, 'H': -4}, 'c': {'C': 1, 'H': 4}}``
        which shows the transported entities. This dictionary is then
        simplified to only include the non-zero elements of one single
        compartment i.e. showing the precise elements that are transported.

        Parameters
        ----------
        rxn : cobra.Reaction
            Any cobra.Reaction containing metabolites.

        """
        element_dist = defaultdict()
        # Collecting elements for each metabolite.
        for met in rxn.metabolites:
            if met.compartment not in element_dist:
                # Multiplication by the metabolite stoichiometry.
                element_dist[met.compartment] = \
                    {k: v * rxn.metabolites[met]
                     for (k, v) in iteritems(met.elements)}
            else:
q2384 · find_transport_reactions

    def find_transport_reactions(model):
        """
        Return a list of all transport reactions.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.

        Notes
        -----
        A transport reaction is defined as follows:

        1. It contains metabolites from at least 2 compartments and
        2. at least 1 metabolite undergoes no chemical reaction, i.e., the
           formula and/or annotation stays the same on both sides of the
           equation.

        A notable exception is transport via PTS, which also contains the
        following restriction:

        3. The transported metabolite(s) are transported into a compartment
           through the exchange of a phosphate group.

        An example of transport via PTS would be
        pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c)

        Reactions similar to transport via PTS (referred to as "modified
        transport reactions") follow a similar pattern:
        A(x) + B-R(y) -> A-R(y) + B(y)

        Such modified transport reactions can be detected, but only when a
        formula field exists for all metabolites in a particular reaction.
        If this is not the case, transport reactions are identified through
        annotations, which cannot detect modified transport reactions.
        """
        transport_reactions = []
        transport_rxn_candidates = set(model.reactions) - set(model.boundary) \
            - set(find_biomass_reaction(model))
        transport_rxn_candidates = set(
            [rxn for rxn in transport_rxn_candidates
             if len(rxn.compartments) >= 2]
        )
        # Add all labeled transport reactions
        sbo_matches = set([rxn for rxn in transport_rxn_candidates if
q2385 · find_converting_reactions

    def find_converting_reactions(model, pair):
        """
        Find all reactions which convert a given metabolite pair.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        pair : tuple or list
            A pair of metabolite identifiers without compartment suffix.

        Returns
        -------
        frozenset
            The set of reactions that have one of the pair on their
            left-hand
q2386 · find_demand_reactions

    def find_demand_reactions(model):
        u"""
        Return a list of demand reactions.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.

        Notes
        -----
        [1] defines demand reactions as:
        -- 'unbalanced network reactions that allow the accumulation of a
           compound'
        -- reactions that are chiefly added during the gap-filling process
        -- as a means of dealing with 'compounds that are known to be
           produced by the organism [..] (i) for which no information is
           available about their fractional distribution to the biomass or
           (ii) which may only be produced in some environmental conditions
        -- reactions with a formula such as: 'met_c -> '

        Demand reactions differ from exchange reactions in that the
        metabolites are not removed from the extracellular environment, but
        from any of the organism's compartments.
q2387 · find_sink_reactions

    def find_sink_reactions(model):
        u"""
        Return a list of sink reactions.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.

        Notes
        -----
        [1] defines sink reactions as:
        -- 'similar to demand reactions' but reversible, thus able to
           supply the model with metabolites
        -- reactions that are chiefly added during the gap-filling process
        -- as a means of dealing with 'compounds that are produced by
           nonmetabolic cellular processes but that need to be metabolized'
        -- reactions with a formula such as: 'met_c <-> '

        Sink reactions differ from exchange reactions in that the
        metabolites are not removed from the extracellular environment, but
        from any of the organism's compartments.

        References
        ----------
q2388 · find_exchange_rxns

    def find_exchange_rxns(model):
        u"""
        Return a list of exchange reactions.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.

        Notes
        -----
        [1] defines exchange reactions as:
        -- reactions that 'define the extracellular environment'
        -- 'unbalanced, extra-organism reactions that represent the supply
           to or removal of metabolites from the extra-organism "space"'
        -- reactions with a formula such as: 'met_e -> ' or ' -> met_e' or
           'met_e <=> '

        Exchange reactions differ from demand reactions in that the
        metabolites are removed from or added to the extracellular
        environment only. With this the uptake or secretion of a metabolite
        is modeled, respectively.

        References
        ----------
        ..
q2389 · find_interchange_biomass_reactions

    def find_interchange_biomass_reactions(model, biomass=None):
        """
        Return the set of all transport, boundary, and biomass reactions.

        These reactions are either pseudo-reactions, or incorporated to
        allow metabolites to pass between compartments. Some tests focus on
        purely metabolic reactions and hence exclude this set.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under
q2390 · run_fba

    def run_fba(model, rxn_id, direction="max", single_value=True):
        """
        Return the solution of an FBA to a set objective function.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        rxn_id : string
            A string containing the reaction ID of the desired FBA
            objective.
        direction : string
            A string containing either "max" or "min" to specify the
            direction of the desired FBA objective function.
        single_value : boolean
            Indicates whether the results for all reactions are gathered
            from the solver, or only the result for the objective value.

        Returns
        -------
        cobra.solution
            The cobra solution object
q2391 · close_boundaries_sensibly

    def close_boundaries_sensibly(model):
        """
        Return a cobra model with all boundaries closed and changed
        constraints.

        In the returned model previously fixed reactions are no longer
        constrained as such. Instead reactions are constrained according to
        their reversibility. This is to prevent the FBA from becoming
        infeasible when trying to solve a model with closed exchanges and
        one fixed reaction.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.

        Returns
        -------
        cobra.Model
            A cobra model with all boundary reactions closed and the
            constraints
q2392 · metabolites_per_compartment

    def metabolites_per_compartment(model, compartment_id):
        """
        Identify all metabolites that belong to a given compartment.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        compartment_id : string
            Model specific compartment identifier.

        Returns
        -------
q2393 · largest_compartment_id_met

    def largest_compartment_id_met(model):
        """
        Return the ID of the compartment with the most metabolites.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.

        Returns
        -------
        string
            Compartment ID of the compartment with the most metabolites.

        """
        # Sort compartments by decreasing size and extract the largest two.
        candidate, second = sorted(
            ((c, len(metabolites_per_compartment(model, c))) for c
q2394 · find_compartment_id_in_model

    def find_compartment_id_in_model(model, compartment_id):
        """
        Identify a model compartment by looking up names in
        COMPARTMENT_SHORTLIST.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        compartment_id : string
            Memote internal compartment identifier used to access
            compartment name shortlist to look up potential compartment
            names.

        Returns
        -------
        string
            Compartment identifier in the model corresponding to
            compartment_id.

        """
        if compartment_id not in COMPARTMENT_SHORTLIST.keys():
            raise KeyError("{} is not in the COMPARTMENT_SHORTLIST! Make "
                           "sure you typed the ID correctly, if yes, update "
                           "the shortlist manually.".format(compartment_id))

        if len(model.compartments) == 0:
            raise KeyError(
                "It was not possible to identify the "
                "compartment {}, since the "
                "model has
q2395 · find_met_in_model

    def find_met_in_model(model, mnx_id, compartment_id=None):
        """
        Return specific metabolites by looking up IDs in
        METANETX_SHORTLIST.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        mnx_id : string
            Memote internal MetaNetX metabolite identifier used to map
            between cross-references in the METANETX_SHORTLIST.
        compartment_id : string, optional
            ID of the specific compartment where the metabolites should be
            found. Defaults to returning matching metabolites from all
            compartments.

        Returns
        -------
        list
            cobra.Metabolite(s) matching the mnx_id.

        """
        def compare_annotation(annotation):
            """
            Return annotation IDs that match to METANETX_SHORTLIST
            references.

            Compares the set of METANETX_SHORTLIST references for a given
            mnx_id and the annotation IDs stored in a given annotation
            dictionary.
            """
            query_values = set(utils.flatten(annotation.values()))
            ref_values = set(utils.flatten(METANETX_SHORTLIST[mnx_id]))
            return query_values & ref_values

        # Make sure that the MNX ID we're looking up exists in the
        # metabolite shortlist.
        if mnx_id not in METANETX_SHORTLIST.columns:
            raise ValueError(
                "{} is not in the MetaNetX Shortlist! Make sure "
                "you typed the ID correctly, if yes, update the "
                "shortlist by updating and re-running the script "
                "generate_mnx_shortlists.py.".format(mnx_id)
            )
        candidates = []
        # The MNX ID used in the model may or may not be tagged with a
        # compartment tag e.g. `MNXM23141_c` vs. `MNXM23141`, which is
        # tested with the following regex.
        # If the MNX ID itself cannot be found as an ID, we try all other
        # identifiers that are provided by our shortlist of MetaNetX'
        # mapping table.
        regex = re.compile('^{}(_[a-zA-Z0-9]+)?$'.format(mnx_id))
        if model.metabolites.query(regex):
            candidates = model.metabolites.query(regex)
        elif model.metabolites.query(compare_annotation,
                                     attribute='annotation'):
            candidates = model.metabolites.query(
                compare_annotation, attribute='annotation'
            )
        else:
            for value in METANETX_SHORTLIST[mnx_id]:
                if value:
                    for ident in value:
                        regex = re.compile(
                            '^{}(_[a-zA-Z0-9]+)?$'.format(ident))
                        if model.metabolites.query(regex, attribute='id'):
                            candidates.extend(
                                model.metabolites.query(
                                    regex, attribute='id'))
        # Return a list of all possible candidates if no specific
        # compartment ID is provided.
        # Otherwise, just return the candidate in one specific compartment.
        # Raise an exception if there are more than one possible candidates
        # for a given
q2396 · find_bounds

    def find_bounds(model):
        """
        Return the median upper and lower bound of the metabolic model.

        Bounds can vary from model to model. Cobrapy defaults to
        (-1000, 1000) but this may not be the case for merged or
        autogenerated models. In these cases, this function is used to
        iterate over all the bounds of all the reactions and find the
        median bound values in the model, which are then used as the 'most
        common' bounds.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.

        """
        lower_bounds = np.asarray([rxn.lower_bound for rxn in model.reactions],
                                  dtype=float)
        upper_bounds = np.asarray([rxn.upper_bound for rxn in model.reactions],
q2397 · Report.render_html

    def render_html(self):
        """Render an HTML report."""
        return self._template.safe_substitute(
q2398 · Report.compute_score

    def compute_score(self):
        """Calculate the overall test score using the configuration."""
        # LOGGER.info("Begin scoring")
        cases = self.get_configured_tests() | set(self.result.cases)
        scores = DataFrame({"score": 0.0, "max": 1.0}, index=sorted(cases))
        self.result.setdefault("score", dict())
        self.result["score"]["sections"] = list()
        # Calculate the scores for each test individually.
        for test, result in iteritems(self.result.cases):
            # LOGGER.info("Calculate score for test: '%s'.", test)
            # Test metric may be a dictionary for a parametrized test.
            metric = result["metric"]
            if hasattr(metric, "items"):
                result["score"] = test_score = dict()
                total = 0.0
                for key, value in iteritems(metric):
                    value = 1.0 - value
                    total += value
                    test_score[key] = value
q2399 · find_components_without_sbo_terms

    def find_components_without_sbo_terms(model, components):
        """
        Find model components that are not annotated with any SBO terms.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        components : {"metabolites", "reactions", "genes"}
            A string denoting `cobra.Model` components.

        Returns
        -------