<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def socket_read(fp): """Buffered read from socket. Reads all data available from socket. @fp: File pointer for socket. @return: String of characters read from buffer. """
    response = ''
    oldlen = 0
    newlen = 0
    while True:
        response += fp.read(buffSize)
        newlen = len(response)
        if newlen - oldlen == 0:
            break
        else:
            oldlen = newlen
    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def exec_command(args, env=None): """Convenience function that executes command and returns result. @param args: Tuple of command and arguments. @param env: Dictionary of environment variables. (Environment is not modified if None.) @return: Command output. """
    try:
        cmd = subprocess.Popen(args,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               bufsize=buffSize,
                               env=env)
    except OSError, e:
        raise Exception("Execution of command failed.\n",
                        "  Command: %s\n  Error: %s" % (' '.join(args), str(e)))
    out, err = cmd.communicate(None)
    if cmd.returncode != 0:
        raise Exception("Execution of command failed with error code: %s\n%s\n"
                        % (cmd.returncode, err))
    return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def registerFilter(self, column, patterns, is_regex=False, ignore_case=False): """Register filter on a column of table. @param column: The column name. @param patterns: A single pattern or a list of patterns used for matching column values. @param is_regex: The patterns will be treated as regex if True, the column values will be tested for equality with the patterns otherwise. @param ignore_case: Case insensitive matching will be used if True. """
    if isinstance(patterns, basestring):
        patt_list = (patterns,)
    elif isinstance(patterns, (tuple, list)):
        patt_list = list(patterns)
    else:
        raise ValueError("The patterns parameter must either be a string "
                         "or a tuple / list of strings.")
    if is_regex:
        if ignore_case:
            flags = re.IGNORECASE
        else:
            flags = 0
        patt_exprs = [re.compile(pattern, flags) for pattern in patt_list]
    else:
        if ignore_case:
            patt_exprs = [pattern.lower() for pattern in patt_list]
        else:
            patt_exprs = patt_list
    self._filters[column] = (patt_exprs, is_regex, ignore_case)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unregisterFilter(self, column): """Unregister filter on a column of the table. @param column: The column header. """
if self._filters.has_key(column): del self._filters[column]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def registerFilters(self, **kwargs): """Register multiple filters at once. @param **kwargs: Multiple filters are registered using keyword variables. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. """
    for (key, patterns) in kwargs.items():
        if key.endswith('_regex'):
            col = key[:-len('_regex')]
            is_regex = True
        else:
            col = key
            is_regex = False
        if col.endswith('_ic'):
            col = col[:-len('_ic')]
            ignore_case = True
        else:
            ignore_case = False
        self.registerFilter(col, patterns, is_regex, ignore_case)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def applyFilters(self, headers, table): """Apply filter on ps command result. @param headers: List of column headers. @param table: Nested list of rows and columns. @return: Nested list of rows and columns filtered using registered filters. """
    result = []
    column_idxs = {}
    for column in self._filters.keys():
        try:
            column_idxs[column] = headers.index(column)
        except ValueError:
            raise ValueError('Invalid column name %s in filter.' % column)
    for row in table:
        for (column, (patterns, is_regex, ignore_case)) in self._filters.items():
            col_idx = column_idxs[column]
            col_val = row[col_idx]
            if is_regex:
                for pattern in patterns:
                    if pattern.search(col_val):
                        break
                else:
                    break
            else:
                if ignore_case:
                    col_val = col_val.lower()
                if col_val in patterns:
                    pass
                else:
                    break
        else:
            result.append(row)
    return result
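A minimal usage sketch of the two methods above, assuming the enclosing class is the util.TableFilter referenced later in this dataset and that its constructor initializes the internal _filters dict to {}:

    headers = ['user', 'cmd', 'stat']
    table = [['root', 'nginx: master process', 'Ss'],
             ['www-data', 'nginx: worker process', 'S'],
             ['postgres', 'postgres: checkpointer', 'Ss']]
    tf = util.TableFilter()
    tf.registerFilters(user='root', cmd_regex='^nginx')  # exact match on user, regex on cmd
    tf.applyFilters(headers, table)
    # -> [['root', 'nginx: master process', 'Ss']]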
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def function(data, maxt=None): """ Calculate the autocorrelation function for a 1D time series. Parameters data : numpy.ndarray (N,) The time series. Returns ------- rho : numpy.ndarray (N,) An autocorrelation function. """
    data = np.atleast_1d(data)
    assert len(np.shape(data)) == 1, \
        "The autocorrelation function can only be computed " \
        + "on a 1D time series."
    if maxt is None:
        maxt = len(data)
    result = np.zeros(maxt, dtype=float)
    _acor.function(np.array(data, dtype=float), result)
    return result / result[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getDesc(self, entry): """Returns description for stat entry. @param entry: Entry name. @return: Description for entry. """
    if len(self._descDict) == 0:
        self.getStats()
    return self._descDict.get(entry)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fingerprint_helper(egg, permute=False, n_perms=1000, match='exact', distance='euclidean', features=None): """ Computes clustering along a set of feature dimensions Parameters egg : quail.Egg Data to analyze dist_funcs : dict Dictionary of distance functions for feature clustering analyses Returns probabilities : Numpy array Each number represents clustering along a different feature dimension """
    if features is None:
        features = egg.dist_funcs.keys()

    inds = egg.pres.index.tolist()
    slices = [egg.crack(subjects=[i], lists=[j]) for i, j in inds]
    weights = _get_weights(slices, features, distdict, permute, n_perms, match, distance)
    return np.nanmean(weights, axis=0)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(key, default=None): """ Searches os.environ. If a key is found try evaluating its type else; return the string. returns: k->value (type as defined by ast.literal_eval) """
    try:
        # Attempt to evaluate into python literal
        return ast.literal_eval(os.environ.get(key.upper(), default))
    except (ValueError, SyntaxError):
        return os.environ.get(key.upper(), default)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(filepath=None, **kwargs): """ Saves a list of keyword arguments as environment variables to a file. If no filepath given will default to the default `.env` file. """
    if filepath is None:
        filepath = os.path.join('.env')
    with open(filepath, 'wb') as file_handle:
        file_handle.writelines(
            '{0}={1}\n'.format(key.upper(), val)
            for key, val in kwargs.items()
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(filepath=None): """ Reads a .env file into os.environ. For a set filepath, open the file and read contents into os.environ. If filepath is not set then look in current dir for a .env file. """
    if filepath and os.path.exists(filepath):
        pass
    else:
        if not os.path.exists('.env'):
            return False
        filepath = os.path.join('.env')
    for key, value in _get_line_(filepath):
        # set the key, value in the python environment vars dictionary
        # does not make modifications system wide.
        os.environ.setdefault(key, str(value))
    return True
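A short usage sketch tying get/save/load together (the module exposing these functions is not named in this excerpt, so assume they are imported into the current namespace):

    save(debug=True, port=8080)   # writes DEBUG=True and PORT=8080 to ./.env
    load()                        # reads ./.env back into os.environ
    get('port')                   # -> 8080 (int, via ast.literal_eval)
    get('app_name', 'demo')       # -> 'demo' if APP_NAME is unset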
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initStats(self): """Query and parse Apache Web Server Status Page."""
url = "%s://%s:%d/%s?auto" % (self._proto, self._host, self._port, self._statuspath) response = util.get_url(url, self._user, self._password) self._statusDict = {} for line in response.splitlines(): mobj = re.match('(\S.*\S)\s*:\s*(\S+)\s*$', line) if mobj: self._statusDict[mobj.group(1)] = util.parse_value(mobj.group(2)) if self._statusDict.has_key('Scoreboard'): self._statusDict['MaxWorkers'] = len(self._statusDict['Scoreboard'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_pres_features(self, features=None): """ Returns a df of features for presented items """
    if features is None:
        features = self.dist_funcs.keys()
    elif not isinstance(features, list):
        features = [features]

    return self.pres.applymap(
        lambda x: {k: v for k, v in x.items() if k in features} if x is not None else None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_rec_features(self, features=None): """ Returns a df of features for recalled items """
    if features is None:
        features = self.dist_funcs.keys()
    elif not isinstance(features, list):
        features = [features]

    return self.rec.applymap(
        lambda x: {k: v for k, v in x.items() if k != 'item'} if x is not None else None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def info(self): """ Print info about the data egg """
    print('Number of subjects: ' + str(self.n_subjects))
    print('Number of lists per subject: ' + str(self.n_lists))
    print('Number of words per list: ' + str(self.list_length))
    print('Date created: ' + str(self.date_created))
    print('Meta data: ' + str(self.meta))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, fname, compression='blosc'): """ Save method for the Egg object The data will be saved as a 'egg' file, which is a dictionary containing the elements of a Egg saved in the hd5 format using `deepdish`. Parameters fname : str A name for the file. If the file extension (.egg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """
    # put egg vars into a dict
    egg = {
        'pres': df2list(self.pres),
        'rec': df2list(self.rec),
        'dist_funcs': self.dist_funcs,
        'subjgroup': self.subjgroup,
        'subjname': self.subjname,
        'listgroup': self.listgroup,
        'listname': self.listname,
        'date_created': self.date_created,
        'meta': self.meta
    }

    # if extension wasn't included, add it
    if fname[-4:] != '.egg':
        fname += '.egg'

    # save
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        dd.io.save(fname, egg, compression=compression)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self, fname, compression='blosc'): """ Save method for the FriedEgg object The data will be saved as a 'fegg' file, which is a dictionary containing the elements of a FriedEgg saved in the hd5 format using `deepdish`. Parameters fname : str A name for the file. If the file extension (.fegg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """
    egg = {
        'data': self.data,
        'analysis': self.analysis,
        'list_length': self.list_length,
        'n_lists': self.n_lists,
        'n_subjects': self.n_subjects,
        'position': self.position,
        'date_created': self.date_created,
        'meta': self.meta
    }

    # if extension wasn't included, add it ('.fegg' is five characters)
    if fname[-5:] != '.fegg':
        fname += '.fegg'

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        dd.io.save(fname, egg, compression=compression)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def free_symbolic(self): """Free symbolic data"""
    if self._symbolic is not None:
        self.funs.free_symbolic(self._symbolic)
        self._symbolic = None
        self.mtx = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def free_numeric(self): """Free numeric data"""
    if self._numeric is not None:
        self.funs.free_numeric(self._numeric)
        self._numeric = None
        self.free_symbolic()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve(self, sys, mtx, rhs, autoTranspose=False): """ Solution of system of linear equation using the Numeric object. Parameters sys : constant one of UMFPACK system description constants, like UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. rhs : ndarray Right Hand Side autoTranspose : bool Automatically changes `sys` to the transposed type, if `mtx` is in CSR, since UMFPACK assumes CSC internally Returns ------- sol : ndarray Solution to the equation system. """
    if sys not in umfSys:
        raise ValueError('sys must be in %s' % umfSys)

    if autoTranspose and self.isCSR:
        # UMFPACK uses CSC internally...
        if self.family in umfRealTypes:
            ii = 0
        else:
            ii = 1
        if sys in umfSys_transposeMap[ii]:
            sys = umfSys_transposeMap[ii][sys]
        else:
            raise RuntimeError('autoTranspose ambiguous, switch it off')

    if self._numeric is not None:
        if self.mtx is not mtx:
            raise ValueError('must be called with same matrix as numeric()')
    else:
        raise RuntimeError('numeric() not called')

    indx = self._getIndx(mtx)

    if self.isReal:
        rhs = rhs.astype(np.float64)
        sol = np.zeros((mtx.shape[1],), dtype=np.float64)
        status = self.funs.solve(sys, mtx.indptr, indx, mtx.data, sol, rhs,
                                 self._numeric, self.control, self.info)
    else:
        rhs = rhs.astype(np.complex128)
        sol = np.zeros((mtx.shape[1],), dtype=np.complex128)
        mreal, mimag = mtx.data.real.copy(), mtx.data.imag.copy()
        sreal, simag = sol.real.copy(), sol.imag.copy()
        rreal, rimag = rhs.real.copy(), rhs.imag.copy()
        status = self.funs.solve(sys, mtx.indptr, indx, mreal, mimag,
                                 sreal, simag, rreal, rimag,
                                 self._numeric, self.control, self.info)
        sol.real, sol.imag = sreal, simag

    # self.funs.report_info(self.control, self.info)
    # pause()
    if status != UMFPACK_OK:
        if status == UMFPACK_WARNING_singular_matrix:
            # Change inf, nan to zeros.
            warnings.warn('Zeroing nan and inf entries...', UmfpackWarning)
            sol[~np.isfinite(sol)] = 0.0
        else:
            raise RuntimeError('%s failed with %s'
                               % (self.funs.solve, umfStatus[status]))
    econd = 1.0 / self.info[UMFPACK_RCOND]
    if econd > self.maxCond:
        msg = ('(almost) singular matrix! '
               '(estimated cond. number: %.2e)' % econd)
        warnings.warn(msg, UmfpackWarning)

    return sol
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def linsolve(self, sys, mtx, rhs, autoTranspose=False): """ One-shot solution of system of linear equation. Reuses Numeric object if possible. Parameters sys : constant one of UMFPACK system description constants, like UMFPACK_A, UMFPACK_At, see umfSys list and UMFPACK docs mtx : scipy.sparse.csc_matrix or scipy.sparse.csr_matrix Input. rhs : ndarray Right Hand Side autoTranspose : bool Automatically changes `sys` to the transposed type, if `mtx` is in CSR, since UMFPACK assumes CSC internally Returns ------- sol : ndarray Solution to the equation system. """
    if sys not in umfSys:
        raise ValueError('sys must be in %s' % umfSys)

    if self._numeric is None:
        self.numeric(mtx)
    else:
        if self.mtx is not mtx:
            self.numeric(mtx)

    sol = self.solve(sys, mtx, rhs, autoTranspose)
    self.free_numeric()

    return sol
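A hedged usage sketch for the two solver methods above, assuming this excerpt comes from scikit-umfpack's UmfpackContext (the class and module names are not shown here):

    import numpy as np
    import scipy.sparse as sp
    import scikits.umfpack as um

    mtx = sp.csc_matrix(np.array([[4.0, 1.0, 0.0],
                                  [1.0, 3.0, 0.0],
                                  [0.0, 0.0, 2.0]]))
    rhs = np.array([1.0, 2.0, 3.0])

    umf = um.UmfpackContext()                   # real, double-precision family
    sol = umf.linsolve(um.UMFPACK_A, mtx, rhs)  # one-shot solve of A x = rhs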
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stick_perm(presenter, egg, dist_dict, strategy): """Computes weights for one reordering using stick-breaking method"""
    # seed RNG
    np.random.seed()

    # unpack egg
    egg_pres, egg_rec, egg_features, egg_dist_funcs = parse_egg(egg)

    # reorder
    regg = order_stick(presenter, egg, dist_dict, strategy)

    # unpack regg
    regg_pres, regg_rec, regg_features, regg_dist_funcs = parse_egg(regg)

    # get the order
    regg_pres = list(regg_pres)
    egg_pres = list(egg_pres)
    idx = [egg_pres.index(r) for r in regg_pres]

    # compute weights
    weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres),
                                           list(regg_features), dist_dict)

    # save out the order
    orders = idx

    return weights, orders
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_distances_dict(egg): """ Creates a nested dict of distances """
    pres, rec, features, dist_funcs = parse_egg(egg)
    pres_list = list(pres)
    features_list = list(features)

    # initialize dist dict
    distances = {}

    # for each word in the list
    for idx1, item1 in enumerate(pres_list):
        distances[item1] = {}

        # for each word in the list
        for idx2, item2 in enumerate(pres_list):
            distances[item1][item2] = {}

            # for each feature in dist_funcs
            for feature in dist_funcs:
                distances[item1][item2][feature] = builtin_dist_funcs[dist_funcs[feature]](
                    features_list[idx1][feature], features_list[idx2][feature])

    return distances
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self, egg, permute=False, nperms=1000, parallel=False): """ In-place method that updates fingerprint with new data Parameters egg : quail.Egg Data to update fingerprint Returns None """
    # increment n
    self.n += 1

    next_weights = np.nanmean(_analyze_chunk(egg,
                                             analysis=fingerprint_helper,
                                             analysis_type='fingerprint',
                                             pass_features=True,
                                             permute=permute,
                                             n_perms=nperms,
                                             parallel=parallel).values, 0)

    if self.state is not None:
        # multiply states by n
        c = self.state * self.n

        # update state
        self.state = np.nansum(np.array([c, next_weights]), axis=0) / (self.n + 1)
    else:
        self.state = next_weights

    # update the history
    self.history.append(next_weights)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _getDevMajorMinor(self, devpath): """Return major and minor device number for block device path devpath. @param devpath: Full path for block device. @return: Tuple (major, minor). """
    fstat = os.stat(devpath)
    if stat.S_ISBLK(fstat.st_mode):
        return (os.major(fstat.st_rdev), os.minor(fstat.st_rdev))
    else:
        raise ValueError("The file %s is not a valid block device." % devpath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _getUniqueDev(self, devpath): """Return unique device for any block device path. @param devpath: Full path for block device. @return: Unique device string without the /dev prefix. """
    realpath = os.path.realpath(devpath)
    mobj = re.match('\/dev\/(.*)$', realpath)
    if mobj:
        dev = mobj.group(1)
        if dev in self._diskStats:
            return dev
        else:
            try:
                (major, minor) = self._getDevMajorMinor(realpath)
            except:
                return None
            return self._mapMajorMinor2dev.get((major, minor))
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _initFilesystemInfo(self): """Initialize filesystem to device mappings."""
    self._mapFSpathDev = {}
    fsinfo = FilesystemInfo()
    for fs in fsinfo.getFSlist():
        devpath = fsinfo.getFSdev(fs)
        dev = self._getUniqueDev(devpath)
        if dev is not None:
            self._mapFSpathDev[fs] = dev
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _initSwapInfo(self): """Initialize swap partition to device mappings."""
    self._swapList = []
    sysinfo = SystemInfo()
    for (swap, attrs) in sysinfo.getSwapStats().iteritems():
        if attrs['type'] == 'partition':
            dev = self._getUniqueDev(swap)
            if dev is not None:
                self._swapList.append(dev)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _initDevClasses(self): """Sort block devices into lists depending on device class and initialize device type map and partition map."""
    self._devClassTree = {}
    self._partitionTree = {}
    self._mapDevType = {}
    basedevs = []
    otherdevs = []
    if self._mapMajorDevclass is None:
        self._initBlockMajorMap()
    for dev in self._diskStats:
        stats = self._diskStats[dev]
        devclass = self._mapMajorDevclass.get(stats['major'])
        if devclass is not None:
            devdir = os.path.join(sysfsBlockdevDir, dev)
            if os.path.isdir(devdir):
                if not self._devClassTree.has_key(devclass):
                    self._devClassTree[devclass] = []
                self._devClassTree[devclass].append(dev)
                self._mapDevType[dev] = devclass
                basedevs.append(dev)
            else:
                otherdevs.append(dev)
    basedevs.sort(key=len, reverse=True)
    otherdevs.sort(key=len, reverse=True)
    idx = 0
    for partdev in otherdevs:
        while len(basedevs[idx]) > len(partdev):
            idx += 1
        for dev in basedevs[idx:]:
            if re.match("%s(\d+|p\d+)$" % dev, partdev):
                if not self._partitionTree.has_key(dev):
                    self._partitionTree[dev] = []
                self._partitionTree[dev].append(partdev)
                self._mapDevType[partdev] = 'part'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getDevType(self, dev): """Returns type of device dev. @return: Device type as string. """
    if self._devClassTree is None:
        self._initDevClasses()
    return self._mapDevType.get(dev)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getPartitionList(self): """Returns list of partitions. @return: List of (disk,partition) pairs. """
    if self._partList is None:
        self._partList = []
        for (disk, parts) in self.getPartitionDict().iteritems():
            for part in parts:
                self._partList.append((disk, part))
    return self._partList
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getContainerStats(self, limit=None, marker=None): """Returns Rackspace Cloud Files usage stats for containers. @param limit: Number of containers to return. @param marker: Return only results whose name is greater than marker. @return: Dictionary of container stats indexed by container name. """
    stats = {}
    for row in self._conn.list_containers_info(limit, marker):
        stats[row['name']] = {'count': row['count'], 'size': row['bytes']}
    return stats
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _connect(self): """Connect to Squid Proxy Manager interface."""
    if sys.version_info[:2] < (2, 6):
        self._conn = httplib.HTTPConnection(self._host, self._port)
    else:
        self._conn = httplib.HTTPConnection(self._host, self._port,
                                            False, defaultTimeout)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _retrieve(self, map): """Query Squid Proxy Server Manager Interface for stats. @param map: Statistics map name. @return: Dictionary of query results. """
    self._conn.request('GET', "cache_object://%s/%s" % (self._host, map),
                       None, self._httpHeaders)
    rp = self._conn.getresponse()
    if rp.status == 200:
        data = rp.read()
        return data
    else:
        raise Exception("Retrieval of stats from Squid Proxy Server "
                        "on host %s and port %s failed.\n"
                        "HTTP - Status: %s Reason: %s"
                        % (self._host, self._port, rp.status, rp.reason))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parseCounters(self, data): """Parse simple stats list of key, value pairs. @param data: Multiline data with one key-value pair in each line. @return: Dictionary of stats. """
    info_dict = util.NestedDict()
    for line in data.splitlines():
        mobj = re.match('^\s*([\w\.]+)\s*=\s*(\S.*)$', line)
        if mobj:
            (key, value) = mobj.groups()
            klist = key.split('.')
            info_dict.set_nested(klist, parse_value(value))
    return info_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parseSections(self, data): """Parse data and separate sections. Returns dictionary that maps section name to section data. @param data: Multiline data. @return: Dictionary that maps section names to section data. """
    section_dict = {}
    lines = data.splitlines()
    idx = 0
    numlines = len(lines)
    section = None
    while idx < numlines:
        line = lines[idx]
        idx += 1
        mobj = re.match('^(\w[\w\s\(\)]+[\w\)])\s*:$', line)
        if mobj:
            section = mobj.group(1)
            section_dict[section] = []
        else:
            mobj = re.match('(\t|\s)\s*(\w.*)$', line)
            if mobj:
                section_dict[section].append(mobj.group(2))
            else:
                mobj = re.match('^(\w[\w\s\(\)]+[\w\)])\s*:\s*(\S.*)$', line)
                if mobj:
                    section = None
                    if not section_dict.has_key(section):
                        section_dict[section] = []
                    section_dict[section].append(line)
                else:
                    if not section_dict.has_key('PARSEERROR'):
                        section_dict['PARSEERROR'] = []
                    section_dict['PARSEERROR'].append(line)
    return section_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getMenu(self): """Get manager interface section list from Squid Proxy Server @return: List of tuples (section, description, type) """
    data = self._retrieve('')
    info_list = []
    for line in data.splitlines():
        mobj = re.match('^\s*(\S.*\S)\s*\t\s*(\S.*\S)\s*\t\s*(\S.*\S)$', line)
        if mobj:
            info_list.append(mobj.groups())
    return info_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getIfaceStats(self): """Return dictionary of Traffic Stats for each Wanpipe Interface. @return: Nested dictionary of statistics for each interface. """
    ifInfo = netiface.NetIfaceInfo()
    ifStats = ifInfo.getIfStats()
    info_dict = {}
    for ifname in ifStats:
        if re.match('^w\d+g\d+$', ifname):
            info_dict[ifname] = ifStats[ifname]
    return info_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _connect(self): """Connect to FreeSWITCH ESL Interface."""
    try:
        self._eslconn = ESL.ESLconnection(self._eslhost, str(self._eslport),
                                          self._eslpass)
    except:
        pass
    if not self._eslconn.connected():
        raise Exception(
            "Connection to FreeSWITCH ESL Interface on host %s and port %d failed."
            % (self._eslhost, self._eslport))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _execCmd(self, cmd, args): """Execute command and return result body as list of lines. @param cmd: Command string. @param args: Comand arguments string. @return: Result dictionary. """
    output = self._eslconn.api(cmd, args)
    if output:
        body = output.getBody()
        if body:
            return body.splitlines()
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ping(self): """Ping Redis Server and return Round-Trip-Time in seconds. @return: Round-trip-time in seconds as float. """
    start = time.time()
    self._conn.ping()
    return (time.time() - start)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list2pd(all_data, subjindex=None, listindex=None): """ Makes multi-indexed dataframe of subject data Parameters all_data : list of lists of strings strings are either all presented or all recalled items, in the order of presentation or recall *should also work for presented / recalled ints and floats, if desired Returns subs_list_of_dfs : multi-indexed dataframe dataframe of subject data (presented or recalled words/items), indexed by subject and list number cell populated by the term presented or recalled in the position indicated by the column number """
    # set default index if it is not defined
    # max_nlists = max(map(lambda x: len(x), all_data))
    listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex
    subjindex = [idx for idx, subj in enumerate(all_data)] if not subjindex else subjindex

    def make_multi_index(listindex, sub_num):
        return pd.MultiIndex.from_tuples([(sub_num, lst) for lst in listindex],
                                         names=['Subject', 'List'])

    listindex = list(listindex)
    subjindex = list(subjindex)

    subs_list_of_dfs = [pd.DataFrame(sub_data,
                                     index=make_multi_index(listindex[sub_num],
                                                            subjindex[sub_num]))
                        for sub_num, sub_data in enumerate(all_data)]

    return pd.concat(subs_list_of_dfs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def recmat2egg(recmat, list_length=None): """ Creates egg data object from zero-indexed recall matrix Parameters recmat : list of lists (subs) of lists (encoding lists) of ints or 2D numpy array recall matrix representing serial positions of freely recalled words \ list_length : int The length of each list (e.g. 16) Returns egg : Egg data object egg data object computed from the recall matrix """
    from .egg import Egg as Egg

    pres = [[[str(word) for word in list(range(0, list_length))]
             for reclist in recsub] for recsub in recmat]
    rec = [[[str(word) for word in reclist if word is not None]
            for reclist in recsub] for recsub in recmat]

    return Egg(pres=pres, rec=rec)
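A small illustration of the expected input, assuming quail's Egg class is importable: one subject who studied two 4-item lists and recalled items by their zero-indexed serial positions (None marks an empty recall slot):

    recmat = [[[3, 1, 0, None],
               [2, 0, None, None]]]
    egg = recmat2egg(recmat, list_length=4)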
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def default_dist_funcs(dist_funcs, feature_example): """ Fills in default distance metrics for fingerprint analyses """
    if dist_funcs is None:
        dist_funcs = dict()

    for key in feature_example:
        if key in dist_funcs:
            pass
        if key == 'item':
            pass
        elif isinstance(feature_example[key], (six.string_types, six.binary_type)):
            dist_funcs[key] = 'match'
        elif isinstance(feature_example[key], (int, np.integer, float)) or \
                all([isinstance(i, (int, np.integer, float)) for i in feature_example[key]]):
            dist_funcs[key] = 'euclidean'

    return dist_funcs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def stack_eggs(eggs, meta='concatenate'):
    '''
    Takes a list of eggs, stacks them and reindexes the subject number

    Parameters
    ----------
    eggs : list of Egg data objects
        A list of Eggs that you want to combine

    meta : string
        Determines how the meta data of each Egg combines. Default is 'concatenate'.
        'concatenate' concatenates keys in meta data dictionary shared between eggs,
        and copies non-overlapping keys.
        'separate' keeps the Eggs' meta data dictionaries separate, with each as a
        list index in the stacked meta data.

    Returns
    ----------
    new_egg : Egg data object
        A mega egg comprised of the input eggs stacked together
    '''
    from .egg import Egg

    pres = [egg.pres.loc[sub, :].values.tolist() for egg in eggs
            for sub in egg.pres.index.levels[0].values.tolist()]
    rec = [egg.rec.loc[sub, :].values.tolist() for egg in eggs
           for sub in egg.rec.index.levels[0].values.tolist()]

    if meta == 'concatenate':
        new_meta = {}
        for egg in eggs:
            for key in egg.meta:
                if key in new_meta:
                    new_meta[key] = list(new_meta[key])
                    new_meta[key].extend(egg.meta.get(key))
                else:
                    new_meta[key] = egg.meta.get(key)
    elif meta == 'separate':
        new_meta = list(egg.meta for egg in eggs)

    return Egg(pres=pres, rec=rec, meta=new_meta)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def crack_egg(egg, subjects=None, lists=None):
    '''
    Takes an egg and returns a subset of the subjects or lists

    Parameters
    ----------
    egg : Egg data object
        Egg that you want to crack

    subjects : list
        List of subject idxs

    lists : list
        List of lists idxs

    Returns
    ----------
    new_egg : Egg data object
        A sliced egg, good on a salad
    '''
    from .egg import Egg

    if hasattr(egg, 'features'):
        all_have_features = egg.features is not None
    else:
        all_have_features = False
    opts = {}

    if subjects is None:
        subjects = egg.pres.index.levels[0].values.tolist()
    elif type(subjects) is not list:
        subjects = [subjects]

    if lists is None:
        lists = egg.pres.index.levels[1].values.tolist()
    elif type(lists) is not list:
        lists = [lists]

    idx = pd.IndexSlice
    pres = egg.pres.loc[idx[subjects, lists], egg.pres.columns]
    rec = egg.rec.loc[idx[subjects, lists], egg.rec.columns]

    pres = [pres.loc[sub, :].values.tolist() for sub in subjects]
    rec = [rec.loc[sub, :].values.tolist() for sub in subjects]

    if all_have_features:
        features = egg.features.loc[idx[subjects, lists], egg.features.columns]
        opts['features'] = [features.loc[sub, :].values.tolist() for sub in subjects]

    return Egg(pres=pres, rec=rec, **opts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def df2list(df): """ Convert a MultiIndex df to list Parameters df : pandas.DataFrame A MultiIndex DataFrame where the first level is subjects and the second level is lists (e.g. egg.pres) Returns lst : a list of lists of lists of values The input df reformatted as a list """
    subjects = df.index.levels[0].values.tolist()
    lists = df.index.levels[1].values.tolist()
    idx = pd.IndexSlice
    df = df.loc[idx[subjects, lists], df.columns]
    lst = [df.loc[sub, :].values.tolist() for sub in subjects]
    return lst
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_egg(egg): """Parses an egg and returns fields"""
    pres_list = egg.get_pres_items().values[0]
    rec_list = egg.get_rec_items().values[0]
    feature_list = egg.get_pres_features().values[0]
    dist_funcs = egg.dist_funcs
    return pres_list, rec_list, feature_list, dist_funcs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge_pres_feats(pres, features): """ Helper function to merge pres and features to support legacy features argument """
    sub = []
    for psub, fsub in zip(pres, features):
        exp = []
        for pexp, fexp in zip(psub, fsub):
            lst = []
            for p, f in zip(pexp, fexp):
                p.update(f)
                lst.append(p)
            exp.append(lst)
        sub.append(exp)
    return sub
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def r2z(r): """ Function that calculates the Fisher z-transformation Parameters r : int or ndarray Correlation value Returns result : int or ndarray Fishers z transformed correlation value """
with np.errstate(invalid='ignore', divide='ignore'): return 0.5 * (np.log(1 + r) - np.log(1 - r))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def z2r(z): """ Function that calculates the inverse Fisher z-transformation Parameters z : int or ndarray Fishers z transformed correlation value Returns result : int or ndarray Correlation value """
with np.errstate(invalid='ignore', divide='ignore'): return (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)
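The two transforms above are inverses of each other; a quick NumPy sanity check using only the formulas as written:

    import numpy as np
    r = np.array([-0.9, 0.0, 0.5])
    z = 0.5 * (np.log(1 + r) - np.log(1 - r))            # r2z
    r_back = (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)   # z2r
    assert np.allclose(r, r_back)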
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shuffle_egg(egg): """ Shuffle an Egg's recalls"""
    from .egg import Egg

    pres, rec, features, dist_funcs = parse_egg(egg)

    if pres.ndim == 1:
        pres = pres.reshape(1, pres.shape[0])
        rec = rec.reshape(1, rec.shape[0])
        features = features.reshape(1, features.shape[0])

    for ilist in range(rec.shape[0]):
        idx = np.random.permutation(rec.shape[1])
        rec[ilist, :] = rec[ilist, idx]

    return Egg(pres=pres, rec=rec, features=features, dist_funcs=dist_funcs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getUptime(self): """Return system uptime in seconds. @return: Float that represents uptime in seconds. """
    try:
        fp = open(uptimeFile, 'r')
        line = fp.readline()
        fp.close()
    except:
        raise IOError('Failed reading stats from file: %s' % uptimeFile)
    return float(line.split()[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getLoadAvg(self): """Return system Load Average. @return: List of 1 min, 5 min and 15 min Load Average figures. """
    try:
        fp = open(loadavgFile, 'r')
        line = fp.readline()
        fp.close()
    except:
        raise IOError('Failed reading stats from file: %s' % loadavgFile)
    arr = line.split()
    if len(arr) >= 3:
        return [float(col) for col in arr[:3]]
    else:
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getCPUuse(self): """Return cpu time utilization in seconds. @return: Dictionary of stats. """
    hz = os.sysconf('SC_CLK_TCK')
    info_dict = {}
    try:
        fp = open(cpustatFile, 'r')
        line = fp.readline()
        fp.close()
    except:
        raise IOError('Failed reading stats from file: %s' % cpustatFile)
    headers = ['user', 'nice', 'system', 'idle', 'iowait',
               'irq', 'softirq', 'steal', 'guest']
    arr = line.split()
    if len(arr) > 1 and arr[0] == 'cpu':
        return dict(zip(headers[0:len(arr)], [(float(t) / hz) for t in arr[1:]]))
    return info_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getProcessStats(self): """Return stats for running and blocked processes, forks, context switches and interrupts. @return: Dictionary of stats. """
    info_dict = {}
    try:
        fp = open(cpustatFile, 'r')
        data = fp.read()
        fp.close()
    except:
        raise IOError('Failed reading stats from file: %s' % cpustatFile)
    for line in data.splitlines():
        arr = line.split()
        if len(arr) > 1 and arr[0] in ('ctxt', 'intr', 'softirq', 'processes',
                                       'procs_running', 'procs_blocked'):
            info_dict[arr[0]] = arr[1]
    return info_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getMemoryUse(self): """Return stats for memory utilization. @return: Dictionary of stats. """
    info_dict = {}
    try:
        fp = open(meminfoFile, 'r')
        data = fp.read()
        fp.close()
    except:
        raise IOError('Failed reading stats from file: %s' % meminfoFile)
    for line in data.splitlines():
        mobj = re.match('^(.+):\s*(\d+)\s*(\w+|)\s*$', line)
        if mobj:
            if mobj.group(3).lower() == 'kb':
                mult = 1024
            else:
                mult = 1
            info_dict[mobj.group(1)] = int(mobj.group(2)) * mult
    return info_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getVMstats(self): """Return stats for Virtual Memory Subsystem. @return: Dictionary of stats. """
    info_dict = {}
    try:
        fp = open(vmstatFile, 'r')
        data = fp.read()
        fp.close()
    except:
        raise IOError('Failed reading stats from file: %s' % vmstatFile)
    for line in data.splitlines():
        cols = line.split()
        if len(cols) == 2:
            info_dict[cols[0]] = cols[1]
    return info_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _connect(self): """Connect to Memcached."""
    if self._socketFile is not None:
        if not os.path.exists(self._socketFile):
            raise Exception("Socket file (%s) for Memcached Instance not found."
                            % self._socketFile)
    try:
        if self._timeout is not None:
            self._conn = util.Telnet(self._host, self._port,
                                     self._socketFile, self._timeout)
        else:
            self._conn = util.Telnet(self._host, self._port, self._socketFile)
    except:
        raise Exception("Connection to %s failed." % self._instanceName)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _sendStatCmd(self, cmd): """Send stat command to Memcached Server and return response lines. @param cmd: Command string. @return: Array of strings. """
    try:
        self._conn.write("%s\r\n" % cmd)
        regex = re.compile('^(END|ERROR)\r\n', re.MULTILINE)
        (idx, mobj, text) = self._conn.expect([regex, ], self._timeout)  #@UnusedVariable
    except:
        raise Exception("Communication with %s failed" % self._instanceName)
    if mobj is not None:
        if mobj.group(1) == 'END':
            return text.splitlines()[:-1]
        elif mobj.group(1) == 'ERROR':
            raise Exception("Protocol error in communication with %s."
                            % self._instanceName)
    else:
        raise Exception("Connection with %s timed out." % self._instanceName)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parseStats(self, lines, parse_slabs = False): """Parse stats output from memcached and return dictionary of stats- @param lines: Array of lines of input text. @param parse_slabs: Parse slab stats if True. @return: Stats dictionary. """
    info_dict = {}
    info_dict['slabs'] = {}
    for line in lines:
        mobj = re.match('^STAT\s(\w+)\s(\S+)$', line)
        if mobj:
            info_dict[mobj.group(1)] = util.parse_value(mobj.group(2), True)
            continue
        elif parse_slabs:
            mobj = re.match('STAT\s(\w+:)?(\d+):(\w+)\s(\S+)$', line)
            if mobj:
                (slab, key, val) = mobj.groups()[-3:]
                if not info_dict['slabs'].has_key(slab):
                    info_dict['slabs'][slab] = {}
                info_dict['slabs'][slab][key] = util.parse_value(val, True)
    return info_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def correlation(a, b):
    "Returns correlation distance between a and b"
    if isinstance(a, list):
        a = np.array(a)
    if isinstance(b, list):
        b = np.array(b)
    a = a.reshape(1, -1)
    b = b.reshape(1, -1)
    return cdist(a, b, 'correlation')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def euclidean(a, b):
    "Returns euclidean distance between a and b"
    return np.linalg.norm(np.subtract(a, b))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parseProcCmd(self, fields=('pid', 'user', 'cmd',), threads=False): """Execute ps command with custom output format with columns from fields and return result as a nested list. The Standard Format Specifiers from ps man page must be used for the fields parameter. @param fields: List of fields included in the output. Default: pid, user, cmd @param threads: If True, include threads in output. @return: List of headers and list of rows and columns. """
    args = []
    headers = [f.lower() for f in fields]
    args.append('--no-headers')
    args.append('-e')
    if threads:
        args.append('-T')
    field_ranges = []
    fmt_strs = []
    start = 0
    for header in headers:
        field_width = psFieldWidth.get(header, psDefaultFieldWidth)
        fmt_strs.append('%s:%d' % (header, field_width))
        end = start + field_width + 1
        field_ranges.append((start, end))
        start = end
    args.append('-o')
    args.append(','.join(fmt_strs))
    lines = self.execProcCmd(*args)
    if len(lines) > 0:
        stats = []
        for line in lines:
            cols = []
            for (start, end) in field_ranges:
                cols.append(line[start:end].strip())
            stats.append(cols)
        return {'headers': headers, 'stats': stats}
    else:
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getProcList(self, fields=('pid', 'user', 'cmd',), threads=False, **kwargs): """Execute ps command with custom output format with columns columns from fields, select lines using the filters defined by kwargs and return result as a nested list. The Standard Format Specifiers from ps man page must be used for the fields parameter. @param fields: Fields included in the output. Default: pid, user, cmd @param threads: If True, include threads in output. @param **kwargs: Keyword variables are used for filtering the results depending on the values of the columns. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. @return: List of headers and list of rows and columns. """
    field_list = list(fields)
    for key in kwargs:
        col = re.sub('(_ic)?(_regex)?$', '', key)
        if not col in field_list:
            field_list.append(col)
    pinfo = self.parseProcCmd(field_list, threads)
    if pinfo:
        if len(kwargs) > 0:
            pfilter = util.TableFilter()
            pfilter.registerFilters(**kwargs)
            stats = pfilter.applyFilters(pinfo['headers'], pinfo['stats'])
            return {'headers': pinfo['headers'], 'stats': stats}
        else:
            return pinfo
    else:
        return None
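A hedged usage sketch for the filtering keywords described above; the enclosing class is not named in this excerpt, so ProcessInfo below is a hypothetical handle to it:

    proc = ProcessInfo()
    nginx_procs = proc.getProcList(fields=('pid', 'user', 'cmd'),
                                   user='www-data',      # exact match on user column
                                   cmd_regex='^nginx')   # regex match on cmd column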
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getProcDict(self, fields=('user', 'cmd',), threads=False, **kwargs): """Execute ps command with custom output format with columns format with columns from fields, and return result as a nested dictionary with the key PID or SPID. The Standard Format Specifiers from ps man page must be used for the fields parameter. @param fields: Fields included in the output. Default: user, cmd (PID or SPID column is included by default.) @param threads: If True, include threads in output. @param **kwargs: Keyword variables are used for filtering the results depending on the values of the columns. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. @return: Nested dictionary indexed by: PID for process info. SPID for thread info. """
    stats = {}
    field_list = list(fields)
    num_cols = len(field_list)
    if threads:
        key = 'spid'
    else:
        key = 'pid'
    try:
        key_idx = field_list.index(key)
    except ValueError:
        field_list.append(key)
        key_idx = len(field_list) - 1
    result = self.getProcList(field_list, threads, **kwargs)
    if result is not None:
        headers = result['headers'][:num_cols]
        lines = result['stats']
        if len(lines) > 1:
            for cols in lines:
                stats[cols[key_idx]] = dict(zip(headers, cols[:num_cols]))
        return stats
    else:
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getProcStatStatus(self, threads=False, **kwargs): """Return process counts per status and priority. @param **kwargs: Keyword variables are used for filtering the results depending on the values of the columns. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. @return: Dictionary of process counters. """
    procs = self.getProcList(['stat', ], threads=threads, **kwargs)
    status = dict(zip(procStatusNames.values(), [0, ] * len(procStatusNames)))
    prio = {'high': 0, 'low': 0, 'norm': 0, 'locked_in_mem': 0}
    total = 0
    locked_in_mem = 0
    if procs is not None:
        for cols in procs['stats']:
            col_stat = cols[0]
            status[procStatusNames[col_stat[0]]] += 1
            if '<' in col_stat[1:]:
                prio['high'] += 1
            elif 'N' in col_stat[1:]:
                prio['low'] += 1
            else:
                prio['norm'] += 1
            if 'L' in col_stat[1:]:
                locked_in_mem += 1
            total += 1
    return {'status': status, 'prio': prio,
            'locked_in_mem': locked_in_mem, 'total': total}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def muninMain(pluginClass, argv=None, env=None, debug=False): """Main Block for Munin Plugins. @param pluginClass: Child class of MuninPlugin that implements plugin. @param argv: List of command line arguments to Munin Plugin. @param env: Dictionary of environment variables passed to Munin Plugin. @param debug: Print debugging messages if True. (Default: False) """
    if argv is None:
        argv = sys.argv
    if env is None:
        env = os.environ
    debug = debug or env.has_key('MUNIN_DEBUG')
    if len(argv) > 1 and argv[1] == 'autoconf':
        autoconf = True
    else:
        autoconf = False
    try:
        plugin = pluginClass(argv, env, debug)
        ret = plugin.run()
        if ret:
            return 0
        else:
            return 1
    except Exception:
        print >> sys.stderr, "ERROR: %s" % repr(sys.exc_info()[1])
        if autoconf:
            print "no"
        if debug:
            raise
        else:
            if autoconf:
                return 0
            else:
                return 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fixLabel(label, maxlen, delim=None, repl='', truncend=True): """Truncate long graph and field labels. @param label: Label text. @param maxlen: Maximum field label length in characters. No maximum field label length is enforced by default. @param delim: Delimiter for field labels field labels longer than maxlen will preferably be truncated at delimiter. @param repl: Replacement string for truncated part. @param truncend: Truncate the end of label name if True. (Default) The beginning part of label will be truncated if False. """
    if len(label) <= maxlen:
        return label
    else:
        maxlen -= len(repl)
        if delim is not None:
            if truncend:
                end = label.rfind(delim, 0, maxlen)
                if end > 0:
                    return label[:end+1] + repl
            else:
                start = label.find(delim, len(label) - maxlen)
                if start > 0:
                    return repl + label[start:]
        if truncend:
            return label[:maxlen] + repl
        else:
            return repl + label[-maxlen:]
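Two illustrative calls, with results following directly from the logic above:

    fixLabel('requests_per_second_total', 15, repl='..')
    # -> 'requests_per_..'  (plain end-truncation to at most 15 characters)
    fixLabel('eth0.rx.bytes.per.second', 15, delim='.', repl='..')
    # -> 'eth0.rx...'       (cut at the last '.' that still fits, then repl appended)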
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _getGraph(self, graph_name, fail_noexist=False): """Private method for returning graph object with name graph_name. @param graph_name: Graph Name @param fail_noexist: If true throw exception if there is no graph with name graph_name. @return: Graph Object or None """
    graph = self._graphDict.get(graph_name)
    if fail_noexist and graph is None:
        raise AttributeError("Invalid graph name: %s" % graph_name)
    else:
        return graph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _getSubGraph(self, parent_name, graph_name, fail_noexist=False): """Private method for returning subgraph object with name graph_name and parent graph with name parent_name. @param parent_name: Root Graph Name @param graph_name: Subgraph Name @param fail_noexist: If true throw exception if there is no subgraph with name graph_name. @return: Graph Object or None """
    if not self.isMultigraph:
        raise AttributeError("Simple Munin Plugins cannot have subgraphs.")
    if self._graphDict.has_key(parent_name):
        subgraphs = self._subgraphDict.get(parent_name)
        if subgraphs is not None:
            subgraph = subgraphs.get(graph_name)
            if fail_noexist and subgraph is None:
                raise AttributeError("Invalid subgraph name %s "
                                     "for graph %s." % (graph_name, parent_name))
            else:
                return subgraph
        else:
            raise AttributeError("Parent graph %s has no subgraphs."
                                 % (parent_name,))
    else:
        raise AttributeError("Invalid parent graph name %s "
                             "for subgraph %s." % (parent_name, graph_name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _getMultigraphID(self, graph_name, subgraph_name=None): """Private method for generating Multigraph ID from graph name and subgraph name. @param graph_name: Graph Name. @param subgraph_name: Subgraph Name. @return: Multigraph ID. """
    if self.isMultiInstance and self._instanceName is not None:
        if subgraph_name is None:
            return "%s_%s" % (graph_name, self._instanceName)
        else:
            return "%s_%s.%s_%s" % (graph_name, self._instanceName,
                                    subgraph_name, self._instanceName)
    else:
        if subgraph_name is None:
            return graph_name
        else:
            return "%s.%s" % (graph_name, subgraph_name)
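Examples of the IDs this produces, assuming isMultiInstance is True and _instanceName is 'db1':

    # _getMultigraphID('pg_connections')            -> 'pg_connections_db1'
    # _getMultigraphID('pg_connections', 'active')  -> 'pg_connections_db1.active_db1'
    # With isMultiInstance False the same calls return 'pg_connections'
    # and 'pg_connections.active'.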
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatConfig(self, conf_dict): """Formats configuration dictionary from Munin Graph and returns multi-line value entries for the plugin config cycle. @param conf_dict: Configuration dictionary. @return: Multi-line text. """
    confs = []
    graph_dict = conf_dict['graph']
    field_list = conf_dict['fields']

    # Order and format Graph Attributes
    title = graph_dict.get('title')
    if title is not None:
        if self.isMultiInstance and self._instanceLabel is not None:
            if self._instanceLabelType == 'suffix':
                confs.append("graph_%s %s - %s" % ('title', title, self._instanceLabel,))
            elif self._instanceLabelType == 'prefix':
                confs.append("graph_%s %s - %s" % ('title', self._instanceLabel, title,))
        else:
            confs.append("graph_%s %s" % ('title', title))
    for key in ('category', 'vlabel', 'info', 'args', 'period', 'scale',
                'total', 'order', 'printf', 'width', 'height'):
        val = graph_dict.get(key)
        if val is not None:
            if isinstance(val, bool):
                if val:
                    val = "yes"
                else:
                    val = "no"
            confs.append("graph_%s %s" % (key, val))

    # Order and Format Field Attributes
    for (field_name, field_attrs) in field_list:
        for key in ('label', 'type', 'draw', 'info', 'extinfo', 'colour',
                    'negative', 'graph', 'min', 'max', 'cdef', 'line',
                    'warning', 'critical'):
            val = field_attrs.get(key)
            if val is not None:
                if isinstance(val, bool):
                    if val:
                        val = "yes"
                    else:
                        val = "no"
                confs.append("%s.%s %s" % (field_name, key, val))
    return "\n".join(confs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _formatVals(self, val_list): """Formats value list from Munin Graph and returns multi-line value entries for the plugin fetch cycle. @param val_list: List of name-value pairs. @return: Multi-line text. """
    vals = []
    for (name, val) in val_list:
        if val is not None:
            if isinstance(val, float):
                vals.append("%s.value %f" % (name, val))
            else:
                vals.append("%s.value %s" % (name, val))
        else:
            vals.append("%s.value U" % (name,))
    return "\n".join(vals)
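An example of the fetch-cycle text this produces (values follow the %f / %s / U branches above):

    # _formatVals([('load1', 0.15), ('busy', 3), ('idle', None)])
    # ->
    # load1.value 0.150000
    # busy.value 3
    # idle.value U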
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def envGet(self, name, default=None, conv=None): """Return value for environment variable or None. @param name: Name of environment variable. @param default: Default value if variable is undefined. @param conv: Function for converting value to desired type. @return: Value of environment variable. """
if self._env.has_key(name):
        if conv is not None:
            return conv(self._env.get(name))
        else:
            return self._env.get(name)
    else:
        return default
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def saveState(self, stateObj): """Utility method to save plugin state stored in stateObj to persistent storage to permit access to previous state in subsequent plugin runs. Any object that can be pickled and unpickled can be used to store the plugin state. @param stateObj: Object that stores plugin state. """
try:
        fp = open(self._stateFile, 'w')
        pickle.dump(stateObj, fp)
    except:
        raise IOError("Failure in storing plugin state in file: %s"
                      % self._stateFile)
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def restoreState(self): """Utility method to restore plugin state from persistent storage to permit access to previous plugin state. @return: Object that stores plugin state. """
if os.path.exists(self._stateFile):
        try:
            fp = open(self._stateFile, 'r')
            stateObj = pickle.load(fp)
        except:
            raise IOError("Failure in reading plugin state from file: %s"
                          % self._stateFile)
        return stateObj
    return None
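A hedged round-trip sketch of how saveState() and restoreState() are typically paired to carry a counter across plugin runs; the state file path and the counter name are hypothetical:

import os
import pickle
import time

STATE_FILE = '/tmp/demo_plugin.state'          # hypothetical location

def save_state(state_obj):
    fp = open(STATE_FILE, 'w')
    pickle.dump(state_obj, fp)
    fp.close()

def restore_state():
    if os.path.exists(STATE_FILE):
        fp = open(STATE_FILE, 'r')
        state_obj = pickle.load(fp)
        fp.close()
        return state_obj
    return None

prev = restore_state() or {}
curr = {'timestamp': time.time(), 'requests': 12345}   # made-up sample counter
if prev:
    # Work done since the previous run, e.g. for rate calculations.
    print "requests since last run: %d" % (curr['requests'] - prev.get('requests', 0))
save_state(curr)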
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def appendSubgraph(self, parent_name, graph_name, graph): """Utility method to associate Subgraph Instance to Root Graph Instance. This utility method is for use in constructor of child classes for associating a MuninGraph Subgraph instance with a Root Graph instance. @param parent_name: Root Graph Name @param graph_name: Subgraph Name @param graph: MuninGraph Instance """
if not self.isMultigraph:
        raise AttributeError("Simple Munin Plugins cannot have subgraphs.")
    if self._graphDict.has_key(parent_name):
        if not self._subgraphDict.has_key(parent_name):
            self._subgraphDict[parent_name] = {}
            self._subgraphNames[parent_name] = []
        self._subgraphDict[parent_name][graph_name] = graph
        self._subgraphNames[parent_name].append(graph_name)
    else:
        raise AttributeError("Invalid parent graph name %s used for subgraph %s."
                             % (parent_name, graph_name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setSubgraphVal(self, parent_name, graph_name, field_name, val): """Set Value for Field in Subgraph. This method is for use in the retrieveVals() method of child classes. @param parent_name: Root Graph Name @param graph_name: Subgraph Name @param field_name: Field Name. @param val: Value for field. """
subgraph = self._getSubGraph(parent_name, graph_name, True)
    if subgraph.hasField(field_name):
        subgraph.setVal(field_name, val)
    else:
        raise AttributeError("Invalid field name %s for subgraph %s "
                             "of parent graph %s."
                             % (field_name, graph_name, parent_name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getSubgraphList(self, parent_name): """Returns list of names of subgraphs for Root Graph with name parent_name. @param parent_name: Name of Root Graph. @return: List of subgraph names. """
if not self.isMultigraph:
        raise AttributeError("Simple Munin Plugins cannot have subgraphs.")
    if self._graphDict.has_key(parent_name):
        return self._subgraphNames.get(parent_name) or []
    else:
        raise AttributeError("Invalid parent graph name %s." % (parent_name,))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def graphHasField(self, graph_name, field_name): """Return true if graph with name graph_name has field with name field_name. @param graph_name: Graph Name @param field_name: Field Name. @return: Boolean """
graph = self._getGraph(graph_name, True)
    return graph.hasField(field_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def subGraphHasField(self, parent_name, graph_name, field_name): """Return true if subgraph with name graph_name with parent graph with name parent_name has field with name field_name. @param parent_name: Root Graph Name @param graph_name: Subgraph Name @param field_name: Field Name. @return: Boolean """
subgraph = self._getSubGraph(parent_name, graph_name, True)
    return subgraph.hasField(field_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getGraphFieldList(self, graph_name): """Returns list of names of fields for graph with name graph_name. @param graph_name: Graph Name @return: List of field names for graph. """
graph = self._getGraph(graph_name, True)
    return graph.getFieldList()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getGraphFieldCount(self, graph_name): """Returns number of fields for graph with name graph_name. @param graph_name: Graph Name @return: Number of fields for graph. """
graph = self._getGraph(graph_name, True)
    return graph.getFieldCount()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getSubgraphFieldList(self, parent_name, graph_name): """Returns list of names of fields for subgraph with name graph_name and parent graph with name parent_name. @param parent_name: Root Graph Name @param graph_name: Subgraph Name @return: List of field names for subgraph. """
graph = self._getSubGraph(parent_name, graph_name, True)
    return graph.getFieldList()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getSubgraphFieldCount(self, parent_name, graph_name): """Returns number of fields for subgraph with name graph_name and parent graph with name parent_name. @param parent_name: Root Graph Name @param graph_name: Subgraph Name @return: Number of fields for subgraph. """
graph = self._getSubGraph(parent_name, graph_name, True)
    return graph.getFieldCount()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config(self): """Implements Munin Plugin Graph Configuration. Prints out configuration for graphs. Use as is. Not required to be overridden in child classes. The plugin will work correctly as long as the Munin Graph objects have been populated. """
for parent_name in self._graphNames:
        graph = self._graphDict[parent_name]
        if self.isMultigraph:
            print "multigraph %s" % self._getMultigraphID(parent_name)
        print self._formatConfig(graph.getConfig())
        print
    if (self.isMultigraph and self._nestedGraphs
        and self._subgraphDict and self._subgraphNames):
        for (parent_name, subgraph_names) in self._subgraphNames.iteritems():
            for graph_name in subgraph_names:
                graph = self._subgraphDict[parent_name][graph_name]
                print "multigraph %s" % self._getMultigraphID(parent_name,
                                                              graph_name)
                print self._formatConfig(graph.getConfig())
                print
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch(self): """Implements Munin Plugin Fetch Option. Prints out measured values. """
self.retrieveVals()
    for parent_name in self._graphNames:
        graph = self._graphDict[parent_name]
        if self.isMultigraph:
            print "multigraph %s" % self._getMultigraphID(parent_name)
        print self._formatVals(graph.getVals())
        print
    if (self.isMultigraph and self._nestedGraphs
        and self._subgraphDict and self._subgraphNames):
        for (parent_name, subgraph_names) in self._subgraphNames.iteritems():
            for graph_name in subgraph_names:
                graph = self._subgraphDict[parent_name][graph_name]
                print "multigraph %s" % self._getMultigraphID(parent_name,
                                                              graph_name)
                print self._formatVals(graph.getVals())
                print
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """Implements main entry point for plugin execution."""
if len(self._argv) > 1 and len(self._argv[1]) > 0:
        oper = self._argv[1]
    else:
        oper = 'fetch'
    if oper == 'fetch':
        ret = self.fetch()
    elif oper == 'config':
        ret = self.config()
        if ret and self._dirtyConfig:
            ret = self.fetch()
    elif oper == 'autoconf':
        ret = self.autoconf()
        if ret:
            print "yes"
        else:
            print "no"
        ret = True
    elif oper == 'suggest':
        ret = self.suggest()
    else:
        raise AttributeError("Invalid command argument: %s" % oper)
    return ret
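A minimal end-to-end sketch of a plugin built on this entry point. The import path, the plugin_name attribute, the MuninGraph constructor arguments and the appendGraph()/setGraphVal() helpers are assumptions based on the surrounding API and may need adjusting; the graph itself just reports the one-minute load average:

import sys
import os
from pymunin import MuninPlugin, MuninGraph    # import path is an assumption

class MuninLoadAvgDemoPlugin(MuninPlugin):

    plugin_name = 'loadavgdemo'                # assumed attribute name

    def __init__(self, argv=(), env=None, debug=False):
        MuninPlugin.__init__(self, argv, env, debug)
        graph = MuninGraph('Load Average (demo)', 'system',
                           vlabel='load', args='--base 1000 -l 0')
        graph.addField('load1', 'load1', type='GAUGE', draw='LINE2',
                       info='1 minute load average.')
        self.appendGraph('loadavgdemo', graph)

    def retrieveVals(self):
        # os.getloadavg() returns the 1, 5 and 15 minute load averages.
        self.setGraphVal('loadavgdemo', 'load1', os.getloadavg()[0])

if __name__ == '__main__':
    MuninLoadAvgDemoPlugin(sys.argv, os.environ).run()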
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addField(self, name, label, type=None, draw=None, info=None, #@ReservedAssignment extinfo=None, colour=None, negative=None, graph=None, min=None, max=None, cdef=None, line=None, #@ReservedAssignment warning=None, critical=None): """Add field to Munin Graph @param name: Field Name @param label: Field Label @param type: Stat Type: 'COUNTER' / 'ABSOLUTE' / 'DERIVE' / 'GAUGE' @param draw: Graph Type: 'AREA' / 'LINE{1,2,3}' / 'STACK' / 'LINESTACK{1,2,3}' / 'AREASTACK' @param info: Detailed Field Info @param extinfo: Extended Field Info @param colour: Field Colour @param negative: Mirror Value @param graph: Draw on Graph - True / False (Default: True) @param min: Minimum Valid Value @param max: Maximum Valid Value @param cdef: CDEF @param line: Adds horizontal line at value defined for field. @param warning: Warning Value @param critical: Critical Value """
if self._autoFixNames:
        name = self._fixName(name)
        if negative is not None:
            negative = self._fixName(negative)
    self._fieldAttrDict[name] = dict(((k, v) for (k, v) in locals().iteritems()
                                      if (v is not None and k not in ('self',))))
    self._fieldNameList.append(name)
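A short usage sketch of the field lifecycle on a single graph; the MuninGraph constructor arguments are assumed to mirror the graph attributes handled by getConfig(), and the field names are invented:

from pymunin import MuninGraph                 # import path is an assumption

g = MuninGraph('Network Traffic (demo)', 'network',
               vlabel='bytes/sec', args='--base 1024 -l 0')
g.addField('rx', 'rx', type='DERIVE', draw='AREA', min=0,
           info='Bytes received per second.')
g.addField('tx', 'tx', type='DERIVE', draw='LINE2', min=0,
           info='Bytes transmitted per second.')
g.setVal('rx', 123456)
g.setVal('tx', 654321)
# g.getConfig() -> {'graph': {...}, 'fields': [('rx', {...}), ('tx', {...})]}
# g.getVals()   -> [('rx', 123456), ('tx', 654321)]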
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hasField(self, name): """Returns true if field with field_name exists. @param name: Field Name @return: Boolean """
if self._autoFixNames:
        name = self._fixName(name)
    return self._fieldAttrDict.has_key(name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getConfig(self): """Returns dictionary of config entries for Munin Graph. @return: Dictionary of config entries. """
return {'graph': self._graphAttrDict, 'fields': [(field_name, self._fieldAttrDict.get(field_name)) for field_name in self._fieldNameList]}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setVal(self, name, val): """Set value for field in graph. @param name: Field Name @param val: Value for field. """
if self._autoFixNames:
        name = self._fixName(name)
    self._fieldValDict[name] = val
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getVals(self): """Returns value list for Munin Graph @return: List of name-value pairs. """
return [(name, self._fieldValDict.get(name)) for name in self._fieldNameList]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initStats(self): """Query and parse Nginx Web Server Status Page."""
url = "%s://%s:%d/%s" % (self._proto, self._host, self._port,
                             self._statuspath)
    response = util.get_url(url, self._user, self._password)
    self._statusDict = {}
    for line in response.splitlines():
        mobj = re.match(r'\s*(\d+)\s+(\d+)\s+(\d+)\s*$', line)
        if mobj:
            idx = 0
            for key in ('accepts', 'handled', 'requests'):
                idx += 1
                self._statusDict[key] = util.parse_value(mobj.group(idx))
        else:
            for (key, val) in re.findall(r'(\w+):\s*(\d+)', line):
                self._statusDict[key.lower()] = util.parse_value(val)
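For reference, the nginx stub_status page that this parser expects looks roughly like the invented sample below; the first regex captures the accepts/handled/requests line and the findall captures the 'key: value' pairs:

import re

sample = (
    "Active connections: 291\n"
    "server accepts handled requests\n"
    " 16630948 16630948 31070465\n"
    "Reading: 6 Writing: 179 Waiting: 106\n"
)

stats = {}
for line in sample.splitlines():
    mobj = re.match(r'\s*(\d+)\s+(\d+)\s+(\d+)\s*$', line)
    if mobj:
        for (idx, key) in enumerate(('accepts', 'handled', 'requests')):
            stats[key] = int(mobj.group(idx + 1))
    else:
        for (key, val) in re.findall(r'(\w+):\s*(\d+)', line):
            stats[key.lower()] = int(val)
# stats -> {'connections': 291, 'accepts': 16630948, 'handled': 16630948,
#           'requests': 31070465, 'reading': 6, 'writing': 179, 'waiting': 106}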
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getIfStats(self): """Return dictionary of Traffic Stats for Network Interfaces. @return: Nested dictionary of statistics for each interface. """
info_dict = {}
    try:
        fp = open(ifaceStatsFile, 'r')
        data = fp.read()
        fp.close()
    except:
        raise IOError('Failed reading interface stats from file: %s'
                      % ifaceStatsFile)
    for line in data.splitlines():
        mobj = re.match(r'^\s*([\w\d:]+):\s*(.*\S)\s*$', line)
        if mobj:
            iface = mobj.group(1)
            statline = mobj.group(2)
            info_dict[iface] = dict(zip(
                ('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo',
                 'rxframe', 'rxcompressed', 'rxmulticast',
                 'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo',
                 'txcolls', 'txcarrier', 'txcompressed'),
                [int(x) for x in statline.split()]))
    return info_dict
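A standalone sketch of the /proc/net/dev parsing with a single invented sample line, to show which counters land under which keys:

import re

sample_line = "  eth0: 948758 6539 0 0 0 0 0 12 405056 4802 0 0 0 0 0 0"
mobj = re.match(r'^\s*([\w\d:]+):\s*(.*\S)\s*$', sample_line)
iface = mobj.group(1)                                  # 'eth0'
counters = [int(x) for x in mobj.group(2).split()]     # 16 counters
keys = ('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo', 'rxframe',
        'rxcompressed', 'rxmulticast',
        'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo', 'txcolls',
        'txcarrier', 'txcompressed')
stats = dict(zip(keys, counters))
# stats['rxbytes'] -> 948758, stats['txbytes'] -> 405056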
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getRoutes(self): """Get routing table. @return: List of routes. """
routes = []
    try:
        out = subprocess.Popen([routeCmd, "-n"],
                               stdout=subprocess.PIPE).communicate()[0]
    except:
        raise Exception('Execution of command %s failed.' % routeCmd)
    lines = out.splitlines()
    if len(lines) > 1:
        headers = [col.lower() for col in lines[1].split()]
        for line in lines[2:]:
            routes.append(dict(zip(headers, line.split())))
    return routes
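To illustrate the shape of the result, a short sketch that turns typical (invented) route -n output into the per-route dictionaries described above:

sample = (
    "Kernel IP routing table\n"
    "Destination     Gateway         Genmask         Flags Metric Ref    Use Iface\n"
    "0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 eth0\n"
    "192.168.1.0     0.0.0.0         255.255.255.0   U     0      0        0 eth0\n"
)

lines = sample.splitlines()
headers = [col.lower() for col in lines[1].split()]    # column names from header row
routes = [dict(zip(headers, line.split())) for line in lines[2:]]
# routes[0] -> {'destination': '0.0.0.0', 'gateway': '192.168.1.1',
#               'genmask': '0.0.0.0', 'flags': 'UG', 'metric': '0',
#               'ref': '0', 'use': '0', 'iface': 'eth0'}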
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getTCPportConnStatus(self, ipv4=True, ipv6=True, include_listen=False, **kwargs): """Returns the number of TCP endpoints discriminated by status. @param ipv4: Include IPv4 ports in output if True. @param ipv6: Include IPv6 ports in output if True. @param include_listen: Include listening ports in output if True. @param **kwargs: Keyword variables are used for filtering the results depending on the values of the columns. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. @return: Dictionary mapping connection status to the number of endpoints. """
status_dict = {}
    result = self.getStats(tcp=True, udp=False,
                           include_listen=include_listen,
                           ipv4=ipv4, ipv6=ipv6, **kwargs)
    stats = result['stats']
    for stat in stats:
        if stat is not None:
            status = stat[8].lower()
            status_dict[status] = status_dict.get(status, 0) + 1
    return status_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def getTCPportConnCount(self, ipv4=True, ipv6=True, resolve_ports=False, **kwargs): """Returns TCP connection counts for each local port. @param ipv4: Include IPv4 ports in output if True. @param ipv6: Include IPv6 ports in output if True. @param resolve_ports: Resolve numeric ports to names if True. @param **kwargs: Keyword variables are used for filtering the results depending on the values of the columns. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. @return: Dictionary mapping port number or name to the number of established connections. """
port_dict = {}
    result = self.getStats(tcp=True, udp=False,
                           include_listen=False,
                           ipv4=ipv4, ipv6=ipv6,
                           resolve_ports=resolve_ports, **kwargs)
    stats = result['stats']
    for stat in stats:
        if stat[8] == 'ESTABLISHED':
            port_dict[stat[5]] = port_dict.get(stat[5], 0) + 1
    return port_dict
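A hedged usage sketch of the two connection helpers above; the module and class name (pysysinfo.netstat.NetstatInfo) and the localport filter keyword are assumptions about the surrounding library and may differ:

from pysysinfo.netstat import NetstatInfo      # module / class name is an assumption

net = NetstatInfo()
# Endpoints per TCP state (established, time_wait, ...), IPv4 only.
conn_status = net.getTCPportConnStatus(ipv4=True, ipv6=False)
# Established connections per local port, restricted to port 80 via the
# filtering keywords described above ('localport' is an assumed column name).
www_conns = net.getTCPportConnCount(ipv4=True, ipv6=False, localport=80)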