| idx (int64, 0–63k) | question (string, 61–4.03k chars) | target (string, 6–1.23k chars) |
---|---|---|
1,000 |
def scan_to_table(input_table, genome, scoring, pwmfile=None, ncpus=None): config = MotifConfig() if pwmfile is None: pwmfile = config.get_default_params().get("motif_db", None) if pwmfile is not None: pwmfile = os.path.join(config.get_motif_dir(), pwmfile) if pwmfile is None: raise ValueError("no pwmfile given and no default database specified") logger.info("reading table") if input_table.endswith("feather"): df = pd.read_feather(input_table) idx = df.iloc[:, 0].values else: df = pd.read_table(input_table, index_col=0, comment="#") idx = df.index regions = list(idx) s = Scanner(ncpus=ncpus) s.set_motifs(pwmfile) s.set_genome(genome) s.set_background(genome=genome) nregions = len(regions) scores = [] if scoring == "count": logger.info("setting threshold") s.set_threshold(fpr=FPR) logger.info("creating count table") for row in s.count(regions): scores.append(row) logger.info("done") else: s.set_threshold(threshold=0.0) logger.info("creating score table") for row in s.best_score(regions, normalize=True): scores.append(row) logger.info("done") motif_names = [m.id for m in read_motifs(pwmfile)] logger.info("creating dataframe") return pd.DataFrame(scores, index=idx, columns=motif_names)
|
Scan regions in input table with motifs.
|
1,001 |
def get_args(parser): args = vars(parser.parse_args()).items() return {key: val for key, val in args if not isinstance(val, NotSet)}
|
Converts arguments extracted from a parser to a dict and will dismiss arguments which default to NOT_SET.
|
1,002 |
def socket_read(fp): response = '' oldlen = 0 newlen = 0 while True: response += fp.read(buffSize) newlen = len(response) if newlen - oldlen == 0: break else: oldlen = newlen return response
|
Buffered read from socket. Reads all data available from socket.
|
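Entry 1,002's loop keeps appending chunks until a pass adds no new bytes, which also terminates cleanly at end of stream. A self-contained demonstration with an in-memory stream standing in for the socket file object (`buffSize` is a module-level constant in the source; the value here is an assumption):

```python
import io

buffSize = 4096  # assumed chunk size; the source defines this at module level

def socket_read(fp):
    """Drain a file-like object in buffSize chunks, as in entry 1,002."""
    response = ''
    oldlen = 0
    while True:
        response += fp.read(buffSize)
        newlen = len(response)
        if newlen - oldlen == 0:  # nothing new read: stream is exhausted
            break
        oldlen = newlen
    return response

print(len(socket_read(io.StringIO('x' * 10000))))  # 10000
```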
1,003 |
def exec_command(args, env=None): try: cmd = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=buffSize, env=env) except OSError, e: raise Exception("Execution of command failed.\n", " Command: %s\n Error: %s" % (' '.join(args), str(e))) out, err = cmd.communicate(None) if cmd.returncode != 0: raise Exception("Execution of command failed with error code: %s\n%s\n" % (cmd.returncode, err)) return out
|
Convenience function that executes command and returns result.
|
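Entry 1,003 uses Python 2 exception syntax (`except OSError, e`). For comparison, a rough Python 3 sketch of the same convenience wrapper (not the dataset's code; `RuntimeError` is substituted for the bare `Exception`):

```python
import subprocess

def exec_command_py3(args, env=None):
    """Run a command, return stdout, raise on failure (Python 3 sketch)."""
    try:
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, env=env)
    except OSError as e:
        raise RuntimeError("Execution of command failed.\n  Command: %s\n  Error: %s"
                           % (' '.join(args), e))
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError("Execution of command failed with error code: %s\n%s"
                           % (proc.returncode, err.decode(errors='replace')))
    return out

print(exec_command_py3(['echo', 'hello']))  # b'hello\n' on POSIX systems
```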
1,004 |
def registerFilter(self, column, patterns, is_regex=False, ignore_case=False): if isinstance(patterns, basestring): patt_list = (patterns,) elif isinstance(patterns, (tuple, list)): patt_list = list(patterns) else: raise ValueError("The patterns parameter must either be a string or a tuple / list of strings.") if is_regex: if ignore_case: flags = re.IGNORECASE else: flags = 0 patt_exprs = [re.compile(pattern, flags) for pattern in patt_list] else: if ignore_case: patt_exprs = [pattern.lower() for pattern in patt_list] else: patt_exprs = patt_list self._filters[column] = (patt_exprs, is_regex, ignore_case)
|
Register filter on a column of table.
|
1,005 |
def unregisterFilter(self, column): if self._filters.has_key(column): del self._filters[column]
|
Unregister filter on a column of the table.
|
1,006 |
def registerFilters(self, **kwargs): for (key, patterns) in kwargs.items(): if key.endswith('_regex'): col = key[:-len('_regex')] is_regex = True else: col = key is_regex = False if col.endswith('_ic'): col = col[:-len('_ic')] ignore_case = True else: ignore_case = False self.registerFilter(col, patterns, is_regex, ignore_case)
|
Register multiple filters at once.
|
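Entries 1,004–1,006 define a small keyword convention: a `_regex` suffix selects regex matching and an `_ic` suffix case-insensitive matching, with `_regex` stripped first. A self-contained sketch of just that key-parsing step (the helper name is ours, not the source's):

```python
def split_filter_key(key):
    """Mirror registerFilters' suffix handling: strip _regex, then _ic."""
    is_regex = key.endswith('_regex')
    col = key[:-len('_regex')] if is_regex else key
    ignore_case = col.endswith('_ic')
    if ignore_case:
        col = col[:-len('_ic')]
    return col, is_regex, ignore_case

print(split_filter_key('user'))           # ('user', False, False)
print(split_filter_key('cmd_ic'))         # ('cmd', False, True)
print(split_filter_key('cmd_ic_regex'))   # ('cmd', True, True)
```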
1,007 |
def applyFilters(self, headers, table): result = [] column_idxs = {} for column in self._filters.keys(): try: column_idxs[column] = headers.index(column) except ValueError: raise ValueError('Invalid column name %s in filter.' % column) for row in table: for (column, (patterns, is_regex, ignore_case)) in self._filters.items(): col_idx = column_idxs[column] col_val = row[col_idx] if is_regex: for pattern in patterns: if pattern.search(col_val): break else: break else: if ignore_case: col_val = col_val.lower() if col_val in patterns: pass else: break else: result.append(row) return result
|
Apply filter on ps command result.
|
1,008 |
def function(data, maxt=None): data = np.atleast_1d(data) assert len(np.shape(data)) == 1, "The autocorrelation function can only be computed on a 1D time series." if maxt is None: maxt = len(data) result = np.zeros(maxt, dtype=float) _acor.function(np.array(data, dtype=float), result) return result / result[0]
|
Calculate the autocorrelation function for a 1D time series.
|
1,009 |
def getDesc(self, entry): if len(self._descDict) == 0: self.getStats() return self._descDict.get(entry)
|
Returns description for stat entry.
|
1,010 |
def fingerprint_helper(egg, permute=False, n_perms=1000, match='exact', distance='euclidean', features=None): if features is None: features = egg.dist_funcs.keys() inds = egg.pres.index.tolist() slices = [egg.crack(subjects=[i], lists=[j]) for i, j in inds] weights = _get_weights(slices, features, distdict, permute, n_perms, match, distance) return np.nanmean(weights, axis=0)
|
Computes clustering along a set of feature dimensions.
|
1,011 |
def get(key, default=None): try: return ast.literal_eval(os.environ.get(key.upper(), default)) except (ValueError, SyntaxError): return os.environ.get(key.upper(), default)
|
Searches os.environ. If a key is found, try evaluating its type; else return the string.
|
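Entry 1,011 leans on `ast.literal_eval` to recover typed values from environment strings and falls back to the raw string when evaluation fails. A self-contained illustration (variable names are ours):

```python
import ast
import os

def env_get(key, default=None):
    """Typed environment lookup, mirroring entry 1,011."""
    try:
        return ast.literal_eval(os.environ.get(key.upper(), default))
    except (ValueError, SyntaxError):
        return os.environ.get(key.upper(), default)

os.environ['PORT'] = '8080'
os.environ['DEBUG'] = 'True'
os.environ['NAME'] = 'frodo'
print(env_get('port'))   # 8080 (int)
print(env_get('debug'))  # True (bool)
print(env_get('name'))   # 'frodo' (literal_eval raises ValueError, so raw string)
```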
1,012 |
def save(filepath=None, **kwargs): if filepath is None: filepath = os.path.join('.env') with open(filepath, 'wb') as file_handle: file_handle.writelines('{0}={1}\n'.format(key.upper(), val) for key, val in kwargs.items())
|
Saves a list of keyword arguments as environment variables to a file. If no filepath given, will default to the default .env file.
|
1,013 |
def load(filepath=None): if filepath and os.path.exists(filepath): pass else: if not os.path.exists('.env'): return False filepath = os.path.join('.env') for key, value in _get_line_(filepath): os.environ.setdefault(key, str(value)) return True
|
Reads a .env file into os.environ.
|
1,014 |
def initStats(self): url = "%s://%s:%d/%s?auto" % (self._proto, self._host, self._port, self._statuspath) response = util.get_url(url, self._user, self._password) self._statusDict = {} for line in response.splitlines(): mobj = re.match('(\S.*\S)\s*:\s*(\S+)\s*$', line) if mobj: self._statusDict[mobj.group(1)] = util.parse_value(mobj.group(2)) if self._statusDict.has_key('Scoreboard'): self._statusDict['MaxWorkers'] = len(self._statusDict['Scoreboard'])
|
Query and parse Apache Web Server Status Page.
|
1,015 |
def get_pres_features(self, features=None): if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.pres.applymap(lambda x: {k: v for k, v in x.items() if k in features} if x is not None else None)
|
Returns a df of features for presented items.
|
1,016 |
def get_rec_features(self, features=None): if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.rec.applymap(lambda x: {k: v for k, v in x.items() if k != 'item'} if x is not None else None)
|
Returns a df of features for recalled items.
|
1,017 |
def info(self): print('Number of subjects: ' + str(self.n_subjects)) print('Number of lists per subject: ' + str(self.n_lists)) print('Number of words per list: ' + str(self.list_length)) print('Date created: ' + str(self.date_created)) print('Meta data: ' + str(self.meta))
|
Print info about the data egg.
|
1,018 |
def save(self, fname, compression='blosc'): egg = {'pres': df2list(self.pres), 'rec': df2list(self.rec), 'dist_funcs': self.dist_funcs, 'subjgroup': self.subjgroup, 'subjname': self.subjname, 'listgroup': self.listgroup, 'listname': self.listname, 'date_created': self.date_created, 'meta': self.meta} if fname[-4:] != '.egg': fname += '.egg' with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
|
Save method for the Egg object.
|
1,019 |
def save(self, fname, compression='blosc'): egg = {'data': self.data, 'analysis': self.analysis, 'list_length': self.list_length, 'n_lists': self.n_lists, 'n_subjects': self.n_subjects, 'position': self.position, 'date_created': self.date_created, 'meta': self.meta} if fname[-5:] != '.fegg': fname += '.fegg' with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
|
Save method for the FriedEgg object.
|
1,020 |
def free_symbolic(self): if self._symbolic is not None: self.funs.free_symbolic(self._symbolic) self._symbolic = None self.mtx = None
|
Free symbolic data.
|
1,021 |
def free_numeric(self): if self._numeric is not None: self.funs.free_numeric(self._numeric) self._numeric = None self.free_symbolic()
|
Free numeric data.
|
1,022 |
def solve(self, sys, mtx, rhs, autoTranspose=False): if sys not in umfSys: raise ValueError('sys must be in %s' % umfSys) if autoTranspose and self.isCSR: if self.family in umfRealTypes: ii = 0 else: ii = 1 if sys in umfSys_transposeMap[ii]: sys = umfSys_transposeMap[ii][sys] else: raise RuntimeError('autoTranspose ambiguous, switch it off') if self._numeric is not None: if self.mtx is not mtx: raise ValueError('must be called with same matrix as numeric()') else: raise RuntimeError('numeric() not called') indx = self._getIndx(mtx) if self.isReal: rhs = rhs.astype(np.float64) sol = np.zeros((mtx.shape[1],), dtype=np.float64) status = self.funs.solve(sys, mtx.indptr, indx, mtx.data, sol, rhs, self._numeric, self.control, self.info) else: rhs = rhs.astype(np.complex128) sol = np.zeros((mtx.shape[1],), dtype=np.complex128) mreal, mimag = mtx.data.real.copy(), mtx.data.imag.copy() sreal, simag = sol.real.copy(), sol.imag.copy() rreal, rimag = rhs.real.copy(), rhs.imag.copy() status = self.funs.solve(sys, mtx.indptr, indx, mreal, mimag, sreal, simag, rreal, rimag, self._numeric, self.control, self.info) sol.real, sol.imag = sreal, simag if status != UMFPACK_OK: if status == UMFPACK_WARNING_singular_matrix: warnings.warn('Zeroing nan and inf entries...', UmfpackWarning) sol[~np.isfinite(sol)] = 0.0 else: raise RuntimeError('%s failed with %s' % (self.funs.solve, umfStatus[status])) econd = 1.0 / self.info[UMFPACK_RCOND] if econd > self.maxCond: msg = '(almost) singular matrix! (estimated cond. number: %.2e)' % econd warnings.warn(msg, UmfpackWarning) return sol
|
Solution of system of linear equations using the Numeric object.
|
1,023 |
def linsolve(self, sys, mtx, rhs, autoTranspose=False): if sys not in umfSys: raise ValueError('sys must be in %s' % umfSys) if self._numeric is None: self.numeric(mtx) else: if self.mtx is not mtx: self.numeric(mtx) sol = self.solve(sys, mtx, rhs, autoTranspose) self.free_numeric() return sol
|
One-shot solution of system of linear equations. Reuses Numeric object if possible.
|
1,024 |
def stick_perm(presenter, egg, dist_dict, strategy): np.random.seed() egg_pres, egg_rec, egg_features, egg_dist_funcs = parse_egg(egg) regg = order_stick(presenter, egg, dist_dict, strategy) regg_pres, regg_rec, regg_features, regg_dist_funcs = parse_egg(regg) regg_pres = list(regg_pres) egg_pres = list(egg_pres) idx = [egg_pres.index(r) for r in regg_pres] weights = compute_feature_weights_dict(list(regg_pres), list(regg_pres), list(regg_features), dist_dict) orders = idx return weights, orders
|
Computes weights for one reordering using stick-breaking method.
|
1,025 |
def compute_distances_dict(egg): pres, rec, features, dist_funcs = parse_egg(egg) pres_list = list(pres) features_list = list(features) distances = {} for idx1, item1 in enumerate(pres_list): distances[item1] = {} for idx2, item2 in enumerate(pres_list): distances[item1][item2] = {} for feature in dist_funcs: distances[item1][item2][feature] = builtin_dist_funcs[dist_funcs[feature]](features_list[idx1][feature], features_list[idx2][feature]) return distances
|
Creates a nested dict of distances.
|
1,026 |
def update(self, egg, permute=False, nperms=1000, parallel=False): self.n += 1 next_weights = np.nanmean(_analyze_chunk(egg, analysis=fingerprint_helper, analysis_type='fingerprint', pass_features=True, permute=permute, n_perms=nperms, parallel=parallel).values, 0) if self.state is not None: c = self.state * self.n self.state = np.nansum(np.array([c, next_weights]), axis=0) / (self.n + 1) else: self.state = next_weights self.history.append(next_weights)
|
In-place method that updates fingerprint with new data.
|
1,027 |
def _getDevMajorMinor(self, devpath): fstat = os.stat(devpath) if stat.S_ISBLK(fstat.st_mode): return (os.major(fstat.st_rdev), os.minor(fstat.st_rdev)) else: raise ValueError("The file %s is not a valid block device." % devpath)
|
Return major and minor device number for block device path devpath.
|
1,028 |
def _getUniqueDev(self, devpath): realpath = os.path.realpath(devpath) mobj = re.match('\/dev\/(.*)$', realpath) if mobj: dev = mobj.group(1) if dev in self._diskStats: return dev else: try: (major, minor) = self._getDevMajorMinor(realpath) except: return None return self._mapMajorMinor2dev.get((major, minor)) return None
|
Return unique device for any block device path.
|
1,029 |
def _initFilesystemInfo(self): self._mapFSpathDev = {} fsinfo = FilesystemInfo() for fs in fsinfo.getFSlist(): devpath = fsinfo.getFSdev(fs) dev = self._getUniqueDev(devpath) if dev is not None: self._mapFSpathDev[fs] = dev
|
Initialize filesystem to device mappings.
|
1,030 |
def _initSwapInfo(self): self._swapList = [] sysinfo = SystemInfo() for (swap, attrs) in sysinfo.getSwapStats().iteritems(): if attrs['type'] == 'partition': dev = self._getUniqueDev(swap) if dev is not None: self._swapList.append(dev)
|
Initialize swap partition to device mappings.
|
1,031 |
def _initDevClasses(self): self._devClassTree = {} self._partitionTree = {} self._mapDevType = {} basedevs = [] otherdevs = [] if self._mapMajorDevclass is None: self._initBlockMajorMap() for dev in self._diskStats: stats = self._diskStats[dev] devclass = self._mapMajorDevclass.get(stats['major']) if devclass is not None: devdir = os.path.join(sysfsBlockdevDir, dev) if os.path.isdir(devdir): if not self._devClassTree.has_key(devclass): self._devClassTree[devclass] = [] self._devClassTree[devclass].append(dev) self._mapDevType[dev] = devclass basedevs.append(dev) else: otherdevs.append(dev) basedevs.sort(key=len, reverse=True) otherdevs.sort(key=len, reverse=True) idx = 0 for partdev in otherdevs: while len(basedevs[idx]) > len(partdev): idx += 1 for dev in basedevs[idx:]: if re.match("%s(\d+|p\d+)$" % dev, partdev): if not self._partitionTree.has_key(dev): self._partitionTree[dev] = [] self._partitionTree[dev].append(partdev) self._mapDevType[partdev] = 'part'
|
Sort block devices into lists depending on device class and initialize device type map and partition map.
|
1,032 |
def getDevType(self, dev): if self._devClassTree is None: self._initDevClasses() return self._mapDevType.get(dev)
|
Returns type of device dev.
|
1,033 |
def getPartitionList(self): if self._partList is None: self._partList = [] for (disk, parts) in self.getPartitionDict().iteritems(): for part in parts: self._partList.append((disk, part)) return self._partList
|
Returns list of partitions.
|
1,034 |
def getContainerStats(self, limit=None, marker=None): stats = {} for row in self._conn.list_containers_info(limit, marker): stats[row['name']] = {'count': row['count'], 'size': row['bytes']} return stats
|
Returns Rackspace Cloud Files usage stats for containers.
|
1,035 |
def _connect(self): if sys.version_info[:2] < (2, 6): self._conn = httplib.HTTPConnection(self._host, self._port) else: self._conn = httplib.HTTPConnection(self._host, self._port, False, defaultTimeout)
|
Connect to Squid Proxy Manager interface.
|
1,036 |
def _retrieve(self, map): self._conn.request('GET', "cache_object://%s/%s" % (self._host, map), None, self._httpHeaders) rp = self._conn.getresponse() if rp.status == 200: data = rp.read() return data else: raise Exception("Retrieval of stats from Squid Proxy Server on host %s and port %s failed.\nHTTP - Status: %s Reason: %s" % (self._host, self._port, rp.status, rp.reason))
|
Query Squid Proxy Server Manager Interface for stats.
|
1,037 |
def _parseCounters(self, data): info_dict = util.NestedDict() for line in data.splitlines(): mobj = re.match('^\s*([\w\.]+)\s*=\s*(\S.*)$', line) if mobj: (key, value) = mobj.groups() klist = key.split('.') info_dict.set_nested(klist, parse_value(value)) return info_dict
|
Parse simple stats list of key-value pairs.
|
1,038 |
def _parseSections(self, data): section_dict = {} lines = data.splitlines() idx = 0 numlines = len(lines) section = None while idx < numlines: line = lines[idx] idx += 1 mobj = re.match('^(\w[\w\s\(\)]+[\w\)])\s*:$', line) if mobj: section = mobj.group(1) section_dict[section] = [] else: mobj = re.match('(\t|\s)\s*(\w.*)$', line) if mobj: section_dict[section].append(mobj.group(2)) else: mobj = re.match('^(\w[\w\s\(\)]+[\w\)])\s*:\s*(\S.*)$', line) if mobj: section = None if not section_dict.has_key(section): section_dict[section] = [] section_dict[section].append(line) else: if not section_dict.has_key('PARSEERROR'): section_dict['PARSEERROR'] = [] section_dict['PARSEERROR'].append(line) return section_dict
|
Parse data and separate sections. Returns dictionary that maps section name to section data.
|
1,039 |
def getMenu(self): data = self._retrieve('') info_list = [] for line in data.splitlines(): mobj = re.match('^\s*(\S.*\S)\s*\t\s*(\S.*\S)\s*\t\s*(\S.*\S)$', line) if mobj: info_list.append(mobj.groups()) return info_list
|
Get manager interface section list from Squid Proxy Server.
|
1,040 |
def getIfaceStats(self): ifInfo = netiface.NetIfaceInfo() ifStats = ifInfo.getIfStats() info_dict = {} for ifname in ifStats: if re.match('^w\d+g\d+$', ifname): info_dict[ifname] = ifStats[ifname] return info_dict
|
Return dictionary of Traffic Stats for each Wanpipe Interface.
|
1,041 |
def _connect(self): try: self._eslconn = ESL.ESLconnection(self._eslhost, str(self._eslport), self._eslpass) except: pass if not self._eslconn.connected(): raise Exception("Connection to FreeSWITCH ESL Interface on host %s and port %d failed." % (self._eslhost, self._eslport))
|
Connect to FreeSWITCH ESL Interface.
|
1,042 |
def _execCmd(self, cmd, args): output = self._eslconn.api(cmd, args) if output: body = output.getBody() if body: return body.splitlines() return None
|
Execute command and return result body as list of lines.
|
1,043 |
def ping(self): start = time.time() self._conn.ping() return (time.time() - start)
|
Ping Redis Server and return Round-Trip-Time in seconds.
|
1,044 |
def list2pd(all_data, subjindex=None, listindex=None): listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex subjindex = [idx for idx, subj in enumerate(all_data)] if not subjindex else subjindex def make_multi_index(listindex, sub_num): return pd.MultiIndex.from_tuples([(sub_num, lst) for lst in listindex], names=['Subject', 'List']) listindex = list(listindex) subjindex = list(subjindex) subs_list_of_dfs = [pd.DataFrame(sub_data, index=make_multi_index(listindex[sub_num], subjindex[sub_num])) for sub_num, sub_data in enumerate(all_data)] return pd.concat(subs_list_of_dfs)
|
Makes multi-indexed dataframe of subject data.
|
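Entry 1,044 builds one `(Subject, List)` MultiIndex per subject and concatenates the per-subject frames. The core mechanism, self-contained with toy data:

```python
import pandas as pd

# Two subjects, two lists each, two presented items per list.
all_data = [[['cat', 'dog'], ['bat', 'owl']],
            [['sun', 'sky'], ['sea', 'ice']]]

frames = []
for sub_num, sub_data in enumerate(all_data):
    index = pd.MultiIndex.from_tuples(
        [(sub_num, lst) for lst in range(len(sub_data))],
        names=['Subject', 'List'])
    frames.append(pd.DataFrame(sub_data, index=index))

df = pd.concat(frames)
print(df.loc[(1, 0)].tolist())  # ['sun', 'sky']: subject 1, list 0
```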
1,045 |
def recmat2egg(recmat, list_length=None): from .egg import Egg as Egg pres = [[[str(word) for word in list(range(0, list_length))] for reclist in recsub] for recsub in recmat] rec = [[[str(word) for word in reclist if word is not None] for reclist in recsub] for recsub in recmat] return Egg(pres=pres, rec=rec)
|
Creates egg data object from zero-indexed recall matrix.
|
1,046 |
def default_dist_funcs(dist_funcs, feature_example): if dist_funcs is None: dist_funcs = dict() for key in feature_example: if key in dist_funcs: pass if key == 'item': pass elif isinstance(feature_example[key], (six.string_types, six.binary_type)): dist_funcs[key] = 'match' elif isinstance(feature_example[key], (int, np.integer, float)) or all([isinstance(i, (int, np.integer, float)) for i in feature_example[key]]): dist_funcs[key] = 'euclidean' return dist_funcs
|
Fills in default distance metrics for fingerprint analyses.
|
1,047 |
def stack_eggs(eggs, meta='concatenate'): from .egg import Egg pres = [egg.pres.loc[sub, :].values.tolist() for egg in eggs for sub in egg.pres.index.levels[0].values.tolist()] rec = [egg.rec.loc[sub, :].values.tolist() for egg in eggs for sub in egg.rec.index.levels[0].values.tolist()] if meta == 'concatenate': new_meta = {} for egg in eggs: for key in egg.meta: if key in new_meta: new_meta[key] = list(new_meta[key]) new_meta[key].extend(egg.meta.get(key)) else: new_meta[key] = egg.meta.get(key) elif meta == 'separate': new_meta = list(egg.meta for egg in eggs) return Egg(pres=pres, rec=rec, meta=new_meta)
|
Takes a list of eggs, stacks them and reindexes the subject number.
|
1,048 |
def crack_egg(egg, subjects=None, lists=None): from .egg import Egg if hasattr(egg, 'features'): all_have_features = egg.features is not None else: all_have_features = False opts = {} if subjects is None: subjects = egg.pres.index.levels[0].values.tolist() elif type(subjects) is not list: subjects = [subjects] if lists is None: lists = egg.pres.index.levels[1].values.tolist() elif type(lists) is not list: lists = [lists] idx = pd.IndexSlice pres = egg.pres.loc[idx[subjects, lists], egg.pres.columns] rec = egg.rec.loc[idx[subjects, lists], egg.rec.columns] pres = [pres.loc[sub, :].values.tolist() for sub in subjects] rec = [rec.loc[sub, :].values.tolist() for sub in subjects] if all_have_features: features = egg.features.loc[idx[subjects, lists], egg.features.columns] opts['features'] = [features.loc[sub, :].values.tolist() for sub in subjects] return Egg(pres=pres, rec=rec, **opts)
|
Takes an egg and returns a subset of the subjects or lists.
|
1,049 |
def df2list(df): subjects = df.index.levels[0].values.tolist() lists = df.index.levels[1].values.tolist() idx = pd.IndexSlice df = df.loc[idx[subjects, lists], df.columns] lst = [df.loc[sub, :].values.tolist() for sub in subjects] return lst
|
Convert a MultiIndex df to list.
|
1,050 |
def parse_egg(egg): pres_list = egg.get_pres_items().values[0] rec_list = egg.get_rec_items().values[0] feature_list = egg.get_pres_features().values[0] dist_funcs = egg.dist_funcs return pres_list, rec_list, feature_list, dist_funcs
|
Parses an egg and returns fields.
|
1,051 |
def merge_pres_feats(pres, features): sub = [] for psub, fsub in zip(pres, features): exp = [] for pexp, fexp in zip(psub, fsub): lst = [] for p, f in zip(pexp, fexp): p.update(f) lst.append(p) exp.append(lst) sub.append(exp) return sub
|
Helper function to merge pres and features to support legacy features argument.
|
1,052 |
def r2z(r): with np.errstate(invalid='ignore', divide='ignore'): return 0.5 * (np.log(1 + r) - np.log(1 - r))
|
Function that calculates the Fisher z-transformation.
|
1,053 |
def z2r(z): with np.errstate(invalid='ignore', divide='ignore'): return (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)
|
Function that calculates the inverse Fisher z-transformation.
|
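Entries 1,052–1,053 implement the Fisher transform z = ½·ln((1+r)/(1−r)) and its inverse r = (e^{2z} − 1)/(e^{2z} + 1), which are exactly arctanh and tanh. That identity gives a quick sanity check:

```python
import numpy as np

def r2z(r):
    with np.errstate(invalid='ignore', divide='ignore'):
        return 0.5 * (np.log(1 + r) - np.log(1 - r))

def z2r(z):
    with np.errstate(invalid='ignore', divide='ignore'):
        return (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)

r = np.array([-0.9, -0.5, 0.0, 0.5, 0.9])
assert np.allclose(r2z(r), np.arctanh(r))  # Fisher z is arctanh
assert np.allclose(z2r(r), np.tanh(r))     # the inverse is tanh
assert np.allclose(z2r(r2z(r)), r)         # round trip recovers r
```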
1,054 |
def shuffle_egg(egg): from .egg import Egg pres, rec, features, dist_funcs = parse_egg(egg) if pres.ndim == 1: pres = pres.reshape(1, pres.shape[0]) rec = rec.reshape(1, rec.shape[0]) features = features.reshape(1, features.shape[0]) for ilist in range(rec.shape[0]): idx = np.random.permutation(rec.shape[1]) rec[ilist, :] = rec[ilist, idx] return Egg(pres=pres, rec=rec, features=features, dist_funcs=dist_funcs)
|
Shuffle an Egg's recalls.
|
1,055 |
def getUptime(self): try: fp = open(uptimeFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % uptimeFile) return float(line.split()[0])
|
Return system uptime in seconds.
|
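Entry 1,055 reads the first field of a Linux `/proc/uptime`-style file: seconds since boot, with aggregate idle time as the second field. A standalone sketch (Linux only):

```python
def get_uptime(path='/proc/uptime'):
    """Return system uptime in seconds; the file reads like '123456.78 901234.56'."""
    with open(path) as fp:
        return float(fp.readline().split()[0])

print(get_uptime())  # e.g. 123456.78
```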
1,056 |
def getLoadAvg(self): try: fp = open(loadavgFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % loadavgFile) arr = line.split() if len(arr) >= 3: return [float(col) for col in arr[:3]] else: return None
|
Return system Load Average.
|
1,057 |
def getCPUuse(self): hz = os.sysconf('SC_CLK_TCK') info_dict = {} try: fp = open(cpustatFile, 'r') line = fp.readline() fp.close() except: raise IOError('Failed reading stats from file: %s' % cpustatFile) headers = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'steal', 'guest'] arr = line.split() if len(arr) > 1 and arr[0] == 'cpu': return dict(zip(headers[0:len(arr)], [(float(t) / hz) for t in arr[1:]])) return info_dict
|
Return CPU time utilization in seconds.
|
1,058 |
def getProcessStats(self): info_dict = {} try: fp = open(cpustatFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading stats from file: %s' % cpustatFile) for line in data.splitlines(): arr = line.split() if len(arr) > 1 and arr[0] in ('ctxt', 'intr', 'softirq', 'processes', 'procs_running', 'procs_blocked'): info_dict[arr[0]] = arr[1] return info_dict
|
Return stats for running and blocked processes, forks, context switches and interrupts.
|
1,059 |
def getMemoryUse(self): info_dict = {} try: fp = open(meminfoFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading stats from file: %s' % meminfoFile) for line in data.splitlines(): mobj = re.match('^(.+):\s*(\d+)\s*(\w+|)\s*$', line) if mobj: if mobj.group(3).lower() == 'kb': mult = 1024 else: mult = 1 info_dict[mobj.group(1)] = int(mobj.group(2)) * mult return info_dict
|
Return stats for memory utilization.
|
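Entry 1,059's regex handles `/proc/meminfo` lines such as `MemTotal: 16336908 kB`, normalizing kB entries to bytes and passing unitless counters through. Self-contained:

```python
import re

def parse_meminfo(text):
    """Parse /proc/meminfo-style lines into a dict of byte counts."""
    info = {}
    for line in text.splitlines():
        mobj = re.match(r'^(.+):\s*(\d+)\s*(\w+|)\s*$', line)
        if mobj:
            mult = 1024 if mobj.group(3).lower() == 'kb' else 1
            info[mobj.group(1)] = int(mobj.group(2)) * mult
    return info

sample = "MemTotal:       16336908 kB\nHugePages_Total:       0\n"
print(parse_meminfo(sample))
# {'MemTotal': 16728993792, 'HugePages_Total': 0}
```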
1,060 |
def getVMstats(self): info_dict = {} try: fp = open(vmstatFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading stats from file: %s' % vmstatFile) for line in data.splitlines(): cols = line.split() if len(cols) == 2: info_dict[cols[0]] = cols[1] return info_dict
|
Return stats for Virtual Memory Subsystem.
|
1,061 |
def _connect(self): if self._socketFile is not None: if not os.path.exists(self._socketFile): raise Exception("Socket file (%s) for Memcached Instance not found." % self._socketFile) try: if self._timeout is not None: self._conn = util.Telnet(self._host, self._port, self._socketFile, self._timeout) else: self._conn = util.Telnet(self._host, self._port, self._socketFile) except: raise Exception("Connection to %s failed." % self._instanceName)
|
Connect to Memcached.
|
1,062 |
def _sendStatCmd(self, cmd): try: self._conn.write("%s\r\n" % cmd) regex = re.compile('^(END|ERROR)\r\n', re.MULTILINE) (idx, mobj, text) = self._conn.expect([regex,], self._timeout) except: raise Exception("Communication with %s failed" % self._instanceName) if mobj is not None: if mobj.group(1) == 'END': return text.splitlines()[:-1] elif mobj.group(1) == 'ERROR': raise Exception("Protocol error in communication with %s." % self._instanceName) else: raise Exception("Connection with %s timed out." % self._instanceName)
|
Send stat command to Memcached Server and return response lines.
|
1,063 |
def _parseStats(self, lines, parse_slabs=False): info_dict = {} info_dict['slabs'] = {} for line in lines: mobj = re.match('^STAT\s(\w+)\s(\S+)$', line) if mobj: info_dict[mobj.group(1)] = util.parse_value(mobj.group(2), True) continue elif parse_slabs: mobj = re.match('STAT\s(\w+:)?(\d+):(\w+)\s(\S+)$', line) if mobj: (slab, key, val) = mobj.groups()[-3:] if not info_dict['slabs'].has_key(slab): info_dict['slabs'][slab] = {} info_dict['slabs'][slab][key] = util.parse_value(val, True) return info_dict
|
Parse stats output from memcached and return dictionary of stats.
|
1,064 |
def correlation(a, b): "Returns correlation distance between a and b" if isinstance(a, list): a = np.array(a) if isinstance(b, list): b = np.array(b) a = a.reshape(1, -1) b = b.reshape(1, -1) return cdist(a, b, 'correlation')
|
Returns correlation distance between a and b.
|
1,065 |
def euclidean(a, b): "Returns euclidean distance between a and b" return np.linalg.norm(np.subtract(a, b))
|
Returns euclidean distance between a and b.
|
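Entries 1,064–1,065 wrap numpy/scipy distance primitives; note the reshape to a `(1, n)` row vector that `cdist` requires. A self-contained check:

```python
import numpy as np
from scipy.spatial.distance import cdist

a, b = [1.0, 2.0, 3.0], [4.0, 6.0, 8.0]

# Euclidean distance: norm of the difference vector.
print(np.linalg.norm(np.subtract(a, b)))  # sqrt(9 + 16 + 25) ~ 7.0711

# Correlation distance: cdist expects 2-D inputs, hence the reshape.
av, bv = np.array(a).reshape(1, -1), np.array(b).reshape(1, -1)
print(cdist(av, bv, 'correlation'))  # ~[[0.]] since b = 2a + 2 is perfectly correlated with a
```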
1,066 |
def parseProcCmd(self, fields=('pid', 'user', 'cmd',), threads=False): args = [] headers = [f.lower() for f in fields] args.append('--no-headers') args.append('-e') if threads: args.append('-T') field_ranges = [] fmt_strs = [] start = 0 for header in headers: field_width = psFieldWidth.get(header, psDefaultFieldWidth) fmt_strs.append('%s:%d' % (header, field_width)) end = start + field_width + 1 field_ranges.append((start, end)) start = end args.append('-o') args.append(','.join(fmt_strs)) lines = self.execProcCmd(*args) if len(lines) > 0: stats = [] for line in lines: cols = [] for (start, end) in field_ranges: cols.append(line[start:end].strip()) stats.append(cols) return {'headers': headers, 'stats': stats} else: return None
|
Execute ps command with custom output format with columns from fields and return result as a nested list. The Standard Format Specifiers from the ps man page must be used for the fields parameter.
|
1,067 |
def getProcList(self, fields=('pid', 'user', 'cmd',), threads=False, **kwargs): field_list = list(fields) for key in kwargs: col = re.sub('(_ic)?(_regex)?$', '', key) if not col in field_list: field_list.append(col) pinfo = self.parseProcCmd(field_list, threads) if pinfo: if len(kwargs) > 0: pfilter = util.TableFilter() pfilter.registerFilters(**kwargs) stats = pfilter.applyFilters(pinfo['headers'], pinfo['stats']) return {'headers': pinfo['headers'], 'stats': stats} else: return pinfo else: return None
|
Execute ps command with custom output format with columns from fields, select lines using the filters defined by kwargs, and return result as a nested list. The Standard Format Specifiers from the ps man page must be used for the fields parameter.
|
1,068 |
def getProcDict(self, fields=('user', 'cmd',), threads=False, **kwargs): stats = {} field_list = list(fields) num_cols = len(field_list) if threads: key = 'spid' else: key = 'pid' try: key_idx = field_list.index(key) except ValueError: field_list.append(key) key_idx = len(field_list) - 1 result = self.getProcList(field_list, threads, **kwargs) if result is not None: headers = result['headers'][:num_cols] lines = result['stats'] if len(lines) > 1: for cols in lines: stats[cols[key_idx]] = dict(zip(headers, cols[:num_cols])) return stats else: return None
|
Execute ps command with custom output format with columns from fields and return result as a nested dictionary with the key PID or SPID. The Standard Format Specifiers from the ps man page must be used for the fields parameter.
|
1,069 |
def getProcStatStatus(self, threads=False, **kwargs): procs = self.getProcList(['stat',], threads=threads, **kwargs) status = dict(zip(procStatusNames.values(), [0,] * len(procStatusNames))) prio = {'high': 0, 'low': 0, 'norm': 0, 'locked_in_mem': 0} total = 0 locked_in_mem = 0 if procs is not None: for cols in procs['stats']: col_stat = cols[0] status[procStatusNames[col_stat[0]]] += 1 if '<' in col_stat[1:]: prio['high'] += 1 elif 'N' in col_stat[1:]: prio['low'] += 1 else: prio['norm'] += 1 if 'L' in col_stat[1:]: locked_in_mem += 1 total += 1 return {'status': status, 'prio': prio, 'locked_in_mem': locked_in_mem, 'total': total}
|
Return process counts per status and priority.
|
1,070 |
def muninMain(pluginClass, argv=None, env=None, debug=False): if argv is None: argv = sys.argv if env is None: env = os.environ debug = debug or env.has_key('MUNIN_DEBUG') if len(argv) > 1 and argv[1] == 'autoconf': autoconf = True else: autoconf = False try: plugin = pluginClass(argv, env, debug) ret = plugin.run() if ret: return 0 else: return 1 except Exception: print >> sys.stderr, "ERROR: %s" % repr(sys.exc_info()[1]) if autoconf: print "no" if debug: raise else: if autoconf: return 0 else: return 1
|
Main Block for Munin Plugins.
|
1,071 |
def fixLabel(label, maxlen, delim=None, repl='', truncend=True): if len(label) <= maxlen: return label else: maxlen -= len(repl) if delim is not None: if truncend: end = label.rfind(delim, 0, maxlen) if end > 0: return label[:end + 1] + repl else: start = label.find(delim, len(label) - maxlen) if start > 0: return repl + label[start:] if truncend: return label[:maxlen] + repl else: return repl + label[-maxlen:]
|
Truncate long graph and field labels.
|
1,072 |
def _getGraph(self, graph_name, fail_noexist=False): graph = self._graphDict.get(graph_name) if fail_noexist and graph is None: raise AttributeError("Invalid graph name: %s" % graph_name) else: return graph
|
Private method for returning graph object with name graph_name.
|
1,073 |
def _getSubGraph(self, parent_name, graph_name, fail_noexist=False): if not self.isMultigraph: raise AttributeError("Simple Munin Plugins cannot have subgraphs.") if self._graphDict.has_key(parent_name): subgraphs = self._subgraphDict.get(parent_name) if subgraphs is not None: subgraph = subgraphs.get(graph_name) if fail_noexist and subgraph is None: raise AttributeError("Invalid subgraph name %s for graph %s." % (graph_name, parent_name)) else: return subgraph else: raise AttributeError("Parent graph %s has no subgraphs." % (parent_name,)) else: raise AttributeError("Invalid parent graph name %s for subgraph %s." % (parent_name, graph_name))
|
Private method for returning subgraph object with name graph_name and parent graph with name parent_name.
|
1,074 |
def _getMultigraphID(self, graph_name, subgraph_name=None): if self.isMultiInstance and self._instanceName is not None: if subgraph_name is None: return "%s_%s" % (graph_name, self._instanceName) else: return "%s_%s.%s_%s" % (graph_name, self._instanceName, subgraph_name, self._instanceName) else: if subgraph_name is None: return graph_name else: return "%s.%s" % (graph_name, subgraph_name)
|
Private method for generating Multigraph ID from graph name and subgraph name.
|
1,075 |
def _formatConfig(self, conf_dict): confs = [] graph_dict = conf_dict['graph'] field_list = conf_dict['fields'] title = graph_dict.get('title') if title is not None: if self.isMultiInstance and self._instanceLabel is not None: if self._instanceLabelType == 'suffix': confs.append("graph_%s %s - %s" % ('title', title, self._instanceLabel,)) elif self._instanceLabelType == 'prefix': confs.append("graph_%s %s - %s" % ('title', self._instanceLabel, title,)) else: confs.append("graph_%s %s" % ('title', title)) for key in ('category', 'vlabel', 'info', 'args', 'period', 'scale', 'total', 'order', 'printf', 'width', 'height'): val = graph_dict.get(key) if val is not None: if isinstance(val, bool): if val: val = "yes" else: val = "no" confs.append("graph_%s %s" % (key, val)) for (field_name, field_attrs) in field_list: for key in ('label', 'type', 'draw', 'info', 'extinfo', 'colour', 'negative', 'graph', 'min', 'max', 'cdef', 'line', 'warning', 'critical'): val = field_attrs.get(key) if val is not None: if isinstance(val, bool): if val: val = "yes" else: val = "no" confs.append("%s.%s %s" % (field_name, key, val)) return "\n".join(confs)
|
Formats configuration dictionary from Munin Graph and returns multi-line value entries for the plugin config cycle.
|
1,076 |
def _formatVals(self, val_list): vals = [] for (name, val) in val_list: if val is not None: if isinstance(val, float): vals.append("%s.value %f" % (name, val)) else: vals.append("%s.value %s" % (name, val)) else: vals.append("%s.value U" % (name,)) return "\n".join(vals)
|
Formats value list from Munin Graph and returns multi-line value entries for the plugin fetch cycle.
|
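Entry 1,076 renders Munin's fetch-protocol lines, `<field>.value <number>`, with `U` marking an unknown value. A standalone sketch of the output format:

```python
def format_vals(val_list):
    """Render (name, value) pairs in Munin fetch format; None becomes 'U'."""
    vals = []
    for name, val in val_list:
        if val is None:
            vals.append("%s.value U" % name)
        elif isinstance(val, float):
            vals.append("%s.value %f" % (name, val))
        else:
            vals.append("%s.value %s" % (name, val))
    return "\n".join(vals)

print(format_vals([('load', 0.42), ('procs', 231), ('swap', None)]))
# load.value 0.420000
# procs.value 231
# swap.value U
```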
1,077 |
def envGet(self, name, default=None, conv=None): if self._env.has_key(name): if conv is not None: return conv(self._env.get(name)) else: return self._env.get(name) else: return default
|
Return value for environment variable, or the default if not set.
|
1,078 |
def saveState(self, stateObj): try: fp = open(self._stateFile, 'w') pickle.dump(stateObj, fp) except: raise IOError("Failure in storing plugin state in file: %s" % self._stateFile) return True
|
Utility method to save plugin state stored in stateObj to persistent storage, to permit access to previous state in subsequent plugin runs. Any object that can be pickled and unpickled can be used to store the plugin state.
|
1,079 |
def restoreState(self): if os.path.exists(self._stateFile): try: fp = open(self._stateFile, 'r') stateObj = pickle.load(fp) except: raise IOError("Failure in reading plugin state from file: %s" % self._stateFile) return stateObj return None
|
Utility method to restore plugin state from persistent storage to permit access to previous plugin state.
|
1,080 |
def appendSubgraph(self, parent_name, graph_name, graph): if not self.isMultigraph: raise AttributeError("Simple Munin Plugins cannot have subgraphs.") if self._graphDict.has_key(parent_name): if not self._subgraphDict.has_key(parent_name): self._subgraphDict[parent_name] = {} self._subgraphNames[parent_name] = [] self._subgraphDict[parent_name][graph_name] = graph self._subgraphNames[parent_name].append(graph_name) else: raise AttributeError("Invalid parent graph name %s used for subgraph %s." % (parent_name, graph_name))
|
Utility method to associate Subgraph Instance to Root Graph Instance.
|
1,081 |
def setSubgraphVal(self, parent_name, graph_name, field_name, val): subgraph = self._getSubGraph(parent_name, graph_name, True) if subgraph.hasField(field_name): subgraph.setVal(field_name, val) else: raise AttributeError("Invalid field name %s for subgraph %s of parent graph %s." % (field_name, graph_name, parent_name))
|
Set Value for Field in Subgraph.
|
1,082 |
def getSubgraphList(self, parent_name): if not self.isMultigraph: raise AttributeError("Simple Munin Plugins cannot have subgraphs.") if self._graphDict.has_key(parent_name): return self._subgraphNames[parent_name] or [] else: raise AttributeError("Invalid parent graph name %s." % (parent_name,))
|
Returns list of names of subgraphs for Root Graph with name parent_name.
|
1,083 |
def graphHasField(self, graph_name, field_name): graph = self._getGraph(graph_name, True) return graph.hasField(field_name)
|
Return true if graph with name graph_name has field with name field_name.
|
1,084 |
def subGraphHasField(self, parent_name, graph_name, field_name): subgraph = self._getSubGraph(parent_name, graph_name, True) return subgraph.hasField(field_name)
|
Return true if subgraph with name graph_name and parent graph with name parent_name has field with name field_name.
|
1,085 |
def getGraphFieldList(self, graph_name): graph = self._getGraph(graph_name, True) return graph.getFieldList()
|
Returns list of names of fields for graph with name graph_name.
|
1,086 |
def getGraphFieldCount(self, graph_name): graph = self._getGraph(graph_name, True) return graph.getFieldCount()
|
Returns number of fields for graph with name graph_name.
|
1,087 |
def getSubgraphFieldList(self, parent_name, graph_name): graph = self._getSubGraph(parent_name, graph_name, True) return graph.getFieldList()
|
Returns list of names of fields for subgraph with name graph_name and parent graph with name parent_name.
|
1,088 |
def getSubgraphFieldCount(self, parent_name, graph_name): graph = self._getSubGraph(parent_name, graph_name, True) return graph.getFieldCount()
|
Returns number of fields for subgraph with name graph_name and parent graph with name parent_name.
|
1,089 |
def config(self): for parent_name in self._graphNames: graph = self._graphDict[parent_name] if self.isMultigraph: print "multigraph %s" % self._getMultigraphID(parent_name) print self._formatConfig(graph.getConfig()) print if (self.isMultigraph and self._nestedGraphs and self._subgraphDict and self._subgraphNames): for (parent_name, subgraph_names) in self._subgraphNames.iteritems(): for graph_name in subgraph_names: graph = self._subgraphDict[parent_name][graph_name] print "multigraph %s" % self._getMultigraphID(parent_name, graph_name) print self._formatConfig(graph.getConfig()) print return True
|
Implements Munin Plugin Graph Configuration. Prints out configuration for graphs.
|
1,090 |
def fetch(self): self.retrieveVals() for parent_name in self._graphNames: graph = self._graphDict[parent_name] if self.isMultigraph: print "multigraph %s" % self._getMultigraphID(parent_name) print self._formatVals(graph.getVals()) print if (self.isMultigraph and self._nestedGraphs and self._subgraphDict and self._subgraphNames): for (parent_name, subgraph_names) in self._subgraphNames.iteritems(): for graph_name in subgraph_names: graph = self._subgraphDict[parent_name][graph_name] print "multigraph %s" % self._getMultigraphID(parent_name, graph_name) print self._formatVals(graph.getVals()) print return True
|
Implements Munin Plugin Fetch Option.
|
1,091 |
def run(self): if len(self._argv) > 1 and len(self._argv[1]) > 0: oper = self._argv[1] else: oper = 'fetch' if oper == 'fetch': ret = self.fetch() elif oper == 'config': ret = self.config() if ret and self._dirtyConfig: ret = self.fetch() elif oper == 'autoconf': ret = self.autoconf() if ret: print "yes" else: print "no" ret = True elif oper == 'suggest': ret = self.suggest() else: raise AttributeError("Invalid command argument: %s" % oper) return ret
|
Implements main entry point for plugin execution.
|
1,092 |
def addField(self, name, label, type=None, draw=None, info=None, extinfo=None, colour=None, negative=None, graph=None, min=None, max=None, cdef=None, line=None, warning=None, critical=None): if self._autoFixNames: name = self._fixName(name) if negative is not None: negative = self._fixName(negative) self._fieldAttrDict[name] = dict(((k, v) for (k, v) in locals().iteritems() if (v is not None and k not in ('self',)))) self._fieldNameList.append(name)
|
Add field to Munin Graph.
|
1,093 |
def hasField(self, name): if self._autoFixNames: name = self._fixName(name) return self._fieldAttrDict.has_key(name)
|
Returns true if a field with the given name exists.
|
1,094 |
def getConfig(self): return {'graph': self._graphAttrDict, 'fields': [(field_name, self._fieldAttrDict.get(field_name)) for field_name in self._fieldNameList]}
|
Returns dictionary of config entries for Munin Graph.
|
1,095 |
def setVal(self, name, val): if self._autoFixNames: name = self._fixName(name) self._fieldValDict[name] = val
|
Set value for field in graph.
|
1,096 |
def getVals(self): return [(name, self._fieldValDict.get(name)) for name in self._fieldNameList]
|
Returns value list for Munin Graph.
|
1,097 |
def initStats(self): url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._statuspath) response = util.get_url(url, self._user, self._password) self._statusDict = {} for line in response.splitlines(): mobj = re.match('\s*(\d+)\s+(\d+)\s+(\d+)\s*$', line) if mobj: idx = 0 for key in ('accepts', 'handled', 'requests'): idx += 1 self._statusDict[key] = util.parse_value(mobj.group(idx)) else: for (key, val) in re.findall('(\w+):\s*(\d+)', line): self._statusDict[key.lower()] = util.parse_value(val)
|
Query and parse Nginx Web Server Status Page.
|
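Entry 1,097 parses the two line shapes of an nginx stub_status page: a bare `accepts handled requests` counter line and `key: value` pairs. A self-contained run against a typical response body (the sample text is illustrative):

```python
import re

sample = """Active connections: 291
server accepts handled requests
 16630948 16630948 31070465
Reading: 6 Writing: 179 Waiting: 106
"""

status = {}
for line in sample.splitlines():
    mobj = re.match(r'\s*(\d+)\s+(\d+)\s+(\d+)\s*$', line)
    if mobj:
        # The bare three-number line holds the cumulative counters.
        for idx, key in enumerate(('accepts', 'handled', 'requests'), start=1):
            status[key] = int(mobj.group(idx))
    else:
        for key, val in re.findall(r'(\w+):\s*(\d+)', line):
            status[key.lower()] = int(val)

print(status['requests'], status['waiting'])  # 31070465 106
```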
1,098 |
def getIfStats(self): info_dict = {} try: fp = open(ifaceStatsFile, 'r') data = fp.read() fp.close() except: raise IOError('Failed reading interface stats from file: %s' % ifaceStatsFile) for line in data.splitlines(): mobj = re.match('^\s*([\w\d:]+):\s*(.*\S)\s*$', line) if mobj: iface = mobj.group(1) statline = mobj.group(2) info_dict[iface] = dict(zip(('rxbytes', 'rxpackets', 'rxerrs', 'rxdrop', 'rxfifo', 'rxframe', 'rxcompressed', 'rxmulticast', 'txbytes', 'txpackets', 'txerrs', 'txdrop', 'txfifo', 'txcolls', 'txcarrier', 'txcompressed'), [int(x) for x in statline.split()])) return info_dict
|
Return dictionary of Traffic Stats for Network Interfaces.
|
1,099 |
def getRoutes(self): routes = [] try: out = subprocess.Popen([routeCmd, "-n"], stdout=subprocess.PIPE).communicate()[0] except: raise Exception('Execution of command %s failed.' % routeCmd) lines = out.splitlines() if len(lines) > 1: headers = [col.lower() for col in lines[1].split()] for line in lines[2:]: routes.append(dict(zip(headers, line.split()))) return routes
|
Get routing table.
|