800
def router_elements(self):
    elements = devicetools.Elements()
    for router in self._router_numbers:
        element = self._get_routername(router)
        inlet = self._get_nodename(router)
        try:
            outlet = self._get_nodename(self._up2down[router])
        except TypeError:
            outlet = self.last_node
        elements += devicetools.Element(element, inlets=inlet, outlets=outlet)
    return elements
An |Elements| collection of all routing basins.
801
def nodes(self):
    return (devicetools.Nodes(
        self.node_prefix + routers for routers in self._router_numbers) +
        devicetools.Node(self.last_node))
A |Nodes| collection of all required nodes.
802
def selection(self):
    return selectiontools.Selection(
        self.selection_name, self.nodes, self.elements)
A complete |Selection| object of all supplying and routing elements and required nodes.
803
def chars2str(chars) -> List[str]:
    strings = collections.deque()
    for subchars in chars:
        substrings = collections.deque()
        for char in subchars:
            if char:
                substrings.append(char.decode('utf-8'))
            else:
                substrings.append('')
        strings.append(''.join(substrings))
    return list(strings)
The inverse of function |str2chars|.
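A minimal round-trip illustration (hand-written hypothetical input, assuming chars2str from above is in scope; each inner list is one row of single-byte fields, padded with b''):

# b'' entries are padding and decode to the empty string
assert chars2str([[b'h', b'i', b''], [b'a', b'', b'']]) == ['hi', 'a']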
804
def create_dimension(ncfile, name, length) -> None:
    try:
        ncfile.createDimension(name, length)
    except BaseException:
        objecttools.augment_excmessage(
            'While trying to add dimension `%s` with length `%d` '
            'to the NetCDF file `%s`'
            % (name, length, get_filepath(ncfile)))
Add a new dimension with the given name and length to the given NetCDF file.
805
def create_variable(ncfile, name, datatype, dimensions) -> None:
    default = fillvalue if (datatype == 'f8') else None
    try:
        ncfile.createVariable(name, datatype, dimensions=dimensions,
                              fill_value=default)
        ncfile[name].long_name = name
    except BaseException:
        objecttools.augment_excmessage(
            'While trying to add variable `%s` with datatype `%s` '
            'and dimensions `%s` to the NetCDF file `%s`'
            % (name, datatype, dimensions, get_filepath(ncfile)))
Add a new variable with the given name, datatype, and dimensions to the given NetCDF file.
806
def query_variable(ncfile, name) -> netcdf4.Variable:
    try:
        return ncfile[name]
    except (IndexError, KeyError):
        raise OSError('NetCDF file `%s` does not contain variable `%s`.'
                      % (get_filepath(ncfile), name))
Return the variable with the given name from the given NetCDF file.
807
def query_timegrid(ncfile) -> timetools.Timegrid:
    timepoints = ncfile[varmapping['timepoints']]
    refdate = timetools.Date.from_cfunits(timepoints.units)
    return timetools.Timegrid.from_timepoints(
        timepoints=timepoints[:],
        refdate=refdate,
        unit=timepoints.units.strip().split()[0])
Return the |Timegrid| defined by the given NetCDF file.
808
def query_array(ncfile, name) -> numpy.ndarray:
    variable = query_variable(ncfile, name)
    maskedarray = variable[:]
    fillvalue_ = getattr(variable, '_FillValue', numpy.nan)
    if not numpy.isnan(fillvalue_):
        maskedarray[maskedarray.mask] = numpy.nan
    return maskedarray.data
Return the data of the variable with the given name from the given NetCDF file.
809
def log(self, sequence, infoarray) -> None:
    if isinstance(sequence, sequencetools.ModelSequence):
        descr = sequence.descr_model
    else:
        descr = 'node'
    if self._isolate:
        descr = '%s_%s' % (descr, sequence.descr_sequence)
        if ((infoarray is not None) and
                (infoarray.info['type'] != 'unmodified')):
            descr = '%s_%s' % (descr, infoarray.info['type'])
    dirpath = sequence.dirpath_ext
    try:
        files = self.folders[dirpath]
    except KeyError:
        files: Dict[str, 'NetCDFFile'] = collections.OrderedDict()
        self.folders[dirpath] = files
    try:
        file_ = files[descr]
    except KeyError:
        file_ = NetCDFFile(name=descr, flatten=self._flatten,
                           isolate=self._isolate, timeaxis=self._timeaxis,
                           dirpath=dirpath)
        files[descr] = file_
    file_.log(sequence, infoarray)
Prepare a |NetCDFFile| object suitable for the given |IOSequence| object when necessary, and pass the given arguments to its |NetCDFFile.log| method.
810
def read(self) -> None:
    for folder in self.folders.values():
        for file_ in folder.values():
            file_.read()
Call method |NetCDFFile.read| of all handled |NetCDFFile| objects.
811
def write(self) -> None:
    if self.folders:
        init = hydpy.pub.timegrids.init
        timeunits = init.firstdate.to_cfunits('hours')
        timepoints = init.to_timepoints('hours')
        for folder in self.folders.values():
            for file_ in folder.values():
                file_.write(timeunits, timepoints)
Call method |NetCDFFile.write| of all handled |NetCDFFile| objects.
812
def filenames(self) -> Tuple[str, ...]:
    return tuple(sorted(set(itertools.chain(
        *(_.keys() for _ in self.folders.values())))))
A |tuple| of the names of all handled |NetCDFFile| objects.
813
def log(self, sequence, infoarray) -> None:
    aggregated = ((infoarray is not None) and
                  (infoarray.info['type'] != 'unmodified'))
    descr = sequence.descr_sequence
    if aggregated:
        descr = '_'.join([descr, infoarray.info['type']])
    if descr in self.variables:
        var_ = self.variables[descr]
    else:
        if aggregated:
            cls = NetCDFVariableAgg
        elif self._flatten:
            cls = NetCDFVariableFlat
        else:
            cls = NetCDFVariableDeep
        var_ = cls(name=descr, isolate=self._isolate, timeaxis=self._timeaxis)
        self.variables[descr] = var_
    var_.log(sequence, infoarray)
Pass the given |IOSequence| to a suitable instance of a |NetCDFVariableBase| subclass.
814
def filepath(self) -> str:
    return os.path.join(self._dirpath, self.name + '.nc')
The NetCDF file path.
815
def read(self) -> None:
    try:
        with netcdf4.Dataset(self.filepath, "r") as ncfile:
            timegrid = query_timegrid(ncfile)
            for variable in self.variables.values():
                variable.read(ncfile, timegrid)
    except BaseException:
        objecttools.augment_excmessage(
            f'While trying to read data from NetCDF file `{self.filepath}`')
Open an existing NetCDF file temporarily and call method |NetCDFVariableDeep.read| of all handled |NetCDFVariableBase| objects.
816
def write(self, timeunit, timepoints) -> None:
    with netcdf4.Dataset(self.filepath, "w") as ncfile:
        ncfile.Conventions = 'CF-1.6'
        self._insert_timepoints(ncfile, timepoints, timeunit)
        for variable in self.variables.values():
            variable.write(ncfile)
Open a new NetCDF file temporarily and call method |NetCDFVariableBase.write| of all handled |NetCDFVariableBase| objects.
817
def get_index(self, name_subdevice) -> int:
    try:
        return self.dict_[name_subdevice]
    except KeyError:
        raise OSError('No data for sequence `%s` and (sub)device `%s` '
                      'in NetCDF file `%s` available.'
                      % (self.name_sequence, name_subdevice, self.name_ncfile))
Item access to the wrapped |dict| object with a specialized error message.
818
def log(self, sequence, infoarray) -> None:
    descr_device = sequence.descr_device
    self.sequences[descr_device] = sequence
    self.arrays[descr_device] = infoarray
Log the given |IOSequence| object either for reading or writing data.
819
def sort_timeplaceentries(self, timeentry, placeentry) -> Tuple[Any, Any]:
    if self._timeaxis:
        return placeentry, timeentry
    return timeentry, placeentry
Return a |tuple| containing the given timeentry and placeentry, sorted in agreement with the currently selected timeaxis.
820
def get_timeplaceslice(self, placeindex) -> Union[Tuple[slice, int],
                                                  Tuple[int, slice]]:
    return self.sort_timeplaceentries(slice(None), int(placeindex))
Return a |tuple| for indexing the complete time series of a certain location available in |NetCDFVariableBase.array|.
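For intuition, the returned (int, slice) or (slice, int) pair simply picks a row or a column of a 2-D array; a standalone numpy sketch (independent of the classes above):

import numpy

series = numpy.arange(12).reshape(3, 4)   # e.g. a (place, time) layout
print(series[1, slice(None)])   # one device, the full time series (a row)
print(series[slice(None), 1])   # all devices at one time point (a column)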
821
def subdevicenames(self) -> Tuple[str, ...]:
    self: NetCDFVariableBase
    return tuple(self.sequences.keys())
A |tuple| containing the device names.
822
def get_slices(self, idx, shape) -> Tuple[IntOrSlice, ...]:
    slices = list(self.get_timeplaceslice(idx))
    for length in shape:
        slices.append(slice(0, length))
    return tuple(slices)
Return a |tuple| of one |int| and some |slice| objects to access all values of a certain device within |NetCDFVariableDeep.array|.
823
def shape(self) -> Tuple[int, ...]:
    nmb_place = len(self.sequences)
    nmb_time = len(hydpy.pub.timegrids.init)
    nmb_others = collections.deque()
    for sequence in self.sequences.values():
        nmb_others.append(sequence.shape)
    nmb_others_max = tuple(numpy.max(nmb_others, axis=0))
    return self.sort_timeplaceentries(nmb_time, nmb_place) + nmb_others_max
Required shape of |NetCDFVariableDeep.array|.
824
def array(self) -> numpy.ndarray:
    array = numpy.full(self.shape, fillvalue, dtype=float)
    for idx, (descr, subarray) in enumerate(self.arrays.items()):
        sequence = self.sequences[descr]
        array[self.get_slices(idx, sequence.shape)] = subarray
    return array
The series data of all logged |IOSequence| objects, contained in one single |numpy.ndarray|.
825
def shape(self) -> Tuple[int, int]:
    return self.sort_timeplaceentries(
        len(hydpy.pub.timegrids.init), len(self.sequences))
Required shape of |NetCDFVariableAgg.array|.
826
def array(self) -> numpy.ndarray:
    array = numpy.full(self.shape, fillvalue, dtype=float)
    for idx, subarray in enumerate(self.arrays.values()):
        array[self.get_timeplaceslice(idx)] = subarray
    return array
The aggregated data of all logged |IOSequence| objects, contained in one single |numpy.ndarray| object.
827
def shape(self) -> Tuple[int, int]:
    return self.sort_timeplaceentries(
        len(hydpy.pub.timegrids.init),
        sum(len(seq) for seq in self.sequences.values()))
Required shape of |NetCDFVariableFlat.array|.
828
def array(self) -> numpy.ndarray:
    array = numpy.full(self.shape, fillvalue, dtype=float)
    idx0 = 0
    idxs: List[Any] = [slice(None)]
    for seq, subarray in zip(self.sequences.values(), self.arrays.values()):
        for prod in self._product(seq.shape):
            subsubarray = subarray[tuple(idxs + list(prod))]
            array[self.get_timeplaceslice(idx0)] = subsubarray
            idx0 += 1
    return array
The series data of all logged |IOSequence| objects, contained in one single |numpy.ndarray| object.
829
def update(self):
    maxdt = self.subpars.pars.control.maxdt
    seconds = self.simulationstep.seconds
    self.value = numpy.ceil(seconds / maxdt)
Determine the number of substeps.
830
def update(self):
    con = self.subpars.pars.control
    der = self.subpars
    for (toy, qs) in con.q:
        setattr(self, str(toy),
                2.0 * con.v + der.seconds / der.nmbsubsteps * qs)
    self.refresh()
Calculate the auxiliary term.
831
def prepare_io_example_1() -> Tuple[devicetools.Nodes, devicetools.Elements]:
    import numpy
    from hydpy import TestIO
    TestIO.clear()
    from hydpy.core.filetools import SequenceManager
    hydpy.pub.sequencemanager = SequenceManager()
    with TestIO():
        hydpy.pub.sequencemanager.inputdirpath = 'inputpath'
        hydpy.pub.sequencemanager.fluxdirpath = 'outputpath'
        hydpy.pub.sequencemanager.statedirpath = 'outputpath'
        hydpy.pub.sequencemanager.nodedirpath = 'nodepath'
    hydpy.pub.timegrids = '2000-01-01', '2000-01-05', '1d'
    from hydpy import Node, Nodes, Element, Elements, prepare_model
    node1 = Node('node1')
    node2 = Node('node2', variable='T')
    nodes = Nodes(node1, node2)
    element1 = Element('element1', outlets=node1)
    element2 = Element('element2', outlets=node1)
    element3 = Element('element3', outlets=node1)
    elements = Elements(element1, element2, element3)
    from hydpy.models import lland_v1, lland_v2
    element1.model = prepare_model(lland_v1)
    element2.model = prepare_model(lland_v1)
    element3.model = prepare_model(lland_v2)
    from hydpy.models.lland import ACKER
    for idx, element in enumerate(elements):
        parameters = element.model.parameters
        parameters.control.nhru(idx + 1)
        parameters.control.lnk(ACKER)
        parameters.derived.absfhru(10.0)
    with hydpy.pub.options.printprogress(False):
        nodes.prepare_simseries()
        elements.prepare_inputseries()
        elements.prepare_fluxseries()
        elements.prepare_stateseries()

    def init_values(seq, value1_):
        value2_ = value1_ + len(seq.series.flatten())
        values_ = numpy.arange(value1_, value2_, dtype=float)
        seq.testarray = values_.reshape(seq.seriesshape)
        seq.series = seq.testarray.copy()
        return value2_

    value1 = 0
    for subname, seqname in zip(['inputs', 'fluxes', 'states'],
                                ['nied', 'nkor', 'bowa']):
        for element in elements:
            subseqs = getattr(element.model.sequences, subname)
            value1 = init_values(getattr(subseqs, seqname), value1)
    for node in nodes:
        value1 = init_values(node.sequences.sim, value1)
    return nodes, elements
Prepare an IO example configuration.
832
def get_postalcodes_around_radius(self, pc, radius):
    postalcodes = self.get(pc)
    if postalcodes is None:
        raise PostalCodeNotFoundException(
            "Could not find postal code you're searching for.")
    else:
        pc = postalcodes[0]
    radius = float(radius)
    earth_radius = 6371
    dlat = radius / earth_radius
    dlon = asin(sin(dlat) / cos(radians(pc.latitude)))
    lat_delta = degrees(dlat)
    lon_delta = degrees(dlon)
    if lat_delta < 0:
        lat_range = (pc.latitude + lat_delta, pc.latitude - lat_delta)
    else:
        lat_range = (pc.latitude - lat_delta, pc.latitude + lat_delta)
    # use the longitude delta for both longitude bounds
    long_range = (pc.longitude - lon_delta, pc.longitude + lon_delta)
    return format_result(self.conn_manager.query(
        PC_RANGE_QUERY % (long_range[0], long_range[1],
                          lat_range[0], lat_range[1])))
Bounding-box calculation adapted from pyzipcode.
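A worked example of the delta calculation above, assuming a 10 km radius at latitude 52.5 degrees (the 6371 km Earth radius matches the function):

from math import asin, sin, cos, radians, degrees

radius, earth_radius, latitude = 10.0, 6371.0, 52.5
dlat = radius / earth_radius                      # angular radius in radians
dlon = asin(sin(dlat) / cos(radians(latitude)))   # widens toward the poles
print(degrees(dlat), degrees(dlon))               # ~0.0899 and ~0.1477 degrees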
833
def get_all_team_ids():
    df = get_all_player_ids("all_data")
    df = pd.DataFrame({"TEAM_NAME": df.TEAM_NAME.unique(),
                       "TEAM_ID": df.TEAM_ID.unique()})
    return df
Returns a pandas DataFrame with all team IDs.
834
def get_team_id(team_name):
    df = get_all_team_ids()
    df = df[df.TEAM_NAME == team_name]
    if len(df) == 0:
        er = "Invalid team name or there is no team with that name."
        raise ValueError(er)
    team_id = df.TEAM_ID.iloc[0]
    return team_id
Returns the team ID associated with the team name that is passed in.
835
def get_game_logs(self):
    logs = self.response.json()['resultSets'][0]['rowSet']
    headers = self.response.json()['resultSets'][0]['headers']
    df = pd.DataFrame(logs, columns=headers)
    df.GAME_DATE = pd.to_datetime(df.GAME_DATE)
    return df
Returns team game logs as a pandas DataFrame.
836
def get_game_id(self, date):
    df = self.get_game_logs()
    game_id = df[df.GAME_DATE == date].Game_ID.values[0]
    return game_id
Returns the Game ID associated with the date that is passed in.
837
def update_params(self, parameters):
    self.url_paramaters.update(parameters)
    self.response = requests.get(self.base_url, params=self.url_paramaters,
                                 headers=HEADERS)
    self.response.raise_for_status()
    return self
Pass in a dictionary to update the URL parameters for the NBA stats API.
838
def get_shots(self):
    shots = self.response.json()['resultSets'][0]['rowSet']
    headers = self.response.json()['resultSets'][0]['headers']
    return pd.DataFrame(shots, columns=headers)
Returns the shot chart data as a pandas DataFrame.
839
def unsubscribe(self, subscription, max=None):
    if max is None:
        self._send('UNSUB %d' % subscription.sid)
        self._subscriptions.pop(subscription.sid)
    else:
        subscription.max = max
        self._send('UNSUB %d %s' % (subscription.sid, max))
Remove interest in the given subject. If max is provided, the server processes an automatic unsubscribe once max messages have been received.
840
def request(self, subject, callback, msg=None):
    inbox = self._build_inbox()
    s = self.subscribe(inbox, callback)
    self.unsubscribe(s, 1)
    self.publish(subject, msg, inbox)
    return s
Publish a message with an implicit inbox listener as the reply. The message is optional.
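A usage sketch of the request/reply pattern (the client instance, subject, and payload below are illustrative, not part of the source):

def on_reply(msg):
    print("got reply:", msg)

# hypothetical client instance; the reply arrives on the implicit inbox,
# and the single-message unsubscribe tears the listener down afterwards
client.request('time.service', on_reply, msg='what time is it?')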
841
def draw_court(ax=None, color='gray', lw=1, outer_lines=False):
    if ax is None:
        ax = plt.gca()
    hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False)
    backboard = Rectangle((-30, -12.5), 60, 0, linewidth=lw, color=color)
    outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color,
                          fill=False)
    inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color,
                          fill=False)
    top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
                         linewidth=lw, color=color, fill=False)
    bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
                            linewidth=lw, color=color, linestyle='dashed')
    restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
                     color=color)
    corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw,
                               color=color)
    corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color)
    three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw,
                    color=color)
    center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
                           linewidth=lw, color=color)
    center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
                           linewidth=lw, color=color)
    court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
                      bottom_free_throw, restricted, corner_three_a,
                      corner_three_b, three_arc, center_outer_arc,
                      center_inner_arc]
    if outer_lines:
        outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw,
                                color=color, fill=False)
        court_elements.append(outer_lines)
    for element in court_elements:
        ax.add_patch(element)
    return ax
Returns an Axes object with a basketball court drawn onto it.
842
def shot_chart(x, y, kind="scatter", title="", color="b", cmap=None,
               xlim=(-250, 250), ylim=(422.5, -47.5),
               court_color="gray", court_lw=1, outer_lines=False,
               flip_court=False, kde_shade=True, gridsize=None,
               ax=None, despine=False, **kwargs):
    if ax is None:
        ax = plt.gca()
    if cmap is None:
        cmap = sns.light_palette(color, as_cmap=True)
    if not flip_court:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    else:
        ax.set_xlim(xlim[::-1])
        ax.set_ylim(ylim[::-1])
    ax.tick_params(labelbottom="off", labelleft="off")
    ax.set_title(title, fontsize=18)
    draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)
    if kind == "scatter":
        ax.scatter(x, y, c=color, **kwargs)
    elif kind == "kde":
        sns.kdeplot(x, y, shade=kde_shade, cmap=cmap, ax=ax, **kwargs)
        ax.set_xlabel('')
        ax.set_ylabel('')
    elif kind == "hex":
        if gridsize is None:
            from seaborn.distributions import _freedman_diaconis_bins
            x_bin = _freedman_diaconis_bins(x)
            y_bin = _freedman_diaconis_bins(y)
            gridsize = int(np.mean([x_bin, y_bin]))
        ax.hexbin(x, y, gridsize=gridsize, cmap=cmap, **kwargs)
    else:
        raise ValueError("kind must be 'scatter', 'kde', or 'hex'.")
    for spine in ax.spines:
        ax.spines[spine].set_lw(court_lw)
        ax.spines[spine].set_color(court_color)
    if despine:
        ax.spines["top"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.spines["left"].set_visible(False)
    return ax
Returns an Axes object with player shots plotted.
843
def shot_chart_jointplot(x, y, data=None, kind="scatter", title="", color="b",
                         cmap=None, xlim=(-250, 250), ylim=(422.5, -47.5),
                         court_color="gray", court_lw=1, outer_lines=False,
                         flip_court=False, size=(12, 11), space=0,
                         despine=False, joint_kws=None, marginal_kws=None,
                         **kwargs):
    if cmap is None:
        cmap = sns.light_palette(color, as_cmap=True)
    if kind not in ["scatter", "kde", "hex"]:
        raise ValueError("kind must be 'scatter', 'kde', or 'hex'.")
    # pass the space parameter through instead of hard-coding 0
    grid = sns.jointplot(x=x, y=y, data=data, stat_func=None, kind=kind,
                         space=space, color=color, cmap=cmap,
                         joint_kws=joint_kws, marginal_kws=marginal_kws,
                         **kwargs)
    grid.fig.set_size_inches(size)
    ax = grid.ax_joint
    if not flip_court:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    else:
        ax.set_xlim(xlim[::-1])
        ax.set_ylim(ylim[::-1])
    draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)
    ax.set_xlabel('')
    ax.set_ylabel('')
    ax.tick_params(labelbottom='off', labelleft='off')
    ax.set_title(title, y=1.2, fontsize=18)
    for spine in ax.spines:
        ax.spines[spine].set_lw(court_lw)
        ax.spines[spine].set_color(court_color)
        grid.ax_marg_x.spines[spine].set_lw(court_lw)
        grid.ax_marg_x.spines[spine].set_color(court_color)
        grid.ax_marg_y.spines[spine].set_lw(court_lw)
        grid.ax_marg_y.spines[spine].set_color(court_color)
    if despine:
        ax.spines["top"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.spines["left"].set_visible(False)
    return grid
Returns a seaborn JointGrid using sns.jointplot.
844
def heatmap(x, y, z, title="", cmap=plt.cm.YlOrRd, bins=20,
            xlim=(-250, 250), ylim=(422.5, -47.5),
            facecolor='lightgray', facecolor_alpha=0.4,
            court_color="black", court_lw=0.5, outer_lines=False,
            flip_court=False, ax=None, **kwargs):
    mean, xedges, yedges, binnumber = binned_statistic_2d(
        x=x, y=y, values=z, statistic='mean', bins=bins)
    if ax is None:
        ax = plt.gca()
    if not flip_court:
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
    else:
        ax.set_xlim(xlim[::-1])
        ax.set_ylim(ylim[::-1])
    ax.tick_params(labelbottom="off", labelleft="off")
    ax.set_title(title, fontsize=18)
    ax.patch.set_facecolor(facecolor)
    ax.patch.set_alpha(facecolor_alpha)
    draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)
    heatmap = ax.imshow(mean.T, origin='lower',
                        extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
                        interpolation='nearest', cmap=cmap)
    return heatmap
Returns an AxesImage object that contains a heatmap.
845
def bokeh_draw_court(figure, line_color='gray', line_width=1):
    figure.circle(x=0, y=0, radius=7.5, fill_alpha=0,
                  line_color=line_color, line_width=line_width)
    figure.line(x=range(-30, 31), y=-12.5, line_color=line_color)
    figure.rect(x=0, y=47.5, width=160, height=190, fill_alpha=0,
                line_color=line_color, line_width=line_width)
    figure.line(x=-60, y=np.arange(-47.5, 143.5), line_color=line_color,
                line_width=line_width)
    figure.line(x=60, y=np.arange(-47.5, 143.5), line_color=line_color,
                line_width=line_width)
    figure.arc(x=0, y=0, radius=40, start_angle=pi, end_angle=0,
               line_color=line_color, line_width=line_width)
    figure.arc(x=0, y=142.5, radius=60, start_angle=pi, end_angle=0,
               line_color=line_color)
    figure.arc(x=0, y=142.5, radius=60, start_angle=0, end_angle=pi,
               line_color=line_color, line_dash="dashed")
    figure.line(x=-220, y=np.arange(-47.5, 92.5), line_color=line_color,
                line_width=line_width)
    figure.line(x=220, y=np.arange(-47.5, 92.5), line_color=line_color,
                line_width=line_width)
    figure.arc(x=0, y=0, radius=237.5, start_angle=3.528, end_angle=-0.3863,
               line_color=line_color, line_width=line_width)
    figure.arc(x=0, y=422.5, radius=60, start_angle=0, end_angle=pi,
               line_color=line_color, line_width=line_width)
    figure.arc(x=0, y=422.5, radius=20, start_angle=0, end_angle=pi,
               line_color=line_color, line_width=line_width)
    figure.rect(x=0, y=187.5, width=500, height=470, fill_alpha=0,
                line_color=line_color, line_width=line_width)
    return figure
Returns a figure with the basketball court lines drawn onto it.
846
def bokeh_shot_chart(data, x="LOC_X", y="LOC_Y", fill_color="#1f77b4",
                     scatter_size=10, fill_alpha=0.4, line_alpha=0.4,
                     court_line_color='gray', court_line_width=1,
                     hover_tool=False, tooltips=None, **kwargs):
    source = ColumnDataSource(data)
    fig = figure(width=700, height=658, x_range=[-250, 250],
                 y_range=[422.5, -47.5], min_border=0, x_axis_type=None,
                 y_axis_type=None, outline_line_color="black", **kwargs)
    fig.scatter(x, y, source=source, size=scatter_size, color=fill_color,
                alpha=fill_alpha, line_alpha=line_alpha)
    bokeh_draw_court(fig, line_color=court_line_color,
                     line_width=court_line_width)
    if hover_tool:
        hover = HoverTool(renderers=[fig.renderers[0]], tooltips=tooltips)
        fig.add_tools(hover)
    return fig
Returns a figure with both FGA and basketball court lines drawn onto it.
847
def _kmedoids_run(X, n_clusters, distance, max_iter, tol, rng):
    membs = np.empty(shape=X.shape[0], dtype=int)
    centers = kmeans._kmeans_init(X, n_clusters, method='', rng=rng)
    sse_last = 9999.9
    n_iter = 0
    for it in range(1, max_iter):
        membs = kmeans._assign_clusters(X, centers)
        centers, sse_arr = _update_centers(X, membs, n_clusters, distance)
        sse_total = np.sum(sse_arr)
        if np.abs(sse_total - sse_last) < tol:
            n_iter = it
            break
        sse_last = sse_total
    return (centers, membs, sse_total, sse_arr, n_iter)
Run a single trial of k-medoids clustering on dataset X with the given number of clusters.
848
def _init_mixture_params(X, n_mixtures, init_method):
    init_priors = np.ones(shape=n_mixtures, dtype=float) / n_mixtures
    if init_method == 'kmeans':
        km = _kmeans.KMeans(n_clusters=n_mixtures, n_trials=20)
        km.fit(X)
        init_means = km.centers_
    else:
        inx_rand = np.random.choice(X.shape[0], size=n_mixtures)
        init_means = X[inx_rand, :]
    if np.any(np.isnan(init_means)):
        raise ValueError("Init means are NaN! ")
    n_features = X.shape[1]
    init_covars = np.empty(shape=(n_mixtures, n_features, n_features),
                           dtype=float)
    for i in range(n_mixtures):
        init_covars[i] = np.eye(n_features)
    return (init_priors, init_means, init_covars)
Initialize mixture-density parameters with equal priors, random means, and identity covariance matrices.
849
def __log_density_single(x, mean, covar):
    n_dim = mean.shape[0]
    dx = x - mean
    covar_inv = scipy.linalg.inv(covar)
    covar_det = scipy.linalg.det(covar)
    den = np.dot(np.dot(dx.T, covar_inv), dx) + \
        n_dim * np.log(2 * np.pi) + np.log(covar_det)
    return (-1 / 2 * den)
This is just a test function to calculate the normal density at x given mean and covariance matrix.
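The expression is the standard multivariate-normal log-density, -1/2 [dx' Sigma^-1 dx + d ln(2 pi) + ln |Sigma|], so it can be sanity-checked against scipy; a standalone sketch of that identity:

import numpy as np
import scipy.linalg
from scipy.stats import multivariate_normal

x = np.array([1.0, 2.0])
mean = np.array([0.0, 0.0])
covar = np.array([[2.0, 0.3], [0.3, 1.0]])
dx = x - mean
den = (dx @ scipy.linalg.inv(covar) @ dx
       + 2 * np.log(2 * np.pi) + np.log(scipy.linalg.det(covar)))
# same closed form as above; agrees with scipy's logpdf
assert np.isclose(-den / 2, multivariate_normal(mean=mean, cov=covar).logpdf(x))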
850
def _validate_params(priors, means, covars):
    for i, (p, m, cv) in enumerate(zip(priors, means, covars)):
        if np.any(np.isinf(p)) or np.any(np.isnan(p)):
            raise ValueError("Component %d of priors is not valid " % i)
        if np.any(np.isinf(m)) or np.any(np.isnan(m)):
            raise ValueError("Component %d of means is not valid " % i)
        if np.any(np.isinf(cv)) or np.any(np.isnan(cv)):
            raise ValueError("Component %d of covars is not valid " % i)
        if (not np.allclose(cv, cv.T) or
                np.any(scipy.linalg.eigvalsh(cv) <= 0)):
            raise ValueError(
                "Component %d of covars must be positive-definite" % i)
Validation check for maximum-likelihood parameters.
851
def fit(self, X):
    params_dict = _fit_gmm_params(X=X, n_mixtures=self.n_clusters,
                                  n_init=self.n_trials,
                                  init_method=self.init_method,
                                  n_iter=self.max_iter, tol=self.tol)
    self.priors_ = params_dict['priors']
    self.means_ = params_dict['means']
    self.covars_ = params_dict['covars']
    self.converged = True
    self.labels_ = self.predict(X)
Fit mixture-density parameters with the EM algorithm.
852
def _kmeans_init(X, n_clusters, method='balanced', rng=None):
    n_samples = X.shape[0]
    if rng is None:
        cent_idx = np.random.choice(n_samples, replace=False, size=n_clusters)
    else:
        cent_idx = rng.choice(n_samples, replace=False, size=n_clusters)
    centers = X[cent_idx, :]
    mean_X = np.mean(X, axis=0)
    if method == 'balanced':
        centers[n_clusters - 1] = n_clusters * mean_X - \
            np.sum(centers[:(n_clusters - 1)], axis=0)
    return (centers)
Initialize k = n_clusters centroids randomly.
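The 'balanced' branch replaces the last centroid so that the mean of the centroids equals the data mean; a quick standalone numpy check of that identity:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
k = 4
centers = X[rng.choice(50, size=k, replace=False)].copy()
# last center forces sum(centers) == k * mean(X)
centers[k - 1] = k * X.mean(axis=0) - centers[:k - 1].sum(axis=0)
assert np.allclose(centers.mean(axis=0), X.mean(axis=0))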
853
def _cal_dist2center(X, center):
    dmemb2cen = scipy.spatial.distance.cdist(
        X, center.reshape(1, X.shape[1]), metric='seuclidean')
    return (np.sum(dmemb2cen))
Calculate the SSE to the cluster center.
854
def _kmeans_run(X, n_clusters, max_iter, tol):
    membs = np.empty(shape=X.shape[0], dtype=int)
    centers = _kmeans_init(X, n_clusters)
    sse_last = 9999.9
    n_iter = 0
    for it in range(1, max_iter):
        membs = _assign_clusters(X, centers)
        centers, sse_arr = _update_centers(X, membs, n_clusters)
        sse_total = np.sum(sse_arr)
        if np.abs(sse_total - sse_last) < tol:
            n_iter = it
            break
        sse_last = sse_total
    return (centers, membs, sse_total, sse_arr, n_iter)
Run a single trial of k-means clustering on dataset X with the given number of clusters.
855
def _kmeans(X, n_clusters, max_iter, n_trials, tol):
    n_samples, n_features = X.shape[0], X.shape[1]
    centers_best = np.empty(shape=(n_clusters, n_features), dtype=float)
    labels_best = np.empty(shape=n_samples, dtype=int)
    for i in range(n_trials):
        centers, labels, sse_tot, sse_arr, n_iter = \
            _kmeans_run(X, n_clusters, max_iter, tol)
        if i == 0:
            sse_tot_best = sse_tot
            sse_arr_best = sse_arr
            n_iter_best = n_iter
            centers_best = centers.copy()
            labels_best = labels.copy()
        if sse_tot < sse_tot_best:
            sse_tot_best = sse_tot
            sse_arr_best = sse_arr
            n_iter_best = n_iter
            centers_best = centers.copy()
            labels_best = labels.copy()
    return (centers_best, labels_best, sse_arr_best, n_iter_best)
Run multiple trials of k-means clustering and output the best centers and cluster labels.
856
def _cut_tree(tree, n_clusters, membs):
    assert n_clusters >= 2
    assert n_clusters <= len(tree.leaves())
    cut_centers = dict()
    for i in range(n_clusters - 1):
        if i == 0:
            search_set = set(tree.children(0))
            node_set, cut_set = set(), set()
        else:
            search_set = node_set.union(cut_set)
            node_set, cut_set = set(), set()
        if i + 2 == n_clusters:
            cut_set = search_set
        else:
            for _ in range(len(search_set)):
                n = search_set.pop()
                if n.data['ilev'] is None or n.data['ilev'] > i + 2:
                    cut_set.add(n)
                else:
                    nid = n.identifier
                    if n.data['ilev'] - 2 == i:
                        node_set = node_set.union(set(tree.children(nid)))
    conv_membs = membs.copy()
    for node in cut_set:
        nid = node.identifier
        label = node.data['label']
        cut_centers[label] = node.data['center']
        sub_leaves = tree.leaves(nid)
        for leaf in sub_leaves:
            indx = np.where(conv_membs == leaf)[0]
            conv_membs[indx] = nid
    return (conv_membs, cut_centers)
Cut the tree to get the desired number of clusters, where 2 <= n_desired <= n_clusters.
857
def _add_tree_node(tree, label, ilev, X=None, size=None,
                   center=None, sse=None, parent=None):
    if size is None:
        size = X.shape[0]
    if center is None:
        center = np.mean(X, axis=0)
    if sse is None:
        sse = _kmeans._cal_dist2center(X, center)
    center = list(center)
    datadict = {'size': size, 'center': center, 'label': label,
                'sse': sse, 'ilev': None}
    if parent is None:
        tree.create_node(label, label, data=datadict)
    else:
        tree.create_node(label, label, parent=parent, data=datadict)
        tree.get_node(parent).data['ilev'] = ilev
    return (tree)
Add a node to the tree. If the parent is not known, the node is a root.
858
def _bisect_kmeans(X, n_clusters, n_trials, max_iter, tol):
    membs = np.empty(shape=X.shape[0], dtype=int)
    centers = dict()
    sse_arr = dict()
    tree = treelib.Tree()
    tree = _add_tree_node(tree, 0, ilev=0, X=X)
    km = _kmeans.KMeans(n_clusters=2, n_trials=n_trials,
                        max_iter=max_iter, tol=tol)
    for i in range(1, n_clusters):
        sel_clust_id, sel_memb_ids = _select_cluster_2_split(membs, tree)
        X_sub = X[sel_memb_ids, :]
        km.fit(X_sub)
        tree = _add_tree_node(tree, 2 * i - 1, i,
                              size=np.sum(km.labels_ == 0),
                              center=km.centers_[0],
                              sse=km.sse_arr_[0], parent=sel_clust_id)
        tree = _add_tree_node(tree, 2 * i, i,
                              size=np.sum(km.labels_ == 1),
                              center=km.centers_[1],
                              sse=km.sse_arr_[1], parent=sel_clust_id)
        pred_labels = km.labels_
        pred_labels[np.where(pred_labels == 1)[0]] = 2 * i
        pred_labels[np.where(pred_labels == 0)[0]] = 2 * i - 1
        membs[sel_memb_ids] = pred_labels
    for n in tree.leaves():
        label = n.data['label']
        centers[label] = n.data['center']
        sse_arr[label] = n.data['sse']
    return (centers, membs, sse_arr, tree)
Apply bisecting k-means clustering to reach n_clusters clusters.
859
def comparison_table(self, caption=None, label="tab:model_comp", hlines=True,
                     aic=True, bic=True, dic=True, sort="bic",
                     descending=True):
    if sort == "bic":
        assert bic, "You cannot sort by BIC if you turn it off"
    if sort == "aic":
        assert aic, "You cannot sort by AIC if you turn it off"
    if sort == "dic":
        assert dic, "You cannot sort by DIC if you turn it off"
    if caption is None:
        caption = ""
    if label is None:
        label = ""
    base_string = get_latex_table_frame(caption, label)
    end_text = " \\\\ \n"
    # one column per requested criterion, plus the model-name column
    num_cols = 1 + (1 if aic else 0) + (1 if bic else 0) + (1 if dic else 0)
    column_text = "c" * num_cols
    center_text = ""
    hline_text = "\\hline\n"
    if hlines:
        center_text += hline_text
    center_text += ("\tModel" + (" & AIC" if aic else "") +
                    (" & BIC " if bic else "") +
                    (" & DIC " if dic else "") + end_text)
    if hlines:
        center_text += "\t" + hline_text
    if aic:
        aics = self.aic()
    else:
        aics = np.zeros(len(self.parent.chains))
    if bic:
        bics = self.bic()
    else:
        bics = np.zeros(len(self.parent.chains))
    if dic:
        dics = self.dic()
    else:
        dics = np.zeros(len(self.parent.chains))
    if sort == "bic":
        to_sort = bics
    elif sort == "aic":
        to_sort = aics
    elif sort == "dic":
        to_sort = dics
    else:
        raise ValueError("sort %s not recognised, must be dic, aic or bic" % sort)
    good = [i for i, t in enumerate(to_sort) if t is not None]
    names = [self.parent.chains[g].name for g in good]
    aics = [aics[g] for g in good]
    bics = [bics[g] for g in good]
    dics = [dics[g] for g in good]
    if sort == "bic":
        to_sort = bics
    elif sort == "aic":
        to_sort = aics
    else:
        to_sort = dics
    indexes = np.argsort(to_sort)
    if descending:
        indexes = indexes[::-1]
    for i in indexes:
        line = "\t" + names[i]
        if aic:
            line += " & %5.1f " % aics[i]
        if bic:
            line += " & %5.1f " % bics[i]
        if dic:
            line += " & %5.1f " % dics[i]
        line += end_text
        center_text += line
    if hlines:
        center_text += "\t" + hline_text
    return base_string % (column_text, center_text)
Return a LaTeX-ready table of model comparisons.
860
def plot_walks(self, parameters=None, truth=None, extents=None, display=False,
               filename=None, chains=None, convolve=None, figsize=None,
               plot_weights=True, plot_posterior=True, log_weight=None):
    chains, parameters, truth, extents, _ = \
        self._sanitise(chains, parameters, truth, extents)
    n = len(parameters)
    extra = 0
    if plot_weights:
        plot_weights = plot_weights and np.any(
            [np.any(c.weights != 1.0) for c in chains])
    plot_posterior = plot_posterior and np.any(
        [c.posterior is not None for c in chains])
    if plot_weights:
        extra += 1
    if plot_posterior:
        extra += 1
    if figsize is None:
        figsize = (8, 0.75 + (n + extra))
    fig, axes = plt.subplots(figsize=figsize, nrows=n + extra,
                             squeeze=False, sharex=True)
    for i, axes_row in enumerate(axes):
        ax = axes_row[0]
        if i >= extra:
            # skip the extra weight/posterior rows at the top
            p = parameters[i - extra]
            for chain in chains:
                if p in chain.parameters:
                    chain_row = chain.get_data(p)
                    self._plot_walk(ax, p, chain_row, extents=extents.get(p),
                                    convolve=convolve,
                                    color=chain.config["color"])
            if truth.get(p) is not None:
                self._plot_walk_truth(ax, truth.get(p))
        else:
            if i == 0 and plot_posterior:
                for chain in chains:
                    if chain.posterior is not None:
                        self._plot_walk(
                            ax, "$\\log(P)$",
                            chain.posterior - chain.posterior.max(),
                            convolve=convolve, color=chain.config["color"])
            else:
                if log_weight is None:
                    log_weight = np.any(
                        [chain.weights.mean() < 0.1 for chain in chains])
                if log_weight:
                    for chain in chains:
                        self._plot_walk(
                            ax, r"$\log_{10}(w)$", np.log10(chain.weights),
                            convolve=convolve, color=chain.config["color"])
                else:
                    for chain in chains:
                        self._plot_walk(ax, "$w$", chain.weights,
                                        convolve=convolve,
                                        color=chain.config["color"])
    if filename is not None:
        if isinstance(filename, str):
            filename = [filename]
        for f in filename:
            self._save_fig(fig, f, 300)
    if display:
        plt.show()
    return fig
Plots the chain walk: the parameter values as a function of step index.
861
def gelman_rubin(self, chain=None, threshold=0.05):
    if chain is None:
        return np.all([self.gelman_rubin(k, threshold=threshold)
                       for k in range(len(self.parent.chains))])
    index = self.parent._get_chain(chain)
    assert len(index) == 1, \
        "Please specify only one chain, have %d chains" % len(index)
    chain = self.parent.chains[index[0]]
    num_walkers = chain.walkers
    parameters = chain.parameters
    name = chain.name
    data = chain.chain
    chains = np.split(data, num_walkers)
    assert num_walkers > 1, \
        "Cannot run Gelman-Rubin statistic with only one walker"
    m = 1.0 * len(chains)
    n = 1.0 * chains[0].shape[0]
    all_mean = np.mean(data, axis=0)
    chain_means = np.array([np.mean(c, axis=0) for c in chains])
    chain_var = np.array([np.var(c, axis=0, ddof=1) for c in chains])
    b = n / (m - 1) * ((chain_means - all_mean) ** 2).sum(axis=0)
    w = (1 / m) * chain_var.sum(axis=0)
    var = (n - 1) * w / n + b / n
    v = var + b / (n * m)
    R = np.sqrt(v / w)
    passed = np.abs(R - 1) < threshold
    print("Gelman-Rubin Statistic values for chain %s" % name)
    for p, v, pas in zip(parameters, R, passed):
        param = "Param %d" % p if isinstance(p, int) else p
        print("%s: %7.5f (%s)" % (param, v, "Passed" if pas else "Failed"))
    return np.all(passed)
Runs the Gelman-Rubin diagnostic on the supplied chains.
862
def geweke(self, chain=None, first=0.1, last=0.5, threshold=0.05):
    if chain is None:
        return np.all([self.geweke(k, threshold=threshold)
                       for k in range(len(self.parent.chains))])
    index = self.parent._get_chain(chain)
    assert len(index) == 1, \
        "Please specify only one chain, have %d chains" % len(index)
    chain = self.parent.chains[index[0]]
    num_walkers = chain.walkers
    assert num_walkers is not None and num_walkers > 0, \
        "You need to specify the number of walkers to use the Geweke diagnostic."
    name = chain.name
    data = chain.chain
    chains = np.split(data, num_walkers)
    n = 1.0 * chains[0].shape[0]
    n_start = int(np.floor(first * n))
    n_end = int(np.floor((1 - last) * n))
    mean_start = np.array([np.mean(c[:n_start, i])
                           for c in chains for i in range(c.shape[1])])
    var_start = np.array([self._spec(c[:n_start, i]) / c[:n_start, i].size
                          for c in chains for i in range(c.shape[1])])
    mean_end = np.array([np.mean(c[n_end:, i])
                         for c in chains for i in range(c.shape[1])])
    var_end = np.array([self._spec(c[n_end:, i]) / c[n_end:, i].size
                        for c in chains for i in range(c.shape[1])])
    zs = (mean_start - mean_end) / (np.sqrt(var_start + var_end))
    _, pvalue = normaltest(zs)
    print("Geweke Statistic for chain %s has p-value %e" % (name, pvalue))
    return pvalue > threshold
Runs the Geweke diagnostic on the supplied chains.
863
def get_latex_table(self, parameters=None, transpose=False, caption=None,
                    label="tab:model_params", hlines=True, blank_fill="--"):
    if parameters is None:
        parameters = self.parent._all_parameters
    for p in parameters:
        assert isinstance(p, str), \
            "Generating a LaTeX table requires all parameters have labels"
    num_parameters = len(parameters)
    num_chains = len(self.parent.chains)
    fit_values = self.get_summary(squeeze=False)
    if label is None:
        label = ""
    if caption is None:
        caption = ""
    end_text = " \\\\ \n"
    if transpose:
        column_text = "c" * (num_chains + 1)
    else:
        column_text = "c" * (num_parameters + 1)
    center_text = ""
    hline_text = "\\hline\n"
    if hlines:
        center_text += hline_text + "\t\t"
    if transpose:
        center_text += " & ".join(
            ["Parameter"] + [c.name for c in self.parent.chains]) + end_text
        if hlines:
            center_text += "\t\t" + hline_text
        for p in parameters:
            arr = ["\t\t" + p]
            for chain_res in fit_values:
                if p in chain_res:
                    arr.append(self.get_parameter_text(*chain_res[p],
                                                       wrap=True))
                else:
                    arr.append(blank_fill)
            center_text += " & ".join(arr) + end_text
    else:
        center_text += " & ".join(["Model"] + parameters) + end_text
        if hlines:
            center_text += "\t\t" + hline_text
        for name, chain_res in zip([c.name for c in self.parent.chains],
                                   fit_values):
            arr = ["\t\t" + name]
            for p in parameters:
                if p in chain_res:
                    arr.append(self.get_parameter_text(*chain_res[p],
                                                       wrap=True))
                else:
                    arr.append(blank_fill)
            center_text += " & ".join(arr) + end_text
    if hlines:
        center_text += "\t\t" + hline_text
    final_text = get_latex_table_frame(caption, label) % (column_text,
                                                          center_text)
    return final_text
Generates a LaTeX table from parameter summaries.
864
def get_summary(self, squeeze=True, parameters=None, chains=None):
    results = []
    if chains is None:
        chains = self.parent.chains
    else:
        if isinstance(chains, (int, str)):
            chains = [chains]
        chains = [self.parent.chains[i]
                  for c in chains for i in self.parent._get_chain(c)]
    for chain in chains:
        res = {}
        params_to_find = parameters if parameters is not None \
            else chain.parameters
        for p in params_to_find:
            if p not in chain.parameters:
                continue
            summary = self.get_parameter_summary(chain, p)
            res[p] = summary
        results.append(res)
    if squeeze and len(results) == 1:
        return results[0]
    return results
Gets a summary of the marginalised parameter distributions.
865
def get_max_posteriors(self, parameters=None, squeeze=True, chains=None):
    results = []
    if chains is None:
        chains = self.parent.chains
    else:
        if isinstance(chains, (int, str)):
            chains = [chains]
        chains = [self.parent.chains[i]
                  for c in chains for i in self.parent._get_chain(c)]
    if isinstance(parameters, str):
        parameters = [parameters]
    for chain in chains:
        if chain.posterior_max_index is None:
            results.append(None)
            continue
        res = {}
        params_to_find = parameters if parameters is not None \
            else chain.parameters
        for p in params_to_find:
            if p in chain.parameters:
                res[p] = chain.posterior_max_params[p]
        results.append(res)
    if squeeze and len(results) == 1:
        return results[0]
    return results
Gets the maximum posterior point in parameter space from the passed parameters. Requires the chains to have set posterior values.
866
def get_correlations(self, chain=0, parameters=None):
    parameters, cov = self.get_covariance(chain=chain, parameters=parameters)
    diag = np.sqrt(np.diag(cov))
    divisor = diag[None, :] * diag[:, None]
    correlations = cov / divisor
    return parameters, correlations
Takes a chain and returns the correlation between chain parameters.
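The conversion uses the usual identity corr_ij = cov_ij / (sigma_i * sigma_j), computed via an outer product of the standard deviations; a standalone numpy sketch:

import numpy as np

cov = np.array([[4.0, 1.2], [1.2, 9.0]])
diag = np.sqrt(np.diag(cov))               # per-parameter standard deviations
corr = cov / (diag[None, :] * diag[:, None])
print(corr)   # unit diagonal; off-diagonal 1.2 / (2 * 3) = 0.2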
867
def get_covariance(self, chain=0, parameters=None):
    index = self.parent._get_chain(chain)
    assert len(index) == 1, \
        "Please specify only one chain, have %d chains" % len(index)
    chain = self.parent.chains[index[0]]
    if parameters is None:
        parameters = chain.parameters
    data = chain.get_data(parameters)
    cov = np.atleast_2d(np.cov(data, aweights=chain.weights, rowvar=False))
    return parameters, cov
Takes a chain and returns the covariance between chain parameters.
868
def get_correlation_table(self, chain=0, parameters=None,
                          caption="Parameter Correlations",
                          label="tab:parameter_correlations"):
    parameters, cor = self.get_correlations(chain=chain, parameters=parameters)
    return self._get_2d_latex_table(parameters, cor, caption, label)
Gets a LaTeX table of parameter correlations.
869
def get_covariance_table(self, chain=0, parameters=None,
                         caption="Parameter Covariance",
                         label="tab:parameter_covariance"):
    parameters, cov = self.get_covariance(chain=chain, parameters=parameters)
    return self._get_2d_latex_table(parameters, cov, caption, label)
Gets a LaTeX table of parameter covariance.
870
def get_parameter_text(self, lower, maximum, upper, wrap=False):
    if lower is None or upper is None:
        return ""
    upper_error = upper - maximum
    lower_error = maximum - lower
    if upper_error != 0 and lower_error != 0:
        resolution = min(np.floor(np.log10(np.abs(upper_error))),
                         np.floor(np.log10(np.abs(lower_error))))
    elif upper_error == 0 and lower_error != 0:
        resolution = np.floor(np.log10(np.abs(lower_error)))
    elif upper_error != 0 and lower_error == 0:
        resolution = np.floor(np.log10(np.abs(upper_error)))
    else:
        resolution = np.floor(np.log10(np.abs(maximum)))
    factor = 0
    fmt = "%0.1f"
    r = 1
    if np.abs(resolution) > 2:
        factor = -resolution
    if resolution == 2:
        fmt = "%0.0f"
        factor = -1
        r = 0
    if resolution == 1:
        fmt = "%0.0f"
    if resolution == -1:
        fmt = "%0.2f"
        r = 2
    elif resolution == -2:
        fmt = "%0.3f"
        r = 3
    upper_error *= 10 ** factor
    lower_error *= 10 ** factor
    maximum *= 10 ** factor
    upper_error = round(upper_error, r)
    lower_error = round(lower_error, r)
    maximum = round(maximum, r)
    if maximum == -0.0:
        maximum = 0.0
    if resolution == 2:
        upper_error *= 10 ** -factor
        lower_error *= 10 ** -factor
        maximum *= 10 ** -factor
        factor = 0
        fmt = "%0.0f"
    upper_error_text = fmt % upper_error
    lower_error_text = fmt % lower_error
    if upper_error_text == lower_error_text:
        text = r"%s\pm %s" % (fmt, "%s") % (maximum, lower_error_text)
    else:
        text = r"%s^{+%s}_{-%s}" % (fmt, "%s", "%s") % \
            (maximum, upper_error_text, lower_error_text)
    if factor != 0:
        text = r"\left( %s \right) \times 10^{%d}" % (text, -factor)
    if wrap:
        text = "$%s$" % text
    return text
Generates LaTeX-appropriate text from marginalised parameter bounds.
871
def remove_chain(self, chain=-1):
    if isinstance(chain, str) or isinstance(chain, int):
        chain = [chain]
    chain = sorted([i for c in chain for i in self._get_chain(c)])[::-1]
    assert len(chain) == len(list(set(chain))), \
        "Error, you are trying to remove a chain more than once."
    for index in chain:
        del self.chains[index]
    seen = set()
    self._all_parameters = [p for c in self.chains for p in c.parameters
                            if not (p in seen or seen.add(p))]
    self._init_params()
    return self
Removes a chain from ChainConsumer. Calling this will require any set configurations to be redone!
872
def configure_truth(self, **kwargs):
    if kwargs.get("ls") is None and kwargs.get("linestyle") is None:
        kwargs["ls"] = "--"
        kwargs["dashes"] = (3, 3)
    if kwargs.get("color") is None:
        kwargs["color"] = "#000000"
    self.config_truth = kwargs
    self._configured_truth = True
    return self
Configure the arguments passed to the axvline and axhline methods when plotting truth values.
873
def divide_chain(self, chain=0):
    indexes = self._get_chain(chain)
    con = ChainConsumer()
    for index in indexes:
        chain = self.chains[index]
        assert chain.walkers is not None, \
            "The chain you have selected was not added with any walkers!"
        num_walkers = chain.walkers
        data = np.split(chain.chain, num_walkers)
        ws = np.split(chain.weights, num_walkers)
        for j, (c, w) in enumerate(zip(data, ws)):
            con.add_chain(c, weights=w, name="Chain %d" % j,
                          parameters=chain.parameters)
    return con
Returns a ChainConsumer instance containing all the walks of a given chain as individual chains themselves.
874
def threshold(args):
    if args.fpr < 0 or args.fpr > 1:
        print("Please specify a FPR between 0 and 1")
        sys.exit(1)
    motifs = read_motifs(args.pwmfile)
    s = Scanner()
    s.set_motifs(args.pwmfile)
    s.set_threshold(args.fpr, filename=args.inputfile)
    print("Motif\tScore\tCutoff")
    for motif in motifs:
        min_score = motif.pwm_min_score()
        max_score = motif.pwm_max_score()
        opt_score = s.threshold[motif.id]
        if opt_score is None:
            opt_score = motif.pwm_max_score()
        threshold = (opt_score - min_score) / (max_score - min_score)
        print("{0}\t{1}\t{2}".format(motif.id, opt_score, threshold))
Calculate the motif score threshold for a given FPR.
875
def values_to_labels(fg_vals, bg_vals):
    y_true = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals))))
    y_score = np.hstack((fg_vals, bg_vals))
    return y_true, y_score
Convert two arrays of values to an array of labels and an array of scores.
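The label/score pair is exactly the shape scikit-learn's ranking metrics expect; a usage sketch (assuming values_to_labels from above is in scope, with numpy imported as np):

from sklearn.metrics import roc_auc_score

fg_vals = [0.9, 0.8, 0.4]          # scores of foreground (positive) examples
bg_vals = [0.5, 0.3, 0.1, 0.2]     # scores of background examples
y_true, y_score = values_to_labels(fg_vals, bg_vals)
print(roc_auc_score(y_true, y_score))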
876
def max_enrichment(fg_vals, bg_vals, minbg=2):
    scores = np.hstack((fg_vals, bg_vals))
    idx = np.argsort(scores)
    x = np.hstack((np.ones(len(fg_vals)), np.zeros(len(bg_vals))))
    xsort = x[idx]
    l_fg = len(fg_vals)
    l_bg = len(bg_vals)
    m = 0
    s = 0
    for i in range(len(scores), 0, -1):
        bgcount = float(len(xsort[i:][xsort[i:] == 0]))
        if bgcount >= minbg:
            enr = (len(xsort[i:][xsort[i:] == 1]) / l_fg) / (bgcount / l_bg)
            if enr > m:
                m = enr
                s = scores[idx[i]]
    return m
Computes the maximum enrichment.
877
def roc_auc_xlim(x_bla, y_bla, xlim=0.1):
    x = x_bla[:]
    y = y_bla[:]
    x.sort()
    y.sort()
    u = {}
    for i in x + y:
        u[i] = 1
    vals = sorted(u.keys())
    len_x = float(len(x))
    len_y = float(len(y))
    new_x = []
    new_y = []
    x_p = 0
    y_p = 0
    for val in vals[::-1]:
        while len(x) > 0 and x[-1] >= val:
            x.pop()
            x_p += 1
        while len(y) > 0 and y[-1] >= val:
            y.pop()
            y_p += 1
        new_y.append((len_x - x_p) / len_x)
        new_x.append((len_y - y_p) / len_y)
    new_x = 1 - np.array(new_x)
    new_y = 1 - np.array(new_y)
    x = new_x
    y = new_y
    if len(x) != len(y):
        raise ValueError("Unequal!")
    if not xlim:
        xlim = 1.0
    auc = 0.0
    bla = zip(stats.rankdata(x), range(len(x)))
    bla = sorted(bla, key=lambda x: x[1])
    prev_x = x[bla[0][1]]
    prev_y = y[bla[0][1]]
    index = 1
    while index < len(bla) and x[bla[index][1]] <= xlim:
        _, i = bla[index]
        auc += y[i] * (x[i] - prev_x) - \
            ((x[i] - prev_x) * (y[i] - prev_y) / 2.0)
        prev_x = x[i]
        prev_y = y[i]
        index += 1
    if index < len(bla):
        (rank, i) = bla[index]
        auc += prev_y * (xlim - prev_x) + \
            ((y[i] - prev_y) / (x[i] - prev_x) *
             (xlim - prev_x) * (xlim - prev_x) / 2)
    return auc
Computes the ROC Area Under Curve up to a certain FPR value.
878
def max_fmeasure(fg_vals, bg_vals):
    x, y = roc_values(fg_vals, bg_vals)
    x, y = x[1:], y[1:]
    p = y / (y + x)
    filt = np.logical_and((p * y) > 0, (p + y) > 0)
    p = p[filt]
    y = y[filt]
    f = (2 * p * y) / (p + y)
    if len(f) > 0:
        return np.nanmax(f)
    else:
        return None
Computes the maximum F-measure.
879
def ks_pvalue(fg_pos, bg_pos=None):
    if len(fg_pos) == 0:
        return 1.0
    a = np.array(fg_pos, dtype="float") / max(fg_pos)
    p = kstest(a, "uniform")[1]
    return p
Computes the Kolmogorov-Smirnov p-value of the position distribution.
880
def ks_significance(fg_pos, bg_pos=None):
    p = ks_pvalue(fg_pos, max(fg_pos))
    if p > 0:
        return -np.log10(p)
    else:
        return np.inf
Computes the -log10 of the Kolmogorov-Smirnov p-value of the position distribution.
881
def setup_data():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    return input_shape, (x_train, y_train), (x_test, y_test)
Load and shape data for training with Keras + Pescador.
882
def build_model(input_shape):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    return model
Create a compiled Keras model.
883
def sampler(X, y):
    X = np.atleast_2d(X)
    y = np.atleast_1d(y)
    n = X.shape[0]
    while True:
        i = np.random.randint(0, n)
        yield {'X': X[i], 'y': y[i]}
A basic generator for sampling data.
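Because the generator never terminates, callers draw samples explicitly; a minimal usage sketch (assuming sampler from above, with numpy imported as np):

X = np.random.uniform(size=(10, 5))
y = np.random.randint(2, size=10)
stream = sampler(X, y)
samples = [next(stream) for _ in range(4)]   # four {'X': ..., 'y': ...} dicts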
884
def additive_noise(stream, key='X', scale=1e-1):
    for data in stream:
        noise_shape = data[key].shape
        noise = scale * np.random.randn(*noise_shape)
        data[key] = data[key] + noise
        yield data
Add noise to a data stream.
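Stream transformers like this compose by plain chaining; continuing the sketch above:

noisy = additive_noise(sampler(X, y), key='X', scale=0.1)
sample = next(noisy)   # sample['X'] now carries additive Gaussian noise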
885
def parse_denovo_params(user_params=None):
    config = MotifConfig()
    if user_params is None:
        user_params = {}
    params = config.get_default_params()
    params.update(user_params)

    if params.get("torque"):
        logger.debug("Using torque")
    else:
        logger.debug("Using multiprocessing")

    params["background"] = [x.strip() for x in params["background"].split(",")]

    logger.debug("Parameters:")
    for param, value in params.items():
        logger.debug("  %s: %s", param, value)

    if params["max_time"]:
        try:
            max_time = params["max_time"] = float(params["max_time"])
        except Exception:
            logger.debug("Could not parse max_time value, setting to no limit")
            params["max_time"] = -1
        if params["max_time"] > 0:
            logger.debug("Time limit for motif prediction: %0.2f hours", max_time)
            # Convert the limit from hours to seconds.
            params["max_time"] = 3600 * params["max_time"]
            logger.debug("Max_time in seconds %0.0f", params["max_time"])
        else:
            logger.debug("No time limit for motif prediction")

    return params
Return default GimmeMotifs parameters, optionally updated with user-supplied values.
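A small usage sketch; the override keys are illustrative, and valid keys come from the GimmeMotifs config defaults:

    # Restrict the tool list and cap the runtime at two hours.
    params = parse_denovo_params({"tools": "MEME,Homer", "max_time": "2"})
    print(params["max_time"])  # 7200.0, i.e. converted from hours to seconds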
886
def rankagg(df, method="stuart"):
    # Only Stuart rank aggregation is implemented; `method` is currently unused.
    rmat = pd.DataFrame(index=df.iloc[:, 0])
    # Convert ranks to uniform scores in (0, 1].
    step = 1 / rmat.shape[0]
    for col in df.columns:
        rmat[col] = pd.DataFrame(
            {col: np.arange(step, 1 + step, step)}, index=df[col]
        ).loc[rmat.index]
    rmat = rmat.apply(sorted, 1, result_type="expand")
    p = rmat.apply(qStuart, 1)
    # "h" is the statsmodels alias for the Holm multiple-testing correction.
    df = pd.DataFrame(
        {"p.adjust": multipletests(p, method="h")[1]}, index=rmat.index
    ).sort_values("p.adjust")
    return df["p.adjust"]
Return aggregated ranks.
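A hedged usage sketch: each column holds one experiment's ranked list (best first) over the same set of motifs; motifs that rank consistently high get the smallest aggregated p-values.

    df = pd.DataFrame({
        "exp1": ["motif_A", "motif_B", "motif_C"],
        "exp2": ["motif_A", "motif_C", "motif_B"],
        "exp3": ["motif_B", "motif_A", "motif_C"],
    })
    print(rankagg(df))  # Series of Holm-adjusted p-values, best motif first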
887
def data_gen(n_ops=100):
    while True:
        X = np.random.uniform(size=(64, 64))
        yield dict(X=costly_function(X, n_ops),
                   y=np.random.randint(10, size=(1,)))
Yield data while optionally burning compute cycles.
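Because the generator burns CPU, it is a natural candidate for pescador's ZMQStreamer, which moves sample generation into a separate process. A minimal sketch, assuming pescador >= 2.0:

    import pescador

    stream = pescador.Streamer(data_gen, n_ops=500)
    zmq_stream = pescador.ZMQStreamer(stream)
    for data in zmq_stream.iterate(max_iter=10):
        pass  # training code would consume the pre-computed samples here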
888
def mp_calc_stats(motifs, fg_fa, bg_fa, bg_name=None):
    try:
        stats = calc_stats(motifs, fg_fa, bg_fa, ncpus=1)
    except Exception as e:
        # Report the error before re-raising; the original placed the bare
        # `raise` first, which made the error message unreachable.
        sys.stderr.write("ERROR: {}\n".format(str(e)))
        raise
    if not bg_name:
        bg_name = "default"
    return bg_name, stats
Parallel calculation of motif statistics.
889
def _run_tool(job_name, t, fastafile, params):
    try:
        result = t.run(fastafile, params, mytmpdir())
    except Exception as e:
        result = ([], "", "{} failed to run: {}".format(job_name, e))
    return job_name, result
Parallel motif prediction.
890
def predict_motifs(infile, bgfile, outfile, params=None,
                   stats_fg=None, stats_bg=None):
    required_params = ["tools", "available_tools", "analysis",
                       "genome", "use_strand", "max_time"]
    if params is None:
        params = parse_denovo_params()
    else:
        # Fall back to the defaults if any required parameter is missing.
        for p in required_params:
            if p not in params:
                params = parse_denovo_params()
                break

    # Map tool name -> whether the user selected it.
    tools = dict(
        [(x.strip(), x.strip() in [y.strip() for y in params["tools"].split(",")])
         for x in params["available_tools"].split(",")]
    )

    analysis = params["analysis"]
    logger.info("starting motif prediction (%s)", analysis)
    logger.info("tools: %s", ", ".join([x for x in tools.keys() if tools[x]]))

    result = pp_predict_motifs(
        infile, outfile, analysis, params.get("genome", None),
        params["use_strand"], bgfile, tools, None,
        max_time=params["max_time"], stats_fg=stats_fg, stats_bg=stats_bg)

    motifs = result.motifs
    logger.info("predicted %s motifs", len(motifs))
    logger.debug("written to %s", outfile)

    if len(motifs) == 0:
        logger.info("no motifs found")
        result.motifs = []

    return result
Predict motifs; the input is a FASTA file.
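A minimal usage sketch; the file names are illustrative, and the background FASTA would typically come from create_background() further below:

    params = parse_denovo_params({"tools": "BioProspector,Homer,MEME"})
    result = predict_motifs(
        "prediction.fa", "prediction.bg.fa", "predicted.pfm", params=params)
    for motif in result.motifs:
        print(motif.id, motif.to_consensus())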
891
def add_motifs(self, args):
    self.lock.acquire()
    # Callback for a finished prediction job.
    if args is None or len(args) != 2 or len(args[1]) != 3:
        try:
            job = args[0]
            logger.warn("job %s failed", job)
            self.finished.append(job)
        except Exception:
            logger.warn("job failed")
        # Release the lock before the early return; the original returned
        # while still holding it, which would deadlock the next callback.
        self.lock.release()
        return

    job, (motifs, stdout, stderr) = args
    logger.info("%s finished, found %s motifs", job, len(motifs))

    for motif in motifs:
        if self.do_counter:
            self.counter += 1
            motif.id = "gimme_{}_".format(self.counter) + motif.id
        f = open(self.outfile, "a")
        f.write("%s\n" % motif.to_pfm())
        f.close()
        self.motifs.append(motif)

    if self.do_stats and len(motifs) > 0:
        logger.debug("Starting stats job of %s motifs", len(motifs))
        for bg_name, bg_fa in self.background.items():
            # Use a separate name so the prediction job name is not
            # overwritten before the log statements below.
            stat_job = self.job_server.apply_async(
                mp_calc_stats,
                (motifs, self.fg_fa, bg_fa, bg_name),
                callback=self.add_stats)
            self.stat_jobs.append(stat_job)

    logger.debug("stdout %s: %s", job, stdout)
    logger.debug("stderr %s: %s", job, stderr)

    self.finished.append(job)
    self.lock.release()
Add motifs to the result object.
892
def wait_for_stats(self):
    logging.debug("waiting for statistics to finish")
    for job in self.stat_jobs:
        job.get()
    sleep(2)
Make sure all jobs are finished.
893
def add_stats(self, args):
    bg_name, stats = args
    logger.debug("Stats: %s %s", bg_name, stats)
    for motif_id in stats.keys():
        if motif_id not in self.stats:
            self.stats[motif_id] = {}
        self.stats[motif_id][bg_name] = stats[motif_id]
Callback to add motif statistics.
894
def prepare_denovo_input_narrowpeak(inputfile, params, outdir):
    bedfile = os.path.join(outdir, "input.from.narrowpeak.bed")
    p = re.compile(r'^(#|track|browser)')
    width = int(params["width"])
    logger.info("preparing input (narrowPeak to BED, width %s)", width)
    warn_no_summit = True
    with open(bedfile, "w") as f_out:
        with open(inputfile) as f_in:
            for line in f_in:
                if p.search(line):
                    continue
                vals = line.strip().split("\t")
                start, end = int(vals[1]), int(vals[2])
                summit = int(vals[9])
                if summit == -1:
                    if warn_no_summit:
                        logger.warn("No summit present in narrowPeak file, "
                                    "using the peak center.")
                        warn_no_summit = False
                    summit = (end - start) // 2
                start = start + summit - (width // 2)
                end = start + width
                f_out.write("{}\t{}\t{}\t{}\n".format(
                    vals[0], start, end, vals[6]))
    prepare_denovo_input_bed(bedfile, params, outdir)
Prepare a narrowPeak file for de novo motif prediction.
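A worked example of the summit-centering arithmetic above, with width = 200: a peak at chr1:1000-1500 whose summit offset (column 10) is 350 becomes a fixed-width region centered on base 1000 + 350 = 1350:

    start = 1000 + 350 - (200 // 2)  # 1250
    end = start + 200                # 1450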
895
def prepare_denovo_input_bed(inputfile, params, outdir):
    logger.info("preparing input (BED)")

    # Rewrite the input as regions of equal width.
    width = int(params["width"])
    bedfile = os.path.join(outdir, "input.bed")
    write_equalwidth_bedfile(inputfile, width, bedfile)

    # Split into a prediction set and a validation set.
    abs_max = int(params["abs_max"])
    fraction = float(params["fraction"])
    pred_bedfile = os.path.join(outdir, "prediction.bed")
    val_bedfile = os.path.join(outdir, "validation.bed")
    logger.debug(
        "Splitting %s into prediction set (%s) and validation set (%s)",
        bedfile, pred_bedfile, val_bedfile)
    divide_file(bedfile, pred_bedfile, val_bedfile, fraction, abs_max)

    config = MotifConfig()
    genome = Genome(params["genome"])
    for infile in [pred_bedfile, val_bedfile]:
        genome.track2fasta(infile, infile.replace(".bed", ".fa"))

    # Create a wider localization FASTA from the validation set.
    lwidth = int(params["lwidth"])
    extend = (lwidth - width) // 2
    genome.track2fasta(
        val_bedfile,
        os.path.join(outdir, "localization.fa"),
        extend_up=extend,
        extend_down=extend,
        stranded=params["use_strand"])
Prepare a BED file for de novo motif prediction.
896
def create_background(bg_type, fafile, outfile, genome="hg18", width=200,
                      nr_times=10, custom_background=None):
    width = int(width)
    config = MotifConfig()
    fg = Fasta(fafile)

    if bg_type in ["genomic", "gc"]:
        if not genome:
            logger.error("Need a genome to create background")
            sys.exit(1)

    if bg_type == "random":
        f = MarkovFasta(fg, k=1, n=nr_times * len(fg))
        logger.debug("Random background: %s", outfile)
    elif bg_type == "genomic":
        logger.debug("Creating genomic background")
        f = RandomGenomicFasta(genome, width, nr_times * len(fg))
    elif bg_type == "gc":
        logger.debug("Creating GC matched background")
        f = MatchedGcFasta(fafile, genome, nr_times * len(fg))
        logger.debug("GC matched background: %s", outfile)
    elif bg_type == "promoter":
        fname = Genome(genome).filename
        gene_file = fname.replace(".fa", ".annotation.bed.gz")
        if not gene_file:
            gene_file = os.path.join(config.get_gene_dir(), "%s.bed" % genome)
        if not os.path.exists(gene_file):
            print("Could not find a gene file for genome {}".format(genome))
            print("Did you use the --annotation flag for genomepy?")
            print("Alternatively make sure there is a file called {}.bed in {}".format(
                genome, config.get_gene_dir()))
            raise ValueError()
        logger.info(
            "Creating random promoter background (%s, using genes in %s)",
            genome, gene_file)
        f = PromoterFasta(gene_file, genome, width, nr_times * len(fg))
        logger.debug("Random promoter background: %s", outfile)
    elif bg_type == "custom":
        bg_file = custom_background
        if not bg_file:
            raise IOError("Background file not specified!")
        if not os.path.exists(bg_file):
            raise IOError(
                "Custom background file {} does not exist!".format(bg_file))
        logger.info("Copying custom background file %s to %s.",
                    bg_file, outfile)
        f = Fasta(bg_file)
        median_length = np.median([len(seq) for seq in f.seqs])
        if median_length < (width * 0.95) or median_length > (width * 1.05):
            logger.warn(
                "The custom background file %s contains sequences with a "
                "median length of %s, while GimmeMotifs predicts motifs in "
                "sequences of length %s. This will influence the statistics! "
                "It is recommended to use background sequences of the same "
                "length.", bg_file, median_length, width)

    f.writefasta(outfile)
    return len(f)
Create a background of a specific type.
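A minimal usage sketch: build a GC%-matched background ten times the size of the input (the default nr_times), for hg38; the paths are illustrative.

    n = create_background(
        "gc", "validation.fa", "bg.gc.fa", genome="hg38", width=200)
    print(n, "background sequences written to bg.gc.fa")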
897
def create_backgrounds(outdir, background=None, genome="hg38", width=200,
                       custom_background=None):
    if background is None:
        background = ["random"]

    # Background for motif prediction: prefer GC-matched if requested.
    if "gc" in background:
        pred_bg = "gc"
    else:
        pred_bg = background[0]
    create_background(
        pred_bg,
        os.path.join(outdir, "prediction.fa"),
        os.path.join(outdir, "prediction.bg.fa"),
        genome=genome,
        width=width,
        custom_background=custom_background)

    # Backgrounds for motif validation.
    bg_info = {}
    nr_sequences = {}
    for bg in background:
        fname = os.path.join(outdir, "bg.{}.fa".format(bg))
        nr_sequences[bg] = create_background(
            bg,
            os.path.join(outdir, "validation.fa"),
            fname,
            genome=genome,
            width=width,
            custom_background=custom_background)
        bg_info[bg] = fname
    return bg_info
Create different backgrounds for motif prediction and validation.
898
def filter_significant_motifs(fname, result, bg, metrics=None):
    sig_motifs = []
    with open(fname, "w") as f:
        for motif in result.motifs:
            stats = result.stats.get(
                "%s_%s" % (motif.id, motif.to_consensus()), {}
            ).get(bg, {})
            if _is_significant(stats, metrics):
                f.write("%s\n" % motif.to_pfm())
                sig_motifs.append(motif)
    logger.info("%s motifs are significant", len(sig_motifs))
    logger.debug("written to %s", fname)
    return sig_motifs
Filter significant motifs based on several statistics.
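A hedged usage sketch, reusing the result object from predict_motifs() and the "gc" background; when metrics is None, _is_significant() presumably falls back to the package's default thresholds.

    significant = filter_significant_motifs(
        "significant.pfm", result, "gc")
    print(len(significant), "motifs pass the significance criteria")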
899
def best_motif_in_cluster(single_pwm, clus_pwm, clusters, fg_fa, background,
                          stats=None, metrics=("roc_auc", "recall_at_fdr")):
    # Combine the original and the clustered motifs.
    motifs = read_motifs(single_pwm) + read_motifs(clus_pwm)
    motifs = dict([(str(m), m) for m in motifs])

    if stats is None:
        stats = {}

    # Compute statistics for motifs that were not evaluated yet.
    clustered_motifs = []
    for clus, singles in clusters:
        for motif in set([clus] + singles):
            if str(motif) not in stats:
                clustered_motifs.append(motifs[str(motif)])

    new_stats = {}
    for bg, bg_fa in background.items():
        for m, s in calc_stats(clustered_motifs, fg_fa, bg_fa).items():
            if m not in new_stats:
                new_stats[m] = {}
            new_stats[m][bg] = s
    stats.update(new_stats)

    rank = rank_motifs(stats, metrics)

    # Pick the highest-ranking motif from every cluster.
    best_motifs = []
    for clus, singles in clusters:
        if len(singles) > 1:
            eval_motifs = singles
            if clus not in motifs:
                eval_motifs.append(clus)
            eval_motifs = [motifs[str(e)] for e in eval_motifs]
            best_motif = sorted(eval_motifs, key=lambda x: rank[str(x)])[-1]
            best_motifs.append(best_motif)
        else:
            best_motifs.append(clus)
        for bg in background:
            stats[str(best_motifs[-1])][bg]["num_cluster"] = len(singles)

    best_motifs = sorted(best_motifs, key=lambda x: rank[str(x)], reverse=True)
    return best_motifs
Return the best motif per cluster for a clustering result.