idx
int64 0
63k
| question
stringlengths 61
4.03k
| target
stringlengths 6
1.23k
|
---|---|---|
60,000 |
def general_eq ( a , b , attributes ) : try : for attr in attributes : _a , _b = getattr ( a , attr ) , getattr ( b , attr ) if attr in [ 'phi' , 'alpha' ] : if not utils . eq ( _a , _b ) : return False elif attr in [ 'mechanism' , 'purview' ] : if _a is None or _b is None : if _a != _b : return False elif not set ( _a ) == set ( _b ) : return False else : if not numpy_aware_eq ( _a , _b ) : return False return True except AttributeError : return False
|
Return whether two objects are equal up to the given attributes .
|
60,001 |
def time_emd ( emd_type , data ) : emd = { 'cause' : _CAUSE_EMD , 'effect' : pyphi . subsystem . effect_emd , 'hamming' : pyphi . utils . hamming_emd } [ emd_type ] def statement ( ) : for ( d1 , d2 ) in data : emd ( d1 , d2 ) results = timeit . repeat ( statement , number = NUMBER , repeat = REPEAT ) return min ( results )
|
Time an EMD command with the given data as arguments
|
60,002 |
def marginal_zero ( repertoire , node_index ) : index = [ slice ( None ) ] * repertoire . ndim index [ node_index ] = 0 return repertoire [ tuple ( index ) ] . sum ( )
|
Return the marginal probability that the node is OFF .
|
60,003 |
def marginal ( repertoire , node_index ) : index = tuple ( i for i in range ( repertoire . ndim ) if i != node_index ) return repertoire . sum ( index , keepdims = True )
|
Get the marginal distribution for a node .
|
60,004 |
def independent ( repertoire ) : marginals = [ marginal ( repertoire , i ) for i in range ( repertoire . ndim ) ] joint = marginals [ 0 ] for m in marginals [ 1 : ] : joint = joint * m return np . array_equal ( repertoire , joint )
|
Check whether the repertoire is independent .
|
60,005 |
def purview ( repertoire ) : if repertoire is None : return None return tuple ( i for i , dim in enumerate ( repertoire . shape ) if dim == 2 )
|
The purview of the repertoire .
|
60,006 |
def flatten ( repertoire , big_endian = False ) : if repertoire is None : return None order = 'C' if big_endian else 'F' return repertoire . squeeze ( ) . ravel ( order = order )
|
Flatten a repertoire removing empty dimensions .
|
60,007 |
def max_entropy_distribution ( node_indices , number_of_nodes ) : distribution = np . ones ( repertoire_shape ( node_indices , number_of_nodes ) ) return distribution / distribution . size
|
Return the maximum entropy distribution over a set of nodes .
|
60,008 |
def run_tpm ( system , steps , blackbox ) : node_tpms = [ ] for node in system . nodes : node_tpm = node . tpm_on for input_node in node . inputs : if not blackbox . in_same_box ( node . index , input_node ) : if input_node in blackbox . output_indices : node_tpm = marginalize_out ( [ input_node ] , node_tpm ) node_tpms . append ( node_tpm ) noised_tpm = rebuild_system_tpm ( node_tpms ) noised_tpm = convert . state_by_node2state_by_state ( noised_tpm ) tpm = convert . state_by_node2state_by_state ( system . tpm ) tpm = np . dot ( tpm , np . linalg . matrix_power ( noised_tpm , steps - 1 ) ) return convert . state_by_state2state_by_node ( tpm )
|
Iterate the TPM for the given number of timesteps .
|
60,009 |
def _partitions_list ( N ) : if N < ( _NUM_PRECOMPUTED_PARTITION_LISTS ) : return list ( _partition_lists [ N ] ) else : raise ValueError ( 'Partition lists not yet available for system with {} ' 'nodes or more' . format ( _NUM_PRECOMPUTED_PARTITION_LISTS ) )
|
Return a list of partitions of the |N| binary nodes .
|
60,010 |
def all_partitions ( indices ) : n = len ( indices ) partitions = _partitions_list ( n ) if n > 0 : partitions [ - 1 ] = [ list ( range ( n ) ) ] for partition in partitions : yield tuple ( tuple ( indices [ i ] for i in part ) for part in partition )
|
Return a list of all possible coarse grains of a network .
|
60,011 |
def all_coarse_grains ( indices ) : for partition in all_partitions ( indices ) : for grouping in all_groupings ( partition ) : yield CoarseGrain ( partition , grouping )
|
Generator over all possible |CoarseGrains| of these indices .
|
60,012 |
def all_coarse_grains_for_blackbox ( blackbox ) : for partition in all_partitions ( blackbox . output_indices ) : for grouping in all_groupings ( partition ) : coarse_grain = CoarseGrain ( partition , grouping ) try : validate . blackbox_and_coarse_grain ( blackbox , coarse_grain ) except ValueError : continue yield coarse_grain
|
Generator over all |CoarseGrains| for the given blackbox .
|
60,013 |
def all_blackboxes ( indices ) : for partition in all_partitions ( indices ) : for output_indices in utils . powerset ( indices ) : blackbox = Blackbox ( partition , output_indices ) try : validate . blackbox ( blackbox ) except ValueError : continue yield blackbox
|
Generator over all possible blackboxings of these indices .
|
60,014 |
def coarse_graining ( network , state , internal_indices ) : max_phi = float ( '-inf' ) max_coarse_grain = CoarseGrain ( ( ) , ( ) ) for coarse_grain in all_coarse_grains ( internal_indices ) : try : subsystem = MacroSubsystem ( network , state , internal_indices , coarse_grain = coarse_grain ) except ConditionallyDependentError : continue phi = compute . phi ( subsystem ) if ( phi - max_phi ) > constants . EPSILON : max_phi = phi max_coarse_grain = coarse_grain return ( max_phi , max_coarse_grain )
|
Find the maximal coarse - graining of a micro - system .
|
60,015 |
def all_macro_systems ( network , state , do_blackbox = False , do_coarse_grain = False , time_scales = None ) : if time_scales is None : time_scales = [ 1 ] def blackboxes ( system ) : if not do_blackbox : return [ None ] return all_blackboxes ( system ) def coarse_grains ( blackbox , system ) : if not do_coarse_grain : return [ None ] if blackbox is None : return all_coarse_grains ( system ) return all_coarse_grains_for_blackbox ( blackbox ) for system in utils . powerset ( network . node_indices ) : for time_scale in time_scales : for blackbox in blackboxes ( system ) : for coarse_grain in coarse_grains ( blackbox , system ) : try : yield MacroSubsystem ( network , state , system , time_scale = time_scale , blackbox = blackbox , coarse_grain = coarse_grain ) except ( StateUnreachableError , ConditionallyDependentError ) : continue
|
Generator over all possible macro - systems for the network .
|
60,016 |
def emergence ( network , state , do_blackbox = False , do_coarse_grain = True , time_scales = None ) : micro_phi = compute . major_complex ( network , state ) . phi max_phi = float ( '-inf' ) max_network = None for subsystem in all_macro_systems ( network , state , do_blackbox = do_blackbox , do_coarse_grain = do_coarse_grain , time_scales = time_scales ) : phi = compute . phi ( subsystem ) if ( phi - max_phi ) > constants . EPSILON : max_phi = phi max_network = MacroNetwork ( network = network , macro_phi = phi , micro_phi = micro_phi , system = subsystem . micro_node_indices , time_scale = subsystem . time_scale , blackbox = subsystem . blackbox , coarse_grain = subsystem . coarse_grain ) return max_network
|
Check for the emergence of a micro - system into a macro - system .
|
60,017 |
def effective_info ( network ) : validate . is_network ( network ) sbs_tpm = convert . state_by_node2state_by_state ( network . tpm ) avg_repertoire = np . mean ( sbs_tpm , 0 ) return np . mean ( [ entropy ( repertoire , avg_repertoire , 2.0 ) for repertoire in sbs_tpm ] )
|
Return the effective information of the given network .
|
60,018 |
def node_labels ( self ) : assert list ( self . node_indices ) [ 0 ] == 0 labels = list ( "m{}" . format ( i ) for i in self . node_indices ) return NodeLabels ( labels , self . node_indices )
|
Return the labels for macro nodes .
|
60,019 |
def _squeeze ( system ) : assert system . node_indices == tpm_indices ( system . tpm ) internal_indices = tpm_indices ( system . tpm ) tpm = remove_singleton_dimensions ( system . tpm ) cm = system . cm [ np . ix_ ( internal_indices , internal_indices ) ] state = utils . state_of ( internal_indices , system . state ) node_indices = reindex ( internal_indices ) nodes = generate_nodes ( tpm , cm , state , node_indices ) tpm = rebuild_system_tpm ( node . tpm_on for node in nodes ) return SystemAttrs ( tpm , cm , node_indices , state )
|
Squeeze out all singleton dimensions in the Subsystem .
|
60,020 |
def _blackbox_partial_noise ( blackbox , system ) : node_tpms = [ ] for node in system . nodes : node_tpm = node . tpm_on for input_node in node . inputs : if blackbox . hidden_from ( input_node , node . index ) : node_tpm = marginalize_out ( [ input_node ] , node_tpm ) node_tpms . append ( node_tpm ) tpm = rebuild_system_tpm ( node_tpms ) return system . _replace ( tpm = tpm )
|
Noise connections from hidden elements to other boxes .
|
60,021 |
def _blackbox_time ( time_scale , blackbox , system ) : blackbox = blackbox . reindex ( ) tpm = run_tpm ( system , time_scale , blackbox ) n = len ( system . node_indices ) cm = np . ones ( ( n , n ) ) return SystemAttrs ( tpm , cm , system . node_indices , system . state )
|
Black box the CM and TPM over the given time_scale .
|
60,022 |
def _blackbox_space ( self , blackbox , system ) : tpm = marginalize_out ( blackbox . hidden_indices , system . tpm ) assert blackbox . output_indices == tpm_indices ( tpm ) tpm = remove_singleton_dimensions ( tpm ) n = len ( blackbox ) cm = np . zeros ( ( n , n ) ) for i , j in itertools . product ( range ( n ) , repeat = 2 ) : outputs = self . blackbox . outputs_of ( i ) to = self . blackbox . partition [ j ] if self . cm [ np . ix_ ( outputs , to ) ] . sum ( ) > 0 : cm [ i , j ] = 1 state = blackbox . macro_state ( system . state ) node_indices = blackbox . macro_indices return SystemAttrs ( tpm , cm , node_indices , state )
|
Blackbox the TPM and CM in space .
|
60,023 |
def _coarsegrain_space ( coarse_grain , is_cut , system ) : tpm = coarse_grain . macro_tpm ( system . tpm , check_independence = ( not is_cut ) ) node_indices = coarse_grain . macro_indices state = coarse_grain . macro_state ( system . state ) n = len ( node_indices ) cm = np . ones ( ( n , n ) ) return SystemAttrs ( tpm , cm , node_indices , state )
|
Spatially coarse - grain the TPM and CM .
|
60,024 |
def cut_mechanisms ( self ) : for mechanism in utils . powerset ( self . node_indices , nonempty = True ) : micro_mechanism = self . macro2micro ( mechanism ) if self . cut . splits_mechanism ( micro_mechanism ) : yield mechanism
|
The mechanisms of this system that are currently cut .
|
60,025 |
def apply_cut ( self , cut ) : return MacroSubsystem ( self . network , self . network_state , self . micro_node_indices , cut = cut , time_scale = self . time_scale , blackbox = self . blackbox , coarse_grain = self . coarse_grain )
|
Return a cut version of this |MacroSubsystem| .
|
60,026 |
def potential_purviews ( self , direction , mechanism , purviews = False ) : all_purviews = utils . powerset ( self . node_indices ) return irreducible_purviews ( self . cm , direction , mechanism , all_purviews )
|
Override Subsystem implementation using Network - level indices .
|
60,027 |
def macro2micro ( self , macro_indices ) : def from_partition ( partition , macro_indices ) : micro_indices = itertools . chain . from_iterable ( partition [ i ] for i in macro_indices ) return tuple ( sorted ( micro_indices ) ) if self . blackbox and self . coarse_grain : cg_micro_indices = from_partition ( self . coarse_grain . partition , macro_indices ) return from_partition ( self . blackbox . partition , reindex ( cg_micro_indices ) ) elif self . blackbox : return from_partition ( self . blackbox . partition , macro_indices ) elif self . coarse_grain : return from_partition ( self . coarse_grain . partition , macro_indices ) return macro_indices
|
Return all micro indices which compose the elements specified by macro_indices .
|
60,028 |
def macro2blackbox_outputs ( self , macro_indices ) : if not self . blackbox : raise ValueError ( 'System is not blackboxed' ) return tuple ( sorted ( set ( self . macro2micro ( macro_indices ) ) . intersection ( self . blackbox . output_indices ) ) )
|
Given a set of macro elements return the blackbox output elements which compose these elements .
|
60,029 |
def micro_indices ( self ) : return tuple ( sorted ( idx for part in self . partition for idx in part ) )
|
Indices of micro elements represented in this coarse - graining .
|
60,030 |
def reindex ( self ) : _map = dict ( zip ( self . micro_indices , reindex ( self . micro_indices ) ) ) partition = tuple ( tuple ( _map [ index ] for index in group ) for group in self . partition ) return CoarseGrain ( partition , self . grouping )
|
Re - index this coarse graining to use squeezed indices .
|
60,031 |
def macro_state ( self , micro_state ) : assert len ( micro_state ) == len ( self . micro_indices ) reindexed = self . reindex ( ) micro_state = np . array ( micro_state ) return tuple ( 0 if sum ( micro_state [ list ( reindexed . partition [ i ] ) ] ) in self . grouping [ i ] [ 0 ] else 1 for i in self . macro_indices )
|
Translate a micro state to a macro state
|
60,032 |
def make_mapping ( self ) : micro_states = utils . all_states ( len ( self . micro_indices ) ) mapping = [ convert . state2le_index ( self . macro_state ( micro_state ) ) for micro_state in micro_states ] return np . array ( mapping )
|
Return a mapping from micro - state to the macro - states based on the partition and state grouping of this coarse - grain .
|
60,033 |
def macro_tpm_sbs ( self , state_by_state_micro_tpm ) : validate . tpm ( state_by_state_micro_tpm , check_independence = False ) mapping = self . make_mapping ( ) num_macro_states = 2 ** len ( self . macro_indices ) macro_tpm = np . zeros ( ( num_macro_states , num_macro_states ) ) micro_states = range ( 2 ** len ( self . micro_indices ) ) micro_state_transitions = itertools . product ( micro_states , repeat = 2 ) for previous_state , current_state in micro_state_transitions : macro_tpm [ mapping [ previous_state ] , mapping [ current_state ] ] += ( state_by_state_micro_tpm [ previous_state , current_state ] ) return np . array ( [ distribution . normalize ( row ) for row in macro_tpm ] )
|
Create a state - by - state coarse - grained macro TPM .
|
60,034 |
def macro_tpm ( self , micro_tpm , check_independence = True ) : if not is_state_by_state ( micro_tpm ) : micro_tpm = convert . state_by_node2state_by_state ( micro_tpm ) macro_tpm = self . macro_tpm_sbs ( micro_tpm ) if check_independence : validate . conditionally_independent ( macro_tpm ) return convert . state_by_state2state_by_node ( macro_tpm )
|
Create a coarse - grained macro TPM .
|
60,035 |
def hidden_indices ( self ) : return tuple ( sorted ( set ( self . micro_indices ) - set ( self . output_indices ) ) )
|
All elements hidden inside the blackboxes .
|
60,036 |
def outputs_of ( self , partition_index ) : partition = self . partition [ partition_index ] outputs = set ( partition ) . intersection ( self . output_indices ) return tuple ( sorted ( outputs ) )
|
The outputs of the partition at partition_index .
|
60,037 |
def reindex ( self ) : _map = dict ( zip ( self . micro_indices , reindex ( self . micro_indices ) ) ) partition = tuple ( tuple ( _map [ index ] for index in group ) for group in self . partition ) output_indices = tuple ( _map [ i ] for i in self . output_indices ) return Blackbox ( partition , output_indices )
|
Squeeze the indices of this blackboxing to 0 .. n .
|
60,038 |
def macro_state ( self , micro_state ) : assert len ( micro_state ) == len ( self . micro_indices ) reindexed = self . reindex ( ) return utils . state_of ( reindexed . output_indices , micro_state )
|
Compute the macro - state of this blackbox .
|
60,039 |
def in_same_box ( self , a , b ) : assert a in self . micro_indices assert b in self . micro_indices for part in self . partition : if a in part and b in part : return True return False
|
Return True if nodes a and b are in the same box .
|
60,040 |
def hidden_from ( self , a , b ) : return a in self . hidden_indices and not self . in_same_box ( a , b )
|
Return True if a is hidden in a different box than b .
|
60,041 |
def irreducible_purviews ( cm , direction , mechanism , purviews ) : def reducible ( purview ) : _from , to = direction . order ( mechanism , purview ) return connectivity . block_reducible ( cm , _from , to ) return [ purview for purview in purviews if not reducible ( purview ) ]
|
Return all purviews which are irreducible for the mechanism .
|
60,042 |
def _build_tpm ( tpm ) : tpm = np . array ( tpm ) validate . tpm ( tpm ) if is_state_by_state ( tpm ) : tpm = convert . state_by_state2state_by_node ( tpm ) else : tpm = convert . to_multidimensional ( tpm ) utils . np_immutable ( tpm ) return ( tpm , utils . np_hash ( tpm ) )
|
Validate the TPM passed by the user and convert to multidimensional form .
|
60,043 |
def _build_cm ( self , cm ) : if cm is None : cm = np . ones ( ( self . size , self . size ) ) else : cm = np . array ( cm ) utils . np_immutable ( cm ) return ( cm , utils . np_hash ( cm ) )
|
Convert the passed CM to the proper format or construct the unitary CM if none was provided .
|
60,044 |
def potential_purviews ( self , direction , mechanism ) : all_purviews = utils . powerset ( self . _node_indices ) return irreducible_purviews ( self . cm , direction , mechanism , all_purviews )
|
All purviews which are not clearly reducible for mechanism .
|
60,045 |
def _loadable_models ( ) : classes = [ pyphi . Direction , pyphi . Network , pyphi . Subsystem , pyphi . Transition , pyphi . labels . NodeLabels , pyphi . models . Cut , pyphi . models . KCut , pyphi . models . NullCut , pyphi . models . Part , pyphi . models . Bipartition , pyphi . models . KPartition , pyphi . models . Tripartition , pyphi . models . RepertoireIrreducibilityAnalysis , pyphi . models . MaximallyIrreducibleCauseOrEffect , pyphi . models . MaximallyIrreducibleCause , pyphi . models . MaximallyIrreducibleEffect , pyphi . models . Concept , pyphi . models . CauseEffectStructure , pyphi . models . SystemIrreducibilityAnalysis , pyphi . models . ActualCut , pyphi . models . AcRepertoireIrreducibilityAnalysis , pyphi . models . CausalLink , pyphi . models . Account , pyphi . models . AcSystemIrreducibilityAnalysis ] return { cls . __name__ : cls for cls in classes }
|
A dictionary of loadable PyPhi models .
|
60,046 |
def jsonify ( obj ) : if hasattr ( obj , 'to_json' ) : d = obj . to_json ( ) _push_metadata ( d , obj ) return jsonify ( d ) if isinstance ( obj , np . ndarray ) : return obj . tolist ( ) if isinstance ( obj , ( np . int32 , np . int64 ) ) : return int ( obj ) if isinstance ( obj , np . float64 ) : return float ( obj ) if isinstance ( obj , dict ) : return _jsonify_dict ( obj ) if hasattr ( obj , '__dict__' ) : return _jsonify_dict ( obj . __dict__ ) if isinstance ( obj , ( list , tuple ) ) : return [ jsonify ( item ) for item in obj ] return obj
|
Return a JSON - encodable representation of an object recursively using any available to_json methods converting NumPy arrays and datatypes to native lists and types along the way .
|
60,047 |
def _check_version ( version ) : if version != pyphi . __version__ : raise pyphi . exceptions . JSONVersionError ( 'Cannot load JSON from a different version of PyPhi. ' 'JSON version = {0}, current version = {1}.' . format ( version , pyphi . __version__ ) )
|
Check whether the JSON version matches the PyPhi version .
|
60,048 |
def _load_object ( self , obj ) : if isinstance ( obj , dict ) : obj = { k : self . _load_object ( v ) for k , v in obj . items ( ) } if _is_model ( obj ) : return self . _load_model ( obj ) elif isinstance ( obj , list ) : return tuple ( self . _load_object ( item ) for item in obj ) return obj
|
Recursively load a PyPhi object .
|
60,049 |
def _load_model ( self , dct ) : classname , version , _ = _pop_metadata ( dct ) _check_version ( version ) cls = self . _models [ classname ] if hasattr ( cls , 'from_json' ) : return cls . from_json ( dct ) return cls ( ** dct )
|
Load a serialized PyPhi model .
|
60,050 |
def _compute_hamming_matrix ( N ) : possible_states = np . array ( list ( utils . all_states ( ( N ) ) ) ) return cdist ( possible_states , possible_states , 'hamming' ) * N
|
Compute and store a Hamming matrix for |N| nodes .
|
60,051 |
def effect_emd ( d1 , d2 ) : return sum ( abs ( marginal_zero ( d1 , i ) - marginal_zero ( d2 , i ) ) for i in range ( d1 . ndim ) )
|
Compute the EMD between two effect repertoires .
|
60,052 |
def entropy_difference ( d1 , d2 ) : d1 , d2 = flatten ( d1 ) , flatten ( d2 ) return abs ( entropy ( d1 , base = 2.0 ) - entropy ( d2 , base = 2.0 ) )
|
Return the difference in entropy between two distributions .
|
60,053 |
def psq2 ( d1 , d2 ) : d1 , d2 = flatten ( d1 ) , flatten ( d2 ) def f ( p ) : return sum ( ( p ** 2 ) * np . nan_to_num ( np . log ( p * len ( p ) ) ) ) return abs ( f ( d1 ) - f ( d2 ) )
|
Compute the PSQ2 measure .
|
60,054 |
def mp2q ( p , q ) : p , q = flatten ( p ) , flatten ( q ) entropy_dist = 1 / len ( p ) return sum ( entropy_dist * np . nan_to_num ( ( p ** 2 ) / q * np . log ( p / q ) ) )
|
Compute the MP2Q measure .
|
60,055 |
def klm ( p , q ) : p , q = flatten ( p ) , flatten ( q ) return max ( abs ( p * np . nan_to_num ( np . log ( p / q ) ) ) )
|
Compute the KLM divergence .
|
60,056 |
def directional_emd ( direction , d1 , d2 ) : if direction == Direction . CAUSE : func = hamming_emd elif direction == Direction . EFFECT : func = effect_emd else : validate . direction ( direction ) return round ( func ( d1 , d2 ) , config . PRECISION )
|
Compute the EMD between two repertoires for a given direction .
|
60,057 |
def repertoire_distance ( direction , r1 , r2 ) : if config . MEASURE == 'EMD' : dist = directional_emd ( direction , r1 , r2 ) else : dist = measures [ config . MEASURE ] ( r1 , r2 ) return round ( dist , config . PRECISION )
|
Compute the distance between two repertoires for the given direction .
|
60,058 |
def system_repertoire_distance ( r1 , r2 ) : if config . MEASURE in measures . asymmetric ( ) : raise ValueError ( '{} is asymmetric and cannot be used as a system-level ' 'irreducibility measure.' . format ( config . MEASURE ) ) return measures [ config . MEASURE ] ( r1 , r2 )
|
Compute the distance between two repertoires of a system .
|
60,059 |
def register ( self , name , asymmetric = False ) : def register_func ( func ) : if asymmetric : self . _asymmetric . append ( name ) self . store [ name ] = func return func return register_func
|
Decorator for registering a measure with PyPhi .
|
60,060 |
def partitions ( collection ) : collection = list ( collection ) if not collection : return if len ( collection ) == 1 : yield [ collection ] return first = collection [ 0 ] for smaller in partitions ( collection [ 1 : ] ) : for n , subset in enumerate ( smaller ) : yield smaller [ : n ] + [ [ first ] + subset ] + smaller [ n + 1 : ] yield [ [ first ] ] + smaller
|
Generate all set partitions of a collection .
|
60,061 |
def bipartition_indices ( N ) : result = [ ] if N <= 0 : return result for i in range ( 2 ** ( N - 1 ) ) : part = [ [ ] , [ ] ] for n in range ( N ) : bit = ( i >> n ) & 1 part [ bit ] . append ( n ) result . append ( ( tuple ( part [ 1 ] ) , tuple ( part [ 0 ] ) ) ) return result
|
Return indices for undirected bipartitions of a sequence .
|
60,062 |
def bipartition ( seq ) : return [ ( tuple ( seq [ i ] for i in part0_idx ) , tuple ( seq [ j ] for j in part1_idx ) ) for part0_idx , part1_idx in bipartition_indices ( len ( seq ) ) ]
|
Return a list of bipartitions for a sequence .
|
60,063 |
def directed_bipartition ( seq , nontrivial = False ) : bipartitions = [ ( tuple ( seq [ i ] for i in part0_idx ) , tuple ( seq [ j ] for j in part1_idx ) ) for part0_idx , part1_idx in directed_bipartition_indices ( len ( seq ) ) ] if nontrivial : return bipartitions [ 1 : - 1 ] return bipartitions
|
Return a list of directed bipartitions for a sequence .
|
60,064 |
def bipartition_of_one ( seq ) : seq = list ( seq ) for i , elt in enumerate ( seq ) : yield ( ( elt , ) , tuple ( seq [ : i ] + seq [ ( i + 1 ) : ] ) )
|
Generate bipartitions where one part is of length 1 .
|
60,065 |
def directed_bipartition_of_one ( seq ) : bipartitions = list ( bipartition_of_one ( seq ) ) return chain ( bipartitions , reverse_elements ( bipartitions ) )
|
Generate directed bipartitions where one part is of length 1 .
|
60,066 |
def directed_tripartition_indices ( N ) : result = [ ] if N <= 0 : return result base = [ 0 , 1 , 2 ] for key in product ( base , repeat = N ) : part = [ [ ] , [ ] , [ ] ] for i , location in enumerate ( key ) : part [ location ] . append ( i ) result . append ( tuple ( tuple ( p ) for p in part ) ) return result
|
Return indices for directed tripartitions of a sequence .
|
60,067 |
def directed_tripartition ( seq ) : for a , b , c in directed_tripartition_indices ( len ( seq ) ) : yield ( tuple ( seq [ i ] for i in a ) , tuple ( seq [ j ] for j in b ) , tuple ( seq [ k ] for k in c ) )
|
Generator over all directed tripartitions of a sequence .
|
60,068 |
def k_partitions ( collection , k ) : collection = list ( collection ) n = len ( collection ) if n == 0 or k < 1 : return [ ] if k == 1 : return [ [ collection ] ] a = [ 0 ] * ( n + 1 ) for j in range ( 1 , k + 1 ) : a [ n - k + j ] = j - 1 return _f ( k , n , 0 , n , a , k , collection )
|
Generate all k - partitions of a collection .
|
60,069 |
def mip_partitions ( mechanism , purview , node_labels = None ) : func = partition_types [ config . PARTITION_TYPE ] return func ( mechanism , purview , node_labels )
|
Return a generator over all mechanism - purview partitions based on the current configuration .
|
60,070 |
def mip_bipartitions ( mechanism , purview , node_labels = None ) : r numerators = bipartition ( mechanism ) denominators = directed_bipartition ( purview ) for n , d in product ( numerators , denominators ) : if ( n [ 0 ] or d [ 0 ] ) and ( n [ 1 ] or d [ 1 ] ) : yield Bipartition ( Part ( n [ 0 ] , d [ 0 ] ) , Part ( n [ 1 ] , d [ 1 ] ) , node_labels = node_labels )
|
r Return an generator of all |small_phi| bipartitions of a mechanism over a purview .
|
60,071 |
def wedge_partitions ( mechanism , purview , node_labels = None ) : numerators = bipartition ( mechanism ) denominators = directed_tripartition ( purview ) yielded = set ( ) def valid ( factoring ) : numerator , denominator = factoring return ( ( numerator [ 0 ] or denominator [ 0 ] ) and ( numerator [ 1 ] or denominator [ 1 ] ) and ( ( numerator [ 0 ] and numerator [ 1 ] ) or not denominator [ 0 ] or not denominator [ 1 ] ) ) for n , d in filter ( valid , product ( numerators , denominators ) ) : tripart = Tripartition ( Part ( n [ 0 ] , d [ 0 ] ) , Part ( n [ 1 ] , d [ 1 ] ) , Part ( ( ) , d [ 2 ] ) , node_labels = node_labels ) . normalize ( ) def nonempty ( part ) : return part . mechanism or part . purview def compressible ( tripart ) : pairs = [ ( tripart [ 0 ] , tripart [ 1 ] ) , ( tripart [ 0 ] , tripart [ 2 ] ) , ( tripart [ 1 ] , tripart [ 2 ] ) ] for x , y in pairs : if ( nonempty ( x ) and nonempty ( y ) and ( x . mechanism + y . mechanism == ( ) or x . purview + y . purview == ( ) ) ) : return True return False if not compressible ( tripart ) and tripart not in yielded : yielded . add ( tripart ) yield tripart
|
Return an iterator over all wedge partitions .
|
60,072 |
def all_partitions ( mechanism , purview , node_labels = None ) : for mechanism_partition in partitions ( mechanism ) : mechanism_partition . append ( [ ] ) n_mechanism_parts = len ( mechanism_partition ) max_purview_partition = min ( len ( purview ) , n_mechanism_parts ) for n_purview_parts in range ( 1 , max_purview_partition + 1 ) : n_empty = n_mechanism_parts - n_purview_parts for purview_partition in k_partitions ( purview , n_purview_parts ) : purview_partition = [ tuple ( _list ) for _list in purview_partition ] purview_partition . extend ( [ ( ) ] * n_empty ) for purview_permutation in set ( permutations ( purview_partition ) ) : parts = [ Part ( tuple ( m ) , tuple ( p ) ) for m , p in zip ( mechanism_partition , purview_permutation ) ] if parts [ 0 ] . mechanism == mechanism and parts [ 0 ] . purview : continue yield KPartition ( * parts , node_labels = node_labels )
|
Return all possible partitions of a mechanism and purview .
|
60,073 |
def naturalize_string ( key ) : return [ int ( text ) if text . isdigit ( ) else text . lower ( ) for text in re . split ( numregex , key ) ]
|
Analyzes string in a human way to enable natural sort
|
60,074 |
def fetch_sel ( self , ipmicmd , clear = False ) : records = [ ] endat = self . _fetch_entries ( ipmicmd , 0 , records ) if clear and records : rsp = ipmicmd . xraw_command ( netfn = 0xa , command = 0x42 ) rsvid = struct . unpack_from ( '<H' , rsp [ 'data' ] ) [ 0 ] del records [ - 1 ] self . _fetch_entries ( ipmicmd , endat , records , rsvid ) clrdata = bytearray ( struct . pack ( '<HI' , rsvid , 0xAA524C43 ) ) ipmicmd . xraw_command ( netfn = 0xa , command = 0x47 , data = clrdata ) _fix_sel_time ( records , ipmicmd ) return records
|
Fetch SEL entries
|
60,075 |
def oem_init ( self ) : if self . _oemknown : return self . _oem , self . _oemknown = get_oem_handler ( self . _get_device_id ( ) , self )
|
Initialize the command object for OEM capabilities
|
60,076 |
def reset_bmc ( self ) : response = self . raw_command ( netfn = 6 , command = 2 ) if 'error' in response : raise exc . IpmiException ( response [ 'error' ] )
|
Do a cold reset in BMC
|
60,077 |
def xraw_command ( self , netfn , command , bridge_request = ( ) , data = ( ) , delay_xmit = None , retry = True , timeout = None ) : rsp = self . ipmi_session . raw_command ( netfn = netfn , command = command , bridge_request = bridge_request , data = data , delay_xmit = delay_xmit , retry = retry , timeout = timeout ) if 'error' in rsp : raise exc . IpmiException ( rsp [ 'error' ] , rsp [ 'code' ] ) rsp [ 'data' ] = buffer ( rsp [ 'data' ] ) return rsp
|
Send raw ipmi command to BMC raising exception on error
|
60,078 |
def raw_command ( self , netfn , command , bridge_request = ( ) , data = ( ) , delay_xmit = None , retry = True , timeout = None ) : rsp = self . ipmi_session . raw_command ( netfn = netfn , command = command , bridge_request = bridge_request , data = data , delay_xmit = delay_xmit , retry = retry , timeout = timeout ) if 'data' in rsp : rsp [ 'data' ] = list ( rsp [ 'data' ] ) return rsp
|
Send raw ipmi command to BMC
|
60,079 |
def get_power ( self ) : response = self . raw_command ( netfn = 0 , command = 1 ) if 'error' in response : raise exc . IpmiException ( response [ 'error' ] ) assert ( response [ 'command' ] == 1 and response [ 'netfn' ] == 1 ) powerstate = 'on' if ( response [ 'data' ] [ 0 ] & 1 ) else 'off' return { 'powerstate' : powerstate }
|
Get current power state of the managed system
|
60,080 |
def get_event_log ( self , clear = False ) : self . oem_init ( ) return sel . EventHandler ( self . init_sdr ( ) , self ) . fetch_sel ( self , clear )
|
Retrieve the log of events optionally clearing
|
60,081 |
def decode_pet ( self , specifictrap , petdata ) : self . oem_init ( ) return sel . EventHandler ( self . init_sdr ( ) , self ) . decode_pet ( specifictrap , petdata )
|
Decode PET to an event
|
60,082 |
def get_inventory_descriptions ( self ) : yield "System" self . init_sdr ( ) for fruid in sorted ( self . _sdr . fru ) : yield self . _sdr . fru [ fruid ] . fru_name self . oem_init ( ) for compname in self . _oem . get_oem_inventory_descriptions ( ) : yield compname
|
Retrieve list of things that could be inventoried
|
60,083 |
def get_inventory_of_component(self, component):
    """Retrieve the inventory data for one named component.

    'System' maps to FRU 0; other names are matched against the FRU
    records in the SDR, falling back to an OEM-specific lookup when no
    record matches.
    """
    self.oem_init()
    if component == 'System':
        return self._get_zero_fru()
    self.init_sdr()
    for fruid, record in self._sdr.fru.items():
        if record.fru_name != component:
            continue
        rawinfo = fru.FRU(ipmicmd=self, fruid=fruid, sdr=record).info
        return self._oem.process_fru(rawinfo, component)
    return self._oem.get_inventory_of_component(component)
|
Retrieve inventory of a component
|
60,084 |
def get_inventory(self):
    """Yield (name, info) pairs for every inventoried component.

    The synthetic "System" entry (FRU 0) is yielded first, then each
    FRU described by the SDR, then OEM-provided components.  FRUs
    whose data cannot be read (info is None) are still yielded, with
    None as their info, rather than being skipped.
    """
    self.oem_init()
    yield ("System", self._get_zero_fru())
    self.init_sdr()
    for fruid in sorted(self._sdr.fru):
        fruinf = fru.FRU(
            ipmicmd=self, fruid=fruid, sdr=self._sdr.fru[fruid]).info
        if fruinf is not None:
            # Let the OEM layer post-process/augment the raw FRU data
            fruinf = self._oem.process_fru(
                fruinf, self._sdr.fru[fruid].fru_name)
        yield (self._sdr.fru[fruid].fru_name, fruinf)
    for componentpair in self._oem.get_oem_inventory():
        yield componentpair
|
Retrieve inventory of system
|
60,085 |
def get_health(self):
    """Summarize the health of the managed system.

    :returns: dict with 'health' (severity flags OR-ed together across
        all bad readings) and 'badreadings' (the sensor readings that
        were not Ok; if none were found, OEM-supplied fallback readings
        are used instead).
    """
    summary = {'badreadings': [], 'health': const.Health.Ok}
    fallbackreadings = []
    try:
        self.oem_init()
        fallbackreadings = self._oem.get_health(summary)
        for reading in self.get_sensor_data():
            if reading.health == const.Health.Ok:
                continue
            summary['health'] |= reading.health
            summary['badreadings'].append(reading)
    except exc.BypassGenericBehavior:
        # The OEM layer handled health entirely; keep its summary as-is.
        pass
    if not summary['badreadings']:
        summary['badreadings'] = fallbackreadings
    return summary
|
Summarize health of managed system
|
60,086 |
def get_sensor_reading(self, sensorname):
    """Get a single sensor reading by sensor name.

    Looks the name up among the SDR sensors; if no SDR sensor matches,
    the lookup is deferred to the OEM layer.

    :param sensorname: the sensor name as reported by the SDR.
    :raises exc.IpmiException: if the BMC returns an error for the
        Get Sensor Reading command.
    """
    self.init_sdr()
    for sensor in self._sdr.get_sensor_numbers():
        if self._sdr.sensors[sensor].name == sensorname:
            # Sensor/Event netfn (4), Get Sensor Reading (0x2d)
            rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,))
            if 'error' in rsp:
                raise exc.IpmiException(rsp['error'], rsp['code'])
            return self._sdr.sensors[sensor].decode_sensor_reading(
                rsp['data'])
    self.oem_init()
    return self._oem.get_sensor_reading(sensorname)
|
Get a sensor reading by name
|
60,087 |
def _fetch_lancfg_param(self, channel, param, prefixlen=False):
    """Internal helper to fetch one LAN configuration parameter.

    Issues Get LAN Configuration Parameters (netfn 0xc, command 2) and
    decodes the reply by length: 5 bytes is an IPv4 address (returned
    as a CIDR prefix length when prefixlen is True), 7 bytes is a MAC
    address, and 2 bytes is a single-octet value.  Unset values
    (0.0.0.0 or an all-zero MAC) and unexpected revision bytes yield
    None.
    """
    fetchcmd = bytearray((channel, param, 0, 0))
    fetched = self.xraw_command(0xc, 2, data=fetchcmd)
    fetchdata = fetched['data']
    # First byte is the parameter revision; 17 (0x11) is the expected one
    if ord(fetchdata[0]) != 17:
        return None
    if len(fetchdata) == 5:
        if prefixlen:
            return _mask_to_cidr(fetchdata[1:])
        else:
            ip = socket.inet_ntoa(fetchdata[1:])
            if ip == '0.0.0.0':
                # All-zero address means "not configured"
                return None
            return ip
    elif len(fetchdata) == 7:
        mac = '{0:02x}:{1:02x}:{2:02x}:{3:02x}:{4:02x}:{5:02x}'.format(
            *bytearray(fetchdata[1:]))
        if mac == '00:00:00:00:00:00':
            # All-zero MAC means "not configured"
            return None
        return mac
    elif len(fetchdata) == 2:
        return ord(fetchdata[1])
    else:
        raise Exception("Unrecognized data format " + repr(fetchdata))
|
Internal helper for fetching lan cfg parameters
|
60,088 |
def set_net_configuration(self, ipv4_address=None, ipv4_configuration=None,
                          ipv4_gateway=None, channel=None):
    """Set network configuration data.

    :param ipv4_address: IPv4 address, optionally with a '/prefix'
        suffix, in which case the netmask is programmed as well.
    :param ipv4_configuration: 'dhcp' or 'static'.
    :param ipv4_gateway: IPv4 default gateway address.
    :param channel: LAN channel number; autodetected when None.
    """
    if channel is None:
        channel = self.get_network_channel()
    if ipv4_configuration is not None:
        # LAN parameter 4: IP address source (1 = static, 2 = DHCP)
        cmddata = [channel, 4, 0]
        if ipv4_configuration.lower() == 'dhcp':
            cmddata[-1] = 2
        elif ipv4_configuration.lower() == 'static':
            cmddata[-1] = 1
        else:
            raise Exception('Unrecognized ipv4cfg parameter {0}'.format(
                ipv4_configuration))
        self.xraw_command(netfn=0xc, command=1, data=cmddata)
    if ipv4_address is not None:
        netmask = None
        if '/' in ipv4_address:
            ipv4_address, prefix = ipv4_address.split('/')
            netmask = _cidr_to_mask(int(prefix))
        # LAN parameter 3: IP address
        cmddata = bytearray((channel, 3)) + socket.inet_aton(ipv4_address)
        self.xraw_command(netfn=0xc, command=1, data=cmddata)
        if netmask is not None:
            # LAN parameter 6: subnet mask
            cmddata = bytearray((channel, 6)) + netmask
            self.xraw_command(netfn=0xc, command=1, data=cmddata)
    if ipv4_gateway is not None:
        # LAN parameter 12: default gateway address
        cmddata = bytearray((channel, 12)) + socket.inet_aton(ipv4_gateway)
        self.xraw_command(netfn=0xc, command=1, data=cmddata)
|
Set network configuration data .
|
60,089 |
def get_net_configuration(self, channel=None, gateway_macs=True):
    """Get network configuration data for a LAN channel.

    :param channel: LAN channel number; autodetected when None.
    :param gateway_macs: also fetch the gateway MAC addresses.
    :returns: dict of IPv4 address/configuration/gateway/MAC details,
        augmented by the OEM layer.
    """
    if channel is None:
        channel = self.get_network_channel()
    retdata = {}
    v4addr = self._fetch_lancfg_param(channel, 3)
    if v4addr is None:
        retdata['ipv4_address'] = None
    else:
        # Combine address (param 3) with mask (param 6) as CIDR notation
        v4masklen = self._fetch_lancfg_param(channel, 6, prefixlen=True)
        retdata['ipv4_address'] = '{0}/{1}'.format(v4addr, v4masklen)
    v4cfgmethods = {
        0: 'Unspecified',
        1: 'Static',
        2: 'DHCP',
        3: 'BIOS',
        4: 'Other',
    }
    # LAN parameter 4: IP address source
    retdata['ipv4_configuration'] = v4cfgmethods[self._fetch_lancfg_param(
        channel, 4)]
    retdata['mac_address'] = self._fetch_lancfg_param(channel, 5)
    retdata['ipv4_gateway'] = self._fetch_lancfg_param(channel, 12)
    retdata['ipv4_backup_gateway'] = self._fetch_lancfg_param(channel, 14)
    if gateway_macs:
        retdata['ipv4_gateway_mac'] = self._fetch_lancfg_param(channel, 13)
        retdata['ipv4_backup_gateway_mac'] = self._fetch_lancfg_param(
            channel, 15)
    self.oem_init()
    self._oem.add_extra_net_configuration(retdata)
    return retdata
|
Get network configuration data
|
60,090 |
def get_sensor_data(self):
    """Yield a reading object for every SDR sensor, then any
    OEM-provided readings.

    Sensors the BMC reports as unavailable (completion code 203) are
    silently skipped.
    """
    self.init_sdr()
    for number in self._sdr.get_sensor_numbers():
        rsp = self.raw_command(command=0x2d, netfn=4, data=(number,))
        if 'error' in rsp:
            if rsp['code'] == 203:
                # Sensor not present; move on to the next one
                continue
            raise exc.IpmiException(rsp['error'], code=rsp['code'])
        record = self._sdr.sensors[number]
        yield record.decode_sensor_reading(rsp['data'])
    self.oem_init()
    for oemreading in self._oem.get_sensor_data():
        yield oemreading
|
Get sensor reading objects
|
60,091 |
def get_sensor_descriptions(self):
    """Yield {'name', 'type'} dicts for every SDR sensor, followed by
    OEM-specific sensor descriptions."""
    self.init_sdr()
    for number in self._sdr.get_sensor_numbers():
        record = self._sdr.sensors[number]
        yield {'name': record.name, 'type': record.sensor_type}
    self.oem_init()
    for description in self._oem.get_sensor_descriptions():
        yield description
|
Get available sensor names
|
60,092 |
def get_network_channel(self):
    """Get a reasonable default network channel.

    Probes channel 0xe (the "this channel" alias) and then channels
    1-11, caching and returning the first LAN-type channel whose LAN
    configuration is actually readable.
    """
    if self._netchannel is None:
        for channel in chain((0xe,), range(1, 0xc)):
            try:
                # Get Channel Info (netfn 6, command 0x42)
                rsp = self.xraw_command(
                    netfn=6, command=0x42, data=(channel,))
            except exc.IpmiException as ie:
                if ie.ipmicode == 0xcc:
                    # Invalid data field: this channel number does not
                    # exist on this BMC; try the next candidate
                    continue
                else:
                    raise
            # Low 7 bits of the second byte hold the channel medium type
            chantype = ord(rsp['data'][1]) & 0b1111111
            if chantype in (4, 6):
                try:
                    # Confirm LAN parameters are readable before
                    # committing to this channel (channel 0xe already
                    # refers to the current channel, so skip the probe).
                    if channel != 0xe:
                        self.xraw_command(
                            netfn=0xc, command=2, data=(channel, 5, 0, 0))
                    # Low nibble of the first byte is the actual
                    # channel number (resolves the 0xe alias)
                    self._netchannel = ord(rsp['data'][0]) & 0b1111
                    break
                except exc.IpmiException as ie:
                    # LAN config not accessible; keep looking
                    continue
    return self._netchannel
|
Get a reasonable default network channel .
|
60,093 |
def get_alert_destination_count(self, channel=None):
    """Get the number of alert destinations supported on a channel.

    :param channel: LAN channel number; autodetected when None.
    """
    if channel is None:
        channel = self.get_network_channel()
    # LAN parameter 0x11: number of destinations
    request = (channel, 0x11, 0, 0)
    rsp = self.xraw_command(netfn=0xc, command=2, data=request)
    return ord(rsp['data'][1])
|
Get the number of supported alert destinations
|
60,094 |
def get_alert_destination(self, destination=0, channel=None):
    """Get an alert destination's configuration.

    Reads LAN parameter 18 (destination type: acknowledgement settings
    and retries) and parameter 19 (destination address) and decodes
    them into a dict.

    :param destination: destination index (0 is the default entry).
    :param channel: LAN channel number; autodetected when None.
    """
    destinfo = {}
    if channel is None:
        channel = self.get_network_channel()
    rqdata = (channel, 18, destination, 0)
    rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata)
    dtype, acktimeout, retries = struct.unpack('BBB', rsp['data'][2:])
    # Top bit of the destination type flags "acknowledge required"
    destinfo['acknowledge_required'] = dtype & 0b10000000 == 0b10000000
    if destinfo['acknowledge_required']:
        # The timeout is only meaningful when acknowledgement is on
        destinfo['acknowledge_timeout'] = acktimeout
    destinfo['retries'] = retries
    rqdata = (channel, 19, destination, 0)
    rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata)
    # High nibble of the address format byte: 0 = IPv4, 1 = IPv6
    if ord(rsp['data'][2]) & 0b11110000 == 0:
        destinfo['address_format'] = 'ipv4'
        destinfo['address'] = socket.inet_ntoa(rsp['data'][4:8])
    elif ord(rsp['data'][2]) & 0b11110000 == 0b10000:
        destinfo['address_format'] = 'ipv6'
        destinfo['address'] = socket.inet_ntop(
            socket.AF_INET6, rsp['data'][3:])
    return destinfo
|
Get alert destination
|
60,095 |
def clear_alert_destination(self, destination=0, channel=None):
    """Clear an alert destination by resetting it to 0.0.0.0 with no
    acknowledgement and no retries.

    :param destination: destination index to clear.
    :param channel: LAN channel number; autodetected when None.
    """
    if channel is None:
        channel = self.get_network_channel()
    self.set_alert_destination(
        '0.0.0.0', False, 0, 0, destination, channel)
|
Clear an alert destination
|
60,096 |
def set_alert_community(self, community, channel=None):
    """Set the SNMP community string used for alerts.

    :param community: community string; at most 18 bytes once UTF-8
        encoded (the fixed size of the IPMI LAN community parameter).
    :param channel: LAN channel number; autodetected when None.
    :raises ValueError: if the encoded community exceeds 18 bytes
        (previously an oversized string silently produced a malformed,
        unpadded request).
    """
    if channel is None:
        channel = self.get_network_channel()
    community = community.encode('utf-8')
    if len(community) > 18:
        raise ValueError('community string may not exceed 18 bytes')
    # NUL-pad to the fixed 18-byte parameter size
    community += b'\x00' * (18 - len(community))
    # LAN parameter 16: community string
    cmddata = bytearray((channel, 16))
    cmddata += community
    self.xraw_command(netfn=0xc, command=1, data=cmddata)
|
Set the community string for alerts
|
60,097 |
def _assure_alert_policy(self, channel, destination):
    """Make sure a PEF alert policy entry targets channel/destination.

    Scans the alert policy table (Get PEF Configuration Parameters,
    netfn 4, command 0x13); returns True if an enabled entry already
    targets the given channel/destination pair, otherwise writes one
    into the first disabled slot.

    :raises Exception: when the policy table has no free entry.
    """
    # PEF parameter 8: number of alert policy entries
    rsp = self.xraw_command(netfn=4, command=0x13, data=(8, 0, 0))
    numpol = ord(rsp['data'][1])
    # Target encoding: channel in the high nibble, destination in the low
    desiredchandest = (channel << 4) | destination
    availpolnum = None
    for polnum in range(1, numpol + 1):
        # PEF parameter 9: alert policy entry <polnum>
        currpol = self.xraw_command(netfn=4, command=0x13,
                                    data=(9, polnum, 0))
        polidx, chandest = struct.unpack_from('>BB', currpol['data'][2:4])
        if not polidx & 0b1000:
            # Enable bit clear: entry is disabled; remember the first
            # such slot as a candidate for reuse
            if availpolnum is None:
                availpolnum = polnum
            continue
        if chandest == desiredchandest:
            return True
    # No enabled entry matched; create one in the free slot
    if availpolnum is None:
        raise Exception("No available alert policy entry")
    # 24 (0b11000) includes the enable bit (0b1000) tested above;
    # remaining bits select the policy behavior per the PEF spec
    self.xraw_command(netfn=4, command=0x12,
                      data=(9, availpolnum, 24, desiredchandest, 0))
|
Make sure an alert policy exists
|
60,098 |
def get_alert_community(self, channel=None):
    """Get the current SNMP community string used for alerts.

    The fixed-size parameter is NUL padded, so only the portion before
    the first NUL is returned.

    :param channel: LAN channel number; autodetected when None.
    """
    if channel is None:
        channel = self.get_network_channel()
    # LAN parameter 16: community string
    rsp = self.xraw_command(netfn=0xc, command=2, data=(channel, 16, 0, 0))
    community = rsp['data'][1:].partition('\x00')[0]
    return community
|
Get the current community string for alerts
|
60,099 |
def set_alert_destination(self, ip=None, acknowledge_required=None,
                          acknowledge_timeout=None, retries=None,
                          destination=0, channel=None):
    """Configure one or more parameters of an alert destination.

    Only the parameters that are not None are changed.  Setting an
    address also assures a matching PEF alert policy entry exists,
    unless the address is '0.0.0.0' (used to clear a destination).

    :param ip: IPv4 or IPv6 destination address as a string.
    :param acknowledge_required: whether the alert must be acked.
    :param acknowledge_timeout: acknowledgement timeout value.
    :param retries: number of retransmissions.
    :param destination: destination index to modify.
    :param channel: LAN channel number; autodetected when None.
    """
    if channel is None:
        channel = self.get_network_channel()
    if ip is not None:
        # LAN parameter 19: destination address
        destdata = bytearray((channel, 19, destination))
        try:
            parsedip = socket.inet_aton(ip)
            # IPv4 layout: two format/gateway bytes, 4 address bytes,
            # then a zeroed 6-byte MAC field
            destdata.extend((0, 0))
            destdata.extend(parsedip)
            destdata.extend(b'\x00\x00\x00\x00\x00\x00')
        except socket.error:
            # Not a valid IPv4 string; treat it as IPv6
            if self._supports_standard_ipv6:
                parsedip = socket.inet_pton(socket.AF_INET6, ip)
                destdata.append(0b10000000)  # address format flag: IPv6
                destdata.extend(parsedip)
            else:
                # BMC lacks standard IPv6 support; defer to OEM code
                destdata = None
                self.oem_init()
                self._oem.set_alert_ipv6_destination(ip, destination,
                                                     channel)
        if destdata:
            self.xraw_command(netfn=0xc, command=1, data=destdata)
    if (acknowledge_required is not None or retries is not None
            or acknowledge_timeout is not None):
        # Read-modify-write LAN parameter 18 (destination type) so
        # unspecified fields keep their current values
        currtype = self.xraw_command(netfn=0xc, command=2,
                                     data=(channel, 18, destination, 0))
        if currtype['data'][0] != b'\x11':
            # Unexpected parameter revision byte
            raise exc.PyghmiException("Unknown parameter format")
        currtype = bytearray(currtype['data'][1:])
        if acknowledge_required is not None:
            if acknowledge_required:
                currtype[1] |= 0b10000000
            else:
                currtype[1] &= 0b1111111
        if acknowledge_timeout is not None:
            currtype[2] = acknowledge_timeout
        if retries is not None:
            currtype[3] = retries
        destreq = bytearray((channel, 18))
        destreq.extend(currtype)
        self.xraw_command(netfn=0xc, command=1, data=destreq)
    if not ip == '0.0.0.0':
        self._assure_alert_policy(channel, destination)
|
Configure one or more parameters of an alert destination
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.