Dataset columns:

    repo              string, length 7-55
    path              string, length 4-127
    func_name         string, length 1-88
    original_string   string, length 75-19.8k
    language          string, 1 distinct value
    code              string, length 75-19.8k
    code_tokens       sequence
    docstring         string, length 3-17.3k
    docstring_tokens  sequence
    sha               string, length 40-40
    url               string, length 87-242
    partition         string, 1 distinct value
wmayner/pyphi
pyphi/macro.py
Blackbox.hidden_indices
python
def hidden_indices(self):
    """All elements hidden inside the blackboxes."""
    return tuple(sorted(set(self.micro_indices) -
                        set(self.output_indices)))
[ "def", "hidden_indices", "(", "self", ")", ":", "return", "tuple", "(", "sorted", "(", "set", "(", "self", ".", "micro_indices", ")", "-", "set", "(", "self", ".", "output_indices", ")", ")", ")" ]
All elements hidden inside the blackboxes.
[ "All", "elements", "hidden", "inside", "the", "blackboxes", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L582-L585
train
wmayner/pyphi
pyphi/macro.py
Blackbox.outputs_of
python
def outputs_of(self, partition_index):
    """The outputs of the partition at ``partition_index``.

    Note that this returns a tuple of element indices, since coarse-
    grained blackboxes may have multiple outputs.
    """
    partition = self.partition[partition_index]
    outputs = set(partition).intersection(self.output_indices)
    return tuple(sorted(outputs))
[ "def", "outputs_of", "(", "self", ",", "partition_index", ")", ":", "partition", "=", "self", ".", "partition", "[", "partition_index", "]", "outputs", "=", "set", "(", "partition", ")", ".", "intersection", "(", "self", ".", "output_indices", ")", "return", "tuple", "(", "sorted", "(", "outputs", ")", ")" ]
The outputs of the partition at ``partition_index``. Note that this returns a tuple of element indices, since coarse- grained blackboxes may have multiple outputs.
[ "The", "outputs", "of", "the", "partition", "at", "partition_index", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L600-L608
train
wmayner/pyphi
pyphi/macro.py
Blackbox.reindex
python
def reindex(self):
    """Squeeze the indices of this blackboxing to ``0..n``.

    Returns:
        Blackbox: a new, reindexed |Blackbox|.

    Example:
        >>> partition = ((3,), (2, 4))
        >>> output_indices = (2, 3)
        >>> blackbox = Blackbox(partition, output_indices)
        >>> blackbox.reindex()
        Blackbox(partition=((1,), (0, 2)), output_indices=(0, 1))
    """
    _map = dict(zip(self.micro_indices, reindex(self.micro_indices)))
    partition = tuple(
        tuple(_map[index] for index in group)
        for group in self.partition
    )
    output_indices = tuple(_map[i] for i in self.output_indices)
    return Blackbox(partition, output_indices)
[ "def", "reindex", "(", "self", ")", ":", "_map", "=", "dict", "(", "zip", "(", "self", ".", "micro_indices", ",", "reindex", "(", "self", ".", "micro_indices", ")", ")", ")", "partition", "=", "tuple", "(", "tuple", "(", "_map", "[", "index", "]", "for", "index", "in", "group", ")", "for", "group", "in", "self", ".", "partition", ")", "output_indices", "=", "tuple", "(", "_map", "[", "i", "]", "for", "i", "in", "self", ".", "output_indices", ")", "return", "Blackbox", "(", "partition", ",", "output_indices", ")" ]
Squeeze the indices of this blackboxing to ``0..n``. Returns: Blackbox: a new, reindexed |Blackbox|. Example: >>> partition = ((3,), (2, 4)) >>> output_indices = (2, 3) >>> blackbox = Blackbox(partition, output_indices) >>> blackbox.reindex() Blackbox(partition=((1,), (0, 2)), output_indices=(0, 1))
[ "Squeeze", "the", "indices", "of", "this", "blackboxing", "to", "0", "..", "n", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L610-L630
train
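The index-squeezing step in `reindex` is a generic pattern: rank the surviving micro indices and relabel every group through that map. A minimal standalone sketch of the idea (the helper name `squeeze_indices` and the sample values are illustrative, not part of PyPhi):

# Map each surviving index to its rank, then relabel a grouping through the map.
def squeeze_indices(indices):
    return {index: rank for rank, index in enumerate(sorted(indices))}

partition = ((3,), (2, 4))
output_indices = (2, 3)
micro_indices = sorted(i for group in partition for i in group)

mapping = squeeze_indices(micro_indices)          # {2: 0, 3: 1, 4: 2}
new_partition = tuple(tuple(mapping[i] for i in group) for group in partition)
new_outputs = tuple(mapping[i] for i in output_indices)

assert new_partition == ((1,), (0, 2))            # matches the doctest above
assert new_outputs == (0, 1)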
wmayner/pyphi
pyphi/macro.py
Blackbox.macro_state
python
def macro_state(self, micro_state):
    """Compute the macro-state of this blackbox.

    This is just the state of the blackbox's output indices.

    Args:
        micro_state (tuple[int]): The state of the micro-elements in the
            blackbox.

    Returns:
        tuple[int]: The state of the output indices.
    """
    assert len(micro_state) == len(self.micro_indices)

    reindexed = self.reindex()
    return utils.state_of(reindexed.output_indices, micro_state)
[ "def", "macro_state", "(", "self", ",", "micro_state", ")", ":", "assert", "len", "(", "micro_state", ")", "==", "len", "(", "self", ".", "micro_indices", ")", "reindexed", "=", "self", ".", "reindex", "(", ")", "return", "utils", ".", "state_of", "(", "reindexed", ".", "output_indices", ",", "micro_state", ")" ]
Compute the macro-state of this blackbox. This is just the state of the blackbox's output indices. Args: micro_state (tuple[int]): The state of the micro-elements in the blackbox. Returns: tuple[int]: The state of the output indices.
[ "Compute", "the", "macro", "-", "state", "of", "this", "blackbox", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L632-L647
train
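`utils.state_of` is not shown in this record; the sketch below uses a plausible stand-in that simply picks out the state at the given indices, to illustrate how the macro-state reduces to the state of the (reindexed) output indices. The helper and the sample values are assumptions, not PyPhi's implementation:

def state_of(indices, state):
    # Illustrative stand-in: the substate of `state` at `indices`.
    return tuple(state[i] for i in indices)

# A blackbox over micro elements (2, 3, 4) with outputs (2, 3):
# after reindexing, the outputs sit at positions (0, 1) of the micro state.
micro_state = (1, 0, 1)            # states of elements 2, 3, 4, in order
reindexed_outputs = (0, 1)
assert state_of(reindexed_outputs, micro_state) == (1, 0)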
wmayner/pyphi
pyphi/macro.py
Blackbox.in_same_box
python
def in_same_box(self, a, b):
    """Return ``True`` if nodes ``a`` and ``b`` are in the same box."""
    assert a in self.micro_indices
    assert b in self.micro_indices

    for part in self.partition:
        if a in part and b in part:
            return True

    return False
[ "def", "in_same_box", "(", "self", ",", "a", ",", "b", ")", ":", "assert", "a", "in", "self", ".", "micro_indices", "assert", "b", "in", "self", ".", "micro_indices", "for", "part", "in", "self", ".", "partition", ":", "if", "a", "in", "part", "and", "b", "in", "part", ":", "return", "True", "return", "False" ]
Return ``True`` if nodes ``a`` and ``b`` are in the same box.
[ "Return", "True", "if", "nodes", "a", "and", "b", "are", "in", "the", "same", "box", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L649-L658
train
wmayner/pyphi
pyphi/macro.py
Blackbox.hidden_from
python
def hidden_from(self, a, b):
    """Return True if ``a`` is hidden in a different box than ``b``."""
    return a in self.hidden_indices and not self.in_same_box(a, b)
[ "def", "hidden_from", "(", "self", ",", "a", ",", "b", ")", ":", "return", "a", "in", "self", ".", "hidden_indices", "and", "not", "self", ".", "in_same_box", "(", "a", ",", "b", ")" ]
Return True if ``a`` is hidden in a different box than ``b``.
[ "Return", "True", "if", "a", "is", "hidden", "in", "a", "different", "box", "than", "b", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/macro.py#L660-L662
train
wmayner/pyphi
pyphi/network.py
irreducible_purviews
python
def irreducible_purviews(cm, direction, mechanism, purviews):
    """Return all purviews which are irreducible for the mechanism.

    Args:
        cm (np.ndarray): An |N x N| connectivity matrix.
        direction (Direction): |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism in question.
        purviews (list[tuple[int]]): The purviews to check.

    Returns:
        list[tuple[int]]: All purviews in ``purviews`` which are not reducible
        over ``mechanism``.

    Raises:
        ValueError: If ``direction`` is invalid.
    """
    def reducible(purview):
        """Return ``True`` if purview is trivially reducible."""
        _from, to = direction.order(mechanism, purview)
        return connectivity.block_reducible(cm, _from, to)

    return [purview for purview in purviews if not reducible(purview)]
[ "def", "irreducible_purviews", "(", "cm", ",", "direction", ",", "mechanism", ",", "purviews", ")", ":", "def", "reducible", "(", "purview", ")", ":", "\"\"\"Return ``True`` if purview is trivially reducible.\"\"\"", "_from", ",", "to", "=", "direction", ".", "order", "(", "mechanism", ",", "purview", ")", "return", "connectivity", ".", "block_reducible", "(", "cm", ",", "_from", ",", "to", ")", "return", "[", "purview", "for", "purview", "in", "purviews", "if", "not", "reducible", "(", "purview", ")", "]" ]
Return all purviews which are irreducible for the mechanism. Args: cm (np.ndarray): An |N x N| connectivity matrix. direction (Direction): |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism in question. purviews (list[tuple[int]]): The purviews to check. Returns: list[tuple[int]]: All purviews in ``purviews`` which are not reducible over ``mechanism``. Raises: ValueError: If ``direction`` is invalid.
[ "Return", "all", "purviews", "which", "are", "irreducible", "for", "the", "mechanism", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/network.py#L214-L235
train
wmayner/pyphi
pyphi/network.py
Network._build_tpm
python
def _build_tpm(tpm):
    """Validate the TPM passed by the user and convert to multidimensional
    form.
    """
    tpm = np.array(tpm)

    validate.tpm(tpm)

    # Convert to multidimensional state-by-node form
    if is_state_by_state(tpm):
        tpm = convert.state_by_state2state_by_node(tpm)
    else:
        tpm = convert.to_multidimensional(tpm)

    utils.np_immutable(tpm)

    return (tpm, utils.np_hash(tpm))
[ "def", "_build_tpm", "(", "tpm", ")", ":", "tpm", "=", "np", ".", "array", "(", "tpm", ")", "validate", ".", "tpm", "(", "tpm", ")", "# Convert to multidimensional state-by-node form", "if", "is_state_by_state", "(", "tpm", ")", ":", "tpm", "=", "convert", ".", "state_by_state2state_by_node", "(", "tpm", ")", "else", ":", "tpm", "=", "convert", ".", "to_multidimensional", "(", "tpm", ")", "utils", ".", "np_immutable", "(", "tpm", ")", "return", "(", "tpm", ",", "utils", ".", "np_hash", "(", "tpm", ")", ")" ]
Validate the TPM passed by the user and convert to multidimensional form.
[ "Validate", "the", "TPM", "passed", "by", "the", "user", "and", "convert", "to", "multidimensional", "form", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/network.py#L77-L93
train
wmayner/pyphi
pyphi/network.py
Network._build_cm
python
def _build_cm(self, cm):
    """Convert the passed CM to the proper format, or construct the
    unitary CM if none was provided.
    """
    if cm is None:
        # Assume all are connected.
        cm = np.ones((self.size, self.size))
    else:
        cm = np.array(cm)

    utils.np_immutable(cm)

    return (cm, utils.np_hash(cm))
[ "def", "_build_cm", "(", "self", ",", "cm", ")", ":", "if", "cm", "is", "None", ":", "# Assume all are connected.", "cm", "=", "np", ".", "ones", "(", "(", "self", ".", "size", ",", "self", ".", "size", ")", ")", "else", ":", "cm", "=", "np", ".", "array", "(", "cm", ")", "utils", ".", "np_immutable", "(", "cm", ")", "return", "(", "cm", ",", "utils", ".", "np_hash", "(", "cm", ")", ")" ]
Convert the passed CM to the proper format, or construct the unitary CM if none was provided.
[ "Convert", "the", "passed", "CM", "to", "the", "proper", "format", "or", "construct", "the", "unitary", "CM", "if", "none", "was", "provided", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/network.py#L104-L116
train
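`utils.np_immutable` and `utils.np_hash` are not defined in this record. A common way to get a similar effect with plain NumPy is to flip the array's write flag and hash its raw bytes; the helpers below are illustrative stand-ins, not PyPhi's actual utilities:

import numpy as np

def freeze(array):
    # Make the array read-only in place.
    array.setflags(write=False)
    return array

def array_hash(array):
    # Hash the array by its contents.
    return hash(array.tobytes())

size = 3
cm = np.ones((size, size))   # default: assume every node is connected to every node
freeze(cm)
print(array_hash(cm))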
wmayner/pyphi
pyphi/network.py
Network.potential_purviews
python
def potential_purviews(self, direction, mechanism):
    """All purviews which are not clearly reducible for mechanism.

    Args:
        direction (Direction): |CAUSE| or |EFFECT|.
        mechanism (tuple[int]): The mechanism which all purviews are checked
            for reducibility over.

    Returns:
        list[tuple[int]]: All purviews which are irreducible over
        ``mechanism``.
    """
    all_purviews = utils.powerset(self._node_indices)
    return irreducible_purviews(self.cm, direction, mechanism, all_purviews)
[ "def", "potential_purviews", "(", "self", ",", "direction", ",", "mechanism", ")", ":", "all_purviews", "=", "utils", ".", "powerset", "(", "self", ".", "_node_indices", ")", "return", "irreducible_purviews", "(", "self", ".", "cm", ",", "direction", ",", "mechanism", ",", "all_purviews", ")" ]
All purviews which are not clearly reducible for mechanism. Args: direction (Direction): |CAUSE| or |EFFECT|. mechanism (tuple[int]): The mechanism which all purviews are checked for reducibility over. Returns: list[tuple[int]]: All purviews which are irreducible over ``mechanism``.
[ "All", "purviews", "which", "are", "not", "clearly", "reducible", "for", "mechanism", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/network.py#L155-L169
train
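`utils.powerset` is referenced but not shown here; the standard itertools recipe conveys the idea (an illustrative stand-in, which may differ from PyPhi's version in details such as whether the empty set is included):

from itertools import chain, combinations

def powerset(iterable):
    # All subsets of `iterable`, as tuples, from the empty set upward.
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

print(list(powerset((0, 1, 2))))
# [(), (0,), (1,), (2,), (0, 1), (0, 2), (1, 2), (0, 1, 2)]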
wmayner/pyphi
pyphi/jsonify.py
_loadable_models
python
def _loadable_models():
    """A dictionary of loadable PyPhi models.

    These are stored in this function (instead of module scope) to resolve
    circular import issues.
    """
    classes = [
        pyphi.Direction,
        pyphi.Network,
        pyphi.Subsystem,
        pyphi.Transition,
        pyphi.labels.NodeLabels,
        pyphi.models.Cut,
        pyphi.models.KCut,
        pyphi.models.NullCut,
        pyphi.models.Part,
        pyphi.models.Bipartition,
        pyphi.models.KPartition,
        pyphi.models.Tripartition,
        pyphi.models.RepertoireIrreducibilityAnalysis,
        pyphi.models.MaximallyIrreducibleCauseOrEffect,
        pyphi.models.MaximallyIrreducibleCause,
        pyphi.models.MaximallyIrreducibleEffect,
        pyphi.models.Concept,
        pyphi.models.CauseEffectStructure,
        pyphi.models.SystemIrreducibilityAnalysis,
        pyphi.models.ActualCut,
        pyphi.models.AcRepertoireIrreducibilityAnalysis,
        pyphi.models.CausalLink,
        pyphi.models.Account,
        pyphi.models.AcSystemIrreducibilityAnalysis
    ]
    return {cls.__name__: cls for cls in classes}
[ "def", "_loadable_models", "(", ")", ":", "classes", "=", "[", "pyphi", ".", "Direction", ",", "pyphi", ".", "Network", ",", "pyphi", ".", "Subsystem", ",", "pyphi", ".", "Transition", ",", "pyphi", ".", "labels", ".", "NodeLabels", ",", "pyphi", ".", "models", ".", "Cut", ",", "pyphi", ".", "models", ".", "KCut", ",", "pyphi", ".", "models", ".", "NullCut", ",", "pyphi", ".", "models", ".", "Part", ",", "pyphi", ".", "models", ".", "Bipartition", ",", "pyphi", ".", "models", ".", "KPartition", ",", "pyphi", ".", "models", ".", "Tripartition", ",", "pyphi", ".", "models", ".", "RepertoireIrreducibilityAnalysis", ",", "pyphi", ".", "models", ".", "MaximallyIrreducibleCauseOrEffect", ",", "pyphi", ".", "models", ".", "MaximallyIrreducibleCause", ",", "pyphi", ".", "models", ".", "MaximallyIrreducibleEffect", ",", "pyphi", ".", "models", ".", "Concept", ",", "pyphi", ".", "models", ".", "CauseEffectStructure", ",", "pyphi", ".", "models", ".", "SystemIrreducibilityAnalysis", ",", "pyphi", ".", "models", ".", "ActualCut", ",", "pyphi", ".", "models", ".", "AcRepertoireIrreducibilityAnalysis", ",", "pyphi", ".", "models", ".", "CausalLink", ",", "pyphi", ".", "models", ".", "Account", ",", "pyphi", ".", "models", ".", "AcSystemIrreducibilityAnalysis", "]", "return", "{", "cls", ".", "__name__", ":", "cls", "for", "cls", "in", "classes", "}" ]
A dictionary of loadable PyPhi models. These are stored in this function (instead of module scope) to resolve circular import issues.
[ "A", "dictionary", "of", "loadable", "PyPhi", "models", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/jsonify.py#L51-L83
train
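Building the name-to-class map inside a function, rather than at module scope, means the classes are only looked up at call time, after all modules have finished importing. A minimal sketch of the pattern with local placeholder classes (not the real PyPhi models):

class Cut: ...
class Concept: ...

def loadable_models():
    # In the real code the classes come from another module; resolving them
    # here, at call time, avoids a circular import at module load time.
    classes = [Cut, Concept]
    return {cls.__name__: cls for cls in classes}

print(loadable_models())   # {'Cut': <class '__main__.Cut'>, 'Concept': <class '__main__.Concept'>}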
wmayner/pyphi
pyphi/jsonify.py
jsonify
python
def jsonify(obj):  # pylint: disable=too-many-return-statements
    """Return a JSON-encodable representation of an object, recursively using
    any available ``to_json`` methods, converting NumPy arrays and datatypes
    to native lists and types along the way.
    """
    # Call the `to_json` method if available and add metadata.
    if hasattr(obj, 'to_json'):
        d = obj.to_json()
        _push_metadata(d, obj)
        return jsonify(d)

    # If we have a numpy array, convert it to a list.
    if isinstance(obj, np.ndarray):
        return obj.tolist()

    # If we have NumPy datatypes, convert them to native types.
    if isinstance(obj, (np.int32, np.int64)):
        return int(obj)
    if isinstance(obj, np.float64):
        return float(obj)

    # Recurse over dictionaries.
    if isinstance(obj, dict):
        return _jsonify_dict(obj)

    # Recurse over object dictionaries.
    if hasattr(obj, '__dict__'):
        return _jsonify_dict(obj.__dict__)

    # Recurse over lists and tuples.
    if isinstance(obj, (list, tuple)):
        return [jsonify(item) for item in obj]

    # Otherwise, give up and hope it's serializable.
    return obj
[ "def", "jsonify", "(", "obj", ")", ":", "# pylint: disable=too-many-return-statements", "# Call the `to_json` method if available and add metadata.", "if", "hasattr", "(", "obj", ",", "'to_json'", ")", ":", "d", "=", "obj", ".", "to_json", "(", ")", "_push_metadata", "(", "d", ",", "obj", ")", "return", "jsonify", "(", "d", ")", "# If we have a numpy array, convert it to a list.", "if", "isinstance", "(", "obj", ",", "np", ".", "ndarray", ")", ":", "return", "obj", ".", "tolist", "(", ")", "# If we have NumPy datatypes, convert them to native types.", "if", "isinstance", "(", "obj", ",", "(", "np", ".", "int32", ",", "np", ".", "int64", ")", ")", ":", "return", "int", "(", "obj", ")", "if", "isinstance", "(", "obj", ",", "np", ".", "float64", ")", ":", "return", "float", "(", "obj", ")", "# Recurse over dictionaries.", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "_jsonify_dict", "(", "obj", ")", "# Recurse over object dictionaries.", "if", "hasattr", "(", "obj", ",", "'__dict__'", ")", ":", "return", "_jsonify_dict", "(", "obj", ".", "__dict__", ")", "# Recurse over lists and tuples.", "if", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "jsonify", "(", "item", ")", "for", "item", "in", "obj", "]", "# Otherwise, give up and hope it's serializable.", "return", "obj" ]
Return a JSON-encodable representation of an object, recursively using any available ``to_json`` methods, converting NumPy arrays and datatypes to native lists and types along the way.
[ "Return", "a", "JSON", "-", "encodable", "representation", "of", "an", "object", "recursively", "using", "any", "available", "to_json", "methods", "converting", "NumPy", "arrays", "and", "datatypes", "to", "native", "lists", "and", "types", "along", "the", "way", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/jsonify.py#L107-L141
train
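The same NumPy-to-native conversions can also be hooked into the standard library's encoder. This sketch is independent of PyPhi's `jsonify` (no `to_json`/metadata handling) and just shows the conversion idea:

import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    # Convert NumPy arrays and scalars to plain Python types.
    def default(self, o):
        if isinstance(o, np.ndarray):
            return o.tolist()
        if isinstance(o, np.integer):
            return int(o)
        if isinstance(o, np.floating):
            return float(o)
        return super().default(o)

print(json.dumps({'tpm': np.eye(2), 'phi': np.float64(0.5)}, cls=NumpyEncoder))
# {"tpm": [[1.0, 0.0], [0.0, 1.0]], "phi": 0.5}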
wmayner/pyphi
pyphi/jsonify.py
_check_version
python
def _check_version(version):
    """Check whether the JSON version matches the PyPhi version."""
    if version != pyphi.__version__:
        raise pyphi.exceptions.JSONVersionError(
            'Cannot load JSON from a different version of PyPhi. '
            'JSON version = {0}, current version = {1}.'.format(
                version, pyphi.__version__))
[ "def", "_check_version", "(", "version", ")", ":", "if", "version", "!=", "pyphi", ".", "__version__", ":", "raise", "pyphi", ".", "exceptions", ".", "JSONVersionError", "(", "'Cannot load JSON from a different version of PyPhi. '", "'JSON version = {0}, current version = {1}.'", ".", "format", "(", "version", ",", "pyphi", ".", "__version__", ")", ")" ]
Check whether the JSON version matches the PyPhi version.
[ "Check", "whether", "the", "JSON", "version", "matches", "the", "PyPhi", "version", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/jsonify.py#L176-L182
train
wmayner/pyphi
pyphi/jsonify.py
PyPhiJSONDecoder._load_object
python
def _load_object(self, obj):
    """Recursively load a PyPhi object.

    PyPhi models are recursively loaded, using the model metadata to
    recreate the original object relations. Lists are cast to tuples
    because most objects in PyPhi which are serialized to lists (eg.
    mechanisms and purviews) are ultimately tuples. Other lists (tpms,
    repertoires) should be cast to the correct type in init methods.
    """
    if isinstance(obj, dict):
        obj = {k: self._load_object(v) for k, v in obj.items()}

        # Load a serialized PyPhi model
        if _is_model(obj):
            return self._load_model(obj)

    elif isinstance(obj, list):
        return tuple(self._load_object(item) for item in obj)

    return obj
[ "def", "_load_object", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "obj", "=", "{", "k", ":", "self", ".", "_load_object", "(", "v", ")", "for", "k", ",", "v", "in", "obj", ".", "items", "(", ")", "}", "# Load a serialized PyPhi model", "if", "_is_model", "(", "obj", ")", ":", "return", "self", ".", "_load_model", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "return", "tuple", "(", "self", ".", "_load_object", "(", "item", ")", "for", "item", "in", "obj", ")", "return", "obj" ]
Recursively load a PyPhi object. PyPhi models are recursively loaded, using the model metadata to recreate the original object relations. Lists are cast to tuples because most objects in PyPhi which are serialized to lists (eg. mechanisms and purviews) are ultimately tuples. Other lists (tpms, repertoires) should be cast to the correct type in init methods.
[ "Recursively", "load", "a", "PyPhi", "object", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/jsonify.py#L212-L230
train
wmayner/pyphi
pyphi/jsonify.py
PyPhiJSONDecoder._load_model
python
def _load_model(self, dct):
    """Load a serialized PyPhi model.

    The object is memoized for reuse elsewhere in the object graph.
    """
    classname, version, _ = _pop_metadata(dct)

    _check_version(version)

    cls = self._models[classname]

    # Use `from_json` if available
    if hasattr(cls, 'from_json'):
        return cls.from_json(dct)

    # Default to object constructor
    return cls(**dct)
[ "def", "_load_model", "(", "self", ",", "dct", ")", ":", "classname", ",", "version", ",", "_", "=", "_pop_metadata", "(", "dct", ")", "_check_version", "(", "version", ")", "cls", "=", "self", ".", "_models", "[", "classname", "]", "# Use `from_json` if available", "if", "hasattr", "(", "cls", ",", "'from_json'", ")", ":", "return", "cls", ".", "from_json", "(", "dct", ")", "# Default to object constructor", "return", "cls", "(", "*", "*", "dct", ")" ]
Load a serialized PyPhi model. The object is memoized for reuse elsewhere in the object graph.
[ "Load", "a", "serialized", "PyPhi", "model", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/jsonify.py#L233-L248
train
wmayner/pyphi
pyphi/distance.py
_compute_hamming_matrix
python
def _compute_hamming_matrix(N):
    """Compute and store a Hamming matrix for |N| nodes.

    Hamming matrices have the following sizes::

        N   MBs
        ==  ===
        9   2
        10  8
        11  32
        12  128
        13  512

    Given these sizes and the fact that large matrices are needed
    infrequently, we store computed matrices using the Joblib filesystem
    cache instead of adding computed matrices to the ``_hamming_matrices``
    global and clogging up memory.

    This function is only called when
    |N| > ``_NUM_PRECOMPUTED_HAMMING_MATRICES``. Don't call this function
    directly; use |_hamming_matrix| instead.
    """
    possible_states = np.array(list(utils.all_states((N))))
    return cdist(possible_states, possible_states, 'hamming') * N
[ "def", "_compute_hamming_matrix", "(", "N", ")", ":", "possible_states", "=", "np", ".", "array", "(", "list", "(", "utils", ".", "all_states", "(", "(", "N", ")", ")", ")", ")", "return", "cdist", "(", "possible_states", ",", "possible_states", ",", "'hamming'", ")", "*", "N" ]
Compute and store a Hamming matrix for |N| nodes. Hamming matrices have the following sizes:: N MBs == === 9 2 10 8 11 32 12 128 13 512 Given these sizes and the fact that large matrices are needed infrequently, we store computed matrices using the Joblib filesystem cache instead of adding computed matrices to the ``_hamming_matrices`` global and clogging up memory. This function is only called when |N| > ``_NUM_PRECOMPUTED_HAMMING_MATRICES``. Don't call this function directly; use |_hamming_matrix| instead.
[ "Compute", "and", "store", "a", "Hamming", "matrix", "for", "|N|", "nodes", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L107-L130
train
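The `cdist(..., 'hamming') * N` trick works because SciPy's Hamming metric returns the fraction of differing positions, so multiplying by N recovers the count. A self-contained check for N = 2, using `itertools.product` in place of `utils.all_states` (which is not shown here and may order states differently):

import numpy as np
from itertools import product
from scipy.spatial.distance import cdist

N = 2
states = np.array(list(product((0, 1), repeat=N)))   # (0,0), (0,1), (1,0), (1,1)
hamming = cdist(states, states, 'hamming') * N       # differing-bit fraction * N

print(hamming)
# [[0. 1. 1. 2.]
#  [1. 0. 2. 1.]
#  [1. 2. 0. 1.]
#  [2. 1. 1. 0.]]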
wmayner/pyphi
pyphi/distance.py
effect_emd
python
def effect_emd(d1, d2):
    """Compute the EMD between two effect repertoires.

    Because the nodes are independent, the EMD between effect repertoires is
    equal to the sum of the EMDs between the marginal distributions of each
    node, and the EMD between marginal distribution for a node is the absolute
    difference in the probabilities that the node is OFF.

    Args:
        d1 (np.ndarray): The first repertoire.
        d2 (np.ndarray): The second repertoire.

    Returns:
        float: The EMD between ``d1`` and ``d2``.
    """
    return sum(abs(marginal_zero(d1, i) - marginal_zero(d2, i))
               for i in range(d1.ndim))
[ "def", "effect_emd", "(", "d1", ",", "d2", ")", ":", "return", "sum", "(", "abs", "(", "marginal_zero", "(", "d1", ",", "i", ")", "-", "marginal_zero", "(", "d2", ",", "i", ")", ")", "for", "i", "in", "range", "(", "d1", ".", "ndim", ")", ")" ]
Compute the EMD between two effect repertoires. Because the nodes are independent, the EMD between effect repertoires is equal to the sum of the EMDs between the marginal distributions of each node, and the EMD between marginal distribution for a node is the absolute difference in the probabilities that the node is OFF. Args: d1 (np.ndarray): The first repertoire. d2 (np.ndarray): The second repertoire. Returns: float: The EMD between ``d1`` and ``d2``.
[ "Compute", "the", "EMD", "between", "two", "effect", "repertoires", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L147-L163
train
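`marginal_zero` is not shown in this record; per the docstring it is the probability that node i is OFF, i.e. the marginal over all other axes evaluated at 0. A hedged standalone version and a two-node check of the summed-marginals formula:

import numpy as np

def marginal_zero(repertoire, node_index):
    # P(node is OFF): sum out every other axis, then take entry 0.
    other_axes = tuple(a for a in range(repertoire.ndim) if a != node_index)
    return repertoire.sum(axis=other_axes)[0]

# Two independent nodes: node 0 is OFF with p=0.75 in d1 and p=0.25 in d2;
# node 1 is OFF with p=0.5 in both.
d1 = np.outer([0.75, 0.25], [0.5, 0.5])
d2 = np.outer([0.25, 0.75], [0.5, 0.5])

emd = sum(abs(marginal_zero(d1, i) - marginal_zero(d2, i)) for i in range(d1.ndim))
print(emd)   # 0.5 -- only node 0's OFF-probability differs, by 0.5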
wmayner/pyphi
pyphi/distance.py
entropy_difference
python
def entropy_difference(d1, d2):
    """Return the difference in entropy between two distributions."""
    d1, d2 = flatten(d1), flatten(d2)
    return abs(entropy(d1, base=2.0) - entropy(d2, base=2.0))
[ "def", "entropy_difference", "(", "d1", ",", "d2", ")", ":", "d1", ",", "d2", "=", "flatten", "(", "d1", ")", ",", "flatten", "(", "d2", ")", "return", "abs", "(", "entropy", "(", "d1", ",", "base", "=", "2.0", ")", "-", "entropy", "(", "d2", ",", "base", "=", "2.0", ")", ")" ]
Return the difference in entropy between two distributions.
[ "Return", "the", "difference", "in", "entropy", "between", "two", "distributions", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L196-L199
train
wmayner/pyphi
pyphi/distance.py
psq2
python
def psq2(d1, d2):
    """Compute the PSQ2 measure.

    Args:
        d1 (np.ndarray): The first distribution.
        d2 (np.ndarray): The second distribution.
    """
    d1, d2 = flatten(d1), flatten(d2)

    def f(p):
        return sum((p ** 2) * np.nan_to_num(np.log(p * len(p))))

    return abs(f(d1) - f(d2))
[ "def", "psq2", "(", "d1", ",", "d2", ")", ":", "d1", ",", "d2", "=", "flatten", "(", "d1", ")", ",", "flatten", "(", "d2", ")", "def", "f", "(", "p", ")", ":", "return", "sum", "(", "(", "p", "**", "2", ")", "*", "np", ".", "nan_to_num", "(", "np", ".", "log", "(", "p", "*", "len", "(", "p", ")", ")", ")", ")", "return", "abs", "(", "f", "(", "d1", ")", "-", "f", "(", "d2", ")", ")" ]
Compute the PSQ2 measure. Args: d1 (np.ndarray): The first distribution. d2 (np.ndarray): The second distribution.
[ "Compute", "the", "PSQ2", "measure", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L204-L216
train
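A quick numeric sanity check of the inner function, following directly from the definition (the values below are worked out here, not taken from PyPhi): for a uniform distribution p * len(p) = 1 everywhere, so f(p) = 0, while for a deterministic distribution f(p) = log n.

import numpy as np

def f(p):
    return sum((p ** 2) * np.nan_to_num(np.log(p * len(p))))

uniform = np.ones(4) / 4
delta = np.array([1.0, 0.0, 0.0, 0.0])
# np.log(0) warns, but the p**2 factor zeroes those terms out after nan_to_num.

print(f(uniform))                  # 0.0
print(f(delta), np.log(4))         # both ~1.386
print(abs(f(delta) - f(uniform)))  # PSQ2 between the two distributions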
wmayner/pyphi
pyphi/distance.py
mp2q
python
def mp2q(p, q):
    """Compute the MP2Q measure.

    Args:
        p (np.ndarray): The unpartitioned repertoire
        q (np.ndarray): The partitioned repertoire
    """
    p, q = flatten(p), flatten(q)
    entropy_dist = 1 / len(p)
    return sum(entropy_dist * np.nan_to_num((p ** 2) / q * np.log(p / q)))
[ "def", "mp2q", "(", "p", ",", "q", ")", ":", "p", ",", "q", "=", "flatten", "(", "p", ")", ",", "flatten", "(", "q", ")", "entropy_dist", "=", "1", "/", "len", "(", "p", ")", "return", "sum", "(", "entropy_dist", "*", "np", ".", "nan_to_num", "(", "(", "p", "**", "2", ")", "/", "q", "*", "np", ".", "log", "(", "p", "/", "q", ")", ")", ")" ]
Compute the MP2Q measure. Args: p (np.ndarray): The unpartitioned repertoire q (np.ndarray): The partitioned repertoire
[ "Compute", "the", "MP2Q", "measure", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L221-L230
train
wmayner/pyphi
pyphi/distance.py
klm
python
def klm(p, q):
    """Compute the KLM divergence."""
    p, q = flatten(p), flatten(q)
    return max(abs(p * np.nan_to_num(np.log(p / q))))
[ "def", "klm", "(", "p", ",", "q", ")", ":", "p", ",", "q", "=", "flatten", "(", "p", ")", ",", "flatten", "(", "q", ")", "return", "max", "(", "abs", "(", "p", "*", "np", ".", "nan_to_num", "(", "np", ".", "log", "(", "p", "/", "q", ")", ")", ")", ")" ]
Compute the KLM divergence.
[ "Compute", "the", "KLM", "divergence", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L236-L239
train
wmayner/pyphi
pyphi/distance.py
directional_emd
python
def directional_emd(direction, d1, d2):
    """Compute the EMD between two repertoires for a given direction.

    The full EMD computation is used for cause repertoires. A fast analytic
    solution is used for effect repertoires.

    Args:
        direction (Direction): |CAUSE| or |EFFECT|.
        d1 (np.ndarray): The first repertoire.
        d2 (np.ndarray): The second repertoire.

    Returns:
        float: The EMD between ``d1`` and ``d2``, rounded to |PRECISION|.

    Raises:
        ValueError: If ``direction`` is invalid.
    """
    if direction == Direction.CAUSE:
        func = hamming_emd
    elif direction == Direction.EFFECT:
        func = effect_emd
    else:
        # TODO: test that ValueError is raised
        validate.direction(direction)

    return round(func(d1, d2), config.PRECISION)
[ "def", "directional_emd", "(", "direction", ",", "d1", ",", "d2", ")", ":", "if", "direction", "==", "Direction", ".", "CAUSE", ":", "func", "=", "hamming_emd", "elif", "direction", "==", "Direction", ".", "EFFECT", ":", "func", "=", "effect_emd", "else", ":", "# TODO: test that ValueError is raised", "validate", ".", "direction", "(", "direction", ")", "return", "round", "(", "func", "(", "d1", ",", "d2", ")", ",", "config", ".", "PRECISION", ")" ]
Compute the EMD between two repertoires for a given direction. The full EMD computation is used for cause repertoires. A fast analytic solution is used for effect repertoires. Args: direction (Direction): |CAUSE| or |EFFECT|. d1 (np.ndarray): The first repertoire. d2 (np.ndarray): The second repertoire. Returns: float: The EMD between ``d1`` and ``d2``, rounded to |PRECISION|. Raises: ValueError: If ``direction`` is invalid.
[ "Compute", "the", "EMD", "between", "two", "repertoires", "for", "a", "given", "direction", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L242-L267
train
wmayner/pyphi
pyphi/distance.py
repertoire_distance
python
def repertoire_distance(direction, r1, r2):
    """Compute the distance between two repertoires for the given direction.

    Args:
        direction (Direction): |CAUSE| or |EFFECT|.
        r1 (np.ndarray): The first repertoire.
        r2 (np.ndarray): The second repertoire.

    Returns:
        float: The distance between ``r1`` and ``r2``, rounded to |PRECISION|.
    """
    if config.MEASURE == 'EMD':
        dist = directional_emd(direction, r1, r2)
    else:
        dist = measures[config.MEASURE](r1, r2)

    return round(dist, config.PRECISION)
[ "def", "repertoire_distance", "(", "direction", ",", "r1", ",", "r2", ")", ":", "if", "config", ".", "MEASURE", "==", "'EMD'", ":", "dist", "=", "directional_emd", "(", "direction", ",", "r1", ",", "r2", ")", "else", ":", "dist", "=", "measures", "[", "config", ".", "MEASURE", "]", "(", "r1", ",", "r2", ")", "return", "round", "(", "dist", ",", "config", ".", "PRECISION", ")" ]
Compute the distance between two repertoires for the given direction. Args: direction (Direction): |CAUSE| or |EFFECT|. r1 (np.ndarray): The first repertoire. r2 (np.ndarray): The second repertoire. Returns: float: The distance between ``r1`` and ``r2``, rounded to |PRECISION|.
[ "Compute", "the", "distance", "between", "two", "repertoires", "for", "the", "given", "direction", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L270-L286
train
wmayner/pyphi
pyphi/distance.py
system_repertoire_distance
python
def system_repertoire_distance(r1, r2):
    """Compute the distance between two repertoires of a system.

    Args:
        r1 (np.ndarray): The first repertoire.
        r2 (np.ndarray): The second repertoire.

    Returns:
        float: The distance between ``r1`` and ``r2``.
    """
    if config.MEASURE in measures.asymmetric():
        raise ValueError(
            '{} is asymmetric and cannot be used as a system-level '
            'irreducibility measure.'.format(config.MEASURE))

    return measures[config.MEASURE](r1, r2)
[ "def", "system_repertoire_distance", "(", "r1", ",", "r2", ")", ":", "if", "config", ".", "MEASURE", "in", "measures", ".", "asymmetric", "(", ")", ":", "raise", "ValueError", "(", "'{} is asymmetric and cannot be used as a system-level '", "'irreducibility measure.'", ".", "format", "(", "config", ".", "MEASURE", ")", ")", "return", "measures", "[", "config", ".", "MEASURE", "]", "(", "r1", ",", "r2", ")" ]
Compute the distance between two repertoires of a system. Args: r1 (np.ndarray): The first repertoire. r2 (np.ndarray): The second repertoire. Returns: float: The distance between ``r1`` and ``r2``.
[ "Compute", "the", "distance", "between", "two", "repertoires", "of", "a", "system", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L289-L304
train
wmayner/pyphi
pyphi/distance.py
MeasureRegistry.register
python
def register(self, name, asymmetric=False):
    """Decorator for registering a measure with PyPhi.

    Args:
        name (string): The name of the measure.

    Keyword Args:
        asymmetric (boolean): ``True`` if the measure is asymmetric.
    """
    def register_func(func):
        if asymmetric:
            self._asymmetric.append(name)
        self.store[name] = func
        return func

    return register_func
[ "def", "register", "(", "self", ",", "name", ",", "asymmetric", "=", "False", ")", ":", "def", "register_func", "(", "func", ")", ":", "if", "asymmetric", ":", "self", ".", "_asymmetric", ".", "append", "(", "name", ")", "self", ".", "store", "[", "name", "]", "=", "func", "return", "func", "return", "register_func" ]
Decorator for registering a measure with PyPhi. Args: name (string): The name of the measure. Keyword Args: asymmetric (boolean): ``True`` if the measure is asymmetric.
[ "Decorator", "for", "registering", "a", "measure", "with", "PyPhi", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distance.py#L46-L60
train
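The registry-plus-decorator pattern is self-contained and worth seeing end to end; below is a simplified sketch (the `Registry` class body and the 'L1' measure are made up for illustration, not PyPhi's `MeasureRegistry`):

class Registry:
    def __init__(self):
        self.store = {}
        self._asymmetric = []

    def register(self, name, asymmetric=False):
        # Return a decorator that files `func` under `name`.
        def register_func(func):
            if asymmetric:
                self._asymmetric.append(name)
            self.store[name] = func
            return func
        return register_func

measures = Registry()

@measures.register('L1')
def l1(p, q):
    return sum(abs(a - b) for a, b in zip(p, q))

print(measures.store['L1']([0, 1], [1, 1]))   # 1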
wmayner/pyphi
pyphi/partition.py
partitions
python
def partitions(collection):
    """Generate all set partitions of a collection.

    Example:
        >>> list(partitions(range(3)))  # doctest: +NORMALIZE_WHITESPACE
        [[[0, 1, 2]],
         [[0], [1, 2]],
         [[0, 1], [2]],
         [[1], [0, 2]],
         [[0], [1], [2]]]
    """
    collection = list(collection)

    # Special cases
    if not collection:
        return
    if len(collection) == 1:
        yield [collection]
        return

    first = collection[0]
    for smaller in partitions(collection[1:]):
        for n, subset in enumerate(smaller):
            yield smaller[:n] + [[first] + subset] + smaller[n+1:]
        yield [[first]] + smaller
[ "def", "partitions", "(", "collection", ")", ":", "collection", "=", "list", "(", "collection", ")", "# Special cases", "if", "not", "collection", ":", "return", "if", "len", "(", "collection", ")", "==", "1", ":", "yield", "[", "collection", "]", "return", "first", "=", "collection", "[", "0", "]", "for", "smaller", "in", "partitions", "(", "collection", "[", "1", ":", "]", ")", ":", "for", "n", ",", "subset", "in", "enumerate", "(", "smaller", ")", ":", "yield", "smaller", "[", ":", "n", "]", "+", "[", "[", "first", "]", "+", "subset", "]", "+", "smaller", "[", "n", "+", "1", ":", "]", "yield", "[", "[", "first", "]", "]", "+", "smaller" ]
Generate all set partitions of a collection. Example: >>> list(partitions(range(3))) # doctest: +NORMALIZE_WHITESPACE [[[0, 1, 2]], [[0], [1, 2]], [[0, 1], [2]], [[1], [0, 2]], [[0], [1], [2]]]
[ "Generate", "all", "set", "partitions", "of", "a", "collection", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L18-L43
train
wmayner/pyphi
pyphi/partition.py
bipartition_indices
python
def bipartition_indices(N):
    """Return indices for undirected bipartitions of a sequence.

    Args:
        N (int): The length of the sequence.

    Returns:
        list: A list of tuples containing the indices for each of the two
        parts.

    Example:
        >>> N = 3
        >>> bipartition_indices(N)
        [((), (0, 1, 2)), ((0,), (1, 2)), ((1,), (0, 2)), ((0, 1), (2,))]
    """
    result = []
    if N <= 0:
        return result

    for i in range(2**(N - 1)):
        part = [[], []]
        for n in range(N):
            bit = (i >> n) & 1
            part[bit].append(n)
        result.append((tuple(part[1]), tuple(part[0])))
    return result
[ "def", "bipartition_indices", "(", "N", ")", ":", "result", "=", "[", "]", "if", "N", "<=", "0", ":", "return", "result", "for", "i", "in", "range", "(", "2", "**", "(", "N", "-", "1", ")", ")", ":", "part", "=", "[", "[", "]", ",", "[", "]", "]", "for", "n", "in", "range", "(", "N", ")", ":", "bit", "=", "(", "i", ">>", "n", ")", "&", "1", "part", "[", "bit", "]", ".", "append", "(", "n", ")", "result", ".", "append", "(", "(", "tuple", "(", "part", "[", "1", "]", ")", ",", "tuple", "(", "part", "[", "0", "]", ")", ")", ")", "return", "result" ]
Return indices for undirected bipartitions of a sequence. Args: N (int): The length of the sequence. Returns: list: A list of tuples containing the indices for each of the two parts. Example: >>> N = 3 >>> bipartition_indices(N) [((), (0, 1, 2)), ((0,), (1, 2)), ((1,), (0, 2)), ((0, 1), (2,))]
[ "Return", "indices", "for", "undirected", "bipartitions", "of", "a", "sequence", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L47-L72
train
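The loop enumerates bipartitions by reading the bits of `i`: bit n decides which side element n goes to, and ranging over 2**(N - 1) rather than 2**N keeps only one member of each mirrored pair. A short trace for N = 3 and i = 3:

N, i = 3, 3               # i = 0b011
part = [[], []]
for n in range(N):
    bit = (i >> n) & 1    # bits of i, least significant first: 1, 1, 0
    part[bit].append(n)
print(part)               # [[2], [0, 1]] -> recorded as the bipartition ((0, 1), (2,))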
wmayner/pyphi
pyphi/partition.py
bipartition
python
def bipartition(seq):
    """Return a list of bipartitions for a sequence.

    Args:
        seq (Iterable): The sequence to partition.

    Returns:
        list[tuple[tuple]]: A list of tuples containing each of the two
        partitions.

    Example:
        >>> bipartition((1,2,3))
        [((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,))]
    """
    return [(tuple(seq[i] for i in part0_idx), tuple(seq[j] for j in part1_idx))
            for part0_idx, part1_idx in bipartition_indices(len(seq))]
[ "def", "bipartition", "(", "seq", ")", ":", "return", "[", "(", "tuple", "(", "seq", "[", "i", "]", "for", "i", "in", "part0_idx", ")", ",", "tuple", "(", "seq", "[", "j", "]", "for", "j", "in", "part1_idx", ")", ")", "for", "part0_idx", ",", "part1_idx", "in", "bipartition_indices", "(", "len", "(", "seq", ")", ")", "]" ]
Return a list of bipartitions for a sequence. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two partitions. Example: >>> bipartition((1,2,3)) [((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,))]
[ "Return", "a", "list", "of", "bipartitions", "for", "a", "sequence", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L76-L92
train
wmayner/pyphi
pyphi/partition.py
directed_bipartition
python
def directed_bipartition(seq, nontrivial=False):
    """Return a list of directed bipartitions for a sequence.

    Args:
        seq (Iterable): The sequence to partition.

    Returns:
        list[tuple[tuple]]: A list of tuples containing each of the two
        parts.

    Example:
        >>> directed_bipartition((1, 2, 3))  # doctest: +NORMALIZE_WHITESPACE
        [((), (1, 2, 3)),
         ((1,), (2, 3)),
         ((2,), (1, 3)),
         ((1, 2), (3,)),
         ((3,), (1, 2)),
         ((1, 3), (2,)),
         ((2, 3), (1,)),
         ((1, 2, 3), ())]
    """
    bipartitions = [
        (tuple(seq[i] for i in part0_idx), tuple(seq[j] for j in part1_idx))
        for part0_idx, part1_idx in directed_bipartition_indices(len(seq))
    ]
    if nontrivial:
        # The first and last partitions have a part that is empty; skip them.
        # NOTE: This depends on the implementation of
        # `directed_bipartition_indices`.
        return bipartitions[1:-1]
    return bipartitions
[ "def", "directed_bipartition", "(", "seq", ",", "nontrivial", "=", "False", ")", ":", "bipartitions", "=", "[", "(", "tuple", "(", "seq", "[", "i", "]", "for", "i", "in", "part0_idx", ")", ",", "tuple", "(", "seq", "[", "j", "]", "for", "j", "in", "part1_idx", ")", ")", "for", "part0_idx", ",", "part1_idx", "in", "directed_bipartition_indices", "(", "len", "(", "seq", ")", ")", "]", "if", "nontrivial", ":", "# The first and last partitions have a part that is empty; skip them.", "# NOTE: This depends on the implementation of", "# `directed_partition_indices`.", "return", "bipartitions", "[", "1", ":", "-", "1", "]", "return", "bipartitions" ]
Return a list of directed bipartitions for a sequence. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two parts. Example: >>> directed_bipartition((1, 2, 3)) # doctest: +NORMALIZE_WHITESPACE [((), (1, 2, 3)), ((1,), (2, 3)), ((2,), (1, 3)), ((1, 2), (3,)), ((3,), (1, 2)), ((1, 3), (2,)), ((2, 3), (1,)), ((1, 2, 3), ())]
[ "Return", "a", "list", "of", "directed", "bipartitions", "for", "a", "sequence", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L123-L153
train
wmayner/pyphi
pyphi/partition.py
bipartition_of_one
python
def bipartition_of_one(seq):
    """Generate bipartitions where one part is of length 1."""
    seq = list(seq)
    for i, elt in enumerate(seq):
        yield ((elt,), tuple(seq[:i] + seq[(i + 1):]))
[ "def", "bipartition_of_one", "(", "seq", ")", ":", "seq", "=", "list", "(", "seq", ")", "for", "i", ",", "elt", "in", "enumerate", "(", "seq", ")", ":", "yield", "(", "(", "elt", ",", ")", ",", "tuple", "(", "seq", "[", ":", "i", "]", "+", "seq", "[", "(", "i", "+", "1", ")", ":", "]", ")", ")" ]
Generate bipartitions where one part is of length 1.
[ "Generate", "bipartitions", "where", "one", "part", "is", "of", "length", "1", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L156-L160
train
wmayner/pyphi
pyphi/partition.py
directed_bipartition_of_one
python
def directed_bipartition_of_one(seq):
    """Generate directed bipartitions where one part is of length 1.

    Args:
        seq (Iterable): The sequence to partition.

    Returns:
        list[tuple[tuple]]: A list of tuples containing each of the two
        partitions.

    Example:
        >>> partitions = directed_bipartition_of_one((1, 2, 3))
        >>> list(partitions)  # doctest: +NORMALIZE_WHITESPACE
        [((1,), (2, 3)),
         ((2,), (1, 3)),
         ((3,), (1, 2)),
         ((2, 3), (1,)),
         ((1, 3), (2,)),
         ((1, 2), (3,))]
    """
    bipartitions = list(bipartition_of_one(seq))
    return chain(bipartitions, reverse_elements(bipartitions))
[ "def", "directed_bipartition_of_one", "(", "seq", ")", ":", "bipartitions", "=", "list", "(", "bipartition_of_one", "(", "seq", ")", ")", "return", "chain", "(", "bipartitions", ",", "reverse_elements", "(", "bipartitions", ")", ")" ]
Generate directed bipartitions where one part is of length 1. Args: seq (Iterable): The sequence to partition. Returns: list[tuple[tuple]]: A list of tuples containing each of the two partitions. Example: >>> partitions = directed_bipartition_of_one((1, 2, 3)) >>> list(partitions) # doctest: +NORMALIZE_WHITESPACE [((1,), (2, 3)), ((2,), (1, 3)), ((3,), (1, 2)), ((2, 3), (1,)), ((1, 3), (2,)), ((1, 2), (3,))]
[ "Generate", "directed", "bipartitions", "where", "one", "part", "is", "of", "length", "1", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L169-L190
train
wmayner/pyphi
pyphi/partition.py
directed_tripartition_indices
def directed_tripartition_indices(N): """Return indices for directed tripartitions of a sequence. Args: N (int): The length of the sequence. Returns: list[tuple]: A list of tuples containing the indices for each partition. Example: >>> N = 1 >>> directed_tripartition_indices(N) [((0,), (), ()), ((), (0,), ()), ((), (), (0,))] """ result = [] if N <= 0: return result base = [0, 1, 2] for key in product(base, repeat=N): part = [[], [], []] for i, location in enumerate(key): part[location].append(i) result.append(tuple(tuple(p) for p in part)) return result
python
def directed_tripartition_indices(N): """Return indices for directed tripartitions of a sequence. Args: N (int): The length of the sequence. Returns: list[tuple]: A list of tuples containing the indices for each partition. Example: >>> N = 1 >>> directed_tripartition_indices(N) [((0,), (), ()), ((), (0,), ()), ((), (), (0,))] """ result = [] if N <= 0: return result base = [0, 1, 2] for key in product(base, repeat=N): part = [[], [], []] for i, location in enumerate(key): part[location].append(i) result.append(tuple(tuple(p) for p in part)) return result
[ "def", "directed_tripartition_indices", "(", "N", ")", ":", "result", "=", "[", "]", "if", "N", "<=", "0", ":", "return", "result", "base", "=", "[", "0", ",", "1", ",", "2", "]", "for", "key", "in", "product", "(", "base", ",", "repeat", "=", "N", ")", ":", "part", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", "]", "for", "i", ",", "location", "in", "enumerate", "(", "key", ")", ":", "part", "[", "location", "]", ".", "append", "(", "i", ")", "result", ".", "append", "(", "tuple", "(", "tuple", "(", "p", ")", "for", "p", "in", "part", ")", ")", "return", "result" ]
Return indices for directed tripartitions of a sequence. Args: N (int): The length of the sequence. Returns: list[tuple]: A list of tuples containing the indices for each partition. Example: >>> N = 1 >>> directed_tripartition_indices(N) [((0,), (), ()), ((), (0,), ()), ((), (), (0,))]
[ "Return", "indices", "for", "directed", "tripartitions", "of", "a", "sequence", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L194-L221
train
wmayner/pyphi
pyphi/partition.py
directed_tripartition
def directed_tripartition(seq): """Generator over all directed tripartitions of a sequence. Args: seq (Iterable): a sequence. Yields: tuple[tuple]: A tripartition of ``seq``. Example: >>> seq = (2, 5) >>> list(directed_tripartition(seq)) # doctest: +NORMALIZE_WHITESPACE [((2, 5), (), ()), ((2,), (5,), ()), ((2,), (), (5,)), ((5,), (2,), ()), ((), (2, 5), ()), ((), (2,), (5,)), ((5,), (), (2,)), ((), (5,), (2,)), ((), (), (2, 5))] """ for a, b, c in directed_tripartition_indices(len(seq)): yield (tuple(seq[i] for i in a), tuple(seq[j] for j in b), tuple(seq[k] for k in c))
python
def directed_tripartition(seq): """Generator over all directed tripartitions of a sequence. Args: seq (Iterable): a sequence. Yields: tuple[tuple]: A tripartition of ``seq``. Example: >>> seq = (2, 5) >>> list(directed_tripartition(seq)) # doctest: +NORMALIZE_WHITESPACE [((2, 5), (), ()), ((2,), (5,), ()), ((2,), (), (5,)), ((5,), (2,), ()), ((), (2, 5), ()), ((), (2,), (5,)), ((5,), (), (2,)), ((), (5,), (2,)), ((), (), (2, 5))] """ for a, b, c in directed_tripartition_indices(len(seq)): yield (tuple(seq[i] for i in a), tuple(seq[j] for j in b), tuple(seq[k] for k in c))
[ "def", "directed_tripartition", "(", "seq", ")", ":", "for", "a", ",", "b", ",", "c", "in", "directed_tripartition_indices", "(", "len", "(", "seq", ")", ")", ":", "yield", "(", "tuple", "(", "seq", "[", "i", "]", "for", "i", "in", "a", ")", ",", "tuple", "(", "seq", "[", "j", "]", "for", "j", "in", "b", ")", ",", "tuple", "(", "seq", "[", "k", "]", "for", "k", "in", "c", ")", ")" ]
Generator over all directed tripartitions of a sequence. Args: seq (Iterable): a sequence. Yields: tuple[tuple]: A tripartition of ``seq``. Example: >>> seq = (2, 5) >>> list(directed_tripartition(seq)) # doctest: +NORMALIZE_WHITESPACE [((2, 5), (), ()), ((2,), (5,), ()), ((2,), (), (5,)), ((5,), (2,), ()), ((), (2, 5), ()), ((), (2,), (5,)), ((5,), (), (2,)), ((), (5,), (2,)), ((), (), (2, 5))]
[ "Generator", "over", "all", "directed", "tripartitions", "of", "a", "sequence", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L224-L249
train
wmayner/pyphi
pyphi/partition.py
k_partitions
def k_partitions(collection, k): """Generate all ``k``-partitions of a collection. Example: >>> list(k_partitions(range(3), 2)) [[[0, 1], [2]], [[0], [1, 2]], [[0, 2], [1]]] """ collection = list(collection) n = len(collection) # Special cases if n == 0 or k < 1: return [] if k == 1: return [[collection]] a = [0] * (n + 1) for j in range(1, k + 1): a[n - k + j] = j - 1 return _f(k, n, 0, n, a, k, collection)
python
def k_partitions(collection, k): """Generate all ``k``-partitions of a collection. Example: >>> list(k_partitions(range(3), 2)) [[[0, 1], [2]], [[0], [1, 2]], [[0, 2], [1]]] """ collection = list(collection) n = len(collection) # Special cases if n == 0 or k < 1: return [] if k == 1: return [[collection]] a = [0] * (n + 1) for j in range(1, k + 1): a[n - k + j] = j - 1 return _f(k, n, 0, n, a, k, collection)
[ "def", "k_partitions", "(", "collection", ",", "k", ")", ":", "collection", "=", "list", "(", "collection", ")", "n", "=", "len", "(", "collection", ")", "# Special cases", "if", "n", "==", "0", "or", "k", "<", "1", ":", "return", "[", "]", "if", "k", "==", "1", ":", "return", "[", "[", "collection", "]", "]", "a", "=", "[", "0", "]", "*", "(", "n", "+", "1", ")", "for", "j", "in", "range", "(", "1", ",", "k", "+", "1", ")", ":", "a", "[", "n", "-", "k", "+", "j", "]", "=", "j", "-", "1", "return", "_f", "(", "k", ",", "n", ",", "0", ",", "n", ",", "a", ",", "k", ",", "collection", ")" ]
Generate all ``k``-partitions of a collection. Example: >>> list(k_partitions(range(3), 2)) [[[0, 1], [2]], [[0], [1, 2]], [[0, 2], [1]]]
[ "Generate", "all", "k", "-", "partitions", "of", "a", "collection", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L335-L354
train
wmayner/pyphi
pyphi/partition.py
mip_partitions
def mip_partitions(mechanism, purview, node_labels=None): """Return a generator over all mechanism-purview partitions, based on the current configuration. """ func = partition_types[config.PARTITION_TYPE] return func(mechanism, purview, node_labels)
python
def mip_partitions(mechanism, purview, node_labels=None): """Return a generator over all mechanism-purview partitions, based on the current configuration. """ func = partition_types[config.PARTITION_TYPE] return func(mechanism, purview, node_labels)
[ "def", "mip_partitions", "(", "mechanism", ",", "purview", ",", "node_labels", "=", "None", ")", ":", "func", "=", "partition_types", "[", "config", ".", "PARTITION_TYPE", "]", "return", "func", "(", "mechanism", ",", "purview", ",", "node_labels", ")" ]
Return a generator over all mechanism-purview partitions, based on the current configuration.
[ "Return", "a", "generator", "over", "all", "mechanism", "-", "purview", "partitions", "based", "on", "the", "current", "configuration", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L378-L383
train
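mip_partitions has no example of its own; a hedged usage sketch follows, assuming pyphi is installed and the function is importable from pyphi.partition as the record's path indicates. Which partition scheme it yields depends on the runtime configuration.

from pyphi.partition import mip_partitions

# The generated scheme follows config.PARTITION_TYPE; the default
# bipartition scheme is assumed here.
for partition in mip_partitions(mechanism=(0,), purview=(2, 3)):
    print(partition)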
wmayner/pyphi
pyphi/partition.py
mip_bipartitions
def mip_bipartitions(mechanism, purview, node_labels=None): r"""Return an generator of all |small_phi| bipartitions of a mechanism over a purview. Excludes all bipartitions where one half is entirely empty, *e.g*:: A ∅ ─── ✕ ─── B ∅ is not valid, but :: A ∅ ─── ✕ ─── ∅ B is. Args: mechanism (tuple[int]): The mechanism to partition purview (tuple[int]): The purview to partition Yields: Bipartition: Where each bipartition is:: bipart[0].mechanism bipart[1].mechanism ─────────────────── ✕ ─────────────────── bipart[0].purview bipart[1].purview Example: >>> mechanism = (0,) >>> purview = (2, 3) >>> for partition in mip_bipartitions(mechanism, purview): ... print(partition, '\n') # doctest: +NORMALIZE_WHITESPACE ∅ 0 ─── ✕ ─── 2 3 <BLANKLINE> ∅ 0 ─── ✕ ─── 3 2 <BLANKLINE> ∅ 0 ─── ✕ ─── 2,3 ∅ """ numerators = bipartition(mechanism) denominators = directed_bipartition(purview) for n, d in product(numerators, denominators): if (n[0] or d[0]) and (n[1] or d[1]): yield Bipartition(Part(n[0], d[0]), Part(n[1], d[1]), node_labels=node_labels)
python
def mip_bipartitions(mechanism, purview, node_labels=None): r"""Return an generator of all |small_phi| bipartitions of a mechanism over a purview. Excludes all bipartitions where one half is entirely empty, *e.g*:: A ∅ ─── ✕ ─── B ∅ is not valid, but :: A ∅ ─── ✕ ─── ∅ B is. Args: mechanism (tuple[int]): The mechanism to partition purview (tuple[int]): The purview to partition Yields: Bipartition: Where each bipartition is:: bipart[0].mechanism bipart[1].mechanism ─────────────────── ✕ ─────────────────── bipart[0].purview bipart[1].purview Example: >>> mechanism = (0,) >>> purview = (2, 3) >>> for partition in mip_bipartitions(mechanism, purview): ... print(partition, '\n') # doctest: +NORMALIZE_WHITESPACE ∅ 0 ─── ✕ ─── 2 3 <BLANKLINE> ∅ 0 ─── ✕ ─── 3 2 <BLANKLINE> ∅ 0 ─── ✕ ─── 2,3 ∅ """ numerators = bipartition(mechanism) denominators = directed_bipartition(purview) for n, d in product(numerators, denominators): if (n[0] or d[0]) and (n[1] or d[1]): yield Bipartition(Part(n[0], d[0]), Part(n[1], d[1]), node_labels=node_labels)
[ "def", "mip_bipartitions", "(", "mechanism", ",", "purview", ",", "node_labels", "=", "None", ")", ":", "numerators", "=", "bipartition", "(", "mechanism", ")", "denominators", "=", "directed_bipartition", "(", "purview", ")", "for", "n", ",", "d", "in", "product", "(", "numerators", ",", "denominators", ")", ":", "if", "(", "n", "[", "0", "]", "or", "d", "[", "0", "]", ")", "and", "(", "n", "[", "1", "]", "or", "d", "[", "1", "]", ")", ":", "yield", "Bipartition", "(", "Part", "(", "n", "[", "0", "]", ",", "d", "[", "0", "]", ")", ",", "Part", "(", "n", "[", "1", "]", ",", "d", "[", "1", "]", ")", ",", "node_labels", "=", "node_labels", ")" ]
r"""Return an generator of all |small_phi| bipartitions of a mechanism over a purview. Excludes all bipartitions where one half is entirely empty, *e.g*:: A ∅ ─── ✕ ─── B ∅ is not valid, but :: A ∅ ─── ✕ ─── ∅ B is. Args: mechanism (tuple[int]): The mechanism to partition purview (tuple[int]): The purview to partition Yields: Bipartition: Where each bipartition is:: bipart[0].mechanism bipart[1].mechanism ─────────────────── ✕ ─────────────────── bipart[0].purview bipart[1].purview Example: >>> mechanism = (0,) >>> purview = (2, 3) >>> for partition in mip_bipartitions(mechanism, purview): ... print(partition, '\n') # doctest: +NORMALIZE_WHITESPACE ∅ 0 ─── ✕ ─── 2 3 <BLANKLINE> ∅ 0 ─── ✕ ─── 3 2 <BLANKLINE> ∅ 0 ─── ✕ ─── 2,3 ∅
[ "r", "Return", "an", "generator", "of", "all", "|small_phi|", "bipartitions", "of", "a", "mechanism", "over", "a", "purview", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L387-L439
train
wmayner/pyphi
pyphi/partition.py
wedge_partitions
def wedge_partitions(mechanism, purview, node_labels=None): """Return an iterator over all wedge partitions. These are partitions which strictly split the mechanism and allow a subset of the purview to be split into a third partition, e.g.:: A B ∅ ─── ✕ ─── ✕ ─── B C D See |PARTITION_TYPE| in |config| for more information. Args: mechanism (tuple[int]): A mechanism. purview (tuple[int]): A purview. Yields: Tripartition: all unique tripartitions of this mechanism and purview. """ numerators = bipartition(mechanism) denominators = directed_tripartition(purview) yielded = set() def valid(factoring): """Return whether the factoring should be considered.""" # pylint: disable=too-many-boolean-expressions numerator, denominator = factoring return ( (numerator[0] or denominator[0]) and (numerator[1] or denominator[1]) and ((numerator[0] and numerator[1]) or not denominator[0] or not denominator[1]) ) for n, d in filter(valid, product(numerators, denominators)): # Normalize order of parts to remove duplicates. tripart = Tripartition( Part(n[0], d[0]), Part(n[1], d[1]), Part((), d[2]), node_labels=node_labels ).normalize() # pylint: disable=bad-whitespace def nonempty(part): """Check that the part is not empty.""" return part.mechanism or part.purview def compressible(tripart): """Check if the tripartition can be transformed into a causally equivalent partition by combing two of its parts; e.g., A/∅ × B/∅ × ∅/CD is equivalent to AB/∅ × ∅/CD so we don't include it. """ pairs = [ (tripart[0], tripart[1]), (tripart[0], tripart[2]), (tripart[1], tripart[2]) ] for x, y in pairs: if (nonempty(x) and nonempty(y) and (x.mechanism + y.mechanism == () or x.purview + y.purview == ())): return True return False if not compressible(tripart) and tripart not in yielded: yielded.add(tripart) yield tripart
python
def wedge_partitions(mechanism, purview, node_labels=None): """Return an iterator over all wedge partitions. These are partitions which strictly split the mechanism and allow a subset of the purview to be split into a third partition, e.g.:: A B ∅ ─── ✕ ─── ✕ ─── B C D See |PARTITION_TYPE| in |config| for more information. Args: mechanism (tuple[int]): A mechanism. purview (tuple[int]): A purview. Yields: Tripartition: all unique tripartitions of this mechanism and purview. """ numerators = bipartition(mechanism) denominators = directed_tripartition(purview) yielded = set() def valid(factoring): """Return whether the factoring should be considered.""" # pylint: disable=too-many-boolean-expressions numerator, denominator = factoring return ( (numerator[0] or denominator[0]) and (numerator[1] or denominator[1]) and ((numerator[0] and numerator[1]) or not denominator[0] or not denominator[1]) ) for n, d in filter(valid, product(numerators, denominators)): # Normalize order of parts to remove duplicates. tripart = Tripartition( Part(n[0], d[0]), Part(n[1], d[1]), Part((), d[2]), node_labels=node_labels ).normalize() # pylint: disable=bad-whitespace def nonempty(part): """Check that the part is not empty.""" return part.mechanism or part.purview def compressible(tripart): """Check if the tripartition can be transformed into a causally equivalent partition by combing two of its parts; e.g., A/∅ × B/∅ × ∅/CD is equivalent to AB/∅ × ∅/CD so we don't include it. """ pairs = [ (tripart[0], tripart[1]), (tripart[0], tripart[2]), (tripart[1], tripart[2]) ] for x, y in pairs: if (nonempty(x) and nonempty(y) and (x.mechanism + y.mechanism == () or x.purview + y.purview == ())): return True return False if not compressible(tripart) and tripart not in yielded: yielded.add(tripart) yield tripart
[ "def", "wedge_partitions", "(", "mechanism", ",", "purview", ",", "node_labels", "=", "None", ")", ":", "numerators", "=", "bipartition", "(", "mechanism", ")", "denominators", "=", "directed_tripartition", "(", "purview", ")", "yielded", "=", "set", "(", ")", "def", "valid", "(", "factoring", ")", ":", "\"\"\"Return whether the factoring should be considered.\"\"\"", "# pylint: disable=too-many-boolean-expressions", "numerator", ",", "denominator", "=", "factoring", "return", "(", "(", "numerator", "[", "0", "]", "or", "denominator", "[", "0", "]", ")", "and", "(", "numerator", "[", "1", "]", "or", "denominator", "[", "1", "]", ")", "and", "(", "(", "numerator", "[", "0", "]", "and", "numerator", "[", "1", "]", ")", "or", "not", "denominator", "[", "0", "]", "or", "not", "denominator", "[", "1", "]", ")", ")", "for", "n", ",", "d", "in", "filter", "(", "valid", ",", "product", "(", "numerators", ",", "denominators", ")", ")", ":", "# Normalize order of parts to remove duplicates.", "tripart", "=", "Tripartition", "(", "Part", "(", "n", "[", "0", "]", ",", "d", "[", "0", "]", ")", ",", "Part", "(", "n", "[", "1", "]", ",", "d", "[", "1", "]", ")", ",", "Part", "(", "(", ")", ",", "d", "[", "2", "]", ")", ",", "node_labels", "=", "node_labels", ")", ".", "normalize", "(", ")", "# pylint: disable=bad-whitespace", "def", "nonempty", "(", "part", ")", ":", "\"\"\"Check that the part is not empty.\"\"\"", "return", "part", ".", "mechanism", "or", "part", ".", "purview", "def", "compressible", "(", "tripart", ")", ":", "\"\"\"Check if the tripartition can be transformed into a causally\n equivalent partition by combing two of its parts; e.g., A/∅ × B/∅ ×\n ∅/CD is equivalent to AB/∅ × ∅/CD so we don't include it.\n \"\"\"", "pairs", "=", "[", "(", "tripart", "[", "0", "]", ",", "tripart", "[", "1", "]", ")", ",", "(", "tripart", "[", "0", "]", ",", "tripart", "[", "2", "]", ")", ",", "(", "tripart", "[", "1", "]", ",", "tripart", "[", "2", "]", ")", "]", "for", "x", ",", "y", "in", "pairs", ":", "if", "(", "nonempty", "(", "x", ")", "and", "nonempty", "(", "y", ")", "and", "(", "x", ".", "mechanism", "+", "y", ".", "mechanism", "==", "(", ")", "or", "x", ".", "purview", "+", "y", ".", "purview", "==", "(", ")", ")", ")", ":", "return", "True", "return", "False", "if", "not", "compressible", "(", "tripart", ")", "and", "tripart", "not", "in", "yielded", ":", "yielded", ".", "add", "(", "tripart", ")", "yield", "tripart" ]
Return an iterator over all wedge partitions. These are partitions which strictly split the mechanism and allow a subset of the purview to be split into a third partition, e.g.:: A B ∅ ─── ✕ ─── ✕ ─── B C D See |PARTITION_TYPE| in |config| for more information. Args: mechanism (tuple[int]): A mechanism. purview (tuple[int]): A purview. Yields: Tripartition: all unique tripartitions of this mechanism and purview.
[ "Return", "an", "iterator", "over", "all", "wedge", "partitions", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L443-L511
train
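wedge_partitions also ships without a doctest. A hedged sketch, under the same assumption that pyphi is installed and importable as the path suggests:

from pyphi.partition import wedge_partitions

# Yields unique Tripartition objects; the third part always has an empty
# mechanism, matching the docstring above.
for tripart in wedge_partitions((0, 1), (2, 3)):
    print(tripart)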
wmayner/pyphi
pyphi/partition.py
all_partitions
def all_partitions(mechanism, purview, node_labels=None): """Return all possible partitions of a mechanism and purview. Partitions can consist of any number of parts. Args: mechanism (tuple[int]): A mechanism. purview (tuple[int]): A purview. Yields: KPartition: A partition of this mechanism and purview into ``k`` parts. """ for mechanism_partition in partitions(mechanism): mechanism_partition.append([]) n_mechanism_parts = len(mechanism_partition) max_purview_partition = min(len(purview), n_mechanism_parts) for n_purview_parts in range(1, max_purview_partition + 1): n_empty = n_mechanism_parts - n_purview_parts for purview_partition in k_partitions(purview, n_purview_parts): purview_partition = [tuple(_list) for _list in purview_partition] # Extend with empty tuples so purview partition has same size # as mechanism purview purview_partition.extend([()] * n_empty) # Unique permutations to avoid duplicates empties for purview_permutation in set( permutations(purview_partition)): parts = [ Part(tuple(m), tuple(p)) for m, p in zip(mechanism_partition, purview_permutation) ] # Must partition the mechanism, unless the purview is fully # cut away from the mechanism. if parts[0].mechanism == mechanism and parts[0].purview: continue yield KPartition(*parts, node_labels=node_labels)
python
def all_partitions(mechanism, purview, node_labels=None): """Return all possible partitions of a mechanism and purview. Partitions can consist of any number of parts. Args: mechanism (tuple[int]): A mechanism. purview (tuple[int]): A purview. Yields: KPartition: A partition of this mechanism and purview into ``k`` parts. """ for mechanism_partition in partitions(mechanism): mechanism_partition.append([]) n_mechanism_parts = len(mechanism_partition) max_purview_partition = min(len(purview), n_mechanism_parts) for n_purview_parts in range(1, max_purview_partition + 1): n_empty = n_mechanism_parts - n_purview_parts for purview_partition in k_partitions(purview, n_purview_parts): purview_partition = [tuple(_list) for _list in purview_partition] # Extend with empty tuples so purview partition has same size # as mechanism purview purview_partition.extend([()] * n_empty) # Unique permutations to avoid duplicates empties for purview_permutation in set( permutations(purview_partition)): parts = [ Part(tuple(m), tuple(p)) for m, p in zip(mechanism_partition, purview_permutation) ] # Must partition the mechanism, unless the purview is fully # cut away from the mechanism. if parts[0].mechanism == mechanism and parts[0].purview: continue yield KPartition(*parts, node_labels=node_labels)
[ "def", "all_partitions", "(", "mechanism", ",", "purview", ",", "node_labels", "=", "None", ")", ":", "for", "mechanism_partition", "in", "partitions", "(", "mechanism", ")", ":", "mechanism_partition", ".", "append", "(", "[", "]", ")", "n_mechanism_parts", "=", "len", "(", "mechanism_partition", ")", "max_purview_partition", "=", "min", "(", "len", "(", "purview", ")", ",", "n_mechanism_parts", ")", "for", "n_purview_parts", "in", "range", "(", "1", ",", "max_purview_partition", "+", "1", ")", ":", "n_empty", "=", "n_mechanism_parts", "-", "n_purview_parts", "for", "purview_partition", "in", "k_partitions", "(", "purview", ",", "n_purview_parts", ")", ":", "purview_partition", "=", "[", "tuple", "(", "_list", ")", "for", "_list", "in", "purview_partition", "]", "# Extend with empty tuples so purview partition has same size", "# as mechanism purview", "purview_partition", ".", "extend", "(", "[", "(", ")", "]", "*", "n_empty", ")", "# Unique permutations to avoid duplicates empties", "for", "purview_permutation", "in", "set", "(", "permutations", "(", "purview_partition", ")", ")", ":", "parts", "=", "[", "Part", "(", "tuple", "(", "m", ")", ",", "tuple", "(", "p", ")", ")", "for", "m", ",", "p", "in", "zip", "(", "mechanism_partition", ",", "purview_permutation", ")", "]", "# Must partition the mechanism, unless the purview is fully", "# cut away from the mechanism.", "if", "parts", "[", "0", "]", ".", "mechanism", "==", "mechanism", "and", "parts", "[", "0", "]", ".", "purview", ":", "continue", "yield", "KPartition", "(", "*", "parts", ",", "node_labels", "=", "node_labels", ")" ]
Return all possible partitions of a mechanism and purview. Partitions can consist of any number of parts. Args: mechanism (tuple[int]): A mechanism. purview (tuple[int]): A purview. Yields: KPartition: A partition of this mechanism and purview into ``k`` parts.
[ "Return", "all", "possible", "partitions", "of", "a", "mechanism", "and", "purview", "." ]
deeca69a084d782a6fde7bf26f59e93b593c5d77
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/partition.py#L515-L555
train
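For all_partitions, a hedged sketch that enumerates every KPartition of a small mechanism/purview pair (again assuming an installed pyphi):

from pyphi.partition import all_partitions

parts = list(all_partitions((0, 1), (2,)))
print(len(parts))        # number of k-partitions generated
for p in parts[:3]:      # peek at the first few
    print(p)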
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/imm.py
naturalize_string
def naturalize_string(key): """Analyzes string in a human way to enable natural sort :param nodename: The node name to analyze :returns: A structure that can be consumed by 'sorted' """ return [int(text) if text.isdigit() else text.lower() for text in re.split(numregex, key)]
python
def naturalize_string(key): """Analyzes string in a human way to enable natural sort :param nodename: The node name to analyze :returns: A structure that can be consumed by 'sorted' """ return [int(text) if text.isdigit() else text.lower() for text in re.split(numregex, key)]
[ "def", "naturalize_string", "(", "key", ")", ":", "return", "[", "int", "(", "text", ")", "if", "text", ".", "isdigit", "(", ")", "else", "text", ".", "lower", "(", ")", "for", "text", "in", "re", ".", "split", "(", "numregex", ",", "key", ")", "]" ]
Analyzes string in a human way to enable natural sort

    :param key: The node name to analyze
    :returns: A structure that can be consumed by 'sorted'
[ "Analyzes", "string", "in", "a", "human", "way", "to", "enable", "natural", "sort" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/imm.py#L52-L59
train
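naturalize_string depends on a module-level numregex that the record does not show. The standalone sketch below re-declares the function with an assumed digit-splitting pattern to illustrate natural sorting:

import re

# Assumed pattern; the actual module-level numregex value is not in the record.
numregex = re.compile('([0-9]+)')

def naturalize_string(key):
    return [int(text) if text.isdigit() else text.lower()
            for text in re.split(numregex, key)]

print(sorted(['node10', 'node2', 'node1'], key=naturalize_string))
# ['node1', 'node2', 'node10']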
openstack/pyghmi
pyghmi/ipmi/events.py
EventHandler.fetch_sel
def fetch_sel(self, ipmicmd, clear=False): """Fetch SEL entries Return an iterable of SEL entries. If clearing is requested, the fetch and clear will be done as an atomic operation, assuring no entries are dropped. :param ipmicmd: The Command object to use to interrogate :param clear: Whether to clear the entries upon retrieval. """ records = [] # First we do a fetch all without reservation, reducing the risk # of having a long lived reservation that gets canceled in the middle endat = self._fetch_entries(ipmicmd, 0, records) if clear and records: # don't bother clearing if there were no records # To do clear, we make a reservation first... rsp = ipmicmd.xraw_command(netfn=0xa, command=0x42) rsvid = struct.unpack_from('<H', rsp['data'])[0] # Then we refetch the tail with reservation (check for change) del records[-1] # remove the record that's about to be duplicated self._fetch_entries(ipmicmd, endat, records, rsvid) # finally clear the SEL # 0XAA means start initiate, 0x524c43 is 'RCL' or 'CLR' backwards clrdata = bytearray(struct.pack('<HI', rsvid, 0xAA524C43)) ipmicmd.xraw_command(netfn=0xa, command=0x47, data=clrdata) # Now to fixup the record timestamps... first we need to get the BMC # opinion of current time _fix_sel_time(records, ipmicmd) return records
python
def fetch_sel(self, ipmicmd, clear=False): """Fetch SEL entries Return an iterable of SEL entries. If clearing is requested, the fetch and clear will be done as an atomic operation, assuring no entries are dropped. :param ipmicmd: The Command object to use to interrogate :param clear: Whether to clear the entries upon retrieval. """ records = [] # First we do a fetch all without reservation, reducing the risk # of having a long lived reservation that gets canceled in the middle endat = self._fetch_entries(ipmicmd, 0, records) if clear and records: # don't bother clearing if there were no records # To do clear, we make a reservation first... rsp = ipmicmd.xraw_command(netfn=0xa, command=0x42) rsvid = struct.unpack_from('<H', rsp['data'])[0] # Then we refetch the tail with reservation (check for change) del records[-1] # remove the record that's about to be duplicated self._fetch_entries(ipmicmd, endat, records, rsvid) # finally clear the SEL # 0XAA means start initiate, 0x524c43 is 'RCL' or 'CLR' backwards clrdata = bytearray(struct.pack('<HI', rsvid, 0xAA524C43)) ipmicmd.xraw_command(netfn=0xa, command=0x47, data=clrdata) # Now to fixup the record timestamps... first we need to get the BMC # opinion of current time _fix_sel_time(records, ipmicmd) return records
[ "def", "fetch_sel", "(", "self", ",", "ipmicmd", ",", "clear", "=", "False", ")", ":", "records", "=", "[", "]", "# First we do a fetch all without reservation, reducing the risk", "# of having a long lived reservation that gets canceled in the middle", "endat", "=", "self", ".", "_fetch_entries", "(", "ipmicmd", ",", "0", ",", "records", ")", "if", "clear", "and", "records", ":", "# don't bother clearing if there were no records", "# To do clear, we make a reservation first...", "rsp", "=", "ipmicmd", ".", "xraw_command", "(", "netfn", "=", "0xa", ",", "command", "=", "0x42", ")", "rsvid", "=", "struct", ".", "unpack_from", "(", "'<H'", ",", "rsp", "[", "'data'", "]", ")", "[", "0", "]", "# Then we refetch the tail with reservation (check for change)", "del", "records", "[", "-", "1", "]", "# remove the record that's about to be duplicated", "self", ".", "_fetch_entries", "(", "ipmicmd", ",", "endat", ",", "records", ",", "rsvid", ")", "# finally clear the SEL", "# 0XAA means start initiate, 0x524c43 is 'RCL' or 'CLR' backwards", "clrdata", "=", "bytearray", "(", "struct", ".", "pack", "(", "'<HI'", ",", "rsvid", ",", "0xAA524C43", ")", ")", "ipmicmd", ".", "xraw_command", "(", "netfn", "=", "0xa", ",", "command", "=", "0x47", ",", "data", "=", "clrdata", ")", "# Now to fixup the record timestamps... first we need to get the BMC", "# opinion of current time", "_fix_sel_time", "(", "records", ",", "ipmicmd", ")", "return", "records" ]
Fetch SEL entries Return an iterable of SEL entries. If clearing is requested, the fetch and clear will be done as an atomic operation, assuring no entries are dropped. :param ipmicmd: The Command object to use to interrogate :param clear: Whether to clear the entries upon retrieval.
[ "Fetch", "SEL", "entries" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/events.py#L553-L581
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.oem_init
def oem_init(self): """Initialize the command object for OEM capabilities A number of capabilities are either totally OEM defined or else augmented somehow by knowledge of the OEM. This method does an interrogation to identify the OEM. """ if self._oemknown: return self._oem, self._oemknown = get_oem_handler(self._get_device_id(), self)
python
def oem_init(self): """Initialize the command object for OEM capabilities A number of capabilities are either totally OEM defined or else augmented somehow by knowledge of the OEM. This method does an interrogation to identify the OEM. """ if self._oemknown: return self._oem, self._oemknown = get_oem_handler(self._get_device_id(), self)
[ "def", "oem_init", "(", "self", ")", ":", "if", "self", ".", "_oemknown", ":", "return", "self", ".", "_oem", ",", "self", ".", "_oemknown", "=", "get_oem_handler", "(", "self", ".", "_get_device_id", "(", ")", ",", "self", ")" ]
Initialize the command object for OEM capabilities A number of capabilities are either totally OEM defined or else augmented somehow by knowledge of the OEM. This method does an interrogation to identify the OEM.
[ "Initialize", "the", "command", "object", "for", "OEM", "capabilities" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L228-L239
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.reset_bmc
def reset_bmc(self): """Do a cold reset in BMC """ response = self.raw_command(netfn=6, command=2) if 'error' in response: raise exc.IpmiException(response['error'])
python
def reset_bmc(self): """Do a cold reset in BMC """ response = self.raw_command(netfn=6, command=2) if 'error' in response: raise exc.IpmiException(response['error'])
[ "def", "reset_bmc", "(", "self", ")", ":", "response", "=", "self", ".", "raw_command", "(", "netfn", "=", "6", ",", "command", "=", "2", ")", "if", "'error'", "in", "response", ":", "raise", "exc", ".", "IpmiException", "(", "response", "[", "'error'", "]", ")" ]
Do a cold reset of the BMC
[ "Do", "a", "cold", "reset", "of", "the", "BMC" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L364-L369
train
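A hedged usage sketch for reset_bmc; the Command constructor arguments (bmc, userid, password) and the address are placeholders based on typical pyghmi usage, not taken from the record:

from pyghmi.ipmi import command

ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='secret')
ipmicmd.reset_bmc()  # raises IpmiException if the BMC reports an error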
openstack/pyghmi
pyghmi/ipmi/command.py
Command.xraw_command
def xraw_command(self, netfn, command, bridge_request=(), data=(), delay_xmit=None, retry=True, timeout=None): """Send raw ipmi command to BMC, raising exception on error This is identical to raw_command, except it raises exceptions on IPMI errors and returns data as a buffer. This is the recommend function to use. The response['data'] being a buffer allows traditional indexed access as well as works nicely with struct.unpack_from when certain data is coming back. :param netfn: Net function number :param command: Command value :param bridge_request: The target slave address and channel number for the bridge request. :param data: Command data as a tuple or list :param retry: Whether to retry this particular payload or not, defaults to true. :param timeout: A custom time to wait for initial reply, useful for a slow command. This may interfere with retry logic. :returns: dict -- The response from IPMI device """ rsp = self.ipmi_session.raw_command(netfn=netfn, command=command, bridge_request=bridge_request, data=data, delay_xmit=delay_xmit, retry=retry, timeout=timeout) if 'error' in rsp: raise exc.IpmiException(rsp['error'], rsp['code']) rsp['data'] = buffer(rsp['data']) return rsp
python
def xraw_command(self, netfn, command, bridge_request=(), data=(), delay_xmit=None, retry=True, timeout=None): """Send raw ipmi command to BMC, raising exception on error This is identical to raw_command, except it raises exceptions on IPMI errors and returns data as a buffer. This is the recommend function to use. The response['data'] being a buffer allows traditional indexed access as well as works nicely with struct.unpack_from when certain data is coming back. :param netfn: Net function number :param command: Command value :param bridge_request: The target slave address and channel number for the bridge request. :param data: Command data as a tuple or list :param retry: Whether to retry this particular payload or not, defaults to true. :param timeout: A custom time to wait for initial reply, useful for a slow command. This may interfere with retry logic. :returns: dict -- The response from IPMI device """ rsp = self.ipmi_session.raw_command(netfn=netfn, command=command, bridge_request=bridge_request, data=data, delay_xmit=delay_xmit, retry=retry, timeout=timeout) if 'error' in rsp: raise exc.IpmiException(rsp['error'], rsp['code']) rsp['data'] = buffer(rsp['data']) return rsp
[ "def", "xraw_command", "(", "self", ",", "netfn", ",", "command", ",", "bridge_request", "=", "(", ")", ",", "data", "=", "(", ")", ",", "delay_xmit", "=", "None", ",", "retry", "=", "True", ",", "timeout", "=", "None", ")", ":", "rsp", "=", "self", ".", "ipmi_session", ".", "raw_command", "(", "netfn", "=", "netfn", ",", "command", "=", "command", ",", "bridge_request", "=", "bridge_request", ",", "data", "=", "data", ",", "delay_xmit", "=", "delay_xmit", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ")", "if", "'error'", "in", "rsp", ":", "raise", "exc", ".", "IpmiException", "(", "rsp", "[", "'error'", "]", ",", "rsp", "[", "'code'", "]", ")", "rsp", "[", "'data'", "]", "=", "buffer", "(", "rsp", "[", "'data'", "]", ")", "return", "rsp" ]
Send raw ipmi command to BMC, raising exception on error

        This is identical to raw_command, except it raises exceptions
        on IPMI errors and returns data as a buffer.  This is the recommended
        function to use.  The response['data'] being a buffer allows
        traditional indexed access as well as works nicely with
        struct.unpack_from when certain data is coming back.

        :param netfn: Net function number
        :param command: Command value
        :param bridge_request: The target slave address and channel number for
                               the bridge request.
        :param data: Command data as a tuple or list
        :param retry: Whether to retry this particular payload or not, defaults
                      to true.
        :param timeout: A custom time to wait for initial reply, useful for
                        a slow command.  This may interfere with retry logic.
        :returns: dict -- The response from IPMI device
[ "Send", "raw", "ipmi", "command", "to", "BMC", "raising", "exception", "on", "error" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L418-L446
train
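A hedged sketch of xraw_command, issuing the standard IPMI Get Device ID (netfn 6, command 1). As above, the connection details are placeholders:

import struct
from pyghmi.ipmi import command

ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='secret')
rsp = ipmicmd.xraw_command(netfn=6, command=1)   # Get Device ID
# rsp['data'] is returned as a buffer, so struct.unpack_from works on it directly.
device_id, device_rev = struct.unpack_from('BB', rsp['data'])
print(device_id, device_rev)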
openstack/pyghmi
pyghmi/ipmi/command.py
Command.raw_command
def raw_command(self, netfn, command, bridge_request=(), data=(), delay_xmit=None, retry=True, timeout=None): """Send raw ipmi command to BMC This allows arbitrary IPMI bytes to be issued. This is commonly used for certain vendor specific commands. Example: ipmicmd.raw_command(netfn=0,command=4,data=(5)) :param netfn: Net function number :param command: Command value :param bridge_request: The target slave address and channel number for the bridge request. :param data: Command data as a tuple or list :param retry: Whether or not to retry command if no response received. Defaults to True :param timeout: A custom amount of time to wait for initial reply :returns: dict -- The response from IPMI device """ rsp = self.ipmi_session.raw_command(netfn=netfn, command=command, bridge_request=bridge_request, data=data, delay_xmit=delay_xmit, retry=retry, timeout=timeout) if 'data' in rsp: rsp['data'] = list(rsp['data']) return rsp
python
def raw_command(self, netfn, command, bridge_request=(), data=(), delay_xmit=None, retry=True, timeout=None): """Send raw ipmi command to BMC This allows arbitrary IPMI bytes to be issued. This is commonly used for certain vendor specific commands. Example: ipmicmd.raw_command(netfn=0,command=4,data=(5)) :param netfn: Net function number :param command: Command value :param bridge_request: The target slave address and channel number for the bridge request. :param data: Command data as a tuple or list :param retry: Whether or not to retry command if no response received. Defaults to True :param timeout: A custom amount of time to wait for initial reply :returns: dict -- The response from IPMI device """ rsp = self.ipmi_session.raw_command(netfn=netfn, command=command, bridge_request=bridge_request, data=data, delay_xmit=delay_xmit, retry=retry, timeout=timeout) if 'data' in rsp: rsp['data'] = list(rsp['data']) return rsp
[ "def", "raw_command", "(", "self", ",", "netfn", ",", "command", ",", "bridge_request", "=", "(", ")", ",", "data", "=", "(", ")", ",", "delay_xmit", "=", "None", ",", "retry", "=", "True", ",", "timeout", "=", "None", ")", ":", "rsp", "=", "self", ".", "ipmi_session", ".", "raw_command", "(", "netfn", "=", "netfn", ",", "command", "=", "command", ",", "bridge_request", "=", "bridge_request", ",", "data", "=", "data", ",", "delay_xmit", "=", "delay_xmit", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ")", "if", "'data'", "in", "rsp", ":", "rsp", "[", "'data'", "]", "=", "list", "(", "rsp", "[", "'data'", "]", ")", "return", "rsp" ]
Send raw ipmi command to BMC This allows arbitrary IPMI bytes to be issued. This is commonly used for certain vendor specific commands. Example: ipmicmd.raw_command(netfn=0,command=4,data=(5)) :param netfn: Net function number :param command: Command value :param bridge_request: The target slave address and channel number for the bridge request. :param data: Command data as a tuple or list :param retry: Whether or not to retry command if no response received. Defaults to True :param timeout: A custom amount of time to wait for initial reply :returns: dict -- The response from IPMI device
[ "Send", "raw", "ipmi", "command", "to", "BMC" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L462-L487
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_power
def get_power(self): """Get current power state of the managed system The response, if successful, should contain 'powerstate' key and either 'on' or 'off' to indicate current state. :returns: dict -- {'powerstate': value} """ response = self.raw_command(netfn=0, command=1) if 'error' in response: raise exc.IpmiException(response['error']) assert (response['command'] == 1 and response['netfn'] == 1) powerstate = 'on' if (response['data'][0] & 1) else 'off' return {'powerstate': powerstate}
python
def get_power(self): """Get current power state of the managed system The response, if successful, should contain 'powerstate' key and either 'on' or 'off' to indicate current state. :returns: dict -- {'powerstate': value} """ response = self.raw_command(netfn=0, command=1) if 'error' in response: raise exc.IpmiException(response['error']) assert (response['command'] == 1 and response['netfn'] == 1) powerstate = 'on' if (response['data'][0] & 1) else 'off' return {'powerstate': powerstate}
[ "def", "get_power", "(", "self", ")", ":", "response", "=", "self", ".", "raw_command", "(", "netfn", "=", "0", ",", "command", "=", "1", ")", "if", "'error'", "in", "response", ":", "raise", "exc", ".", "IpmiException", "(", "response", "[", "'error'", "]", ")", "assert", "(", "response", "[", "'command'", "]", "==", "1", "and", "response", "[", "'netfn'", "]", "==", "1", ")", "powerstate", "=", "'on'", "if", "(", "response", "[", "'data'", "]", "[", "0", "]", "&", "1", ")", "else", "'off'", "return", "{", "'powerstate'", ":", "powerstate", "}" ]
Get current power state of the managed system The response, if successful, should contain 'powerstate' key and either 'on' or 'off' to indicate current state. :returns: dict -- {'powerstate': value}
[ "Get", "current", "power", "state", "of", "the", "managed", "system" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L489-L502
train
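A minimal hedged sketch for get_power; BMC address and credentials are placeholders:

from pyghmi.ipmi import command

ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='secret')
print(ipmicmd.get_power())   # e.g. {'powerstate': 'on'}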
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_event_log
def get_event_log(self, clear=False): """Retrieve the log of events, optionally clearing The contents of the SEL are returned as an iterable. Timestamps are given as local time, ISO 8601 (whether the target has an accurate clock or not). Timestamps may be omitted for events that cannot be given a timestamp, leaving only the raw timecode to provide relative time information. clear set to true will result in the log being cleared as it is returned. This allows an atomic fetch and clear behavior so that no log entries will be lost between the fetch and clear actions. There is no 'clear_event_log' function to encourage users to create code that is not at risk for losing events. :param clear: Whether to remove the SEL entries from the target BMC """ self.oem_init() return sel.EventHandler(self.init_sdr(), self).fetch_sel(self, clear)
python
def get_event_log(self, clear=False): """Retrieve the log of events, optionally clearing The contents of the SEL are returned as an iterable. Timestamps are given as local time, ISO 8601 (whether the target has an accurate clock or not). Timestamps may be omitted for events that cannot be given a timestamp, leaving only the raw timecode to provide relative time information. clear set to true will result in the log being cleared as it is returned. This allows an atomic fetch and clear behavior so that no log entries will be lost between the fetch and clear actions. There is no 'clear_event_log' function to encourage users to create code that is not at risk for losing events. :param clear: Whether to remove the SEL entries from the target BMC """ self.oem_init() return sel.EventHandler(self.init_sdr(), self).fetch_sel(self, clear)
[ "def", "get_event_log", "(", "self", ",", "clear", "=", "False", ")", ":", "self", ".", "oem_init", "(", ")", "return", "sel", ".", "EventHandler", "(", "self", ".", "init_sdr", "(", ")", ",", "self", ")", ".", "fetch_sel", "(", "self", ",", "clear", ")" ]
Retrieve the log of events, optionally clearing The contents of the SEL are returned as an iterable. Timestamps are given as local time, ISO 8601 (whether the target has an accurate clock or not). Timestamps may be omitted for events that cannot be given a timestamp, leaving only the raw timecode to provide relative time information. clear set to true will result in the log being cleared as it is returned. This allows an atomic fetch and clear behavior so that no log entries will be lost between the fetch and clear actions. There is no 'clear_event_log' function to encourage users to create code that is not at risk for losing events. :param clear: Whether to remove the SEL entries from the target BMC
[ "Retrieve", "the", "log", "of", "events", "optionally", "clearing" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L559-L575
train
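A hedged sketch for get_event_log; pass clear=True to fetch and clear atomically, as the docstring describes. Connection details are again placeholders:

from pyghmi.ipmi import command

ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='secret')
for event in ipmicmd.get_event_log():   # clear=False leaves the SEL intact
    print(event)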
openstack/pyghmi
pyghmi/ipmi/command.py
Command.decode_pet
def decode_pet(self, specifictrap, petdata): """Decode PET to an event In IPMI, the alert format are PET alerts. It is a particular set of data put into an SNMPv1 trap and sent. It bears no small resemblence to the SEL entries. This function takes data that would have been received by an SNMP trap handler, and provides an event decode, similar to one entry of get_event_log. :param specifictrap: The specific trap, as either a bytearray or int :param petdata: An iterable of the octet data of varbind for 1.3.6.1.4.1.3183.1.1.1 :returns: A dict event similar to one iteration of get_event_log """ self.oem_init() return sel.EventHandler(self.init_sdr(), self).decode_pet(specifictrap, petdata)
python
def decode_pet(self, specifictrap, petdata): """Decode PET to an event In IPMI, the alert format are PET alerts. It is a particular set of data put into an SNMPv1 trap and sent. It bears no small resemblence to the SEL entries. This function takes data that would have been received by an SNMP trap handler, and provides an event decode, similar to one entry of get_event_log. :param specifictrap: The specific trap, as either a bytearray or int :param petdata: An iterable of the octet data of varbind for 1.3.6.1.4.1.3183.1.1.1 :returns: A dict event similar to one iteration of get_event_log """ self.oem_init() return sel.EventHandler(self.init_sdr(), self).decode_pet(specifictrap, petdata)
[ "def", "decode_pet", "(", "self", ",", "specifictrap", ",", "petdata", ")", ":", "self", ".", "oem_init", "(", ")", "return", "sel", ".", "EventHandler", "(", "self", ".", "init_sdr", "(", ")", ",", "self", ")", ".", "decode_pet", "(", "specifictrap", ",", "petdata", ")" ]
Decode PET to an event

        In IPMI, the alert format is the PET alert.  It is a particular
        set of data put into an SNMPv1 trap and sent.  It bears no small
        resemblance to the SEL entries.  This function takes data that would
        have been received by an SNMP trap handler, and provides an event
        decode, similar to one entry of get_event_log.

        :param specifictrap: The specific trap, as either a bytearray or int
        :param petdata: An iterable of the octet data of varbind for
                        1.3.6.1.4.1.3183.1.1.1
        :returns: A dict event similar to one iteration of get_event_log
[ "Decode", "PET", "to", "an", "event" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L577-L593
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_inventory_descriptions
def get_inventory_descriptions(self): """Retrieve list of things that could be inventoried This permits a caller to examine the available items without actually causing the inventory data to be gathered. It returns an iterable of string descriptions """ yield "System" self.init_sdr() for fruid in sorted(self._sdr.fru): yield self._sdr.fru[fruid].fru_name self.oem_init() for compname in self._oem.get_oem_inventory_descriptions(): yield compname
python
def get_inventory_descriptions(self): """Retrieve list of things that could be inventoried This permits a caller to examine the available items without actually causing the inventory data to be gathered. It returns an iterable of string descriptions """ yield "System" self.init_sdr() for fruid in sorted(self._sdr.fru): yield self._sdr.fru[fruid].fru_name self.oem_init() for compname in self._oem.get_oem_inventory_descriptions(): yield compname
[ "def", "get_inventory_descriptions", "(", "self", ")", ":", "yield", "\"System\"", "self", ".", "init_sdr", "(", ")", "for", "fruid", "in", "sorted", "(", "self", ".", "_sdr", ".", "fru", ")", ":", "yield", "self", ".", "_sdr", ".", "fru", "[", "fruid", "]", ".", "fru_name", "self", ".", "oem_init", "(", ")", "for", "compname", "in", "self", ".", "_oem", ".", "get_oem_inventory_descriptions", "(", ")", ":", "yield", "compname" ]
Retrieve list of things that could be inventoried This permits a caller to examine the available items without actually causing the inventory data to be gathered. It returns an iterable of string descriptions
[ "Retrieve", "list", "of", "things", "that", "could", "be", "inventoried" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L595-L608
train
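A hedged sketch for get_inventory_descriptions, which lists what could be inventoried without gathering the full FRU data (placeholders as in the earlier sketches):

from pyghmi.ipmi import command

ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='secret')
for name in ipmicmd.get_inventory_descriptions():
    print(name)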
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_inventory_of_component
def get_inventory_of_component(self, component): """Retrieve inventory of a component Retrieve detailed inventory information for only the requested component. """ self.oem_init() if component == 'System': return self._get_zero_fru() self.init_sdr() for fruid in self._sdr.fru: if self._sdr.fru[fruid].fru_name == component: return self._oem.process_fru(fru.FRU( ipmicmd=self, fruid=fruid, sdr=self._sdr.fru[fruid]).info, component) return self._oem.get_inventory_of_component(component)
python
def get_inventory_of_component(self, component): """Retrieve inventory of a component Retrieve detailed inventory information for only the requested component. """ self.oem_init() if component == 'System': return self._get_zero_fru() self.init_sdr() for fruid in self._sdr.fru: if self._sdr.fru[fruid].fru_name == component: return self._oem.process_fru(fru.FRU( ipmicmd=self, fruid=fruid, sdr=self._sdr.fru[fruid]).info, component) return self._oem.get_inventory_of_component(component)
[ "def", "get_inventory_of_component", "(", "self", ",", "component", ")", ":", "self", ".", "oem_init", "(", ")", "if", "component", "==", "'System'", ":", "return", "self", ".", "_get_zero_fru", "(", ")", "self", ".", "init_sdr", "(", ")", "for", "fruid", "in", "self", ".", "_sdr", ".", "fru", ":", "if", "self", ".", "_sdr", ".", "fru", "[", "fruid", "]", ".", "fru_name", "==", "component", ":", "return", "self", ".", "_oem", ".", "process_fru", "(", "fru", ".", "FRU", "(", "ipmicmd", "=", "self", ",", "fruid", "=", "fruid", ",", "sdr", "=", "self", ".", "_sdr", ".", "fru", "[", "fruid", "]", ")", ".", "info", ",", "component", ")", "return", "self", ".", "_oem", ".", "get_inventory_of_component", "(", "component", ")" ]
Retrieve inventory of a component Retrieve detailed inventory information for only the requested component.
[ "Retrieve", "inventory", "of", "a", "component" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L610-L625
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_inventory
def get_inventory(self): """Retrieve inventory of system Retrieve inventory of the targeted system. This frequently includes serial numbers, sometimes hardware addresses, sometimes memory modules This function will retrieve whatever the underlying platform provides and apply some structure. Iterating over the return yields tuples of a name for the inventoried item and dictionary of descriptions or None for items not present. """ self.oem_init() yield ("System", self._get_zero_fru()) self.init_sdr() for fruid in sorted(self._sdr.fru): fruinf = fru.FRU( ipmicmd=self, fruid=fruid, sdr=self._sdr.fru[fruid]).info if fruinf is not None: fruinf = self._oem.process_fru(fruinf, self._sdr.fru[fruid].fru_name) yield (self._sdr.fru[fruid].fru_name, fruinf) for componentpair in self._oem.get_oem_inventory(): yield componentpair
python
def get_inventory(self): """Retrieve inventory of system Retrieve inventory of the targeted system. This frequently includes serial numbers, sometimes hardware addresses, sometimes memory modules This function will retrieve whatever the underlying platform provides and apply some structure. Iterating over the return yields tuples of a name for the inventoried item and dictionary of descriptions or None for items not present. """ self.oem_init() yield ("System", self._get_zero_fru()) self.init_sdr() for fruid in sorted(self._sdr.fru): fruinf = fru.FRU( ipmicmd=self, fruid=fruid, sdr=self._sdr.fru[fruid]).info if fruinf is not None: fruinf = self._oem.process_fru(fruinf, self._sdr.fru[fruid].fru_name) yield (self._sdr.fru[fruid].fru_name, fruinf) for componentpair in self._oem.get_oem_inventory(): yield componentpair
[ "def", "get_inventory", "(", "self", ")", ":", "self", ".", "oem_init", "(", ")", "yield", "(", "\"System\"", ",", "self", ".", "_get_zero_fru", "(", ")", ")", "self", ".", "init_sdr", "(", ")", "for", "fruid", "in", "sorted", "(", "self", ".", "_sdr", ".", "fru", ")", ":", "fruinf", "=", "fru", ".", "FRU", "(", "ipmicmd", "=", "self", ",", "fruid", "=", "fruid", ",", "sdr", "=", "self", ".", "_sdr", ".", "fru", "[", "fruid", "]", ")", ".", "info", "if", "fruinf", "is", "not", "None", ":", "fruinf", "=", "self", ".", "_oem", ".", "process_fru", "(", "fruinf", ",", "self", ".", "_sdr", ".", "fru", "[", "fruid", "]", ".", "fru_name", ")", "yield", "(", "self", ".", "_sdr", ".", "fru", "[", "fruid", "]", ".", "fru_name", ",", "fruinf", ")", "for", "componentpair", "in", "self", ".", "_oem", ".", "get_oem_inventory", "(", ")", ":", "yield", "componentpair" ]
Retrieve inventory of system Retrieve inventory of the targeted system. This frequently includes serial numbers, sometimes hardware addresses, sometimes memory modules This function will retrieve whatever the underlying platform provides and apply some structure. Iterating over the return yields tuples of a name for the inventoried item and dictionary of descriptions or None for items not present.
[ "Retrieve", "inventory", "of", "system" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L651-L672
train
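A hedged sketch for get_inventory, iterating the (name, info) pairs the docstring describes:

from pyghmi.ipmi import command

ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='secret')
for name, info in ipmicmd.get_inventory():
    print(name, info)   # info is a dict of descriptions, or None if absent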
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_health
def get_health(self): """Summarize health of managed system This provides a summary of the health of the managed system. It additionally provides an iterable list of reasons for warning, critical, or failed assessments. """ summary = {'badreadings': [], 'health': const.Health.Ok} fallbackreadings = [] try: self.oem_init() fallbackreadings = self._oem.get_health(summary) for reading in self.get_sensor_data(): if reading.health != const.Health.Ok: summary['health'] |= reading.health summary['badreadings'].append(reading) except exc.BypassGenericBehavior: pass if not summary['badreadings']: summary['badreadings'] = fallbackreadings return summary
python
def get_health(self): """Summarize health of managed system This provides a summary of the health of the managed system. It additionally provides an iterable list of reasons for warning, critical, or failed assessments. """ summary = {'badreadings': [], 'health': const.Health.Ok} fallbackreadings = [] try: self.oem_init() fallbackreadings = self._oem.get_health(summary) for reading in self.get_sensor_data(): if reading.health != const.Health.Ok: summary['health'] |= reading.health summary['badreadings'].append(reading) except exc.BypassGenericBehavior: pass if not summary['badreadings']: summary['badreadings'] = fallbackreadings return summary
[ "def", "get_health", "(", "self", ")", ":", "summary", "=", "{", "'badreadings'", ":", "[", "]", ",", "'health'", ":", "const", ".", "Health", ".", "Ok", "}", "fallbackreadings", "=", "[", "]", "try", ":", "self", ".", "oem_init", "(", ")", "fallbackreadings", "=", "self", ".", "_oem", ".", "get_health", "(", "summary", ")", "for", "reading", "in", "self", ".", "get_sensor_data", "(", ")", ":", "if", "reading", ".", "health", "!=", "const", ".", "Health", ".", "Ok", ":", "summary", "[", "'health'", "]", "|=", "reading", ".", "health", "summary", "[", "'badreadings'", "]", ".", "append", "(", "reading", ")", "except", "exc", ".", "BypassGenericBehavior", ":", "pass", "if", "not", "summary", "[", "'badreadings'", "]", ":", "summary", "[", "'badreadings'", "]", "=", "fallbackreadings", "return", "summary" ]
Summarize health of managed system This provides a summary of the health of the managed system. It additionally provides an iterable list of reasons for warning, critical, or failed assessments.
[ "Summarize", "health", "of", "managed", "system" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L698-L718
train
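A brief sketch of consuming the get_health summary documented above. The import path for the Health constants (pyghmi.constants) and the session arguments are assumptions; only the returned keys ('health', 'badreadings') come from the record itself.

    from pyghmi import constants as const  # assumed location of const.Health
    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    summary = ipmicmd.get_health()
    if summary['health'] != const.Health.Ok:
        # 'badreadings' holds the sensor readings behind the degraded assessment.
        for reading in summary['badreadings']:
            print(reading)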
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_sensor_reading
def get_sensor_reading(self, sensorname): """Get a sensor reading by name Returns a single decoded sensor reading per the name passed in :param sensorname: Name of the desired sensor :returns: sdr.SensorReading object """ self.init_sdr() for sensor in self._sdr.get_sensor_numbers(): if self._sdr.sensors[sensor].name == sensorname: rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,)) if 'error' in rsp: raise exc.IpmiException(rsp['error'], rsp['code']) return self._sdr.sensors[sensor].decode_sensor_reading( rsp['data']) self.oem_init() return self._oem.get_sensor_reading(sensorname)
python
def get_sensor_reading(self, sensorname): """Get a sensor reading by name Returns a single decoded sensor reading per the name passed in :param sensorname: Name of the desired sensor :returns: sdr.SensorReading object """ self.init_sdr() for sensor in self._sdr.get_sensor_numbers(): if self._sdr.sensors[sensor].name == sensorname: rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,)) if 'error' in rsp: raise exc.IpmiException(rsp['error'], rsp['code']) return self._sdr.sensors[sensor].decode_sensor_reading( rsp['data']) self.oem_init() return self._oem.get_sensor_reading(sensorname)
[ "def", "get_sensor_reading", "(", "self", ",", "sensorname", ")", ":", "self", ".", "init_sdr", "(", ")", "for", "sensor", "in", "self", ".", "_sdr", ".", "get_sensor_numbers", "(", ")", ":", "if", "self", ".", "_sdr", ".", "sensors", "[", "sensor", "]", ".", "name", "==", "sensorname", ":", "rsp", "=", "self", ".", "raw_command", "(", "command", "=", "0x2d", ",", "netfn", "=", "4", ",", "data", "=", "(", "sensor", ",", ")", ")", "if", "'error'", "in", "rsp", ":", "raise", "exc", ".", "IpmiException", "(", "rsp", "[", "'error'", "]", ",", "rsp", "[", "'code'", "]", ")", "return", "self", ".", "_sdr", ".", "sensors", "[", "sensor", "]", ".", "decode_sensor_reading", "(", "rsp", "[", "'data'", "]", ")", "self", ".", "oem_init", "(", ")", "return", "self", ".", "_oem", ".", "get_sensor_reading", "(", "sensorname", ")" ]
Get a sensor reading by name Returns a single decoded sensor reading per the name passed in :param sensorname: Name of the desired sensor :returns: sdr.SensorReading object
[ "Get", "a", "sensor", "reading", "by", "name" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L720-L738
train
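A usage sketch for get_sensor_reading as documented above; the sensor name and the session arguments are illustrative placeholders.

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    # Returns a single decoded sdr.SensorReading for the named sensor.
    reading = ipmicmd.get_sensor_reading('Ambient Temp')  # placeholder sensor name
    print(reading)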
openstack/pyghmi
pyghmi/ipmi/command.py
Command._fetch_lancfg_param
def _fetch_lancfg_param(self, channel, param, prefixlen=False): """Internal helper for fetching lan cfg parameters If the parameter revison != 0x11, bail. Further, if 4 bytes, return string with ipv4. If 6 bytes, colon delimited hex (mac address). If one byte, return the int value """ fetchcmd = bytearray((channel, param, 0, 0)) fetched = self.xraw_command(0xc, 2, data=fetchcmd) fetchdata = fetched['data'] if ord(fetchdata[0]) != 17: return None if len(fetchdata) == 5: # IPv4 address if prefixlen: return _mask_to_cidr(fetchdata[1:]) else: ip = socket.inet_ntoa(fetchdata[1:]) if ip == '0.0.0.0': return None return ip elif len(fetchdata) == 7: # MAC address mac = '{0:02x}:{1:02x}:{2:02x}:{3:02x}:{4:02x}:{5:02x}'.format( *bytearray(fetchdata[1:])) if mac == '00:00:00:00:00:00': return None return mac elif len(fetchdata) == 2: return ord(fetchdata[1]) else: raise Exception("Unrecognized data format " + repr(fetchdata))
python
def _fetch_lancfg_param(self, channel, param, prefixlen=False): """Internal helper for fetching lan cfg parameters If the parameter revison != 0x11, bail. Further, if 4 bytes, return string with ipv4. If 6 bytes, colon delimited hex (mac address). If one byte, return the int value """ fetchcmd = bytearray((channel, param, 0, 0)) fetched = self.xraw_command(0xc, 2, data=fetchcmd) fetchdata = fetched['data'] if ord(fetchdata[0]) != 17: return None if len(fetchdata) == 5: # IPv4 address if prefixlen: return _mask_to_cidr(fetchdata[1:]) else: ip = socket.inet_ntoa(fetchdata[1:]) if ip == '0.0.0.0': return None return ip elif len(fetchdata) == 7: # MAC address mac = '{0:02x}:{1:02x}:{2:02x}:{3:02x}:{4:02x}:{5:02x}'.format( *bytearray(fetchdata[1:])) if mac == '00:00:00:00:00:00': return None return mac elif len(fetchdata) == 2: return ord(fetchdata[1]) else: raise Exception("Unrecognized data format " + repr(fetchdata))
[ "def", "_fetch_lancfg_param", "(", "self", ",", "channel", ",", "param", ",", "prefixlen", "=", "False", ")", ":", "fetchcmd", "=", "bytearray", "(", "(", "channel", ",", "param", ",", "0", ",", "0", ")", ")", "fetched", "=", "self", ".", "xraw_command", "(", "0xc", ",", "2", ",", "data", "=", "fetchcmd", ")", "fetchdata", "=", "fetched", "[", "'data'", "]", "if", "ord", "(", "fetchdata", "[", "0", "]", ")", "!=", "17", ":", "return", "None", "if", "len", "(", "fetchdata", ")", "==", "5", ":", "# IPv4 address", "if", "prefixlen", ":", "return", "_mask_to_cidr", "(", "fetchdata", "[", "1", ":", "]", ")", "else", ":", "ip", "=", "socket", ".", "inet_ntoa", "(", "fetchdata", "[", "1", ":", "]", ")", "if", "ip", "==", "'0.0.0.0'", ":", "return", "None", "return", "ip", "elif", "len", "(", "fetchdata", ")", "==", "7", ":", "# MAC address", "mac", "=", "'{0:02x}:{1:02x}:{2:02x}:{3:02x}:{4:02x}:{5:02x}'", ".", "format", "(", "*", "bytearray", "(", "fetchdata", "[", "1", ":", "]", ")", ")", "if", "mac", "==", "'00:00:00:00:00:00'", ":", "return", "None", "return", "mac", "elif", "len", "(", "fetchdata", ")", "==", "2", ":", "return", "ord", "(", "fetchdata", "[", "1", "]", ")", "else", ":", "raise", "Exception", "(", "\"Unrecognized data format \"", "+", "repr", "(", "fetchdata", ")", ")" ]
Internal helper for fetching lan cfg parameters If the parameter revision != 0x11, bail. Further, if 4 bytes, return string with ipv4. If 6 bytes, colon delimited hex (mac address). If one byte, return the int value
[ "Internal", "helper", "for", "fetching", "lan", "cfg", "parameters" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L740-L769
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.set_net_configuration
def set_net_configuration(self, ipv4_address=None, ipv4_configuration=None, ipv4_gateway=None, channel=None): """Set network configuration data. Apply desired network configuration data, leaving unspecified parameters alone. :param ipv4_address: CIDR notation for IP address and netmask Example: '192.168.0.10/16' :param ipv4_configuration: Method to use to configure the network. 'DHCP' or 'Static'. :param ipv4_gateway: IP address of gateway to use. :param channel: LAN channel to configure, defaults to autodetect """ if channel is None: channel = self.get_network_channel() if ipv4_configuration is not None: cmddata = [channel, 4, 0] if ipv4_configuration.lower() == 'dhcp': cmddata[-1] = 2 elif ipv4_configuration.lower() == 'static': cmddata[-1] = 1 else: raise Exception('Unrecognized ipv4cfg parameter {0}'.format( ipv4_configuration)) self.xraw_command(netfn=0xc, command=1, data=cmddata) if ipv4_address is not None: netmask = None if '/' in ipv4_address: ipv4_address, prefix = ipv4_address.split('/') netmask = _cidr_to_mask(int(prefix)) cmddata = bytearray((channel, 3)) + socket.inet_aton(ipv4_address) self.xraw_command(netfn=0xc, command=1, data=cmddata) if netmask is not None: cmddata = bytearray((channel, 6)) + netmask self.xraw_command(netfn=0xc, command=1, data=cmddata) if ipv4_gateway is not None: cmddata = bytearray((channel, 12)) + socket.inet_aton(ipv4_gateway) self.xraw_command(netfn=0xc, command=1, data=cmddata)
python
def set_net_configuration(self, ipv4_address=None, ipv4_configuration=None, ipv4_gateway=None, channel=None): """Set network configuration data. Apply desired network configuration data, leaving unspecified parameters alone. :param ipv4_address: CIDR notation for IP address and netmask Example: '192.168.0.10/16' :param ipv4_configuration: Method to use to configure the network. 'DHCP' or 'Static'. :param ipv4_gateway: IP address of gateway to use. :param channel: LAN channel to configure, defaults to autodetect """ if channel is None: channel = self.get_network_channel() if ipv4_configuration is not None: cmddata = [channel, 4, 0] if ipv4_configuration.lower() == 'dhcp': cmddata[-1] = 2 elif ipv4_configuration.lower() == 'static': cmddata[-1] = 1 else: raise Exception('Unrecognized ipv4cfg parameter {0}'.format( ipv4_configuration)) self.xraw_command(netfn=0xc, command=1, data=cmddata) if ipv4_address is not None: netmask = None if '/' in ipv4_address: ipv4_address, prefix = ipv4_address.split('/') netmask = _cidr_to_mask(int(prefix)) cmddata = bytearray((channel, 3)) + socket.inet_aton(ipv4_address) self.xraw_command(netfn=0xc, command=1, data=cmddata) if netmask is not None: cmddata = bytearray((channel, 6)) + netmask self.xraw_command(netfn=0xc, command=1, data=cmddata) if ipv4_gateway is not None: cmddata = bytearray((channel, 12)) + socket.inet_aton(ipv4_gateway) self.xraw_command(netfn=0xc, command=1, data=cmddata)
[ "def", "set_net_configuration", "(", "self", ",", "ipv4_address", "=", "None", ",", "ipv4_configuration", "=", "None", ",", "ipv4_gateway", "=", "None", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "if", "ipv4_configuration", "is", "not", "None", ":", "cmddata", "=", "[", "channel", ",", "4", ",", "0", "]", "if", "ipv4_configuration", ".", "lower", "(", ")", "==", "'dhcp'", ":", "cmddata", "[", "-", "1", "]", "=", "2", "elif", "ipv4_configuration", ".", "lower", "(", ")", "==", "'static'", ":", "cmddata", "[", "-", "1", "]", "=", "1", "else", ":", "raise", "Exception", "(", "'Unrecognized ipv4cfg parameter {0}'", ".", "format", "(", "ipv4_configuration", ")", ")", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "cmddata", ")", "if", "ipv4_address", "is", "not", "None", ":", "netmask", "=", "None", "if", "'/'", "in", "ipv4_address", ":", "ipv4_address", ",", "prefix", "=", "ipv4_address", ".", "split", "(", "'/'", ")", "netmask", "=", "_cidr_to_mask", "(", "int", "(", "prefix", ")", ")", "cmddata", "=", "bytearray", "(", "(", "channel", ",", "3", ")", ")", "+", "socket", ".", "inet_aton", "(", "ipv4_address", ")", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "cmddata", ")", "if", "netmask", "is", "not", "None", ":", "cmddata", "=", "bytearray", "(", "(", "channel", ",", "6", ")", ")", "+", "netmask", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "cmddata", ")", "if", "ipv4_gateway", "is", "not", "None", ":", "cmddata", "=", "bytearray", "(", "(", "channel", ",", "12", ")", ")", "+", "socket", ".", "inet_aton", "(", "ipv4_gateway", ")", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "cmddata", ")" ]
Set network configuration data. Apply desired network configuration data, leaving unspecified parameters alone. :param ipv4_address: CIDR notation for IP address and netmask Example: '192.168.0.10/16' :param ipv4_configuration: Method to use to configure the network. 'DHCP' or 'Static'. :param ipv4_gateway: IP address of gateway to use. :param channel: LAN channel to configure, defaults to autodetect
[ "Set", "network", "configuration", "data", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L787-L825
train
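A sketch applying the set_net_configuration parameters documented above (CIDR address, configuration method, gateway); the values and session arguments are placeholders.

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    # Any parameter left as None is left unchanged, per the docstring above.
    ipmicmd.set_net_configuration(ipv4_address='192.168.0.10/24',
                                  ipv4_configuration='Static',
                                  ipv4_gateway='192.168.0.1')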
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_net_configuration
def get_net_configuration(self, channel=None, gateway_macs=True): """Get network configuration data Retrieve network configuration from the target :param channel: Channel to configure, defaults to None for 'autodetect' :param gateway_macs: Whether to retrieve mac addresses for gateways :returns: A dictionary of network configuration data """ if channel is None: channel = self.get_network_channel() retdata = {} v4addr = self._fetch_lancfg_param(channel, 3) if v4addr is None: retdata['ipv4_address'] = None else: v4masklen = self._fetch_lancfg_param(channel, 6, prefixlen=True) retdata['ipv4_address'] = '{0}/{1}'.format(v4addr, v4masklen) v4cfgmethods = { 0: 'Unspecified', 1: 'Static', 2: 'DHCP', 3: 'BIOS', 4: 'Other', } retdata['ipv4_configuration'] = v4cfgmethods[self._fetch_lancfg_param( channel, 4)] retdata['mac_address'] = self._fetch_lancfg_param(channel, 5) retdata['ipv4_gateway'] = self._fetch_lancfg_param(channel, 12) retdata['ipv4_backup_gateway'] = self._fetch_lancfg_param(channel, 14) if gateway_macs: retdata['ipv4_gateway_mac'] = self._fetch_lancfg_param(channel, 13) retdata['ipv4_backup_gateway_mac'] = self._fetch_lancfg_param( channel, 15) self.oem_init() self._oem.add_extra_net_configuration(retdata) return retdata
python
def get_net_configuration(self, channel=None, gateway_macs=True): """Get network configuration data Retrieve network configuration from the target :param channel: Channel to configure, defaults to None for 'autodetect' :param gateway_macs: Whether to retrieve mac addresses for gateways :returns: A dictionary of network configuration data """ if channel is None: channel = self.get_network_channel() retdata = {} v4addr = self._fetch_lancfg_param(channel, 3) if v4addr is None: retdata['ipv4_address'] = None else: v4masklen = self._fetch_lancfg_param(channel, 6, prefixlen=True) retdata['ipv4_address'] = '{0}/{1}'.format(v4addr, v4masklen) v4cfgmethods = { 0: 'Unspecified', 1: 'Static', 2: 'DHCP', 3: 'BIOS', 4: 'Other', } retdata['ipv4_configuration'] = v4cfgmethods[self._fetch_lancfg_param( channel, 4)] retdata['mac_address'] = self._fetch_lancfg_param(channel, 5) retdata['ipv4_gateway'] = self._fetch_lancfg_param(channel, 12) retdata['ipv4_backup_gateway'] = self._fetch_lancfg_param(channel, 14) if gateway_macs: retdata['ipv4_gateway_mac'] = self._fetch_lancfg_param(channel, 13) retdata['ipv4_backup_gateway_mac'] = self._fetch_lancfg_param( channel, 15) self.oem_init() self._oem.add_extra_net_configuration(retdata) return retdata
[ "def", "get_net_configuration", "(", "self", ",", "channel", "=", "None", ",", "gateway_macs", "=", "True", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "retdata", "=", "{", "}", "v4addr", "=", "self", ".", "_fetch_lancfg_param", "(", "channel", ",", "3", ")", "if", "v4addr", "is", "None", ":", "retdata", "[", "'ipv4_address'", "]", "=", "None", "else", ":", "v4masklen", "=", "self", ".", "_fetch_lancfg_param", "(", "channel", ",", "6", ",", "prefixlen", "=", "True", ")", "retdata", "[", "'ipv4_address'", "]", "=", "'{0}/{1}'", ".", "format", "(", "v4addr", ",", "v4masklen", ")", "v4cfgmethods", "=", "{", "0", ":", "'Unspecified'", ",", "1", ":", "'Static'", ",", "2", ":", "'DHCP'", ",", "3", ":", "'BIOS'", ",", "4", ":", "'Other'", ",", "}", "retdata", "[", "'ipv4_configuration'", "]", "=", "v4cfgmethods", "[", "self", ".", "_fetch_lancfg_param", "(", "channel", ",", "4", ")", "]", "retdata", "[", "'mac_address'", "]", "=", "self", ".", "_fetch_lancfg_param", "(", "channel", ",", "5", ")", "retdata", "[", "'ipv4_gateway'", "]", "=", "self", ".", "_fetch_lancfg_param", "(", "channel", ",", "12", ")", "retdata", "[", "'ipv4_backup_gateway'", "]", "=", "self", ".", "_fetch_lancfg_param", "(", "channel", ",", "14", ")", "if", "gateway_macs", ":", "retdata", "[", "'ipv4_gateway_mac'", "]", "=", "self", ".", "_fetch_lancfg_param", "(", "channel", ",", "13", ")", "retdata", "[", "'ipv4_backup_gateway_mac'", "]", "=", "self", ".", "_fetch_lancfg_param", "(", "channel", ",", "15", ")", "self", ".", "oem_init", "(", ")", "self", ".", "_oem", ".", "add_extra_net_configuration", "(", "retdata", ")", "return", "retdata" ]
Get network configuration data Retrieve network configuration from the target :param channel: Channel to configure, defaults to None for 'autodetect' :param gateway_macs: Whether to retrieve mac addresses for gateways :returns: A dictionary of network configuration data
[ "Get", "network", "configuration", "data" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L880-L916
train
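A sketch reading back the network configuration; the dictionary keys are taken from the record's code, while the session arguments are assumed.

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    netcfg = ipmicmd.get_net_configuration(gateway_macs=False)
    print(netcfg['ipv4_address'], netcfg['ipv4_configuration'])
    print(netcfg['mac_address'], netcfg['ipv4_gateway'])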
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_sensor_data
def get_sensor_data(self): """Get sensor reading objects Iterates sensor reading objects pertaining to the currently managed BMC. :returns: Iterator of sdr.SensorReading objects """ self.init_sdr() for sensor in self._sdr.get_sensor_numbers(): rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,)) if 'error' in rsp: if rsp['code'] == 203: # Sensor does not exist, optional dev continue raise exc.IpmiException(rsp['error'], code=rsp['code']) yield self._sdr.sensors[sensor].decode_sensor_reading(rsp['data']) self.oem_init() for reading in self._oem.get_sensor_data(): yield reading
python
def get_sensor_data(self): """Get sensor reading objects Iterates sensor reading objects pertaining to the currently managed BMC. :returns: Iterator of sdr.SensorReading objects """ self.init_sdr() for sensor in self._sdr.get_sensor_numbers(): rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,)) if 'error' in rsp: if rsp['code'] == 203: # Sensor does not exist, optional dev continue raise exc.IpmiException(rsp['error'], code=rsp['code']) yield self._sdr.sensors[sensor].decode_sensor_reading(rsp['data']) self.oem_init() for reading in self._oem.get_sensor_data(): yield reading
[ "def", "get_sensor_data", "(", "self", ")", ":", "self", ".", "init_sdr", "(", ")", "for", "sensor", "in", "self", ".", "_sdr", ".", "get_sensor_numbers", "(", ")", ":", "rsp", "=", "self", ".", "raw_command", "(", "command", "=", "0x2d", ",", "netfn", "=", "4", ",", "data", "=", "(", "sensor", ",", ")", ")", "if", "'error'", "in", "rsp", ":", "if", "rsp", "[", "'code'", "]", "==", "203", ":", "# Sensor does not exist, optional dev", "continue", "raise", "exc", ".", "IpmiException", "(", "rsp", "[", "'error'", "]", ",", "code", "=", "rsp", "[", "'code'", "]", ")", "yield", "self", ".", "_sdr", ".", "sensors", "[", "sensor", "]", ".", "decode_sensor_reading", "(", "rsp", "[", "'data'", "]", ")", "self", ".", "oem_init", "(", ")", "for", "reading", "in", "self", ".", "_oem", ".", "get_sensor_data", "(", ")", ":", "yield", "reading" ]
Get sensor reading objects Iterates sensor reading objects pertaining to the currently managed BMC. :returns: Iterator of sdr.SensorReading objects
[ "Get", "sensor", "reading", "objects" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L918-L936
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_sensor_descriptions
def get_sensor_descriptions(self): """Get available sensor names Iterates over the available sensor descriptions :returns: Iterator of dicts describing each sensor """ self.init_sdr() for sensor in self._sdr.get_sensor_numbers(): yield {'name': self._sdr.sensors[sensor].name, 'type': self._sdr.sensors[sensor].sensor_type} self.oem_init() for sensor in self._oem.get_sensor_descriptions(): yield sensor
python
def get_sensor_descriptions(self): """Get available sensor names Iterates over the available sensor descriptions :returns: Iterator of dicts describing each sensor """ self.init_sdr() for sensor in self._sdr.get_sensor_numbers(): yield {'name': self._sdr.sensors[sensor].name, 'type': self._sdr.sensors[sensor].sensor_type} self.oem_init() for sensor in self._oem.get_sensor_descriptions(): yield sensor
[ "def", "get_sensor_descriptions", "(", "self", ")", ":", "self", ".", "init_sdr", "(", ")", "for", "sensor", "in", "self", ".", "_sdr", ".", "get_sensor_numbers", "(", ")", ":", "yield", "{", "'name'", ":", "self", ".", "_sdr", ".", "sensors", "[", "sensor", "]", ".", "name", ",", "'type'", ":", "self", ".", "_sdr", ".", "sensors", "[", "sensor", "]", ".", "sensor_type", "}", "self", ".", "oem_init", "(", ")", "for", "sensor", "in", "self", ".", "_oem", ".", "get_sensor_descriptions", "(", ")", ":", "yield", "sensor" ]
Get available sensor names Iterates over the available sensor descriptions :returns: Iterator of dicts describing each sensor
[ "Get", "available", "sensor", "names" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L938-L951
train
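The two sensor records above (get_sensor_data and get_sensor_descriptions) both return iterators; a combined sketch with assumed session arguments:

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    # Names and types only, without issuing a reading request per sensor.
    for desc in ipmicmd.get_sensor_descriptions():
        print(desc['name'], desc['type'])
    # Full decoded readings for every known sensor.
    for reading in ipmicmd.get_sensor_data():
        print(reading)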
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_network_channel
def get_network_channel(self): """Get a reasonable 'default' network channel. When configuring/examining network configuration, it's desirable to find the correct channel. Here we run with the 'real' number of the current channel if it is a LAN channel, otherwise it evaluates all of the channels to find the first workable LAN channel and returns that """ if self._netchannel is None: for channel in chain((0xe,), range(1, 0xc)): try: rsp = self.xraw_command( netfn=6, command=0x42, data=(channel,)) except exc.IpmiException as ie: if ie.ipmicode == 0xcc: # We have hit an invalid channel, move on to next # candidate continue else: raise chantype = ord(rsp['data'][1]) & 0b1111111 if chantype in (4, 6): try: # Some implementations denote an inactive channel # by refusing to do parameter retrieval if channel != 0xe: # skip checking if channel is active if we are # actively using the channel self.xraw_command( netfn=0xc, command=2, data=(channel, 5, 0, 0)) # If still here, the channel seems serviceable... # However some implementations may still have # ambiguous channel info, that will need to be # picked up on an OEM extension... self._netchannel = ord(rsp['data'][0]) & 0b1111 break except exc.IpmiException as ie: # This means the attempt to fetch parameter 5 failed, # therefore move on to next candidate channel continue return self._netchannel
python
def get_network_channel(self): """Get a reasonable 'default' network channel. When configuring/examining network configuration, it's desirable to find the correct channel. Here we run with the 'real' number of the current channel if it is a LAN channel, otherwise it evaluates all of the channels to find the first workable LAN channel and returns that """ if self._netchannel is None: for channel in chain((0xe,), range(1, 0xc)): try: rsp = self.xraw_command( netfn=6, command=0x42, data=(channel,)) except exc.IpmiException as ie: if ie.ipmicode == 0xcc: # We have hit an invalid channel, move on to next # candidate continue else: raise chantype = ord(rsp['data'][1]) & 0b1111111 if chantype in (4, 6): try: # Some implementations denote an inactive channel # by refusing to do parameter retrieval if channel != 0xe: # skip checking if channel is active if we are # actively using the channel self.xraw_command( netfn=0xc, command=2, data=(channel, 5, 0, 0)) # If still here, the channel seems serviceable... # However some implementations may still have # ambiguous channel info, that will need to be # picked up on an OEM extension... self._netchannel = ord(rsp['data'][0]) & 0b1111 break except exc.IpmiException as ie: # This means the attempt to fetch parameter 5 failed, # therefore move on to next candidate channel continue return self._netchannel
[ "def", "get_network_channel", "(", "self", ")", ":", "if", "self", ".", "_netchannel", "is", "None", ":", "for", "channel", "in", "chain", "(", "(", "0xe", ",", ")", ",", "range", "(", "1", ",", "0xc", ")", ")", ":", "try", ":", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "6", ",", "command", "=", "0x42", ",", "data", "=", "(", "channel", ",", ")", ")", "except", "exc", ".", "IpmiException", "as", "ie", ":", "if", "ie", ".", "ipmicode", "==", "0xcc", ":", "# We have hit an invalid channel, move on to next", "# candidate", "continue", "else", ":", "raise", "chantype", "=", "ord", "(", "rsp", "[", "'data'", "]", "[", "1", "]", ")", "&", "0b1111111", "if", "chantype", "in", "(", "4", ",", "6", ")", ":", "try", ":", "# Some implementations denote an inactive channel", "# by refusing to do parameter retrieval", "if", "channel", "!=", "0xe", ":", "# skip checking if channel is active if we are", "# actively using the channel", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "(", "channel", ",", "5", ",", "0", ",", "0", ")", ")", "# If still here, the channel seems serviceable...", "# However some implementations may still have", "# ambiguous channel info, that will need to be", "# picked up on an OEM extension...", "self", ".", "_netchannel", "=", "ord", "(", "rsp", "[", "'data'", "]", "[", "0", "]", ")", "&", "0b1111", "break", "except", "exc", ".", "IpmiException", "as", "ie", ":", "# This means the attempt to fetch parameter 5 failed,", "# therefore move on to next candidate channel", "continue", "return", "self", ".", "_netchannel" ]
Get a reasonable 'default' network channel. When configuring/examining network configuration, it's desirable to find the correct channel. Here we run with the 'real' number of the current channel if it is a LAN channel, otherwise it evaluates all of the channels to find the first workable LAN channel and returns that
[ "Get", "a", "reasonable", "default", "network", "channel", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L953-L994
train
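get_network_channel, documented above, is mostly used implicitly by the other LAN-related methods, but it can also be called directly; the session arguments are assumed.

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    channel = ipmicmd.get_network_channel()
    print('detected LAN channel:', channel)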
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_alert_destination_count
def get_alert_destination_count(self, channel=None): """Get the number of supported alert destinations :param channel: Channel for alerts to be examined, defaults to current """ if channel is None: channel = self.get_network_channel() rqdata = (channel, 0x11, 0, 0) rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata) return ord(rsp['data'][1])
python
def get_alert_destination_count(self, channel=None): """Get the number of supported alert destinations :param channel: Channel for alerts to be examined, defaults to current """ if channel is None: channel = self.get_network_channel() rqdata = (channel, 0x11, 0, 0) rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata) return ord(rsp['data'][1])
[ "def", "get_alert_destination_count", "(", "self", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "rqdata", "=", "(", "channel", ",", "0x11", ",", "0", ",", "0", ")", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "rqdata", ")", "return", "ord", "(", "rsp", "[", "'data'", "]", "[", "1", "]", ")" ]
Get the number of supported alert destinations :param channel: Channel for alerts to be examined, defaults to current
[ "Get", "the", "number", "of", "supported", "alert", "destinations" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L996-L1005
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_alert_destination
def get_alert_destination(self, destination=0, channel=None): """Get alert destination Get a specified alert destination. Returns a dictionary of relevant configuration. The following keys may be present: acknowledge_required - Indicates whether the target expects an acknowledgement acknowledge_timeout - How long it will wait for an acknowledgment before retrying retries - How many attempts will be made to deliver the alert to this destination address_format - 'ipv4' or 'ipv6' address - The IP address of the target :param destination: The destination number. Defaults to 0 :param channel: The channel for alerting. Defaults to current channel """ destinfo = {} if channel is None: channel = self.get_network_channel() rqdata = (channel, 18, destination, 0) rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata) dtype, acktimeout, retries = struct.unpack('BBB', rsp['data'][2:]) destinfo['acknowledge_required'] = dtype & 0b10000000 == 0b10000000 # Ignore destination type for now... if destinfo['acknowledge_required']: destinfo['acknowledge_timeout'] = acktimeout destinfo['retries'] = retries rqdata = (channel, 19, destination, 0) rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata) if ord(rsp['data'][2]) & 0b11110000 == 0: destinfo['address_format'] = 'ipv4' destinfo['address'] = socket.inet_ntoa(rsp['data'][4:8]) elif ord(rsp['data'][2]) & 0b11110000 == 0b10000: destinfo['address_format'] = 'ipv6' destinfo['address'] = socket.inet_ntop(socket.AF_INET6, rsp['data'][3:]) return destinfo
python
def get_alert_destination(self, destination=0, channel=None): """Get alert destination Get a specified alert destination. Returns a dictionary of relevant configuration. The following keys may be present: acknowledge_required - Indicates whether the target expects an acknowledgement acknowledge_timeout - How long it will wait for an acknowledgment before retrying retries - How many attempts will be made to deliver the alert to this destination address_format - 'ipv4' or 'ipv6' address - The IP address of the target :param destination: The destination number. Defaults to 0 :param channel: The channel for alerting. Defaults to current channel """ destinfo = {} if channel is None: channel = self.get_network_channel() rqdata = (channel, 18, destination, 0) rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata) dtype, acktimeout, retries = struct.unpack('BBB', rsp['data'][2:]) destinfo['acknowledge_required'] = dtype & 0b10000000 == 0b10000000 # Ignore destination type for now... if destinfo['acknowledge_required']: destinfo['acknowledge_timeout'] = acktimeout destinfo['retries'] = retries rqdata = (channel, 19, destination, 0) rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata) if ord(rsp['data'][2]) & 0b11110000 == 0: destinfo['address_format'] = 'ipv4' destinfo['address'] = socket.inet_ntoa(rsp['data'][4:8]) elif ord(rsp['data'][2]) & 0b11110000 == 0b10000: destinfo['address_format'] = 'ipv6' destinfo['address'] = socket.inet_ntop(socket.AF_INET6, rsp['data'][3:]) return destinfo
[ "def", "get_alert_destination", "(", "self", ",", "destination", "=", "0", ",", "channel", "=", "None", ")", ":", "destinfo", "=", "{", "}", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "rqdata", "=", "(", "channel", ",", "18", ",", "destination", ",", "0", ")", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "rqdata", ")", "dtype", ",", "acktimeout", ",", "retries", "=", "struct", ".", "unpack", "(", "'BBB'", ",", "rsp", "[", "'data'", "]", "[", "2", ":", "]", ")", "destinfo", "[", "'acknowledge_required'", "]", "=", "dtype", "&", "0b10000000", "==", "0b10000000", "# Ignore destination type for now...", "if", "destinfo", "[", "'acknowledge_required'", "]", ":", "destinfo", "[", "'acknowledge_timeout'", "]", "=", "acktimeout", "destinfo", "[", "'retries'", "]", "=", "retries", "rqdata", "=", "(", "channel", ",", "19", ",", "destination", ",", "0", ")", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "rqdata", ")", "if", "ord", "(", "rsp", "[", "'data'", "]", "[", "2", "]", ")", "&", "0b11110000", "==", "0", ":", "destinfo", "[", "'address_format'", "]", "=", "'ipv4'", "destinfo", "[", "'address'", "]", "=", "socket", ".", "inet_ntoa", "(", "rsp", "[", "'data'", "]", "[", "4", ":", "8", "]", ")", "elif", "ord", "(", "rsp", "[", "'data'", "]", "[", "2", "]", ")", "&", "0b11110000", "==", "0b10000", ":", "destinfo", "[", "'address_format'", "]", "=", "'ipv6'", "destinfo", "[", "'address'", "]", "=", "socket", ".", "inet_ntop", "(", "socket", ".", "AF_INET6", ",", "rsp", "[", "'data'", "]", "[", "3", ":", "]", ")", "return", "destinfo" ]
Get alert destination Get a specified alert destination. Returns a dictionary of relevant configuration. The following keys may be present: acknowledge_required - Indicates whether the target expects an acknowledgement acknowledge_timeout - How long it will wait for an acknowledgment before retrying retries - How many attempts will be made to deliver the alert to this destination address_format - 'ipv4' or 'ipv6' address - The IP address of the target :param destination: The destination number. Defaults to 0 :param channel: The channel for alerting. Defaults to current channel
[ "Get", "alert", "destination" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1007-L1044
train
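A sketch combining get_alert_destination_count and get_alert_destination from the two records above; iterating destinations 0..count-1 is an illustrative assumption, the returned keys come from the record's code, and the session arguments are placeholders.

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    for dest in range(ipmicmd.get_alert_destination_count()):
        info = ipmicmd.get_alert_destination(destination=dest)
        print(dest, info.get('address_format'), info.get('address'))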
openstack/pyghmi
pyghmi/ipmi/command.py
Command.clear_alert_destination
def clear_alert_destination(self, destination=0, channel=None): """Clear an alert destination Remove the specified alert destination configuration. :param destination: The destination to clear (defaults to 0) """ if channel is None: channel = self.get_network_channel() self.set_alert_destination( '0.0.0.0', False, 0, 0, destination, channel)
python
def clear_alert_destination(self, destination=0, channel=None): """Clear an alert destination Remove the specified alert destination configuration. :param destination: The destination to clear (defaults to 0) """ if channel is None: channel = self.get_network_channel() self.set_alert_destination( '0.0.0.0', False, 0, 0, destination, channel)
[ "def", "clear_alert_destination", "(", "self", ",", "destination", "=", "0", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "self", ".", "set_alert_destination", "(", "'0.0.0.0'", ",", "False", ",", "0", ",", "0", ",", "destination", ",", "channel", ")" ]
Clear an alert destination Remove the specified alert destination configuration. :param destination: The destination to clear (defaults to 0)
[ "Clear", "an", "alert", "destination" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1046-L1056
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.set_alert_community
def set_alert_community(self, community, channel=None): """Set the community string for alerts This configures the string the BMC will use as the community string for PET alerts/traps. :param community: The community string :param channel: The LAN channel (defaults to auto detect) """ if channel is None: channel = self.get_network_channel() community = community.encode('utf-8') community += b'\x00' * (18 - len(community)) cmddata = bytearray((channel, 16)) cmddata += community self.xraw_command(netfn=0xc, command=1, data=cmddata)
python
def set_alert_community(self, community, channel=None): """Set the community string for alerts This configures the string the BMC will use as the community string for PET alerts/traps. :param community: The community string :param channel: The LAN channel (defaults to auto detect) """ if channel is None: channel = self.get_network_channel() community = community.encode('utf-8') community += b'\x00' * (18 - len(community)) cmddata = bytearray((channel, 16)) cmddata += community self.xraw_command(netfn=0xc, command=1, data=cmddata)
[ "def", "set_alert_community", "(", "self", ",", "community", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "community", "=", "community", ".", "encode", "(", "'utf-8'", ")", "community", "+=", "b'\\x00'", "*", "(", "18", "-", "len", "(", "community", ")", ")", "cmddata", "=", "bytearray", "(", "(", "channel", ",", "16", ")", ")", "cmddata", "+=", "community", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "cmddata", ")" ]
Set the community string for alerts This configures the string the BMC will use as the community string for PET alerts/traps. :param community: The community string :param channel: The LAN channel (defaults to auto detect)
[ "Set", "the", "community", "string", "for", "alerts" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1058-L1073
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command._assure_alert_policy
def _assure_alert_policy(self, channel, destination): """Make sure an alert policy exists Each policy will be a dict with the following keys: -'index' - The policy index number :returns: An iterable of currently configured alert policies """ # First we do a get PEF configuration parameters to get the count # of entries. We have no guarantee that the meaningful data will # be contiguous rsp = self.xraw_command(netfn=4, command=0x13, data=(8, 0, 0)) numpol = ord(rsp['data'][1]) desiredchandest = (channel << 4) | destination availpolnum = None for polnum in range(1, numpol + 1): currpol = self.xraw_command(netfn=4, command=0x13, data=(9, polnum, 0)) polidx, chandest = struct.unpack_from('>BB', currpol['data'][2:4]) if not polidx & 0b1000: if availpolnum is None: availpolnum = polnum continue if chandest == desiredchandest: return True # If chandest did not equal desiredchandest ever, we need to use a slot if availpolnum is None: raise Exception("No available alert policy entry") # 24 = 1 << 4 | 8 # 1 == set to which this rule belongs # 8 == 0b1000, in other words, enable this policy, always send to # indicated destination self.xraw_command(netfn=4, command=0x12, data=(9, availpolnum, 24, desiredchandest, 0))
python
def _assure_alert_policy(self, channel, destination): """Make sure an alert policy exists Each policy will be a dict with the following keys: -'index' - The policy index number :returns: An iterable of currently configured alert policies """ # First we do a get PEF configuration parameters to get the count # of entries. We have no guarantee that the meaningful data will # be contiguous rsp = self.xraw_command(netfn=4, command=0x13, data=(8, 0, 0)) numpol = ord(rsp['data'][1]) desiredchandest = (channel << 4) | destination availpolnum = None for polnum in range(1, numpol + 1): currpol = self.xraw_command(netfn=4, command=0x13, data=(9, polnum, 0)) polidx, chandest = struct.unpack_from('>BB', currpol['data'][2:4]) if not polidx & 0b1000: if availpolnum is None: availpolnum = polnum continue if chandest == desiredchandest: return True # If chandest did not equal desiredchandest ever, we need to use a slot if availpolnum is None: raise Exception("No available alert policy entry") # 24 = 1 << 4 | 8 # 1 == set to which this rule belongs # 8 == 0b1000, in other words, enable this policy, always send to # indicated destination self.xraw_command(netfn=4, command=0x12, data=(9, availpolnum, 24, desiredchandest, 0))
[ "def", "_assure_alert_policy", "(", "self", ",", "channel", ",", "destination", ")", ":", "# First we do a get PEF configuration parameters to get the count", "# of entries. We have no guarantee that the meaningful data will", "# be contiguous", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "4", ",", "command", "=", "0x13", ",", "data", "=", "(", "8", ",", "0", ",", "0", ")", ")", "numpol", "=", "ord", "(", "rsp", "[", "'data'", "]", "[", "1", "]", ")", "desiredchandest", "=", "(", "channel", "<<", "4", ")", "|", "destination", "availpolnum", "=", "None", "for", "polnum", "in", "range", "(", "1", ",", "numpol", "+", "1", ")", ":", "currpol", "=", "self", ".", "xraw_command", "(", "netfn", "=", "4", ",", "command", "=", "0x13", ",", "data", "=", "(", "9", ",", "polnum", ",", "0", ")", ")", "polidx", ",", "chandest", "=", "struct", ".", "unpack_from", "(", "'>BB'", ",", "currpol", "[", "'data'", "]", "[", "2", ":", "4", "]", ")", "if", "not", "polidx", "&", "0b1000", ":", "if", "availpolnum", "is", "None", ":", "availpolnum", "=", "polnum", "continue", "if", "chandest", "==", "desiredchandest", ":", "return", "True", "# If chandest did not equal desiredchandest ever, we need to use a slot", "if", "availpolnum", "is", "None", ":", "raise", "Exception", "(", "\"No available alert policy entry\"", ")", "# 24 = 1 << 4 | 8", "# 1 == set to which this rule belongs", "# 8 == 0b1000, in other words, enable this policy, always send to", "# indicated destination", "self", ".", "xraw_command", "(", "netfn", "=", "4", ",", "command", "=", "0x12", ",", "data", "=", "(", "9", ",", "availpolnum", ",", "24", ",", "desiredchandest", ",", "0", ")", ")" ]
Make sure an alert policy exists Each policy will be a dict with the following keys: -'index' - The policy index number :returns: An iterable of currently configured alert policies
[ "Make", "sure", "an", "alert", "policy", "exists" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1075-L1108
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_alert_community
def get_alert_community(self, channel=None): """Get the current community string for alerts Returns the community string that will be in SNMP traps from this BMC :param channel: The channel to get configuration for, autodetect by default :returns: The community string """ if channel is None: channel = self.get_network_channel() rsp = self.xraw_command(netfn=0xc, command=2, data=(channel, 16, 0, 0)) return rsp['data'][1:].partition('\x00')[0]
python
def get_alert_community(self, channel=None): """Get the current community string for alerts Returns the community string that will be in SNMP traps from this BMC :param channel: The channel to get configuration for, autodetect by default :returns: The community string """ if channel is None: channel = self.get_network_channel() rsp = self.xraw_command(netfn=0xc, command=2, data=(channel, 16, 0, 0)) return rsp['data'][1:].partition('\x00')[0]
[ "def", "get_alert_community", "(", "self", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "rsp", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "(", "channel", ",", "16", ",", "0", ",", "0", ")", ")", "return", "rsp", "[", "'data'", "]", "[", "1", ":", "]", ".", "partition", "(", "'\\x00'", ")", "[", "0", "]" ]
Get the current community string for alerts Returns the community string that will be in SNMP traps from this BMC :param channel: The channel to get configuration for, autodetect by default :returns: The community string
[ "Get", "the", "current", "community", "string", "for", "alerts" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1110-L1123
train
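A round-trip sketch for the community-string records above (set_alert_community and get_alert_community); the community value and session arguments are placeholders.

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    ipmicmd.set_alert_community('public')
    print(ipmicmd.get_alert_community())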
openstack/pyghmi
pyghmi/ipmi/command.py
Command.set_alert_destination
def set_alert_destination(self, ip=None, acknowledge_required=None, acknowledge_timeout=None, retries=None, destination=0, channel=None): """Configure one or more parameters of an alert destination If any parameter is 'None' (default), that parameter is left unchanged. Otherwise, all given parameters are set by this command. :param ip: IP address of the destination. It is currently expected that the calling code will handle any name lookup and present this data as IP address. :param acknowledge_required: Whether or not the target should expect an acknowledgement from this alert target. :param acknowledge_timeout: Time to wait for acknowledgement if enabled :param retries: How many times to attempt transmit of an alert. :param destination: Destination index, defaults to 0. :param channel: The channel to configure the alert on. Defaults to current """ if channel is None: channel = self.get_network_channel() if ip is not None: destdata = bytearray((channel, 19, destination)) try: parsedip = socket.inet_aton(ip) destdata.extend((0, 0)) destdata.extend(parsedip) destdata.extend(b'\x00\x00\x00\x00\x00\x00') except socket.error: if self._supports_standard_ipv6: parsedip = socket.inet_pton(socket.AF_INET6, ip) destdata.append(0b10000000) destdata.extend(parsedip) else: destdata = None self.oem_init() self._oem.set_alert_ipv6_destination(ip, destination, channel) if destdata: self.xraw_command(netfn=0xc, command=1, data=destdata) if (acknowledge_required is not None or retries is not None or acknowledge_timeout is not None): currtype = self.xraw_command(netfn=0xc, command=2, data=( channel, 18, destination, 0)) if currtype['data'][0] != b'\x11': raise exc.PyghmiException("Unknown parameter format") currtype = bytearray(currtype['data'][1:]) if acknowledge_required is not None: if acknowledge_required: currtype[1] |= 0b10000000 else: currtype[1] &= 0b1111111 if acknowledge_timeout is not None: currtype[2] = acknowledge_timeout if retries is not None: currtype[3] = retries destreq = bytearray((channel, 18)) destreq.extend(currtype) self.xraw_command(netfn=0xc, command=1, data=destreq) if not ip == '0.0.0.0': self._assure_alert_policy(channel, destination)
python
def set_alert_destination(self, ip=None, acknowledge_required=None, acknowledge_timeout=None, retries=None, destination=0, channel=None): """Configure one or more parameters of an alert destination If any parameter is 'None' (default), that parameter is left unchanged. Otherwise, all given parameters are set by this command. :param ip: IP address of the destination. It is currently expected that the calling code will handle any name lookup and present this data as IP address. :param acknowledge_required: Whether or not the target should expect an acknowledgement from this alert target. :param acknowledge_timeout: Time to wait for acknowledgement if enabled :param retries: How many times to attempt transmit of an alert. :param destination: Destination index, defaults to 0. :param channel: The channel to configure the alert on. Defaults to current """ if channel is None: channel = self.get_network_channel() if ip is not None: destdata = bytearray((channel, 19, destination)) try: parsedip = socket.inet_aton(ip) destdata.extend((0, 0)) destdata.extend(parsedip) destdata.extend(b'\x00\x00\x00\x00\x00\x00') except socket.error: if self._supports_standard_ipv6: parsedip = socket.inet_pton(socket.AF_INET6, ip) destdata.append(0b10000000) destdata.extend(parsedip) else: destdata = None self.oem_init() self._oem.set_alert_ipv6_destination(ip, destination, channel) if destdata: self.xraw_command(netfn=0xc, command=1, data=destdata) if (acknowledge_required is not None or retries is not None or acknowledge_timeout is not None): currtype = self.xraw_command(netfn=0xc, command=2, data=( channel, 18, destination, 0)) if currtype['data'][0] != b'\x11': raise exc.PyghmiException("Unknown parameter format") currtype = bytearray(currtype['data'][1:]) if acknowledge_required is not None: if acknowledge_required: currtype[1] |= 0b10000000 else: currtype[1] &= 0b1111111 if acknowledge_timeout is not None: currtype[2] = acknowledge_timeout if retries is not None: currtype[3] = retries destreq = bytearray((channel, 18)) destreq.extend(currtype) self.xraw_command(netfn=0xc, command=1, data=destreq) if not ip == '0.0.0.0': self._assure_alert_policy(channel, destination)
[ "def", "set_alert_destination", "(", "self", ",", "ip", "=", "None", ",", "acknowledge_required", "=", "None", ",", "acknowledge_timeout", "=", "None", ",", "retries", "=", "None", ",", "destination", "=", "0", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "if", "ip", "is", "not", "None", ":", "destdata", "=", "bytearray", "(", "(", "channel", ",", "19", ",", "destination", ")", ")", "try", ":", "parsedip", "=", "socket", ".", "inet_aton", "(", "ip", ")", "destdata", ".", "extend", "(", "(", "0", ",", "0", ")", ")", "destdata", ".", "extend", "(", "parsedip", ")", "destdata", ".", "extend", "(", "b'\\x00\\x00\\x00\\x00\\x00\\x00'", ")", "except", "socket", ".", "error", ":", "if", "self", ".", "_supports_standard_ipv6", ":", "parsedip", "=", "socket", ".", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "ip", ")", "destdata", ".", "append", "(", "0b10000000", ")", "destdata", ".", "extend", "(", "parsedip", ")", "else", ":", "destdata", "=", "None", "self", ".", "oem_init", "(", ")", "self", ".", "_oem", ".", "set_alert_ipv6_destination", "(", "ip", ",", "destination", ",", "channel", ")", "if", "destdata", ":", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "destdata", ")", "if", "(", "acknowledge_required", "is", "not", "None", "or", "retries", "is", "not", "None", "or", "acknowledge_timeout", "is", "not", "None", ")", ":", "currtype", "=", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "2", ",", "data", "=", "(", "channel", ",", "18", ",", "destination", ",", "0", ")", ")", "if", "currtype", "[", "'data'", "]", "[", "0", "]", "!=", "b'\\x11'", ":", "raise", "exc", ".", "PyghmiException", "(", "\"Unknown parameter format\"", ")", "currtype", "=", "bytearray", "(", "currtype", "[", "'data'", "]", "[", "1", ":", "]", ")", "if", "acknowledge_required", "is", "not", "None", ":", "if", "acknowledge_required", ":", "currtype", "[", "1", "]", "|=", "0b10000000", "else", ":", "currtype", "[", "1", "]", "&=", "0b1111111", "if", "acknowledge_timeout", "is", "not", "None", ":", "currtype", "[", "2", "]", "=", "acknowledge_timeout", "if", "retries", "is", "not", "None", ":", "currtype", "[", "3", "]", "=", "retries", "destreq", "=", "bytearray", "(", "(", "channel", ",", "18", ")", ")", "destreq", ".", "extend", "(", "currtype", ")", "self", ".", "xraw_command", "(", "netfn", "=", "0xc", ",", "command", "=", "1", ",", "data", "=", "destreq", ")", "if", "not", "ip", "==", "'0.0.0.0'", ":", "self", ".", "_assure_alert_policy", "(", "channel", ",", "destination", ")" ]
Configure one or more parameters of an alert destination If any parameter is 'None' (default), that parameter is left unchanged. Otherwise, all given parameters are set by this command. :param ip: IP address of the destination. It is currently expected that the calling code will handle any name lookup and present this data as IP address. :param acknowledge_required: Whether or not the target should expect an acknowledgement from this alert target. :param acknowledge_timeout: Time to wait for acknowledgement if enabled :param retries: How many times to attempt transmit of an alert. :param destination: Destination index, defaults to 0. :param channel: The channel to configure the alert on. Defaults to current
[ "Configure", "one", "or", "more", "parameters", "of", "an", "alert", "destination" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1137-L1197
train
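A sketch for set_alert_destination above, paired with the clear_alert_destination record further up; the target address and session arguments are placeholders.

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    # Configure destination 0 to send alerts to a placeholder address.
    ipmicmd.set_alert_destination(ip='192.0.2.50', acknowledge_required=False,
                                  retries=2, destination=0)
    # Later, remove the configuration again.
    ipmicmd.clear_alert_destination(destination=0)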
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_hostname
def get_hostname(self): """Get the hostname used by the BMC in various contexts This can vary somewhat in interpretation, but generally speaking this should be the name that shows up on UIs and in DHCP requests and DNS registration requests, as applicable. :return: current hostname """ self.oem_init() try: return self._oem.get_hostname() except exc.UnsupportedFunctionality: # Use the DCMI MCI field as a fallback, since it's the closest # thing in the IPMI Spec for this return self.get_mci()
python
def get_hostname(self): """Get the hostname used by the BMC in various contexts This can vary somewhat in interpretation, but generally speaking this should be the name that shows up on UIs and in DHCP requests and DNS registration requests, as applicable. :return: current hostname """ self.oem_init() try: return self._oem.get_hostname() except exc.UnsupportedFunctionality: # Use the DCMI MCI field as a fallback, since it's the closest # thing in the IPMI Spec for this return self.get_mci()
[ "def", "get_hostname", "(", "self", ")", ":", "self", ".", "oem_init", "(", ")", "try", ":", "return", "self", ".", "_oem", ".", "get_hostname", "(", ")", "except", "exc", ".", "UnsupportedFunctionality", ":", "# Use the DCMI MCI field as a fallback, since it's the closest", "# thing in the IPMI Spec for this", "return", "self", ".", "get_mci", "(", ")" ]
Get the hostname used by the BMC in various contexts This can vary somewhat in interpretation, but generally speaking this should be the name that shows up on UIs and in DHCP requests and DNS registration requests, as applicable. :return: current hostname
[ "Get", "the", "hostname", "used", "by", "the", "BMC", "in", "various", "contexts" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1199-L1214
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.set_hostname
def set_hostname(self, hostname): """Set the hostname to be used by the BMC in various contexts. See get_hostname for details :param hostname: The hostname to set :return: Nothing """ self.oem_init() try: return self._oem.set_hostname(hostname) except exc.UnsupportedFunctionality: return self.set_mci(hostname)
python
def set_hostname(self, hostname): """Set the hostname to be used by the BMC in various contexts. See get_hostname for details :param hostname: The hostname to set :return: Nothing """ self.oem_init() try: return self._oem.set_hostname(hostname) except exc.UnsupportedFunctionality: return self.set_mci(hostname)
[ "def", "set_hostname", "(", "self", ",", "hostname", ")", ":", "self", ".", "oem_init", "(", ")", "try", ":", "return", "self", ".", "_oem", ".", "set_hostname", "(", "hostname", ")", "except", "exc", ".", "UnsupportedFunctionality", ":", "return", "self", ".", "set_mci", "(", "hostname", ")" ]
Set the hostname to be used by the BMC in various contexts. See get_hostname for details :param hostname: The hostname to set :return: Nothing
[ "Set", "the", "hostname", "to", "be", "used", "by", "the", "BMC", "in", "various", "contexts", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1223-L1235
train
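A sketch for the get_hostname and set_hostname records above; the hostname and session arguments are placeholders.

    from pyghmi.ipmi import command

    ipmicmd = command.Command(bmc='192.0.2.1', userid='admin', password='changeme')  # assumed setup
    ipmicmd.set_hostname('node01')  # placeholder name
    print(ipmicmd.get_hostname())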
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_channel_access
def get_channel_access(self, channel=None, read_mode='volatile'): """Get channel access :param channel: number [1:7] :param read_mode: non_volatile = get non-volatile Channel Access volatile = get present volatile (active) setting of Channel Access :return: A Python dict with the following keys/values: { - alerting: - per_msg_auth: - user_level_auth: - access_mode:{ 0: 'disabled', 1: 'pre_boot', 2: 'always', 3: 'shared' } - privilege_level: { 1: 'callback', 2: 'user', 3: 'operator', 4: 'administrator', 5: 'proprietary', } } """ if channel is None: channel = self.get_network_channel() data = [] data.append(channel & 0b00001111) b = 0 read_modes = { 'non_volatile': 1, 'volatile': 2, } b |= (read_modes[read_mode] << 6) & 0b11000000 data.append(b) response = self.raw_command(netfn=0x06, command=0x41, data=data) if 'error' in response: raise Exception(response['error']) data = response['data'] if len(data) != 2: raise Exception('expecting 2 data bytes') r = {} r['alerting'] = data[0] & 0b10000000 > 0 r['per_msg_auth'] = data[0] & 0b01000000 > 0 r['user_level_auth'] = data[0] & 0b00100000 > 0 access_modes = { 0: 'disabled', 1: 'pre_boot', 2: 'always', 3: 'shared' } r['access_mode'] = access_modes[data[0] & 0b00000011] privilege_levels = { 0: 'reserved', 1: 'callback', 2: 'user', 3: 'operator', 4: 'administrator', 5: 'proprietary', # 0x0F: 'no_access' } r['privilege_level'] = privilege_levels[data[1] & 0b00001111] return r
python
def get_channel_access(self, channel=None, read_mode='volatile'): """Get channel access :param channel: number [1:7] :param read_mode: non_volatile = get non-volatile Channel Access volatile = get present volatile (active) setting of Channel Access :return: A Python dict with the following keys/values: { - alerting: - per_msg_auth: - user_level_auth: - access_mode:{ 0: 'disabled', 1: 'pre_boot', 2: 'always', 3: 'shared' } - privilege_level: { 1: 'callback', 2: 'user', 3: 'operator', 4: 'administrator', 5: 'proprietary', } } """ if channel is None: channel = self.get_network_channel() data = [] data.append(channel & 0b00001111) b = 0 read_modes = { 'non_volatile': 1, 'volatile': 2, } b |= (read_modes[read_mode] << 6) & 0b11000000 data.append(b) response = self.raw_command(netfn=0x06, command=0x41, data=data) if 'error' in response: raise Exception(response['error']) data = response['data'] if len(data) != 2: raise Exception('expecting 2 data bytes') r = {} r['alerting'] = data[0] & 0b10000000 > 0 r['per_msg_auth'] = data[0] & 0b01000000 > 0 r['user_level_auth'] = data[0] & 0b00100000 > 0 access_modes = { 0: 'disabled', 1: 'pre_boot', 2: 'always', 3: 'shared' } r['access_mode'] = access_modes[data[0] & 0b00000011] privilege_levels = { 0: 'reserved', 1: 'callback', 2: 'user', 3: 'operator', 4: 'administrator', 5: 'proprietary', # 0x0F: 'no_access' } r['privilege_level'] = privilege_levels[data[1] & 0b00001111] return r
[ "def", "get_channel_access", "(", "self", ",", "channel", "=", "None", ",", "read_mode", "=", "'volatile'", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "data", "=", "[", "]", "data", ".", "append", "(", "channel", "&", "0b00001111", ")", "b", "=", "0", "read_modes", "=", "{", "'non_volatile'", ":", "1", ",", "'volatile'", ":", "2", ",", "}", "b", "|=", "(", "read_modes", "[", "read_mode", "]", "<<", "6", ")", "&", "0b11000000", "data", ".", "append", "(", "b", ")", "response", "=", "self", ".", "raw_command", "(", "netfn", "=", "0x06", ",", "command", "=", "0x41", ",", "data", "=", "data", ")", "if", "'error'", "in", "response", ":", "raise", "Exception", "(", "response", "[", "'error'", "]", ")", "data", "=", "response", "[", "'data'", "]", "if", "len", "(", "data", ")", "!=", "2", ":", "raise", "Exception", "(", "'expecting 2 data bytes'", ")", "r", "=", "{", "}", "r", "[", "'alerting'", "]", "=", "data", "[", "0", "]", "&", "0b10000000", ">", "0", "r", "[", "'per_msg_auth'", "]", "=", "data", "[", "0", "]", "&", "0b01000000", ">", "0", "r", "[", "'user_level_auth'", "]", "=", "data", "[", "0", "]", "&", "0b00100000", ">", "0", "access_modes", "=", "{", "0", ":", "'disabled'", ",", "1", ":", "'pre_boot'", ",", "2", ":", "'always'", ",", "3", ":", "'shared'", "}", "r", "[", "'access_mode'", "]", "=", "access_modes", "[", "data", "[", "0", "]", "&", "0b00000011", "]", "privilege_levels", "=", "{", "0", ":", "'reserved'", ",", "1", ":", "'callback'", ",", "2", ":", "'user'", ",", "3", ":", "'operator'", ",", "4", ":", "'administrator'", ",", "5", ":", "'proprietary'", ",", "# 0x0F: 'no_access'", "}", "r", "[", "'privilege_level'", "]", "=", "privilege_levels", "[", "data", "[", "1", "]", "&", "0b00001111", "]", "return", "r" ]
Get channel access :param channel: number [1:7] :param read_mode: non_volatile = get non-volatile Channel Access volatile = get present volatile (active) setting of Channel Access :return: A Python dict with the following keys/values: { - alerting: - per_msg_auth: - user_level_auth: - access_mode:{ 0: 'disabled', 1: 'pre_boot', 2: 'always', 3: 'shared' } - privilege_level: { 1: 'callback', 2: 'user', 3: 'operator', 4: 'administrator', 5: 'proprietary', } }
[ "Get", "channel", "access" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1394-L1463
train
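A hedged usage sketch for get_channel_access above: `ipmicmd` is assumed to be an already-connected pyghmi.ipmi.command.Command instance, and the dictionary keys come straight from the record's return description.

def describe_channel_access(ipmicmd, channel=None):
    access = ipmicmd.get_channel_access(channel=channel, read_mode='volatile')
    # every key referenced below is documented in the record above
    return ('mode={access_mode} priv={privilege_level} alerting={alerting} '
            'per_msg_auth={per_msg_auth} '
            'user_level_auth={user_level_auth}'.format(**access))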
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_channel_info
def get_channel_info(self, channel=None): """Get channel info :param channel: number [1:7] :return: session_support: no_session: channel is session-less single: channel is single-session multi: channel is multi-session auto: channel is session-based (channel could alternate between single- and multi-session operation, as can occur with a serial/modem channel that supports connection mode auto-detect) """ if channel is None: channel = self.get_network_channel() data = [] data.append(channel & 0b00001111) response = self.raw_command(netfn=0x06, command=0x42, data=data) if 'error' in response: raise Exception(response['error']) data = response['data'] if len(data) != 9: raise Exception('expecting 10 data bytes got: {0}'.format(data)) r = {} r['Actual channel'] = data[0] & 0b00000111 channel_medium_types = { 0: 'reserved', 1: 'IPMB', 2: 'ICMB v1.0', 3: 'ICMB v0.9', 4: '802.3 LAN', 5: 'Asynch. Serial/Modem (RS-232)', 6: 'Other LAN', 7: 'PCI SMBus', 8: 'SMBus v1.0/1.1', 9: 'SMBus v2.0', 0x0a: 'reserved for USB 1.x', 0x0b: 'reserved for USB 2.x', 0x0c: 'System Interface (KCS, SMIC, or BT)', # 60h-7Fh: OEM # all other reserved } t = data[1] & 0b01111111 if t in channel_medium_types: r['Channel Medium type'] = channel_medium_types[t] else: r['Channel Medium type'] = 'OEM {:02X}'.format(t) r['5-bit Channel IPMI Messaging Protocol Type'] = data[2] & 0b00001111 session_supports = { 0: 'no_session', 1: 'single', 2: 'multi', 3: 'auto' } r['session_support'] = session_supports[(data[3] & 0b11000000) >> 6] r['active_session_count'] = data[3] & 0b00111111 r['Vendor ID'] = [data[4], data[5], data[6]] r['Auxiliary Channel Info'] = [data[7], data[8]] return r
python
def get_channel_info(self, channel=None): """Get channel info :param channel: number [1:7] :return: session_support: no_session: channel is session-less single: channel is single-session multi: channel is multi-session auto: channel is session-based (channel could alternate between single- and multi-session operation, as can occur with a serial/modem channel that supports connection mode auto-detect) """ if channel is None: channel = self.get_network_channel() data = [] data.append(channel & 0b00001111) response = self.raw_command(netfn=0x06, command=0x42, data=data) if 'error' in response: raise Exception(response['error']) data = response['data'] if len(data) != 9: raise Exception('expecting 10 data bytes got: {0}'.format(data)) r = {} r['Actual channel'] = data[0] & 0b00000111 channel_medium_types = { 0: 'reserved', 1: 'IPMB', 2: 'ICMB v1.0', 3: 'ICMB v0.9', 4: '802.3 LAN', 5: 'Asynch. Serial/Modem (RS-232)', 6: 'Other LAN', 7: 'PCI SMBus', 8: 'SMBus v1.0/1.1', 9: 'SMBus v2.0', 0x0a: 'reserved for USB 1.x', 0x0b: 'reserved for USB 2.x', 0x0c: 'System Interface (KCS, SMIC, or BT)', # 60h-7Fh: OEM # all other reserved } t = data[1] & 0b01111111 if t in channel_medium_types: r['Channel Medium type'] = channel_medium_types[t] else: r['Channel Medium type'] = 'OEM {:02X}'.format(t) r['5-bit Channel IPMI Messaging Protocol Type'] = data[2] & 0b00001111 session_supports = { 0: 'no_session', 1: 'single', 2: 'multi', 3: 'auto' } r['session_support'] = session_supports[(data[3] & 0b11000000) >> 6] r['active_session_count'] = data[3] & 0b00111111 r['Vendor ID'] = [data[4], data[5], data[6]] r['Auxiliary Channel Info'] = [data[7], data[8]] return r
[ "def", "get_channel_info", "(", "self", ",", "channel", "=", "None", ")", ":", "if", "channel", "is", "None", ":", "channel", "=", "self", ".", "get_network_channel", "(", ")", "data", "=", "[", "]", "data", ".", "append", "(", "channel", "&", "0b00001111", ")", "response", "=", "self", ".", "raw_command", "(", "netfn", "=", "0x06", ",", "command", "=", "0x42", ",", "data", "=", "data", ")", "if", "'error'", "in", "response", ":", "raise", "Exception", "(", "response", "[", "'error'", "]", ")", "data", "=", "response", "[", "'data'", "]", "if", "len", "(", "data", ")", "!=", "9", ":", "raise", "Exception", "(", "'expecting 10 data bytes got: {0}'", ".", "format", "(", "data", ")", ")", "r", "=", "{", "}", "r", "[", "'Actual channel'", "]", "=", "data", "[", "0", "]", "&", "0b00000111", "channel_medium_types", "=", "{", "0", ":", "'reserved'", ",", "1", ":", "'IPMB'", ",", "2", ":", "'ICMB v1.0'", ",", "3", ":", "'ICMB v0.9'", ",", "4", ":", "'802.3 LAN'", ",", "5", ":", "'Asynch. Serial/Modem (RS-232)'", ",", "6", ":", "'Other LAN'", ",", "7", ":", "'PCI SMBus'", ",", "8", ":", "'SMBus v1.0/1.1'", ",", "9", ":", "'SMBus v2.0'", ",", "0x0a", ":", "'reserved for USB 1.x'", ",", "0x0b", ":", "'reserved for USB 2.x'", ",", "0x0c", ":", "'System Interface (KCS, SMIC, or BT)'", ",", "# 60h-7Fh: OEM", "# all other reserved", "}", "t", "=", "data", "[", "1", "]", "&", "0b01111111", "if", "t", "in", "channel_medium_types", ":", "r", "[", "'Channel Medium type'", "]", "=", "channel_medium_types", "[", "t", "]", "else", ":", "r", "[", "'Channel Medium type'", "]", "=", "'OEM {:02X}'", ".", "format", "(", "t", ")", "r", "[", "'5-bit Channel IPMI Messaging Protocol Type'", "]", "=", "data", "[", "2", "]", "&", "0b00001111", "session_supports", "=", "{", "0", ":", "'no_session'", ",", "1", ":", "'single'", ",", "2", ":", "'multi'", ",", "3", ":", "'auto'", "}", "r", "[", "'session_support'", "]", "=", "session_supports", "[", "(", "data", "[", "3", "]", "&", "0b11000000", ")", ">>", "6", "]", "r", "[", "'active_session_count'", "]", "=", "data", "[", "3", "]", "&", "0b00111111", "r", "[", "'Vendor ID'", "]", "=", "[", "data", "[", "4", "]", ",", "data", "[", "5", "]", ",", "data", "[", "6", "]", "]", "r", "[", "'Auxiliary Channel Info'", "]", "=", "[", "data", "[", "7", "]", ",", "data", "[", "8", "]", "]", "return", "r" ]
Get channel info :param channel: number [1:7] :return: session_support: no_session: channel is session-less single: channel is single-session multi: channel is multi-session auto: channel is session-based (channel could alternate between single- and multi-session operation, as can occur with a serial/modem channel that supports connection mode auto-detect)
[ "Get", "channel", "info" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1465-L1524
train
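The session-support decoding in get_channel_info above packs two fields into one response byte; the stand-alone sketch below (an illustration, not pyghmi code) applies the same bit arithmetic to a sample value.

def decode_session_byte(byte3):
    # bits [7:6] select the session support mode, bits [5:0] carry the
    # active session count, mirroring the masks used in the record above
    session_supports = {0: 'no_session', 1: 'single', 2: 'multi', 3: 'auto'}
    return {'session_support': session_supports[(byte3 & 0b11000000) >> 6],
            'active_session_count': byte3 & 0b00111111}

# 0b10000101 -> multi-session capable with 5 sessions currently active
assert decode_session_byte(0b10000101) == {'session_support': 'multi',
                                           'active_session_count': 5}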
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_firmware
def get_firmware(self, components=()): """Retrieve OEM Firmware information """ self.oem_init() mcinfo = self.xraw_command(netfn=6, command=1) bmcver = '{0}.{1}'.format( ord(mcinfo['data'][2]), hex(ord(mcinfo['data'][3]))[2:]) return self._oem.get_oem_firmware(bmcver, components)
python
def get_firmware(self, components=()): """Retrieve OEM Firmware information """ self.oem_init() mcinfo = self.xraw_command(netfn=6, command=1) bmcver = '{0}.{1}'.format( ord(mcinfo['data'][2]), hex(ord(mcinfo['data'][3]))[2:]) return self._oem.get_oem_firmware(bmcver, components)
[ "def", "get_firmware", "(", "self", ",", "components", "=", "(", ")", ")", ":", "self", ".", "oem_init", "(", ")", "mcinfo", "=", "self", ".", "xraw_command", "(", "netfn", "=", "6", ",", "command", "=", "1", ")", "bmcver", "=", "'{0}.{1}'", ".", "format", "(", "ord", "(", "mcinfo", "[", "'data'", "]", "[", "2", "]", ")", ",", "hex", "(", "ord", "(", "mcinfo", "[", "'data'", "]", "[", "3", "]", ")", ")", "[", "2", ":", "]", ")", "return", "self", ".", "_oem", ".", "get_oem_firmware", "(", "bmcver", ",", "components", ")" ]
Retrieve OEM Firmware information
[ "Retrieve", "OEM", "Firmware", "information" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1886-L1893
train
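get_firmware above builds its BMC version string from two Get Device ID response bytes (data indices 2 and 3). The helper below re-creates that formatting in isolation; it is only an illustration of the string assembly, and the BCD reading of the minor byte is an assumption consistent with the record's use of hex().

def format_bmcver(major_byte, minor_byte):
    # major revision printed as decimal, minor revision as its hex digits
    return '{0}.{1}'.format(major_byte, hex(minor_byte)[2:])

assert format_bmcver(3, 0x21) == '3.21'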
openstack/pyghmi
pyghmi/ipmi/command.py
Command.update_firmware
def update_firmware(self, file, data=None, progress=None, bank=None): """Send file to BMC to perform firmware update :param filename: The filename to upload to the target BMC :param data: The payload of the firmware. Default is to read from specified filename. :param progress: A callback that will be given a dict describing update process. Provide if :param bank: Indicate a target 'bank' of firmware if supported """ self.oem_init() if progress is None: progress = lambda x: True return self._oem.update_firmware(file, data, progress, bank)
python
def update_firmware(self, file, data=None, progress=None, bank=None): """Send file to BMC to perform firmware update :param filename: The filename to upload to the target BMC :param data: The payload of the firmware. Default is to read from specified filename. :param progress: A callback that will be given a dict describing update process. Provide if :param bank: Indicate a target 'bank' of firmware if supported """ self.oem_init() if progress is None: progress = lambda x: True return self._oem.update_firmware(file, data, progress, bank)
[ "def", "update_firmware", "(", "self", ",", "file", ",", "data", "=", "None", ",", "progress", "=", "None", ",", "bank", "=", "None", ")", ":", "self", ".", "oem_init", "(", ")", "if", "progress", "is", "None", ":", "progress", "=", "lambda", "x", ":", "True", "return", "self", ".", "_oem", ".", "update_firmware", "(", "file", ",", "data", ",", "progress", ",", "bank", ")" ]
Send file to BMC to perform firmware update :param filename: The filename to upload to the target BMC :param data: The payload of the firmware. Default is to read from specified filename. :param progress: A callback that will be given a dict describing update process. Provide if :param bank: Indicate a target 'bank' of firmware if supported
[ "Send", "file", "to", "BMC", "to", "perform", "firmware", "update" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1936-L1949
train
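A sketch of a progress callback for update_firmware above. The keys of the progress dict are OEM-specific and not documented in the record, so the callback simply reports whatever it receives; `ipmicmd` is again an assumed, already-connected Command instance.

def log_progress(progress):
    # called by the OEM handler with a dict describing update progress
    print('firmware update progress: {0!r}'.format(progress))
    return True

def flash(ipmicmd, filename):
    return ipmicmd.update_firmware(filename, progress=log_progress)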
openstack/pyghmi
pyghmi/ipmi/command.py
Command.attach_remote_media
def attach_remote_media(self, url, username=None, password=None): """Attach remote media by url Given a url, attach remote media (cd/usb image) to the target system. :param url: URL to indicate where to find image (protocol support varies by BMC) :param username: Username for endpoint to use when accessing the URL. If applicable, 'domain' would be indicated by '@' or '\' syntax. :param password: Password for endpoint to use when accessing the URL. """ self.oem_init() return self._oem.attach_remote_media(url, username, password)
python
def attach_remote_media(self, url, username=None, password=None): """Attach remote media by url Given a url, attach remote media (cd/usb image) to the target system. :param url: URL to indicate where to find image (protocol support varies by BMC) :param username: Username for endpoint to use when accessing the URL. If applicable, 'domain' would be indicated by '@' or '\' syntax. :param password: Password for endpoint to use when accessing the URL. """ self.oem_init() return self._oem.attach_remote_media(url, username, password)
[ "def", "attach_remote_media", "(", "self", ",", "url", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "self", ".", "oem_init", "(", ")", "return", "self", ".", "_oem", ".", "attach_remote_media", "(", "url", ",", "username", ",", "password", ")" ]
Attach remote media by url Given a url, attach remote media (cd/usb image) to the target system. :param url: URL to indicate where to find image (protocol support varies by BMC) :param username: Username for endpoint to use when accessing the URL. If applicable, 'domain' would be indicated by '@' or '\' syntax. :param password: Password for endpoint to use when accessing the URL.
[ "Attach", "remote", "media", "by", "url" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1951-L1964
train
openstack/pyghmi
pyghmi/ipmi/command.py
Command.upload_media
def upload_media(self, filename, progress=None): """Upload a file to be hosted on the target BMC This will upload the specified data to the BMC so that it will make it available to the system as an emulated USB device. :param filename: The filename to use, the basename of the parameter will be given to the bmc. :param filename: Optional callback for progress updates """ self.oem_init() return self._oem.upload_media(filename, progress)
python
def upload_media(self, filename, progress=None): """Upload a file to be hosted on the target BMC This will upload the specified data to the BMC so that it will make it available to the system as an emulated USB device. :param filename: The filename to use, the basename of the parameter will be given to the bmc. :param filename: Optional callback for progress updates """ self.oem_init() return self._oem.upload_media(filename, progress)
[ "def", "upload_media", "(", "self", ",", "filename", ",", "progress", "=", "None", ")", ":", "self", ".", "oem_init", "(", ")", "return", "self", ".", "_oem", ".", "upload_media", "(", "filename", ",", "progress", ")" ]
Upload a file to be hosted on the target BMC This will upload the specified data to the BMC so that it will make it available to the system as an emulated USB device. :param filename: The filename to use, the basename of the parameter will be given to the bmc. :param filename: Optional callback for progress updates
[ "Upload", "a", "file", "to", "be", "hosted", "on", "the", "target", "BMC" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L1970-L1982
train
openstack/pyghmi
pyghmi/ipmi/oem/generic.py
OEMHandler.process_event
def process_event(self, event, ipmicmd, seldata): """Modify an event according with OEM understanding. Given an event, allow an OEM module to augment it. For example, event data fields can have OEM bytes. Other times an OEM may wish to apply some transform to some field to suit their conventions. """ event['oem_handler'] = None evdata = event['event_data_bytes'] if evdata[0] & 0b11000000 == 0b10000000: event['oem_byte2'] = evdata[1] if evdata[0] & 0b110000 == 0b100000: event['oem_byte3'] = evdata[2]
python
def process_event(self, event, ipmicmd, seldata): """Modify an event according with OEM understanding. Given an event, allow an OEM module to augment it. For example, event data fields can have OEM bytes. Other times an OEM may wish to apply some transform to some field to suit their conventions. """ event['oem_handler'] = None evdata = event['event_data_bytes'] if evdata[0] & 0b11000000 == 0b10000000: event['oem_byte2'] = evdata[1] if evdata[0] & 0b110000 == 0b100000: event['oem_byte3'] = evdata[2]
[ "def", "process_event", "(", "self", ",", "event", ",", "ipmicmd", ",", "seldata", ")", ":", "event", "[", "'oem_handler'", "]", "=", "None", "evdata", "=", "event", "[", "'event_data_bytes'", "]", "if", "evdata", "[", "0", "]", "&", "0b11000000", "==", "0b10000000", ":", "event", "[", "'oem_byte2'", "]", "=", "evdata", "[", "1", "]", "if", "evdata", "[", "0", "]", "&", "0b110000", "==", "0b100000", ":", "event", "[", "'oem_byte3'", "]", "=", "evdata", "[", "2", "]" ]
Modify an event according with OEM understanding. Given an event, allow an OEM module to augment it. For example, event data fields can have OEM bytes. Other times an OEM may wish to apply some transform to some field to suit their conventions.
[ "Modify", "an", "event", "according", "with", "OEM", "understanding", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/generic.py#L45-L57
train
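process_event above inspects the event-data qualifier bits to decide whether bytes 2 and 3 are OEM-defined. The stand-alone helper below applies the same masks to a sample SEL event-data triple; it is illustrative only.

def extract_oem_bytes(event_data_bytes):
    oem = {}
    # bits [7:6] == 10b flag byte 2 as OEM data; bits [5:4] == 10b do the
    # same for byte 3, matching the masks in the record above
    if event_data_bytes[0] & 0b11000000 == 0b10000000:
        oem['oem_byte2'] = event_data_bytes[1]
    if event_data_bytes[0] & 0b00110000 == 0b00100000:
        oem['oem_byte3'] = event_data_bytes[2]
    return oem

# 0xA0 == 0b10100000: both trailing bytes are flagged as OEM data
assert extract_oem_bytes([0xA0, 0x12, 0x34]) == {'oem_byte2': 0x12,
                                                 'oem_byte3': 0x34}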
openstack/pyghmi
pyghmi/ipmi/console.py
Console._got_session
def _got_session(self, response): """Private function to navigate SOL payload activation """ if 'error' in response: self._print_error(response['error']) return if not self.ipmi_session: self.callgotsession = response return # Send activate sol payload directive # netfn= 6 (application) # command = 0x48 (activate payload) # data = (1, sol payload type # 1, first instance # 0b11000000, -encrypt, authenticate, # disable serial/modem alerts, CTS fine # 0, 0, 0 reserved response = self.ipmi_session.raw_command(netfn=0x6, command=0x48, data=(1, 1, 192, 0, 0, 0)) # given that these are specific to the command, # it's probably best if one can grep the error # here instead of in constants sol_activate_codes = { 0x81: 'SOL is disabled', 0x82: 'Maximum SOL session count reached', 0x83: 'Cannot activate payload with encryption', 0x84: 'Cannot activate payload without encryption', } if 'code' in response and response['code']: if response['code'] in constants.ipmi_completion_codes: self._print_error( constants.ipmi_completion_codes[response['code']]) return elif response['code'] == 0x80: if self.force_session and not self.retriedpayload: self.retriedpayload = 1 sessrsp = self.ipmi_session.raw_command( netfn=0x6, command=0x49, data=(1, 1, 0, 0, 0, 0)) self._got_session(sessrsp) return else: self._print_error('SOL Session active for another client') return elif response['code'] in sol_activate_codes: self._print_error(sol_activate_codes[response['code']]) return else: self._print_error( 'SOL encountered Unrecognized error code %d' % response['code']) return if 'error' in response: self._print_error(response['error']) return self.activated = True # data[0:3] is reserved except for the test mode, which we don't use data = response['data'] self.maxoutcount = (data[5] << 8) + data[4] # BMC tells us this is the maximum allowed size # data[6:7] is the promise of how small packets are going to be, but we # don't have any reason to worry about it # some BMCs disagree on the endianness, so do both valid_ports = (self.port, struct.unpack( '<H', struct.pack('>H', self.port))[0]) if (data[8] + (data[9] << 8)) not in valid_ports: # TODO(jbjohnso): support atypical SOL port number raise NotImplementedError("Non-standard SOL Port Number") # ignore data[10:11] for now, the vlan detail, shouldn't matter to this # code anyway... # NOTE(jbjohnso): # We will use a special purpose keepalive if self.ipmi_session.sol_handler is not None: # If there is erroneously another SOL handler already, notify # it of newly established session self.ipmi_session.sol_handler({'error': 'Session Disconnected'}) self.keepaliveid = self.ipmi_session.register_keepalive( cmd={'netfn': 6, 'command': 0x4b, 'data': (1, 1)}, callback=self._got_payload_instance_info) self.ipmi_session.sol_handler = self._got_sol_payload self.connected = True # self._sendpendingoutput() checks len(self._sendpendingoutput) self._sendpendingoutput()
python
def _got_session(self, response): """Private function to navigate SOL payload activation """ if 'error' in response: self._print_error(response['error']) return if not self.ipmi_session: self.callgotsession = response return # Send activate sol payload directive # netfn= 6 (application) # command = 0x48 (activate payload) # data = (1, sol payload type # 1, first instance # 0b11000000, -encrypt, authenticate, # disable serial/modem alerts, CTS fine # 0, 0, 0 reserved response = self.ipmi_session.raw_command(netfn=0x6, command=0x48, data=(1, 1, 192, 0, 0, 0)) # given that these are specific to the command, # it's probably best if one can grep the error # here instead of in constants sol_activate_codes = { 0x81: 'SOL is disabled', 0x82: 'Maximum SOL session count reached', 0x83: 'Cannot activate payload with encryption', 0x84: 'Cannot activate payload without encryption', } if 'code' in response and response['code']: if response['code'] in constants.ipmi_completion_codes: self._print_error( constants.ipmi_completion_codes[response['code']]) return elif response['code'] == 0x80: if self.force_session and not self.retriedpayload: self.retriedpayload = 1 sessrsp = self.ipmi_session.raw_command( netfn=0x6, command=0x49, data=(1, 1, 0, 0, 0, 0)) self._got_session(sessrsp) return else: self._print_error('SOL Session active for another client') return elif response['code'] in sol_activate_codes: self._print_error(sol_activate_codes[response['code']]) return else: self._print_error( 'SOL encountered Unrecognized error code %d' % response['code']) return if 'error' in response: self._print_error(response['error']) return self.activated = True # data[0:3] is reserved except for the test mode, which we don't use data = response['data'] self.maxoutcount = (data[5] << 8) + data[4] # BMC tells us this is the maximum allowed size # data[6:7] is the promise of how small packets are going to be, but we # don't have any reason to worry about it # some BMCs disagree on the endianness, so do both valid_ports = (self.port, struct.unpack( '<H', struct.pack('>H', self.port))[0]) if (data[8] + (data[9] << 8)) not in valid_ports: # TODO(jbjohnso): support atypical SOL port number raise NotImplementedError("Non-standard SOL Port Number") # ignore data[10:11] for now, the vlan detail, shouldn't matter to this # code anyway... # NOTE(jbjohnso): # We will use a special purpose keepalive if self.ipmi_session.sol_handler is not None: # If there is erroneously another SOL handler already, notify # it of newly established session self.ipmi_session.sol_handler({'error': 'Session Disconnected'}) self.keepaliveid = self.ipmi_session.register_keepalive( cmd={'netfn': 6, 'command': 0x4b, 'data': (1, 1)}, callback=self._got_payload_instance_info) self.ipmi_session.sol_handler = self._got_sol_payload self.connected = True # self._sendpendingoutput() checks len(self._sendpendingoutput) self._sendpendingoutput()
[ "def", "_got_session", "(", "self", ",", "response", ")", ":", "if", "'error'", "in", "response", ":", "self", ".", "_print_error", "(", "response", "[", "'error'", "]", ")", "return", "if", "not", "self", ".", "ipmi_session", ":", "self", ".", "callgotsession", "=", "response", "return", "# Send activate sol payload directive", "# netfn= 6 (application)", "# command = 0x48 (activate payload)", "# data = (1, sol payload type", "# 1, first instance", "# 0b11000000, -encrypt, authenticate,", "# disable serial/modem alerts, CTS fine", "# 0, 0, 0 reserved", "response", "=", "self", ".", "ipmi_session", ".", "raw_command", "(", "netfn", "=", "0x6", ",", "command", "=", "0x48", ",", "data", "=", "(", "1", ",", "1", ",", "192", ",", "0", ",", "0", ",", "0", ")", ")", "# given that these are specific to the command,", "# it's probably best if one can grep the error", "# here instead of in constants", "sol_activate_codes", "=", "{", "0x81", ":", "'SOL is disabled'", ",", "0x82", ":", "'Maximum SOL session count reached'", ",", "0x83", ":", "'Cannot activate payload with encryption'", ",", "0x84", ":", "'Cannot activate payload without encryption'", ",", "}", "if", "'code'", "in", "response", "and", "response", "[", "'code'", "]", ":", "if", "response", "[", "'code'", "]", "in", "constants", ".", "ipmi_completion_codes", ":", "self", ".", "_print_error", "(", "constants", ".", "ipmi_completion_codes", "[", "response", "[", "'code'", "]", "]", ")", "return", "elif", "response", "[", "'code'", "]", "==", "0x80", ":", "if", "self", ".", "force_session", "and", "not", "self", ".", "retriedpayload", ":", "self", ".", "retriedpayload", "=", "1", "sessrsp", "=", "self", ".", "ipmi_session", ".", "raw_command", "(", "netfn", "=", "0x6", ",", "command", "=", "0x49", ",", "data", "=", "(", "1", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", ")", ")", "self", ".", "_got_session", "(", "sessrsp", ")", "return", "else", ":", "self", ".", "_print_error", "(", "'SOL Session active for another client'", ")", "return", "elif", "response", "[", "'code'", "]", "in", "sol_activate_codes", ":", "self", ".", "_print_error", "(", "sol_activate_codes", "[", "response", "[", "'code'", "]", "]", ")", "return", "else", ":", "self", ".", "_print_error", "(", "'SOL encountered Unrecognized error code %d'", "%", "response", "[", "'code'", "]", ")", "return", "if", "'error'", "in", "response", ":", "self", ".", "_print_error", "(", "response", "[", "'error'", "]", ")", "return", "self", ".", "activated", "=", "True", "# data[0:3] is reserved except for the test mode, which we don't use", "data", "=", "response", "[", "'data'", "]", "self", ".", "maxoutcount", "=", "(", "data", "[", "5", "]", "<<", "8", ")", "+", "data", "[", "4", "]", "# BMC tells us this is the maximum allowed size", "# data[6:7] is the promise of how small packets are going to be, but we", "# don't have any reason to worry about it", "# some BMCs disagree on the endianness, so do both", "valid_ports", "=", "(", "self", ".", "port", ",", "struct", ".", "unpack", "(", "'<H'", ",", "struct", ".", "pack", "(", "'>H'", ",", "self", ".", "port", ")", ")", "[", "0", "]", ")", "if", "(", "data", "[", "8", "]", "+", "(", "data", "[", "9", "]", "<<", "8", ")", ")", "not", "in", "valid_ports", ":", "# TODO(jbjohnso): support atypical SOL port number", "raise", "NotImplementedError", "(", "\"Non-standard SOL Port Number\"", ")", "# ignore data[10:11] for now, the vlan detail, shouldn't matter to this", "# code anyway...", "# NOTE(jbjohnso):", "# We will use a special 
purpose keepalive", "if", "self", ".", "ipmi_session", ".", "sol_handler", "is", "not", "None", ":", "# If there is erroneously another SOL handler already, notify", "# it of newly established session", "self", ".", "ipmi_session", ".", "sol_handler", "(", "{", "'error'", ":", "'Session Disconnected'", "}", ")", "self", ".", "keepaliveid", "=", "self", ".", "ipmi_session", ".", "register_keepalive", "(", "cmd", "=", "{", "'netfn'", ":", "6", ",", "'command'", ":", "0x4b", ",", "'data'", ":", "(", "1", ",", "1", ")", "}", ",", "callback", "=", "self", ".", "_got_payload_instance_info", ")", "self", ".", "ipmi_session", ".", "sol_handler", "=", "self", ".", "_got_sol_payload", "self", ".", "connected", "=", "True", "# self._sendpendingoutput() checks len(self._sendpendingoutput)", "self", ".", "_sendpendingoutput", "(", ")" ]
Private function to navigate SOL payload activation
[ "Private", "function", "to", "navigate", "SOL", "payload", "activation" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L77-L160
train
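_got_session above tolerates BMCs that report the SOL payload port with swapped byte order. The snippet below isolates that struct round-trip; 623 is the default RMCP+ port, used here purely as an example value.

import struct

def acceptable_ports(port):
    # pack big-endian, unpack little-endian: yields the byte-swapped port
    swapped = struct.unpack('<H', struct.pack('>H', port))[0]
    return (port, swapped)

# 623 (0x026F) may legitimately come back from the BMC as 0x6F02
assert acceptable_ports(623) == (623, 0x6F02)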
openstack/pyghmi
pyghmi/ipmi/console.py
Console._got_cons_input
def _got_cons_input(self, handle): """Callback for handle events detected by ipmi session """ self._addpendingdata(handle.read()) if not self.awaitingack: self._sendpendingoutput()
python
def _got_cons_input(self, handle): """Callback for handle events detected by ipmi session """ self._addpendingdata(handle.read()) if not self.awaitingack: self._sendpendingoutput()
[ "def", "_got_cons_input", "(", "self", ",", "handle", ")", ":", "self", ".", "_addpendingdata", "(", "handle", ".", "read", "(", ")", ")", "if", "not", "self", ".", "awaitingack", ":", "self", ".", "_sendpendingoutput", "(", ")" ]
Callback for handle events detected by ipmi session
[ "Callback", "for", "handle", "events", "detected", "by", "ipmi", "session" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L192-L197
train
openstack/pyghmi
pyghmi/ipmi/console.py
Console.close
def close(self): """Shut down an SOL session, """ if self.ipmi_session: self.ipmi_session.unregister_keepalive(self.keepaliveid) if self.activated: try: self.ipmi_session.raw_command(netfn=6, command=0x49, data=(1, 1, 0, 0, 0, 0)) except exc.IpmiException: # if underlying ipmi session is not working, then # run with the implicit success pass
python
def close(self): """Shut down an SOL session, """ if self.ipmi_session: self.ipmi_session.unregister_keepalive(self.keepaliveid) if self.activated: try: self.ipmi_session.raw_command(netfn=6, command=0x49, data=(1, 1, 0, 0, 0, 0)) except exc.IpmiException: # if underlying ipmi session is not working, then # run with the implicit success pass
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "ipmi_session", ":", "self", ".", "ipmi_session", ".", "unregister_keepalive", "(", "self", ".", "keepaliveid", ")", "if", "self", ".", "activated", ":", "try", ":", "self", ".", "ipmi_session", ".", "raw_command", "(", "netfn", "=", "6", ",", "command", "=", "0x49", ",", "data", "=", "(", "1", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", ")", ")", "except", "exc", ".", "IpmiException", ":", "# if underlying ipmi session is not working, then", "# run with the implicit success", "pass" ]
Shut down an SOL session,
[ "Shut", "down", "an", "SOL", "session" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L199-L211
train
openstack/pyghmi
pyghmi/ipmi/console.py
ServerConsole._got_sol_payload
def _got_sol_payload(self, payload): """SOL payload callback """ # TODO(jbjohnso) test cases to throw some likely scenarios at functions # for example, retry with new data, retry with no new data # retry with unexpected sequence number if type(payload) == dict: # we received an error condition self.activated = False self._print_error(payload) return newseq = payload[0] & 0b1111 ackseq = payload[1] & 0b1111 ackcount = payload[2] nacked = payload[3] & 0b1000000 breakdetected = payload[3] & 0b10000 # for now, ignore overrun. I assume partial NACK for this reason or # for no reason would be treated the same, new payload with partial # data. remdata = "" remdatalen = 0 flag = 0 if not self.poweredon: flag |= 0b1100000 if not self.activated: flag |= 0b1010000 if newseq != 0: # this packet at least has some data to send to us.. if len(payload) > 4: remdatalen = len(payload[4:]) # store remote len before dupe # retry logic, we must ack *this* many even if it is # a retry packet with new partial data remdata = bytes(payload[4:]) if newseq == self.remseq: # it is a retry, but could have new data if remdatalen > self.lastsize: remdata = bytes(remdata[4 + self.lastsize:]) else: # no new data... remdata = "" else: # TODO(jbjohnso) what if remote sequence number is wrong?? self.remseq = newseq self.lastsize = remdatalen ackpayload = bytearray((0, self.remseq, remdatalen, flag)) # Why not put pending data into the ack? because it's rare # and might be hard to decide what to do in the context of # retry situation try: self.send_payload(ackpayload, retry=False) except exc.IpmiException: # if the session is broken, then close the SOL session self.close() if remdata: # Do not subject callers to empty data self._print_data(remdata) if self.myseq != 0 and ackseq == self.myseq: # the bmc has something # to say about last xmit self.awaitingack = False if nacked and not breakdetected: # the BMC was in some way unhappy newtext = self.lastpayload[4 + ackcount:] with self.outputlock: if (self.pendingoutput and not isinstance(self.pendingoutput[0], dict)): self.pendingoutput[0] = newtext + self.pendingoutput[0] else: self.pendingoutput = [newtext] + self.pendingoutput # self._sendpendingoutput() checks len(self._sendpendingoutput) self._sendpendingoutput() elif ackseq != 0 and self.awaitingack: # if an ack packet came in, but did not match what we # expected, retry our payload now. # the situation that was triggered was a senseless retry # when data came in while we xmitted. In theory, a BMC # should handle a retry correctly, but some do not, so # try to mitigate by avoiding overeager retries # occasional retry of a packet # sooner than timeout suggests is evidently a big deal self.send_payload(payload=self.lastpayload)
python
def _got_sol_payload(self, payload): """SOL payload callback """ # TODO(jbjohnso) test cases to throw some likely scenarios at functions # for example, retry with new data, retry with no new data # retry with unexpected sequence number if type(payload) == dict: # we received an error condition self.activated = False self._print_error(payload) return newseq = payload[0] & 0b1111 ackseq = payload[1] & 0b1111 ackcount = payload[2] nacked = payload[3] & 0b1000000 breakdetected = payload[3] & 0b10000 # for now, ignore overrun. I assume partial NACK for this reason or # for no reason would be treated the same, new payload with partial # data. remdata = "" remdatalen = 0 flag = 0 if not self.poweredon: flag |= 0b1100000 if not self.activated: flag |= 0b1010000 if newseq != 0: # this packet at least has some data to send to us.. if len(payload) > 4: remdatalen = len(payload[4:]) # store remote len before dupe # retry logic, we must ack *this* many even if it is # a retry packet with new partial data remdata = bytes(payload[4:]) if newseq == self.remseq: # it is a retry, but could have new data if remdatalen > self.lastsize: remdata = bytes(remdata[4 + self.lastsize:]) else: # no new data... remdata = "" else: # TODO(jbjohnso) what if remote sequence number is wrong?? self.remseq = newseq self.lastsize = remdatalen ackpayload = bytearray((0, self.remseq, remdatalen, flag)) # Why not put pending data into the ack? because it's rare # and might be hard to decide what to do in the context of # retry situation try: self.send_payload(ackpayload, retry=False) except exc.IpmiException: # if the session is broken, then close the SOL session self.close() if remdata: # Do not subject callers to empty data self._print_data(remdata) if self.myseq != 0 and ackseq == self.myseq: # the bmc has something # to say about last xmit self.awaitingack = False if nacked and not breakdetected: # the BMC was in some way unhappy newtext = self.lastpayload[4 + ackcount:] with self.outputlock: if (self.pendingoutput and not isinstance(self.pendingoutput[0], dict)): self.pendingoutput[0] = newtext + self.pendingoutput[0] else: self.pendingoutput = [newtext] + self.pendingoutput # self._sendpendingoutput() checks len(self._sendpendingoutput) self._sendpendingoutput() elif ackseq != 0 and self.awaitingack: # if an ack packet came in, but did not match what we # expected, retry our payload now. # the situation that was triggered was a senseless retry # when data came in while we xmitted. In theory, a BMC # should handle a retry correctly, but some do not, so # try to mitigate by avoiding overeager retries # occasional retry of a packet # sooner than timeout suggests is evidently a big deal self.send_payload(payload=self.lastpayload)
[ "def", "_got_sol_payload", "(", "self", ",", "payload", ")", ":", "# TODO(jbjohnso) test cases to throw some likely scenarios at functions", "# for example, retry with new data, retry with no new data", "# retry with unexpected sequence number", "if", "type", "(", "payload", ")", "==", "dict", ":", "# we received an error condition", "self", ".", "activated", "=", "False", "self", ".", "_print_error", "(", "payload", ")", "return", "newseq", "=", "payload", "[", "0", "]", "&", "0b1111", "ackseq", "=", "payload", "[", "1", "]", "&", "0b1111", "ackcount", "=", "payload", "[", "2", "]", "nacked", "=", "payload", "[", "3", "]", "&", "0b1000000", "breakdetected", "=", "payload", "[", "3", "]", "&", "0b10000", "# for now, ignore overrun. I assume partial NACK for this reason or", "# for no reason would be treated the same, new payload with partial", "# data.", "remdata", "=", "\"\"", "remdatalen", "=", "0", "flag", "=", "0", "if", "not", "self", ".", "poweredon", ":", "flag", "|=", "0b1100000", "if", "not", "self", ".", "activated", ":", "flag", "|=", "0b1010000", "if", "newseq", "!=", "0", ":", "# this packet at least has some data to send to us..", "if", "len", "(", "payload", ")", ">", "4", ":", "remdatalen", "=", "len", "(", "payload", "[", "4", ":", "]", ")", "# store remote len before dupe", "# retry logic, we must ack *this* many even if it is", "# a retry packet with new partial data", "remdata", "=", "bytes", "(", "payload", "[", "4", ":", "]", ")", "if", "newseq", "==", "self", ".", "remseq", ":", "# it is a retry, but could have new data", "if", "remdatalen", ">", "self", ".", "lastsize", ":", "remdata", "=", "bytes", "(", "remdata", "[", "4", "+", "self", ".", "lastsize", ":", "]", ")", "else", ":", "# no new data...", "remdata", "=", "\"\"", "else", ":", "# TODO(jbjohnso) what if remote sequence number is wrong??", "self", ".", "remseq", "=", "newseq", "self", ".", "lastsize", "=", "remdatalen", "ackpayload", "=", "bytearray", "(", "(", "0", ",", "self", ".", "remseq", ",", "remdatalen", ",", "flag", ")", ")", "# Why not put pending data into the ack? 
because it's rare", "# and might be hard to decide what to do in the context of", "# retry situation", "try", ":", "self", ".", "send_payload", "(", "ackpayload", ",", "retry", "=", "False", ")", "except", "exc", ".", "IpmiException", ":", "# if the session is broken, then close the SOL session", "self", ".", "close", "(", ")", "if", "remdata", ":", "# Do not subject callers to empty data", "self", ".", "_print_data", "(", "remdata", ")", "if", "self", ".", "myseq", "!=", "0", "and", "ackseq", "==", "self", ".", "myseq", ":", "# the bmc has something", "# to say about last xmit", "self", ".", "awaitingack", "=", "False", "if", "nacked", "and", "not", "breakdetected", ":", "# the BMC was in some way unhappy", "newtext", "=", "self", ".", "lastpayload", "[", "4", "+", "ackcount", ":", "]", "with", "self", ".", "outputlock", ":", "if", "(", "self", ".", "pendingoutput", "and", "not", "isinstance", "(", "self", ".", "pendingoutput", "[", "0", "]", ",", "dict", ")", ")", ":", "self", ".", "pendingoutput", "[", "0", "]", "=", "newtext", "+", "self", ".", "pendingoutput", "[", "0", "]", "else", ":", "self", ".", "pendingoutput", "=", "[", "newtext", "]", "+", "self", ".", "pendingoutput", "# self._sendpendingoutput() checks len(self._sendpendingoutput)", "self", ".", "_sendpendingoutput", "(", ")", "elif", "ackseq", "!=", "0", "and", "self", ".", "awaitingack", ":", "# if an ack packet came in, but did not match what we", "# expected, retry our payload now.", "# the situation that was triggered was a senseless retry", "# when data came in while we xmitted. In theory, a BMC", "# should handle a retry correctly, but some do not, so", "# try to mitigate by avoiding overeager retries", "# occasional retry of a packet", "# sooner than timeout suggests is evidently a big deal", "self", ".", "send_payload", "(", "payload", "=", "self", ".", "lastpayload", ")" ]
SOL payload callback
[ "SOL", "payload", "callback" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L459-L531
train
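_got_sol_payload above begins by pulling several small fields out of the first four payload bytes. The sketch below extracts the same header fields from a sample packet; it is an illustration, not part of pyghmi.

def parse_sol_header(payload):
    return {
        'newseq': payload[0] & 0b1111,           # 4-bit transmit sequence
        'ackseq': payload[1] & 0b1111,           # 4-bit acknowledged sequence
        'ackcount': payload[2],                  # characters accepted by peer
        'nacked': bool(payload[3] & 0b1000000),  # remote NACKed our data
        'breakdetected': bool(payload[3] & 0b10000),
    }

hdr = parse_sol_header(bytearray((0x05, 0x03, 7, 0b1000000)))
assert hdr['newseq'] == 5 and hdr['ackseq'] == 3
assert hdr['nacked'] and not hdr['breakdetected']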
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/handler.py
OEMHandler.is_fpc
def is_fpc(self): """True if the target is a Lenovo nextscale fan power controller """ if self.has_imm or self.has_xcc: return None if self._fpc_variant is not None: return self._fpc_variant fpc_ids = ((19046, 32, 1063), (20301, 32, 462)) smm_id = (19046, 32, 1180) currid = (self.oemid['manufacturer_id'], self.oemid['device_id'], self.oemid['product_id']) if currid in fpc_ids: self._fpc_variant = 6 elif currid == smm_id: self._fpc_variant = 2 return self._fpc_variant
python
def is_fpc(self): """True if the target is a Lenovo nextscale fan power controller """ if self.has_imm or self.has_xcc: return None if self._fpc_variant is not None: return self._fpc_variant fpc_ids = ((19046, 32, 1063), (20301, 32, 462)) smm_id = (19046, 32, 1180) currid = (self.oemid['manufacturer_id'], self.oemid['device_id'], self.oemid['product_id']) if currid in fpc_ids: self._fpc_variant = 6 elif currid == smm_id: self._fpc_variant = 2 return self._fpc_variant
[ "def", "is_fpc", "(", "self", ")", ":", "if", "self", ".", "has_imm", "or", "self", ".", "has_xcc", ":", "return", "None", "if", "self", ".", "_fpc_variant", "is", "not", "None", ":", "return", "self", ".", "_fpc_variant", "fpc_ids", "=", "(", "(", "19046", ",", "32", ",", "1063", ")", ",", "(", "20301", ",", "32", ",", "462", ")", ")", "smm_id", "=", "(", "19046", ",", "32", ",", "1180", ")", "currid", "=", "(", "self", ".", "oemid", "[", "'manufacturer_id'", "]", ",", "self", ".", "oemid", "[", "'device_id'", "]", ",", "self", ".", "oemid", "[", "'product_id'", "]", ")", "if", "currid", "in", "fpc_ids", ":", "self", ".", "_fpc_variant", "=", "6", "elif", "currid", "==", "smm_id", ":", "self", ".", "_fpc_variant", "=", "2", "return", "self", ".", "_fpc_variant" ]
True if the target is a Lenovo nextscale fan power controller
[ "True", "if", "the", "target", "is", "a", "Lenovo", "nextscale", "fan", "power", "controller" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/handler.py#L335-L350
train
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/handler.py
OEMHandler.has_tsm
def has_tsm(self): """True if this particular server have a TSM based service processor """ if (self.oemid['manufacturer_id'] == 19046 and self.oemid['device_id'] == 32): try: self.ipmicmd.xraw_command(netfn=0x3a, command=0xf) except pygexc.IpmiException as ie: if ie.ipmicode == 193: return False raise return True return False
python
def has_tsm(self): """True if this particular server have a TSM based service processor """ if (self.oemid['manufacturer_id'] == 19046 and self.oemid['device_id'] == 32): try: self.ipmicmd.xraw_command(netfn=0x3a, command=0xf) except pygexc.IpmiException as ie: if ie.ipmicode == 193: return False raise return True return False
[ "def", "has_tsm", "(", "self", ")", ":", "if", "(", "self", ".", "oemid", "[", "'manufacturer_id'", "]", "==", "19046", "and", "self", ".", "oemid", "[", "'device_id'", "]", "==", "32", ")", ":", "try", ":", "self", ".", "ipmicmd", ".", "xraw_command", "(", "netfn", "=", "0x3a", ",", "command", "=", "0xf", ")", "except", "pygexc", ".", "IpmiException", "as", "ie", ":", "if", "ie", ".", "ipmicode", "==", "193", ":", "return", "False", "raise", "return", "True", "return", "False" ]
True if this particular server have a TSM based service processor
[ "True", "if", "this", "particular", "server", "have", "a", "TSM", "based", "service", "processor" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/handler.py#L359-L371
train
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/handler.py
OEMHandler.set_oem_capping_enabled
def set_oem_capping_enabled(self, enable): """Set PSU based power capping :param enable: True for enable and False for disable """ # 1 - Enable power capping(default) if enable: statecode = 1 # 0 - Disable power capping else: statecode = 0 if self.has_tsm: self.ipmicmd.xraw_command(netfn=0x3a, command=0x1a, data=(3, statecode)) return True
python
def set_oem_capping_enabled(self, enable): """Set PSU based power capping :param enable: True for enable and False for disable """ # 1 - Enable power capping(default) if enable: statecode = 1 # 0 - Disable power capping else: statecode = 0 if self.has_tsm: self.ipmicmd.xraw_command(netfn=0x3a, command=0x1a, data=(3, statecode)) return True
[ "def", "set_oem_capping_enabled", "(", "self", ",", "enable", ")", ":", "# 1 - Enable power capping(default)", "if", "enable", ":", "statecode", "=", "1", "# 0 - Disable power capping", "else", ":", "statecode", "=", "0", "if", "self", ".", "has_tsm", ":", "self", ".", "ipmicmd", ".", "xraw_command", "(", "netfn", "=", "0x3a", ",", "command", "=", "0x1a", ",", "data", "=", "(", "3", ",", "statecode", ")", ")", "return", "True" ]
Set PSU based power capping :param enable: True for enable and False for disable
[ "Set", "PSU", "based", "power", "capping" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/handler.py#L636-L650
train
openstack/pyghmi
pyghmi/ipmi/private/util.py
decode_wireformat_uuid
def decode_wireformat_uuid(rawguid): """Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output. """ if isinstance(rawguid, list): rawguid = bytearray(rawguid) lebytes = struct.unpack_from('<IHH', buffer(rawguid[:8])) bebytes = struct.unpack_from('>HHI', buffer(rawguid[8:])) return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format( lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2])
python
def decode_wireformat_uuid(rawguid): """Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output. """ if isinstance(rawguid, list): rawguid = bytearray(rawguid) lebytes = struct.unpack_from('<IHH', buffer(rawguid[:8])) bebytes = struct.unpack_from('>HHI', buffer(rawguid[8:])) return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format( lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2])
[ "def", "decode_wireformat_uuid", "(", "rawguid", ")", ":", "if", "isinstance", "(", "rawguid", ",", "list", ")", ":", "rawguid", "=", "bytearray", "(", "rawguid", ")", "lebytes", "=", "struct", ".", "unpack_from", "(", "'<IHH'", ",", "buffer", "(", "rawguid", "[", ":", "8", "]", ")", ")", "bebytes", "=", "struct", ".", "unpack_from", "(", "'>HHI'", ",", "buffer", "(", "rawguid", "[", "8", ":", "]", ")", ")", "return", "'{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'", ".", "format", "(", "lebytes", "[", "0", "]", ",", "lebytes", "[", "1", "]", ",", "lebytes", "[", "2", "]", ",", "bebytes", "[", "0", "]", ",", "bebytes", "[", "1", "]", ",", "bebytes", "[", "2", "]", ")" ]
Decode a wire format UUID It handles the rather particular scheme where half is little endian and half is big endian. It returns a string like dmidecode would output.
[ "Decode", "a", "wire", "format", "UUID" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/util.py#L41-L52
train
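decode_wireformat_uuid above depends on the Python 2 only buffer() builtin. Below is a Python 3 friendly sketch of the same mixed-endian decode, offered under that assumption; it keeps the little-endian first half / big-endian second half split and the dmidecode-style output.

import struct

def decode_wireformat_uuid_py3(rawguid):
    rawguid = bytes(bytearray(rawguid))
    lebytes = struct.unpack_from('<IHH', rawguid, 0)   # first 8 bytes, LE
    bebytes = struct.unpack_from('>HHI', rawguid, 8)   # last 8 bytes, BE
    return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format(
        lebytes[0], lebytes[1], lebytes[2],
        bebytes[0], bebytes[1], bebytes[2])

# only the byte grouping matters in this sample input
assert decode_wireformat_uuid_py3(bytes(range(16))) == \
    '03020100-0504-0706-0809-0A0B0C0D0E0F'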
openstack/pyghmi
pyghmi/ipmi/private/util.py
urlsplit
def urlsplit(url): """Split an arbitrary url into protocol, host, rest The standard urlsplit does not want to provide 'netloc' for arbitrary protocols, this works around that. :param url: The url to split into component parts """ proto, rest = url.split(':', 1) host = '' if rest[:2] == '//': host, rest = rest[2:].split('/', 1) rest = '/' + rest return proto, host, rest
python
def urlsplit(url): """Split an arbitrary url into protocol, host, rest The standard urlsplit does not want to provide 'netloc' for arbitrary protocols, this works around that. :param url: The url to split into component parts """ proto, rest = url.split(':', 1) host = '' if rest[:2] == '//': host, rest = rest[2:].split('/', 1) rest = '/' + rest return proto, host, rest
[ "def", "urlsplit", "(", "url", ")", ":", "proto", ",", "rest", "=", "url", ".", "split", "(", "':'", ",", "1", ")", "host", "=", "''", "if", "rest", "[", ":", "2", "]", "==", "'//'", ":", "host", ",", "rest", "=", "rest", "[", "2", ":", "]", ".", "split", "(", "'/'", ",", "1", ")", "rest", "=", "'/'", "+", "rest", "return", "proto", ",", "host", ",", "rest" ]
Split an arbitrary url into protocol, host, rest The standard urlsplit does not want to provide 'netloc' for arbitrary protocols, this works around that. :param url: The url to split into component parts
[ "Split", "an", "arbitrary", "url", "into", "protocol", "host", "rest" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/util.py#L55-L68
train
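A few quick checks of the behaviour documented for urlsplit above; the import path is taken from the record's path field.

from pyghmi.ipmi.private.util import urlsplit

assert urlsplit('https://bmc.example.com/redfish/v1') == \
    ('https', 'bmc.example.com', '/redfish/v1')
assert urlsplit('tftp://192.0.2.10/boot.img') == \
    ('tftp', '192.0.2.10', '/boot.img')
# without '//' no host portion is extracted
assert urlsplit('mailto:admin@example.com') == \
    ('mailto', '', 'admin@example.com')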
openstack/pyghmi
pyghmi/ipmi/private/util.py
get_ipv4
def get_ipv4(hostname): """Get list of ipv4 addresses for hostname """ addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET, socket.SOCK_STREAM) return [addrinfo[x][4][0] for x in range(len(addrinfo))]
python
def get_ipv4(hostname): """Get list of ipv4 addresses for hostname """ addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET, socket.SOCK_STREAM) return [addrinfo[x][4][0] for x in range(len(addrinfo))]
[ "def", "get_ipv4", "(", "hostname", ")", ":", "addrinfo", "=", "socket", ".", "getaddrinfo", "(", "hostname", ",", "None", ",", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "return", "[", "addrinfo", "[", "x", "]", "[", "4", "]", "[", "0", "]", "for", "x", "in", "range", "(", "len", "(", "addrinfo", ")", ")", "]" ]
Get list of ipv4 addresses for hostname
[ "Get", "list", "of", "ipv4", "addresses", "for", "hostname" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/util.py#L71-L77
train
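get_ipv4 above is a thin wrapper over socket.getaddrinfo. The equivalent call pattern is shown below without any pyghmi dependency, as a point of comparison.

import socket

def ipv4_addresses(hostname):
    addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET,
                                  socket.SOCK_STREAM)
    # each entry is (family, type, proto, canonname, sockaddr) and for
    # AF_INET the sockaddr is an (address, port) pair
    return [entry[4][0] for entry in addrinfo]

print(ipv4_addresses('localhost'))   # typically ['127.0.0.1']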
openstack/pyghmi
pyghmi/ipmi/private/session.py
_aespad
def _aespad(data): """ipmi demands a certain pad scheme, per table 13-20 AES-CBC encrypted payload fields. """ currlen = len(data) + 1 # need to count the pad length field as well neededpad = currlen % 16 if neededpad: # if it happens to be zero, hurray, but otherwise invert the # sense of the padding neededpad = 16 - neededpad padval = 1 pad = bytearray(neededpad) while padval <= neededpad: pad[padval - 1] = padval padval += 1 pad.append(neededpad) return pad
python
def _aespad(data): """ipmi demands a certain pad scheme, per table 13-20 AES-CBC encrypted payload fields. """ currlen = len(data) + 1 # need to count the pad length field as well neededpad = currlen % 16 if neededpad: # if it happens to be zero, hurray, but otherwise invert the # sense of the padding neededpad = 16 - neededpad padval = 1 pad = bytearray(neededpad) while padval <= neededpad: pad[padval - 1] = padval padval += 1 pad.append(neededpad) return pad
[ "def", "_aespad", "(", "data", ")", ":", "currlen", "=", "len", "(", "data", ")", "+", "1", "# need to count the pad length field as well", "neededpad", "=", "currlen", "%", "16", "if", "neededpad", ":", "# if it happens to be zero, hurray, but otherwise invert the", "# sense of the padding", "neededpad", "=", "16", "-", "neededpad", "padval", "=", "1", "pad", "=", "bytearray", "(", "neededpad", ")", "while", "padval", "<=", "neededpad", ":", "pad", "[", "padval", "-", "1", "]", "=", "padval", "padval", "+=", "1", "pad", ".", "append", "(", "neededpad", ")", "return", "pad" ]
ipmi demands a certain pad scheme, per table 13-20 AES-CBC encrypted payload fields.
[ "ipmi", "demands", "a", "certain", "pad", "scheme", "per", "table", "13", "-", "20", "AES", "-", "CBC", "encrypted", "payload", "fields", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L252-L267
train
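_aespad above implements the pad scheme from IPMI table 13-20: pad bytes count upward from 1 and a final byte records the pad length, so payload plus pad plus that length byte is always a multiple of 16. The check below verifies that length property for a range of payload sizes.

def aespad_total_ok(payload_len):
    neededpad = (payload_len + 1) % 16      # +1 for the pad-length byte
    if neededpad:
        neededpad = 16 - neededpad
    return (payload_len + neededpad + 1) % 16 == 0

assert all(aespad_total_ok(n) for n in range(64))
# e.g. a 14-byte payload gets one pad byte plus the length byte:
# _aespad(b'\x00' * 14) returns bytearray(b'\x01\x01')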
openstack/pyghmi
pyghmi/ipmi/private/session.py
Session._make_bridge_request_msg
def _make_bridge_request_msg(self, channel, netfn, command): """This function generate message for bridge request. It is a part of ipmi payload. """ head = bytearray((constants.IPMI_BMC_ADDRESS, constants.netfn_codes['application'] << 2)) check_sum = _checksum(*head) # NOTE(fengqian): according IPMI Figure 14-11, rqSWID is set to 81h boday = bytearray((0x81, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD, 0x40 | channel)) # NOTE(fengqian): Track request self._add_request_entry((constants.netfn_codes['application'] + 1, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD)) return head + bytearray((check_sum,)) + boday
python
def _make_bridge_request_msg(self, channel, netfn, command): """This function generate message for bridge request. It is a part of ipmi payload. """ head = bytearray((constants.IPMI_BMC_ADDRESS, constants.netfn_codes['application'] << 2)) check_sum = _checksum(*head) # NOTE(fengqian): according IPMI Figure 14-11, rqSWID is set to 81h boday = bytearray((0x81, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD, 0x40 | channel)) # NOTE(fengqian): Track request self._add_request_entry((constants.netfn_codes['application'] + 1, self.seqlun, constants.IPMI_SEND_MESSAGE_CMD)) return head + bytearray((check_sum,)) + boday
[ "def", "_make_bridge_request_msg", "(", "self", ",", "channel", ",", "netfn", ",", "command", ")", ":", "head", "=", "bytearray", "(", "(", "constants", ".", "IPMI_BMC_ADDRESS", ",", "constants", ".", "netfn_codes", "[", "'application'", "]", "<<", "2", ")", ")", "check_sum", "=", "_checksum", "(", "*", "head", ")", "# NOTE(fengqian): according IPMI Figure 14-11, rqSWID is set to 81h", "boday", "=", "bytearray", "(", "(", "0x81", ",", "self", ".", "seqlun", ",", "constants", ".", "IPMI_SEND_MESSAGE_CMD", ",", "0x40", "|", "channel", ")", ")", "# NOTE(fengqian): Track request", "self", ".", "_add_request_entry", "(", "(", "constants", ".", "netfn_codes", "[", "'application'", "]", "+", "1", ",", "self", ".", "seqlun", ",", "constants", ".", "IPMI_SEND_MESSAGE_CMD", ")", ")", "return", "head", "+", "bytearray", "(", "(", "check_sum", ",", ")", ")", "+", "boday" ]
This function generate message for bridge request. It is a part of ipmi payload.
[ "This", "function", "generate", "message", "for", "bridge", "request", ".", "It", "is", "a", "part", "of", "ipmi", "payload", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L611-L624
train
openstack/pyghmi
pyghmi/ipmi/private/session.py
Session.wait_for_rsp
def wait_for_rsp(cls, timeout=None, callout=True): """IPMI Session Event loop iteration This watches for any activity on IPMI handles and handles registered by register_handle_callback. Callers are satisfied in the order that packets return from network, not in the order of calling. :param timeout: Maximum time to wait for data to come across. If unspecified, will autodetect based on earliest timeout """ global iosockets # Assume: # Instance A sends request to packet B # Then Instance C sends request to BMC D # BMC D was faster, so data comes back before BMC B # Instance C gets to go ahead of Instance A, because # Instance C can get work done, but instance A cannot curtime = _monotonic_time() # There ar a number of parties that each has their own timeout # The caller can specify a deadline in timeout argument # each session with active outbound payload has callback to # handle retry/timout error # each session that is 'alive' wants to send a keepalive ever so often. # We want to make sure the most strict request is honored and block for # no more time than that, so that whatever part(ies) need to service in # a deadline, will be honored if timeout != 0: with util.protect(WAITING_SESSIONS): for session, parms in dictitems(cls.waiting_sessions): if parms['timeout'] <= curtime: timeout = 0 # exit after one guaranteed pass break if (timeout is not None and timeout < parms['timeout'] - curtime): continue # timeout smaller than the current session # needs timeout = parms['timeout'] - curtime # set new timeout # value with util.protect(KEEPALIVE_SESSIONS): for session, parms in dictitems(cls.keepalive_sessions): if parms['timeout'] <= curtime: timeout = 0 break if (timeout is not None and timeout < parms['timeout'] - curtime): continue timeout = parms['timeout'] - curtime # If the loop above found no sessions wanting *and* the caller had no # timeout, exit function. In this case there is no way a session # could be waiting so we can always return 0 while cls.iterwaiters: waiter = cls.iterwaiters.pop() waiter({'success': True}) # cause a quick exit from the event loop iteration for calling code # to be able to reasonably set up for the next iteration before # a long select comes along if timeout is not None: timeout = 0 if timeout is None: return 0 if _poller(timeout=timeout): while sessionqueue: relsession = sessionqueue.popleft() relsession.process_pktqueue() sessionstodel = [] sessionstokeepalive = [] with util.protect(KEEPALIVE_SESSIONS): for session, parms in dictitems(cls.keepalive_sessions): # if the session is busy inside a command, defer invoking # keepalive until incommand is no longer the case if parms['timeout'] < curtime and not session._isincommand(): cls.keepalive_sessions[session]['timeout'] = \ _monotonic_time() + MAX_IDLE - (random.random() * 4.9) sessionstokeepalive.append(session) for session in sessionstokeepalive: session._keepalive() with util.protect(WAITING_SESSIONS): for session, parms in dictitems(cls.waiting_sessions): if parms['timeout'] < curtime: # timeout has expired, time to # give up on it and trigger timeout # response in the respective session # defer deletion until after loop sessionstodel.append(session) # to avoid confusing the for loop for session in sessionstodel: cls.waiting_sessions.pop(session, None) # one loop iteration to make sure recursion doesn't induce # redundant timeouts for session in sessionstodel: session._timedout() return len(cls.waiting_sessions)
python
def wait_for_rsp(cls, timeout=None, callout=True): """IPMI Session Event loop iteration This watches for any activity on IPMI handles and handles registered by register_handle_callback. Callers are satisfied in the order that packets return from network, not in the order of calling. :param timeout: Maximum time to wait for data to come across. If unspecified, will autodetect based on earliest timeout """ global iosockets # Assume: # Instance A sends request to packet B # Then Instance C sends request to BMC D # BMC D was faster, so data comes back before BMC B # Instance C gets to go ahead of Instance A, because # Instance C can get work done, but instance A cannot curtime = _monotonic_time() # There ar a number of parties that each has their own timeout # The caller can specify a deadline in timeout argument # each session with active outbound payload has callback to # handle retry/timout error # each session that is 'alive' wants to send a keepalive ever so often. # We want to make sure the most strict request is honored and block for # no more time than that, so that whatever part(ies) need to service in # a deadline, will be honored if timeout != 0: with util.protect(WAITING_SESSIONS): for session, parms in dictitems(cls.waiting_sessions): if parms['timeout'] <= curtime: timeout = 0 # exit after one guaranteed pass break if (timeout is not None and timeout < parms['timeout'] - curtime): continue # timeout smaller than the current session # needs timeout = parms['timeout'] - curtime # set new timeout # value with util.protect(KEEPALIVE_SESSIONS): for session, parms in dictitems(cls.keepalive_sessions): if parms['timeout'] <= curtime: timeout = 0 break if (timeout is not None and timeout < parms['timeout'] - curtime): continue timeout = parms['timeout'] - curtime # If the loop above found no sessions wanting *and* the caller had no # timeout, exit function. In this case there is no way a session # could be waiting so we can always return 0 while cls.iterwaiters: waiter = cls.iterwaiters.pop() waiter({'success': True}) # cause a quick exit from the event loop iteration for calling code # to be able to reasonably set up for the next iteration before # a long select comes along if timeout is not None: timeout = 0 if timeout is None: return 0 if _poller(timeout=timeout): while sessionqueue: relsession = sessionqueue.popleft() relsession.process_pktqueue() sessionstodel = [] sessionstokeepalive = [] with util.protect(KEEPALIVE_SESSIONS): for session, parms in dictitems(cls.keepalive_sessions): # if the session is busy inside a command, defer invoking # keepalive until incommand is no longer the case if parms['timeout'] < curtime and not session._isincommand(): cls.keepalive_sessions[session]['timeout'] = \ _monotonic_time() + MAX_IDLE - (random.random() * 4.9) sessionstokeepalive.append(session) for session in sessionstokeepalive: session._keepalive() with util.protect(WAITING_SESSIONS): for session, parms in dictitems(cls.waiting_sessions): if parms['timeout'] < curtime: # timeout has expired, time to # give up on it and trigger timeout # response in the respective session # defer deletion until after loop sessionstodel.append(session) # to avoid confusing the for loop for session in sessionstodel: cls.waiting_sessions.pop(session, None) # one loop iteration to make sure recursion doesn't induce # redundant timeouts for session in sessionstodel: session._timedout() return len(cls.waiting_sessions)
[ "def", "wait_for_rsp", "(", "cls", ",", "timeout", "=", "None", ",", "callout", "=", "True", ")", ":", "global", "iosockets", "# Assume:", "# Instance A sends request to packet B", "# Then Instance C sends request to BMC D", "# BMC D was faster, so data comes back before BMC B", "# Instance C gets to go ahead of Instance A, because", "# Instance C can get work done, but instance A cannot", "curtime", "=", "_monotonic_time", "(", ")", "# There ar a number of parties that each has their own timeout", "# The caller can specify a deadline in timeout argument", "# each session with active outbound payload has callback to", "# handle retry/timout error", "# each session that is 'alive' wants to send a keepalive ever so often.", "# We want to make sure the most strict request is honored and block for", "# no more time than that, so that whatever part(ies) need to service in", "# a deadline, will be honored", "if", "timeout", "!=", "0", ":", "with", "util", ".", "protect", "(", "WAITING_SESSIONS", ")", ":", "for", "session", ",", "parms", "in", "dictitems", "(", "cls", ".", "waiting_sessions", ")", ":", "if", "parms", "[", "'timeout'", "]", "<=", "curtime", ":", "timeout", "=", "0", "# exit after one guaranteed pass", "break", "if", "(", "timeout", "is", "not", "None", "and", "timeout", "<", "parms", "[", "'timeout'", "]", "-", "curtime", ")", ":", "continue", "# timeout smaller than the current session", "# needs", "timeout", "=", "parms", "[", "'timeout'", "]", "-", "curtime", "# set new timeout", "# value", "with", "util", ".", "protect", "(", "KEEPALIVE_SESSIONS", ")", ":", "for", "session", ",", "parms", "in", "dictitems", "(", "cls", ".", "keepalive_sessions", ")", ":", "if", "parms", "[", "'timeout'", "]", "<=", "curtime", ":", "timeout", "=", "0", "break", "if", "(", "timeout", "is", "not", "None", "and", "timeout", "<", "parms", "[", "'timeout'", "]", "-", "curtime", ")", ":", "continue", "timeout", "=", "parms", "[", "'timeout'", "]", "-", "curtime", "# If the loop above found no sessions wanting *and* the caller had no", "# timeout, exit function. 
In this case there is no way a session", "# could be waiting so we can always return 0", "while", "cls", ".", "iterwaiters", ":", "waiter", "=", "cls", ".", "iterwaiters", ".", "pop", "(", ")", "waiter", "(", "{", "'success'", ":", "True", "}", ")", "# cause a quick exit from the event loop iteration for calling code", "# to be able to reasonably set up for the next iteration before", "# a long select comes along", "if", "timeout", "is", "not", "None", ":", "timeout", "=", "0", "if", "timeout", "is", "None", ":", "return", "0", "if", "_poller", "(", "timeout", "=", "timeout", ")", ":", "while", "sessionqueue", ":", "relsession", "=", "sessionqueue", ".", "popleft", "(", ")", "relsession", ".", "process_pktqueue", "(", ")", "sessionstodel", "=", "[", "]", "sessionstokeepalive", "=", "[", "]", "with", "util", ".", "protect", "(", "KEEPALIVE_SESSIONS", ")", ":", "for", "session", ",", "parms", "in", "dictitems", "(", "cls", ".", "keepalive_sessions", ")", ":", "# if the session is busy inside a command, defer invoking", "# keepalive until incommand is no longer the case", "if", "parms", "[", "'timeout'", "]", "<", "curtime", "and", "not", "session", ".", "_isincommand", "(", ")", ":", "cls", ".", "keepalive_sessions", "[", "session", "]", "[", "'timeout'", "]", "=", "_monotonic_time", "(", ")", "+", "MAX_IDLE", "-", "(", "random", ".", "random", "(", ")", "*", "4.9", ")", "sessionstokeepalive", ".", "append", "(", "session", ")", "for", "session", "in", "sessionstokeepalive", ":", "session", ".", "_keepalive", "(", ")", "with", "util", ".", "protect", "(", "WAITING_SESSIONS", ")", ":", "for", "session", ",", "parms", "in", "dictitems", "(", "cls", ".", "waiting_sessions", ")", ":", "if", "parms", "[", "'timeout'", "]", "<", "curtime", ":", "# timeout has expired, time to", "# give up on it and trigger timeout", "# response in the respective session", "# defer deletion until after loop", "sessionstodel", ".", "append", "(", "session", ")", "# to avoid confusing the for loop", "for", "session", "in", "sessionstodel", ":", "cls", ".", "waiting_sessions", ".", "pop", "(", "session", ",", "None", ")", "# one loop iteration to make sure recursion doesn't induce", "# redundant timeouts", "for", "session", "in", "sessionstodel", ":", "session", ".", "_timedout", "(", ")", "return", "len", "(", "cls", ".", "waiting_sessions", ")" ]
IPMI Session Event loop iteration This watches for any activity on IPMI handles and handles registered by register_handle_callback. Callers are satisfied in the order that packets return from network, not in the order of calling. :param timeout: Maximum time to wait for data to come across. If unspecified, will autodetect based on earliest timeout
[ "IPMI", "Session", "Event", "loop", "iteration" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L1078-L1169
train
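A minimal polling sketch for the class-level event loop above; the import path mirrors this record's module, while the `done` flag stands in for whatever outstanding work the caller is actually tracking (hypothetical, not from the record).

from pyghmi.ipmi.private import session

done = False  # hypothetical: flipped by a raw_command callback issued elsewhere

while not done:
    # Service all IPMI sessions for up to 1 second (less if any session has an
    # earlier retry or keepalive deadline); returns how many sessions are
    # still waiting on responses.
    pending = session.Session.wait_for_rsp(timeout=1)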
openstack/pyghmi
pyghmi/ipmi/private/session.py
Session.register_keepalive
def register_keepalive(self, cmd, callback): """Register custom keepalive IPMI command This is mostly intended for use by the console code. calling code would have an easier time just scheduling in their own threading scheme. Such a behavior would naturally cause the default keepalive to not occur anyway if the calling code is at least as aggressive about timing as pyghmi :param cmd: A dict of arguments to be passed into raw_command :param callback: A function to be called with results of the keepalive :returns: value to identify registration for unregister_keepalive """ regid = random.random() if self._customkeepalives is None: self._customkeepalives = {regid: (cmd, callback)} else: while regid in self._customkeepalives: regid = random.random() self._customkeepalives[regid] = (cmd, callback) return regid
python
def register_keepalive(self, cmd, callback): """Register custom keepalive IPMI command This is mostly intended for use by the console code. calling code would have an easier time just scheduling in their own threading scheme. Such a behavior would naturally cause the default keepalive to not occur anyway if the calling code is at least as aggressive about timing as pyghmi :param cmd: A dict of arguments to be passed into raw_command :param callback: A function to be called with results of the keepalive :returns: value to identify registration for unregister_keepalive """ regid = random.random() if self._customkeepalives is None: self._customkeepalives = {regid: (cmd, callback)} else: while regid in self._customkeepalives: regid = random.random() self._customkeepalives[regid] = (cmd, callback) return regid
[ "def", "register_keepalive", "(", "self", ",", "cmd", ",", "callback", ")", ":", "regid", "=", "random", ".", "random", "(", ")", "if", "self", ".", "_customkeepalives", "is", "None", ":", "self", ".", "_customkeepalives", "=", "{", "regid", ":", "(", "cmd", ",", "callback", ")", "}", "else", ":", "while", "regid", "in", "self", ".", "_customkeepalives", ":", "regid", "=", "random", ".", "random", "(", ")", "self", ".", "_customkeepalives", "[", "regid", "]", "=", "(", "cmd", ",", "callback", ")", "return", "regid" ]
Register custom keepalive IPMI command This is mostly intended for use by the console code. calling code would have an easier time just scheduling in their own threading scheme. Such a behavior would naturally cause the default keepalive to not occur anyway if the calling code is at least as aggressive about timing as pyghmi :param cmd: A dict of arguments to be passed into raw_command :param callback: A function to be called with results of the keepalive :returns: value to identify registration for unregister_keepalive
[ "Register", "custom", "keepalive", "IPMI", "command" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L1171-L1191
train
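A hedged usage sketch of the registration API above. The session object is assumed to already exist; the command dict mirrors the module's default keepalive request (netfn=6, command=1), and unregister_keepalive is the counterpart the docstring names.

def on_keepalive(response):
    # receives the parsed result of each custom keepalive command
    if 'error' in response:
        print('keepalive error:', response['error'])

regid = ipmisession.register_keepalive(cmd={'netfn': 6, 'command': 1},
                                       callback=on_keepalive)
# ... later, hand regid back to ipmisession.unregister_keepalive(regid)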
openstack/pyghmi
pyghmi/ipmi/private/session.py
Session._keepalive
def _keepalive(self): """Performs a keepalive to avoid idle disconnect """ try: keptalive = False if self._customkeepalives: kaids = list(self._customkeepalives.keys()) for keepalive in kaids: try: cmd, callback = self._customkeepalives[keepalive] except TypeError: # raw_command made customkeepalives None break except KeyError: # raw command ultimately caused a keepalive to # deregister continue if callable(cmd): cmd() continue keptalive = True cmd['callback'] = self._keepalive_wrapper(callback) self.raw_command(**cmd) if not keptalive: if self.incommand: # if currently in command, no cause to keepalive return self.raw_command(netfn=6, command=1, callback=self._keepalive_wrapper(None)) except exc.IpmiException: self._mark_broken()
python
def _keepalive(self): """Performs a keepalive to avoid idle disconnect """ try: keptalive = False if self._customkeepalives: kaids = list(self._customkeepalives.keys()) for keepalive in kaids: try: cmd, callback = self._customkeepalives[keepalive] except TypeError: # raw_command made customkeepalives None break except KeyError: # raw command ultimately caused a keepalive to # deregister continue if callable(cmd): cmd() continue keptalive = True cmd['callback'] = self._keepalive_wrapper(callback) self.raw_command(**cmd) if not keptalive: if self.incommand: # if currently in command, no cause to keepalive return self.raw_command(netfn=6, command=1, callback=self._keepalive_wrapper(None)) except exc.IpmiException: self._mark_broken()
[ "def", "_keepalive", "(", "self", ")", ":", "try", ":", "keptalive", "=", "False", "if", "self", ".", "_customkeepalives", ":", "kaids", "=", "list", "(", "self", ".", "_customkeepalives", ".", "keys", "(", ")", ")", "for", "keepalive", "in", "kaids", ":", "try", ":", "cmd", ",", "callback", "=", "self", ".", "_customkeepalives", "[", "keepalive", "]", "except", "TypeError", ":", "# raw_command made customkeepalives None", "break", "except", "KeyError", ":", "# raw command ultimately caused a keepalive to", "# deregister", "continue", "if", "callable", "(", "cmd", ")", ":", "cmd", "(", ")", "continue", "keptalive", "=", "True", "cmd", "[", "'callback'", "]", "=", "self", ".", "_keepalive_wrapper", "(", "callback", ")", "self", ".", "raw_command", "(", "*", "*", "cmd", ")", "if", "not", "keptalive", ":", "if", "self", ".", "incommand", ":", "# if currently in command, no cause to keepalive", "return", "self", ".", "raw_command", "(", "netfn", "=", "6", ",", "command", "=", "1", ",", "callback", "=", "self", ".", "_keepalive_wrapper", "(", "None", ")", ")", "except", "exc", ".", "IpmiException", ":", "self", ".", "_mark_broken", "(", ")" ]
Performs a keepalive to avoid idle disconnect
[ "Performs", "a", "keepalive", "to", "avoid", "idle", "disconnect" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/session.py#L1215-L1245
train
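Reading the dispatch above, a registered keepalive can take two shapes: a dict of raw_command arguments (which also suppresses the default netfn=6/command=1 ping) or a bare callable that is simply invoked. A small illustration, with `ipmisession` assumed to exist:

# dict form: forwarded as raw_command(**cmd), with the callback wrapped
ipmisession.register_keepalive(cmd={'netfn': 6, 'command': 1}, callback=None)

# callable form: invoked as-is; the default keepalive is still sent
ipmisession.register_keepalive(cmd=lambda: None, callback=None)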
openstack/pyghmi
pyghmi/util/webclient.py
SecureHTTPConnection.download
def download(self, url, file): """Download a file to filename or file object """ if isinstance(file, str) or isinstance(file, unicode): file = open(file, 'wb') webclient = self.dupe() webclient.request('GET', url) rsp = webclient.getresponse() self._currdl = rsp self._dlfile = file for chunk in iter(lambda: rsp.read(16384), ''): file.write(chunk) self._currdl = None file.close()
python
def download(self, url, file): """Download a file to filename or file object """ if isinstance(file, str) or isinstance(file, unicode): file = open(file, 'wb') webclient = self.dupe() webclient.request('GET', url) rsp = webclient.getresponse() self._currdl = rsp self._dlfile = file for chunk in iter(lambda: rsp.read(16384), ''): file.write(chunk) self._currdl = None file.close()
[ "def", "download", "(", "self", ",", "url", ",", "file", ")", ":", "if", "isinstance", "(", "file", ",", "str", ")", "or", "isinstance", "(", "file", ",", "unicode", ")", ":", "file", "=", "open", "(", "file", ",", "'wb'", ")", "webclient", "=", "self", ".", "dupe", "(", ")", "webclient", ".", "request", "(", "'GET'", ",", "url", ")", "rsp", "=", "webclient", ".", "getresponse", "(", ")", "self", ".", "_currdl", "=", "rsp", "self", ".", "_dlfile", "=", "file", "for", "chunk", "in", "iter", "(", "lambda", ":", "rsp", ".", "read", "(", "16384", ")", ",", "''", ")", ":", "file", ".", "write", "(", "chunk", ")", "self", ".", "_currdl", "=", "None", "file", ".", "close", "(", ")" ]
Download a file to filename or file object
[ "Download", "a", "file", "to", "filename", "or", "file", "object" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/util/webclient.py#L202-L216
train
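A usage sketch; `wc` stands for an already-constructed SecureHTTPConnection (its constructor arguments are not part of this record) and the paths are illustrative.

# Pass a path (opened internally in 'wb' mode) ...
wc.download('/files/image.img', '/tmp/image.img')

# ... or an already-open binary file object; the method closes it when done.
out = open('/tmp/other.img', 'wb')
wc.download('/files/other.img', out)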
openstack/pyghmi
pyghmi/util/webclient.py
SecureHTTPConnection.upload
def upload(self, url, filename, data=None, formname=None, otherfields=()): """Upload a file to the url :param url: :param filename: The name of the file :param data: A file object or data to use rather than reading from the file. :return: """ if data is None: data = open(filename, 'rb') self._upbuffer = StringIO.StringIO(get_upload_form(filename, data, formname, otherfields)) ulheaders = self.stdheaders.copy() ulheaders['Content-Type'] = 'multipart/form-data; boundary=' + BND ulheaders['Content-Length'] = len(uploadforms[filename]) self.ulsize = len(uploadforms[filename]) webclient = self.dupe() webclient.request('POST', url, self._upbuffer, ulheaders) rsp = webclient.getresponse() # peer updates in progress should already have pointers, # subsequent transactions will cause memory to needlessly double, # but easiest way to keep memory relatively low try: del uploadforms[filename] except KeyError: # something could have already deleted it pass self.rspstatus = rsp.status if rsp.status != 200: raise Exception('Unexpected response in file upload: ' + rsp.read()) return rsp.read()
python
def upload(self, url, filename, data=None, formname=None, otherfields=()): """Upload a file to the url :param url: :param filename: The name of the file :param data: A file object or data to use rather than reading from the file. :return: """ if data is None: data = open(filename, 'rb') self._upbuffer = StringIO.StringIO(get_upload_form(filename, data, formname, otherfields)) ulheaders = self.stdheaders.copy() ulheaders['Content-Type'] = 'multipart/form-data; boundary=' + BND ulheaders['Content-Length'] = len(uploadforms[filename]) self.ulsize = len(uploadforms[filename]) webclient = self.dupe() webclient.request('POST', url, self._upbuffer, ulheaders) rsp = webclient.getresponse() # peer updates in progress should already have pointers, # subsequent transactions will cause memory to needlessly double, # but easiest way to keep memory relatively low try: del uploadforms[filename] except KeyError: # something could have already deleted it pass self.rspstatus = rsp.status if rsp.status != 200: raise Exception('Unexpected response in file upload: ' + rsp.read()) return rsp.read()
[ "def", "upload", "(", "self", ",", "url", ",", "filename", ",", "data", "=", "None", ",", "formname", "=", "None", ",", "otherfields", "=", "(", ")", ")", ":", "if", "data", "is", "None", ":", "data", "=", "open", "(", "filename", ",", "'rb'", ")", "self", ".", "_upbuffer", "=", "StringIO", ".", "StringIO", "(", "get_upload_form", "(", "filename", ",", "data", ",", "formname", ",", "otherfields", ")", ")", "ulheaders", "=", "self", ".", "stdheaders", ".", "copy", "(", ")", "ulheaders", "[", "'Content-Type'", "]", "=", "'multipart/form-data; boundary='", "+", "BND", "ulheaders", "[", "'Content-Length'", "]", "=", "len", "(", "uploadforms", "[", "filename", "]", ")", "self", ".", "ulsize", "=", "len", "(", "uploadforms", "[", "filename", "]", ")", "webclient", "=", "self", ".", "dupe", "(", ")", "webclient", ".", "request", "(", "'POST'", ",", "url", ",", "self", ".", "_upbuffer", ",", "ulheaders", ")", "rsp", "=", "webclient", ".", "getresponse", "(", ")", "# peer updates in progress should already have pointers,", "# subsequent transactions will cause memory to needlessly double,", "# but easiest way to keep memory relatively low", "try", ":", "del", "uploadforms", "[", "filename", "]", "except", "KeyError", ":", "# something could have already deleted it", "pass", "self", ".", "rspstatus", "=", "rsp", ".", "status", "if", "rsp", ".", "status", "!=", "200", ":", "raise", "Exception", "(", "'Unexpected response in file upload: '", "+", "rsp", ".", "read", "(", ")", ")", "return", "rsp", ".", "read", "(", ")" ]
Upload a file to the url :param url: :param filename: The name of the file :param data: A file object or data to use rather than reading from the file. :return:
[ "Upload", "a", "file", "to", "the", "url" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/util/webclient.py#L224-L257
train
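A usage sketch for the multipart upload helper; `wc` is again an assumed SecureHTTPConnection instance and the URL and form field name are purely illustrative.

# Read the file from disk and post it as a multipart form.
body = wc.upload('/api/upload', 'firmware.bin')

# Or hand in an open file object and an explicit form field name.
with open('firmware.bin', 'rb') as fw:
    body = wc.upload('/api/upload', 'firmware.bin', data=fw, formname='fwfile')
print(wc.rspstatus)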
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/inventory.py
parse_inventory_category_entry
def parse_inventory_category_entry(raw, fields): """Parses one entry in an inventory category. :param raw: the raw data to the entry. May contain more than one entry, only one entry will be read in that case. :param fields: an iterable of EntryField objects to be used for parsing the entry. :returns: dict -- a tuple with the number of bytes read and a dictionary representing the entry. """ r = raw obj = {} bytes_read = 0 discard = False for field in fields: value = struct.unpack_from(field.fmt, r)[0] read = struct.calcsize(field.fmt) bytes_read += read r = r[read:] # If this entry is not actually present, just parse and then discard it if field.presence and not bool(value): discard = True if not field.include: continue if (field.fmt[-1] == "s"): value = value.rstrip("\x00") if (field.mapper and value in field.mapper): value = field.mapper[value] if (field.valuefunc): value = field.valuefunc(value) if not field.multivaluefunc: obj[field.name] = value else: for key in value: obj[key] = value[key] if discard: obj = None return bytes_read, obj
python
def parse_inventory_category_entry(raw, fields): """Parses one entry in an inventory category. :param raw: the raw data to the entry. May contain more than one entry, only one entry will be read in that case. :param fields: an iterable of EntryField objects to be used for parsing the entry. :returns: dict -- a tuple with the number of bytes read and a dictionary representing the entry. """ r = raw obj = {} bytes_read = 0 discard = False for field in fields: value = struct.unpack_from(field.fmt, r)[0] read = struct.calcsize(field.fmt) bytes_read += read r = r[read:] # If this entry is not actually present, just parse and then discard it if field.presence and not bool(value): discard = True if not field.include: continue if (field.fmt[-1] == "s"): value = value.rstrip("\x00") if (field.mapper and value in field.mapper): value = field.mapper[value] if (field.valuefunc): value = field.valuefunc(value) if not field.multivaluefunc: obj[field.name] = value else: for key in value: obj[key] = value[key] if discard: obj = None return bytes_read, obj
[ "def", "parse_inventory_category_entry", "(", "raw", ",", "fields", ")", ":", "r", "=", "raw", "obj", "=", "{", "}", "bytes_read", "=", "0", "discard", "=", "False", "for", "field", "in", "fields", ":", "value", "=", "struct", ".", "unpack_from", "(", "field", ".", "fmt", ",", "r", ")", "[", "0", "]", "read", "=", "struct", ".", "calcsize", "(", "field", ".", "fmt", ")", "bytes_read", "+=", "read", "r", "=", "r", "[", "read", ":", "]", "# If this entry is not actually present, just parse and then discard it", "if", "field", ".", "presence", "and", "not", "bool", "(", "value", ")", ":", "discard", "=", "True", "if", "not", "field", ".", "include", ":", "continue", "if", "(", "field", ".", "fmt", "[", "-", "1", "]", "==", "\"s\"", ")", ":", "value", "=", "value", ".", "rstrip", "(", "\"\\x00\"", ")", "if", "(", "field", ".", "mapper", "and", "value", "in", "field", ".", "mapper", ")", ":", "value", "=", "field", ".", "mapper", "[", "value", "]", "if", "(", "field", ".", "valuefunc", ")", ":", "value", "=", "field", ".", "valuefunc", "(", "value", ")", "if", "not", "field", ".", "multivaluefunc", ":", "obj", "[", "field", ".", "name", "]", "=", "value", "else", ":", "for", "key", "in", "value", ":", "obj", "[", "key", "]", "=", "value", "[", "key", "]", "if", "discard", ":", "obj", "=", "None", "return", "bytes_read", ",", "obj" ]
Parses one entry in an inventory category. :param raw: the raw data to the entry. May contain more than one entry, only one entry will be read in that case. :param fields: an iterable of EntryField objects to be used for parsing the entry. :returns: dict -- a tuple with the number of bytes read and a dictionary representing the entry.
[ "Parses", "one", "entry", "in", "an", "inventory", "category", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/inventory.py#L105-L147
train
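A self-contained sketch of driving the parser. EntryField itself is defined elsewhere in this module, so the namedtuple below is a stand-in carrying only the attributes the code above actually reads (name, fmt, include, presence, mapper, valuefunc, multivaluefunc).

import struct
from collections import namedtuple
from pyghmi.ipmi.oem.lenovo.inventory import parse_inventory_category_entry

Field = namedtuple('Field',
                   'name fmt include presence mapper valuefunc multivaluefunc')
fields = [
    Field('index', 'B', True, False, None, None, False),
    Field('speed_mhz', '<H', True, False, None, None, False),
]
raw = struct.pack('<BH', 3, 2400)
read, entry = parse_inventory_category_entry(raw, fields)
# read == 3, entry == {'index': 3, 'speed_mhz': 2400}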
openstack/pyghmi
pyghmi/ipmi/private/serversession.py
IpmiServer.sessionless_data
def sessionless_data(self, data, sockaddr): """Examines unsolocited packet and decides appropriate action. For a listening IpmiServer, a packet without an active session comes here for examination. If it is something that is utterly sessionless (e.g. get channel authentication), send the appropriate response. If it is a get session challenge or open rmcp+ request, spawn a session to handle the context. """ if len(data) < 22: return data = bytearray(data) if not (data[0] == 6 and data[2:4] == b'\xff\x07'): # not ipmi return if data[4] == 6: # ipmi 2 payload... payloadtype = data[5] if payloadtype not in (0, 16): return if payloadtype == 16: # new session to handle conversation ServerSession(self.authdata, self.kg, sockaddr, self.serversocket, data[16:], self.uuid, bmc=self) return # ditch two byte, because ipmi2 header is two # bytes longer than ipmi1 (payload type added, payload length 2). data = data[2:] myaddr, netfnlun = struct.unpack('2B', bytes(data[14:16])) netfn = (netfnlun & 0b11111100) >> 2 mylun = netfnlun & 0b11 if netfn == 6: # application request if data[19] == 0x38: # cmd = get channel auth capabilities verchannel, level = struct.unpack('2B', bytes(data[20:22])) version = verchannel & 0b10000000 if version != 0b10000000: return channel = verchannel & 0b1111 if channel != 0xe: return (clientaddr, clientlun) = struct.unpack( 'BB', bytes(data[17:19])) clientseq = clientlun >> 2 clientlun &= 0b11 # Lun is only the least significant bits level &= 0b1111 self.send_auth_cap(myaddr, mylun, clientaddr, clientlun, clientseq, sockaddr)
python
def sessionless_data(self, data, sockaddr): """Examines unsolocited packet and decides appropriate action. For a listening IpmiServer, a packet without an active session comes here for examination. If it is something that is utterly sessionless (e.g. get channel authentication), send the appropriate response. If it is a get session challenge or open rmcp+ request, spawn a session to handle the context. """ if len(data) < 22: return data = bytearray(data) if not (data[0] == 6 and data[2:4] == b'\xff\x07'): # not ipmi return if data[4] == 6: # ipmi 2 payload... payloadtype = data[5] if payloadtype not in (0, 16): return if payloadtype == 16: # new session to handle conversation ServerSession(self.authdata, self.kg, sockaddr, self.serversocket, data[16:], self.uuid, bmc=self) return # ditch two byte, because ipmi2 header is two # bytes longer than ipmi1 (payload type added, payload length 2). data = data[2:] myaddr, netfnlun = struct.unpack('2B', bytes(data[14:16])) netfn = (netfnlun & 0b11111100) >> 2 mylun = netfnlun & 0b11 if netfn == 6: # application request if data[19] == 0x38: # cmd = get channel auth capabilities verchannel, level = struct.unpack('2B', bytes(data[20:22])) version = verchannel & 0b10000000 if version != 0b10000000: return channel = verchannel & 0b1111 if channel != 0xe: return (clientaddr, clientlun) = struct.unpack( 'BB', bytes(data[17:19])) clientseq = clientlun >> 2 clientlun &= 0b11 # Lun is only the least significant bits level &= 0b1111 self.send_auth_cap(myaddr, mylun, clientaddr, clientlun, clientseq, sockaddr)
[ "def", "sessionless_data", "(", "self", ",", "data", ",", "sockaddr", ")", ":", "if", "len", "(", "data", ")", "<", "22", ":", "return", "data", "=", "bytearray", "(", "data", ")", "if", "not", "(", "data", "[", "0", "]", "==", "6", "and", "data", "[", "2", ":", "4", "]", "==", "b'\\xff\\x07'", ")", ":", "# not ipmi", "return", "if", "data", "[", "4", "]", "==", "6", ":", "# ipmi 2 payload...", "payloadtype", "=", "data", "[", "5", "]", "if", "payloadtype", "not", "in", "(", "0", ",", "16", ")", ":", "return", "if", "payloadtype", "==", "16", ":", "# new session to handle conversation", "ServerSession", "(", "self", ".", "authdata", ",", "self", ".", "kg", ",", "sockaddr", ",", "self", ".", "serversocket", ",", "data", "[", "16", ":", "]", ",", "self", ".", "uuid", ",", "bmc", "=", "self", ")", "return", "# ditch two byte, because ipmi2 header is two", "# bytes longer than ipmi1 (payload type added, payload length 2).", "data", "=", "data", "[", "2", ":", "]", "myaddr", ",", "netfnlun", "=", "struct", ".", "unpack", "(", "'2B'", ",", "bytes", "(", "data", "[", "14", ":", "16", "]", ")", ")", "netfn", "=", "(", "netfnlun", "&", "0b11111100", ")", ">>", "2", "mylun", "=", "netfnlun", "&", "0b11", "if", "netfn", "==", "6", ":", "# application request", "if", "data", "[", "19", "]", "==", "0x38", ":", "# cmd = get channel auth capabilities", "verchannel", ",", "level", "=", "struct", ".", "unpack", "(", "'2B'", ",", "bytes", "(", "data", "[", "20", ":", "22", "]", ")", ")", "version", "=", "verchannel", "&", "0b10000000", "if", "version", "!=", "0b10000000", ":", "return", "channel", "=", "verchannel", "&", "0b1111", "if", "channel", "!=", "0xe", ":", "return", "(", "clientaddr", ",", "clientlun", ")", "=", "struct", ".", "unpack", "(", "'BB'", ",", "bytes", "(", "data", "[", "17", ":", "19", "]", ")", ")", "clientseq", "=", "clientlun", ">>", "2", "clientlun", "&=", "0b11", "# Lun is only the least significant bits", "level", "&=", "0b1111", "self", ".", "send_auth_cap", "(", "myaddr", ",", "mylun", ",", "clientaddr", ",", "clientlun", ",", "clientseq", ",", "sockaddr", ")" ]
Examines unsolocited packet and decides appropriate action. For a listening IpmiServer, a packet without an active session comes here for examination. If it is something that is utterly sessionless (e.g. get channel authentication), send the appropriate response. If it is a get session challenge or open rmcp+ request, spawn a session to handle the context.
[ "Examines", "unsolocited", "packet", "and", "decides", "appropriate", "action", "." ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/serversession.py#L297-L341
train
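Two of the checks above condensed into a standalone illustration: the RMCP/ASF sanity test on the first bytes, and unpacking the netfn/LUN byte of an IPMI 1.5 header.

pkt = bytearray(b'\x06\x00\xff\x07' + b'\x00' * 18)   # 22-byte toy packet
is_ipmi = pkt[0] == 6 and pkt[2:4] == b'\xff\x07'     # True

netfnlun = 0x18
netfn = (netfnlun & 0b11111100) >> 2   # 6 -> application request
lun = netfnlun & 0b11                  # 0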
openstack/pyghmi
pyghmi/ipmi/private/serversession.py
IpmiServer.set_kg
def set_kg(self, kg): """Sets the Kg for the BMC to use In RAKP, Kg is a BMC-specific integrity key that can be set. If not set, Kuid is used for the integrity key """ try: self.kg = kg.encode('utf-8') except AttributeError: self.kg = kg
python
def set_kg(self, kg): """Sets the Kg for the BMC to use In RAKP, Kg is a BMC-specific integrity key that can be set. If not set, Kuid is used for the integrity key """ try: self.kg = kg.encode('utf-8') except AttributeError: self.kg = kg
[ "def", "set_kg", "(", "self", ",", "kg", ")", ":", "try", ":", "self", ".", "kg", "=", "kg", ".", "encode", "(", "'utf-8'", ")", "except", "AttributeError", ":", "self", ".", "kg", "=", "kg" ]
Sets the Kg for the BMC to use In RAKP, Kg is a BMC-specific integrity key that can be set. If not set, Kuid is used for the integrity key
[ "Sets", "the", "Kg", "for", "the", "BMC", "to", "use" ]
f710b1d30a8eed19a9e86f01f9351c737666f3e5
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/private/serversession.py#L343-L352
train
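Usage is a one-liner; `bmc` stands for an IpmiServer instance created elsewhere, and the key value is illustrative.

bmc.set_kg('0123456789abcdef')    # str is UTF-8 encoded internally
bmc.set_kg(b'0123456789abcdef')   # bytes pass through unchanged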
astraw/stdeb
stdeb/util.py
source_debianize_name
def source_debianize_name(name): "make name acceptable as a Debian source package name" name = name.replace('_','-') name = name.replace('.','-') name = name.lower() return name
python
def source_debianize_name(name): "make name acceptable as a Debian source package name" name = name.replace('_','-') name = name.replace('.','-') name = name.lower() return name
[ "def", "source_debianize_name", "(", "name", ")", ":", "name", "=", "name", ".", "replace", "(", "'_'", ",", "'-'", ")", "name", "=", "name", ".", "replace", "(", "'.'", ",", "'-'", ")", "name", "=", "name", ".", "lower", "(", ")", "return", "name" ]
make name acceptable as a Debian source package name
[ "make", "name", "acceptable", "as", "a", "Debian", "source", "package", "name" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L220-L225
train
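Since the transformation is just the three replacements shown, a concrete example follows directly:

from stdeb.util import source_debianize_name

source_debianize_name('My_Package.Name')   # -> 'my-package-name'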
astraw/stdeb
stdeb/util.py
get_date_822
def get_date_822(): """return output of 822-date command""" cmd = '/bin/date' if not os.path.exists(cmd): raise ValueError('%s command does not exist.'%cmd) args = [cmd,'-R'] result = get_cmd_stdout(args).strip() result = normstr(result) return result
python
def get_date_822(): """return output of 822-date command""" cmd = '/bin/date' if not os.path.exists(cmd): raise ValueError('%s command does not exist.'%cmd) args = [cmd,'-R'] result = get_cmd_stdout(args).strip() result = normstr(result) return result
[ "def", "get_date_822", "(", ")", ":", "cmd", "=", "'/bin/date'", "if", "not", "os", ".", "path", ".", "exists", "(", "cmd", ")", ":", "raise", "ValueError", "(", "'%s command does not exist.'", "%", "cmd", ")", "args", "=", "[", "cmd", ",", "'-R'", "]", "result", "=", "get_cmd_stdout", "(", "args", ")", ".", "strip", "(", ")", "result", "=", "normstr", "(", "result", ")", "return", "result" ]
return output of 822-date command
[ "return", "output", "of", "822", "-", "date", "command" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L261-L269
train
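The helper simply shells out to `date -R`, so the return value is an RFC 2822 timestamp of the kind a Debian changelog entry expects:

from stdeb.util import get_date_822

stamp = get_date_822()
# e.g. 'Tue, 01 Jan 2019 12:00:00 +0000' (exact value depends on the clock)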
astraw/stdeb
stdeb/util.py
make_tarball
def make_tarball(tarball_fname,directory,cwd=None): "create a tarball from a directory" if tarball_fname.endswith('.gz'): opts = 'czf' else: opts = 'cf' args = ['/bin/tar',opts,tarball_fname,directory] process_command(args, cwd=cwd)
python
def make_tarball(tarball_fname,directory,cwd=None): "create a tarball from a directory" if tarball_fname.endswith('.gz'): opts = 'czf' else: opts = 'cf' args = ['/bin/tar',opts,tarball_fname,directory] process_command(args, cwd=cwd)
[ "def", "make_tarball", "(", "tarball_fname", ",", "directory", ",", "cwd", "=", "None", ")", ":", "if", "tarball_fname", ".", "endswith", "(", "'.gz'", ")", ":", "opts", "=", "'czf'", "else", ":", "opts", "=", "'cf'", "args", "=", "[", "'/bin/tar'", ",", "opts", ",", "tarball_fname", ",", "directory", "]", "process_command", "(", "args", ",", "cwd", "=", "cwd", ")" ]
create a tarball from a directory
[ "create", "a", "tarball", "from", "a", "directory" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L458-L463
train
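A usage sketch with illustrative names; a '.gz' suffix switches the tar options from 'cf' to 'czf'.

from stdeb.util import make_tarball

# Creates dist/foo-1.0.tar.gz from the dist/foo-1.0/ directory.
make_tarball('foo-1.0.tar.gz', 'foo-1.0', cwd='dist')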
astraw/stdeb
stdeb/util.py
expand_tarball
def expand_tarball(tarball_fname,cwd=None): "expand a tarball" if tarball_fname.endswith('.gz'): opts = 'xzf' elif tarball_fname.endswith('.bz2'): opts = 'xjf' else: opts = 'xf' args = ['/bin/tar',opts,tarball_fname] process_command(args, cwd=cwd)
python
def expand_tarball(tarball_fname,cwd=None): "expand a tarball" if tarball_fname.endswith('.gz'): opts = 'xzf' elif tarball_fname.endswith('.bz2'): opts = 'xjf' else: opts = 'xf' args = ['/bin/tar',opts,tarball_fname] process_command(args, cwd=cwd)
[ "def", "expand_tarball", "(", "tarball_fname", ",", "cwd", "=", "None", ")", ":", "if", "tarball_fname", ".", "endswith", "(", "'.gz'", ")", ":", "opts", "=", "'xzf'", "elif", "tarball_fname", ".", "endswith", "(", "'.bz2'", ")", ":", "opts", "=", "'xjf'", "else", ":", "opts", "=", "'xf'", "args", "=", "[", "'/bin/tar'", ",", "opts", ",", "tarball_fname", "]", "process_command", "(", "args", ",", "cwd", "=", "cwd", ")" ]
expand a tarball
[ "expand", "a", "tarball" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L466-L472
train
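The matching extraction helper picks 'xzf' or 'xjf' from the suffix; a sketch with hypothetical paths:

from stdeb.util import expand_tarball

# Unpacks the archive inside the build/ directory.
expand_tarball('../dist/foo-1.0.tar.gz', cwd='build')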
astraw/stdeb
stdeb/util.py
expand_zip
def expand_zip(zip_fname,cwd=None): "expand a zip" unzip_path = '/usr/bin/unzip' if not os.path.exists(unzip_path): log.error('ERROR: {} does not exist'.format(unzip_path)) sys.exit(1) args = [unzip_path, zip_fname] # Does it have a top dir res = subprocess.Popen( [args[0], '-l', args[1]], cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) contents = [] for line in res.stdout.readlines()[3:-2]: contents.append(line.split()[-1]) commonprefix = os.path.commonprefix(contents) if not commonprefix: extdir = os.path.join(cwd, os.path.basename(zip_fname[:-4])) args.extend(['-d', os.path.abspath(extdir)]) process_command(args, cwd=cwd)
python
def expand_zip(zip_fname,cwd=None): "expand a zip" unzip_path = '/usr/bin/unzip' if not os.path.exists(unzip_path): log.error('ERROR: {} does not exist'.format(unzip_path)) sys.exit(1) args = [unzip_path, zip_fname] # Does it have a top dir res = subprocess.Popen( [args[0], '-l', args[1]], cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) contents = [] for line in res.stdout.readlines()[3:-2]: contents.append(line.split()[-1]) commonprefix = os.path.commonprefix(contents) if not commonprefix: extdir = os.path.join(cwd, os.path.basename(zip_fname[:-4])) args.extend(['-d', os.path.abspath(extdir)]) process_command(args, cwd=cwd)
[ "def", "expand_zip", "(", "zip_fname", ",", "cwd", "=", "None", ")", ":", "unzip_path", "=", "'/usr/bin/unzip'", "if", "not", "os", ".", "path", ".", "exists", "(", "unzip_path", ")", ":", "log", ".", "error", "(", "'ERROR: {} does not exist'", ".", "format", "(", "unzip_path", ")", ")", "sys", ".", "exit", "(", "1", ")", "args", "=", "[", "unzip_path", ",", "zip_fname", "]", "# Does it have a top dir", "res", "=", "subprocess", ".", "Popen", "(", "[", "args", "[", "0", "]", ",", "'-l'", ",", "args", "[", "1", "]", "]", ",", "cwd", "=", "cwd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", ")", "contents", "=", "[", "]", "for", "line", "in", "res", ".", "stdout", ".", "readlines", "(", ")", "[", "3", ":", "-", "2", "]", ":", "contents", ".", "append", "(", "line", ".", "split", "(", ")", "[", "-", "1", "]", ")", "commonprefix", "=", "os", ".", "path", ".", "commonprefix", "(", "contents", ")", "if", "not", "commonprefix", ":", "extdir", "=", "os", ".", "path", ".", "join", "(", "cwd", ",", "os", ".", "path", ".", "basename", "(", "zip_fname", "[", ":", "-", "4", "]", ")", ")", "args", ".", "extend", "(", "[", "'-d'", ",", "os", ".", "path", ".", "abspath", "(", "extdir", ")", "]", ")", "process_command", "(", "args", ",", "cwd", "=", "cwd", ")" ]
expand a zip
[ "expand", "a", "zip" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L475-L496
train
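A sketch with hypothetical paths; per the common-prefix check above, an archive with no single top-level directory is extracted into a subdirectory named after the zip file.

from stdeb.util import expand_zip

# Contents without a common prefix land in build/foo-1.0/.
expand_zip('../dist/foo-1.0.zip', cwd='build')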
astraw/stdeb
stdeb/util.py
parse_vals
def parse_vals(cfg,section,option): """parse comma separated values in debian control file style from .cfg""" try: vals = cfg.get(section,option) except ConfigParser.NoSectionError as err: if section != 'DEFAULT': vals = cfg.get('DEFAULT',option) else: raise err vals = vals.split('#')[0] vals = vals.strip() vals = vals.split(',') vals = [v.strip() for v in vals] vals = [v for v in vals if len(v)] return vals
python
def parse_vals(cfg,section,option): """parse comma separated values in debian control file style from .cfg""" try: vals = cfg.get(section,option) except ConfigParser.NoSectionError as err: if section != 'DEFAULT': vals = cfg.get('DEFAULT',option) else: raise err vals = vals.split('#')[0] vals = vals.strip() vals = vals.split(',') vals = [v.strip() for v in vals] vals = [v for v in vals if len(v)] return vals
[ "def", "parse_vals", "(", "cfg", ",", "section", ",", "option", ")", ":", "try", ":", "vals", "=", "cfg", ".", "get", "(", "section", ",", "option", ")", "except", "ConfigParser", ".", "NoSectionError", "as", "err", ":", "if", "section", "!=", "'DEFAULT'", ":", "vals", "=", "cfg", ".", "get", "(", "'DEFAULT'", ",", "option", ")", "else", ":", "raise", "err", "vals", "=", "vals", ".", "split", "(", "'#'", ")", "[", "0", "]", "vals", "=", "vals", ".", "strip", "(", ")", "vals", "=", "vals", ".", "split", "(", "','", ")", "vals", "=", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "vals", "]", "vals", "=", "[", "v", "for", "v", "in", "vals", "if", "len", "(", "v", ")", "]", "return", "vals" ]
parse comma separated values in debian control file style from .cfg
[ "parse", "comma", "separated", "values", "in", "debian", "control", "file", "style", "from", ".", "cfg" ]
493ab88e8a60be053b1baef81fb39b45e17ceef5
https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L595-L609
train
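A self-contained sketch of the comma splitting and DEFAULT fallback, using a Python 3 configparser inline (the module's own compat import is not shown in this record); the section name is illustrative.

import configparser as ConfigParser
from stdeb.util import parse_vals

cfg = ConfigParser.ConfigParser()
cfg.read_string('[DEFAULT]\nDepends: python, python-setuptools,\n')
parse_vals(cfg, 'my-package', 'Depends')
# -> ['python', 'python-setuptools']  (missing section falls back to DEFAULT,
#    and the trailing empty item is dropped)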