body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified |
---|---|---|---|---|---|---|---|---|---|
0610bcfb1cc684b289e10e1eaa2b10fe536f7a82c20d131093a3e011c797a40c | @property
def matrix(self) -> numpy.ndarray:
'Getter on the unitary matrix representing the circuit.\n\n Depending on the value of `cache_matrix` given at initialisation, this\n method will either return the cached matrix or compute it.\n\n :return: the unitary matrix representing the current quantum circuit.\n '
if self._cache_matrix:
return self._matrix
ret = numpy.identity((2 ** self._qubit_number))
for operation in self.operations:
ret = (ret @ operation.matrix(self._qubit_number))
return ret | Getter on the unitary matrix representing the circuit.
Depending on the value of `cache_matrix` given at initialisation, this
method will either return the cached matrix or compute it.
:return: the unitary matrix representing the current quantum circuit. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | matrix | nelimee/qtoolkit | 3 | python | @property
def matrix(self) -> numpy.ndarray:
'Getter on the unitary matrix representing the circuit.\n\n Depending on the value of `cache_matrix` given at initialisation, this\n method will either return the cached matrix or compute it.\n\n :return: the unitary matrix representing the current quantum circuit.\n '
if self._cache_matrix:
return self._matrix
ret = numpy.identity((2 ** self._qubit_number))
for operation in self.operations:
ret = (ret @ operation.matrix(self._qubit_number))
return ret | @property
def matrix(self) -> numpy.ndarray:
'Getter on the unitary matrix representing the circuit.\n\n Depending on the value of `cache_matrix` given at initialisation, this\n method will either return the cached matrix or compute it.\n\n :return: the unitary matrix representing the current quantum circuit.\n '
if self._cache_matrix:
return self._matrix
ret = numpy.identity((2 ** self._qubit_number))
for operation in self.operations:
ret = (ret @ operation.matrix(self._qubit_number))
return ret<|docstring|>Getter on the unitary matrix representing the circuit.
Depending on the value of `cache_matrix` given at initialisation, this
method will either return the cached matrix or compute it.
:return: the unitary matrix representing the current quantum circuit.<|endoftext|> |
0257b4863f2190a7a13a5edda80c49c3f6f539cf5d0e365a86adece525b62d9d | @property
def qubit_number(self) -> int:
'Getter on the number of qubits of the current instance.'
return self._qubit_number | Getter on the number of qubits of the current instance. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | qubit_number | nelimee/qtoolkit | 3 | python | @property
def qubit_number(self) -> int:
return self._qubit_number | @property
def qubit_number(self) -> int:
return self._qubit_number<|docstring|>Getter on the number of qubits of the current instance.<|endoftext|> |
e9b95f9c26dcc027950d32139d08111682017468b6e32d798805a893a31dcc55 | @property
def size(self) -> int:
'Getter on the number of quantum gates in the current instance.'
return (self._node_counter - self._qubit_number) | Getter on the number of quantum gates in the current instance. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | size | nelimee/qtoolkit | 3 | python | @property
def size(self) -> int:
return (self._node_counter - self._qubit_number) | @property
def size(self) -> int:
return (self._node_counter - self._qubit_number)<|docstring|>Getter on the number of quantum gates in the current instance.<|endoftext|> |
774733d52b48eea0c433be9100b21e5a22a68f1cd1010c1c3ad54348ff904506 | def __iadd__(self, other: 'QuantumCircuit') -> 'QuantumCircuit':
'Add all the operations contained in `other` to the current instance.\n\n :param other: the quantum circuit containing the operations to append\n to the current instance. `other` and the instance\n :py:meth:`~.__iadd__` is called on should have the same number of\n qubits.\n :return: The union of self and other.\n :raise RuntimeError: if `self` and `other` have a different number of\n qubits.\n '
if (self.qubit_number != other.qubit_number):
raise RuntimeError(f'The number of qubits of the first circuit ({self.qubit_number}) does not match the number of qubits of the second circuit ({other.qubit_number}).')
other_subgraph = other._graph.subgraph(range(other.qubit_number, other._node_counter))
self._graph = nx.disjoint_union(self._graph, other_subgraph)
for qubit_index in range(self.qubit_number):
old_neighbor = list(other._graph.neighbors(qubit_index))
if old_neighbor:
new_neighbor = ((old_neighbor[0] - other.qubit_number) + self._node_counter)
self._graph.add_edge(self._last_inserted_operations[qubit_index], new_neighbor)
self._last_inserted_operations[qubit_index] = new_neighbor
self._node_counter += (other._node_counter - other.qubit_number)
if (self._cache_matrix and (other._matrix is not None)):
self._matrix = (self.matrix @ other.matrix)
return self | Add all the operations contained in `other` to the current instance.
:param other: the quantum circuit containing the operations to append
to the current instance. `other` and the instance
:py:meth:`~.__iadd__` is called on should have the same number of
qubits.
:return: The union of self and other.
:raise RuntimeError: if `self` and `other` have a different number of
qubits. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | __iadd__ | nelimee/qtoolkit | 3 | python | def __iadd__(self, other: 'QuantumCircuit') -> 'QuantumCircuit':
'Add all the operations contained in `other` to the current instance.\n\n :param other: the quantum circuit containing the operations to append\n to the current instance. `other` and the instance\n :py:meth:`~.__iadd__` is called on should have the same number of\n qubits.\n :return: The union of self and other.\n :raise RuntimeError: if `self` and `other` have a different number of\n qubits.\n '
if (self.qubit_number != other.qubit_number):
raise RuntimeError(f'The number of qubits of the first circuit ({self.qubit_number}) does not match the number of qubits of the second circuit ({other.qubit_number}).')
other_subgraph = other._graph.subgraph(range(other.qubit_number, other._node_counter))
self._graph = nx.disjoint_union(self._graph, other_subgraph)
for qubit_index in range(self.qubit_number):
old_neighbor = list(other._graph.neighbors(qubit_index))
if old_neighbor:
new_neighbor = ((old_neighbor[0] - other.qubit_number) + self._node_counter)
self._graph.add_edge(self._last_inserted_operations[qubit_index], new_neighbor)
self._last_inserted_operations[qubit_index] = new_neighbor
self._node_counter += (other._node_counter - other.qubit_number)
if (self._cache_matrix and (other._matrix is not None)):
self._matrix = (self.matrix @ other.matrix)
return self | def __iadd__(self, other: 'QuantumCircuit') -> 'QuantumCircuit':
'Add all the operations contained in `other` to the current instance.\n\n :param other: the quantum circuit containing the operations to append\n to the current instance. `other` and the instance\n :py:meth:`~.__iadd__` is called on should have the same number of\n qubits.\n :return: The union of self and other.\n :raise RuntimeError: if `self` and `other` have a different number of\n qubits.\n '
if (self.qubit_number != other.qubit_number):
raise RuntimeError(f'The number of qubits of the first circuit ({self.qubit_number}) does not match the number of qubits of the second circuit ({other.qubit_number}).')
other_subgraph = other._graph.subgraph(range(other.qubit_number, other._node_counter))
self._graph = nx.disjoint_union(self._graph, other_subgraph)
for qubit_index in range(self.qubit_number):
old_neighbor = list(other._graph.neighbors(qubit_index))
if old_neighbor:
new_neighbor = ((old_neighbor[0] - other.qubit_number) + self._node_counter)
self._graph.add_edge(self._last_inserted_operations[qubit_index], new_neighbor)
self._last_inserted_operations[qubit_index] = new_neighbor
self._node_counter += (other._node_counter - other.qubit_number)
if (self._cache_matrix and (other._matrix is not None)):
self._matrix = (self.matrix @ other.matrix)
return self<|docstring|>Add all the operations contained in `other` to the current instance.
:param other: the quantum circuit containing the operations to append
to the current instance. `other` and the instance
:py:meth:`~.__iadd__` is called on should have the same number of
qubits.
:return: The union of self and other.
:raise RuntimeError: if `self` and `other` have a different number of
qubits.<|endoftext|> |
cf41da4ab5d0c4ae8cd72f18d6e09d5e15d17c418881fc5c6aa5e8a66eef048c | def __matmul__(self: 'QuantumCircuit', other: 'QuantumCircuit') -> 'QuantumCircuit':
"Wrapper around __iadd__ for the new '@' operator."
cpy = copy.copy(self)
return cpy.__iadd__(other) | Wrapper around __iadd__ for the new '@' operator. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | __matmul__ | nelimee/qtoolkit | 3 | python | def __matmul__(self: 'QuantumCircuit', other: 'QuantumCircuit') -> 'QuantumCircuit':
cpy = copy.copy(self)
return cpy.__iadd__(other) | def __matmul__(self: 'QuantumCircuit', other: 'QuantumCircuit') -> 'QuantumCircuit':
cpy = copy.copy(self)
return cpy.__iadd__(other)<|docstring|>Wrapper around __iadd__ for the new '@' operator.<|endoftext|> |
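The `__iadd__`/`__matmul__` pair above composes two circuits of equal width. A hedged sketch follows, reusing the `QuantumCircuit(qubit_number, cache_matrix=...)` constructor signature that appears in `__copy__` and `inverse()` below; the circuits are left empty purely for brevity.

```python
a = QuantumCircuit(2, cache_matrix=False)
b = QuantumCircuit(2, cache_matrix=False)
combined = a @ b   # __matmul__ copies `a`, then __iadd__ appends b's operations
```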
c27ccf6f7558bc4371da3a436fae77c65b1a988e927dff535bf6c6de08b5ebd0 | def __copy__(self) -> 'QuantumCircuit':
'Override the default copy behaviour.'
cpy = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
if self.compressed:
cpy._compressed_graph = copy.copy(self._compressed_graph)
else:
cpy._graph = self._graph.copy()
cpy._node_counter = self._node_counter
cpy._last_inserted_operations = self._last_inserted_operations.copy()
if self._cache_matrix:
cpy._matrix = self._matrix
return cpy | Override the default copy behaviour. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | __copy__ | nelimee/qtoolkit | 3 | python | def __copy__(self) -> 'QuantumCircuit':
cpy = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
if self.compressed:
cpy._compressed_graph = copy.copy(self._compressed_graph)
else:
cpy._graph = self._graph.copy()
cpy._node_counter = self._node_counter
cpy._last_inserted_operations = self._last_inserted_operations.copy()
if self._cache_matrix:
cpy._matrix = self._matrix
return cpy | def __copy__(self) -> 'QuantumCircuit':
cpy = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
if self.compressed:
cpy._compressed_graph = copy.copy(self._compressed_graph)
else:
cpy._graph = self._graph.copy()
cpy._node_counter = self._node_counter
cpy._last_inserted_operations = self._last_inserted_operations.copy()
if self._cache_matrix:
cpy._matrix = self._matrix
return cpy<|docstring|>Override the default copy behaviour.<|endoftext|> |
51ff1d010b784b4373ae536792441e619ce50b8c070f715917ba64e87080835c | def compress(self) -> 'QuantumCircuit':
'Compress the instance to save some memory.\n\n This method is useful when a large number of small circuits needs to be\n stored in memory.\n\n .. warning:: Several methods of the :py:class:`~.QuantumCircuit` class\n will not work as expected (or will raise an exception) if called on\n a compressed circuit.\n '
if (not self.compressed):
self._compressed_graph = CompressedMultiDiGraph(self._graph)
del self._graph
return self | Compress the instance to save some memory.
This method is useful when a large number of small circuits needs to be
stored in memory.
.. warning:: Several methods of the :py:class:`~.QuantumCircuit` class
will not work as expected (or will raise an exception) if called on
a compressed circuit. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | compress | nelimee/qtoolkit | 3 | python | def compress(self) -> 'QuantumCircuit':
'Compress the instance to save some memory.\n\n This method is useful when a large number of small circuits needs to be\n stored in memory.\n\n .. warning:: Several methods of the :py:class:`~.QuantumCircuit` class\n will not work as expected (or will raise an exception) if called on\n a compressed circuit.\n '
if (not self.compressed):
self._compressed_graph = CompressedMultiDiGraph(self._graph)
del self._graph
return self | def compress(self) -> 'QuantumCircuit':
'Compress the instance to save some memory.\n\n This method is useful when a large number of small circuits needs to be\n stored in memory.\n\n .. warning:: Several methods of the :py:class:`~.QuantumCircuit` class\n will not work as expected (or will raise an exception) if called on\n a compressed circuit.\n '
if (not self.compressed):
self._compressed_graph = CompressedMultiDiGraph(self._graph)
del self._graph
return self<|docstring|>Compress the instance to save some memory.
This method is useful when a large number of small circuits needs to be
stored in memory.
.. warning:: Several methods of the :py:class:`~.QuantumCircuit` class
will not work as expected (or will raise an exception) if called on
a compressed circuit.<|endoftext|> |
b2e1a25da1ec72831b726066e0f1755959179af5c351961cfbce70c05be1304b | def uncompress(self) -> 'QuantumCircuit':
'Uncompress the instance.'
if self.compressed:
self._graph = self._compressed_graph.uncompress()
del self._compressed_graph
return self | Uncompress the instance. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | uncompress | nelimee/qtoolkit | 3 | python | def uncompress(self) -> 'QuantumCircuit':
if self.compressed:
self._graph = self._compressed_graph.uncompress()
del self._compressed_graph
return self | def uncompress(self) -> 'QuantumCircuit':
if self.compressed:
self._graph = self._compressed_graph.uncompress()
del self._compressed_graph
return self<|docstring|>Uncompress the instance.<|endoftext|> |
872b37fa19e5b13707060d3e82cff7772843d342e3b5e5f88837110fa2838db5 | @property
def compressed(self) -> bool:
'Return True if the instance is compressed, else False.'
return hasattr(self, '_compressed_graph') | Return True if the instance is compressed, else False. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | compressed | nelimee/qtoolkit | 3 | python | @property
def compressed(self) -> bool:
return hasattr(self, '_compressed_graph') | @property
def compressed(self) -> bool:
return hasattr(self, '_compressed_graph')<|docstring|>Return True if the instance is compressed, else False.<|endoftext|> |
1b2f886d857aadb64f6d105368b563568670dbbc324062af18f683cfbe020923 | def inverse(self) -> 'QuantumCircuit':
'Create the inverse of the instance it is called on.\n\n This method will create a new :py:class:`~.QuantumCircuit` and construct\n in this new circuit the inverse of `self`.\n '
inv = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
for op in reversed(list(self.operations)):
inv.add_operation(op.inverse())
return inv | Create the inverse of the instance it is called on.
This method will create a new :py:class:`~.QuantumCircuit` and construct
in this new circuit the inverse of `self`. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | inverse | nelimee/qtoolkit | 3 | python | def inverse(self) -> 'QuantumCircuit':
'Create the inverse of the instance it is called on.\n\n This method will create a new :py:class:`~.QuantumCircuit` and construct\n in this new circuit the inverse of `self`.\n '
inv = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
for op in reversed(list(self.operations)):
inv.add_operation(op.inverse())
return inv | def inverse(self) -> 'QuantumCircuit':
'Create the inverse of the instance it is called on.\n\n This method will create a new :py:class:`~.QuantumCircuit` and construct\n in this new circuit the inverse of `self`.\n '
inv = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
for op in reversed(list(self.operations)):
inv.add_operation(op.inverse())
return inv<|docstring|>Create the inverse of the instance it is called on.
This method will create a new :py:class:`~.QuantumCircuit` and construct
in this new circuit the inverse of `self`.<|endoftext|> |
200c57401ffc4029e3d9f4f976e0f1050daf0d39f55919fa148fdc3a875c125d | def __str__(self) -> str:
'Textual representation of the circuit.\n\n The representation used is very similar to OpenQASM.\n '
return '\n'.join(('{Cs}{opname} {controls}{commaornot}{target}'.format(Cs=('C' * len(op.controls)), opname=op.gate.name, controls=','.join(map(str, op.controls)), commaornot=(', ' if op.controls else ''), target=op.target) for op in self.operations)) | Textual representation of the circuit.
The representation used is very similar to OpenQASM. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | __str__ | nelimee/qtoolkit | 3 | python | def __str__(self) -> str:
'Textual representation of the circuit.\n\n The representation used is very similar to OpenQASM.\n '
return '\n'.join(('{Cs}{opname} {controls}{commaornot}{target}'.format(Cs=('C' * len(op.controls)), opname=op.gate.name, controls=','.join(map(str, op.controls)), commaornot=(', ' if op.controls else ), target=op.target) for op in self.operations)) | def __str__(self) -> str:
'Textual representation of the circuit.\n\n The representation used is very similar to OpenQASM.\n '
return '\n'.join(('{Cs}{opname} {controls}{commaornot}{target}'.format(Cs=('C' * len(op.controls)), opname=op.gate.name, controls=','.join(map(str, op.controls)), commaornot=(', ' if op.controls else ), target=op.target) for op in self.operations))<|docstring|>Textual representation of the circuit.
The representation used is very similar to OpenQASM.<|endoftext|> |
3a6458962c7579cb462acddd7fbfee0a4677234f151c62ad935fdb61dfb48fdd | def __init__(self, graph: nx.MultiDiGraph=None) -> None:
'Initialise the :py:class:`~.CompressedMultiDiGraph` instance.\n\n Instances of :py:class:`~.CompressedMultiDiGraph` are just storing\n a :py:class:`networkx.MultiDiGraph` in a more memory efficient format.\n\n :param graph: The graph to compress.\n '
if (graph is None):
self._qubit_number = 0
return
node_number = len(graph.nodes)
edge_number = len(graph.edges)
if (node_number < (2 ** 8)):
data_type = numpy.uint8
elif (node_number < (2 ** 16)):
data_type = numpy.uint16
else:
data_type = numpy.uint32
self._from_arr = numpy.zeros((edge_number,), dtype=data_type)
self._to_arr = numpy.zeros((edge_number,), dtype=data_type)
self._data_arr = numpy.zeros((edge_number,), dtype=data_type)
for (idx, (u, v, qubit_id)) in enumerate(graph.edges):
self._from_arr[idx] = u
self._to_arr[idx] = v
self._data_arr[idx] = qubit_id
self._qubit_number = 0
self._is_op_node = numpy.zeros((node_number,), dtype=numpy.bool)
self._operations = list()
for (node_id, node_data) in graph.nodes.items():
if (node_data['type'] == 'op'):
self._is_op_node[node_id] = True
self._operations.append(node_data['op'])
else:
self._qubit_number += 1 | Initialise the :py:class:`~.CompressedMultiDiGraph` instance.
Instances of :py:class:`~.CompressedMultiDiGraph` are just storing
a :py:class:`networkx.MultiDiGraph` in a more memory efficient format.
:param graph: The graph to compress. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | __init__ | nelimee/qtoolkit | 3 | python | def __init__(self, graph: nx.MultiDiGraph=None) -> None:
'Initialise the :py:class:`~.CompressedMultiDiGraph` instance.\n\n Instances of :py:class:`~.CompressedMultiDiGraph` are just storing\n a :py:class:`networkx.MultiDiGraph` in a more memory efficient format.\n\n :param graph: The graph to compress.\n '
if (graph is None):
self._qubit_number = 0
return
node_number = len(graph.nodes)
edge_number = len(graph.edges)
if (node_number < (2 ** 8)):
data_type = numpy.uint8
elif (node_number < (2 ** 16)):
data_type = numpy.uint16
else:
data_type = numpy.uint32
self._from_arr = numpy.zeros((edge_number,), dtype=data_type)
self._to_arr = numpy.zeros((edge_number,), dtype=data_type)
self._data_arr = numpy.zeros((edge_number,), dtype=data_type)
for (idx, (u, v, qubit_id)) in enumerate(graph.edges):
self._from_arr[idx] = u
self._to_arr[idx] = v
self._data_arr[idx] = qubit_id
self._qubit_number = 0
self._is_op_node = numpy.zeros((node_number,), dtype=numpy.bool)
self._operations = list()
for (node_id, node_data) in graph.nodes.items():
if (node_data['type'] == 'op'):
self._is_op_node[node_id] = True
self._operations.append(node_data['op'])
else:
self._qubit_number += 1 | def __init__(self, graph: nx.MultiDiGraph=None) -> None:
'Initialise the :py:class:`~.CompressedMultiDiGraph` instance.\n\n Instances of :py:class:`~.CompressedMultiDiGraph` are just storing\n a :py:class:`networkx.MultiDiGraph` in a more memory efficient format.\n\n :param graph: The graph to compress.\n '
if (graph is None):
self._qubit_number = 0
return
node_number = len(graph.nodes)
edge_number = len(graph.edges)
if (node_number < (2 ** 8)):
data_type = numpy.uint8
elif (node_number < (2 ** 16)):
data_type = numpy.uint16
else:
data_type = numpy.uint32
self._from_arr = numpy.zeros((edge_number,), dtype=data_type)
self._to_arr = numpy.zeros((edge_number,), dtype=data_type)
self._data_arr = numpy.zeros((edge_number,), dtype=data_type)
for (idx, (u, v, qubit_id)) in enumerate(graph.edges):
self._from_arr[idx] = u
self._to_arr[idx] = v
self._data_arr[idx] = qubit_id
self._qubit_number = 0
self._is_op_node = numpy.zeros((node_number,), dtype=numpy.bool)
self._operations = list()
for (node_id, node_data) in graph.nodes.items():
if (node_data['type'] == 'op'):
self._is_op_node[node_id] = True
self._operations.append(node_data['op'])
else:
self._qubit_number += 1<|docstring|>Initialise the :py:class:`~.CompressedMultiDiGraph` instance.
Instances of :py:class:`~.CompressedMultiDiGraph` are just storing
a :py:class:`networkx.MultiDiGraph` in a more memory efficient format.
:param graph: The graph to compress.<|endoftext|> |
55f5f5c2c5b25ba82444dd1d219e0ba4339109a36675de887f8c6769af547b97 | def __copy__(self) -> 'CompressedMultiDiGraph':
'Override the default copy behaviour.'
cpy = CompressedMultiDiGraph()
cpy._qubit_number = self._qubit_number
cpy._from_arr = self._from_arr.copy()
cpy._to_arr = self._to_arr.copy()
cpy._data_arr = self._data_arr.copy()
cpy._is_op_node = self._is_op_node.copy()
cpy._operations = copy.copy(self._operations)
return cpy | Override the default copy behaviour. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | __copy__ | nelimee/qtoolkit | 3 | python | def __copy__(self) -> 'CompressedMultiDiGraph':
cpy = CompressedMultiDiGraph()
cpy._qubit_number = self._qubit_number
cpy._from_arr = self._from_arr.copy()
cpy._to_arr = self._to_arr.copy()
cpy._data_arr = self._data_arr.copy()
cpy._is_op_node = self._is_op_node.copy()
cpy._operations = copy.copy(self._operations)
return cpy | def __copy__(self) -> 'CompressedMultiDiGraph':
cpy = CompressedMultiDiGraph()
cpy._qubit_number = self._qubit_number
cpy._from_arr = self._from_arr.copy()
cpy._to_arr = self._to_arr.copy()
cpy._data_arr = self._data_arr.copy()
cpy._is_op_node = self._is_op_node.copy()
cpy._operations = copy.copy(self._operations)
return cpy<|docstring|>Override the default copy behaviour.<|endoftext|> |
074d8bd6dfff8c55a34136a087cfd69ec057c38901b7c31bbbdd2dcec6771eca | def uncompress(self) -> nx.MultiDiGraph:
'Uncompress the stored :py:class:`networkx.MultiDiGraph`.\n\n :return: the uncompressed :py:class:`networkx.MultiDiGraph`.\n '
graph = nx.MultiDiGraph()
if (self._qubit_number == 0):
return graph
for i in range(self._qubit_number):
graph.add_node(i, type='input', key=i)
for node_id in range(self._qubit_number, len(self._is_op_node)):
graph.add_node(node_id, type='op', op=self._operations[(node_id - self._qubit_number)])
for (u, v, qubit_id) in zip(self._from_arr, self._to_arr, self._data_arr):
graph.add_edge(u, v, key=qubit_id)
return graph | Uncompress the stored :py:class:`networkx.MultiDiGraph`.
:return: the uncompressed :py:class:`networkx.MultiDiGraph`. | qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | uncompress | nelimee/qtoolkit | 3 | python | def uncompress(self) -> nx.MultiDiGraph:
'Uncompress the stored :py:class:`networkx.MultiDiGraph`.\n\n :return: the uncompressed :py:class:`networkx.MultiDiGraph`.\n '
graph = nx.MultiDiGraph()
if (self._qubit_number == 0):
return graph
for i in range(self._qubit_number):
graph.add_node(i, type='input', key=i)
for node_id in range(self._qubit_number, len(self._is_op_node)):
graph.add_node(node_id, type='op', op=self._operations[(node_id - self._qubit_number)])
for (u, v, qubit_id) in zip(self._from_arr, self._to_arr, self._data_arr):
graph.add_edge(u, v, key=qubit_id)
return graph | def uncompress(self) -> nx.MultiDiGraph:
'Uncompress the stored :py:class:`networkx.MultiDiGraph`.\n\n :return: the uncompressed :py:class:`networkx.MultiDiGraph`.\n '
graph = nx.MultiDiGraph()
if (self._qubit_number == 0):
return graph
for i in range(self._qubit_number):
graph.add_node(i, type='input', key=i)
for node_id in range(self._qubit_number, len(self._is_op_node)):
graph.add_node(node_id, type='op', op=self._operations[(node_id - self._qubit_number)])
for (u, v, qubit_id) in zip(self._from_arr, self._to_arr, self._data_arr):
graph.add_edge(u, v, key=qubit_id)
return graph<|docstring|>Uncompress the stored :py:class:`networkx.MultiDiGraph`.
:return: the uncompressed :py:class:`networkx.MultiDiGraph`.<|endoftext|> |
88d9100cf7e078f3ee7f36ff9a57d4aef7524715114f4ca5f3ac536886e7f4d6 | def update(self):
'Refreshes the cached options data'
self.options = self._get_options() | Refreshes the cached options data | src/bos/operators/utils/clients/bos/options.py | update | Cray-HPE/bos | 1 | python | def update(self):
self.options = self._get_options() | def update(self):
self.options = self._get_options()<|docstring|>Refreshes the cached options data<|endoftext|> |
6e882b4b4b11d8d6da22746474a9eaba50a7ef7c6f71d3ffa75220e61eef7142 | def _get_options(self):
'Retrieves the current options from the BOS api'
session = requests_retry_session()
try:
response = session.get(ENDPOINT)
response.raise_for_status()
return json.loads(response.text)
except (ConnectionError, MaxRetryError) as e:
LOGGER.error('Unable to connect to BOS: {}'.format(e))
except HTTPError as e:
LOGGER.error('Unexpected response from BOS: {}'.format(e))
except json.JSONDecodeError as e:
LOGGER.error('Non-JSON response from BOS: {}'.format(e))
return {} | Retrieves the current options from the BOS api | src/bos/operators/utils/clients/bos/options.py | _get_options | Cray-HPE/bos | 1 | python | def _get_options(self):
session = requests_retry_session()
try:
response = session.get(ENDPOINT)
response.raise_for_status()
return json.loads(response.text)
except (ConnectionError, MaxRetryError) as e:
LOGGER.error('Unable to connect to BOS: {}'.format(e))
except HTTPError as e:
LOGGER.error('Unexpected response from BOS: {}'.format(e))
except json.JSONDecodeError as e:
LOGGER.error('Non-JSON response from BOS: {}'.format(e))
return {} | def _get_options(self):
session = requests_retry_session()
try:
response = session.get(ENDPOINT)
response.raise_for_status()
return json.loads(response.text)
except (ConnectionError, MaxRetryError) as e:
LOGGER.error('Unable to connect to BOS: {}'.format(e))
except HTTPError as e:
LOGGER.error('Unexpected response from BOS: {}'.format(e))
except json.JSONDecodeError as e:
LOGGER.error('Non-JSON response from BOS: {}'.format(e))
return {}<|docstring|>Retrieves the current options from the BOS api<|endoftext|> |
a52197a75085feecf4ec0e4c51e528f1ce95e2d1130791a9898d07a0808bcff8 | def __init__(self, path, entry=None, dependencies=None, devDependencies=None, peerDependencies=None):
'Initialize webpack bundle.'
self.path = path
self.entry = (entry or {})
self.dependencies = {'dependencies': (dependencies or {}), 'devDependencies': (devDependencies or {}), 'peerDependencies': (peerDependencies or {})} | Initialize webpack bundle. | pywebpack/bundle.py | __init__ | ntarocco/pywebpack | 0 | python | def __init__(self, path, entry=None, dependencies=None, devDependencies=None, peerDependencies=None):
self.path = path
self.entry = (entry or {})
self.dependencies = {'dependencies': (dependencies or {}), 'devDependencies': (devDependencies or {}), 'peerDependencies': (peerDependencies or {})} | def __init__(self, path, entry=None, dependencies=None, devDependencies=None, peerDependencies=None):
self.path = path
self.entry = (entry or {})
self.dependencies = {'dependencies': (dependencies or {}), 'devDependencies': (devDependencies or {}), 'peerDependencies': (peerDependencies or {})}<|docstring|>Initialize webpack bundle.<|endoftext|> |
dc5a440fe0f2d370514c4971d1046bb1769800f4db0ad7a7077d30617f2d05a0 | def format_recipient(user: User):
'\n Format a user as a recipient\n\n Args:\n user (User): the user\n\n Returns:\n str:\n the formatted recipient\n '
return formataddr((f'{user.first_name} {user.last_name}', user.email)) | Format a user as a recipient
Args:
user (User): the user
Returns:
str:
the formatted recipient | src/mitol/mail/defaults.py | format_recipient | mitodl/ol-django | 1 | python | def format_recipient(user: User):
'\n Format a user as a recipient\n\n Args:\n user (User): the user\n\n Returns:\n str:\n the formatted recipient\n '
return formataddr((f'{user.first_name} {user.last_name}', user.email)) | def format_recipient(user: User):
'\n Format a user as a recipient\n\n Args:\n user (User): the user\n\n Returns:\n str:\n the formatted recipient\n '
return formataddr((f'{user.first_name} {user.last_name}', user.email))<|docstring|>Format a user as a recipient
Args:
user (User): the user
Returns:
str:
the formatted recipient<|endoftext|> |
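A minimal sketch of the `format_recipient` helper above; the user object here is a stand-in carrying only the attributes the function reads, not the project's actual Django `User` model.

```python
from types import SimpleNamespace

# Stand-in for a Django user; only first_name, last_name and email are needed.
user = SimpleNamespace(first_name="Ada", last_name="Lovelace", email="ada@example.org")
print(format_recipient(user))  # 'Ada Lovelace <ada@example.org>'
```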
f28f5378babb0b05e31cd4d51d10c483025bd55f7f21c7fac59f0a62efd75a0b | def can_email_user(user: User):
'\n Returns True if the user has an email address\n\n Args:\n user (User): user to check\n\n Returns:\n bool: True if we can email this user\n '
return bool(user.email) | Returns True if the user has an email address
Args:
user (User): user to check
Returns:
bool: True if we can email this user | src/mitol/mail/defaults.py | can_email_user | mitodl/ol-django | 1 | python | def can_email_user(user: User):
'\n Returns True if the user has an email address\n\n Args:\n user (User): user to check\n\n Returns:\n bool: True if we can email this user\n '
return bool(user.email) | def can_email_user(user: User):
'\n Returns True if the user has an email address\n\n Args:\n user (User): user to check\n\n Returns:\n bool: True if we can email this user\n '
return bool(user.email)<|docstring|>Returns True if the user has an email address
Args:
user (User): user to check
Returns:
bool: True if we can email this user<|endoftext|> |
aad32ec10e3fd5813fba27b71291ce8016d30a629139d2d96e35c8e550cccad0 | def make_quadrants(parent, yp):
' make quadrant buttons '
parent.quadbtns = QButtonGroup(parent)
for b in range(9):
btn = QuadButton(b, (' ' + str((b + 1))), parent)
parent.quadbtns.addButton(btn, b)
parent.l0.addWidget(btn, (yp + parent.quadbtns.button(b).ypos), (5 + parent.quadbtns.button(b).xpos), 1, 1)
btn.setEnabled(True)
b += 1
parent.quadbtns.setExclusive(True) | make quadrant buttons | cellpose/gui/guiparts.py | make_quadrants | thccheung/cellpose | 0 | python | def make_quadrants(parent, yp):
' '
parent.quadbtns = QButtonGroup(parent)
for b in range(9):
btn = QuadButton(b, (' ' + str((b + 1))), parent)
parent.quadbtns.addButton(btn, b)
parent.l0.addWidget(btn, (yp + parent.quadbtns.button(b).ypos), (5 + parent.quadbtns.button(b).xpos), 1, 1)
btn.setEnabled(True)
b += 1
parent.quadbtns.setExclusive(True) | def make_quadrants(parent, yp):
' '
parent.quadbtns = QButtonGroup(parent)
for b in range(9):
btn = QuadButton(b, (' ' + str((b + 1))), parent)
parent.quadbtns.addButton(btn, b)
parent.l0.addWidget(btn, (yp + parent.quadbtns.button(b).ypos), (5 + parent.quadbtns.button(b).xpos), 1, 1)
btn.setEnabled(True)
b += 1
parent.quadbtns.setExclusive(True)<|docstring|>make quadrant buttons<|endoftext|> |
3ecbb5b133234213e5fc118842629e1c98ac59fb8bd44ea9306039ad5d6c7f61 | def keyPressEvent(self, ev):
'\n This routine should capture key presses in the current view box.\n The following events are implemented:\n +/= : moves forward in the zooming stack (if it exists)\n - : moves backward in the zooming stack (if it exists)\n\n '
ev.accept()
if (ev.text() == '-'):
self.scaleBy([1.1, 1.1])
elif (ev.text() in ['+', '=']):
self.scaleBy([0.9, 0.9])
else:
ev.ignore() | This routine should capture key presses in the current view box.
The following events are implemented:
+/= : moves forward in the zooming stack (if it exists)
- : moves backward in the zooming stack (if it exists) | cellpose/gui/guiparts.py | keyPressEvent | thccheung/cellpose | 0 | python | def keyPressEvent(self, ev):
'\n This routine should capture key presses in the current view box.\n The following events are implemented:\n +/= : moves forward in the zooming stack (if it exists)\n - : moves backward in the zooming stack (if it exists)\n\n '
ev.accept()
if (ev.text() == '-'):
self.scaleBy([1.1, 1.1])
elif (ev.text() in ['+', '=']):
self.scaleBy([0.9, 0.9])
else:
ev.ignore() | def keyPressEvent(self, ev):
'\n This routine should capture key presses in the current view box.\n The following events are implemented:\n +/= : moves forward in the zooming stack (if it exists)\n - : moves backward in the zooming stack (if it exists)\n\n '
ev.accept()
if (ev.text() == '-'):
self.scaleBy([1.1, 1.1])
elif (ev.text() in ['+', '=']):
self.scaleBy([0.9, 0.9])
else:
ev.ignore()<|docstring|>This routine should capture key presses in the current view box.
The following events are implemented:
+/= : moves forward in the zooming stack (if it exists)
- : moves backward in the zooming stack (if it exists)<|endoftext|> |
f57ac177a7acd94ac7f71ee124bf67b0e9967cb6ff426578881fd83e6e4c8049 | def test_format_paragraphs(monkeypatch):
'Try to dedent and reformat a paragraph.'
lorem_before = '\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum\n vehicula aliquam felis sed iaculis.\n\n Integer vulputate dui vulputate metus pulvinar volutpat. Nullam\n eu elementum libero.\n '
lorem_width35 = 'Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit.\nVestibulum vehicula aliquam\nfelis sed iaculis.\n\nInteger vulputate dui\nvulputate metus pulvinar\nvolutpat. Nullam eu elementum\nlibero.\n'
with monkeypatch.context() as monkey:
monkey.setenv('COLUMNS', '35')
formatted = fmt.format_paragraphs(lorem_before)
assert (formatted == lorem_width35) | Try to dedent and reformat a paragraph. | tests/fmt_test.py | test_format_paragraphs | harkabeeparolus/csv2xlsx | 2 | python | def test_format_paragraphs(monkeypatch):
lorem_before = '\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum\n vehicula aliquam felis sed iaculis.\n\n Integer vulputate dui vulputate metus pulvinar volutpat. Nullam\n eu elementum libero.\n '
lorem_width35 = 'Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit.\nVestibulum vehicula aliquam\nfelis sed iaculis.\n\nInteger vulputate dui\nvulputate metus pulvinar\nvolutpat. Nullam eu elementum\nlibero.\n'
with monkeypatch.context() as monkey:
monkey.setenv('COLUMNS', '35')
formatted = fmt.format_paragraphs(lorem_before)
assert (formatted == lorem_width35) | def test_format_paragraphs(monkeypatch):
lorem_before = '\n Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum\n vehicula aliquam felis sed iaculis.\n\n Integer vulputate dui vulputate metus pulvinar volutpat. Nullam\n eu elementum libero.\n '
lorem_width35 = 'Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit.\nVestibulum vehicula aliquam\nfelis sed iaculis.\n\nInteger vulputate dui\nvulputate metus pulvinar\nvolutpat. Nullam eu elementum\nlibero.\n'
with monkeypatch.context() as monkey:
monkey.setenv('COLUMNS', '35')
formatted = fmt.format_paragraphs(lorem_before)
assert (formatted == lorem_width35)<|docstring|>Try to dedent and reformat a paragraph.<|endoftext|> |
5f539ff9780a4abcbdcc983bfb6cc0767e5dc6afe5a8ff90ae4d96612fb3e353 | def test_newlines():
'Make sure we strip and reapply newlines correctly.'
assert (fmt.format_paragraphs('\n\nfoo bar') == 'foo bar')
assert (fmt.format_paragraphs('\n\nfoo bar baz\n\n') == 'foo bar baz\n') | Make sure we strip and reapply newlines correctly. | tests/fmt_test.py | test_newlines | harkabeeparolus/csv2xlsx | 2 | python | def test_newlines():
assert (fmt.format_paragraphs('\n\nfoo bar') == 'foo bar')
assert (fmt.format_paragraphs('\n\nfoo bar baz\n\n') == 'foo bar baz\n') | def test_newlines():
assert (fmt.format_paragraphs('\n\nfoo bar') == 'foo bar')
assert (fmt.format_paragraphs('\n\nfoo bar baz\n\n') == 'foo bar baz\n')<|docstring|>Make sure we strip and reapply newlines correctly.<|endoftext|> |
74c69b013b1749d646636bef999270ef7cf0842acacb2b784462f2d3a7f8a8a4 | def IoU(box, boxes):
'Compute IoU between detect box and gt boxes\n\n Parameters:\n ----------\n box: numpy array , shape (5, ): x1, y1, x2, y2, score\n input box\n boxes: numpy array, shape (n, 4): x1, y1, x2, y2\n input ground truth boxes\n\n Returns:\n -------\n ovr: numpy.array, shape (n, )\n IoU\n '
box_area = ((box[2] - box[0]) * (box[3] - box[1]))
area = ((boxes[(:, 2)] - boxes[(:, 0)]) * (boxes[(:, 3)] - boxes[(:, 1)]))
xx1 = np.maximum(box[0], boxes[(:, 0)])
yy1 = np.maximum(box[1], boxes[(:, 1)])
xx2 = np.minimum(box[2], boxes[(:, 2)])
yy2 = np.minimum(box[3], boxes[(:, 3)])
w = np.maximum(0, (xx2 - xx1))
h = np.maximum(0, (yy2 - yy1))
inter = (w * h)
ovr = np.true_divide(inter, ((box_area + area) - inter))
return ovr | Compute IoU between detect box and gt boxes
Parameters:
----------
box: numpy array , shape (5, ): x1, y1, x2, y2, score
input box
boxes: numpy array, shape (n, 4): x1, y1, x2, y2
input ground truth boxes
Returns:
-------
ovr: numpy.array, shape (n, )
IoU | 2019ML_Lab/Lab4/mtcnn_pytorch/tools/utils.py | IoU | Pangxiaox/Machine-Learning-Lab | 0 | python | def IoU(box, boxes):
'Compute IoU between detect box and gt boxes\n\n Parameters:\n ----------\n box: numpy array , shape (5, ): x1, y1, x2, y2, score\n input box\n boxes: numpy array, shape (n, 4): x1, y1, x2, y2\n input ground truth boxes\n\n Returns:\n -------\n ovr: numpy.array, shape (n, )\n IoU\n '
box_area = ((box[2] - box[0]) * (box[3] - box[1]))
area = ((boxes[(:, 2)] - boxes[(:, 0)]) * (boxes[(:, 3)] - boxes[(:, 1)]))
xx1 = np.maximum(box[0], boxes[(:, 0)])
yy1 = np.maximum(box[1], boxes[(:, 1)])
xx2 = np.minimum(box[2], boxes[(:, 2)])
yy2 = np.minimum(box[3], boxes[(:, 3)])
w = np.maximum(0, (xx2 - xx1))
h = np.maximum(0, (yy2 - yy1))
inter = (w * h)
ovr = np.true_divide(inter, ((box_area + area) - inter))
return ovr | def IoU(box, boxes):
'Compute IoU between detect box and gt boxes\n\n Parameters:\n ----------\n box: numpy array , shape (5, ): x1, y1, x2, y2, score\n input box\n boxes: numpy array, shape (n, 4): x1, y1, x2, y2\n input ground truth boxes\n\n Returns:\n -------\n ovr: numpy.array, shape (n, )\n IoU\n '
box_area = ((box[2] - box[0]) * (box[3] - box[1]))
area = ((boxes[(:, 2)] - boxes[(:, 0)]) * (boxes[(:, 3)] - boxes[(:, 1)]))
xx1 = np.maximum(box[0], boxes[(:, 0)])
yy1 = np.maximum(box[1], boxes[(:, 1)])
xx2 = np.minimum(box[2], boxes[(:, 2)])
yy2 = np.minimum(box[3], boxes[(:, 3)])
w = np.maximum(0, (xx2 - xx1))
h = np.maximum(0, (yy2 - yy1))
inter = (w * h)
ovr = np.true_divide(inter, ((box_area + area) - inter))
return ovr<|docstring|>Compute IoU between detect box and gt boxes
Parameters:
----------
box: numpy array , shape (5, ): x1, y1, x2, y2, score
input box
boxes: numpy array, shape (n, 4): x1, y1, x2, y2
input ground truth boxes
Returns:
-------
ovr: numpy.array, shape (n, )
IoU<|endoftext|> |
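The `IoU` entry above fully specifies the expected shapes, so a brief usage sketch may help. This is an illustration only: it assumes `IoU` is importable from the listed module and that the unparsed subscripts stored in the body (e.g. `boxes[(:, 2)]`) are read as ordinary NumPy slicing (`boxes[:, 2]`).

```python
# Hypothetical call to the IoU helper documented above; values chosen for illustration.
import numpy as np

box = np.array([10, 10, 50, 50, 0.9])            # x1, y1, x2, y2, score
boxes = np.array([[10, 10, 50, 50],              # identical box   -> IoU = 1.0
                  [30, 30, 70, 70],               # partial overlap -> IoU ~ 0.143
                  [60, 60, 90, 90]])              # disjoint        -> IoU = 0.0
print(IoU(box, boxes))
```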
fdba06e088f170877c7bbac4f87be3b2fd094f8720c4ce852dbb4927a7401864 | def convert_to_square(bbox):
' Convert bbox to a square which it can include the bbox\n Parameters:\n bbox: numpy array, shape n x 5\n \n returns:\n square box\n '
square_bbox = bbox.copy()
h = (bbox[(:, 3)] - bbox[(:, 1)])
w = (bbox[(:, 2)] - bbox[(:, 0)])
max_side = np.maximum(h, w)
square_bbox[(:, 0)] = ((bbox[(:, 0)] + (w * 0.5)) - (max_side * 0.5))
square_bbox[(:, 1)] = ((bbox[(:, 1)] + (h * 0.5)) - (max_side * 0.5))
square_bbox[(:, 2)] = (square_bbox[(:, 0)] + max_side)
square_bbox[(:, 3)] = (square_bbox[(:, 1)] + max_side)
return square_bbox | Convert bbox to a square which it can include the bbox
Parameters:
bbox: numpy array, shape n x 5
returns:
square box | 2019ML_Lab/Lab4/mtcnn_pytorch/tools/utils.py | convert_to_square | Pangxiaox/Machine-Learning-Lab | 0 | python | def convert_to_square(bbox):
' Convert bbox to a square which it can include the bbox\n Parameters:\n bbox: numpy array, shape n x 5\n \n returns:\n square box\n '
square_bbox = bbox.copy()
h = (bbox[(:, 3)] - bbox[(:, 1)])
w = (bbox[(:, 2)] - bbox[(:, 0)])
max_side = np.maximum(h, w)
square_bbox[(:, 0)] = ((bbox[(:, 0)] + (w * 0.5)) - (max_side * 0.5))
square_bbox[(:, 1)] = ((bbox[(:, 1)] + (h * 0.5)) - (max_side * 0.5))
square_bbox[(:, 2)] = (square_bbox[(:, 0)] + max_side)
square_bbox[(:, 3)] = (square_bbox[(:, 1)] + max_side)
return square_bbox | def convert_to_square(bbox):
' Convert bbox to a square which it can include the bbox\n Parameters:\n bbox: numpy array, shape n x 5\n \n returns:\n square box\n '
square_bbox = bbox.copy()
h = (bbox[(:, 3)] - bbox[(:, 1)])
w = (bbox[(:, 2)] - bbox[(:, 0)])
max_side = np.maximum(h, w)
square_bbox[(:, 0)] = ((bbox[(:, 0)] + (w * 0.5)) - (max_side * 0.5))
square_bbox[(:, 1)] = ((bbox[(:, 1)] + (h * 0.5)) - (max_side * 0.5))
square_bbox[(:, 2)] = (square_bbox[(:, 0)] + max_side)
square_bbox[(:, 3)] = (square_bbox[(:, 1)] + max_side)
return square_bbox<|docstring|>Convert bbox to a square which it can include the bbox
Parameters:
bbox: numpy array, shape n x 5
returns:
square box<|endoftext|> |
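For the `convert_to_square` entry above, a one-box sketch (illustrative values, not from the source) shows a tall 20x40 box being expanded to a 40x40 square about the same centre, with the trailing score column copied through unchanged.

```python
import numpy as np

bbox = np.array([[10., 20., 30., 60., 0.9]])   # x1, y1, x2, y2, score
print(convert_to_square(bbox))                  # -> [[ 0. 20. 40. 60.  0.9]]
```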
a698253545c12e086abab2d749055ee86eac3220a704955b438ae39bee04697a | def nms(dets, thresh, mode='Union'):
' greedily select bboxes with high confidence,if an box overlap with the highest score box > thres, rule it out\n \n params:\n dets: [[x1, y1, x2, y2, score]]\n thresh: retain overlap <= thresh\n return:\n indexes to keep\n '
x1 = dets[(:, 0)]
y1 = dets[(:, 1)]
x2 = dets[(:, 2)]
y2 = dets[(:, 3)]
scores = dets[(:, 4)]
areas = ((x2 - x1) * (y2 - y1))
order = scores.argsort()[::(- 1)]
keep = []
while (order.size > 0):
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, (xx2 - xx1))
h = np.maximum(0.0, (yy2 - yy1))
inter = (w * h)
inter = (w * h)
if (mode == 'Union'):
ovr = (inter / ((areas[i] + areas[order[1:]]) - inter))
elif (mode == 'Minimum'):
ovr = (inter / np.minimum(areas[i], areas[order[1:]]))
inds = np.where((ovr <= thresh))[0]
order = order[(inds + 1)]
return keep | greedily select bboxes with high confidence,if an box overlap with the highest score box > thres, rule it out
params:
dets: [[x1, y1, x2, y2, score]]
thresh: retain overlap <= thresh
return:
indexes to keep | 2019ML_Lab/Lab4/mtcnn_pytorch/tools/utils.py | nms | Pangxiaox/Machine-Learning-Lab | 0 | python | def nms(dets, thresh, mode='Union'):
' greedily select bboxes with high confidence,if an box overlap with the highest score box > thres, rule it out\n \n params:\n dets: [[x1, y1, x2, y2, score]]\n thresh: retain overlap <= thresh\n return:\n indexes to keep\n '
x1 = dets[(:, 0)]
y1 = dets[(:, 1)]
x2 = dets[(:, 2)]
y2 = dets[(:, 3)]
scores = dets[(:, 4)]
areas = ((x2 - x1) * (y2 - y1))
order = scores.argsort()[::(- 1)]
keep = []
while (order.size > 0):
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, (xx2 - xx1))
h = np.maximum(0.0, (yy2 - yy1))
inter = (w * h)
inter = (w * h)
if (mode == 'Union'):
ovr = (inter / ((areas[i] + areas[order[1:]]) - inter))
elif (mode == 'Minimum'):
ovr = (inter / np.minimum(areas[i], areas[order[1:]]))
inds = np.where((ovr <= thresh))[0]
order = order[(inds + 1)]
return keep | def nms(dets, thresh, mode='Union'):
' greedily select bboxes with high confidence,if an box overlap with the highest score box > thres, rule it out\n \n params:\n dets: [[x1, y1, x2, y2, score]]\n thresh: retain overlap <= thresh\n return:\n indexes to keep\n '
x1 = dets[(:, 0)]
y1 = dets[(:, 1)]
x2 = dets[(:, 2)]
y2 = dets[(:, 3)]
scores = dets[(:, 4)]
areas = ((x2 - x1) * (y2 - y1))
order = scores.argsort()[::(- 1)]
keep = []
while (order.size > 0):
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, (xx2 - xx1))
h = np.maximum(0.0, (yy2 - yy1))
inter = (w * h)
inter = (w * h)
if (mode == 'Union'):
ovr = (inter / ((areas[i] + areas[order[1:]]) - inter))
elif (mode == 'Minimum'):
ovr = (inter / np.minimum(areas[i], areas[order[1:]]))
inds = np.where((ovr <= thresh))[0]
order = order[(inds + 1)]
return keep<|docstring|>greedily select bboxes with high confidence,if an box overlap with the highest score box > thres, rule it out
params:
dets: [[x1, y1, x2, y2, score]]
thresh: retain overlap <= thresh
return:
indexes to keep<|endoftext|> |
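As a rough illustration of the greedy suppression described in the `nms` docstring above (hypothetical inputs, same slicing caveat as for `IoU`): the highest-scoring box of an overlapping pair survives and the disjoint box is kept.

```python
import numpy as np

dets = np.array([[10, 10, 50, 50, 0.95],
                 [12, 12, 52, 52, 0.90],    # heavy overlap with the first box
                 [60, 60, 90, 90, 0.80]])   # disjoint box
keep = nms(dets, thresh=0.5, mode='Union')
print(keep)  # expected: [0, 2] -- the second detection is suppressed
```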
def8b996ba9eba493186504392e3f70b7d8efdb24d3ca26faacdf08d7dab0ec7 | def reset(self):
'\n reset all parameters\n '
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0 | reset all parameters | 2019ML_Lab/Lab4/mtcnn_pytorch/tools/utils.py | reset | Pangxiaox/Machine-Learning-Lab | 0 | python | def reset(self):
'\n \n '
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0 | def reset(self):
'\n \n '
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0<|docstring|>reset all parameters<|endoftext|> |
039b628903e395200b54c974945c96beee3e71efbb0e6f6432b56d28d567911a | def update(self, val, n=1):
'\n update parameters\n '
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count) | update parameters | 2019ML_Lab/Lab4/mtcnn_pytorch/tools/utils.py | update | Pangxiaox/Machine-Learning-Lab | 0 | python | def update(self, val, n=1):
'\n \n '
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count) | def update(self, val, n=1):
'\n \n '
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count)<|docstring|>update parameters<|endoftext|> |
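The `reset`/`update` pair above belongs to a running-average tracker; a minimal usage sketch follows, assuming the enclosing `AverageMeter` class calls `reset()` from its constructor (that part is not shown in this entry).

```python
meter = AverageMeter()
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    meter.update(batch_loss, n=batch_size)   # accumulate a size-weighted sum
print(meter.avg)  # weighted running mean; 0.74 for these values
```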
eca78088ef8ee496b4a021fc4015849eb040c372c0ea2f0d2b1f11995d021a19 | def tanh_op(node, ctx=None):
'Calculate tanh of a matrix elementwisely.\n\n Parameters:\n ----\n node : Node\n Input variable.\n\n Returns:\n ----\n A new Node instance created by Op.\n\n '
return TanhOp(node, ctx=ctx) | Calculate tanh of a matrix elementwisely.
Parameters:
----
node : Node
Input variable.
Returns:
----
A new Node instance created by Op. | python/hetu/gpu_ops/Tanh.py | tanh_op | HugoZHL/Hetu | 0 | python | def tanh_op(node, ctx=None):
'Calculate tanh of a matrix elementwisely.\n\n Parameters:\n ----\n node : Node\n Input variable.\n\n Returns:\n ----\n A new Node instance created by Op.\n\n '
return TanhOp(node, ctx=ctx) | def tanh_op(node, ctx=None):
'Calculate tanh of a matrix elementwisely.\n\n Parameters:\n ----\n node : Node\n Input variable.\n\n Returns:\n ----\n A new Node instance created by Op.\n\n '
return TanhOp(node, ctx=ctx)<|docstring|>Calculate tanh of a matrix elementwisely.
Parameters:
----
node : Node
Input variable.
Returns:
----
A new Node instance created by Op.<|endoftext|> |
c27833acef7d356dd5f5c5d24d03c3b2f9c499a52d959b2b631fdf0eee236749 | @staticmethod
def is_url(location):
' Checks if provided path is a URL '
return bool(urllib.parse.urlparse(location).netloc) | Checks if provided path is a URL | dogen/tools.py | is_url | jboss-dockerfiles/dogen | 14 | python | @staticmethod
def is_url(location):
' '
return bool(urllib.parse.urlparse(location).netloc) | @staticmethod
def is_url(location):
' '
return bool(urllib.parse.urlparse(location).netloc)<|docstring|>Checks if provided path is a URL<|endoftext|> |
929bdc4699c47aed7326127335fb510d76a639567ea7f846b7ee1985c330aa62 | def initialize(self, opt):
'\n :param opt:\n :return:\n '
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot)
self.A_paths = make_dataset(self.dir_A)
self.A_paths = sorted(self.A_paths)
self.transform = get_transform(opt) | :param opt:
:return: | data/single_dataset.py | initialize | CaptainEven/MyEnlightenGAN | 1 | python | def initialize(self, opt):
'\n :param opt:\n :return:\n '
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot)
self.A_paths = make_dataset(self.dir_A)
self.A_paths = sorted(self.A_paths)
self.transform = get_transform(opt) | def initialize(self, opt):
'\n :param opt:\n :return:\n '
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot)
self.A_paths = make_dataset(self.dir_A)
self.A_paths = sorted(self.A_paths)
self.transform = get_transform(opt)<|docstring|>:param opt:
:return:<|endoftext|> |
6212d420de85fc3326c7128f3fb61c6c2b09b96f7c916a21b6cd9daedd9b1ed0 | def __getitem__(self, idx):
'\n :param idx:\n :return:\n '
A_path = self.A_paths[idx]
A_img = Image.open(A_path).convert('RGB')
A_size = A_img.size
A_size = A_size = (((A_size[0] // 16) * 16), ((A_size[1] // 16) * 16))
A_img = A_img.resize(A_size, Image.BICUBIC)
A_img = self.transform(A_img)
return {'A': A_img, 'A_paths': A_path} | :param idx:
:return: | data/single_dataset.py | __getitem__ | CaptainEven/MyEnlightenGAN | 1 | python | def __getitem__(self, idx):
'\n :param idx:\n :return:\n '
A_path = self.A_paths[idx]
A_img = Image.open(A_path).convert('RGB')
A_size = A_img.size
A_size = A_size = (((A_size[0] // 16) * 16), ((A_size[1] // 16) * 16))
A_img = A_img.resize(A_size, Image.BICUBIC)
A_img = self.transform(A_img)
return {'A': A_img, 'A_paths': A_path} | def __getitem__(self, idx):
'\n :param idx:\n :return:\n '
A_path = self.A_paths[idx]
A_img = Image.open(A_path).convert('RGB')
A_size = A_img.size
A_size = A_size = (((A_size[0] // 16) * 16), ((A_size[1] // 16) * 16))
A_img = A_img.resize(A_size, Image.BICUBIC)
A_img = self.transform(A_img)
return {'A': A_img, 'A_paths': A_path}<|docstring|>:param idx:
:return:<|endoftext|> |
095fa3151f0311206c329e89016956fe31c7f4930fa71d4e9721d7ff58b78213 | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n \n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.account = None
' Account to place this charge.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.bodysite = None
' Anatomical location, if relevant.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.code = None
' A code that identifies the charge, like a billing code.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.context = None
' Encounter / Episode associated with event.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.costCenter = None
' Organization that has ownership of the (potential, future) revenue.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.definitionCanonical = None
' Resource defining the code of this ChargeItem.\n List of `str` items. '
self.definitionUri = None
' Defining information about the code of this charge item.\n List of `str` items. '
self.enteredDate = None
' Date the charge item was entered.\n Type `FHIRDate` (represented as `str` in JSON). '
self.enterer = None
' Individual who was entering.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.factorOverride = None
' Factor overriding the associated rules.\n Type `float`. '
self.identifier = None
' Business Identifier for item.\n List of `Identifier` items (represented as `dict` in JSON). '
self.note = None
' Comments made about the ChargeItem.\n List of `Annotation` items (represented as `dict` in JSON). '
self.occurrenceDateTime = None
' When the charged service was applied.\n Type `FHIRDate` (represented as `str` in JSON). '
self.occurrencePeriod = None
' When the charged service was applied.\n Type `Period` (represented as `dict` in JSON). '
self.occurrenceTiming = None
' When the charged service was applied.\n Type `Timing` (represented as `dict` in JSON). '
self.overrideReason = None
' Reason for overriding the list price/factor.\n Type `str`. '
self.partOf = None
' Part of referenced ChargeItem.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.performer = None
' Who performed charged service.\n List of `ChargeItemPerformer` items (represented as `dict` in JSON). '
self.performingOrganization = None
' Organization providing the charged service.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.priceOverride = None
' Price overriding the associated rules.\n Type `Money` (represented as `dict` in JSON). '
self.productCodeableConcept = None
' Product charged.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.productReference = None
' Product charged.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.quantity = None
' Quantity of which the charge item has been serviced.\n Type `Quantity` (represented as `dict` in JSON). '
self.reason = None
' Why was the charged service rendered?.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.requestingOrganization = None
' Organization requesting the charged service.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.service = None
' Which rendered service is being charged?.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.status = None
' planned | billable | not-billable | aborted | billed | entered-in-\n error | unknown.\n Type `str`. '
self.subject = None
' Individual service was done for/to.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.supportingInformation = None
' Further information supporting this charge.\n List of `FHIRReference` items (represented as `dict` in JSON). '
super(ChargeItem, self).__init__(jsondict=jsondict, strict=strict) | Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError | fhirclient/models/chargeitem.py | __init__ | zeel-dev/client-py | 418 | python | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n \n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.account = None
' Account to place this charge.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.bodysite = None
' Anatomical location, if relevant.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.code = None
' A code that identifies the charge, like a billing code.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.context = None
' Encounter / Episode associated with event.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.costCenter = None
' Organization that has ownership of the (potential, future) revenue.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.definitionCanonical = None
' Resource defining the code of this ChargeItem.\n List of `str` items. '
self.definitionUri = None
' Defining information about the code of this charge item.\n List of `str` items. '
self.enteredDate = None
' Date the charge item was entered.\n Type `FHIRDate` (represented as `str` in JSON). '
self.enterer = None
' Individual who was entering.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.factorOverride = None
' Factor overriding the associated rules.\n Type `float`. '
self.identifier = None
' Business Identifier for item.\n List of `Identifier` items (represented as `dict` in JSON). '
self.note = None
' Comments made about the ChargeItem.\n List of `Annotation` items (represented as `dict` in JSON). '
self.occurrenceDateTime = None
' When the charged service was applied.\n Type `FHIRDate` (represented as `str` in JSON). '
self.occurrencePeriod = None
' When the charged service was applied.\n Type `Period` (represented as `dict` in JSON). '
self.occurrenceTiming = None
' When the charged service was applied.\n Type `Timing` (represented as `dict` in JSON). '
self.overrideReason = None
' Reason for overriding the list price/factor.\n Type `str`. '
self.partOf = None
' Part of referenced ChargeItem.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.performer = None
' Who performed charged service.\n List of `ChargeItemPerformer` items (represented as `dict` in JSON). '
self.performingOrganization = None
' Organization providing the charged service.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.priceOverride = None
' Price overriding the associated rules.\n Type `Money` (represented as `dict` in JSON). '
self.productCodeableConcept = None
' Product charged.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.productReference = None
' Product charged.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.quantity = None
' Quantity of which the charge item has been serviced.\n Type `Quantity` (represented as `dict` in JSON). '
self.reason = None
' Why was the charged service rendered?.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.requestingOrganization = None
' Organization requesting the charged service.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.service = None
' Which rendered service is being charged?.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.status = None
' planned | billable | not-billable | aborted | billed | entered-in-\n error | unknown.\n Type `str`. '
self.subject = None
' Individual service was done for/to.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.supportingInformation = None
' Further information supporting this charge.\n List of `FHIRReference` items (represented as `dict` in JSON). '
super(ChargeItem, self).__init__(jsondict=jsondict, strict=strict) | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n \n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.account = None
' Account to place this charge.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.bodysite = None
' Anatomical location, if relevant.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.code = None
' A code that identifies the charge, like a billing code.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.context = None
' Encounter / Episode associated with event.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.costCenter = None
' Organization that has ownership of the (potential, future) revenue.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.definitionCanonical = None
' Resource defining the code of this ChargeItem.\n List of `str` items. '
self.definitionUri = None
' Defining information about the code of this charge item.\n List of `str` items. '
self.enteredDate = None
' Date the charge item was entered.\n Type `FHIRDate` (represented as `str` in JSON). '
self.enterer = None
' Individual who was entering.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.factorOverride = None
' Factor overriding the associated rules.\n Type `float`. '
self.identifier = None
' Business Identifier for item.\n List of `Identifier` items (represented as `dict` in JSON). '
self.note = None
' Comments made about the ChargeItem.\n List of `Annotation` items (represented as `dict` in JSON). '
self.occurrenceDateTime = None
' When the charged service was applied.\n Type `FHIRDate` (represented as `str` in JSON). '
self.occurrencePeriod = None
' When the charged service was applied.\n Type `Period` (represented as `dict` in JSON). '
self.occurrenceTiming = None
' When the charged service was applied.\n Type `Timing` (represented as `dict` in JSON). '
self.overrideReason = None
' Reason for overriding the list price/factor.\n Type `str`. '
self.partOf = None
' Part of referenced ChargeItem.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.performer = None
' Who performed charged service.\n List of `ChargeItemPerformer` items (represented as `dict` in JSON). '
self.performingOrganization = None
' Organization providing the charged service.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.priceOverride = None
' Price overriding the associated rules.\n Type `Money` (represented as `dict` in JSON). '
self.productCodeableConcept = None
' Product charged.\n Type `CodeableConcept` (represented as `dict` in JSON). '
self.productReference = None
' Product charged.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.quantity = None
' Quantity of which the charge item has been serviced.\n Type `Quantity` (represented as `dict` in JSON). '
self.reason = None
' Why was the charged service rendered?.\n List of `CodeableConcept` items (represented as `dict` in JSON). '
self.requestingOrganization = None
' Organization requesting the charged service.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.service = None
' Which rendered service is being charged?.\n List of `FHIRReference` items (represented as `dict` in JSON). '
self.status = None
' planned | billable | not-billable | aborted | billed | entered-in-\n error | unknown.\n Type `str`. '
self.subject = None
' Individual service was done for/to.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.supportingInformation = None
' Further information supporting this charge.\n List of `FHIRReference` items (represented as `dict` in JSON). '
super(ChargeItem, self).__init__(jsondict=jsondict, strict=strict)<|docstring|>Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError<|endoftext|> |
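A minimal usage sketch for the ChargeItem record above (the resource values, codes and references below are assumptions, not taken from the record; only the `ChargeItem(jsondict, strict=...)` constructor is from the source):

```python
from fhirclient.models.chargeitem import ChargeItem

# Hypothetical minimal resource body; the coding system and references are invented.
jsondict = {
    'resourceType': 'ChargeItem',
    'status': 'billable',
    'code': {'coding': [{'system': 'http://example.org/billing', 'code': '01510'}]},
    'subject': {'reference': 'Patient/example'},
}

item = ChargeItem(jsondict)        # strict=True by default: invalid input raises FHIRValidationError
print(item.status)                 # 'billable'
print(item.subject.reference)      # 'Patient/example'
print(item.as_json())              # serialises back to a plain dict
```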
847a9cdec91f636ebf80546cf2eacf3ce917080453493aa09924fde6ee882e8d | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n \n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.actor = None
' Individual who was performing.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.function = None
' What type of performance was done.\n Type `CodeableConcept` (represented as `dict` in JSON). '
super(ChargeItemPerformer, self).__init__(jsondict=jsondict, strict=strict) | Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError | fhirclient/models/chargeitem.py | __init__ | zeel-dev/client-py | 418 | python | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n \n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.actor = None
' Individual who was performing.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.function = None
' What type of performance was done.\n Type `CodeableConcept` (represented as `dict` in JSON). '
super(ChargeItemPerformer, self).__init__(jsondict=jsondict, strict=strict) | def __init__(self, jsondict=None, strict=True):
' Initialize all valid properties.\n \n :raises: FHIRValidationError on validation errors, unless strict is False\n :param dict jsondict: A JSON dictionary to use for initialization\n :param bool strict: If True (the default), invalid variables will raise a TypeError\n '
self.actor = None
' Individual who was performing.\n Type `FHIRReference` (represented as `dict` in JSON). '
self.function = None
' What type of performance was done.\n Type `CodeableConcept` (represented as `dict` in JSON). '
super(ChargeItemPerformer, self).__init__(jsondict=jsondict, strict=strict)<|docstring|>Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError<|endoftext|> |
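A companion sketch for the ChargeItemPerformer backbone element above; the references and function text are invented for illustration:

```python
from fhirclient.models.chargeitem import ChargeItemPerformer

performer = ChargeItemPerformer({
    'actor': {'reference': 'Practitioner/example'},   # individual who was performing
    'function': {'text': 'Primary performer'},        # what type of performance was done
})
print(performer.actor.reference)   # 'Practitioner/example'
print(performer.as_json())
```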
1fe821bcfdc966135f20ea72f2666c7fe55bbd1a9676b752c7c4aa1b361c5d44 | @operation
def service(service, running=True, restarted=False, reloaded=False, command=None, enabled=None, state=None, host=None):
'\n Manage the state of BSD init services.\n\n + service: name of the service to manage\n + running: whether the service should be running\n + restarted: whether the service should be restarted\n + reloaded: whether the service should be reloaded\n + command: custom command to pass like: ``/etc/rc.d/<service> <command>``\n + enabled: whether this service should be enabled/disabled on boot\n '
status_argument = 'status'
if (host.get_fact(Os) == 'OpenBSD'):
status_argument = 'check'
(yield handle_service_control(host, service, RcdStatus, 'test -e /etc/rc.d/{0} && /etc/rc.d/{0} {1} || /usr/local/etc/rc.d/{0} {1}', running, restarted, reloaded, command, status_argument=status_argument))
if isinstance(enabled, bool):
(yield files.line('/etc/rc.conf.local', '^{0}_enable='.format(service), replace='{0}_enable="YES"'.format(service), present=enabled, state=state, host=host)) | Manage the state of BSD init services.
+ service: name of the service to manage
+ running: whether the service should be running
+ restarted: whether the service should be restarted
+ reloaded: whether the service should be reloaded
+ command: custom command to pass like: ``/etc/rc.d/<service> <command>``
+ enabled: whether this service should be enabled/disabled on boot | pyinfra/operations/bsdinit.py | service | GerardoGR/pyinfra | 1,532 | python | @operation
def service(service, running=True, restarted=False, reloaded=False, command=None, enabled=None, state=None, host=None):
'\n Manage the state of BSD init services.\n\n + service: name of the service to manage\n + running: whether the service should be running\n + restarted: whether the service should be restarted\n + reloaded: whether the service should be reloaded\n + command: custom command to pass like: ``/etc/rc.d/<service> <command>``\n + enabled: whether this service should be enabled/disabled on boot\n '
status_argument = 'status'
if (host.get_fact(Os) == 'OpenBSD'):
status_argument = 'check'
(yield handle_service_control(host, service, RcdStatus, 'test -e /etc/rc.d/{0} && /etc/rc.d/{0} {1} || /usr/local/etc/rc.d/{0} {1}', running, restarted, reloaded, command, status_argument=status_argument))
if isinstance(enabled, bool):
(yield files.line('/etc/rc.conf.local', '^{0}_enable='.format(service), replace='{0}_enable="YES"'.format(service), present=enabled, state=state, host=host)) | @operation
def service(service, running=True, restarted=False, reloaded=False, command=None, enabled=None, state=None, host=None):
'\n Manage the state of BSD init services.\n\n + service: name of the service to manage\n + running: whether the service should be running\n + restarted: whether the service should be restarted\n + reloaded: whether the service should be reloaded\n + command: custom command to pass like: ``/etc/rc.d/<service> <command>``\n + enabled: whether this service should be enabled/disabled on boot\n '
status_argument = 'status'
if (host.get_fact(Os) == 'OpenBSD'):
status_argument = 'check'
(yield handle_service_control(host, service, RcdStatus, 'test -e /etc/rc.d/{0} && /etc/rc.d/{0} {1} || /usr/local/etc/rc.d/{0} {1}', running, restarted, reloaded, command, status_argument=status_argument))
if isinstance(enabled, bool):
(yield files.line('/etc/rc.conf.local', '^{0}_enable='.format(service), replace='{0}_enable="YES"'.format(service), present=enabled, state=state, host=host))<|docstring|>Manage the state of BSD init services.
+ service: name of the service to manage
+ running: whether the service should be running
+ restarted: whether the service should be restarted
+ reloaded: whether the service should be reloaded
+ command: custom command to pass like: ``/etc/rc.d/<service> <command>``
+ enabled: whether this service should be enabled/disabled on boot<|endoftext|> |
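A sketch of how the operation above might be invoked from a pyinfra deploy file; the service name and the exact call style depend on the pyinfra version in use, so treat the details as assumptions:

```python
# deploy.py -- executed with something like: pyinfra inventory.py deploy.py
from pyinfra.operations import bsdinit

# Ensure sshd is running now and enabled in /etc/rc.conf.local at boot.
bsdinit.service(
    service='sshd',
    running=True,
    enabled=True,
)
```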
f9576351d53dd3b49d2054d65dbd41c9c9a8df2d77fdf0ff2804868e353074c4 | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'Fits the Preprocessor and creates required attributes\n '
df_wide = self._prepare_wide(df)
self.wide_crossed_cols = df_wide.columns.tolist()
if self.already_dummies:
dummy_cols = [c for c in self.wide_crossed_cols if (c not in self.already_dummies)]
self.one_hot_enc.fit(df_wide[dummy_cols])
else:
self.one_hot_enc.fit(df_wide[self.wide_crossed_cols])
return self | Fits the Preprocessor and creates required attributes | pytorch_widedeep/preprocessing/_preprocessors.py | fit | yuanzhiKe/pytorch-widedeep | 0 | python | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'\n '
df_wide = self._prepare_wide(df)
self.wide_crossed_cols = df_wide.columns.tolist()
if self.already_dummies:
dummy_cols = [c for c in self.wide_crossed_cols if (c not in self.already_dummies)]
self.one_hot_enc.fit(df_wide[dummy_cols])
else:
self.one_hot_enc.fit(df_wide[self.wide_crossed_cols])
return self | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'\n '
df_wide = self._prepare_wide(df)
self.wide_crossed_cols = df_wide.columns.tolist()
if self.already_dummies:
dummy_cols = [c for c in self.wide_crossed_cols if (c not in self.already_dummies)]
self.one_hot_enc.fit(df_wide[dummy_cols])
else:
self.one_hot_enc.fit(df_wide[self.wide_crossed_cols])
return self<|docstring|>Fits the Preprocessor and creates required attributes<|endoftext|> |
d622cd9ed55b3b179421dd2db7762f960453d0fe9bc8544d180b662e1e90f222 | def transform(self, df: pd.DataFrame) -> Union[(sparse_matrix, np.ndarray)]:
'Returns the processed dataframe as a one hot encoded dense or\n sparse matrix\n '
try:
self.one_hot_enc.categories_
except:
raise NotFittedError("This WidePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
df_wide = self._prepare_wide(df)
if self.already_dummies:
X_oh_1 = df_wide[self.already_dummies].values
dummy_cols = [c for c in self.wide_crossed_cols if (c not in self.already_dummies)]
X_oh_2 = self.one_hot_enc.transform(df_wide[dummy_cols])
return np.hstack((X_oh_1, X_oh_2))
else:
return self.one_hot_enc.transform(df_wide[self.wide_crossed_cols]) | Returns the processed dataframe as a one hot encoded dense or
sparse matrix | pytorch_widedeep/preprocessing/_preprocessors.py | transform | yuanzhiKe/pytorch-widedeep | 0 | python | def transform(self, df: pd.DataFrame) -> Union[(sparse_matrix, np.ndarray)]:
'Returns the processed dataframe as a one hot encoded dense or\n sparse matrix\n '
try:
self.one_hot_enc.categories_
except:
raise NotFittedError("This WidePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
df_wide = self._prepare_wide(df)
if self.already_dummies:
X_oh_1 = df_wide[self.already_dummies].values
dummy_cols = [c for c in self.wide_crossed_cols if (c not in self.already_dummies)]
X_oh_2 = self.one_hot_enc.transform(df_wide[dummy_cols])
return np.hstack((X_oh_1, X_oh_2))
else:
return self.one_hot_enc.transform(df_wide[self.wide_crossed_cols]) | def transform(self, df: pd.DataFrame) -> Union[(sparse_matrix, np.ndarray)]:
'Returns the processed dataframe as a one hot encoded dense or\n sparse matrix\n '
try:
self.one_hot_enc.categories_
except:
raise NotFittedError("This WidePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
df_wide = self._prepare_wide(df)
if self.already_dummies:
X_oh_1 = df_wide[self.already_dummies].values
dummy_cols = [c for c in self.wide_crossed_cols if (c not in self.already_dummies)]
X_oh_2 = self.one_hot_enc.transform(df_wide[dummy_cols])
return np.hstack((X_oh_1, X_oh_2))
else:
return self.one_hot_enc.transform(df_wide[self.wide_crossed_cols])<|docstring|>Returns the processed dataframe as a one hot encoded dense or
sparse matrix<|endoftext|> |
c9f300c1cb98163dc005fbee55ccbbcbdbec8c5c2fc575a6898bd7bcd35e5abb | def fit_transform(self, df: pd.DataFrame) -> Union[(sparse_matrix, np.ndarray)]:
'Combines ``fit`` and ``transform``\n '
return self.fit(df).transform(df) | Combines ``fit`` and ``transform`` | pytorch_widedeep/preprocessing/_preprocessors.py | fit_transform | yuanzhiKe/pytorch-widedeep | 0 | python | def fit_transform(self, df: pd.DataFrame) -> Union[(sparse_matrix, np.ndarray)]:
'\n '
return self.fit(df).transform(df) | def fit_transform(self, df: pd.DataFrame) -> Union[(sparse_matrix, np.ndarray)]:
'\n '
return self.fit(df).transform(df)<|docstring|>Combines ``fit`` and ``transform``<|endoftext|> |
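A usage sketch tying the three WidePreprocessor records above together; the constructor arguments (`wide_cols`, `crossed_cols`), the column names, and the assumption that the class is exported from `pytorch_widedeep.preprocessing` are illustrative, not taken from the records:

```python
import pandas as pd
from pytorch_widedeep.preprocessing import WidePreprocessor

df = pd.DataFrame({
    'education': ['11th', 'HS-grad', 'Assoc-acdm'],
    'relationship': ['Husband', 'Wife', 'Husband'],
})

wide_preprocessor = WidePreprocessor(
    wide_cols=['education', 'relationship'],
    crossed_cols=[('education', 'relationship')],
)

# fit_transform is just fit(df) followed by transform(df), as shown above.
X_wide = wide_preprocessor.fit_transform(df)   # one-hot encoded matrix (dense or sparse)
```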
fd128eaa5aed0912078a6c977af729508f8ef1a6187eb99d2d9a33a780da25cc | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'Fits the Preprocessor and creates required attributes\n '
if (self.embed_cols is not None):
df_emb = self._prepare_embed(df)
self.label_encoder = LabelEncoder(df_emb.columns.tolist()).fit(df_emb)
self.embeddings_input: List = []
for (k, v) in self.label_encoder.encoding_dict.items():
self.embeddings_input.append((k, len(v), self.embed_dim[k]))
if (self.continuous_cols is not None):
df_cont = self._prepare_continuous(df)
if self.scale:
df_std = df_cont[self.standardize_cols]
self.scaler = StandardScaler().fit(df_std.values)
else:
warnings.warn('Continuous columns will not be normalised')
return self | Fits the Preprocessor and creates required attributes | pytorch_widedeep/preprocessing/_preprocessors.py | fit | yuanzhiKe/pytorch-widedeep | 0 | python | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'\n '
if (self.embed_cols is not None):
df_emb = self._prepare_embed(df)
self.label_encoder = LabelEncoder(df_emb.columns.tolist()).fit(df_emb)
self.embeddings_input: List = []
for (k, v) in self.label_encoder.encoding_dict.items():
self.embeddings_input.append((k, len(v), self.embed_dim[k]))
if (self.continuous_cols is not None):
df_cont = self._prepare_continuous(df)
if self.scale:
df_std = df_cont[self.standardize_cols]
self.scaler = StandardScaler().fit(df_std.values)
else:
warnings.warn('Continuous columns will not be normalised')
return self | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'\n '
if (self.embed_cols is not None):
df_emb = self._prepare_embed(df)
self.label_encoder = LabelEncoder(df_emb.columns.tolist()).fit(df_emb)
self.embeddings_input: List = []
for (k, v) in self.label_encoder.encoding_dict.items():
self.embeddings_input.append((k, len(v), self.embed_dim[k]))
if (self.continuous_cols is not None):
df_cont = self._prepare_continuous(df)
if self.scale:
df_std = df_cont[self.standardize_cols]
self.scaler = StandardScaler().fit(df_std.values)
else:
warnings.warn('Continuous columns will not be normalised')
return self<|docstring|>Fits the Preprocessor and creates required attributes<|endoftext|> |
533decfb86cd3316bf3805eb98fe5820153e6c7819d5da548fc380f27fd0270b | def transform(self, df: pd.DataFrame) -> np.ndarray:
'Returns the processed ``dataframe`` as a np.ndarray\n '
if (self.embed_cols is not None):
df_emb = self._prepare_embed(df)
df_emb = self.label_encoder.transform(df_emb)
if (self.continuous_cols is not None):
df_cont = self._prepare_continuous(df)
if self.scale:
try:
self.scaler.mean_
except:
raise NotFittedError("This DensePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
df_std = df_cont[self.standardize_cols]
df_cont[self.standardize_cols] = self.scaler.transform(df_std.values)
try:
df_deep = pd.concat([df_emb, df_cont], axis=1)
except:
try:
df_deep = df_emb.copy()
except:
df_deep = df_cont.copy()
self.deep_column_idx = {k: v for (v, k) in enumerate(df_deep.columns)}
return df_deep.values | Returns the processed ``dataframe`` as a np.ndarray | pytorch_widedeep/preprocessing/_preprocessors.py | transform | yuanzhiKe/pytorch-widedeep | 0 | python | def transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
if (self.embed_cols is not None):
df_emb = self._prepare_embed(df)
df_emb = self.label_encoder.transform(df_emb)
if (self.continuous_cols is not None):
df_cont = self._prepare_continuous(df)
if self.scale:
try:
self.scaler.mean_
except:
raise NotFittedError("This DensePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
df_std = df_cont[self.standardize_cols]
df_cont[self.standardize_cols] = self.scaler.transform(df_std.values)
try:
df_deep = pd.concat([df_emb, df_cont], axis=1)
except:
try:
df_deep = df_emb.copy()
except:
df_deep = df_cont.copy()
self.deep_column_idx = {k: v for (v, k) in enumerate(df_deep.columns)}
return df_deep.values | def transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
if (self.embed_cols is not None):
df_emb = self._prepare_embed(df)
df_emb = self.label_encoder.transform(df_emb)
if (self.continuous_cols is not None):
df_cont = self._prepare_continuous(df)
if self.scale:
try:
self.scaler.mean_
except:
raise NotFittedError("This DensePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
df_std = df_cont[self.standardize_cols]
df_cont[self.standardize_cols] = self.scaler.transform(df_std.values)
try:
df_deep = pd.concat([df_emb, df_cont], axis=1)
except:
try:
df_deep = df_emb.copy()
except:
df_deep = df_cont.copy()
self.deep_column_idx = {k: v for (v, k) in enumerate(df_deep.columns)}
return df_deep.values<|docstring|>Returns the processed ``dataframe`` as a np.ndarray<|endoftext|> |
be67c3e0099774cc65a775f8efe523a449b776a890c45aacc304d9aa877efa3c | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'Combines ``fit`` and ``transform``\n '
return self.fit(df).transform(df) | Combines ``fit`` and ``transform`` | pytorch_widedeep/preprocessing/_preprocessors.py | fit_transform | yuanzhiKe/pytorch-widedeep | 0 | python | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
return self.fit(df).transform(df) | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
return self.fit(df).transform(df)<|docstring|>Combines ``fit`` and ``transform``<|endoftext|> |
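A matching sketch for the DensePreprocessor records above; the `(column, embedding dim)` form of `embed_cols` is an assumption consistent with the `embed_dim` lookup in `fit`, and the column names are invented:

```python
import pandas as pd
from pytorch_widedeep.preprocessing import DensePreprocessor

df = pd.DataFrame({
    'education': ['11th', 'HS-grad', 'Assoc-acdm'],
    'age': [25, 38, 44],
})

deep_preprocessor = DensePreprocessor(
    embed_cols=[('education', 8)],    # categorical column and its embedding dimension
    continuous_cols=['age'],
)

X_deep = deep_preprocessor.fit_transform(df)
print(deep_preprocessor.embeddings_input)   # [('education', n_unique_values, 8)]
print(deep_preprocessor.deep_column_idx)    # column name -> position in X_deep
```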
610d95ea75cd9add03d73ce89cd4261b720b02b9761e5923dae8b0a7a3319207 | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'Builds the vocabulary\n '
texts = df[self.text_col].tolist()
tokens = get_texts(texts)
self.vocab = Vocab.create(tokens, max_vocab=self.max_vocab, min_freq=self.min_freq)
if self.verbose:
print('The vocabulary contains {} tokens'.format(len(self.vocab.stoi)))
return self | Builds the vocabulary | pytorch_widedeep/preprocessing/_preprocessors.py | fit | yuanzhiKe/pytorch-widedeep | 0 | python | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'\n '
texts = df[self.text_col].tolist()
tokens = get_texts(texts)
self.vocab = Vocab.create(tokens, max_vocab=self.max_vocab, min_freq=self.min_freq)
if self.verbose:
print('The vocabulary contains {} tokens'.format(len(self.vocab.stoi)))
return self | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'\n '
texts = df[self.text_col].tolist()
tokens = get_texts(texts)
self.vocab = Vocab.create(tokens, max_vocab=self.max_vocab, min_freq=self.min_freq)
if self.verbose:
print('The vocabulary contains {} tokens'.format(len(self.vocab.stoi)))
return self<|docstring|>Builds the vocabulary<|endoftext|> |
02ce5a10526618adf2b5d5a2141cb7611861cbb896f7ad738560db23fa0ad262 | def transform(self, df: pd.DataFrame) -> np.ndarray:
'Returns the padded, `numericalised` sequences\n '
try:
self.vocab
except:
raise NotFittedError("This TextPreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
texts = df[self.text_col].tolist()
self.tokens = get_texts(texts)
sequences = [self.vocab.numericalize(t) for t in self.tokens]
padded_seq = np.array([pad_sequences(s, maxlen=self.maxlen) for s in sequences])
if (self.word_vectors_path is not None):
self.embedding_matrix = build_embeddings_matrix(self.vocab, self.word_vectors_path, self.min_freq)
return padded_seq | Returns the padded, `numericalised` sequences | pytorch_widedeep/preprocessing/_preprocessors.py | transform | yuanzhiKe/pytorch-widedeep | 0 | python | def transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
try:
self.vocab
except:
raise NotFittedError("This TextPreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
texts = df[self.text_col].tolist()
self.tokens = get_texts(texts)
sequences = [self.vocab.numericalize(t) for t in self.tokens]
padded_seq = np.array([pad_sequences(s, maxlen=self.maxlen) for s in sequences])
if (self.word_vectors_path is not None):
self.embedding_matrix = build_embeddings_matrix(self.vocab, self.word_vectors_path, self.min_freq)
return padded_seq | def transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
try:
self.vocab
except:
raise NotFittedError("This TextPreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
texts = df[self.text_col].tolist()
self.tokens = get_texts(texts)
sequences = [self.vocab.numericalize(t) for t in self.tokens]
padded_seq = np.array([pad_sequences(s, maxlen=self.maxlen) for s in sequences])
if (self.word_vectors_path is not None):
self.embedding_matrix = build_embeddings_matrix(self.vocab, self.word_vectors_path, self.min_freq)
return padded_seq<|docstring|>Returns the padded, `numericalised` sequences<|endoftext|> |
be67c3e0099774cc65a775f8efe523a449b776a890c45aacc304d9aa877efa3c | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'Combines ``fit`` and ``transform``\n '
return self.fit(df).transform(df) | Combines ``fit`` and ``transform`` | pytorch_widedeep/preprocessing/_preprocessors.py | fit_transform | yuanzhiKe/pytorch-widedeep | 0 | python | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
return self.fit(df).transform(df) | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
return self.fit(df).transform(df)<|docstring|>Combines ``fit`` and ``transform``<|endoftext|> |
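A sketch for the TextPreprocessor records above; the column name and the small `max_vocab`, `min_freq` and `maxlen` values are illustrative only:

```python
import pandas as pd
from pytorch_widedeep.preprocessing import TextPreprocessor

df = pd.DataFrame({
    'review': ['the room was clean and quiet', 'terrible service, would not come back'],
})

text_preprocessor = TextPreprocessor(text_col='review', max_vocab=1000, min_freq=1, maxlen=10)
X_text = text_preprocessor.fit_transform(df)   # array of padded token-id sequences
print(X_text.shape)                            # (2, 10)
```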
49256f5193ee528358348a2db5e0a253d72c398b1428f885a72b7c488aff5077 | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'Simply instantiates the Preprocessors\n :obj:`AspectAwarePreprocessor` and :obj:`SimplePreprocessor` for image\n resizing.\n\n See\n :class:`pytorch_widedeep.utils.image_utils.AspectAwarePreprocessor`\n and :class:`pytorch_widedeep.utils.image_utils.SimplePreprocessor`.\n\n '
self.aap = AspectAwarePreprocessor(self.width, self.height)
self.spp = SimplePreprocessor(self.width, self.height)
self._compute_normalising_metrics = True
return self | Simply instantiates the Preprocessors
:obj:`AspectAwarePreprocessor` and :obj:`SimplePreprocessor` for image
resizing.
See
:class:`pytorch_widedeep.utils.image_utils.AspectAwarePreprocessor`
and :class:`pytorch_widedeep.utils.image_utils.SimplePreprocessor`. | pytorch_widedeep/preprocessing/_preprocessors.py | fit | yuanzhiKe/pytorch-widedeep | 0 | python | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'Simply instantiates the Preprocessors\n :obj:`AspectAwarePreprocessor` and :obj:`SimplePreprocessor` for image\n resizing.\n\n See\n :class:`pytorch_widedeep.utils.image_utils.AspectAwarePreprocessor`\n and :class:`pytorch_widedeep.utils.image_utils.SimplePreprocessor`.\n\n '
self.aap = AspectAwarePreprocessor(self.width, self.height)
self.spp = SimplePreprocessor(self.width, self.height)
self._compute_normalising_metrics = True
return self | def fit(self, df: pd.DataFrame) -> BasePreprocessor:
'Simply instantiates the Preprocessors\n :obj:`AspectAwarePreprocessor` and :obj:`SimplePreprocessor` for image\n resizing.\n\n See\n :class:`pytorch_widedeep.utils.image_utils.AspectAwarePreprocessor`\n and :class:`pytorch_widedeep.utils.image_utils.SimplePreprocessor`.\n\n '
self.aap = AspectAwarePreprocessor(self.width, self.height)
self.spp = SimplePreprocessor(self.width, self.height)
self._compute_normalising_metrics = True
return self<|docstring|>Simply instantiates the Preprocessors
:obj:`AspectAwarePreprocessor` and :obj:`SimplePreprocessor` for image
resizing.
See
:class:`pytorch_widedeep.utils.image_utils.AspectAwarePreprocessor`
and :class:`pytorch_widedeep.utils.image_utils.SimplePreprocessor`.<|endoftext|> |
ede9a5589c7f5b11d6a0b6fe87e8b8f48baa0d06d590724f369259efbfebcccf | def transform(self, df: pd.DataFrame) -> np.ndarray:
'Resizes the images to the input height and width.\n '
try:
self.aap
except:
raise NotFittedError("This ImagePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
image_list = df[self.img_col].tolist()
if self.verbose:
print('Reading Images from {}'.format(self.img_path))
imgs = [cv2.imread('/'.join([self.img_path, img])) for img in image_list]
aspect = [(im.shape[0], im.shape[1]) for im in imgs]
aspect_r = [(a[0] / a[1]) for a in aspect]
diff_idx = [i for (i, r) in enumerate(aspect_r) if (r != 1.0)]
if self.verbose:
print('Resizing')
resized_imgs = []
for (i, img) in tqdm(enumerate(imgs), total=len(imgs), disable=(self.verbose != 1)):
if (i in diff_idx):
resized_imgs.append(self.aap.preprocess(img))
else:
resized_imgs.append(self.spp.preprocess(img))
if self._compute_normalising_metrics:
if self.verbose:
print('Computing normalisation metrics')
(mean_R, mean_G, mean_B) = ([], [], [])
(std_R, std_G, std_B) = ([], [], [])
for rsz_img in resized_imgs:
((mean_b, mean_g, mean_r), (std_b, std_g, std_r)) = cv2.meanStdDev(rsz_img)
mean_R.append(mean_r)
mean_G.append(mean_g)
mean_B.append(mean_b)
std_R.append(std_r)
std_G.append(std_g)
std_B.append(std_b)
self.normalise_metrics = dict(mean={'R': (np.mean(mean_R) / 255.0), 'G': (np.mean(mean_G) / 255.0), 'B': (np.mean(mean_B) / 255.0)}, std={'R': (np.mean(std_R) / 255.0), 'G': (np.mean(std_G) / 255.0), 'B': (np.mean(std_B) / 255.0)})
self._compute_normalising_metrics = False
return np.asarray(resized_imgs) | Resizes the images to the input height and width. | pytorch_widedeep/preprocessing/_preprocessors.py | transform | yuanzhiKe/pytorch-widedeep | 0 | python | def transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
try:
self.aap
except:
raise NotFittedError("This ImagePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
image_list = df[self.img_col].tolist()
if self.verbose:
print('Reading Images from {}'.format(self.img_path))
imgs = [cv2.imread('/'.join([self.img_path, img])) for img in image_list]
aspect = [(im.shape[0], im.shape[1]) for im in imgs]
aspect_r = [(a[0] / a[1]) for a in aspect]
diff_idx = [i for (i, r) in enumerate(aspect_r) if (r != 1.0)]
if self.verbose:
print('Resizing')
resized_imgs = []
for (i, img) in tqdm(enumerate(imgs), total=len(imgs), disable=(self.verbose != 1)):
if (i in diff_idx):
resized_imgs.append(self.aap.preprocess(img))
else:
resized_imgs.append(self.spp.preprocess(img))
if self._compute_normalising_metrics:
if self.verbose:
print('Computing normalisation metrics')
(mean_R, mean_G, mean_B) = ([], [], [])
(std_R, std_G, std_B) = ([], [], [])
for rsz_img in resized_imgs:
((mean_b, mean_g, mean_r), (std_b, std_g, std_r)) = cv2.meanStdDev(rsz_img)
mean_R.append(mean_r)
mean_G.append(mean_g)
mean_B.append(mean_b)
std_R.append(std_r)
std_G.append(std_g)
std_B.append(std_b)
self.normalise_metrics = dict(mean={'R': (np.mean(mean_R) / 255.0), 'G': (np.mean(mean_G) / 255.0), 'B': (np.mean(mean_B) / 255.0)}, std={'R': (np.mean(std_R) / 255.0), 'G': (np.mean(std_G) / 255.0), 'B': (np.mean(std_B) / 255.0)})
self._compute_normalising_metrics = False
return np.asarray(resized_imgs) | def transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
try:
self.aap
except:
raise NotFittedError("This ImagePreprocessor instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.")
image_list = df[self.img_col].tolist()
if self.verbose:
print('Reading Images from {}'.format(self.img_path))
imgs = [cv2.imread('/'.join([self.img_path, img])) for img in image_list]
aspect = [(im.shape[0], im.shape[1]) for im in imgs]
aspect_r = [(a[0] / a[1]) for a in aspect]
diff_idx = [i for (i, r) in enumerate(aspect_r) if (r != 1.0)]
if self.verbose:
print('Resizing')
resized_imgs = []
for (i, img) in tqdm(enumerate(imgs), total=len(imgs), disable=(self.verbose != 1)):
if (i in diff_idx):
resized_imgs.append(self.aap.preprocess(img))
else:
resized_imgs.append(self.spp.preprocess(img))
if self._compute_normalising_metrics:
if self.verbose:
print('Computing normalisation metrics')
(mean_R, mean_G, mean_B) = ([], [], [])
(std_R, std_G, std_B) = ([], [], [])
for rsz_img in resized_imgs:
((mean_b, mean_g, mean_r), (std_b, std_g, std_r)) = cv2.meanStdDev(rsz_img)
mean_R.append(mean_r)
mean_G.append(mean_g)
mean_B.append(mean_b)
std_R.append(std_r)
std_G.append(std_g)
std_B.append(std_b)
self.normalise_metrics = dict(mean={'R': (np.mean(mean_R) / 255.0), 'G': (np.mean(mean_G) / 255.0), 'B': (np.mean(mean_B) / 255.0)}, std={'R': (np.mean(std_R) / 255.0), 'G': (np.mean(std_G) / 255.0), 'B': (np.mean(std_B) / 255.0)})
self._compute_normalising_metrics = False
return np.asarray(resized_imgs)<|docstring|>Resizes the images to the input height and width.<|endoftext|> |
be67c3e0099774cc65a775f8efe523a449b776a890c45aacc304d9aa877efa3c | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'Combines ``fit`` and ``transform``\n '
return self.fit(df).transform(df) | Combines ``fit`` and ``transform`` | pytorch_widedeep/preprocessing/_preprocessors.py | fit_transform | yuanzhiKe/pytorch-widedeep | 0 | python | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
return self.fit(df).transform(df) | def fit_transform(self, df: pd.DataFrame) -> np.ndarray:
'\n '
return self.fit(df).transform(df)<|docstring|>Combines ``fit`` and ``transform``<|endoftext|> |
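A sketch for the ImagePreprocessor records above; the folder layout and file names are assumptions, and the files must exist under `img_path` for the resize loop to work:

```python
import pandas as pd
from pytorch_widedeep.preprocessing import ImagePreprocessor

df = pd.DataFrame({'img_name': ['house_1.jpg', 'house_2.jpg']})   # files expected under img_path

image_preprocessor = ImagePreprocessor(img_col='img_name', img_path='data/images',
                                        width=224, height=224)
X_images = image_preprocessor.fit_transform(df)       # (n_images, 224, 224, 3) array
print(image_preprocessor.normalise_metrics['mean'])   # per-channel means computed during transform
```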
46f8be63e47f10c2849b485790f71585e584083eeeee3d74f7b97fb42a2ad80a | def storeimages(destination_path='C:\\spotlight'):
'\tUsage : storeimages( destination_path ) -> None\n\t\tStore the cached Microsoft Spotlight Images in the computer to the \n\t\tdestination specified.\n\t\tParams -\n\t\t\tdestination_path : \n\t\t\t\tPath of the folder where Images will be saved.\n\t\t\t\tdefault value : "C:\\spotlight"\n\t\t\t\tIf the provided path does not represent an existing \n\t\t\t\tdirectory,then a new directory will be created with same \n\t\t\t\tpath, If possible.\n\t\tErrors:\n\t\t\tValueError : \n\t\t\tIf the given path represents an already existing file and not \n\t\t\ta directory.\t\t\t\t\n\t'
import os, hashlib
from PIL import Image
folder = os.path.join(os.getenv('userprofile'), 'AppData\\Local\\Packages\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\LocalState\\Assets')
name = ''
if (not os.path.exists(destination_path)):
os.mkdir(destination_path)
if (not os.path.isdir(destination_path)):
raise ValueError('Given path cannot be used as a directory!')
files = os.listdir(destination_path)
for file in os.listdir(folder):
try:
img = Image.open(os.path.join(folder, file))
if ((img.height == 1080) and (img.width == 1920)):
f = open(os.path.join(folder, file), 'rb')
name = (hashlib.md5(f.read()).hexdigest() + '.jpeg')
f.close()
if (name not in files):
img.save(os.path.join(destination_path, name))
files.append(name)
img.close()
except OSError:
continue | Usage : storeimages( destination_path ) -> None
Store the cached Microsoft Spotlight Images in the computer to the
destination specified.
Params -
destination_path :
Path of the folder where Images will be saved.
default value : "C:\spotlight"
If the provided path does not represent an existing
directory,then a new directory will be created with same
path, If possible.
Errors:
ValueError :
If the given path represents an already existing file and not
a directory. | spotlightpy/spotlight.py | storeimages | neil-vqa/spotlightpy | 0 | python | def storeimages(destination_path='C:\\spotlight'):
'\tUsage : storeimages( destination_path ) -> None\n\t\tStore the cached Microsoft Spotlight Images in the computer to the \n\t\tdestination specified.\n\t\tParams -\n\t\t\tdestination_path : \n\t\t\t\tPath of the folder where Images will be saved.\n\t\t\t\tdefault value : "C:\\spotlight"\n\t\t\t\tIf the provided path does not represent an existing \n\t\t\t\tdirectory,then a new directory will be created with same \n\t\t\t\tpath, If possible.\n\t\tErrors:\n\t\t\tValueError : \n\t\t\tIf the given path represents an already existing file and not \n\t\t\ta directory.\t\t\t\t\n\t'
import os, hashlib
from PIL import Image
folder = os.path.join(os.getenv('userprofile'), 'AppData\\Local\\Packages\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\LocalState\\Assets')
name = ''
if (not os.path.exists(destination_path)):
os.mkdir(destination_path)
if (not os.path.isdir(destination_path)):
raise ValueError('Given path cannot be used as a directory!')
files = os.listdir(destination_path)
for file in os.listdir(folder):
try:
img = Image.open(os.path.join(folder, file))
if ((img.height == 1080) and (img.width == 1920)):
f = open(os.path.join(folder, file), 'rb')
name = (hashlib.md5(f.read()).hexdigest() + '.jpeg')
f.close()
if (name not in files):
img.save(os.path.join(destination_path, name))
files.append(name)
img.close()
except OSError:
continue | def storeimages(destination_path='C:\\spotlight'):
'\tUsage : storeimages( destination_path ) -> None\n\t\tStore the cached Microsoft Spotlight Images in the computer to the \n\t\tdestination specified.\n\t\tParams -\n\t\t\tdestination_path : \n\t\t\t\tPath of the folder where Images will be saved.\n\t\t\t\tdefault value : "C:\\spotlight"\n\t\t\t\tIf the provided path does not represent an existing \n\t\t\t\tdirectory,then a new directory will be created with same \n\t\t\t\tpath, If possible.\n\t\tErrors:\n\t\t\tValueError : \n\t\t\tIf the given path represents an already existing file and not \n\t\t\ta directory.\t\t\t\t\n\t'
import os, hashlib
from PIL import Image
folder = os.path.join(os.getenv('userprofile'), 'AppData\\Local\\Packages\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\LocalState\\Assets')
name = ''
if (not os.path.exists(destination_path)):
os.mkdir(destination_path)
if (not os.path.isdir(destination_path)):
raise ValueError('Given path cannot be used as a directory!')
files = os.listdir(destination_path)
for file in os.listdir(folder):
try:
img = Image.open(os.path.join(folder, file))
if ((img.height == 1080) and (img.width == 1920)):
f = open(os.path.join(folder, file), 'rb')
name = (hashlib.md5(f.read()).hexdigest() + '.jpeg')
f.close()
if (name not in files):
img.save(os.path.join(destination_path, name))
files.append(name)
img.close()
except OSError:
continue<|docstring|>Usage : storeimages( destination_path ) -> None
Store the cached Microsoft Spotlight Images in the computer to the
destination specified.
Params -
destination_path :
Path of the folder where Images will be saved.
default value : "C:\spotlight"
If the provided path does not represent an existing
directory,then a new directory will be created with same
path, If possible.
Errors:
ValueError :
If the given path represents an already existing file and not
a directory.<|endoftext|> |
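An illustrative call for the `storeimages` record above (Windows-only; the destination folder and the import path are assumptions):

```python
# Assumes the module is importable as spotlightpy.spotlight and Pillow is installed.
from spotlightpy.spotlight import storeimages

# Copies any new 1920x1080 Spotlight assets, named by MD5 hash, into the folder.
storeimages('C:\\Users\\me\\Pictures\\spotlight')
```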
be9d8a66fbf5e71cd060d51a5588c60cc551067be512bd3157a31237558f46e7 | def getimages():
'\tUsage : getimages(None) -> list(PIL.Image)\n\t\treturn a list of PIL.Image.Image objects where each object is\n\t\ta Microsoft Spotlight JPEG image of resolution 1920x1080.\t\t\n\t'
import os
from PIL import Image
folder = os.path.join(os.getenv('userprofile'), 'AppData\\Local\\Packages\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\LocalState\\Assets')
images = []
for file in os.listdir(folder):
img = Image.open(os.path.join(folder, file))
if ((img.height == 1080) and (img.width == 1920)):
images.append(img)
img.close()
return images | Usage : getimages(None) -> list(PIL.Image)
return a list of PIL.Image.Image objects where each object is
a Microsoft Spotlight JPEG image of resolution 1920x1080. | spotlightpy/spotlight.py | getimages | neil-vqa/spotlightpy | 0 | python | def getimages():
'\tUsage : getimages(None) -> list(PIL.Image)\n\t\treturn a list of PIL.Image.Image objects where each object is\n\t\ta Microsoft Spotlight JPEG image of resolution 1920x1080.\t\t\n\t'
import os
from PIL import Image
folder = os.path.join(os.getenv('userprofile'), 'AppData\\Local\\Packages\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\LocalState\\Assets')
images = []
for file in os.listdir(folder):
img = Image.open(os.path.join(folder, file))
if ((img.height == 1080) and (img.width == 1920)):
images.append(img)
img.close()
return images | def getimages():
'\tUsage : getimages(None) -> list(PIL.Image)\n\t\treturn a list of PIL.Image.Image objects where each object is\n\t\ta Microsoft Spotlight JPEG image of resolution 1920x1080.\t\t\n\t'
import os
from PIL import Image
folder = os.path.join(os.getenv('userprofile'), 'AppData\\Local\\Packages\\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\\LocalState\\Assets')
images = []
for file in os.listdir(folder):
img = Image.open(os.path.join(folder, file))
if ((img.height == 1080) and (img.width == 1920)):
images.append(img)
img.close()
return images<|docstring|>Usage : getimages(None) -> list(PIL.Image)
return a list of PIL.Image.Image objects where each object is
a Microsoft Spotlight JPEG image of resolution 1920x1080.<|endoftext|> |
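A short sketch for `getimages`; note that the loop above calls `img.close()` on every image before returning, so callers may need to re-open the underlying files before using the pixel data (the import path is an assumption):

```python
from spotlightpy.spotlight import getimages

spotlight_images = getimages()
print(len(spotlight_images), 'cached 1920x1080 Spotlight images found')
```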
55738cb54bd738c2899cf857dabf77f3eb79e743575ab00ece2ce54cc9387f3d | @patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True, return_value=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True, return_value=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system(self, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when all checks pass\n\n Assert that the function returns True\n '
test_return = check_system(quit_if_error=True)
m_enforce_root.assert_called_once_with()
m_check_kernel_modules.assert_called_once_with()
m_check_docker_version.assert_called_once_with(False)
m_check_etcd_version.assert_called_once_with()
for check in test_return:
self.assertTrue(check) | Test for check_system when all checks pass
Assert that the function returns True | calicoctl/tests/unit/checksystem_test.py | test_check_system | tomdee/calico-containers | 0 | python | @patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True, return_value=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True, return_value=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system(self, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when all checks pass\n\n Assert that the function returns True\n '
test_return = check_system(quit_if_error=True)
m_enforce_root.assert_called_once_with()
m_check_kernel_modules.assert_called_once_with()
m_check_docker_version.assert_called_once_with(False)
m_check_etcd_version.assert_called_once_with()
for check in test_return:
self.assertTrue(check) | @patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True, return_value=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True, return_value=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system(self, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when all checks pass\n\n Assert that the function returns True\n '
test_return = check_system(quit_if_error=True)
m_enforce_root.assert_called_once_with()
m_check_kernel_modules.assert_called_once_with()
m_check_docker_version.assert_called_once_with(False)
m_check_etcd_version.assert_called_once_with()
for check in test_return:
self.assertTrue(check)<|docstring|>Test for check_system when all checks pass
Assert that the function returns True<|endoftext|> |
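For the test records that follow, a minimal self-contained illustration of how `parameterized.expand` and a stacked `mock.patch` decorator feed arguments to a test method (all names below are invented; the original tests use the same ordering with more patches):

```python
import os.path
import unittest
from unittest.mock import patch
from parameterized import parameterized   # pip install parameterized

class PatternExample(unittest.TestCase):

    @parameterized.expand([('/tmp', True), ('/definitely/missing', False)])
    @patch('os.path.isdir', autospec=True)
    def test_pattern(self, path, expected, m_isdir):
        # expand() supplies (path, expected); the @patch mock is appended last.
        m_isdir.return_value = expected
        self.assertEqual(os.path.isdir(path), expected)
        m_isdir.assert_called_once_with(path)
```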
3dc036e63f27f0c6e322c61a3e41ffa1c55c027798de56a7ae5a735e2feef6ee | @parameterized.expand([(True, False), (False, True)])
@patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system_bad_state_do_not_quit(self, kernel_status, docker_version_status, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when one of the system checks fails\n\n This test does not quit if there is an error -\n Assert that the function returns False\n\n :param kernel_status: return_value for _check_modules\n :param docker_version_status: return_value for _check_docker_version\n '
m_check_kernel_modules.return_value = kernel_status
m_check_docker_version.return_value = docker_version_status
test_return = check_system(quit_if_error=False)
self.assertIn(False, test_return) | Test for check_system when one of the system checks fails
This test does not quit if there is an error -
Assert that the function returns False
:param kernel_status: return_value for _check_modules
:param docker_version_status: return_value for _check_docker_version | calicoctl/tests/unit/checksystem_test.py | test_check_system_bad_state_do_not_quit | tomdee/calico-containers | 0 | python | @parameterized.expand([(True, False), (False, True)])
@patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system_bad_state_do_not_quit(self, kernel_status, docker_version_status, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when one of the system checks fails\n\n This test does not quit if there is an error -\n Assert that the function returns False\n\n :param kernel_status: return_value for _check_modules\n :param docker_version_status: return_value for _check_docker_version\n '
m_check_kernel_modules.return_value = kernel_status
m_check_docker_version.return_value = docker_version_status
test_return = check_system(quit_if_error=False)
self.assertIn(False, test_return) | @parameterized.expand([(True, False), (False, True)])
@patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system_bad_state_do_not_quit(self, kernel_status, docker_version_status, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when one of the system checks fails\n\n This test does not quit if there is an error -\n Assert that the function returns False\n\n :param kernel_status: return_value for _check_modules\n :param docker_version_status: return_value for _check_docker_version\n '
m_check_kernel_modules.return_value = kernel_status
m_check_docker_version.return_value = docker_version_status
test_return = check_system(quit_if_error=False)
self.assertIn(False, test_return)<|docstring|>Test for check_system when one of the system checks fails
This test does not quit if there is an error -
Assert that the function returns False
:param kernel_status: return_value for _check_modules
:param docker_version_status: return_value for _check_docker_version<|endoftext|> |
dd85b3e6783f307464c43ca8b58cd3e4e1bde8985e6261513f111549d3dcf40f | @parameterized.expand([(True, False), (False, True)])
@patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system_bad_state_quit(self, kernel_status, docker_version_status, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when one of the system checks fails\n\n This test exits if there is a detected error -\n Assert that the system exits\n\n :param kernel_status: return_value for _check_modules patch\n :param docker_version_status: return_value for _check_docker_version patch\n '
m_check_kernel_modules.return_value = kernel_status
m_check_docker_version.return_value = docker_version_status
self.assertRaises(SystemExit, check_system, quit_if_error=True) | Test for check_system when one of the system checks fails
This test exits if there is a detected error -
Assert that the system exits
:param kernel_status: return_value for _check_modules patch
:param docker_version_status: return_value for _check_docker_version patch | calicoctl/tests/unit/checksystem_test.py | test_check_system_bad_state_quit | tomdee/calico-containers | 0 | python | @parameterized.expand([(True, False), (False, True)])
@patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system_bad_state_quit(self, kernel_status, docker_version_status, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when one of the system checks fails\n\n This test exits if there is a detected error -\n Assert that the system exits\n\n :param kernel_status: return_value for _check_modules patch\n :param docker_version_status: return_value for _check_docker_version patch\n '
m_check_kernel_modules.return_value = kernel_status
m_check_docker_version.return_value = docker_version_status
self.assertRaises(SystemExit, check_system, quit_if_error=True) | @parameterized.expand([(True, False), (False, True)])
@patch('calico_ctl.checksystem.enforce_root', autospec=True)
@patch('calico_ctl.checksystem._check_modules', autospec=True)
@patch('calico_ctl.checksystem._check_docker_version', autospec=True)
@patch('calico_ctl.checksystem._check_etcd_version', autospec=True, return_value=True)
def test_check_system_bad_state_quit(self, kernel_status, docker_version_status, m_check_etcd_version, m_check_docker_version, m_check_kernel_modules, m_enforce_root):
'\n Test for check_system when one of the system checks fails\n\n This test exits if there is a detected error -\n Assert that the system exits\n\n :param kernel_status: return_value for _check_modules patch\n :param docker_version_status: return_value for _check_docker_version patch\n '
m_check_kernel_modules.return_value = kernel_status
m_check_docker_version.return_value = docker_version_status
self.assertRaises(SystemExit, check_system, quit_if_error=True)<|docstring|>Test for check_system when one of the system checks fails
This test exits if there is a detected error -
Assert that the system exits
:param kernel_status: return_value for _check_modules patch
:param docker_version_status: return_value for _check_docker_version patch<|endoftext|> |
c0e29d427012ca30978cfd0f01261f9d2a11722454079b2382c00ddb2a06d89a | @parameterized.expand([(['mod_one', 'mod_four'], True), (['mod_four', 'mod_five'], True), (['mod_invalid'], False), (['mod_one', 'mod_invalid'], False), (['mod_four', 'mod_invalid'], False)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True, return_value='version')
def test_check_modules_double_open(self, requirements, expected_return, m_get_version, m_stderr, m_open):
'Test _check_module for different requirements (opening 2 files)\n Use parameterized requirements to test a variety of states in which\n modules may or not be found. Check the number of calls to open().\n Numbered modules exist within the mocked files and should be valid.\n check_modules should return False if searching for the invalid module.\n '
m_file = Mock()
m_file.readlines.side_effect = [['/mod_one.ko', '/mod_two.ko', '/mod_three.ko'], ['/mod_four.ko', '/mod_five.ko']]
m_open.return_value = m_file
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
self.assertEquals(return_val, expected_return)
m_open.assert_has_calls([call('/lib/modules/version/modules.dep'), call().readlines(), call('/lib/modules/version/modules.builtin'), call().readlines()]) | Test _check_module for different requirements (opening 2 files)
Use parameterized requirements to test a variety of states in which
modules may or not be found. Check the number of calls to open().
Numbered modules exist within the mocked files and should be valid.
check_modules should return False if searching for the invalid module. | calicoctl/tests/unit/checksystem_test.py | test_check_modules_double_open | tomdee/calico-containers | 0 | python | @parameterized.expand([(['mod_one', 'mod_four'], True), (['mod_four', 'mod_five'], True), (['mod_invalid'], False), (['mod_one', 'mod_invalid'], False), (['mod_four', 'mod_invalid'], False)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True, return_value='version')
def test_check_modules_double_open(self, requirements, expected_return, m_get_version, m_stderr, m_open):
'Test _check_module for different requirements (opening 2 files)\n Use parameterized requirements to test a variety of states in which\n modules may or not be found. Check the number of calls to open().\n Numbered modules exist within the mocked files and should be valid.\n check_modules should return False if searching for the invalid module.\n '
m_file = Mock()
m_file.readlines.side_effect = [['/mod_one.ko', '/mod_two.ko', '/mod_three.ko'], ['/mod_four.ko', '/mod_five.ko']]
m_open.return_value = m_file
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
self.assertEquals(return_val, expected_return)
m_open.assert_has_calls([call('/lib/modules/version/modules.dep'), call().readlines(), call('/lib/modules/version/modules.builtin'), call().readlines()]) | @parameterized.expand([(['mod_one', 'mod_four'], True), (['mod_four', 'mod_five'], True), (['mod_invalid'], False), (['mod_one', 'mod_invalid'], False), (['mod_four', 'mod_invalid'], False)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True, return_value='version')
def test_check_modules_double_open(self, requirements, expected_return, m_get_version, m_stderr, m_open):
'Test _check_module for different requirements (opening 2 files)\n Use parameterized requirements to test a variety of states in which\n modules may or not be found. Check the number of calls to open().\n Numbered modules exist within the mocked files and should be valid.\n check_modules should return False if searching for the invalid module.\n '
m_file = Mock()
m_file.readlines.side_effect = [['/mod_one.ko', '/mod_two.ko', '/mod_three.ko'], ['/mod_four.ko', '/mod_five.ko']]
m_open.return_value = m_file
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
self.assertEquals(return_val, expected_return)
m_open.assert_has_calls([call('/lib/modules/version/modules.dep'), call().readlines(), call('/lib/modules/version/modules.builtin'), call().readlines()])<|docstring|>Test _check_module for different requirements (opening 2 files)
Use parameterized requirements to test a variety of states in which
modules may or not be found. Check the number of calls to open().
Numbered modules exist within the mocked files and should be valid.
check_modules should return False if searching for the invalid module.<|endoftext|> |
af3794bc188960ee351917e2ed11b048053061bcfdcbd4c9b2890a6dcdaa1c37 | @parameterized.expand([(['mod_one', 'mod_two'], True), (['mod_three'], True)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True, return_value='version')
def test_check_modules_single_open(self, requirements, expected_return, m_get_version, m_stderr, m_open):
'Test _check_module for different requirements (opening 1 file)\n Use parameterized requirements to test a variety of states in which\n modules may or not be found. Check the number of calls to open().\n Numbered modules exist within the mocked file and should be valid.\n '
m_file = Mock()
m_file.readlines.return_value = ['/mod_one.ko', '/mod_two.ko', '/mod_three.ko']
m_open.return_value = m_file
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
m_open.assert_called_once_with('/lib/modules/version/modules.dep')
self.assertEquals(return_val, expected_return) | Test _check_module for different requirements (opening 1 file)
Use parameterized requirements to test a variety of states in which
modules may or may not be found. Check the number of calls to open().
Numbered modules exist within the mocked file and should be valid. | calicoctl/tests/unit/checksystem_test.py | test_check_modules_single_open | tomdee/calico-containers | 0 | python | @parameterized.expand([(['mod_one', 'mod_two'], True), (['mod_three'], True)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True, return_value='version')
def test_check_modules_single_open(self, requirements, expected_return, m_get_version, m_stderr, m_open):
'Test _check_module for different requirements (opening 1 file)\n Use parameterized requirements to test a variety of states in which\n modules may or not be found. Check the number of calls to open().\n Numbered modules exist within the mocked file and should be valid.\n '
m_file = Mock()
m_file.readlines.return_value = ['/mod_one.ko', '/mod_two.ko', '/mod_three.ko']
m_open.return_value = m_file
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
m_open.assert_called_once_with('/lib/modules/version/modules.dep')
self.assertEquals(return_val, expected_return) | @parameterized.expand([(['mod_one', 'mod_two'], True), (['mod_three'], True)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True, return_value='version')
def test_check_modules_single_open(self, requirements, expected_return, m_get_version, m_stderr, m_open):
'Test _check_module for different requirements (opening 1 file)\n Use parameterized requirements to test a variety of states in which\n modules may or not be found. Check the number of calls to open().\n Numbered modules exist within the mocked file and should be valid.\n '
m_file = Mock()
m_file.readlines.return_value = ['/mod_one.ko', '/mod_two.ko', '/mod_three.ko']
m_open.return_value = m_file
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
m_open.assert_called_once_with('/lib/modules/version/modules.dep')
self.assertEquals(return_val, expected_return)<|docstring|>Test _check_module for different requirements (opening 1 file)
Use parameterized requirements to test a variety of states in which
modules may or may not be found. Check the number of calls to open().
Numbered modules exist within the mocked file and should be valid.<|endoftext|> |
a131ef28057e771b6c9cf9ae96518dbc07db53c1a1f30000bfa19bb2f7504307 | @parameterized.expand([(['mod_one', 'mod_two'], True), (['mod_three', 'mod_invalid'], False)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True)
def test_check_modules_lsmod(self, requirements, expected_return, m_check_out, m_stderr, m_open):
'Test _check_module using lsmod\n Cause failure on file open and check_system should\n find modules in lsmod output.\n '
m_open.side_effect = CalledProcessError
m_check_out.return_value = 'mod_one\n mod_two\n mod_three\n'
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
self.assertEquals(return_val, expected_return) | Test _check_module using lsmod
Cause failure on file open and check_system should
find modules in lsmod output. | calicoctl/tests/unit/checksystem_test.py | test_check_modules_lsmod | tomdee/calico-containers | 0 | python | @parameterized.expand([(['mod_one', 'mod_two'], True), (['mod_three', 'mod_invalid'], False)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True)
def test_check_modules_lsmod(self, requirements, expected_return, m_check_out, m_stderr, m_open):
'Test _check_module using lsmod\n Cause failure on file open and check_system should\n find modules in lsmod output.\n '
m_open.side_effect = CalledProcessError
m_check_out.return_value = 'mod_one\n mod_two\n mod_three\n'
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
self.assertEquals(return_val, expected_return) | @parameterized.expand([(['mod_one', 'mod_two'], True), (['mod_three', 'mod_invalid'], False)])
@patch('__builtin__.open', autospec=True)
@patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True)
def test_check_modules_lsmod(self, requirements, expected_return, m_check_out, m_stderr, m_open):
'Test _check_module using lsmod\n Cause failure on file open and check_system should\n find modules in lsmod output.\n '
m_open.side_effect = CalledProcessError
m_check_out.return_value = 'mod_one\n mod_two\n mod_three\n'
with patch('calico_ctl.checksystem.REQUIRED_MODULES', requirements):
return_val = _check_modules()
self.assertEquals(return_val, expected_return)<|docstring|>Test _check_module using lsmod
Cause failure on file open and check_system should
find modules in lsmod output.<|endoftext|> |
7b3a309968b6084ef1c3ec5648deeba8a6aa437770cfe0e5ed9e4533a8cb7604 | @patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True)
def test_check_modules_error(self, m_check_out, m_stderr):
'Test _check_module lsmod failure\n All check_output calls raise an error, meaning check_system\n should return false.\n '
m_check_out.side_effect = CalledProcessError
return_val = _check_modules()
self.assertFalse(return_val) | Test _check_module lsmod failure
All check_output calls raise an error, meaning check_system
should return false. | calicoctl/tests/unit/checksystem_test.py | test_check_modules_error | tomdee/calico-containers | 0 | python | @patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True)
def test_check_modules_error(self, m_check_out, m_stderr):
'Test _check_module lsmod failure\n All check_output calls raise an error, meaning check_system\n should return false.\n '
m_check_out.side_effect = CalledProcessError
return_val = _check_modules()
self.assertFalse(return_val) | @patch('sys.stderr', autospec=True)
@patch('calico_ctl.checksystem.check_output', autospec=True)
def test_check_modules_error(self, m_check_out, m_stderr):
'Test _check_module lsmod failure\n All check_output calls raise an error, meaning check_system\n should return false.\n '
m_check_out.side_effect = CalledProcessError
return_val = _check_modules()
self.assertFalse(return_val)<|docstring|>Test _check_module lsmod failure
All check_output calls raise an error, meaning check_system
should return false.<|endoftext|> |
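The four test rows above all pin down the same lookup in calico_ctl's checksystem module: required kernel modules must appear in modules.dep/modules.builtin, with lsmod output as the fallback. A rough standalone sketch of that logic (simplified, not the actual calico-containers source; the helper name below is illustrative):

def modules_available(required, dep_lines, builtin_lines=(), lsmod_output=''):
    # a module counts as present if '/<name>.ko' appears in the dep/builtin listings,
    # or if its name shows up in `lsmod` output, mirroring what the mocks feed in above
    listed = list(dep_lines) + list(builtin_lines)
    found = set()
    for name in required:
        if any(('/%s.ko' % name) in line for line in listed) or (name in lsmod_output):
            found.add(name)
    return found == set(required)

print(modules_available(['mod_one', 'mod_four'],
                        ['/mod_one.ko', '/mod_two.ko', '/mod_three.ko'],
                        ['/mod_four.ko', '/mod_five.ko']))  # True, matching the first parameterized case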
f335f55f327072aaf32ebc31de26e17be5eb5c1930b9cbeb3a1e6bbc4aedbbcc | def get_attributes(self) -> Dict[(str, Any)]:
'\n Returns:\n Dict[str, Any]: A map of template attributes needed to render a graphviz graph.\n '
if (not self.classes):
self.classes = ComponentMeta.get_class_instances()
if (not self.module_methods):
self.module_methods = ComponentMeta.get_module_method_instances()
if (not self.decorators):
self.decorators = ComponentMeta.get_decorator_instances()
attrs = {'graph': self.graph_attributes, 'classes': [], 'module_methods': [], 'decorators': [], 'subpackages': {}}
for node in self.classes:
attrs['classes'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
for node in self.module_methods:
attrs['module_methods'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
for node in self.decorators:
attrs['decorators'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
return attrs | Returns:
Dict[str, Any]: A map of template attributes needed to render a graphviz graph. | pyviz/renderers/dot.py | get_attributes | KCarretto/pyviz | 1 | python | def get_attributes(self) -> Dict[(str, Any)]:
'\n Returns:\n Dict[str, Any]: A map of template attributes needed to render a graphviz graph.\n '
if (not self.classes):
self.classes = ComponentMeta.get_class_instances()
if (not self.module_methods):
self.module_methods = ComponentMeta.get_module_method_instances()
if (not self.decorators):
self.decorators = ComponentMeta.get_decorator_instances()
attrs = {'graph': self.graph_attributes, 'classes': [], 'module_methods': [], 'decorators': [], 'subpackages': {}}
for node in self.classes:
attrs['classes'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
for node in self.module_methods:
attrs['module_methods'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
for node in self.decorators:
attrs['decorators'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
return attrs | def get_attributes(self) -> Dict[(str, Any)]:
'\n Returns:\n Dict[str, Any]: A map of template attributes needed to render a graphviz graph.\n '
if (not self.classes):
self.classes = ComponentMeta.get_class_instances()
if (not self.module_methods):
self.module_methods = ComponentMeta.get_module_method_instances()
if (not self.decorators):
self.decorators = ComponentMeta.get_decorator_instances()
attrs = {'graph': self.graph_attributes, 'classes': [], 'module_methods': [], 'decorators': [], 'subpackages': {}}
for node in self.classes:
attrs['classes'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
for node in self.module_methods:
attrs['module_methods'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
for node in self.decorators:
attrs['decorators'].append(asdict(node))
if node.subpackage:
if (not attrs['subpackages'].get(node.subpackage)):
attrs['subpackages'][node.subpackage] = []
attrs['subpackages'][node.subpackage].append(node.name)
return attrs<|docstring|>Returns:
Dict[str, Any]: A map of template attributes needed to render a graphviz graph.<|endoftext|> |
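The attributes map returned above has a stable shape, which is what the graphviz template consumes; a small illustrative sketch of that shape and of grouping nodes by subpackage (the literal values and the DOT snippet are assumptions, not pyviz's real templates):

attrs = {
    'graph': {'splines': 'ortho'},                # whatever graph_attributes held
    'classes': [{'name': 'Renderer'}],            # asdict() of each registered class component
    'module_methods': [],
    'decorators': [],
    'subpackages': {'renderers': ['Renderer']},   # subpackage name -> node names
}
for subpackage, names in attrs['subpackages'].items():
    print('subgraph cluster_%s { %s }' % (subpackage, '; '.join(names)))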
00b18fd42ee2d452ca71613484a73488d1e3da11d4495ddfc28488c2166737cb | @app.route('/')
def index():
' Root URL response '
return ('Reminder: return some useful information in json format about the service here', status.HTTP_200_OK) | Root URL response | service/routes.py | index | LLmaomao2020/customers | 5 | python | @app.route('/')
def index():
' '
return ('Reminder: return some useful information in json format about the service here', status.HTTP_200_OK) | @app.route('/')
def index():
' '
return ('Reminder: return some useful information in json format about the service here', status.HTTP_200_OK)<|docstring|>Root URL response<|endoftext|> |
8b4bb6b6a27280fca065ccfd2e7dd49169575be1bacca9794269d0288f0b7279 | def init_db():
' Initializes the SQLAlchemy app '
global app
YourResourceModel.init_db(app) | Initializes the SQLAlchemy app | service/routes.py | init_db | LLmaomao2020/customers | 5 | python | def init_db():
' '
global app
YourResourceModel.init_db(app) | def init_db():
' '
global app
YourResourceModel.init_db(app)<|docstring|>Initializes the SQLAlchemy app<|endoftext|>
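A hedged sketch of exercising the two routes rows above with Flask's test client; the import path and database configuration are assumptions about the surrounding customers service:

from service.routes import app, init_db

init_db()                              # binds YourResourceModel to the Flask app's database
with app.test_client() as client:
    resp = client.get('/')
    assert resp.status_code == 200     # the index route returns HTTP_200_OK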
618da51b1a41f212205f9c4970a80db5a8f9725e75a7d40fdff0f4db52f33a7a | def lnlike_ellflatpriormarginalized(F_obs, F_obs_var, F_mod):
'\n Fit linear model to one Gaussian data set (formulation 3)\n\n Parameters\n ----------\n F_obs, F_obs_var : ndarray (nobj, ..., n_pix_y)\n data and data variances\n F_mod : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n ellML : ndarray (nobj, ndim)\n Best fit MAP parameters\n\n '
FOT = tf.reduce_sum(((F_mod * F_obs) / F_obs_var), axis=(- 1))
FOO = tf.reduce_sum((tf.square(F_obs) / F_obs_var), axis=(- 1))
FTT = tf.reduce_sum((tf.square(F_mod) / F_obs_var), axis=(- 1))
LogSigma_det = tf.reduce_sum(tf.math.log(F_obs_var), axis=(- 1))
Chi2 = (FOO - tf.multiply(tf.divide(FOT, FTT), FOT))
LogDenom = (LogSigma_det + tf.math.log(FTT))
LnMarglike = (((- 0.5) * Chi2) - (0.5 * LogDenom))
ellML = (FOT / FTT)
return (LnMarglike, ellML) | Fit linear model to one Gaussian data set (formulation 3)
Parameters
----------
F_obs, F_obs_var : ndarray (nobj, ..., n_pix_y)
data and data variances
F_mod : ndarray (..., n_components, n_pix_y)
design matrix of linear model
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
ellML : ndarray (nobj, ndim)
Best fit MAP parameters | gasp/marginallikelihoods_tf.py | lnlike_ellflatpriormarginalized | ixkael/gasp | 0 | python | def lnlike_ellflatpriormarginalized(F_obs, F_obs_var, F_mod):
'\n Fit linear model to one Gaussian data set (formulation 3)\n\n Parameters\n ----------\n F_obs, F_obs_var : ndarray (nobj, ..., n_pix_y)\n data and data variances\n F_mod : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n ellML : ndarray (nobj, ndim)\n Best fit MAP parameters\n\n '
FOT = tf.reduce_sum(((F_mod * F_obs) / F_obs_var), axis=(- 1))
FOO = tf.reduce_sum((tf.square(F_obs) / F_obs_var), axis=(- 1))
FTT = tf.reduce_sum((tf.square(F_mod) / F_obs_var), axis=(- 1))
LogSigma_det = tf.reduce_sum(tf.math.log(F_obs_var), axis=(- 1))
Chi2 = (FOO - tf.multiply(tf.divide(FOT, FTT), FOT))
LogDenom = (LogSigma_det + tf.math.log(FTT))
LnMarglike = (((- 0.5) * Chi2) - (0.5 * LogDenom))
ellML = (FOT / FTT)
return (LnMarglike, ellML) | def lnlike_ellflatpriormarginalized(F_obs, F_obs_var, F_mod):
'\n Fit linear model to one Gaussian data set (formulation 3)\n\n Parameters\n ----------\n F_obs, F_obs_var : ndarray (nobj, ..., n_pix_y)\n data and data variances\n F_mod : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n ellML : ndarray (nobj, ndim)\n Best fit MAP parameters\n\n '
FOT = tf.reduce_sum(((F_mod * F_obs) / F_obs_var), axis=(- 1))
FOO = tf.reduce_sum((tf.square(F_obs) / F_obs_var), axis=(- 1))
FTT = tf.reduce_sum((tf.square(F_mod) / F_obs_var), axis=(- 1))
LogSigma_det = tf.reduce_sum(tf.math.log(F_obs_var), axis=(- 1))
Chi2 = (FOO - tf.multiply(tf.divide(FOT, FTT), FOT))
LogDenom = (LogSigma_det + tf.math.log(FTT))
LnMarglike = (((- 0.5) * Chi2) - (0.5 * LogDenom))
ellML = (FOT / FTT)
return (LnMarglike, ellML)<|docstring|>Fit linear model to one Gaussian data set (formulation 3)
Parameters
----------
F_obs, F_obs_var : ndarray (nobj, ..., n_pix_y)
data and data variances
F_mod : ndarray (..., n_components, n_pix_y)
design matrix of linear model
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
ellML : ndarray (nobj, ndim)
Best fit MAP parameters<|endoftext|> |
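A minimal usage sketch for the row above, assuming TensorFlow 2.x, float64 inputs, and a single template scaled by a per-object amplitude; the toy shapes and noise level are illustrative, not values from the gasp repository:

import numpy as np
import tensorflow as tf

n_obj, n_pix = 3, 50
rng = np.random.default_rng(0)
template = tf.constant(rng.uniform(1.0, 2.0, size=n_pix))         # F_mod, shape (n_pix,)
true_ell = tf.constant([[0.5], [1.0], [2.0]], dtype=tf.float64)   # per-object amplitudes
f_obs_var = tf.ones((n_obj, n_pix), dtype=tf.float64) * 1e-4      # Gaussian pixel variances
f_obs = true_ell * template + 1e-2 * rng.normal(size=(n_obj, n_pix))

ln_marglike, ell_ml = lnlike_ellflatpriormarginalized(f_obs, f_obs_var, template)
# ln_marglike holds one value per object; ell_ml should recover true_ell up to the noise level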
475d6f77709472eab71e4c700b69b068130fc906a5483585bf1313fe10f33d2d | def lnlike_ellflatpriormarginalized_multiple(y, yinvvar, mods):
'\n Fit linear model to one Gaussian data set (formulation 1)\n\n Parameters\n ----------\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
eta = tf.reduce_sum((mods * (y * yinvvar)[(..., None, :)]), axis=(- 1))
H = tf.matmul(mods, tf.transpose((mods * yinvvar[(..., None, :)]), [0, 1, 3, 2]))
mu = tf.linalg.solve(H, eta[(..., None)])[(..., 0)]
etaHinveta = tf.reduce_sum((eta * mu), axis=(- 1))
yyvarinvy = tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))
dets = (tf.linalg.logdet(H) - tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1)))
scalar = (tf.cast((tf.shape(mods)[(- 1)] - tf.shape(mods)[(- 2)]), T) * log2pi)
LnMarglike = ((- 0.5) * (((scalar + dets) + yyvarinvy) - etaHinveta))
covar = tf.linalg.inv(H)
return (LnMarglike, mu, covar) | Fit linear model to one Gaussian data set (formulation 1)
Parameters
----------
y, yinvvar : ndarray (nobj, ..., n_pix_y)
data and data inverse variances
M_T : ndarray (..., n_components, n_pix_y)
design matrix of linear model
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
theta_map : ndarray (nobj, ndim)
Best fit MAP parameters
theta_cov : ndarray (nobj, ndim, ndim)
Parameter covariance | gasp/marginallikelihoods_tf.py | lnlike_ellflatpriormarginalized_multiple | ixkael/gasp | 0 | python | def lnlike_ellflatpriormarginalized_multiple(y, yinvvar, mods):
'\n Fit linear model to one Gaussian data set (formulation 1)\n\n Parameters\n ----------\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
eta = tf.reduce_sum((mods * (y * yinvvar)[(..., None, :)]), axis=(- 1))
H = tf.matmul(mods, tf.transpose((mods * yinvvar[(..., None, :)]), [0, 1, 3, 2]))
mu = tf.linalg.solve(H, eta[(..., None)])[(..., 0)]
etaHinveta = tf.reduce_sum((eta * mu), axis=(- 1))
yyvarinvy = tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))
dets = (tf.linalg.logdet(H) - tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1)))
scalar = (tf.cast((tf.shape(mods)[(- 1)] - tf.shape(mods)[(- 2)]), T) * log2pi)
LnMarglike = ((- 0.5) * (((scalar + dets) + yyvarinvy) - etaHinveta))
covar = tf.linalg.inv(H)
return (LnMarglike, mu, covar) | def lnlike_ellflatpriormarginalized_multiple(y, yinvvar, mods):
'\n Fit linear model to one Gaussian data set (formulation 1)\n\n Parameters\n ----------\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
eta = tf.reduce_sum((mods * (y * yinvvar)[(..., None, :)]), axis=(- 1))
H = tf.matmul(mods, tf.transpose((mods * yinvvar[(..., None, :)]), [0, 1, 3, 2]))
mu = tf.linalg.solve(H, eta[(..., None)])[(..., 0)]
etaHinveta = tf.reduce_sum((eta * mu), axis=(- 1))
yyvarinvy = tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))
dets = (tf.linalg.logdet(H) - tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1)))
scalar = (tf.cast((tf.shape(mods)[(- 1)] - tf.shape(mods)[(- 2)]), T) * log2pi)
LnMarglike = ((- 0.5) * (((scalar + dets) + yyvarinvy) - etaHinveta))
covar = tf.linalg.inv(H)
return (LnMarglike, mu, covar)<|docstring|>Fit linear model to one Gaussian data set (formulation 1)
Parameters
----------
y, yinvvar : ndarray (nobj, ..., n_pix_y)
data and data inverse variances
M_T : ndarray (..., n_components, n_pix_y)
design matrix of linear model
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
theta_map : ndarray (nobj, ndim)
Best fit MAP parameters
theta_cov : ndarray (nobj, ndim, ndim)
Parameter covariance<|endoftext|> |
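A shape-level sketch for the multi-component row above (formulation 1), assuming the module-level dtype T is float64 so the internal casts match the float64 inputs:

import numpy as np
import tensorflow as tf

n_obj, n_comp, n_pix = 2, 3, 30
rng = np.random.default_rng(1)
y = tf.constant(rng.normal(size=(n_obj, 1, n_pix)))             # the middle axis is the '...' batch axis
yinvvar = tf.ones_like(y) * 25.0                                 # inverse variances; zeros would mask pixels
mods = tf.constant(rng.normal(size=(n_obj, 1, n_comp, n_pix)))   # per-object design matrix M^T

lnlike, mu, covar = lnlike_ellflatpriormarginalized_multiple(y, yinvvar, mods)
# lnlike: (n_obj, 1); mu: (n_obj, 1, n_comp) least-squares coefficients; covar: (n_obj, 1, n_comp, n_comp)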
6faf38ef8d774ce86ea807a475b1cf3cbd4799245ff4e8dff89bd978889dfbd1 | def logmarglike_onetransfergaussian(y, yinvvar, M_T):
'\n Fit linear model to one Gaussian data set (formulation 2)\n\n Parameters\n ----------\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
M = tf.transpose(M_T, [0, 2, 1])
Hbar = tf.matmul(M_T, (M * yinvvar[(..., :, None)]))
etabar = tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
xi1 = ((- 0.5) * (((ny * log2pi) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) - tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov) | Fit linear model to one Gaussian data set (formulation 2)
Parameters
----------
y, yinvvar : ndarray (nobj, ..., n_pix_y)
data and data inverse variances
M_T : ndarray (..., n_components, n_pix_y)
design matrix of linear model
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
theta_map : ndarray (nobj, ndim)
Best fit MAP parameters
theta_cov : ndarray (nobj, ndim, ndim)
Parameter covariance | gasp/marginallikelihoods_tf.py | logmarglike_onetransfergaussian | ixkael/gasp | 0 | python | def logmarglike_onetransfergaussian(y, yinvvar, M_T):
'\n Fit linear model to one Gaussian data set (formulation 2)\n\n Parameters\n ----------\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
M = tf.transpose(M_T, [0, 2, 1])
Hbar = tf.matmul(M_T, (M * yinvvar[(..., :, None)]))
etabar = tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
xi1 = ((- 0.5) * (((ny * log2pi) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) - tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov) | def logmarglike_onetransfergaussian(y, yinvvar, M_T):
'\n Fit linear model to one Gaussian data set (formulation 2)\n\n Parameters\n ----------\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
M = tf.transpose(M_T, [0, 2, 1])
Hbar = tf.matmul(M_T, (M * yinvvar[(..., :, None)]))
etabar = tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
xi1 = ((- 0.5) * (((ny * log2pi) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) - tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov)<|docstring|>Fit linear model to one Gaussian data set (formulation 2)
Parameters
----------
y, yinvvar : ndarray (nobj, ..., n_pix_y)
data and data inverse variances
M_T : ndarray (..., n_components, n_pix_y)
design matrix of linear model
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
theta_map : ndarray (nobj, ndim)
Best fit MAP parameters
theta_cov : ndarray (nobj, ndim, ndim)
Parameter covariance<|endoftext|> |
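A quick self-check sketch for formulation 2 above: generate noisy data from known coefficients and confirm the MAP estimate recovers them (TensorFlow 2.x with float64 inputs and the module-level T/log2pi globals assumed):

import numpy as np
import tensorflow as tf

n_obj, n_comp, n_pix = 2, 3, 60
rng = np.random.default_rng(2)
M_T = tf.constant(rng.normal(size=(n_obj, n_comp, n_pix)))        # design matrices
theta_true = tf.constant(rng.normal(size=(n_obj, n_comp, 1)))
y = tf.reduce_sum(theta_true * M_T, axis=1) + 0.01 * rng.normal(size=(n_obj, n_pix))
yinvvar = tf.ones((n_obj, n_pix), dtype=tf.float64) * 1e4          # 1 / 0.01**2

logfml, theta_map, theta_cov = logmarglike_onetransfergaussian(y, yinvvar, M_T)
# theta_map (n_obj, n_comp) should sit close to theta_true[..., 0]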
2167540f9ce177b3363f11adf9f7e54403a11af7091857c9b1484e04f20ea994 | def logmarglike_twotransfergaussians(ells, y, yinvvar, M_T, z, zinvvar, R_T, perm=[0, 2, 1]):
'\n Fit linear model to two Gaussian data sets\n\n Parameters\n ----------\n ells : ndarray (nobj, )\n scaling between the data: y = ell * z\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n z, zinvvar : ndarray (nobj, ..., n_pix_z)\n data and data inverse variances for z\n R_T : ndarray (..., n_components, n_pix_z)\n design matrix of linear model for z\n perm : list\n permutation to get M and R from R_T and M_T\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
log2pi = tf.cast(tf.math.log((2.0 * np.pi)), T)
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
nz = tf.cast(tf.math.count_nonzero(tf.where((zinvvar > 0))), T)
M = tf.transpose(M_T, perm)
R = tf.transpose(R_T, perm)
Hbar = (((ells[(..., None, None)] ** 2) * tf.matmul(R_T, (R * zinvvar[(..., :, None)]))) + tf.matmul(M_T, (M * yinvvar[(..., :, None)])))
etabar = ((ells[(..., None)] * tf.reduce_sum((R_T * (z * zinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1)))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
logdetH = (tf.reduce_sum(tf.where((zinvvar > 0), tf.math.log(zinvvar), (zinvvar * 0)), axis=(- 1)) + tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1)))
xi1 = ((- 0.5) * (((((ny + nz) * log2pi) - logdetH) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) + tf.reduce_sum(((z * z) * zinvvar), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov) | Fit linear model to two Gaussian data sets
Parameters
----------
ells : ndarray (nobj, )
scaling between the data: y = ell * z
y, yinvvar : ndarray (nobj, ..., n_pix_y)
data and data inverse variances
M_T : ndarray (..., n_components, n_pix_y)
design matrix of linear model
z, zinvvar : ndarray (nobj, ..., n_pix_z)
data and data inverse variances for z
R_T : ndarray (..., n_components, n_pix_z)
design matrix of linear model for z
perm : list
permutation to get M and R from R_T and M_T
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
theta_map : ndarray (nobj, ndim)
Best fit MAP parameters
theta_cov : ndarray (nobj, ndim, ndim)
Parameter covariance | gasp/marginallikelihoods_tf.py | logmarglike_twotransfergaussians | ixkael/gasp | 0 | python | def logmarglike_twotransfergaussians(ells, y, yinvvar, M_T, z, zinvvar, R_T, perm=[0, 2, 1]):
'\n Fit linear model to two Gaussian data sets\n\n Parameters\n ----------\n ells : ndarray (nobj, )\n scaling between the data: y = ell * z\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n z, zinvvar : ndarray (nobj, ..., n_pix_z)\n data and data inverse variances for z\n R_T : ndarray (..., n_components, n_pix_z)\n design matrix of linear model for z\n perm : list\n permutation to get M and R from R_T and M_T\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
log2pi = tf.cast(tf.math.log((2.0 * np.pi)), T)
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
nz = tf.cast(tf.math.count_nonzero(tf.where((zinvvar > 0))), T)
M = tf.transpose(M_T, perm)
R = tf.transpose(R_T, perm)
Hbar = (((ells[(..., None, None)] ** 2) * tf.matmul(R_T, (R * zinvvar[(..., :, None)]))) + tf.matmul(M_T, (M * yinvvar[(..., :, None)])))
etabar = ((ells[(..., None)] * tf.reduce_sum((R_T * (z * zinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1)))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
logdetH = (tf.reduce_sum(tf.where((zinvvar > 0), tf.math.log(zinvvar), (zinvvar * 0)), axis=(- 1)) + tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1)))
xi1 = ((- 0.5) * (((((ny + nz) * log2pi) - logdetH) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) + tf.reduce_sum(((z * z) * zinvvar), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov) | def logmarglike_twotransfergaussians(ells, y, yinvvar, M_T, z, zinvvar, R_T, perm=[0, 2, 1]):
'\n Fit linear model to two Gaussian data sets\n\n Parameters\n ----------\n ells : ndarray (nobj, )\n scaling between the data: y = ell * z\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n z, zinvvar : ndarray (nobj, ..., n_pix_z)\n data and data inverse variances for z\n R_T : ndarray (..., n_components, n_pix_z)\n design matrix of linear model for z\n perm : list\n permutation to get M and R from R_T and M_T\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
log2pi = tf.cast(tf.math.log((2.0 * np.pi)), T)
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
nz = tf.cast(tf.math.count_nonzero(tf.where((zinvvar > 0))), T)
M = tf.transpose(M_T, perm)
R = tf.transpose(R_T, perm)
Hbar = (((ells[(..., None, None)] ** 2) * tf.matmul(R_T, (R * zinvvar[(..., :, None)]))) + tf.matmul(M_T, (M * yinvvar[(..., :, None)])))
etabar = ((ells[(..., None)] * tf.reduce_sum((R_T * (z * zinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1)))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
logdetH = (tf.reduce_sum(tf.where((zinvvar > 0), tf.math.log(zinvvar), (zinvvar * 0)), axis=(- 1)) + tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1)))
xi1 = ((- 0.5) * (((((ny + nz) * log2pi) - logdetH) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) + tf.reduce_sum(((z * z) * zinvvar), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov)<|docstring|>Fit linear model to two Gaussian data sets
Parameters
----------
ells : ndarray (nobj, )
scaling between the data: y = ell * z
y, yinvvar : ndarray (nobj, ..., n_pix_y)
data and data inverse variances
M_T : ndarray (..., n_components, n_pix_y)
design matrix of linear model
z, zinvvar : ndarray (nobj, ..., n_pix_z)
data and data inverse variances for z
R_T : ndarray (..., n_components, n_pix_z)
design matrix of linear model for z
perm : list
permutation to get M and R from R_T and M_T
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
theta_map : ndarray (nobj, ndim)
Best fit MAP parameters
theta_cov : ndarray (nobj, ndim, ndim)
Parameter covariance<|endoftext|> |
13d9054af39119912d2697c6fd139de375cdf6003b527a61fa2d6331dbfd81de | def logmarglike_threetransfergaussians(ells, y, yinvvar, M_T, z, zinvvar, R_T, mu, muinvvar):
'\n Fit linear model to three Gaussian data sets\n\n Parameters\n ----------\n ells : ndarray (nobj, )\n scaling between the data: y = ell * z\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n z, zinvvar : ndarray (nobj, ..., n_pix_z)\n data and data variances for y\n R_T : ndarray (..., n_components, n_pix_z)\n design matrix of linear model for z\n mu, muinvvar : ndarray ( ..., n_components)\n data and data variances for y\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
log2pi = tf.cast(tf.math.log((2.0 * np.pi)), T)
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
nobj = tf.cast(tf.shape(y)[0], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
nz = tf.cast(tf.math.count_nonzero(tf.where((zinvvar > 0))), T)
nm = tf.cast(tf.math.count_nonzero(tf.where((muinvvar > 0))), T)
M = tf.transpose(M_T, [0, 2, 1])
R = tf.transpose(R_T, [0, 2, 1])
Hbar = ((((ells[(:, None, None)] ** 2) * tf.matmul(R_T, (R * zinvvar[(..., :, None)]))) + tf.matmul(M_T, (M * yinvvar[(..., :, None)]))) + ((tf.eye(nt, dtype=T)[(None, :, :)] * tf.ones((nobj, 1, 1), dtype=T)) * muinvvar[(..., :, None)]))
etabar = (((ells[(:, None)] * tf.reduce_sum((R_T * (z * zinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((mu * muinvvar)[(..., None, :)], axis=(- 1)))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
logdetH = ((tf.reduce_sum(tf.where((zinvvar > 0), tf.math.log(zinvvar), (zinvvar * 0)), axis=(- 1)) + tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1))) + tf.reduce_sum(tf.where((muinvvar > 0), tf.math.log(muinvvar), (muinvvar * 0)), axis=(- 1)))
xi1 = ((- 0.5) * (((((((ny + nz) + nm) * log2pi) - logdetH) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) + tf.reduce_sum(((z * z) * zinvvar), axis=(- 1))) + tf.reduce_sum(((mu * mu) * muinvvar), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov) | Fit linear model to three Gaussian data sets
Parameters
----------
ells : ndarray (nobj, )
scaling between the data: y = ell * z
y, yinvvar : ndarray (nobj, ..., n_pix_y)
data and data inverse variances
M_T : ndarray (..., n_components, n_pix_y)
design matrix of linear model
z, zinvvar : ndarray (nobj, ..., n_pix_z)
data and data variances for z
R_T : ndarray (..., n_components, n_pix_z)
design matrix of linear model for z
mu, muinvvar : ndarray ( ..., n_components)
data and data variances for mu
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
theta_map : ndarray (nobj, ndim)
Best fit MAP parameters
theta_cov : ndarray (nobj, ndim, ndim)
Parameter covariance | gasp/marginallikelihoods_tf.py | logmarglike_threetransfergaussians | ixkael/gasp | 0 | python | def logmarglike_threetransfergaussians(ells, y, yinvvar, M_T, z, zinvvar, R_T, mu, muinvvar):
'\n Fit linear model to three Gaussian data sets\n\n Parameters\n ----------\n ells : ndarray (nobj, )\n scaling between the data: y = ell * z\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n z, zinvvar : ndarray (nobj, ..., n_pix_z)\n data and data variances for y\n R_T : ndarray (..., n_components, n_pix_z)\n design matrix of linear model for z\n mu, muinvvar : ndarray ( ..., n_components)\n data and data variances for y\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
log2pi = tf.cast(tf.math.log((2.0 * np.pi)), T)
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
nobj = tf.cast(tf.shape(y)[0], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
nz = tf.cast(tf.math.count_nonzero(tf.where((zinvvar > 0))), T)
nm = tf.cast(tf.math.count_nonzero(tf.where((muinvvar > 0))), T)
M = tf.transpose(M_T, [0, 2, 1])
R = tf.transpose(R_T, [0, 2, 1])
Hbar = ((((ells[(:, None, None)] ** 2) * tf.matmul(R_T, (R * zinvvar[(..., :, None)]))) + tf.matmul(M_T, (M * yinvvar[(..., :, None)]))) + ((tf.eye(nt, dtype=T)[(None, :, :)] * tf.ones((nobj, 1, 1), dtype=T)) * muinvvar[(..., :, None)]))
etabar = (((ells[(:, None)] * tf.reduce_sum((R_T * (z * zinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((mu * muinvvar)[(..., None, :)], axis=(- 1)))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
logdetH = ((tf.reduce_sum(tf.where((zinvvar > 0), tf.math.log(zinvvar), (zinvvar * 0)), axis=(- 1)) + tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1))) + tf.reduce_sum(tf.where((muinvvar > 0), tf.math.log(muinvvar), (muinvvar * 0)), axis=(- 1)))
xi1 = ((- 0.5) * (((((((ny + nz) + nm) * log2pi) - logdetH) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) + tf.reduce_sum(((z * z) * zinvvar), axis=(- 1))) + tf.reduce_sum(((mu * mu) * muinvvar), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov) | def logmarglike_threetransfergaussians(ells, y, yinvvar, M_T, z, zinvvar, R_T, mu, muinvvar):
'\n Fit linear model to three Gaussian data sets\n\n Parameters\n ----------\n ells : ndarray (nobj, )\n scaling between the data: y = ell * z\n y, yinvvar : ndarray (nobj, ..., n_pix_y)\n data and data inverse variances\n M_T : ndarray (..., n_components, n_pix_y)\n design matrix of linear model\n z, zinvvar : ndarray (nobj, ..., n_pix_z)\n data and data variances for y\n R_T : ndarray (..., n_components, n_pix_z)\n design matrix of linear model for z\n mu, muinvvar : ndarray ( ..., n_components)\n data and data variances for y\n\n Returns\n -------\n logfml : ndarray (nobj, )\n log likelihood values with parameters marginalised and at best fit\n theta_map : ndarray (nobj, ndim)\n Best fit MAP parameters\n theta_cov : ndarray (nobj, ndim, ndim)\n Parameter covariance\n\n '
log2pi = tf.cast(tf.math.log((2.0 * np.pi)), T)
nt = tf.cast(tf.shape(M_T)[(- 2)], T)
nobj = tf.cast(tf.shape(y)[0], T)
ny = tf.cast(tf.math.count_nonzero(tf.where((yinvvar > 0))), T)
nz = tf.cast(tf.math.count_nonzero(tf.where((zinvvar > 0))), T)
nm = tf.cast(tf.math.count_nonzero(tf.where((muinvvar > 0))), T)
M = tf.transpose(M_T, [0, 2, 1])
R = tf.transpose(R_T, [0, 2, 1])
Hbar = ((((ells[(:, None, None)] ** 2) * tf.matmul(R_T, (R * zinvvar[(..., :, None)]))) + tf.matmul(M_T, (M * yinvvar[(..., :, None)]))) + ((tf.eye(nt, dtype=T)[(None, :, :)] * tf.ones((nobj, 1, 1), dtype=T)) * muinvvar[(..., :, None)]))
etabar = (((ells[(:, None)] * tf.reduce_sum((R_T * (z * zinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((M_T * (y * yinvvar)[(..., None, :)]), axis=(- 1))) + tf.reduce_sum((mu * muinvvar)[(..., None, :)], axis=(- 1)))
theta_map = tf.linalg.solve(Hbar, etabar[(..., None)])[(..., 0)]
theta_cov = tf.linalg.inv(Hbar)
logdetH = ((tf.reduce_sum(tf.where((zinvvar > 0), tf.math.log(zinvvar), (zinvvar * 0)), axis=(- 1)) + tf.reduce_sum(tf.where((yinvvar > 0), tf.math.log(yinvvar), (yinvvar * 0)), axis=(- 1))) + tf.reduce_sum(tf.where((muinvvar > 0), tf.math.log(muinvvar), (muinvvar * 0)), axis=(- 1)))
xi1 = ((- 0.5) * (((((((ny + nz) + nm) * log2pi) - logdetH) + tf.reduce_sum(((y * y) * yinvvar), axis=(- 1))) + tf.reduce_sum(((z * z) * zinvvar), axis=(- 1))) + tf.reduce_sum(((mu * mu) * muinvvar), axis=(- 1))))
logdetHbar = tf.linalg.logdet(Hbar)
xi2 = ((- 0.5) * (((nt * log2pi) - logdetHbar) + tf.reduce_sum((etabar * theta_map), axis=(- 1))))
logfml = (xi1 - xi2)
return (logfml, theta_map, theta_cov)<|docstring|>Fit linear model to three Gaussian data sets
Parameters
----------
ells : ndarray (nobj, )
scaling between the data: y = ell * z
y, yinvvar : ndarray (nobj, ..., n_pix_y)
data and data inverse variances
M_T : ndarray (..., n_components, n_pix_y)
design matrix of linear model
z, zinvvar : ndarray (nobj, ..., n_pix_z)
data and data variances for z
R_T : ndarray (..., n_components, n_pix_z)
design matrix of linear model for z
mu, muinvvar : ndarray ( ..., n_components)
data and data variances for mu
Returns
-------
logfml : ndarray (nobj, )
log likelihood values with parameters marginalised and at best fit
theta_map : ndarray (nobj, ndim)
Best fit MAP parameters
theta_cov : ndarray (nobj, ndim, ndim)
Parameter covariance<|endoftext|> |
e2413d408f4db57745dc0ffb096c2f93004db1649b3130a95347b35f9f1746d4 | def _main():
'Main function.\n '
args = _parse_input_arguments()
(mesh, point_data, field_data) = meshplex.read(args.filename, timestep=args.timestep)
num_nodes = len(mesh.node_coords)
if (not (args.mu is None)):
mu = args.mu
print(('Using mu=%g from command line.' % mu))
elif ('mu' in field_data):
mu = field_data['mu']
else:
raise ValueError('Parameter mu not found in file. Please provide on command line.')
if (not (args.g is None)):
g = args.g
print(('Using g=%g from command line.' % g))
elif ('g' in field_data):
g = field_data['g']
else:
raise ValueError('Parameter g not found in file. Please provide on command line.')
nls_modeleval = nme.NlsModelEvaluator(mesh=mesh, V=point_data['V'], A=point_data['A'], preconditioner_type='exact', num_amg_cycles=1)
psi0 = (point_data['psi'][(:, 0)] + (1j * point_data['psi'][(:, 1)]))
if args.bordering:
x0 = np.empty((num_nodes + 1), dtype=complex)
x0[0:num_nodes] = psi0
x0[(- 1)] = 0.0
modeleval = bme.BorderedModelEvaluator(nls_modeleval)
else:
x0 = psi0
modeleval = nls_modeleval
if (not args.series):
(eigenvals, X) = _compute_eigenvalues(args.operator, args.eigenvalue_type, args.num_eigenvalues, None, x0[(:, None)], modeleval, mu, g)
print('The following eigenvalues were computed:')
print(sorted(eigenvals))
print('Residuals:')
for k in range(len(eigenvals)):
z = (X[(0::2, k)] + (1j * X[(1::2, k)]))
z /= np.sqrt(modeleval.inner_product(z, z))
y0 = (modeleval.get_jacobian(x0, mu, g) * z)
print(np.linalg.norm((y0 - (eigenvals[k] * z))))
print('Storing corresponding eigenstates...', end=' ')
k = 0
for k in range(len(eigenvals)):
filename = ('eigen%d.vtu' % k)
z = (X[(0::2, k)] + (1j * X[(1::2, k)]))
z /= np.sqrt(modeleval.inner_product(z, z))
mesh.write(filename, point_data={'psi': point_data['psi'], 'A': point_data['A'], 'V': point_data['V'], 'eigen': z}, field_data={'g': g, 'mu': mu, 'eigenvalue': eigenvals[k]})
print('done.')
else:
X = np.ones((len(mesh.node_coords), 1))
steps = 51
mus = np.linspace(0.0, 0.5, steps)
eigenvals_list = []
for mu in mus:
modeleval.set_parameter(mu)
(eigenvals, X) = _compute_eigenvalues(args.operator, args.eigenvalue_type, args.num_eigenvalues, X[(:, 0)], modeleval, mu, g)
eigenvals_list.append(eigenvals)
_plot_eigenvalue_series(mus, eigenvals_list)
pp.title(('%s eigenvalues of %s' % (args.eigenvalue_type, args.operator)))
pp.xlabel('$\\mu$')
pp.show()
return | Main function. | tools/operator_eigenvalues.py | _main | nschloe/pynosh | 8 | python | def _main():
'\n '
args = _parse_input_arguments()
(mesh, point_data, field_data) = meshplex.read(args.filename, timestep=args.timestep)
num_nodes = len(mesh.node_coords)
if (not (args.mu is None)):
mu = args.mu
print(('Using mu=%g from command line.' % mu))
elif ('mu' in field_data):
mu = field_data['mu']
else:
raise ValueError('Parameter mu not found in file. Please provide on command line.')
if (not (args.g is None)):
g = args.g
print(('Using g=%g from command line.' % g))
elif ('g' in field_data):
g = field_data['g']
else:
raise ValueError('Parameter g not found in file. Please provide on command line.')
nls_modeleval = nme.NlsModelEvaluator(mesh=mesh, V=point_data['V'], A=point_data['A'], preconditioner_type='exact', num_amg_cycles=1)
psi0 = (point_data['psi'][(:, 0)] + (1j * point_data['psi'][(:, 1)]))
if args.bordering:
x0 = np.empty((num_nodes + 1), dtype=complex)
x0[0:num_nodes] = psi0
x0[(- 1)] = 0.0
modeleval = bme.BorderedModelEvaluator(nls_modeleval)
else:
x0 = psi0
modeleval = nls_modeleval
if (not args.series):
(eigenvals, X) = _compute_eigenvalues(args.operator, args.eigenvalue_type, args.num_eigenvalues, None, x0[(:, None)], modeleval, mu, g)
print('The following eigenvalues were computed:')
print(sorted(eigenvals))
print('Residuals:')
for k in range(len(eigenvals)):
z = (X[(0::2, k)] + (1j * X[(1::2, k)]))
z /= np.sqrt(modeleval.inner_product(z, z))
y0 = (modeleval.get_jacobian(x0, mu, g) * z)
print(np.linalg.norm((y0 - (eigenvals[k] * z))))
print('Storing corresponding eigenstates...', end=' ')
k = 0
for k in range(len(eigenvals)):
filename = ('eigen%d.vtu' % k)
z = (X[(0::2, k)] + (1j * X[(1::2, k)]))
z /= np.sqrt(modeleval.inner_product(z, z))
mesh.write(filename, point_data={'psi': point_data['psi'], 'A': point_data['A'], 'V': point_data['V'], 'eigen': z}, field_data={'g': g, 'mu': mu, 'eigenvalue': eigenvals[k]})
print('done.')
else:
X = np.ones((len(mesh.node_coords), 1))
steps = 51
mus = np.linspace(0.0, 0.5, steps)
eigenvals_list = []
for mu in mus:
modeleval.set_parameter(mu)
(eigenvals, X) = _compute_eigenvalues(args.operator, args.eigenvalue_type, args.num_eigenvalues, X[(:, 0)], modeleval, mu, g)
eigenvals_list.append(eigenvals)
_plot_eigenvalue_series(mus, eigenvals_list)
pp.title(('%s eigenvalues of %s' % (args.eigenvalue_type, args.operator)))
pp.xlabel('$\\mu$')
pp.show()
return | def _main():
'\n '
args = _parse_input_arguments()
(mesh, point_data, field_data) = meshplex.read(args.filename, timestep=args.timestep)
num_nodes = len(mesh.node_coords)
if (not (args.mu is None)):
mu = args.mu
print(('Using mu=%g from command line.' % mu))
elif ('mu' in field_data):
mu = field_data['mu']
else:
raise ValueError('Parameter mu not found in file. Please provide on command line.')
if (not (args.g is None)):
g = args.g
print(('Using g=%g from command line.' % g))
elif ('g' in field_data):
g = field_data['g']
else:
raise ValueError('Parameter g not found in file. Please provide on command line.')
nls_modeleval = nme.NlsModelEvaluator(mesh=mesh, V=point_data['V'], A=point_data['A'], preconditioner_type='exact', num_amg_cycles=1)
psi0 = (point_data['psi'][(:, 0)] + (1j * point_data['psi'][(:, 1)]))
if args.bordering:
x0 = np.empty((num_nodes + 1), dtype=complex)
x0[0:num_nodes] = psi0
x0[(- 1)] = 0.0
modeleval = bme.BorderedModelEvaluator(nls_modeleval)
else:
x0 = psi0
modeleval = nls_modeleval
if (not args.series):
(eigenvals, X) = _compute_eigenvalues(args.operator, args.eigenvalue_type, args.num_eigenvalues, None, x0[(:, None)], modeleval, mu, g)
print('The following eigenvalues were computed:')
print(sorted(eigenvals))
print('Residuals:')
for k in range(len(eigenvals)):
z = (X[(0::2, k)] + (1j * X[(1::2, k)]))
z /= np.sqrt(modeleval.inner_product(z, z))
y0 = (modeleval.get_jacobian(x0, mu, g) * z)
print(np.linalg.norm((y0 - (eigenvals[k] * z))))
print('Storing corresponding eigenstates...', end=' ')
k = 0
for k in range(len(eigenvals)):
filename = ('eigen%d.vtu' % k)
z = (X[(0::2, k)] + (1j * X[(1::2, k)]))
z /= np.sqrt(modeleval.inner_product(z, z))
mesh.write(filename, point_data={'psi': point_data['psi'], 'A': point_data['A'], 'V': point_data['V'], 'eigen': z}, field_data={'g': g, 'mu': mu, 'eigenvalue': eigenvals[k]})
print('done.')
else:
X = np.ones((len(mesh.node_coords), 1))
steps = 51
mus = np.linspace(0.0, 0.5, steps)
eigenvals_list = []
for mu in mus:
modeleval.set_parameter(mu)
(eigenvals, X) = _compute_eigenvalues(args.operator, args.eigenvalue_type, args.num_eigenvalues, X[(:, 0)], modeleval, mu, g)
eigenvals_list.append(eigenvals)
_plot_eigenvalue_series(mus, eigenvals_list)
pp.title(('%s eigenvalues of %s' % (args.eigenvalue_type, args.operator)))
pp.xlabel('$\\mu$')
pp.show()
return<|docstring|>Main function.<|endoftext|> |
47cef6ed1f8bfea33b59f6478d4548f1675d13664d61fd9b3590fdefb256a51e | def _complex2real(op):
'For a given complex-valued operator C^n -> C^n, returns the\n corresponding real-valued operator R^{2n} -> R^{2n}.'
def _jacobian_wrap_apply(x):
z = (x[0::2] + (1j * x[1::2]))
z_out = (op * z)
x_out = np.empty(x.shape)
x_out[0::2] = z_out.real
x_out[1::2] = z_out.imag
return x_out
return LinearOperator(((2 * op.shape[0]), (2 * op.shape[1])), _jacobian_wrap_apply, dtype=float) | For a given complex-valued operator C^n -> C^n, returns the
corresponding real-valued operator R^{2n} -> R^{2n}. | tools/operator_eigenvalues.py | _complex2real | nschloe/pynosh | 8 | python | def _complex2real(op):
'For a given complex-valued operator C^n -> C^n, returns the\n corresponding real-valued operator R^{2n} -> R^{2n}.'
def _jacobian_wrap_apply(x):
z = (x[0::2] + (1j * x[1::2]))
z_out = (op * z)
x_out = np.empty(x.shape)
x_out[0::2] = z_out.real
x_out[1::2] = z_out.imag
return x_out
return LinearOperator(((2 * op.shape[0]), (2 * op.shape[1])), _jacobian_wrap_apply, dtype=float) | def _complex2real(op):
'For a given complex-valued operator C^n -> C^n, returns the\n corresponding real-valued operator R^{2n} -> R^{2n}.'
def _jacobian_wrap_apply(x):
z = (x[0::2] + (1j * x[1::2]))
z_out = (op * z)
x_out = np.empty(x.shape)
x_out[0::2] = z_out.real
x_out[1::2] = z_out.imag
return x_out
return LinearOperator(((2 * op.shape[0]), (2 * op.shape[1])), _jacobian_wrap_apply, dtype=float)<|docstring|>For a given complex-valued operator C^n -> C^n, returns the
corresponding real-valued operator R^{2n} -> R^{2n}.<|endoftext|> |
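A usage sketch for the wrapper above: embed a complex Hermitian operator as a real one acting on interleaved (real, imaginary) components and hand it to ARPACK. The toy matrix is an assumption; aslinearoperator and eigsh are standard SciPy:

import numpy as np
from scipy.sparse.linalg import aslinearoperator, eigsh

n = 40
rng = np.random.default_rng(3)
A = rng.normal(size=(n, n)) + 1j * rng.normal(size=(n, n))
A = 0.5 * (A + A.conj().T)                     # Hermitian toy operator
op_r = _complex2real(aslinearoperator(A))      # real (2n x 2n) operator on interleaved vectors

vals = eigsh(op_r, k=4, which='LA', return_eigenvectors=False)
# the two largest eigenvalues of A, each appearing twice in the doubled real spectrum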
36f58c77b4d0faafdb12ea0de9ef8115767a317dae7e06644fa4fd27ecd55c93 | def _plot_eigenvalue_series(x, eigenvals_list):
"Plotting series of eigenvalues can be hard to make visually appealing.\n The reason for this is that at each data point, the values are mostly\n ordered in some way, not respecting previous calculations. When two\n eigenvalues 'cross' -- and this notion doesn't actually exist -- then the\n colors of the two crossing parts change.\n This function tries to take care of this by guessing which are the\n corresponding values by linear extrapolation.\n "
def _linear_extrapolation(x0, x1, Y0, Y1, x2):
'Linear extrapolation of the data sets (x0,Y0), (x1,Y1) to x2.\n '
return (((((Y1 - Y0) * x2) + (x1 * Y0)) - (Y1 * x0)) / (x1 - x0))
def _permutation_match(y, y2):
'Returns the permutation of y that best matches y2.\n '
n = len(y2)
assert (len(y) == n)
y_new = np.empty(n)
y_masked = np.ma.array(y, mask=np.zeros(n, dtype=bool))
for k in range(n):
min_index = np.argmin(abs((y_masked - y2[k])))
y_new[k] = y_masked[min_index]
y_masked.mask[min_index] = True
return y_new
len_list = len(eigenvals_list)
num_eigenvalues = len(eigenvals_list[0])
reordered_eigenvalues = np.zeros((num_eigenvalues, len_list), dtype=float)
reordered_eigenvalues[(:, 0)] = eigenvals_list[0]
eigenvals_extrapolation = reordered_eigenvalues[(:, 0)]
for (k, eigenvalues) in enumerate(eigenvals_list[1:]):
reordered_eigenvalues[(:, (k + 1))] = _permutation_match(eigenvalues, eigenvals_extrapolation)
if ((k + 2) < len(x)):
eigenvals_extrapolation = _linear_extrapolation(x[k], x[(k + 1)], reordered_eigenvalues[(:, k)], reordered_eigenvalues[(:, (k + 1))], x[(k + 2)])
for k in range(num_eigenvalues):
pp.plot(x, reordered_eigenvalues[(k, :)], '-x')
return | Plotting series of eigenvalues can be hard to make visually appealing.
The reason for this is that at each data point, the values are mostly
ordered in some way, not respecting previous calculations. When two
eigenvalues 'cross' -- and this notion doesn't actually exist -- then the
colors of the two crossing parts change.
This function tries to take care of this by guessing which are the
corresponding values by linear extrapolation. | tools/operator_eigenvalues.py | _plot_eigenvalue_series | nschloe/pynosh | 8 | python | def _plot_eigenvalue_series(x, eigenvals_list):
"Plotting series of eigenvalues can be hard to make visually appealing.\n The reason for this is that at each data point, the values are mostly\n ordered in some way, not respecting previous calculations. When two\n eigenvalues 'cross' -- and this notion doesn't actually exist -- then the\n colors of the two crossing parts change.\n This function tries to take care of this by guessing which are the\n corresponding values by linear extrapolation.\n "
def _linear_extrapolation(x0, x1, Y0, Y1, x2):
'Linear extrapolation of the data sets (x0,Y0), (x1,Y1) to x2.\n '
return (((((Y1 - Y0) * x2) + (x1 * Y0)) - (Y1 * x0)) / (x1 - x0))
def _permutation_match(y, y2):
'Returns the permutation of y that best matches y2.\n '
n = len(y2)
assert (len(y) == n)
y_new = np.empty(n)
y_masked = np.ma.array(y, mask=np.zeros(n, dtype=bool))
for k in range(n):
min_index = np.argmin(abs((y_masked - y2[k])))
y_new[k] = y_masked[min_index]
y_masked.mask[min_index] = True
return y_new
len_list = len(eigenvals_list)
num_eigenvalues = len(eigenvals_list[0])
reordered_eigenvalues = np.zeros((num_eigenvalues, len_list), dtype=float)
reordered_eigenvalues[(:, 0)] = eigenvals_list[0]
eigenvals_extrapolation = reordered_eigenvalues[(:, 0)]
for (k, eigenvalues) in enumerate(eigenvals_list[1:]):
reordered_eigenvalues[(:, (k + 1))] = _permutation_match(eigenvalues, eigenvals_extrapolation)
if ((k + 2) < len(x)):
eigenvals_extrapolation = _linear_extrapolation(x[k], x[(k + 1)], reordered_eigenvalues[(:, k)], reordered_eigenvalues[(:, (k + 1))], x[(k + 2)])
for k in range(num_eigenvalues):
pp.plot(x, reordered_eigenvalues[(k, :)], '-x')
return | def _plot_eigenvalue_series(x, eigenvals_list):
"Plotting series of eigenvalues can be hard to make visually appealing.\n The reason for this is that at each data point, the values are mostly\n ordered in some way, not respecting previous calculations. When two\n eigenvalues 'cross' -- and this notion doesn't actually exist -- then the\n colors of the two crossing parts change.\n This function tries to take care of this by guessing which are the\n corresponding values by linear extrapolation.\n "
def _linear_extrapolation(x0, x1, Y0, Y1, x2):
'Linear extrapolation of the data sets (x0,Y0), (x1,Y1) to x2.\n '
return (((((Y1 - Y0) * x2) + (x1 * Y0)) - (Y1 * x0)) / (x1 - x0))
def _permutation_match(y, y2):
'Returns the permutation of y that best matches y2.\n '
n = len(y2)
assert (len(y) == n)
y_new = np.empty(n)
y_masked = np.ma.array(y, mask=np.zeros(n, dtype=bool))
for k in range(n):
min_index = np.argmin(abs((y_masked - y2[k])))
y_new[k] = y_masked[min_index]
y_masked.mask[min_index] = True
return y_new
len_list = len(eigenvals_list)
num_eigenvalues = len(eigenvals_list[0])
reordered_eigenvalues = np.zeros((num_eigenvalues, len_list), dtype=float)
reordered_eigenvalues[(:, 0)] = eigenvals_list[0]
eigenvals_extrapolation = reordered_eigenvalues[(:, 0)]
for (k, eigenvalues) in enumerate(eigenvals_list[1:]):
reordered_eigenvalues[(:, (k + 1))] = _permutation_match(eigenvalues, eigenvals_extrapolation)
if ((k + 2) < len(x)):
eigenvals_extrapolation = _linear_extrapolation(x[k], x[(k + 1)], reordered_eigenvalues[(:, k)], reordered_eigenvalues[(:, (k + 1))], x[(k + 2)])
for k in range(num_eigenvalues):
pp.plot(x, reordered_eigenvalues[(k, :)], '-x')
return<|docstring|>Plotting series of eigenvalues can be hard to make visually appealing.
The reason for this is that at each data point, the values are mostly
ordered in some way, not respecting previous calculations. When two
eigenvalues 'cross' -- and this notion doesn't actually exist -- then the
colors of the two crossing parts change.
This function tries to take care of this by guessing which are the
corresponding values by linear extrapolation.<|endoftext|> |
a7219372387fe792de429e597e200bba02eb6cdd9e1aa7e0a83aebacbb747512 | def _parse_input_arguments():
'Parse input arguments.\n '
import argparse
parser = argparse.ArgumentParser(description='Compute a few eigenvalues of a specified operator.')
parser.add_argument('filename', metavar='FILE', type=str, help='ExodusII file containing the geometry and initial state')
parser.add_argument('--timestep', '-t', metavar='TIMESTEP', dest='timestep', type=int, default=0, help='read a particular time step (default: 0)')
parser.add_argument('--operator', '-o', metavar='OPERATOR', required=True, choices=['k', 'p', 'j', 'pj'], help='operator to compute the eigenvalues of (default: k)')
parser.add_argument('--numeigenvalues', '-k', dest='num_eigenvalues', type=int, default=6, help='the number of eigenvalues to compute (default: 6)')
parser.add_argument('--series', '-s', dest='series', action='store_true', default=False, help='compute a series of eigenvalues for different mu (default: False)')
parser.add_argument('--type', '-y', dest='eigenvalue_type', default='SM', choices=['SM', 'LM'], help='the type of eigenvalues to compute (default: SM (smallest magnitude))')
parser.add_argument('--mu', '-m', dest='mu', type=float, help='magnetic vector potential multiplier')
parser.add_argument('--g', '-g', dest='g', type=float, help='coupling parameter')
parser.add_argument('--bordering', '-b', default=False, action='store_true', help='use the bordered formulation to counter the nullspace (default: false)')
args = parser.parse_args()
return args | Parse input arguments. | tools/operator_eigenvalues.py | _parse_input_arguments | nschloe/pynosh | 8 | python | def _parse_input_arguments():
'\n '
import argparse
parser = argparse.ArgumentParser(description='Compute a few eigenvalues of a specified operator.')
parser.add_argument('filename', metavar='FILE', type=str, help='ExodusII file containing the geometry and initial state')
parser.add_argument('--timestep', '-t', metavar='TIMESTEP', dest='timestep', type=int, default=0, help='read a particular time step (default: 0)')
parser.add_argument('--operator', '-o', metavar='OPERATOR', required=True, choices=['k', 'p', 'j', 'pj'], help='operator to compute the eigenvalues of (default: k)')
parser.add_argument('--numeigenvalues', '-k', dest='num_eigenvalues', type=int, default=6, help='the number of eigenvalues to compute (default: 6)')
parser.add_argument('--series', '-s', dest='series', action='store_true', default=False, help='compute a series of eigenvalues for different mu (default: False)')
parser.add_argument('--type', '-y', dest='eigenvalue_type', default='SM', choices=['SM', 'LM'], help='the type of eigenvalues to compute (default: SM (smallest magnitude))')
parser.add_argument('--mu', '-m', dest='mu', type=float, help='magnetic vector potential multiplier')
parser.add_argument('--g', '-g', dest='g', type=float, help='coupling parameter')
parser.add_argument('--bordering', '-b', default=False, action='store_true', help='use the bordered formulation to counter the nullspace (default: false)')
args = parser.parse_args()
return args | def _parse_input_arguments():
'\n '
import argparse
parser = argparse.ArgumentParser(description='Compute a few eigenvalues of a specified operator.')
parser.add_argument('filename', metavar='FILE', type=str, help='ExodusII file containing the geometry and initial state')
parser.add_argument('--timestep', '-t', metavar='TIMESTEP', dest='timestep', type=int, default=0, help='read a particular time step (default: 0)')
parser.add_argument('--operator', '-o', metavar='OPERATOR', required=True, choices=['k', 'p', 'j', 'pj'], help='operator to compute the eigenvalues of (default: k)')
parser.add_argument('--numeigenvalues', '-k', dest='num_eigenvalues', type=int, default=6, help='the number of eigenvalues to compute (default: 6)')
parser.add_argument('--series', '-s', dest='series', action='store_true', default=False, help='compute a series of eigenvalues for different mu (default: False)')
parser.add_argument('--type', '-y', dest='eigenvalue_type', default='SM', choices=['SM', 'LM'], help='the type of eigenvalues to compute (default: SM (smallest magnitude))')
parser.add_argument('--mu', '-m', dest='mu', type=float, help='magnetic vector potential multiplier')
parser.add_argument('--g', '-g', dest='g', type=float, help='coupling parameter')
parser.add_argument('--bordering', '-b', default=False, action='store_true', help='use the bordered formulation to counter the nullspace (default: false)')
args = parser.parse_args()
return args<|docstring|>Parse input arguments.<|endoftext|> |
0838b41445fa350740aa13640ad60a62805c457ecb97f1c51aab2d4b2f405467 | def _linear_extrapolation(x0, x1, Y0, Y1, x2):
'Linear extrapolation of the data sets (x0,Y0), (x1,Y1) to x2.\n '
return (((((Y1 - Y0) * x2) + (x1 * Y0)) - (Y1 * x0)) / (x1 - x0)) | Linear extrapolation of the data sets (x0,Y0), (x1,Y1) to x2. | tools/operator_eigenvalues.py | _linear_extrapolation | nschloe/pynosh | 8 | python | def _linear_extrapolation(x0, x1, Y0, Y1, x2):
'\n '
return (((((Y1 - Y0) * x2) + (x1 * Y0)) - (Y1 * x0)) / (x1 - x0)) | def _linear_extrapolation(x0, x1, Y0, Y1, x2):
'\n '
return (((((Y1 - Y0) * x2) + (x1 * Y0)) - (Y1 * x0)) / (x1 - x0))<|docstring|>Linear extrapolation of the data sets (x0,Y0), (x1,Y1) to x2.<|endoftext|> |
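A quick standalone check of the extrapolation formula in the record above; the sample points below are invented for illustration.
import numpy as np

def linear_extrapolation(x0, x1, Y0, Y1, x2):
    # Line through (x0, Y0) and (x1, Y1), evaluated at x2 -- same algebra as in the record.
    return (((Y1 - Y0) * x2) + (x1 * Y0) - (Y1 * x0)) / (x1 - x0)

Y0 = np.array([1.0, -2.0])
Y1 = np.array([3.0, 0.0])
# Both components grow by 2 per unit of x, so stepping from x=1 to x=2 adds another 2.
print(linear_extrapolation(0.0, 1.0, Y0, Y1, 2.0))  # [5. 2.]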
bb4fb144b4a7900c45b191f3ff69804d3116e7907485ac1b350fc00eda5a866e | def _permutation_match(y, y2):
'Returns the permutation of y that best matches y2.\n '
n = len(y2)
assert (len(y) == n)
y_new = np.empty(n)
y_masked = np.ma.array(y, mask=np.zeros(n, dtype=bool))
for k in range(n):
min_index = np.argmin(abs((y_masked - y2[k])))
y_new[k] = y_masked[min_index]
y_masked.mask[min_index] = True
return y_new | Returns the permutation of y that best matches y2. | tools/operator_eigenvalues.py | _permutation_match | nschloe/pynosh | 8 | python | def _permutation_match(y, y2):
'\n '
n = len(y2)
assert (len(y) == n)
y_new = np.empty(n)
y_masked = np.ma.array(y, mask=np.zeros(n, dtype=bool))
for k in range(n):
min_index = np.argmin(abs((y_masked - y2[k])))
y_new[k] = y_masked[min_index]
y_masked.mask[min_index] = True
return y_new | def _permutation_match(y, y2):
'\n '
n = len(y2)
assert (len(y) == n)
y_new = np.empty(n)
y_masked = np.ma.array(y, mask=np.zeros(n, dtype=bool))
for k in range(n):
min_index = np.argmin(abs((y_masked - y2[k])))
y_new[k] = y_masked[min_index]
y_masked.mask[min_index] = True
return y_new<|docstring|>Returns the permutation of y that best matches y2.<|endoftext|> |
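A self-contained sketch of the greedy nearest-value matching performed by _permutation_match above; the eigenvalue arrays are made up for illustration.
import numpy as np

def permutation_match(y, y2):
    # For each target value in y2, grab the closest still-unused entry of y.
    y_new = np.empty(len(y2))
    y_masked = np.ma.array(y, mask=np.zeros(len(y), dtype=bool))
    for k in range(len(y2)):
        min_index = np.argmin(abs(y_masked - y2[k]))
        y_new[k] = y_masked[min_index]
        y_masked.mask[min_index] = True
    return y_new

# The solver returned [3, 1, 2] but the extrapolated guess was roughly [1, 2, 3]:
print(permutation_match(np.array([3.0, 1.0, 2.0]), np.array([1.1, 2.1, 2.9])))  # [1. 2. 3.]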
e4fb7c2d8984f3012ee2592d077f96a3f3614eb1a436f5531fdfd7a3cc2d558e | def test_wikipedia_example1(self):
'Test of Wikipedia example\n\n The example for the following QR decomposition is taken from\n http://en.wikipedia.org/wiki/QR_decomposition#Using_the_Gram.E2.80.93Schmidt_process\n '
A = np.array([[12, (- 51), 4], [6, 167, (- 68)], [(- 4), 24, (- 41)]], dtype=np.float64)
(Q, R) = qr_decomposition.gram_schmidt_process(A)
Q_desired = np.array([[0.8571, (- 0.3943), (- 0.3314)], [0.4286, 0.9029, 0.0343], [(- 0.2857), 0.1714, (- 0.9429)]], dtype=np.float64)
R_desired = np.array([[14, 21, (- 14)], [0, 175, (- 70)], [0, 0, 35]], dtype=np.float64)
npt.assert_almost_equal(Q, Q_desired, 4)
npt.assert_almost_equal(R, R_desired, 4) | Test of Wikipedia example
The example for the following QR decomposition is taken from
http://en.wikipedia.org/wiki/QR_decomposition#Using_the_Gram.E2.80.93Schmidt_process | qr_decomposition/tests/test_gram_schmidt_process.py | test_wikipedia_example1 | QGravityGRGW/qr_decomposition | 20 | python | def test_wikipedia_example1(self):
'Test of Wikipedia example\n\n The example for the following QR decomposition is taken from\n http://en.wikipedia.org/wiki/QR_decomposition#Using_the_Gram.E2.80.93Schmidt_process\n '
A = np.array([[12, (- 51), 4], [6, 167, (- 68)], [(- 4), 24, (- 41)]], dtype=np.float64)
(Q, R) = qr_decomposition.gram_schmidt_process(A)
Q_desired = np.array([[0.8571, (- 0.3943), (- 0.3314)], [0.4286, 0.9029, 0.0343], [(- 0.2857), 0.1714, (- 0.9429)]], dtype=np.float64)
R_desired = np.array([[14, 21, (- 14)], [0, 175, (- 70)], [0, 0, 35]], dtype=np.float64)
npt.assert_almost_equal(Q, Q_desired, 4)
npt.assert_almost_equal(R, R_desired, 4) | def test_wikipedia_example1(self):
'Test of Wikipedia example\n\n The example for the following QR decomposition is taken from\n http://en.wikipedia.org/wiki/QR_decomposition#Using_the_Gram.E2.80.93Schmidt_process\n '
A = np.array([[12, (- 51), 4], [6, 167, (- 68)], [(- 4), 24, (- 41)]], dtype=np.float64)
(Q, R) = qr_decomposition.gram_schmidt_process(A)
Q_desired = np.array([[0.8571, (- 0.3943), (- 0.3314)], [0.4286, 0.9029, 0.0343], [(- 0.2857), 0.1714, (- 0.9429)]], dtype=np.float64)
R_desired = np.array([[14, 21, (- 14)], [0, 175, (- 70)], [0, 0, 35]], dtype=np.float64)
npt.assert_almost_equal(Q, Q_desired, 4)
npt.assert_almost_equal(R, R_desired, 4)<|docstring|>Test of Wikipedia example
The example for the following QR decomposition is taken from
http://en.wikipedia.org/wiki/QR_decomposition#Using_the_Gram.E2.80.93Schmidt_process<|endoftext|> |
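Independent of which routine produced Q and R (the gram_schmidt_process under test, or numpy's built-in used below as a stand-in), a factorisation can also be sanity-checked by its defining properties rather than by fixed reference matrices:
import numpy as np

A = np.array([[12.0, -51.0, 4.0], [6.0, 167.0, -68.0], [-4.0, 24.0, -41.0]])
Q, R = np.linalg.qr(A)  # stand-in routine; column signs may differ between implementations

assert np.allclose(Q @ R, A)             # the product reconstructs A
assert np.allclose(Q.T @ Q, np.eye(3))   # Q has orthonormal columns
assert np.allclose(R, np.triu(R))        # R is upper triangular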
ded2612ee281b46d88cb20806cd42af6d6013ad119d175066ae1bb94ac040af5 | def f(x):
'\n Quadratic function.\n '
return ((x ** 2) + 5) | Quadratic function. | f.py | f | brandaogbs/mf | 0 | python | def f(x):
'\n \n '
return ((x ** 2) + 5) | def f(x):
'\n \n '
return ((x ** 2) + 5)<|docstring|>Quadratic function.<|endoftext|> |
fc711b75b735df2507b20a656a28cd0dcf7eceab6d1af50bea16d8072fb8fe8f | def df(x):
'\n Derivative of `f` with respect to `x`.\n '
return (2 * x) | Derivative of `f` with respect to `x`. | f.py | df | brandaogbs/mf | 0 | python | def df(x):
'\n \n '
return (2 * x) | def df(x):
'\n \n '
return (2 * x)<|docstring|>Derivative of `f` with respect to `x`.<|endoftext|> |
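A small numerical cross-check tying f and df together; the step size and sample points are arbitrary.
def f(x):
    return (x ** 2) + 5

def df(x):
    return 2 * x

h = 1e-6
for x in (-3.0, 0.0, 2.5):
    central = (f(x + h) - f(x - h)) / (2 * h)  # central-difference estimate of the slope
    assert abs(central - df(x)) < 1e-4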
b68a14e0b9ad24d85a9b96beb22804ce322722944b73f030704e3d7e74a10c80 | def error(self, arg, get=False):
'Short summary.\n\n Parameters\n ----------\n arg : str\n String to print\n get : bool\n If true, returns a string with the formated string\n\n Returns\n -------\n str\n If get = true, returns a string with the formated string\n\n '
if (not get):
print((Fore.RED + '[ERROR]: {}'.format(arg)))
print(Style.RESET_ALL)
exit((- 1))
else:
return '[ERROR]: {}'.format(arg) | Short summary.
Parameters
----------
arg : str
String to print
get : bool
If true, returns a string with the formated string
Returns
-------
str
If get = true, returns a string with the formated string | FunTOTP/interface.py | error | Z33DD/FunTOTP | 3 | python | def error(self, arg, get=False):
'Short summary.\n\n Parameters\n ----------\n arg : str\n String to print\n get : bool\n If true, returns a string with the formated string\n\n Returns\n -------\n str\n If get = true, returns a string with the formated string\n\n '
if (not get):
print((Fore.RED + '[ERROR]: {}'.format(arg)))
print(Style.RESET_ALL)
exit((- 1))
else:
return '[ERROR]: {}'.format(arg) | def error(self, arg, get=False):
'Short summary.\n\n Parameters\n ----------\n arg : str\n String to print\n get : bool\n If true, returns a string with the formated string\n\n Returns\n -------\n str\n If get = true, returns a string with the formated string\n\n '
if (not get):
print((Fore.RED + '[ERROR]: {}'.format(arg)))
print(Style.RESET_ALL)
exit((- 1))
else:
return '[ERROR]: {}'.format(arg)<|docstring|>Short summary.
Parameters
----------
arg : str
String to print
get : bool
If true, returns a string with the formated string
Returns
-------
str
If get = true, returns a string with the formated string<|endoftext|> |
e9a091ce9029bed43111222c48721571f5eceff97dd54ba7f412fceb746a4f25 | def register_task(name):
"\n New tasks can be added to fairseq with the\n :func:`~fairseq.tasks.register_task` function decorator.\n\n For example::\n\n @register_task('classification')\n class ClassificationTask(FairseqTask):\n (...)\n\n .. note::\n\n All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`\n interface.\n\n Please see the\n\n Args:\n name (str): the name of the task\n "
def register_task_cls(cls):
if (name in TASK_REGISTRY):
raise ValueError('Cannot register duplicate task ({})'.format(name))
if (not issubclass(cls, FairseqTask)):
raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
if (cls.__name__ in TASK_CLASS_NAMES):
raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
return cls
return register_task_cls | New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Please see the
Args:
name (str): the name of the task | infoxlm/fairseq/fairseq/tasks/__init__.py | register_task | codenet/unilm | 5,129 | python | def register_task(name):
"\n New tasks can be added to fairseq with the\n :func:`~fairseq.tasks.register_task` function decorator.\n\n For example::\n\n @register_task('classification')\n class ClassificationTask(FairseqTask):\n (...)\n\n .. note::\n\n All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`\n interface.\n\n Please see the\n\n Args:\n name (str): the name of the task\n "
def register_task_cls(cls):
if (name in TASK_REGISTRY):
raise ValueError('Cannot register duplicate task ({})'.format(name))
if (not issubclass(cls, FairseqTask)):
raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
if (cls.__name__ in TASK_CLASS_NAMES):
raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
return cls
return register_task_cls | def register_task(name):
"\n New tasks can be added to fairseq with the\n :func:`~fairseq.tasks.register_task` function decorator.\n\n For example::\n\n @register_task('classification')\n class ClassificationTask(FairseqTask):\n (...)\n\n .. note::\n\n All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`\n interface.\n\n Please see the\n\n Args:\n name (str): the name of the task\n "
def register_task_cls(cls):
if (name in TASK_REGISTRY):
raise ValueError('Cannot register duplicate task ({})'.format(name))
if (not issubclass(cls, FairseqTask)):
raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
if (cls.__name__ in TASK_CLASS_NAMES):
raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
return cls
return register_task_cls<|docstring|>New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Please see the
Args:
name (str): the name of the task<|endoftext|> |
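register_task is an instance of the registry-decorator pattern; the stripped-down sketch below (registry dict and base class reduced to the bare minimum visible in the record) illustrates the flow end to end.
TASK_REGISTRY = {}

class FairseqTask:
    pass

def register_task(name):
    def register_task_cls(cls):
        if name in TASK_REGISTRY:
            raise ValueError('Cannot register duplicate task ({})'.format(name))
        if not issubclass(cls, FairseqTask):
            raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
        TASK_REGISTRY[name] = cls
        return cls
    return register_task_cls

@register_task('classification')
class ClassificationTask(FairseqTask):
    pass

print(TASK_REGISTRY['classification'].__name__)  # ClassificationTask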
b1914ccf6e455bcfd89a950b3d36ee5ba929bbebe9bb57873ca98bb18c52d1ed | def get_coordinate(record: TreasureTuple) -> str:
'\n :param record: tuple - a (treasure, coordinate) pair.\n :return: str - the extracted map coordinate.\n '
return record[1] | :param record: tuple - a (treasure, coordinate) pair.
:return: str - the extracted map coordinate. | problems/exercism/tisbury-treasure-hunt/tuples.py | get_coordinate | JayMonari/py-personal | 0 | python | def get_coordinate(record: TreasureTuple) -> str:
'\n :param record: tuple - a (treasure, coordinate) pair.\n :return: str - the extracted map coordinate.\n '
return record[1] | def get_coordinate(record: TreasureTuple) -> str:
'\n :param record: tuple - a (treasure, coordinate) pair.\n :return: str - the extracted map coordinate.\n '
return record[1]<|docstring|>:param record: tuple - a (treasure, coordinate) pair.
:return: str - the extracted map coordinate.<|endoftext|> |
98a9ff500e4ad3751e121bed226968ea3347415100343470d018e4db0a49dbd3 | def convert_coordinate(coordinate: str) -> Tuple[(str, str)]:
'\n :param coordinate: str - a string map coordinate\n :return: tuple - the string coordinate separated into its individual components.\n '
return (coordinate[0], coordinate[1]) | :param coordinate: str - a string map coordinate
:return: tuple - the string coordinate separated into its individual components. | problems/exercism/tisbury-treasure-hunt/tuples.py | convert_coordinate | JayMonari/py-personal | 0 | python | def convert_coordinate(coordinate: str) -> Tuple[(str, str)]:
'\n :param coordinate: str - a string map coordinate\n :return: tuple - the string coordinate separated into its individual components.\n '
return (coordinate[0], coordinate[1]) | def convert_coordinate(coordinate: str) -> Tuple[(str, str)]:
'\n :param coordinate: str - a string map coordinate\n :return: tuple - the string coordinate separated into its individual components.\n '
return (coordinate[0], coordinate[1])<|docstring|>:param coordinate: str - a string map coordinate
:return: tuple - the string coordinate separated into its individual components.<|endoftext|>
3d0f9473c5bd4ccc978c08cfdc330a605ef1b091b56d6658bfdf9cf0e760f946 | def compare_records(azara_record: TreasureTuple, rui_record: LocationTuple) -> bool:
'\n :param azara_record: tuple - a (treasure, coordinate) pair.\n :param rui_record: tuple - a (location, coordinate, quadrant) trio.\n :return: bool - True if coordinates match, False otherwise.\n '
return (azara_record[1] == ''.join(rui_record[1])) | :param azara_record: tuple - a (treasure, coordinate) pair.
:param rui_record: tuple - a (location, coordinate, quadrant) trio.
:return: bool - True if coordinates match, False otherwise. | problems/exercism/tisbury-treasure-hunt/tuples.py | compare_records | JayMonari/py-personal | 0 | python | def compare_records(azara_record: TreasureTuple, rui_record: LocationTuple) -> bool:
'\n :param azara_record: tuple - a (treasure, coordinate) pair.\n :param rui_record: tuple - a (location, coordinate, quadrant) trio.\n :return: bool - True if coordinates match, False otherwise.\n '
return (azara_record[1] == .join(rui_record[1])) | def compare_records(azara_record: TreasureTuple, rui_record: LocationTuple) -> bool:
'\n :param azara_record: tuple - a (treasure, coordinate) pair.\n :param rui_record: tuple - a (location, coordinate, quadrant) trio.\n :return: bool - True if coordinates match, False otherwise.\n '
return (azara_record[1] == .join(rui_record[1]))<|docstring|>:param azara_record: tuple - a (treasure, coordinate) pair.
:param rui_record: tuple - a (location, coordinate, quadrant) trio.
:return: bool - True if coordinates match, False otherwise.<|endoftext|> |
131acadf18eff80eb9c6f088d3cff340438ce80f629444fdf4e8e3602e315e8e | def create_record(azara_record: TreasureTuple, rui_record: LocationTuple) -> MaybeRecord:
'\n :param azara_record: tuple - a (treasure, coordinate) pair.\n :param rui_record: tuple - a (location, coordinate, quadrant) trio.\n :return: tuple - combined record, or "not a match" if the records are incompatible.\n '
if (not compare_records(azara_record, rui_record)):
return 'not a match'
return (*azara_record, *rui_record) | :param azara_record: tuple - a (treasure, coordinate) pair.
:param rui_record: tuple - a (location, coordinate, quadrant) trio.
:return: tuple - combined record, or "not a match" if the records are incompatible. | problems/exercism/tisbury-treasure-hunt/tuples.py | create_record | JayMonari/py-personal | 0 | python | def create_record(azara_record: TreasureTuple, rui_record: LocationTuple) -> MaybeRecord:
'\n :param azara_record: tuple - a (treasure, coordinate) pair.\n :param rui_record: tuple - a (location, coordinate, quadrant) trio.\n :return: tuple - combined record, or "not a match" if the records are incompatible.\n '
if (not compare_records(azara_record, rui_record)):
return 'not a match'
return (*azara_record, *rui_record) | def create_record(azara_record: TreasureTuple, rui_record: LocationTuple) -> MaybeRecord:
'\n :param azara_record: tuple - a (treasure, coordinate) pair.\n :param rui_record: tuple - a (location, coordinate, quadrant) trio.\n :return: tuple - combined record, or "not a match" if the records are incompatible.\n '
if (not compare_records(azara_record, rui_record)):
return 'not a match'
return (*azara_record, *rui_record)<|docstring|>:param azara_record: tuple - a (treasure, coordinate) pair.
:param rui_record: tuple - a (location, coordinate, quadrant) trio.
:return: tuple - combined record, or "not a match" if the records are incompatible.<|endoftext|> |
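Assuming the tuples.py helpers above are in scope, a made-up pair of records shows how compare_records and create_record work together:
azara_record = ('Brass Spyglass', '4B')                  # (treasure, coordinate)
rui_record = ('Seaside Cottages', ('4', 'B'), 'Blue')    # (location, coordinate, quadrant)

assert compare_records(azara_record, rui_record)         # '4B' == ''.join(('4', 'B'))
print(create_record(azara_record, rui_record))
# ('Brass Spyglass', '4B', 'Seaside Cottages', ('4', 'B'), 'Blue')

print(create_record(('Vintage Pirate Hat', '7E'), rui_record))  # not a match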
0d0ddcbe21f79670af5ff4a0100273747164b26f5a73bb17fa2a64ff02e3d311 | def clean_up(combined_record: CombinedRecord) -> str:
'\n :param combined_record: tuple of tuples - everything from both participants.\n :return: str - everything "cleaned", with excess coordinates and information removed.\n '
report: List[str] = []
for tup in combined_record:
cleaned = tuple((val for (i, val) in enumerate(tup) if (i != 1)))
report.append(str(cleaned))
return ('\n'.join(report) + '\n') | :param combined_record: tuple of tuples - everything from both participants.
:return: str - everything "cleaned", with excess coordinates and information removed. | problems/exercism/tisbury-treasure-hunt/tuples.py | clean_up | JayMonari/py-personal | 0 | python | def clean_up(combined_record: CombinedRecord) -> str:
'\n :param combined_record: tuple of tuples - everything from both participants.\n :return: str - everything "cleaned", with excess coordinates and information removed.\n '
report: List[str] = []
for tup in combined_record:
cleaned = tuple((val for (i, val) in enumerate(tup) if (i != 1)))
report.append(str(cleaned))
return ('\n'.join(report) + '\n') | def clean_up(combined_record: CombinedRecord) -> str:
'\n :param combined_record: tuple of tuples - everything from both participants.\n :return: str - everything "cleaned", with excess coordinates and information removed.\n '
report: List[str] = []
for tup in combined_record:
cleaned = tuple((val for (i, val) in enumerate(tup) if (i != 1)))
report.append(str(cleaned))
return ('\n'.join(report) + '\n')<|docstring|>:param combined_record: tuple of tuples - everything from both participants.
:return: str - everything "cleaned", with excess coordinates and information removed.<|endoftext|>
d3d446c0fb9f1d61852afe6a3c037d529d5c6438f32ee95c4fd2c712296b0c3c | def __init__(self, delete_zip_file=False):
'\n\n Parameters\n ----------\n delete_zip_file : bool, optional\n Whether to delete the zip file, value from True or False, by default False\n '
self.delete_zip_file = delete_zip_file | Parameters
----------
delete_zip_file : bool, optional
Whether to delete the zip file, value from True or False, by default False | qlib/tests/data.py | __init__ | jinniuai/qlib | 8,637 | python | def __init__(self, delete_zip_file=False):
'\n\n Parameters\n ----------\n delete_zip_file : bool, optional\n Whether to delete the zip file, value from True or False, by default False\n '
self.delete_zip_file = delete_zip_file | def __init__(self, delete_zip_file=False):
'\n\n Parameters\n ----------\n delete_zip_file : bool, optional\n Whether to delete the zip file, value from True or False, by default False\n '
self.delete_zip_file = delete_zip_file<|docstring|>Parameters
----------
delete_zip_file : bool, optional
Whether to delete the zip file, value from True or False, by default False<|endoftext|> |
6f8163942514c4c1e64ecde6880b33c4bde707123fd0eee3741c2d532a97f8cb | def qlib_data(self, name='qlib_data', target_dir='~/.qlib/qlib_data/cn_data', version=None, interval='1d', region='cn', delete_old=True, exists_skip=False):
'download cn qlib data from remote\n\n Parameters\n ----------\n target_dir: str\n data save directory\n name: str\n dataset name, value from [qlib_data, qlib_data_simple], by default qlib_data\n version: str\n data version, value from [v1, ...], by default None(use script to specify version)\n interval: str\n data freq, value from [1d], by default 1d\n region: str\n data region, value from [cn, us], by default cn\n delete_old: bool\n delete an existing directory, by default True\n exists_skip: bool\n exists skip, by default False\n\n Examples\n ---------\n # get 1d data\n python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data --interval 1d --region cn\n\n # get 1min data\n python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data_1min --interval 1min --region cn\n -------\n\n '
if (exists_skip and exists_qlib_data(target_dir)):
logger.warning(f'''Data already exists: {target_dir}, the data download will be skipped
If downloading is required: `exists_skip=False` or `change target_dir`''')
return
qlib_version = '.'.join(re.findall('(\\d+)\\.+', qlib.__version__))
def _get_file_name(v):
return self.QLIB_DATA_NAME.format(dataset_name=name, region=region.lower(), interval=interval.lower(), qlib_version=v)
file_name = _get_file_name(qlib_version)
if (not self.check_dataset(file_name, version)):
file_name = _get_file_name('latest')
self._download_data(file_name.lower(), target_dir, delete_old, dataset_version=version) | download cn qlib data from remote
Parameters
----------
target_dir: str
data save directory
name: str
dataset name, value from [qlib_data, qlib_data_simple], by default qlib_data
version: str
data version, value from [v1, ...], by default None(use script to specify version)
interval: str
data freq, value from [1d], by default 1d
region: str
data region, value from [cn, us], by default cn
delete_old: bool
delete an existing directory, by default True
exists_skip: bool
exists skip, by default False
Examples
---------
# get 1d data
python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data --interval 1d --region cn
# get 1min data
python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data_1min --interval 1min --region cn
------- | qlib/tests/data.py | qlib_data | jinniuai/qlib | 8,637 | python | def qlib_data(self, name='qlib_data', target_dir='~/.qlib/qlib_data/cn_data', version=None, interval='1d', region='cn', delete_old=True, exists_skip=False):
'download cn qlib data from remote\n\n Parameters\n ----------\n target_dir: str\n data save directory\n name: str\n dataset name, value from [qlib_data, qlib_data_simple], by default qlib_data\n version: str\n data version, value from [v1, ...], by default None(use script to specify version)\n interval: str\n data freq, value from [1d], by default 1d\n region: str\n data region, value from [cn, us], by default cn\n delete_old: bool\n delete an existing directory, by default True\n exists_skip: bool\n exists skip, by default False\n\n Examples\n ---------\n # get 1d data\n python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data --interval 1d --region cn\n\n # get 1min data\n python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data_1min --interval 1min --region cn\n -------\n\n '
if (exists_skip and exists_qlib_data(target_dir)):
logger.warning(f'Data already exists: {target_dir}, the data download will be skipped
If downloading is required: `exists_skip=False` or `change target_dir`')
return
qlib_version = '.'.join(re.findall('(\\d+)\\.+', qlib.__version__))
def _get_file_name(v):
return self.QLIB_DATA_NAME.format(dataset_name=name, region=region.lower(), interval=interval.lower(), qlib_version=v)
file_name = _get_file_name(qlib_version)
if (not self.check_dataset(file_name, version)):
file_name = _get_file_name('latest')
self._download_data(file_name.lower(), target_dir, delete_old, dataset_version=version) | def qlib_data(self, name='qlib_data', target_dir='~/.qlib/qlib_data/cn_data', version=None, interval='1d', region='cn', delete_old=True, exists_skip=False):
'download cn qlib data from remote\n\n Parameters\n ----------\n target_dir: str\n data save directory\n name: str\n dataset name, value from [qlib_data, qlib_data_simple], by default qlib_data\n version: str\n data version, value from [v1, ...], by default None(use script to specify version)\n interval: str\n data freq, value from [1d], by default 1d\n region: str\n data region, value from [cn, us], by default cn\n delete_old: bool\n delete an existing directory, by default True\n exists_skip: bool\n exists skip, by default False\n\n Examples\n ---------\n # get 1d data\n python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data --interval 1d --region cn\n\n # get 1min data\n python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data_1min --interval 1min --region cn\n -------\n\n '
if (exists_skip and exists_qlib_data(target_dir)):
logger.warning(f'Data already exists: {target_dir}, the data download will be skipped
If downloading is required: `exists_skip=False` or `change target_dir`')
return
qlib_version = '.'.join(re.findall('(\\d+)\\.+', qlib.__version__))
def _get_file_name(v):
return self.QLIB_DATA_NAME.format(dataset_name=name, region=region.lower(), interval=interval.lower(), qlib_version=v)
file_name = _get_file_name(qlib_version)
if (not self.check_dataset(file_name, version)):
file_name = _get_file_name('latest')
self._download_data(file_name.lower(), target_dir, delete_old, dataset_version=version)<|docstring|>download cn qlib data from remote
Parameters
----------
target_dir: str
data save directory
name: str
dataset name, value from [qlib_data, qlib_data_simple], by default qlib_data
version: str
data version, value from [v1, ...], by default None(use script to specify version)
interval: str
data freq, value from [1d], by default 1d
region: str
data region, value from [cn, us], by default cn
delete_old: bool
delete an existing directory, by default True
exists_skip: bool
exists skip, by default False
Examples
---------
# get 1d data
python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data --interval 1d --region cn
# get 1min data
python get_data.py qlib_data --name qlib_data --target_dir ~/.qlib/qlib_data/cn_data_1min --interval 1min --region cn
-------<|endoftext|> |
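A Python-API counterpart to the CLI examples in the docstring, based only on the signatures shown above; the class name GetData and the import path are assumptions inferred from qlib/tests/data.py, not confirmed by this record.
from qlib.tests.data import GetData  # assumed import path and class name

gd = GetData(delete_zip_file=True)
gd.qlib_data(
    name='qlib_data',
    target_dir='~/.qlib/qlib_data/cn_data',
    interval='1d',
    region='cn',
    exists_skip=True,   # skip the download if target_dir already holds data
)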
c38b222afa1d55a769584e968faae77d2e92abc9075528a4e8d2dbf32a46aaf9 | def csv_data_cn(self, target_dir='~/.qlib/csv_data/cn_data'):
'download cn csv data from remote\n\n Parameters\n ----------\n target_dir: str\n data save directory\n\n Examples\n ---------\n python get_data.py csv_data_cn --target_dir ~/.qlib/csv_data/cn_data\n -------\n\n '
file_name = 'csv_data_cn.zip'
self._download_data(file_name, target_dir) | download cn csv data from remote
Parameters
----------
target_dir: str
data save directory
Examples
---------
python get_data.py csv_data_cn --target_dir ~/.qlib/csv_data/cn_data
------- | qlib/tests/data.py | csv_data_cn | jinniuai/qlib | 8,637 | python | def csv_data_cn(self, target_dir='~/.qlib/csv_data/cn_data'):
'download cn csv data from remote\n\n Parameters\n ----------\n target_dir: str\n data save directory\n\n Examples\n ---------\n python get_data.py csv_data_cn --target_dir ~/.qlib/csv_data/cn_data\n -------\n\n '
file_name = 'csv_data_cn.zip'
self._download_data(file_name, target_dir) | def csv_data_cn(self, target_dir='~/.qlib/csv_data/cn_data'):
'download cn csv data from remote\n\n Parameters\n ----------\n target_dir: str\n data save directory\n\n Examples\n ---------\n python get_data.py csv_data_cn --target_dir ~/.qlib/csv_data/cn_data\n -------\n\n '
file_name = 'csv_data_cn.zip'
self._download_data(file_name, target_dir)<|docstring|>download cn csv data from remote
Parameters
----------
target_dir: str
data save directory
Examples
---------
python get_data.py csv_data_cn --target_dir ~/.qlib/csv_data/cn_data
-------<|endoftext|> |
62a7e257d9fa2390846d8bbfd8c6c77196b48aa07c4a025c522a6cce97bd408c | @mod.capture(rule=f'{alt_digits}')
def digit(m) -> str:
'One digit'
return int(digits_map[m[0]]) | One digit | base/number_cardinals.py | digit | gimpf/talon-conf | 5 | python | @mod.capture(rule=f'{alt_digits}')
def digit(m) -> str:
return int(digits_map[m[0]]) | @mod.capture(rule=f'{alt_digits}')
def digit(m) -> str:
return int(digits_map[m[0]])<|docstring|>One digit<|endoftext|> |
993c9765a989caa3cf1d8c65d3827cb760559d07f0f1e1d6badc4faf0f1e929b | @mod.capture(rule=f'<number_small> [{alt_scales} ([and] (<number_small> | {alt_scales} | <number_small> {alt_scales}))*]')
def number_scaled(m) -> str:
'Returns a series of numbers as a string'
return fuse_num(fuse_scale(fuse_num(fuse_scale(list(m), 3))))[0] | Returns a series of numbers as a string | base/number_cardinals.py | number_scaled | gimpf/talon-conf | 5 | python | @mod.capture(rule=f'<number_small> [{alt_scales} ([and] (<number_small> | {alt_scales} | <number_small> {alt_scales}))*]')
def number_scaled(m) -> str:
return fuse_num(fuse_scale(fuse_num(fuse_scale(list(m), 3))))[0] | @mod.capture(rule=f'<number_small> [{alt_scales} ([and] (<number_small> | {alt_scales} | <number_small> {alt_scales}))*]')
def number_scaled(m) -> str:
return fuse_num(fuse_scale(fuse_num(fuse_scale(list(m), 3))))[0]<|docstring|>Returns a series of numbers as a string<|endoftext|> |
610bdaeed3d7ff19f0ba4dc70ba5c3335cc28b690f4343b46c7b39d8532d660a | def __init__(self, pl_module: LightningModule):
"\n Wraps the user's LightningModule and redirects the forward call to the appropriate\n method, either ``training_step``, ``validation_step`` or ``test_step``.\n If the LightningModule is in none of the states `training`, `testing` or `validation`,\n the inputs will be redirected to the\n :meth:`~pytorch_lightning.core.lightning.LightningModule.predict` method.\n Inheriting classes may also modify the inputs or outputs of forward.\n\n Args:\n pl_module: the model to wrap\n "
super().__init__()
self.module = pl_module | Wraps the user's LightningModule and redirects the forward call to the appropriate
method, either ``training_step``, ``validation_step`` or ``test_step``.
If the LightningModule is in none of the states `training`, `testing` or `validation`,
the inputs will be redirected to the
:meth:`~pytorch_lightning.core.lightning.LightningModule.predict` method.
Inheriting classes may also modify the inputs or outputs of forward.
Args:
pl_module: the model to wrap | pytorch_lightning/overrides/base.py | __init__ | neighthan/pytorch-lightning | 3 | python | def __init__(self, pl_module: LightningModule):
"\n Wraps the user's LightningModule and redirects the forward call to the appropriate\n method, either ``training_step``, ``validation_step`` or ``test_step``.\n If the LightningModule is in none of the states `training`, `testing` or `validation`,\n the inputs will be redirected to the\n :meth:`~pytorch_lightning.core.lightning.LightningModule.predict` method.\n Inheriting classes may also modify the inputs or outputs of forward.\n\n Args:\n pl_module: the model to wrap\n "
super().__init__()
self.module = pl_module | def __init__(self, pl_module: LightningModule):
"\n Wraps the user's LightningModule and redirects the forward call to the appropriate\n method, either ``training_step``, ``validation_step`` or ``test_step``.\n If the LightningModule is in none of the states `training`, `testing` or `validation`,\n the inputs will be redirected to the\n :meth:`~pytorch_lightning.core.lightning.LightningModule.predict` method.\n Inheriting classes may also modify the inputs or outputs of forward.\n\n Args:\n pl_module: the model to wrap\n "
super().__init__()
self.module = pl_module<|docstring|>Wraps the user's LightningModule and redirects the forward call to the appropriate
method, either ``training_step``, ``validation_step`` or ``test_step``.
If the LightningModule is in none of the states `training`, `testing` or `validation`,
the inputs will be redirected to the
:meth:`~pytorch_lightning.core.lightning.LightningModule.predict` method.
Inheriting classes may also modify the inputs or outputs of forward.
Args:
pl_module: the model to wrap<|endoftext|> |
be61f6239ffbb29be4a6b04fbbb5a47126cb76adafa354d123b3f9a8b89d7753 | @swiftTest
@skipUnlessDarwin
def test(self):
"Test that the default runtime library path can be recovered even if\n paths weren't serialized."
self.build()
log = self.getBuildArtifact('types.log')
command_result = lldb.SBCommandReturnObject()
interpreter = self.dbg.GetCommandInterpreter()
interpreter.HandleCommand(('log enable lldb types -f ' + log), command_result)
(target, process, thread, bkpt) = lldbutil.run_to_name_breakpoint(self, 'main')
self.expect('p 1')
logfile = open(log, 'r')
in_expr_log = 0
found = 0
for line in logfile:
if line.startswith(' SwiftASTContextForExpressions::LogConfiguration(SwiftASTContext'):
in_expr_log += 1
if (in_expr_log and ('Runtime library paths' in line) and ('2 items' in line)):
found += 1
self.assertEqual(in_expr_log, 1)
self.assertEqual(found, 1) | Test that the default runtime library path can be recovered even if
paths weren't serialized. | lldb/test/API/lang/swift/runtime_library_path/TestSwiftRuntimeLibraryPath.py | test | cbjeukendrup/llvm-project | 605 | python | @swiftTest
@skipUnlessDarwin
def test(self):
"Test that the default runtime library path can be recovered even if\n paths weren't serialized."
self.build()
log = self.getBuildArtifact('types.log')
command_result = lldb.SBCommandReturnObject()
interpreter = self.dbg.GetCommandInterpreter()
interpreter.HandleCommand(('log enable lldb types -f ' + log), command_result)
(target, process, thread, bkpt) = lldbutil.run_to_name_breakpoint(self, 'main')
self.expect('p 1')
logfile = open(log, 'r')
in_expr_log = 0
found = 0
for line in logfile:
if line.startswith(' SwiftASTContextForExpressions::LogConfiguration(SwiftASTContext'):
in_expr_log += 1
if (in_expr_log and ('Runtime library paths' in line) and ('2 items' in line)):
found += 1
self.assertEqual(in_expr_log, 1)
self.assertEqual(found, 1) | @swiftTest
@skipUnlessDarwin
def test(self):
"Test that the default runtime library path can be recovered even if\n paths weren't serialized."
self.build()
log = self.getBuildArtifact('types.log')
command_result = lldb.SBCommandReturnObject()
interpreter = self.dbg.GetCommandInterpreter()
interpreter.HandleCommand(('log enable lldb types -f ' + log), command_result)
(target, process, thread, bkpt) = lldbutil.run_to_name_breakpoint(self, 'main')
self.expect('p 1')
logfile = open(log, 'r')
in_expr_log = 0
found = 0
for line in logfile:
if line.startswith(' SwiftASTContextForExpressions::LogConfiguration(SwiftASTContext'):
in_expr_log += 1
if (in_expr_log and ('Runtime library paths' in line) and ('2 items' in line)):
found += 1
self.assertEqual(in_expr_log, 1)
self.assertEqual(found, 1)<|docstring|>Test that the default runtime library path can be recovered even if
paths weren't serialized.<|endoftext|> |
d56edde8dc3c249155398f0b6f28b9f0a7c082cb6b341f391dc90b528d7e1fef | def init_logger(filename):
'Initializes logger'
logging.basicConfig(filename=os.path.basename(filename).replace('.py', '.log'), filemode='w', level=logging.INFO)
stderr_logger = logging.StreamHandler()
stderr_logger.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger().addHandler(stderr_logger)
return | Initializes logger | file_functions.py | init_logger | xaviernogueira/gcs_gui | 4 | python | def init_logger(filename):
logging.basicConfig(filename=os.path.basename(filename).replace('.py', '.log'), filemode='w', level=logging.INFO)
stderr_logger = logging.StreamHandler()
stderr_logger.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger().addHandler(stderr_logger)
return | def init_logger(filename):
logging.basicConfig(filename=os.path.basename(filename).replace('.py', '.log'), filemode='w', level=logging.INFO)
stderr_logger = logging.StreamHandler()
stderr_logger.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger().addHandler(stderr_logger)
return<|docstring|>Initializes logger<|endoftext|> |
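Typical wiring for the helper above in a script (assuming init_logger is imported from file_functions); the message text is illustrative.
import logging

init_logger(__file__)   # creates <script name>.log and mirrors log output to stderr
logging.getLogger(__name__).info('processing started')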
77a465cdbea985d7f19b2113732ee62d332a7939b085f4b75083bbd6b8117f2e | def cmd(command):
'Executes command prompt command'
try:
res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
msg = ('Command failed: %s' % command)
logger.error(msg)
raise Exception(msg)
msg = res.communicate()[1]
msg_str = str(msg, 'utf-8')
if (('http://lastools.org/LICENSE.txt' not in msg_str) and (len(msg_str) > 0)):
logger.info(msg)
return | Executes command prompt command | file_functions.py | cmd | xaviernogueira/gcs_gui | 4 | python | def cmd(command):
try:
res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
msg = ('Command failed: %s' % command)
logger.error(msg)
raise Exception(msg)
msg = res.communicate()[1]
msg_str = str(msg, 'utf-8')
if (('http://lastools.org/LICENSE.txt' not in msg_str) and (len(msg_str) > 0)):
logger.info(msg)
return | def cmd(command):
try:
res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
msg = ('Command failed: %s' % command)
logger.error(msg)
raise Exception(msg)
msg = res.communicate()[1]
msg_str = str(msg, 'utf-8')
if (('http://lastools.org/LICENSE.txt' not in msg_str) and (len(msg_str) > 0)):
logger.info(msg)
return<|docstring|>Executes command prompt command<|endoftext|> |
56f88d6ebeba2a38ba40eadb16b051c65c7865e4415343ae55010b7f5abb0ede | def browse(root, entry, select='file', ftypes=[('All files', '*')]):
'GUI button command: opens browser window and adds selected file/folder to entry'
if (select == 'file'):
filename = filedialog.askopenfilename(parent=root, title='Choose a file', filetypes=ftypes)
if (filename != None):
entry.delete(0, END)
entry.insert(END, filename)
elif (select == 'files'):
files = filedialog.askopenfilenames(parent=root, title='Choose files', filetypes=ftypes)
l = root.tk.splitlist(files)
entry.delete(0, END)
entry.insert(END, l)
elif (select == 'folder'):
dirname = filedialog.askdirectory(parent=root, initialdir=entry.get(), title='Choose a directory')
if (len(dirname) > 0):
entry.delete(0, END)
entry.insert(END, (dirname + '/')) | GUI button command: opens browser window and adds selected file/folder to entry | file_functions.py | browse | xaviernogueira/gcs_gui | 4 | python | def browse(root, entry, select='file', ftypes=[('All files', '*')]):
if (select == 'file'):
filename = filedialog.askopenfilename(parent=root, title='Choose a file', filetypes=ftypes)
if (filename != None):
entry.delete(0, END)
entry.insert(END, filename)
elif (select == 'files'):
files = filedialog.askopenfilenames(parent=root, title='Choose files', filetypes=ftypes)
l = root.tk.splitlist(files)
entry.delete(0, END)
entry.insert(END, l)
elif (select == 'folder'):
dirname = filedialog.askdirectory(parent=root, initialdir=entry.get(), title='Choose a directory')
if (len(dirname) > 0):
entry.delete(0, END)
entry.insert(END, (dirname + '/')) | def browse(root, entry, select='file', ftypes=[('All files', '*')]):
if (select == 'file'):
filename = filedialog.askopenfilename(parent=root, title='Choose a file', filetypes=ftypes)
if (filename != None):
entry.delete(0, END)
entry.insert(END, filename)
elif (select == 'files'):
files = filedialog.askopenfilenames(parent=root, title='Choose files', filetypes=ftypes)
l = root.tk.splitlist(files)
entry.delete(0, END)
entry.insert(END, l)
elif (select == 'folder'):
dirname = filedialog.askdirectory(parent=root, initialdir=entry.get(), title='Choose a directory')
if (len(dirname) > 0):
entry.delete(0, END)
entry.insert(END, (dirname + '/'))<|docstring|>GUI button command: opens browser window and adds selected file/folder to entry<|endoftext|> |
2a7246594d46e55c6d909f2401cbfad07a0de0338bf2b491f5f4a2b84952c6cd | def err_info(func):
'Wrapper to show error message when a command fails'
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
logger.info(e)
return wrapper | Wrapper to show error message when a command fails | file_functions.py | err_info | xaviernogueira/gcs_gui | 4 | python | def err_info(func):
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
logger.info(e)
return wrapper | def err_info(func):
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
logger.info(e)
return wrapper<|docstring|>Wrapper to show error message when a command fails<|endoftext|> |
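How the decorator above is applied; divide is a made-up example function. Note that the wrapper logs the exception and returns None rather than re-raising or passing the return value through.
@err_info
def divide(a, b):
    return a / b

divide(10, 0)   # the ZeroDivisionError is caught and written to the log instead of crashing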
ca54c94decff15985f24a55094d6794eb14149dbd83935243cc1123b59d30be1 | def check_use(filepath):
'Checks if a file or list of files is in use by another process\n If the file cannot be opened or there is an associated .lock file, it throws an exception.\n '
if (type(filepath) == list):
for f in filepath:
check_use(f)
return
file_object = None
if os.path.exists(filepath):
try:
buffer_size = 8
file_object = open(filepath, 'a', buffer_size)
if file_object:
for filename in os.listdir(os.path.dirname(filepath)):
if (filename.startswith(os.path.basename(filepath)) and filename.endswith('.lock')):
logger.error(('%s is open in another program. Close the file and try again.' % filepath))
raise Exception(('%s is open in another program. Close the file and try again.' % filepath))
except IOError:
logger.error(('%s is open in another program. Close the file and try again.' % filepath))
raise Exception(('%s is open in another program. Close the file and try again.' % filepath))
finally:
if file_object:
file_object.close()
return | Checks if a file or list of files is in use by another process
If the file cannot be opened or there is an associated .lock file, it throws an exception. | file_functions.py | check_use | xaviernogueira/gcs_gui | 4 | python | def check_use(filepath):
'Checks if a file or list of files is in use by another process\n If the file cannot be opened or there is an associated .lock file, it throws an exception.\n '
if (type(filepath) == list):
for f in filepath:
check_use(f)
return
file_object = None
if os.path.exists(filepath):
try:
buffer_size = 8
file_object = open(filepath, 'a', buffer_size)
if file_object:
for filename in os.listdir(os.path.dirname(filepath)):
if (filename.startswith(os.path.basename(filepath)) and filename.endswith('.lock')):
logger.error(('%s is open in another program. Close the file and try again.' % filepath))
raise Exception(('%s is open in another program. Close the file and try again.' % filepath))
except IOError:
logger.error(('%s is open in another program. Close the file and try again.' % filepath))
raise Exception(('%s is open in another program. Close the file and try again.' % filepath))
finally:
if file_object:
file_object.close()
return | def check_use(filepath):
'Checks if a file or list of files is in use by another process\n If the file cannot be opened or there is an associated .lock file, it throws an exception.\n '
if (type(filepath) == list):
for f in filepath:
check_use(f)
return
file_object = None
if os.path.exists(filepath):
try:
buffer_size = 8
file_object = open(filepath, 'a', buffer_size)
if file_object:
for filename in os.listdir(os.path.dirname(filepath)):
if (filename.startswith(os.path.basename(filepath)) and filename.endswith('.lock')):
logger.error(('%s is open in another program. Close the file and try again.' % filepath))
raise Exception(('%s is open in another program. Close the file and try again.' % filepath))
except IOError:
logger.error(('%s is open in another program. Close the file and try again.' % filepath))
raise Exception(('%s is open in another program. Close the file and try again.' % filepath))
finally:
if file_object:
file_object.close()
return<|docstring|>Checks if a file or list of files is in use by another process
If the file cannot be opened or there is an associated .lock file, it throws an exception.<|endoftext|> |
dc4d944784b88a9fd9cbdbbb97eac1c559a2a84b95c0f359e1260e31cde1d839 | def split_list(l, break_pts):
'returns list l split up into sublists at break point indices'
l_0 = len(l)
sl = []
if (len(break_pts) == 0):
return [l]
else:
for brk in break_pts:
delta_l = (l_0 - len(l))
sl.append(l[:(brk - delta_l)])
l = l[(brk - delta_l):]
sl.append(l)
return sl | returns list l split up into sublists at break point indices | file_functions.py | split_list | xaviernogueira/gcs_gui | 4 | python | def split_list(l, break_pts):
l_0 = len(l)
sl = []
if (len(break_pts) == 0):
return [l]
else:
for brk in break_pts:
delta_l = (l_0 - len(l))
sl.append(l[:(brk - delta_l)])
l = l[(brk - delta_l):]
sl.append(l)
return sl | def split_list(l, break_pts):
l_0 = len(l)
sl = []
if (len(break_pts) == 0):
return [l]
else:
for brk in break_pts:
delta_l = (l_0 - len(l))
sl.append(l[:(brk - delta_l)])
l = l[(brk - delta_l):]
sl.append(l)
return sl<|docstring|>returns list l split up into sublists at break point indices<|endoftext|> |
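A concrete trace of split_list on a short illustrative list:
l = [10, 20, 30, 40, 50]
print(split_list(l, []))      # [[10, 20, 30, 40, 50]]  (no break points)
print(split_list(l, [2, 4]))  # [[10, 20], [30, 40], [50]]  (splits before indices 2 and 4)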
71e7cde15bc3d3c1e11ce67253428523ab5bc6aa96b884ab4fc77acb88185706 | def split_reaches(l, new_reach_pts):
'splits l into sections where new_reach_pts contains the starting indices for each slice'
new_reach_pts = sorted(new_reach_pts)
sl = [l[i1:i2] for (i1, i2) in zip(new_reach_pts, new_reach_pts[1:])]
last_index = new_reach_pts[(- 1)]
sl.append(l[last_index:])
return sl | splits l into sections where new_reach_pts contains the starting indices for each slice | file_functions.py | split_reaches | xaviernogueira/gcs_gui | 4 | python | def split_reaches(l, new_reach_pts):
new_reach_pts = sorted(new_reach_pts)
sl = [l[i1:i2] for (i1, i2) in zip(new_reach_pts, new_reach_pts[1:])]
last_index = new_reach_pts[(- 1)]
sl.append(l[last_index:])
return sl | def split_reaches(l, new_reach_pts):
new_reach_pts = sorted(new_reach_pts)
sl = [l[i1:i2] for (i1, i2) in zip(new_reach_pts, new_reach_pts[1:])]
last_index = new_reach_pts[(- 1)]
sl.append(l[last_index:])
return sl<|docstring|>splits l into sections where new_reach_pts contains the starting indices for each slice<|endoftext|> |
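A matching trace of split_reaches, where the break points are the starting indices of each slice:
stations = [0, 10, 20, 30, 40, 50]
print(split_reaches(stations, [0, 2, 5]))  # [[0, 10], [20, 30, 40], [50]]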
e6db2b0e70d9bbbed1c4a27ded8b3c244e3bd025b6720f5c2265d6a62e1fe42d | def tif_to_poly(tif):
'Converts .tif raster to a single polygon covering area that is not null'
ras = arcpy.Raster(tif)
int_raster = arcpy.sa.Con((arcpy.sa.IsNull(ras) == False), 1)
poly = arcpy.RasterToPolygon_conversion(int_raster, tif.replace('.tif', '.shp'), 'NO_SIMPLIFY')
return poly.getOutput(0) | Converts .tif raster to a single polygon covering area that is not null | file_functions.py | tif_to_poly | xaviernogueira/gcs_gui | 4 | python | def tif_to_poly(tif):
ras = arcpy.Raster(tif)
int_raster = arcpy.sa.Con((arcpy.sa.IsNull(ras) == False), 1)
poly = arcpy.RasterToPolygon_conversion(int_raster, tif.replace('.tif', '.shp'), 'NO_SIMPLIFY')
return poly.getOutput(0) | def tif_to_poly(tif):
ras = arcpy.Raster(tif)
int_raster = arcpy.sa.Con((arcpy.sa.IsNull(ras) == False), 1)
poly = arcpy.RasterToPolygon_conversion(int_raster, tif.replace('.tif', '.shp'), 'NO_SIMPLIFY')
return poly.getOutput(0)<|docstring|>Converts .tif raster to a single polygon covering area that is not null<|endoftext|> |
42cdd579a1b35cd047a020902b348f1a84971693e9de114a60881b61c5d823f8 | def tableToCSV(input_table, csv_filepath, fld_to_remove_override=[], keep_fields=[]):
'Returns the file path of a csv containing the attributes table of a shapefile or other table'
fld_list = arcpy.ListFields(input_table)
fld_names = [str(fld.name) for fld in fld_list]
if (len(fld_to_remove_override) > 0):
for field in fld_to_remove_override:
try:
fld_names.remove(field)
except:
("Can't delete field: %s" % field)
elif (len(keep_fields) > 0):
fld_names = [i for i in fld_names if (i in keep_fields)]
with open(csv_filepath, 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(fld_names)
with arcpy.da.SearchCursor(input_table, fld_names) as cursor:
for row in cursor:
writer.writerow(row)
print((csv_filepath + ' CREATED'))
csv_file.close()
return csv_filepath | Returns the file path of a csv containing the attributes table of a shapefile or other table | file_functions.py | tableToCSV | xaviernogueira/gcs_gui | 4 | python | def tableToCSV(input_table, csv_filepath, fld_to_remove_override=[], keep_fields=[]):
fld_list = arcpy.ListFields(input_table)
fld_names = [str(fld.name) for fld in fld_list]
if (len(fld_to_remove_override) > 0):
for field in fld_to_remove_override:
try:
fld_names.remove(field)
except:
("Can't delete field: %s" % field)
elif (len(keep_fields) > 0):
fld_names = [i for i in fld_names if (i in keep_fields)]
with open(csv_filepath, 'w', newline=) as csv_file:
writer = csv.writer(csv_file)
writer.writerow(fld_names)
with arcpy.da.SearchCursor(input_table, fld_names) as cursor:
for row in cursor:
writer.writerow(row)
print((csv_filepath + ' CREATED'))
csv_file.close()
return csv_filepath | def tableToCSV(input_table, csv_filepath, fld_to_remove_override=[], keep_fields=[]):
fld_list = arcpy.ListFields(input_table)
fld_names = [str(fld.name) for fld in fld_list]
if (len(fld_to_remove_override) > 0):
for field in fld_to_remove_override:
try:
fld_names.remove(field)
except:
("Can't delete field: %s" % field)
elif (len(keep_fields) > 0):
fld_names = [i for i in fld_names if (i in keep_fields)]
with open(csv_filepath, 'w', newline=) as csv_file:
writer = csv.writer(csv_file)
writer.writerow(fld_names)
with arcpy.da.SearchCursor(input_table, fld_names) as cursor:
for row in cursor:
writer.writerow(row)
print((csv_filepath + ' CREATED'))
csv_file.close()
return csv_filepath<|docstring|>Returns the file path of a csv containing the attributes table of a shapefile or other table<|endoftext|> |
35d69dae6774023de7c926f75692e79395faa990ddb0338df2c5a8ae08267803 | def delete_gis_files(file_loc):
'This function accepts a GIS file location (eg. \\shapefile.shp) and deletes the file as well\n as any other related file (eg. shapefile.prj, shapefile.cpg). This function supports .tif, .shp, and .dbf'
suffix = file_loc[(- 4):]
prefix = file_loc[:(- 4)]
if (suffix == '.shp'):
suf_list = ['.shp', '.cpg', '.dbf', '.prj', '.sbn', '.sbx', '.shp.xlm', '.shx']
elif (suffix == '.tif'):
suf_list = ['.tif', '.tif.aux.xml', '.tfw', '.tif.ovr', '.tif.vat.cpg', '.tif.vat.dbf']
elif (suffix == '.dbf'):
suf_list = ['.dbf', '.cpg', '.dbf.xml']
elif (suffix == '.csv'):
suf_list = ['.csv']
counter = 0
for suf in suf_list:
file = (prefix + suf)
if os.path.exists(file):
try:
os.remove(file)
except:
print(("Couldn't delete %s" % file))
else:
counter += 1
print(('Couldnt find %s files sub-files. Not normally and issue but if overwrite errors raise this could be the culprit!' % counter)) | This function accepts a GIS file location (eg. \shapefile.shp) and deletes the file as well
as any other related file (eg. shapefile.prj, shapefile.cpg). This function supports .tif, .shp, and .dbf | file_functions.py | delete_gis_files | xaviernogueira/gcs_gui | 4 | python | def delete_gis_files(file_loc):
'This function accepts a GIS file location (eg. \\shapefile.shp) and deletes the file as well\n as any other related file (eg. shapefile.prj, shapefile.cpg). This function supports .tif, .shp, and .dbf'
suffix = file_loc[(- 4):]
prefix = file_loc[:(- 4)]
if (suffix == '.shp'):
suf_list = ['.shp', '.cpg', '.dbf', '.prj', '.sbn', '.sbx', '.shp.xlm', '.shx']
elif (suffix == '.tif'):
suf_list = ['.tif', '.tif.aux.xml', '.tfw', '.tif.ovr', '.tif.vat.cpg', '.tif.vat.dbf']
elif (suffix == '.dbf'):
suf_list = ['.dbf', '.cpg', '.dbf.xml']
elif (suffix == '.csv'):
suf_list = ['.csv']
counter = 0
for suf in suf_list:
file = (prefix + suf)
if os.path.exists(file):
try:
os.remove(file)
except:
print(("Couldn't delete %s" % file))
else:
counter += 1
print(('Couldnt find %s files sub-files. Not normally and issue but if overwrite errors raise this could be the culprit!' % counter)) | def delete_gis_files(file_loc):
'This function accepts a GIS file location (eg. \\shapefile.shp) and deletes the file as well\n as any other related file (eg. shapefile.prj, shapefile.cpg). This function supports .tif, .shp, and .dbf'
suffix = file_loc[(- 4):]
prefix = file_loc[:(- 4)]
if (suffix == '.shp'):
suf_list = ['.shp', '.cpg', '.dbf', '.prj', '.sbn', '.sbx', '.shp.xlm', '.shx']
elif (suffix == '.tif'):
suf_list = ['.tif', '.tif.aux.xml', '.tfw', '.tif.ovr', '.tif.vat.cpg', '.tif.vat.dbf']
elif (suffix == '.dbf'):
suf_list = ['.dbf', '.cpg', '.dbf.xml']
elif (suffix == '.csv'):
suf_list = ['.csv']
counter = 0
for suf in suf_list:
file = (prefix + suf)
if os.path.exists(file):
try:
os.remove(file)
except:
print(("Couldn't delete %s" % file))
else:
counter += 1
print(("Couldn't find %s sub-files. Not normally an issue, but if overwrite errors arise this could be the culprit!" % counter))<|docstring|>This function accepts a GIS file location (eg. \shapefile.shp) and deletes the file as well
as any other related file (eg. shapefile.prj, shapefile.cpg). This function supports .tif, .shp, and .dbf<|endoftext|> |
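A brief usage sketch for delete_gis_files, assuming file_functions.py is importable; the path is a hypothetical placeholder. Passing the .shp location removes the shapefile plus any sidecar files that exist next to it.

from file_functions import delete_gis_files

delete_gis_files(r'C:\data\temp_detrended.shp')  # also removes .cpg, .dbf, .prj, .sbn, .sbx, .shx if present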
f37fa65a537ac58553304e29b280acea2313e10d1f13136de0e59a0dec18f8a0 | def find_suffix(csv_location):
'This function takes a csv table location and finds the suffix unaffected by stage.\n Ex: C://documents//2p3ft_gcs_table.csv would return ft_gcs_table as a string'
base = os.path.basename(csv_location)
if (str.isnumeric(base[0]) == True):
index = 0
base_snip = base[0]
while ((base_snip != 'f') and (base_snip != 'm')):
index += 1
base_snip = base[index]
suffix = str(base[index:])
else:
print('csv filename not suitable. Please have stage height and units in name at the start of the filename. Ex: 2p3ft_gcs_table.csv or 1m_gcs_table.csv')
return suffix | This function takes a csv table location and finds the suffix unaffected by stage.
Ex: C://documents//2p3ft_gcs_table.csv would return ft_gcs_table as a string | file_functions.py | find_suffix | xaviernogueira/gcs_gui | 4 | python | def find_suffix(csv_location):
'This function takes a csv table location and finds the suffix unaffected by stage.\n Ex: C://documents//2p3ft_gcs_table.csv would return ft_gcs_table as a string'
base = os.path.basename(csv_location)
if (str.isnumeric(base[0]) == True):
index = 0
base_snip = base[0]
while ((base_snip != 'f') and (base_snip != 'm')):
index += 1
base_snip = base[index]
suffix = str(base[index:])
else:
print('csv filename not suitable. Please have stage height and units in name at the start of the filename. Ex: 2p3ft_gcs_table.csv or 1m_gcs_table.csv')
return suffix | def find_suffix(csv_location):
'This function takes a csv table location and finds the suffix unaffected by stage.\n Ex: C://documents//2p3ft_gcs_table.csv would return ft_gcs_table as a string'
base = os.path.basename(csv_location)
if (str.isnumeric(base[0]) == True):
index = 0
base_snip = base[0]
while ((base_snip != 'f') and (base_snip != 'm')):
index += 1
base_snip = base[index]
suffix = str(base[index:])
else:
print('csv filename not suitable. Please have stage height and units in name at the start of the filename. Ex: 2p3ft_gcs_table.csv or 1m_gcs_table.csv')
return suffix<|docstring|>This function takes a csv table location and finds the suffix unaffected by stage.
Ex: C://documents//2p3ft_gcs_table.csv would return ft_gcs_table as a string<|endoftext|> |
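A short usage sketch for find_suffix, using the docstring's own example path. Note that, as written, the code keeps the file extension, so the result differs slightly from the docstring's stated example.

from file_functions import find_suffix

suffix = find_suffix('C://documents//2p3ft_gcs_table.csv')
print(suffix)  # everything from the unit character onward: 'ft_gcs_table.csv'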
2a521ae4034a418f2b02b823d67350219ff0ca8faeb527bd9ce3a54d3719aafe | def float_keyz_format(z):
'This function takes a float key z argument and returns its equivalent formatted string.\n ex: 5.3 -> 5p3, or 10.0 -> 10p0'
z_str = ''
if ((z >= 10.0) and isinstance(z, float)):
z_str = ((str(z)[0:2] + 'p') + str(z)[3])
elif ((z < 10.0) and isinstance(z, float)):
z_str = ((str(z)[0] + 'p') + str(z)[2])
elif isinstance(z, int):
z_str = (str(z) + 'p0')
try:
return z_str
except (z_str == ''):
print('Key z list parameters not valid. Please fill list with int or float.') | This function takes a float key z argument and returns its equivalent formatted string.
ex: 5.3 -> 5p3, or 10.0 -> 10p0 | file_functions.py | float_keyz_format | xaviernogueira/gcs_gui | 4 | python | def float_keyz_format(z):
'This function takes a float key z argument and returns its equivalent formatted string.\n ex: 5.3 -> 5p3, or 10.0 -> 10p0'
z_str = ''
if ((z >= 10.0) and isinstance(z, float)):
z_str = ((str(z)[0:2] + 'p') + str(z)[3])
elif ((z < 10.0) and isinstance(z, float)):
z_str = ((str(z)[0] + 'p') + str(z)[2])
elif isinstance(z, int):
z_str = (str(z) + 'p0')
try:
return z_str
except (z_str == ''):
print('Key z list parameters not valid. Please fill list with int or float.') | def float_keyz_format(z):
'This function takes a float key z argument and returns its equivalent formatted string.\n ex: 5.3 -> 5p3, or 10.0 -> 10p0'
z_str = ''
if ((z >= 10.0) and isinstance(z, float)):
z_str = ((str(z)[0:2] + 'p') + str(z)[3])
elif ((z < 10.0) and isinstance(z, float)):
z_str = ((str(z)[0] + 'p') + str(z)[2])
elif isinstance(z, int):
z_str = (str(z) + 'p0')
try:
return z_str
except (z_str == ''):
print('Key z list parameters not valid. Please fill list with int or float.')<|docstring|>This function takes a float key z argument and returns its equivalent formatted string.
ex: 5.3 -> 5p3, or 10.0 -> 10p0<|endoftext|> |
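A quick usage sketch for float_keyz_format, illustrating the decimal-point-to-'p' conversion described in the docstring; file_functions.py is assumed importable.

from file_functions import float_keyz_format

print(float_keyz_format(5.3))   # '5p3'
print(float_keyz_format(10.0))  # '10p0'
print(float_keyz_format(2))     # '2p0' (ints get a 'p0' suffix)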
92a8dd79c1b7b11611cb1366e077d3af028e1b6a0df80bb4f93f9cd29aa0769f | def string_to_list(string, format=''):
'Splits a string at each comma and produces a list. format parameter allows the type of the output to\n be designated'
if (string != ''):
str_split = string.split(',')
if (format == 'int'):
out_list = [int(i) for i in str_split]
elif (format == 'float'):
out_list = [float(i) for i in str_split]
else:
out_list = []
return out_list | Splits a string at each comma and produces a list. format parameter allows the type of the output to
be designated | file_functions.py | string_to_list | xaviernogueira/gcs_gui | 4 | python | def string_to_list(string, format=''):
'Splits a string at each comma and produces a list. format parameter allows the type of the output to\n be designated'
if (string != ''):
str_split = string.split(',')
if (format == 'int'):
out_list = [int(i) for i in str_split]
elif (format == 'float'):
out_list = [float(i) for i in str_split]
else:
out_list = []
return out_list | def string_to_list(string, format=''):
'Splits a string at each comma and produces a list. format parameter allows the type of the output to\n be designated'
if (string != ''):
str_split = string.split(',')
if (format == 'int'):
out_list = [int(i) for i in str_split]
elif (format == 'float'):
out_list = [float(i) for i in str_split]
else:
out_list = []
return out_list<|docstring|>Splits a string at each comma and produces a list. format parameter allows the type of the output to
be designated<|endoftext|> |
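A small usage sketch for string_to_list, converting comma-separated strings into typed lists; with an unrecognized or empty format the function returns an empty list. file_functions.py is assumed importable.

from file_functions import string_to_list

print(string_to_list('1,2,3', format='int'))          # [1, 2, 3]
print(string_to_list('0.5,1.0,2.5', format='float'))  # [0.5, 1.0, 2.5]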