from copy import deepcopy
import math
import numpy as np
from scipy import fftpack
# XXX explore cuda optimization at some point.
from ..io.pick import _pick_data_channels, pick_info
from ..utils import verbose, warn, fill_doc, _validate_type
from ..parallel import parallel_func, check_n_jobs
from .tfr import AverageTFR, _get_data
def _check_input_st(x_in, n_fft):
"""Aux function."""
    # check/compute n_fft and zero-pad the input to that length if needed
n_times = x_in.shape[-1]
def _is_power_of_two(n):
return not (n > 0 and ((n & (n - 1))))
if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft):
# Compute next power of 2
n_fft = 2 ** int(math.ceil(math.log(n_times, 2)))
elif n_fft < n_times:
raise ValueError("n_fft cannot be smaller than signal size. "
"Got %s < %s." % (n_fft, n_times))
if n_times < n_fft:
warn('The input signal is shorter ({}) than "n_fft" ({}). '
'Applying zero padding.'.format(x_in.shape[-1], n_fft))
zero_pad = n_fft - n_times
pad_array = np.zeros(x_in.shape[:-1] + (zero_pad,), x_in.dtype)
x_in = np.concatenate((x_in, pad_array), axis=-1)
else:
zero_pad = 0
return x_in, n_fft, zero_pad
def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
"""Precompute stockwell Gaussian windows (in the freq domain)."""
tw = fftpack.fftfreq(n_samp, 1. / sfreq) / n_samp
tw = np.r_[tw[:1], tw[1:][::-1]]
    k = width  # 1 for the classical Stockwell transform
f_range = np.arange(start_f, stop_f, 1)
windows = np.empty((len(f_range), len(tw)), dtype=np.complex128)
for i_f, f in enumerate(f_range):
if f == 0.:
window = np.ones(len(tw))
else:
window = ((f / (np.sqrt(2. * np.pi) * k)) *
np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.))
window /= window.sum() # normalisation
windows[i_f] = fftpack.fft(window)
return windows
def _st(x, start_f, windows):
"""Compute ST based on Ali Moukadem MATLAB code (used in tests)."""
n_samp = x.shape[-1]
ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex128)
# do the work
Fx = fftpack.fft(x)
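    # Concatenating the spectrum with itself lets XF[..., f:f + n_samp] act as
    # a circular shift of the FFT by f bins, i.e. the frequency translation
    # step of the S-transform, without an explicit roll.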
XF = np.concatenate([Fx, Fx], axis=-1)
for i_f, window in enumerate(windows):
f = start_f + i_f
ST[..., i_f, :] = fftpack.ifft(XF[..., f:f + n_samp] * window)
return ST
def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W):
"""Aux function."""
n_samp = x.shape[-1]
n_out = (n_samp - zero_pad)
n_out = n_out // decim + bool(n_out % decim)
psd = np.empty((len(W), n_out))
itc = np.empty_like(psd) if compute_itc else None
X = fftpack.fft(x)
XX = np.concatenate([X, X], axis=-1)
for i_f, window in enumerate(W):
f = start_f + i_f
ST = fftpack.ifft(XX[:, f:f + n_samp] * window)
if zero_pad > 0:
TFR = ST[:, :-zero_pad:decim]
else:
TFR = ST[:, ::decim]
TFR_abs = np.abs(TFR)
TFR_abs[TFR_abs == 0] = 1.
if compute_itc:
TFR /= TFR_abs
itc[i_f] = np.abs(np.mean(TFR, axis=0))
TFR_abs *= TFR_abs
psd[i_f] = np.mean(TFR_abs, axis=0)
return psd, itc
@fill_doc
def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None,
width=1.0, decim=1, return_itc=False, n_jobs=1):
"""Compute power and intertrial coherence using Stockwell (S) transform.
Same computation as `~mne.time_frequency.tfr_stockwell`, but operates on
:class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.
See :footcite:`Stockwell2007,MoukademEtAl2014,WheatEtAl2010,JonesEtAl2006`
for more information.
Parameters
----------
data : ndarray, shape (n_epochs, n_channels, n_times)
The signal to transform.
sfreq : float
The sampling frequency.
    fmin : float | None
        The minimum frequency to include. If None, defaults to the minimum
        positive FFT frequency.
    fmax : float | None
        The maximum frequency to include. If None, defaults to the maximum
        FFT frequency.
n_fft : int | None
The length of the windows used for FFT. If None, it defaults to the
next power of 2 larger than the signal length.
    width : float
        The width of the Gaussian window. Values < 1 increase temporal
        resolution; values > 1 increase frequency resolution. Defaults to 1
        (classical S-transform).
    decim : int
        The decimation factor on the time axis, to reduce memory usage.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
%(n_jobs)s
Returns
-------
    st_power : ndarray
        The power of the Stockwell-transformed data.
The last two dimensions are frequency and time.
itc : ndarray
The intertrial coherence. Only returned if return_itc is True.
freqs : ndarray
The frequencies.
See Also
--------
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
References
----------
.. footbibliography::
"""
_validate_type(data, np.ndarray, 'data')
if data.ndim != 3:
raise ValueError(
'data must be 3D with shape (n_epochs, n_channels, n_times), '
f'got {data.shape}')
n_epochs, n_channels = data.shape[:2]
n_out = data.shape[2] // decim + bool(data.shape[-1] % decim)
data, n_fft_, zero_pad = _check_input_st(data, n_fft)
freqs = fftpack.fftfreq(n_fft_, 1. / sfreq)
if fmin is None:
fmin = freqs[freqs > 0][0]
if fmax is None:
fmax = freqs.max()
start_f = np.abs(freqs - fmin).argmin()
stop_f = np.abs(freqs - fmax).argmin()
freqs = freqs[start_f:stop_f]
W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
n_freq = stop_f - start_f
psd = np.empty((n_channels, n_freq, n_out))
itc = np.empty((n_channels, n_freq, n_out)) if return_itc else None
parallel, my_st, _ = parallel_func(_st_power_itc, n_jobs)
tfrs = parallel(my_st(data[:, c, :], start_f, return_itc, zero_pad,
decim, W)
for c in range(n_channels))
for c, (this_psd, this_itc) in enumerate(iter(tfrs)):
psd[c] = this_psd
if this_itc is not None:
itc[c] = this_itc
return psd, itc, freqs
@verbose
def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None,
width=1.0, decim=1, return_itc=False, n_jobs=1,
verbose=None):
"""Compute Time-Frequency Representation (TFR) using Stockwell Transform.
Same computation as `~mne.time_frequency.tfr_array_stockwell`, but operates
on `~mne.Epochs` objects instead of :class:`NumPy arrays <numpy.ndarray>`.
See :footcite:`Stockwell2007,MoukademEtAl2014,WheatEtAl2010,JonesEtAl2006`
for more information.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
    fmin : float | None
        The minimum frequency to include. If None, defaults to the minimum
        positive FFT frequency.
    fmax : float | None
        The maximum frequency to include. If None, defaults to the maximum
        FFT frequency.
n_fft : int | None
The length of the windows used for FFT. If None, it defaults to the
next power of 2 larger than the signal length.
    width : float
        The width of the Gaussian window. Values < 1 increase temporal
        resolution; values > 1 increase frequency resolution. Defaults to 1
        (classical S-transform).
    decim : int
        The decimation factor on the time axis, to reduce memory usage.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
n_jobs : int
The number of jobs to run in parallel (over channels).
%(verbose)s
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence. Only returned if return_itc is True.
See Also
--------
mne.time_frequency.tfr_array_stockwell
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
Notes
-----
.. versionadded:: 0.9.0
References
----------
.. footbibliography::
"""
# verbose dec is used b/c subfunctions are verbose
data = _get_data(inst, return_itc)
picks = _pick_data_channels(inst.info)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
n_jobs = check_n_jobs(n_jobs)
power, itc, freqs = tfr_array_stockwell(data, sfreq=info['sfreq'],
fmin=fmin, fmax=fmax, n_fft=n_fft,
width=width, decim=decim,
return_itc=return_itc,
n_jobs=n_jobs)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power')
if return_itc:
out = (out, AverageTFR(deepcopy(info), itc, times.copy(),
freqs.copy(), nave, method='stockwell-itc'))
return out
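# A minimal usage sketch (not part of the original module; the shapes and
# frequencies below are made up for illustration): compute Stockwell power and
# inter-trial coherence for a small random epochs array.
if __name__ == '__main__':  # pragma: no cover
    rng = np.random.RandomState(0)
    epochs_data = rng.randn(5, 2, 256)  # (n_epochs, n_channels, n_times)
    st_power, st_itc, st_freqs = tfr_array_stockwell(
        epochs_data, sfreq=128., fmin=4., fmax=30., width=1.0,
        return_itc=True)
    print(st_power.shape, st_itc.shape, st_freqs.shape)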
|
from typing import Optional, Any, Sequence, Tuple, Type, Callable, List
from typing import Union
from tensornetwork.backends import abstract_backend
from tensornetwork.backends.tensorflow import decompositions
from tensornetwork.backends.tensorflow import tensordot2
import functools as fct
import operator as op
# This might seem bad, but pytype treats tf.Tensor as Any anyway, so
# we don't actually lose anything by doing this.
import numpy as np
Tensor = Any
# pylint: disable=abstract-method
class TensorFlowBackend(abstract_backend.AbstractBackend):
"""See base_backend.BaseBackend for documentation."""
def __init__(self) -> None:
# pylint: disable=global-variable-undefined
global tf
super().__init__()
try:
# pylint: disable=import-outside-toplevel
import tensorflow
except ImportError as err:
raise ImportError("Tensorflow not installed, please switch to a "
"different backend or install Tensorflow.") from err
tf = tensorflow
self.name = "tensorflow"
def tensordot(self, a: Tensor, b: Tensor,
axes: Union[int, Sequence[Sequence[int]]]) -> Tensor:
return tensordot2.tensordot(tf, a, b, axes)
def reshape(self, tensor: Tensor, shape: Tensor) -> Tensor:
return tf.reshape(tensor, shape)
def transpose(self, tensor, perm=None) -> Tensor:
return tf.transpose(tensor, perm)
def slice(self, tensor: Tensor, start_indices: Tuple[int, ...],
slice_sizes: Tuple[int, ...]) -> Tensor:
if len(start_indices) != len(slice_sizes):
raise ValueError("Lengths of start_indices and slice_sizes must be"
"identical.")
return tf.slice(tensor, start_indices, slice_sizes)
def svd(
self,
tensor: Tensor,
pivot_axis: int = -1,
max_singular_values: Optional[int] = None,
max_truncation_error: Optional[float] = None,
relative: Optional[bool] = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
return decompositions.svd(
tf,
tensor,
pivot_axis,
max_singular_values,
max_truncation_error,
relative=relative)
def qr(self, tensor: Tensor, pivot_axis: int = -1,
non_negative_diagonal: bool = False) -> Tuple[Tensor, Tensor]:
return decompositions.qr(tf, tensor, pivot_axis, non_negative_diagonal)
def rq(self, tensor: Tensor, pivot_axis: int = -1,
non_negative_diagonal: bool = False) -> Tuple[Tensor, Tensor]:
return decompositions.rq(tf, tensor, pivot_axis, non_negative_diagonal)
def shape_concat(self, values: Tensor, axis: int) -> Tensor:
return tf.concat(values, axis)
def shape_tensor(self, tensor: Tensor) -> Tensor:
return tf.shape(tensor)
def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return tuple(tensor.shape.as_list())
def sparse_shape(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return self.shape_tuple(tensor)
def shape_prod(self, values: Tensor) -> Tensor:
return tf.reduce_prod(values)
def sqrt(self, tensor: Tensor) -> Tensor:
return tf.sqrt(tensor)
def convert_to_tensor(self, tensor: Tensor) -> Tensor:
result = tf.convert_to_tensor(tensor)
return result
def outer_product(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensordot2.tensordot(tf, tensor1, tensor2, 0)
#pylint: disable=unused-argument
def einsum(self,
expression: str,
*tensors: Tensor,
optimize: bool = True) -> Tensor:
return tf.einsum(expression, *tensors)
def norm(self, tensor: Tensor) -> Tensor:
return tf.linalg.norm(tensor)
def eye(self,
N: int,
dtype: Optional[Type[np.number]] = None,
M: Optional[int] = None) -> Tensor:
dtype = dtype if dtype is not None else tf.float64
return tf.eye(num_rows=N, num_columns=M, dtype=dtype)
def ones(self,
shape: Tuple[int, ...],
dtype: Optional[Type[np.number]] = None) -> Tensor:
dtype = dtype if dtype is not None else tf.float64
return tf.ones(shape=shape, dtype=dtype)
def zeros(self,
shape: Tuple[int, ...],
dtype: Optional[Type[np.number]] = None) -> Tensor:
dtype = dtype if dtype is not None else tf.float64
return tf.zeros(shape, dtype=dtype)
def randn(self,
shape: Tuple[int, ...],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None) -> Tensor:
if seed:
tf.random.set_seed(seed)
dtype = dtype if dtype is not None else tf.float64
if (dtype is tf.complex128) or (dtype is tf.complex64):
return tf.complex(
tf.random.normal(shape=shape, dtype=dtype.real_dtype),
tf.random.normal(shape=shape, dtype=dtype.real_dtype))
return tf.random.normal(shape=shape, dtype=dtype)
def random_uniform(self,
shape: Tuple[int, ...],
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None) -> Tensor:
if seed:
tf.random.set_seed(seed)
dtype = dtype if dtype is not None else tf.float64
if (dtype is tf.complex128) or (dtype is tf.complex64):
#pylint: disable=unexpected-keyword-arg
return tf.complex(
tf.random.uniform(
shape=shape,
minval=boundaries[0],
maxval=boundaries[1],
dtype=dtype.real_dtype),
tf.random.uniform(
shape=shape,
minval=boundaries[0],
maxval=boundaries[1],
dtype=dtype.real_dtype))
#pylint: disable=unexpected-keyword-arg
a = tf.random.uniform(
shape=shape, minval=boundaries[0], maxval=boundaries[1], dtype=dtype)
return a
def conj(self, tensor: Tensor) -> Tensor:
return tf.math.conj(tensor)
def eigh(self, matrix: Tensor) -> Tuple[Tensor, Tensor]:
return tf.linalg.eigh(matrix)
def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 + tensor2
def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 - tensor2
def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 * tensor2
def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 / tensor2
def index_update(self, tensor: Tensor, mask: Tensor,
assignee: Tensor) -> Tensor:
#returns a copy (unfortunately)
return tf.where(mask, assignee, tensor)
def inv(self, matrix: Tensor) -> Tensor:
if len(matrix.shape) > 2:
raise ValueError("input to tensorflow backend method `inv` has shape {}. "
"Only matrices are supported.".format(tf.shape(matrix)))
return tf.linalg.inv(matrix)
def broadcast_right_multiplication(self, tensor1: Tensor,
tensor2: Tensor) -> Tensor:
if len(tensor2.shape) != 1:
raise ValueError("only order-1 tensors are allowed for `tensor2`, "
"found `tensor2.shape = {}`".format(tf.shape(tensor2)))
return tensor1 * tensor2
def broadcast_left_multiplication(self, tensor1: Tensor,
tensor2: Tensor) -> Tensor:
if len(tensor1.shape) != 1:
raise ValueError("only order-1 tensors are allowed for `tensor1`,"
" found `tensor1.shape = {}`".format(tf.shape(tensor1)))
t1_broadcast_shape = self.shape_concat(
[self.shape_tensor(tensor1), [1] * (len(tensor2.shape) - 1)], axis=-1)
return tensor2 * self.reshape(tensor1, t1_broadcast_shape)
def sin(self, tensor: Tensor) -> Tensor:
return tf.math.sin(tensor)
def cos(self, tensor: Tensor) -> Tensor:
return tf.math.cos(tensor)
def exp(self, tensor: Tensor) -> Tensor:
return tf.math.exp(tensor)
def log(self, tensor: Tensor) -> Tensor:
return tf.math.log(tensor)
def expm(self, matrix: Tensor) -> Tensor:
if len(matrix.shape) != 2:
raise ValueError("input to tensorflow backend method `expm` has shape {}."
" Only matrices are supported.".format(matrix.shape))
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("input to tensorflow backend method `expm` only supports"
"N*N matrix, {x}*{y} matrix is given".format(
x=matrix.shape[0], y=matrix.shape[1]))
return tf.linalg.expm(matrix)
def jit(self, fun: Callable, *args: List, **kwargs: dict) -> Callable:
# tf.function is slow and bad.
return fun
def sum(self,
tensor: Tensor,
axis: Optional[Sequence[int]] = None,
keepdims: bool = False) -> Tensor:
return tf.math.reduce_sum(tensor, axis=axis, keepdims=keepdims)
def matmul(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
if (tensor1.ndim <= 1) or (tensor2.ndim <= 1):
raise ValueError("inputs to `matmul` have to be a tensors of order > 1,")
return tf.matmul(tensor1, tensor2)
def diagonal(self, tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Return specified diagonals.
If tensor is 2-D, returns the diagonal of tensor with the given offset,
i.e., the collection of elements of the form a[i, i+offset].
If a has more than two dimensions, then the axes specified by
axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by removing
axis1 and axis2 and appending an index to the right equal to the size of the
resulting diagonals.
This function only extracts diagonals. If you
wish to create diagonal matrices from vectors, use diagflat.
Args:
tensor: A tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Axis to be used as the first/second axis of the 2D
sub-arrays from which the diagonals should be taken.
Defaults to second-last and last axis (note this
differs from the NumPy defaults).
These arguments are not supported in the TensorFlow
backend and an error will be raised if they are
specified.
Returns:
      array_of_diagonals: A tensor of rank tensor.ndim - 1 storing
                          the batched diagonals.
"""
if axis1 != -2 or axis2 != -1:
errstr = (f"axis1={axis1}, axis2={axis2} must be -2, -1 (the defaults)"
f"with TensorFlow backend.")
raise NotImplementedError(errstr)
#pylint: disable=unexpected-keyword-arg
return tf.linalg.diag_part(tensor, k=offset)
def diagflat(self, tensor: Tensor, k: int = 0) -> Tensor:
""" Flattens tensor and creates a new matrix of zeros with its elements
on the k'th diagonal.
Args:
tensor: A tensor.
k : The diagonal upon which to place its elements.
Returns:
tensor: A new tensor with all zeros save the specified diagonal.
"""
#pylint: disable=unexpected-keyword-arg
return tf.linalg.diag(tensor, k=k)
def trace(self, tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Return summed entries along diagonals.
If tensor is 2-D, the sum is over the
diagonal of tensor with the given offset,
i.e., the collection of elements of the form a[i, i+offset].
If a has more than two dimensions, then the axes specified by
axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is
summed.
Args:
tensor: A tensor.
      offset: Offset of the diagonal from the main diagonal.
              Non-zero values are not supported in the TensorFlow
              backend and an error will be raised if one is
              specified.
      axis1, axis2: Axis to be used as the first/second axis of the 2D
                    sub-arrays from which the diagonals should be taken.
                    Defaults to the second-last and last axis.
                    Non-default values are not supported in the
                    TensorFlow backend and an error will be raised if
                    they are specified.
Returns:
array_of_diagonals: The batched summed diagonals.
"""
if offset != 0:
errstr = (f"offset = {offset} must be 0 (the default)"
f"with TensorFlow backend.")
raise NotImplementedError(errstr)
if axis1 == axis2:
raise ValueError(f"axis1 = {axis1} cannot equal axis2 = {axis2}")
N = len(tensor.shape)
if N > 25:
raise ValueError(f"Currently only tensors with ndim <= 25 can be traced"
f"in the TensorFlow backend (yours was {N})")
if axis1 < 0:
axis1 = N+axis1
if axis2 < 0:
axis2 = N+axis2
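    # Build an einsum expression that gives axis1 and axis2 the same label
    # ('a') and keeps distinct labels for the remaining axes, so tf.einsum
    # sums over the shared diagonal and returns the batched trace.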
inds = list(map(chr, range(98, 98+N)))
indsout = [i for n, i in enumerate(inds) if n not in (axis1, axis2)]
inds[axis1] = 'a'
inds[axis2] = 'a'
return tf.einsum(''.join(inds) + '->' +''.join(indsout), tensor)
def abs(self, tensor: Tensor) -> Tensor:
"""
Returns the elementwise absolute value of tensor.
Args:
tensor: An input tensor.
Returns:
tensor: Its elementwise absolute value.
"""
return tf.math.abs(tensor)
def sign(self, tensor: Tensor) -> Tensor:
"""
Returns an elementwise tensor with entries
y[i] = 1, 0, -1 where tensor[i] > 0, == 0, and < 0 respectively.
For complex input the behaviour of this function may depend on the backend.
The TensorFlow version returns y[i] = x[i] / abs(x[i]).
Args:
tensor: The input tensor.
"""
return tf.math.sign(tensor)
def item(self, tensor):
numel = 0
if len(tensor.shape) > 0:
numel = fct.reduce(op.mul, tensor.shape)
if numel != 1:
raise ValueError(f"expected tensor with one element, "
f"got {tensor.shape}")
if numel == 1:
return tensor[0]
return tensor
def power(self, a: Tensor, b: Union[Tensor, float]) -> Tensor:
"""
Returns the exponentiation of tensor a raised to b.
If b is a tensor, then the exponentiation is element-wise
between the two tensors, with a as the base and b as the power.
Note that a and b must be broadcastable to the same shape if
b is a tensor.
If b is a scalar, then the exponentiation is each value in a
raised to the power of b.
Args:
a: The tensor containing the bases.
b: The tensor containing the powers; or a single scalar as the power.
Returns:
The tensor that is each element of a raised to the
power of b. Note that the shape of the returned tensor
is that produced by the broadcast of a and b.
"""
return tf.math.pow(a, b)
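# A minimal usage sketch (not part of the original module; assumes TensorFlow
# is installed): exercise a few of the backend methods on small tensors.
if __name__ == "__main__":  # pragma: no cover
  backend = TensorFlowBackend()
  a = backend.convert_to_tensor(np.eye(3))
  b = backend.convert_to_tensor(np.ones((3, 3)))
  print(backend.shape_tuple(backend.tensordot(a, b, [[1], [0]])))  # (3, 3)
  print(float(backend.trace(b)))  # 3.0
  print(float(backend.norm(a)))  # sqrt(3)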
|
from homeassistant.components.abode import ATTR_DEVICE_ID
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_HUMIDITY,
PERCENTAGE,
TEMP_CELSIUS,
)
from .common import setup_platform
async def test_entity_registry(hass):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, SENSOR_DOMAIN)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get("sensor.environment_sensor_humidity")
assert entry.unique_id == "13545b21f4bdcd33d9abd461f8443e65-humidity"
async def test_attributes(hass):
"""Test the sensor attributes are correct."""
await setup_platform(hass, SENSOR_DOMAIN)
state = hass.states.get("sensor.environment_sensor_humidity")
assert state.state == "32.0"
assert state.attributes.get(ATTR_DEVICE_ID) == "RF:02148e70"
assert not state.attributes.get("battery_low")
assert not state.attributes.get("no_response")
assert state.attributes.get("device_type") == "LM"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "Environment Sensor Humidity"
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_HUMIDITY
state = hass.states.get("sensor.environment_sensor_lux")
assert state.state == "1.0"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == "lux"
state = hass.states.get("sensor.environment_sensor_temperature")
# Abodepy device JSON reports 19.5, but Home Assistant shows 19.4
assert state.state == "19.4"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
|
import warnings
from sklearn.cluster import KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration, SpectralClustering
import six
import numpy as np
from .._shared.helpers import *
from .format_data import format_data as formatter
# dictionary of models
models = {
'KMeans': KMeans,
'MiniBatchKMeans': MiniBatchKMeans,
'AgglomerativeClustering': AgglomerativeClustering,
'FeatureAgglomeration': FeatureAgglomeration,
'Birch': Birch,
'SpectralClustering': SpectralClustering,
}
try:
from hdbscan import HDBSCAN
_has_hdbscan = True
models.update({'HDBSCAN': HDBSCAN})
except ImportError:
_has_hdbscan = False
@memoize
def cluster(x, cluster='KMeans', n_clusters=3, ndims=None, format_data=True):
"""
Performs clustering analysis and returns a list of cluster labels
Parameters
----------
x : A Numpy array, Pandas Dataframe or list of arrays/dfs
The data to be clustered. You can pass a single array/df or a list.
If a list is passed, the arrays will be stacked and the clustering
will be performed across all lists (i.e. not within each list).
cluster : str or dict
        Model to use to discover clusters. Supported algorithms are: KMeans,
        MiniBatchKMeans, AgglomerativeClustering, Birch, FeatureAgglomeration,
        SpectralClustering and HDBSCAN (default: KMeans). Can be passed as a
        string, but for finer control of the model parameters, pass as a
        dictionary, e.g. cluster={'model': 'KMeans', 'params': {'max_iter': 100}}.
See scikit-learn specific model docs for details on parameters supported for
each model.
n_clusters : int
Number of clusters to discover. Not required for HDBSCAN.
format_data : bool
Whether or not to first call the format_data function (default: True).
    ndims : None
        Deprecated argument. Please use the new analyze function to perform
        combinations of transformations.
Returns
----------
    cluster_labels : list
        A list of cluster labels.
"""
    if cluster is None:
return x
elif (isinstance(cluster, six.string_types) and cluster=='HDBSCAN') or \
(isinstance(cluster, dict) and cluster['model']=='HDBSCAN'):
if not _has_hdbscan:
raise ImportError('HDBSCAN is not installed. Please install hdbscan>=0.8.11')
    if ndims is not None:
warnings.warn('The ndims argument is now deprecated. Ignoring dimensionality reduction step.')
if format_data:
x = formatter(x, ppca=True)
# if reduce is a string, find the corresponding model
if isinstance(cluster, six.string_types):
model = models[cluster]
if cluster != 'HDBSCAN':
model_params = {
'n_clusters' : n_clusters
}
else:
model_params = {}
# if its a dict, use custom params
elif type(cluster) is dict:
if isinstance(cluster['model'], six.string_types):
model = models[cluster['model']]
model_params = cluster['params']
# initialize model
model = model(**model_params)
# fit the model
model.fit(np.vstack(x))
# return the labels
return list(model.labels_)
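# A minimal usage sketch (not part of the original module; the data and
# parameters are made up for illustration): cluster random points, passing the
# model as a dict for finer parameter control.
if __name__ == '__main__':  # pragma: no cover
    pts = np.random.rand(100, 3)
    labels = cluster(pts, cluster={'model': 'KMeans',
                                   'params': {'n_clusters': 4}},
                     format_data=False)
    print(len(labels), set(labels))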
|
import pytest
import plumbum
from plumbum import local
from plumbum._testtools import (
skip_on_windows
)
try:
from plumbum.cmd import printenv
except ImportError:
pass
@skip_on_windows
class TestEnv:
def test_change_env(self):
with local.env(silly=12):
assert 12 == local.env['silly']
actual = set(x.split('=')[0] for x in printenv().splitlines() if '=' in x)
localenv = set(x[0] for x in local.env)
print(actual, localenv)
assert localenv == actual
assert len(local.env) == len(actual)
def test_dictlike(self):
keys = set(x.split('=')[0] for x in printenv().splitlines() if '=' in x)
values = set(x.split('=', 1)[1].strip() for x in printenv().splitlines() if '=' in x)
assert keys == set(local.env.keys())
assert len(values) == len(set(local.env.values()))
def test_custom_env(self):
with local.env():
items = {'one':'OnE', 'tww':'TWOO'}
local.env.update(items)
assert 'tww' in local.env
local.env.clear()
assert 'tww' not in local.env
def test_item(self):
with local.env():
local.env['simple_plum'] = 'thing'
assert 'simple_plum' in local.env
del local.env['simple_plum']
assert 'simple_plum' not in local.env
local.env['simple_plum'] = 'thing'
assert 'simple_plum' in local.env
assert 'thing' == local.env.pop('simple_plum')
assert 'simple_plum' not in local.env
local.env['simple_plum'] = 'thing'
assert 'simple_plum' not in local.env
@skip_on_windows
def test_home(self):
assert local.env.home == local.env['HOME']
old_home = local.env.home
with local.env():
local.env.home = 'Nobody'
assert local.env.home == local.env['HOME']
assert local.env.home == 'Nobody'
assert local.env.home == old_home
@skip_on_windows
def test_user(self):
assert local.env.user
|
from homeassistant.components.nightscout.const import (
ATTR_DATE,
ATTR_DELTA,
ATTR_DEVICE,
ATTR_DIRECTION,
)
from homeassistant.const import ATTR_ICON, STATE_UNAVAILABLE
from tests.components.nightscout import (
GLUCOSE_READINGS,
init_integration,
init_integration_empty_response,
init_integration_unavailable,
)
async def test_sensor_state(hass):
"""Test sensor state data."""
await init_integration(hass)
test_glucose_sensor = hass.states.get("sensor.blood_sugar")
assert test_glucose_sensor.state == str(
GLUCOSE_READINGS[0].sgv # pylint: disable=maybe-no-member
)
async def test_sensor_error(hass):
"""Test sensor state data."""
await init_integration_unavailable(hass)
test_glucose_sensor = hass.states.get("sensor.blood_sugar")
assert test_glucose_sensor.state == STATE_UNAVAILABLE
async def test_sensor_empty_response(hass):
"""Test sensor state data."""
await init_integration_empty_response(hass)
test_glucose_sensor = hass.states.get("sensor.blood_sugar")
assert test_glucose_sensor.state == STATE_UNAVAILABLE
async def test_sensor_attributes(hass):
"""Test sensor attributes."""
await init_integration(hass)
test_glucose_sensor = hass.states.get("sensor.blood_sugar")
reading = GLUCOSE_READINGS[0]
assert reading is not None
attr = test_glucose_sensor.attributes
assert attr[ATTR_DATE] == reading.date # pylint: disable=maybe-no-member
assert attr[ATTR_DELTA] == reading.delta # pylint: disable=maybe-no-member
assert attr[ATTR_DEVICE] == reading.device # pylint: disable=maybe-no-member
assert attr[ATTR_DIRECTION] == reading.direction # pylint: disable=maybe-no-member
assert attr[ATTR_ICON] == "mdi:arrow-bottom-right"
|
from openwebif.api import CreateDevice
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
ATTR_MEDIA_CURRENTLY_RECORDING = "media_currently_recording"
ATTR_MEDIA_DESCRIPTION = "media_description"
ATTR_MEDIA_END_TIME = "media_end_time"
ATTR_MEDIA_START_TIME = "media_start_time"
CONF_USE_CHANNEL_ICON = "use_channel_icon"
CONF_DEEP_STANDBY = "deep_standby"
CONF_MAC_ADDRESS = "mac_address"
CONF_SOURCE_BOUQUET = "source_bouquet"
DEFAULT_NAME = "Enigma2 Media Player"
DEFAULT_PORT = 80
DEFAULT_SSL = False
DEFAULT_USE_CHANNEL_ICON = False
DEFAULT_USERNAME = "root"
DEFAULT_PASSWORD = "dreambox"
DEFAULT_DEEP_STANDBY = False
DEFAULT_MAC_ADDRESS = ""
DEFAULT_SOURCE_BOUQUET = ""
SUPPORTED_ENIGMA2 = (
SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_TURN_OFF
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_ON
| SUPPORT_PAUSE
| SUPPORT_SELECT_SOURCE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(
CONF_USE_CHANNEL_ICON, default=DEFAULT_USE_CHANNEL_ICON
): cv.boolean,
vol.Optional(CONF_DEEP_STANDBY, default=DEFAULT_DEEP_STANDBY): cv.boolean,
vol.Optional(CONF_MAC_ADDRESS, default=DEFAULT_MAC_ADDRESS): cv.string,
vol.Optional(CONF_SOURCE_BOUQUET, default=DEFAULT_SOURCE_BOUQUET): cv.string,
}
)
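# Example YAML (hypothetical values) accepted by the schema above; keys that
# are omitted fall back to the DEFAULT_* constants defined in this module:
#
#   media_player:
#     - platform: enigma2
#       host: 192.168.1.50
#       name: Living Room Box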
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up of an enigma2 media player."""
if discovery_info:
# Discovery gives us the streaming service port (8001)
# which is not useful as OpenWebif never runs on that port.
# So use the default port instead.
config[CONF_PORT] = DEFAULT_PORT
config[CONF_NAME] = discovery_info["hostname"]
config[CONF_HOST] = discovery_info["host"]
config[CONF_USERNAME] = DEFAULT_USERNAME
config[CONF_PASSWORD] = DEFAULT_PASSWORD
config[CONF_SSL] = DEFAULT_SSL
config[CONF_USE_CHANNEL_ICON] = DEFAULT_USE_CHANNEL_ICON
config[CONF_MAC_ADDRESS] = DEFAULT_MAC_ADDRESS
config[CONF_DEEP_STANDBY] = DEFAULT_DEEP_STANDBY
config[CONF_SOURCE_BOUQUET] = DEFAULT_SOURCE_BOUQUET
device = CreateDevice(
host=config[CONF_HOST],
port=config.get(CONF_PORT),
username=config.get(CONF_USERNAME),
password=config.get(CONF_PASSWORD),
is_https=config[CONF_SSL],
prefer_picon=config.get(CONF_USE_CHANNEL_ICON),
mac_address=config.get(CONF_MAC_ADDRESS),
turn_off_to_deep=config.get(CONF_DEEP_STANDBY),
source_bouquet=config.get(CONF_SOURCE_BOUQUET),
)
add_devices([Enigma2Device(config[CONF_NAME], device)], True)
class Enigma2Device(MediaPlayerEntity):
"""Representation of an Enigma2 box."""
def __init__(self, name, device):
"""Initialize the Enigma2 device."""
self._name = name
self.e2_box = device
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self.e2_box.is_recording_playback:
return STATE_PLAYING
return STATE_OFF if self.e2_box.in_standby else STATE_ON
@property
def supported_features(self):
"""Flag of media commands that are supported."""
return SUPPORTED_ENIGMA2
def turn_off(self):
"""Turn off media player."""
self.e2_box.turn_off()
def turn_on(self):
"""Turn the media player on."""
self.e2_box.turn_on()
@property
def media_title(self):
"""Title of current playing media."""
return self.e2_box.current_service_channel_name
@property
def media_series_title(self):
"""Return the title of current episode of TV show."""
return self.e2_box.current_programme_name
@property
def media_channel(self):
"""Channel of current playing media."""
return self.e2_box.current_service_channel_name
@property
def media_content_id(self):
"""Service Ref of current playing media."""
return self.e2_box.current_service_ref
@property
def media_content_type(self):
"""Type of video currently playing."""
return MEDIA_TYPE_TVSHOW
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self.e2_box.muted
@property
def media_image_url(self):
"""Picon url for the channel."""
return self.e2_box.picon_url
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.e2_box.set_volume(int(volume * 100))
def volume_up(self):
"""Volume up the media player."""
self.e2_box.set_volume(int(self.e2_box.volume * 100) + 5)
def volume_down(self):
"""Volume down media player."""
self.e2_box.set_volume(int(self.e2_box.volume * 100) - 5)
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self.e2_box.volume
def media_stop(self):
"""Send stop command."""
self.e2_box.set_stop()
def media_play(self):
"""Play media."""
self.e2_box.toggle_play_pause()
def media_pause(self):
"""Pause the media player."""
self.e2_box.toggle_play_pause()
def media_next_track(self):
"""Send next track command."""
self.e2_box.set_channel_up()
def media_previous_track(self):
"""Send next track command."""
self.e2_box.set_channel_down()
def mute_volume(self, mute):
"""Mute or unmute."""
self.e2_box.mute_volume()
@property
def source(self):
"""Return the current input source."""
return self.e2_box.current_service_channel_name
@property
def source_list(self):
"""List of available input sources."""
return self.e2_box.source_list
def select_source(self, source):
"""Select input source."""
self.e2_box.select_source(self.e2_box.sources[source])
def update(self):
"""Update state of the media_player."""
self.e2_box.update()
@property
def device_state_attributes(self):
"""Return device specific state attributes.
isRecording: Is the box currently recording.
currservice_fulldescription: Full program description.
currservice_begin: is in the format '21:00'.
currservice_end: is in the format '21:00'.
"""
if self.e2_box.in_standby:
return {}
return {
ATTR_MEDIA_CURRENTLY_RECORDING: self.e2_box.status_info["isRecording"],
ATTR_MEDIA_DESCRIPTION: self.e2_box.status_info[
"currservice_fulldescription"
],
ATTR_MEDIA_START_TIME: self.e2_box.status_info["currservice_begin"],
ATTR_MEDIA_END_TIME: self.e2_box.status_info["currservice_end"],
}
|
import logging
import sys
import os.path
from gensim.corpora import sources, dmlcorpus
PREFIX = 'dmlcz'
AT_HOME = False
if AT_HOME:
SOURCE_LIST = [
sources.DmlCzSource('dmlcz', '/Users/kofola/workspace/dml/data/dmlcz/'),
sources.DmlSource('numdam', '/Users/kofola/workspace/dml/data/numdam/'),
sources.ArxmlivSource('arxmliv', '/Users/kofola/workspace/dml/data/arxmliv/'),
]
RESULT_DIR = '/Users/kofola/workspace/dml/data/results'
else:
SOURCE_LIST = [
sources.DmlCzSource('dmlcz', '/data/dmlcz/data/share'),
sources.DmlSource('numdam', '/data/dmlcz/data/numdam'),
sources.ArxmlivSource('arxmliv', '/data/dmlcz/data/arxmliv'),
]
RESULT_DIR = '/data/dmlcz/xrehurek/results'
def buildDmlCorpus(config):
dml = dmlcorpus.DmlCorpus()
dml.processConfig(config, shuffle=True)
dml.buildDictionary()
dml.dictionary.filterExtremes(noBelow=5, noAbove=0.3) # ignore too (in)frequent words
dml.save(config.resultFile('.pkl'))
dml.saveAsText() # save id mappings and documents as text data (matrix market format)
return dml
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
logging.info("running %s", ' '.join(sys.argv))
program = os.path.basename(sys.argv[0])
# check and process input arguments
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
language = sys.argv[1]
# construct the config, which holds information about sources, data file filenames etc.
config = dmlcorpus.DmlConfig('%s_%s' % (PREFIX, language), resultDir=RESULT_DIR, acceptLangs=[language])
for source in SOURCE_LIST:
config.addSource(source)
buildDmlCorpus(config)
logging.info("finished running %s", program)
|
import logging
from pyvlx import PyVLX, PyVLXException
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PASSWORD, EVENT_HOMEASSISTANT_STOP
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
DOMAIN = "velux"
DATA_VELUX = "data_velux"
SUPPORTED_DOMAINS = ["cover", "scene"]
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the velux component."""
try:
hass.data[DATA_VELUX] = VeluxModule(hass, config[DOMAIN])
hass.data[DATA_VELUX].setup()
await hass.data[DATA_VELUX].async_start()
except PyVLXException as ex:
_LOGGER.exception("Can't connect to velux interface: %s", ex)
return False
for component in SUPPORTED_DOMAINS:
hass.async_create_task(
discovery.async_load_platform(hass, component, DOMAIN, {}, config)
)
return True
class VeluxModule:
"""Abstraction for velux component."""
def __init__(self, hass, domain_config):
"""Initialize for velux component."""
self.pyvlx = None
self._hass = hass
self._domain_config = domain_config
def setup(self):
"""Velux component setup."""
async def on_hass_stop(event):
"""Close connection when hass stops."""
_LOGGER.debug("Velux interface terminated")
await self.pyvlx.disconnect()
self._hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_hass_stop)
host = self._domain_config.get(CONF_HOST)
password = self._domain_config.get(CONF_PASSWORD)
self.pyvlx = PyVLX(host=host, password=password)
async def async_start(self):
"""Start velux component."""
_LOGGER.debug("Velux interface started")
await self.pyvlx.load_scenes()
await self.pyvlx.load_nodes()
|
def convert_to_bytes(resp):
resp = convert_body_to_bytes(resp)
return resp
def convert_to_unicode(resp):
resp = convert_body_to_unicode(resp)
return resp
def convert_body_to_bytes(resp):
"""
If the request body is a string, encode it to bytes (for python3 support)
By default yaml serializes to utf-8 encoded bytestrings.
When this cassette is loaded by python3, it's automatically decoded
into unicode strings. This makes sure that it stays a bytestring, since
that's what all the internal httplib machinery is expecting.
For more info on py3 yaml:
http://pyyaml.org/wiki/PyYAMLDocumentation#Python3support
"""
try:
if resp["body"]["string"] is not None and not isinstance(resp["body"]["string"], bytes):
resp["body"]["string"] = resp["body"]["string"].encode("utf-8")
except (KeyError, TypeError, UnicodeEncodeError):
# The thing we were converting either wasn't a dictionary or didn't
# have the keys we were expecting. Some of the tests just serialize
# and deserialize a string.
# Also, sometimes the thing actually is binary, so if you can't encode
# it, just give up.
pass
return resp
def _convert_string_to_unicode(string):
"""
If the string is bytes, decode it to a string (for python3 support)
"""
result = string
try:
if string is not None and not isinstance(string, str):
result = string.decode("utf-8")
except (TypeError, UnicodeDecodeError, AttributeError):
# Sometimes the string actually is binary or StringIO object,
# so if you can't decode it, just give up.
pass
return result
def convert_body_to_unicode(resp):
"""
If the request or responses body is bytes, decode it to a string
(for python3 support)
"""
if type(resp) is not dict:
# Some of the tests just serialize and deserialize a string.
return _convert_string_to_unicode(resp)
else:
body = resp.get("body")
if body is not None:
try:
body["string"] = _convert_string_to_unicode(body["string"])
except (KeyError, TypeError, AttributeError):
# The thing we were converting either wasn't a dictionary or
# didn't have the keys we were expecting.
# For example request object has no 'string' key.
resp["body"] = _convert_string_to_unicode(body)
return resp
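# A minimal usage sketch (not part of the original module): round-trip a toy
# cassette-style response dict through the two converters.
if __name__ == "__main__":  # pragma: no cover
    response = {"body": {"string": "hello"}, "status": {"code": 200}}
    as_bytes = convert_to_bytes(response)
    print(type(as_bytes["body"]["string"]))  # <class 'bytes'>
    as_text = convert_to_unicode(as_bytes)
    print(type(as_text["body"]["string"]))  # <class 'str'>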
|
from functools import partial
import logging
import requests
from synology.surveillance_station import SurveillanceStation
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
CONF_WHITELIST,
)
from homeassistant.helpers.aiohttp_client import (
async_aiohttp_proxy_web,
async_get_clientsession,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Synology Camera"
DEFAULT_TIMEOUT = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_WHITELIST, default=[]): cv.ensure_list,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Synology IP Camera."""
_LOGGER.warning(
"The Synology integration is deprecated."
" Please use the Synology DSM integration"
" (https://www.home-assistant.io/integrations/synology_dsm/) instead."
" This integration will be removed in version 0.118.0."
)
verify_ssl = config.get(CONF_VERIFY_SSL)
timeout = config.get(CONF_TIMEOUT)
try:
surveillance = await hass.async_add_executor_job(
partial(
SurveillanceStation,
config.get(CONF_URL),
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
verify_ssl=verify_ssl,
timeout=timeout,
)
)
except (requests.exceptions.RequestException, ValueError):
_LOGGER.exception("Error when initializing SurveillanceStation")
return False
cameras = surveillance.get_all_cameras()
# add cameras
devices = []
for camera in cameras:
if not config[CONF_WHITELIST] or camera.name in config[CONF_WHITELIST]:
device = SynologyCamera(surveillance, camera.camera_id, verify_ssl)
devices.append(device)
async_add_entities(devices)
class SynologyCamera(Camera):
"""An implementation of a Synology NAS based IP camera."""
def __init__(self, surveillance, camera_id, verify_ssl):
"""Initialize a Synology Surveillance Station camera."""
super().__init__()
self._surveillance = surveillance
self._camera_id = camera_id
self._verify_ssl = verify_ssl
self._camera = self._surveillance.get_camera(camera_id)
self._motion_setting = self._surveillance.get_motion_setting(camera_id)
self.is_streaming = self._camera.is_enabled
def camera_image(self):
"""Return bytes of camera image."""
return self._surveillance.get_camera_image(self._camera_id)
async def handle_async_mjpeg_stream(self, request):
"""Return a MJPEG stream image response directly from the camera."""
streaming_url = self._camera.video_stream_url
websession = async_get_clientsession(self.hass, self._verify_ssl)
stream_coro = websession.get(streaming_url)
return await async_aiohttp_proxy_web(self.hass, request, stream_coro)
@property
def name(self):
"""Return the name of this device."""
return self._camera.name
@property
def is_recording(self):
"""Return true if the device is recording."""
return self._camera.is_recording
@property
def should_poll(self):
"""Update the recording state periodically."""
return True
def update(self):
"""Update the status of the camera."""
self._surveillance.update()
self._camera = self._surveillance.get_camera(self._camera.camera_id)
self._motion_setting = self._surveillance.get_motion_setting(
self._camera.camera_id
)
self.is_streaming = self._camera.is_enabled
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self._motion_setting.is_enabled
def enable_motion_detection(self):
"""Enable motion detection in the camera."""
self._surveillance.enable_motion_detection(self._camera_id)
def disable_motion_detection(self):
"""Disable motion detection in camera."""
self._surveillance.disable_motion_detection(self._camera_id)
|
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import WiffiEntity
from .const import CREATE_ENTITY_SIGNAL
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up platform for a new integration.
Called by the HA framework after async_forward_entry_setup has been called
during initialization of a new integration (= wiffi).
"""
@callback
def _create_entity(device, metric):
"""Create platform specific entities."""
entities = []
if metric.is_bool:
entities.append(BoolEntity(device, metric, config_entry.options))
async_add_entities(entities)
async_dispatcher_connect(hass, CREATE_ENTITY_SIGNAL, _create_entity)
class BoolEntity(WiffiEntity, BinarySensorEntity):
"""Entity for wiffi metrics which have a boolean value."""
def __init__(self, device, metric, options):
"""Initialize the entity."""
super().__init__(device, metric, options)
self._value = metric.value
self.reset_expiration_date()
@property
def is_on(self):
"""Return the state of the entity."""
return self._value
@callback
def _update_value_callback(self, device, metric):
"""Update the value of the entity.
Called if a new message has been received from the wiffi device.
"""
self.reset_expiration_date()
self._value = metric.value
self.async_write_ha_state()
|
from pytile.errors import TileError
from homeassistant import data_entry_flow
from homeassistant.components.tile import DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_duplicate_error(hass):
"""Test that errors are shown when duplicates are added."""
conf = {
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "123abc",
}
MockConfigEntry(domain=DOMAIN, unique_id="[email protected]", data=conf).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_invalid_credentials(hass):
"""Test that invalid credentials key throws an error."""
conf = {
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "123abc",
}
with patch(
"homeassistant.components.tile.config_flow.async_login",
side_effect=TileError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_auth"}
async def test_step_import(hass):
"""Test that the import step works."""
conf = {
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "123abc",
}
with patch(
"homeassistant.components.tile.async_setup_entry", return_value=True
), patch("homeassistant.components.tile.config_flow.async_login"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "[email protected]"
assert result["data"] == {
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "123abc",
}
async def test_step_user(hass):
"""Test that the user step works."""
conf = {
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "123abc",
}
with patch(
"homeassistant.components.tile.async_setup_entry", return_value=True
), patch("homeassistant.components.tile.config_flow.async_login"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "[email protected]"
assert result["data"] == {
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "123abc",
}
|
import asyncio
from pyvolumio import CannotConnectError, Volumio
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DATA_INFO, DATA_VOLUMIO, DOMAIN
PLATFORMS = ["media_player"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Volumio component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Volumio from a config entry."""
volumio = Volumio(
entry.data[CONF_HOST], entry.data[CONF_PORT], async_get_clientsession(hass)
)
try:
info = await volumio.get_system_version()
except CannotConnectError as error:
raise ConfigEntryNotReady from error
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
DATA_VOLUMIO: volumio,
DATA_INFO: info,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
from uuid import uuid4
from pysmartthings import CAPABILITIES, AppEntity, Capability
from homeassistant.components.smartthings import smartapp
from homeassistant.components.smartthings.const import (
CONF_REFRESH_TOKEN,
DATA_MANAGER,
DOMAIN,
)
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import MockConfigEntry
async def test_update_app(hass, app):
"""Test update_app does not save if app is current."""
await smartapp.update_app(hass, app)
assert app.save.call_count == 0
async def test_update_app_updated_needed(hass, app):
"""Test update_app updates when an app is needed."""
mock_app = Mock(AppEntity)
mock_app.app_name = "Test"
await smartapp.update_app(hass, mock_app)
assert mock_app.save.call_count == 1
assert mock_app.app_name == "Test"
assert mock_app.display_name == app.display_name
assert mock_app.description == app.description
assert mock_app.webhook_target_url == app.webhook_target_url
assert mock_app.app_type == app.app_type
assert mock_app.single_instance == app.single_instance
assert mock_app.classifications == app.classifications
async def test_smartapp_update_saves_token(
hass, smartthings_mock, location, device_factory
):
"""Test update saves token."""
# Arrange
entry = MockConfigEntry(
domain=DOMAIN, data={"installed_app_id": str(uuid4()), "app_id": str(uuid4())}
)
entry.add_to_hass(hass)
app = Mock()
app.app_id = entry.data["app_id"]
request = Mock()
request.installed_app_id = entry.data["installed_app_id"]
request.auth_token = str(uuid4())
request.refresh_token = str(uuid4())
request.location_id = location.location_id
# Act
await smartapp.smartapp_update(hass, request, None, app)
# Assert
assert entry.data[CONF_REFRESH_TOKEN] == request.refresh_token
async def test_smartapp_uninstall(hass, config_entry):
"""Test the config entry is unloaded when the app is uninstalled."""
config_entry.add_to_hass(hass)
app = Mock()
app.app_id = config_entry.data["app_id"]
request = Mock()
request.installed_app_id = config_entry.data["installed_app_id"]
with patch.object(hass.config_entries, "async_remove") as remove:
await smartapp.smartapp_uninstall(hass, request, None, app)
assert remove.call_count == 1
async def test_smartapp_webhook(hass):
"""Test the smartapp webhook calls the manager."""
manager = Mock()
manager.handle_request = AsyncMock(return_value={})
hass.data[DOMAIN][DATA_MANAGER] = manager
request = Mock()
request.headers = []
request.json = AsyncMock(return_value={})
result = await smartapp.smartapp_webhook(hass, "", request)
assert result.body == b"{}"
async def test_smartapp_sync_subscriptions(
hass, smartthings_mock, device_factory, subscription_factory
):
"""Test synchronization adds and removes and ignores unused."""
smartthings_mock.subscriptions.return_value = [
subscription_factory(Capability.thermostat),
subscription_factory(Capability.switch),
subscription_factory(Capability.switch_level),
]
devices = [
device_factory("", [Capability.battery, "ping"]),
device_factory("", [Capability.switch, Capability.switch_level]),
device_factory("", [Capability.switch, Capability.execute]),
]
await smartapp.smartapp_sync_subscriptions(
hass, str(uuid4()), str(uuid4()), str(uuid4()), devices
)
assert smartthings_mock.subscriptions.call_count == 1
assert smartthings_mock.delete_subscription.call_count == 1
assert smartthings_mock.create_subscription.call_count == 1
async def test_smartapp_sync_subscriptions_up_to_date(
hass, smartthings_mock, device_factory, subscription_factory
):
"""Test synchronization does nothing when current."""
smartthings_mock.subscriptions.return_value = [
subscription_factory(Capability.battery),
subscription_factory(Capability.switch),
subscription_factory(Capability.switch_level),
]
devices = [
device_factory("", [Capability.battery, "ping"]),
device_factory("", [Capability.switch, Capability.switch_level]),
device_factory("", [Capability.switch]),
]
await smartapp.smartapp_sync_subscriptions(
hass, str(uuid4()), str(uuid4()), str(uuid4()), devices
)
assert smartthings_mock.subscriptions.call_count == 1
assert smartthings_mock.delete_subscription.call_count == 0
assert smartthings_mock.create_subscription.call_count == 0
async def test_smartapp_sync_subscriptions_limit_warning(
hass, smartthings_mock, device_factory, subscription_factory, caplog
):
"""Test synchronization over the limit logs a warning."""
smartthings_mock.subscriptions.return_value = []
devices = [
device_factory("", CAPABILITIES),
]
await smartapp.smartapp_sync_subscriptions(
hass, str(uuid4()), str(uuid4()), str(uuid4()), devices
)
assert (
"Some device attributes may not receive push updates and there may be "
"subscription creation failures" in caplog.text
)
async def test_smartapp_sync_subscriptions_handles_exceptions(
hass, smartthings_mock, device_factory, subscription_factory
):
"""Test synchronization does nothing when current."""
smartthings_mock.delete_subscription.side_effect = Exception
smartthings_mock.create_subscription.side_effect = Exception
smartthings_mock.subscriptions.return_value = [
subscription_factory(Capability.battery),
subscription_factory(Capability.switch),
subscription_factory(Capability.switch_level),
]
devices = [
device_factory("", [Capability.thermostat, "ping"]),
device_factory("", [Capability.switch, Capability.switch_level]),
device_factory("", [Capability.switch]),
]
await smartapp.smartapp_sync_subscriptions(
hass, str(uuid4()), str(uuid4()), str(uuid4()), devices
)
assert smartthings_mock.subscriptions.call_count == 1
assert smartthings_mock.delete_subscription.call_count == 1
assert smartthings_mock.create_subscription.call_count == 1
|
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
def load_model_class(model_path):
"""
    Load a class by importing it from a string path like:
'module.models.MyModel'.
This mechanism allows extension and customization of
the Entry model class.
"""
dot = model_path.rindex('.')
module_name = model_path[:dot]
class_name = model_path[dot + 1:]
try:
_class = getattr(import_module(module_name), class_name)
return _class
except (ImportError, AttributeError):
raise ImproperlyConfigured('%s cannot be imported' % model_path)
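# A minimal usage sketch (not part of the original module; the dotted path is
# just an example): resolve a class from its import path at runtime.
if __name__ == '__main__':  # pragma: no cover
    klass = load_model_class('collections.OrderedDict')
    print(klass.__name__)  # OrderedDict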
|
import chainer
from chainercv.links import Conv2DBNActiv
from chainercv.links.model.yolo.yolo_v2 import _leaky_relu
from chainercv.links.model.yolo.yolo_v2 import _maxpool
from chainercv.links.model.yolo import YOLOv2Base
class DarknetExtractor(chainer.ChainList):
"""A Darknet based feature extractor for YOLOv2Tiny.
This is a feature extractor for
:class:`~chainercv.links.model.yolo.YOLOv2Tiny`
"""
insize = 416
grid = 13
def __init__(self):
super(DarknetExtractor, self).__init__()
# Darknet
for k in range(7):
self.append(Conv2DBNActiv(16 << k, 3, pad=1, activ=_leaky_relu))
# additional link
self.append(Conv2DBNActiv(1024, 3, pad=1, activ=_leaky_relu))
def forward(self, x):
"""Compute a feature map from a batch of images.
Args:
x (ndarray): An array holding a batch of images.
The images should be resized to :math:`416\\times 416`.
Returns:
            Variable: A feature map computed from the batch of images.
"""
h = x
for i, link in enumerate(self):
h = link(h)
if i < 5:
h = _maxpool(h, 2)
elif i == 5:
h = _maxpool(h, 2, stride=1)
return h
class YOLOv2Tiny(YOLOv2Base):
"""YOLOv2 tiny.
    This is a model of YOLOv2 tiny, a.k.a. Tiny YOLO.
This model uses :class:`~chainercv.links.model.yolo.DarknetExtractor` as
its feature extractor.
Args:
n_fg_class (int): The number of classes excluding the background.
pretrained_model (string): The weight file to be loaded.
This can take :obj:`'voc0712'`, `filepath` or :obj:`None`.
The default value is :obj:`None`.
* :obj:`'voc0712'`: Load weights trained on trainval split of \
PASCAL VOC 2007 and 2012. \
The weight file is downloaded and cached automatically. \
:obj:`n_fg_class` must be :obj:`20` or :obj:`None`. \
These weights were converted from the darknet model \
provided by `the original implementation \
<https://pjreddie.com/darknet/yolov2/>`_. \
The conversion code is \
`chainercv/examples/yolo/darknet2npz.py`.
* `filepath`: A path of npz file. In this case, :obj:`n_fg_class` \
must be specified properly.
* :obj:`None`: Do not load weights.
"""
_extractor = DarknetExtractor
_models = {
'voc0712': {
'param': {'n_fg_class': 20},
'url': 'https://chainercv-models.preferred.jp/'
'yolo_v2_tiny_voc0712_converted_2018_10_19.npz',
'cv2': True
},
}
_anchors = (
(1.19, 1.08),
(4.41, 3.42),
(11.38, 6.63),
(5.11, 9.42),
(10.52, 16.62))
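# Hedged usage sketch (not part of the original file): build the model as the
# docstring above describes. Passing pretrained_model='voc0712' would download
# the converted VOC07+12 weights; no weights are loaded here to keep the
# example self-contained.
if __name__ == '__main__':
    model = YOLOv2Tiny(n_fg_class=20)
    print(DarknetExtractor.insize, DarknetExtractor.grid)  # 416 13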
|
import coverage
from tests.coveragetest import CoverageTest
from tests.goldtest import compare, gold_path
class AnnotationGoldTest1(CoverageTest):
"""Test the annotate feature with gold files."""
def make_multi(self):
"""Make a few source files we need for the tests."""
self.make_file("multi.py", """\
import a.a
import b.b
a.a.a(1)
b.b.b(2)
""")
self.make_file("a/__init__.py")
self.make_file("a/a.py", """\
def a(x):
if x == 1:
print("x is 1")
else:
print("x is not 1")
""")
self.make_file("b/__init__.py")
self.make_file("b/b.py", """\
def b(x):
msg = "x is %s" % x
print(msg)
""")
def test_multi(self):
self.make_multi()
cov = coverage.Coverage()
self.start_import_stop(cov, "multi")
cov.annotate()
compare(gold_path("annotate/multi"), ".", "*,cover")
def test_annotate_dir(self):
self.make_multi()
cov = coverage.Coverage(source=["."])
self.start_import_stop(cov, "multi")
cov.annotate(directory="out_anno_dir")
compare(gold_path("annotate/anno_dir"), "out_anno_dir", "*,cover")
def test_encoding(self):
self.make_file("utf8.py", """\
# -*- coding: utf-8 -*-
# This comment has an accent: é
print("spam eggs")
""")
cov = coverage.Coverage()
self.start_import_stop(cov, "utf8")
cov.annotate()
compare(gold_path("annotate/encodings"), ".", "*,cover")
def test_white(self):
self.make_file("white.py", """\
# A test case sent to me by Steve White
def f(self):
if self==1:
pass
elif self.m('fred'):
pass
elif (g==1) and (b==2):
pass
elif self.m('fred')==True:
pass
elif ((g==1) and (b==2))==True:
pass
else:
pass
def g(x):
if x == 1:
a = 1
else:
a = 2
g(1)
def h(x):
if 0: #pragma: no cover
pass
if x == 1:
a = 1
else:
a = 2
h(2)
""")
cov = coverage.Coverage()
self.start_import_stop(cov, "white")
cov.annotate()
compare(gold_path("annotate/annotate"), ".", "*,cover")
|
from test import CollectorTestCase
from test import get_collector_config
from servertechpdu import ServerTechPDUCollector
class TestServerTechPDUCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('ServerTechPDUCollector', {
'allowed_names': allowed_names,
'interval': 1
})
self.collector = ServerTechPDUCollector(config, None)
def test_import(self):
self.assertTrue(ServerTechPDUCollector)
|
import os
from datetime import datetime
from dateutil.tz import gettz, tzlocal
from nikola.plugin_categories import Command
class CommandStatus(Command):
"""Display site status."""
name = "status"
doc_purpose = "display site status"
doc_description = "Show information about the posts and site deployment."
doc_usage = '[-d|--list-drafts] [-m|--list-modified] [-p|--list-private] [-P|--list-published] [-s|--list-scheduled]'
logger = None
cmd_options = [
{
'name': 'list_drafts',
'short': 'd',
'long': 'list-drafts',
'type': bool,
'default': False,
'help': 'List all drafts',
},
{
'name': 'list_modified',
'short': 'm',
'long': 'list-modified',
'type': bool,
'default': False,
'help': 'List all modified files since last deployment',
},
{
'name': 'list_private',
'short': 'p',
'long': 'list-private',
'type': bool,
'default': False,
'help': 'List all private posts',
},
{
'name': 'list_published',
'short': 'P',
'long': 'list-published',
'type': bool,
'default': False,
'help': 'List all published posts',
},
{
'name': 'list_scheduled',
'short': 's',
'long': 'list-scheduled',
'type': bool,
'default': False,
'help': 'List all scheduled posts',
},
]
def _execute(self, options, args):
"""Display site status."""
self.site.scan_posts()
last_deploy = self.site.state.get('last_deploy')
if last_deploy is not None:
last_deploy = datetime.strptime(last_deploy, "%Y-%m-%dT%H:%M:%S.%f")
last_deploy_offset = datetime.utcnow() - last_deploy
else:
print("It does not seem like you've ever deployed the site (or cache missing).")
if last_deploy:
fmod_since_deployment = []
for root, dirs, files in os.walk(self.site.config["OUTPUT_FOLDER"], followlinks=True):
if not dirs and not files:
continue
for fname in files:
fpath = os.path.join(root, fname)
fmodtime = datetime.fromtimestamp(os.stat(fpath).st_mtime)
if fmodtime.replace(tzinfo=tzlocal()) > last_deploy.replace(tzinfo=gettz("UTC")).astimezone(tz=tzlocal()):
fmod_since_deployment.append(fpath)
if len(fmod_since_deployment) > 0:
print("{0} output files modified since last deployment {1} ago.".format(str(len(fmod_since_deployment)), self.human_time(last_deploy_offset)))
if options['list_modified']:
for fpath in fmod_since_deployment:
print("Modified: '{0}'".format(fpath))
else:
print("Last deployment {0} ago.".format(self.human_time(last_deploy_offset)))
now = datetime.utcnow().replace(tzinfo=gettz("UTC"))
posts_count = len(self.site.all_posts)
# find all published posts
posts_published = [post for post in self.site.all_posts if post.use_in_feeds]
posts_published = sorted(posts_published, key=lambda post: post.source_path)
# find all private posts
posts_private = [post for post in self.site.all_posts if post.is_private]
posts_private = sorted(posts_private, key=lambda post: post.source_path)
# find all drafts
posts_drafts = [post for post in self.site.all_posts if post.is_draft]
posts_drafts = sorted(posts_drafts, key=lambda post: post.source_path)
# find all scheduled posts with offset from now until publishing time
posts_scheduled = [
(post.date - now, post) for post in self.site.all_posts
if post.publish_later and not (post.is_draft or post.is_private)
]
posts_scheduled = sorted(posts_scheduled, key=lambda offset_post: (offset_post[0], offset_post[1].source_path))
if len(posts_scheduled) > 0:
if options['list_scheduled']:
for offset, post in posts_scheduled:
print("Scheduled: '{1}' ({2}; source: {3}) in {0}".format(self.human_time(offset), post.meta('title'), post.permalink(), post.source_path))
else:
offset, post = posts_scheduled[0]
print("{0} to next scheduled post ('{1}'; {2}; source: {3}).".format(self.human_time(offset), post.meta('title'), post.permalink(), post.source_path))
if options['list_drafts']:
for post in posts_drafts:
print("Draft: '{0}' ({1}; source: {2})".format(post.meta('title'), post.permalink(), post.source_path))
if options['list_private']:
for post in posts_private:
print("Private: '{0}' ({1}; source: {2})".format(post.meta('title'), post.permalink(), post.source_path))
if options['list_published']:
for post in posts_published:
print("Published: '{0}' ({1}; source: {2})".format(post.meta('title'), post.permalink(), post.source_path))
print("{0} posts in total, {1} scheduled, {2} drafts, {3} private and {4} published.".format(posts_count, len(posts_scheduled), len(posts_drafts), len(posts_private), len(posts_published)))
def human_time(self, dt):
"""Translate time into a human-friendly representation."""
days = dt.days
hours = dt.seconds / 60 // 60
minutes = dt.seconds / 60 - (hours * 60)
if days > 0:
return "{0:.0f} days and {1:.0f} hours".format(days, hours)
elif hours > 0:
return "{0:.0f} hours and {1:.0f} minutes".format(hours, minutes)
elif minutes:
return "{0:.0f} minutes".format(minutes)
return False
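# Illustrative check (hypothetical, standalone) of the arithmetic used in
# human_time() above: a 3 h 20 min offset formats as "3 hours and 20 minutes".
if __name__ == '__main__':
    from datetime import timedelta
    delta = timedelta(hours=3, minutes=20)
    hours = delta.seconds / 60 // 60  # 3.0
    minutes = delta.seconds / 60 - (hours * 60)  # 20.0
    print("{0:.0f} hours and {1:.0f} minutes".format(hours, minutes))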
|
import pytest
from homeassistant.components import sensor
import homeassistant.components.geo_rss_events.sensor as geo_rss_events
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import MagicMock, patch
from tests.common import assert_setup_component, async_fire_time_changed
URL = "http://geo.rss.local/geo_rss_events.xml"
VALID_CONFIG_WITH_CATEGORIES = {
sensor.DOMAIN: [
{
"platform": "geo_rss_events",
geo_rss_events.CONF_URL: URL,
geo_rss_events.CONF_CATEGORIES: ["Category 1"],
}
]
}
VALID_CONFIG = {
sensor.DOMAIN: [{"platform": "geo_rss_events", geo_rss_events.CONF_URL: URL}]
}
"""Test the GeoRss service updater."""
@pytest.fixture
def mock_feed():
"""Pytest fixture for homeassistant.components.geo_rss_events.sensor.GenericFeed."""
with patch(
"homeassistant.components.geo_rss_events.sensor.GenericFeed"
) as mock_feed:
yield mock_feed
def _generate_mock_feed_entry(
external_id, title, distance_to_home, coordinates, category
):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
feed_entry.category = category
return feed_entry
async def test_setup(hass, mock_feed):
"""Test the general setup of the platform."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234", "Title 1", 15.5, (-31.0, 150.0), "Category 1"
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (-31.1, 150.1), "Category 1"
)
mock_feed.return_value.update.return_value = "OK", [mock_entry_1, mock_entry_2]
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
with assert_setup_component(1, sensor.DOMAIN):
assert await async_setup_component(hass, sensor.DOMAIN, VALID_CONFIG)
# Artificially trigger update.
hass.bus.fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
state = hass.states.get("sensor.event_service_any")
assert state is not None
assert state.name == "Event Service Any"
assert int(state.state) == 2
assert state.attributes == {
ATTR_FRIENDLY_NAME: "Event Service Any",
ATTR_UNIT_OF_MEASUREMENT: "Events",
ATTR_ICON: "mdi:alert",
"Title 1": "16km",
"Title 2": "20km",
}
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed.return_value.update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + geo_rss_events.SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
state = hass.states.get("sensor.event_service_any")
assert int(state.state) == 2
# Simulate an update - empty data, removes all entities
mock_feed.return_value.update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 2 * geo_rss_events.SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
state = hass.states.get("sensor.event_service_any")
assert int(state.state) == 0
assert state.attributes == {
ATTR_FRIENDLY_NAME: "Event Service Any",
ATTR_UNIT_OF_MEASUREMENT: "Events",
ATTR_ICON: "mdi:alert",
}
async def test_setup_with_categories(hass, mock_feed):
"""Test the general setup of the platform."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234", "Title 1", 15.5, (-31.0, 150.0), "Category 1"
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (-31.1, 150.1), "Category 1"
)
mock_feed.return_value.update.return_value = "OK", [mock_entry_1, mock_entry_2]
with assert_setup_component(1, sensor.DOMAIN):
assert await async_setup_component(
hass, sensor.DOMAIN, VALID_CONFIG_WITH_CATEGORIES
)
# Artificially trigger update.
hass.bus.fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
state = hass.states.get("sensor.event_service_category_1")
assert state is not None
assert state.name == "Event Service Category 1"
assert int(state.state) == 2
assert state.attributes == {
ATTR_FRIENDLY_NAME: "Event Service Category 1",
ATTR_UNIT_OF_MEASUREMENT: "Events",
ATTR_ICON: "mdi:alert",
"Title 1": "16km",
"Title 2": "20km",
}
|
from nikola.plugin_categories import Command
class CommandNewPage(Command):
"""Create a new page."""
name = "new_page"
doc_usage = "[options] [path]"
doc_purpose = "create a new page in the site"
cmd_options = [
{
'name': 'title',
'short': 't',
'long': 'title',
'type': str,
'default': '',
'help': 'Title for the page.'
},
{
'name': 'author',
'short': 'a',
'long': 'author',
'type': str,
'default': '',
'help': 'Author of the post.'
},
{
'name': 'onefile',
'short': '1',
'type': bool,
'default': False,
'help': 'Create the page with embedded metadata (single file format)'
},
{
'name': 'twofile',
'short': '2',
'type': bool,
'default': False,
'help': 'Create the page with separate metadata (two file format)'
},
{
'name': 'edit',
'short': 'e',
'type': bool,
'default': False,
'help': 'Open the page (and meta file, if any) in $EDITOR after creation.'
},
{
'name': 'content_format',
'short': 'f',
'long': 'format',
'type': str,
'default': '',
'help': 'Markup format for the page (use --available-formats for list)',
},
{
'name': 'available-formats',
'short': 'F',
'long': 'available-formats',
'type': bool,
'default': False,
'help': 'List all available input formats'
},
{
'name': 'import',
'short': 'i',
'long': 'import',
'type': str,
'default': '',
'help': 'Import an existing file instead of creating a placeholder'
},
]
def _execute(self, options, args):
"""Create a new page."""
        # Defaults for some values that don’t apply to pages, plus the is_page option itself.
options['tags'] = ''
options['schedule'] = False
options['is_page'] = True
options['date-path'] = False
        # Even though page creation was split out into `new_page`, the work is
        # delegated to the `new_post` command to avoid duplicating its code.
p = self.site.plugin_manager.getPluginByName('new_post', 'Command').plugin_object
return p.execute(options, args)
|
import io
import os
import sys
import unittest
import cherrypy
from cherrypy.test import helper
localDir = os.path.join(os.getcwd(), os.path.dirname(__file__))
def StringIOFromNative(x):
return io.StringIO(str(x))
def setup_server():
@cherrypy.config(foo='this', bar='that')
class Root:
def __init__(self):
cherrypy.config.namespaces['db'] = self.db_namespace
def db_namespace(self, k, v):
if k == 'scheme':
self.db = v
@cherrypy.expose(alias=('global_', 'xyz'))
def index(self, key):
return cherrypy.request.config.get(key, 'None')
@cherrypy.expose
def repr(self, key):
return repr(cherrypy.request.config.get(key, None))
@cherrypy.expose
def dbscheme(self):
return self.db
@cherrypy.expose
@cherrypy.config(**{'request.body.attempt_charsets': ['utf-16']})
def plain(self, x):
return x
favicon_ico = cherrypy.tools.staticfile.handler(
filename=os.path.join(localDir, '../favicon.ico'))
@cherrypy.config(foo='this2', baz='that2')
class Foo:
@cherrypy.expose
def index(self, key):
return cherrypy.request.config.get(key, 'None')
nex = index
@cherrypy.expose
@cherrypy.config(**{'response.headers.X-silly': 'sillyval'})
def silly(self):
return 'Hello world'
# Test the expose and config decorators
@cherrypy.config(foo='this3', **{'bax': 'this4'})
@cherrypy.expose
def bar(self, key):
return repr(cherrypy.request.config.get(key, None))
class Another:
@cherrypy.expose
def index(self, key):
return str(cherrypy.request.config.get(key, 'None'))
def raw_namespace(key, value):
if key == 'input.map':
handler = cherrypy.request.handler
def wrapper():
params = cherrypy.request.params
for name, coercer in value.copy().items():
try:
params[name] = coercer(params[name])
except KeyError:
pass
return handler()
cherrypy.request.handler = wrapper
elif key == 'output':
handler = cherrypy.request.handler
def wrapper():
# 'value' is a type (like int or str).
return value(handler())
cherrypy.request.handler = wrapper
@cherrypy.config(**{'raw.output': repr})
class Raw:
@cherrypy.expose
@cherrypy.config(**{'raw.input.map': {'num': int}})
def incr(self, num):
return num + 1
ioconf = StringIOFromNative("""
[/]
neg: -1234
filename: os.path.join(sys.prefix, "hello.py")
thing1: cherrypy.lib.httputil.response_codes[404]
thing2: __import__('cherrypy.tutorial', globals(), locals(), ['']).thing2
complex: 3+2j
mul: 6*3
ones: "11"
twos: "22"
stradd: %%(ones)s + %%(twos)s + "33"
[/favicon.ico]
tools.staticfile.filename = %r
""" % os.path.join(localDir, 'static/dirback.jpg'))
root = Root()
root.foo = Foo()
root.raw = Raw()
app = cherrypy.tree.mount(root, config=ioconf)
app.request_class.namespaces['raw'] = raw_namespace
cherrypy.tree.mount(Another(), '/another')
cherrypy.config.update({'luxuryyacht': 'throatwobblermangrove',
'db.scheme': r'sqlite///memory',
})
# Client-side code #
class ConfigTests(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def testConfig(self):
tests = [
('/', 'nex', 'None'),
('/', 'foo', 'this'),
('/', 'bar', 'that'),
('/xyz', 'foo', 'this'),
('/foo/', 'foo', 'this2'),
('/foo/', 'bar', 'that'),
('/foo/', 'bax', 'None'),
('/foo/bar', 'baz', "'that2'"),
('/foo/nex', 'baz', 'that2'),
# If 'foo' == 'this', then the mount point '/another' leaks into
# '/'.
('/another/', 'foo', 'None'),
]
for path, key, expected in tests:
self.getPage(path + '?key=' + key)
self.assertBody(expected)
expectedconf = {
# From CP defaults
'tools.log_headers.on': False,
'tools.log_tracebacks.on': True,
'request.show_tracebacks': True,
'log.screen': False,
'environment': 'test_suite',
'engine.autoreload.on': False,
# From global config
'luxuryyacht': 'throatwobblermangrove',
# From Root._cp_config
'bar': 'that',
# From Foo._cp_config
'baz': 'that2',
# From Foo.bar._cp_config
'foo': 'this3',
'bax': 'this4',
}
for key, expected in expectedconf.items():
self.getPage('/foo/bar?key=' + key)
self.assertBody(repr(expected))
def testUnrepr(self):
self.getPage('/repr?key=neg')
self.assertBody('-1234')
self.getPage('/repr?key=filename')
self.assertBody(repr(os.path.join(sys.prefix, 'hello.py')))
self.getPage('/repr?key=thing1')
self.assertBody(repr(cherrypy.lib.httputil.response_codes[404]))
if not getattr(cherrypy.server, 'using_apache', False):
# The object ID's won't match up when using Apache, since the
# server and client are running in different processes.
self.getPage('/repr?key=thing2')
from cherrypy.tutorial import thing2
self.assertBody(repr(thing2))
self.getPage('/repr?key=complex')
self.assertBody('(3+2j)')
self.getPage('/repr?key=mul')
self.assertBody('18')
self.getPage('/repr?key=stradd')
self.assertBody(repr('112233'))
def testRespNamespaces(self):
self.getPage('/foo/silly')
self.assertHeader('X-silly', 'sillyval')
self.assertBody('Hello world')
def testCustomNamespaces(self):
self.getPage('/raw/incr?num=12')
self.assertBody('13')
self.getPage('/dbscheme')
self.assertBody(r'sqlite///memory')
def testHandlerToolConfigOverride(self):
# Assert that config overrides tool constructor args. Above, we set
# the favicon in the page handler to be '../favicon.ico',
# but then overrode it in config to be './static/dirback.jpg'.
self.getPage('/favicon.ico')
self.assertBody(open(os.path.join(localDir, 'static/dirback.jpg'),
'rb').read())
def test_request_body_namespace(self):
self.getPage('/plain', method='POST', headers=[
('Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', '13')],
body=b'\xff\xfex\x00=\xff\xfea\x00b\x00c\x00')
self.assertBody('abc')
class VariableSubstitutionTests(unittest.TestCase):
setup_server = staticmethod(setup_server)
def test_config(self):
from textwrap import dedent
# variable substitution with [DEFAULT]
conf = dedent("""
[DEFAULT]
dir = "/some/dir"
my.dir = %(dir)s + "/sub"
[my]
my.dir = %(dir)s + "/my/dir"
my.dir2 = %(my.dir)s + '/dir2'
""")
fp = StringIOFromNative(conf)
cherrypy.config.update(fp)
self.assertEqual(cherrypy.config['my']['my.dir'], '/some/dir/my/dir')
self.assertEqual(cherrypy.config['my']
['my.dir2'], '/some/dir/my/dir/dir2')
class CallablesInConfigTest(unittest.TestCase):
setup_server = staticmethod(setup_server)
def test_call_with_literal_dict(self):
from textwrap import dedent
conf = dedent("""
[my]
value = dict(**{'foo': 'bar'})
""")
fp = StringIOFromNative(conf)
cherrypy.config.update(fp)
self.assertEqual(cherrypy.config['my']['value'], {'foo': 'bar'})
def test_call_with_kwargs(self):
from textwrap import dedent
conf = dedent("""
[my]
value = dict(foo="buzz", **cherrypy._test_dict)
""")
test_dict = {
'foo': 'bar',
'bar': 'foo',
'fizz': 'buzz'
}
cherrypy._test_dict = test_dict
fp = StringIOFromNative(conf)
cherrypy.config.update(fp)
test_dict['foo'] = 'buzz'
self.assertEqual(cherrypy.config['my']['value']['foo'], 'buzz')
self.assertEqual(cherrypy.config['my']['value'], test_dict)
del cherrypy._test_dict
|
import time
from datetime import datetime as dt
import pytest
from mock import patch, MagicMock
from pandas.util.testing import assert_frame_equal
from pymongo.errors import OperationFailure
from arctic.arctic import Arctic, VERSION_STORE
from arctic.exceptions import LibraryNotFoundException, QuotaExceededException
from ..util import get_large_ts
def test_connect_to_Arctic_string(mongo_host):
arctic = Arctic(mongo_host=mongo_host)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_connect_to_Arctic_connection(mongo_server, mongo_host):
arctic = Arctic(mongo_server.api)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_reset_Arctic(mongo_host, library_name):
arctic = Arctic(mongo_host=mongo_host)
arctic.list_libraries()
arctic.initialize_library(library_name, VERSION_STORE)
c = arctic._conn
assert arctic[library_name]._arctic_lib._curr_conn is c
arctic.reset()
assert c is not arctic._conn
assert len(c.nodes) == 0
assert arctic[library_name]._arctic_lib._curr_conn is arctic._conn
def test_re_authenticate_on_arctic_reset(mongo_host, library_name):
from collections import namedtuple
Cred = namedtuple('Cred', 'user, password')
with patch('arctic.arctic.authenticate') as auth_mock, \
patch('arctic.arctic.get_auth') as get_auth_mock:
auth_mock.return_value = True
get_auth_mock.return_value = Cred(user='a_username', password='a_passwd')
arctic = Arctic(mongo_host=mongo_host)
arctic.initialize_library(library_name, VERSION_STORE)
vstore = arctic[library_name]
vstore.list_symbols()
auth_mock.reset_mock()
arctic.reset()
assert auth_mock.call_count > 0
auth_mock.reset_mock()
vstore.list_symbols()
assert auth_mock.call_count == 0
def test_simple(library):
sym = 'symbol'
data = get_large_ts(100)
library.write(sym, data)
orig = dt.now()
    time.sleep(1) # Move the timestamp on by 1 second
data2 = get_large_ts(100)
library.write(sym, data2, prune_previous_version=False)
# Get the timeseries, it should be the same
read2 = library.read(sym).data
assert_frame_equal(read2, data2)
# Ensure we can get the previous version
read = library.read(sym, as_of=orig).data
assert_frame_equal(read, data)
def test_indexes(arctic):
c = arctic._conn
arctic.initialize_library("library", VERSION_STORE, segment='month')
chunk = c.arctic.library.index_information()
    index_version = chunk['_id_']['v'] # Mongo 3.2 has index v1, 3.4 and 3.5 have v2 (3.4 can run in compatibility mode with v1)
assert chunk == {u'_id_': {u'key': [(u'_id', 1)], u'ns': u'arctic.library', u'v': index_version},
u'symbol_1_parent_1_segment_1': {u'background': True,
u'key': [(u'symbol', 1),
(u'parent', 1),
(u'segment', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': index_version},
u'symbol_1_sha_1': {u'background': True,
u'key': [(u'symbol', 1), (u'sha', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': index_version},
u'symbol_hashed': {u'background': True,
u'key': [(u'symbol', u'hashed')],
u'ns': u'arctic.library',
u'v': index_version},
u'symbol_1_sha_1_segment_1': {u'background': True,
u'key': [(u'symbol', 1), (u'sha', 1), (u'segment', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': index_version}}
snapshots = c.arctic.library.snapshots.index_information()
assert snapshots == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.snapshots',
u'v': index_version},
u'name_1': {u'background': True,
u'key': [(u'name', 1)],
u'ns': u'arctic.library.snapshots',
u'unique': True,
u'v': index_version}}
versions = c.arctic.library.versions.index_information()
assert versions == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.versions',
u'v': index_version},
u'symbol_1__id_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'_id', -1)],
u'ns': u'arctic.library.versions',
u'v': index_version},
u'symbol_1_version_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'version', -1)],
u'ns': u'arctic.library.versions',
u'unique': True,
u'v': index_version}}
version_nums = c.arctic.library.version_nums.index_information()
assert version_nums == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.version_nums',
u'v': index_version},
u'symbol_1': {u'background': True,
u'key': [(u'symbol', 1)],
u'ns': u'arctic.library.version_nums',
u'unique': True,
u'v': index_version}}
def test_delete_library(arctic, library, library_name):
mongo = arctic._conn
    # Create a second 'user.library2' library too - ensure that it isn't deleted
arctic.initialize_library('user.library2', VERSION_STORE, segment='month')
library.write('asdf', get_large_ts(1))
assert 'TEST' in mongo.arctic_test.list_collection_names()
assert 'TEST.versions' in mongo.arctic_test.list_collection_names()
assert 'library2' in mongo.arctic_user.list_collection_names()
assert 'library2.versions' in mongo.arctic_user.list_collection_names()
arctic.delete_library(library_name)
assert 'TEST' not in mongo.arctic_user.list_collection_names()
assert 'TEST.versions' not in mongo.arctic_user.list_collection_names()
with pytest.raises(LibraryNotFoundException):
arctic[library_name]
with pytest.raises(LibraryNotFoundException):
arctic['arctic_{}'.format(library_name)]
assert 'library2' in mongo.arctic_user.list_collection_names()
assert 'library2.versions' in mongo.arctic_user.list_collection_names()
def test_quota(arctic, library, library_name):
thing = list(range(100))
library._arctic_lib.set_quota(10)
assert arctic.get_quota(library_name) == 10
assert library._arctic_lib.get_quota() == 10
library.write('thing', thing)
with pytest.raises(QuotaExceededException):
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
with pytest.raises(QuotaExceededException):
arctic.check_quota(library_name)
def test_check_quota(arctic, library, library_name):
with patch('arctic.arctic.logger.info') as info:
arctic.check_quota(library_name)
assert info.call_count == 1
def test_default_mongo_retry_timout():
now = time.time()
with pytest.raises(LibraryNotFoundException):
Arctic('unresolved-host', serverSelectionTimeoutMS=0)['some.lib']
assert time.time() - now < 1.
def test_lib_rename(arctic):
arctic.initialize_library('test')
l = arctic['test']
l.write('test_data', 'abc')
arctic.rename_library('test', 'new_name')
l = arctic['new_name']
assert(l.read('test_data').data == 'abc')
with pytest.raises(LibraryNotFoundException) as e:
l = arctic['test']
assert('Library test' in str(e.value))
assert('test' not in arctic.list_libraries())
def test_lib_rename_namespace(arctic):
arctic.initialize_library('namespace.test')
l = arctic['namespace.test']
l.write('test_data', 'abc')
with pytest.raises(ValueError) as e:
arctic.rename_library('namespace.test', 'new_namespace.test')
assert('Collection can only be renamed in the same database' in str(e.value))
arctic.rename_library('namespace.test', 'namespace.newlib')
l = arctic['namespace.newlib']
assert(l.read('test_data').data == 'abc')
with pytest.raises(LibraryNotFoundException) as e:
l = arctic['namespace.test']
assert('Library namespace.test' in str(e.value))
assert('namespace.test' not in arctic.list_libraries())
def test_lib_type(arctic):
arctic.initialize_library('test')
assert(arctic.get_library_type('test') == VERSION_STORE)
def test_library_exists(arctic):
arctic.initialize_library('test')
assert arctic.library_exists('test')
assert not arctic.library_exists('nonexistentlib')
def test_library_exists_no_auth(arctic):
arctic.initialize_library('test')
with patch('arctic.arctic.ArcticLibraryBinding') as AB:
AB.return_value = MagicMock(
get_library_type=MagicMock(side_effect=OperationFailure("not authorized on arctic to execute command")))
assert arctic.library_exists('test')
assert AB.return_value.get_library_type.called
assert not arctic.library_exists('nonexistentlib')
def test_list_libraries_cached(arctic):
    # The default in arctic is to cache list_libraries.
libs = ['test1', 'test2']
for lib in libs:
arctic.initialize_library(lib)
    # The newly initialized libraries should have been appended to the cache.
assert sorted(libs) == sorted(arctic.list_libraries()) == sorted(arctic._list_libraries())
# Should default to uncached list_libraries if cache is empty.
with patch('arctic.arctic.Arctic._list_libraries', return_value=libs) as uncached_list_libraries:
# Empty cache manually.
arctic._conn.meta_db.cache.remove({})
assert arctic._list_libraries_cached() == libs
uncached_list_libraries.assert_called()
# Reload cache and check that it has data
arctic.reload_cache()
assert sorted(arctic._cache.get('list_libraries')) == sorted(libs)
# Should fetch it from cache the second time.
with patch('arctic.arctic.Arctic._list_libraries', return_value=libs) as uncached_list_libraries:
assert sorted(arctic._list_libraries_cached()) == sorted(libs)
uncached_list_libraries.assert_not_called()
def test_initialize_library_adds_to_cache(arctic):
libs = ['test1', 'test2']
for lib in libs:
arctic.initialize_library(lib)
arctic.reload_cache()
assert arctic._list_libraries_cached() == arctic._list_libraries()
# Add another lib
arctic.initialize_library('test3')
assert sorted(arctic._cache.get('list_libraries')) == ['test1', 'test2', 'test3']
def test_cache_does_not_return_stale_data(arctic):
libs = ['test1', 'test2']
for lib in libs:
arctic.initialize_library(lib)
arctic.reload_cache()
assert arctic._list_libraries_cached() == arctic._list_libraries()
time.sleep(0.2)
# Should call uncached list_libraries if the data is stale according to caller.
with patch('arctic.arctic.Arctic._list_libraries', return_value=libs) as uncached_list_libraries:
assert arctic._list_libraries_cached(newer_than_secs=0.1) == libs
uncached_list_libraries.assert_called()
def test_renaming_returns_new_name_in_cache(arctic):
libs = ['test1', 'test2']
for lib in libs:
arctic.initialize_library(lib)
assert sorted(arctic._list_libraries_cached()) == sorted(arctic._list_libraries())
arctic.rename_library('test1', 'test3')
assert sorted(arctic._list_libraries_cached()) == sorted(['test2', 'test3'])
def test_deleting_library_removes_it_from_cache(arctic):
libs = ['test1', 'test2']
for lib in libs:
arctic.initialize_library(lib)
arctic.delete_library('test1')
assert arctic._list_libraries_cached() == arctic._list_libraries() == arctic.list_libraries() == ['test2']
def test_disable_cache_by_settings(arctic):
lib = 'test1'
arctic.initialize_library(lib)
# Should be enabled by default
assert arctic._list_libraries_cached() == arctic._list_libraries()
arctic._cache.set_caching_state(enabled=False)
# Should not return cached results now.
with patch('arctic.arctic.Arctic._list_libraries', return_value=[lib]) as uncached_list_libraries:
with patch('arctic.arctic.Arctic._list_libraries_cached', return_value=[lib]) as cached_list_libraries:
arctic.list_libraries()
uncached_list_libraries.assert_called()
cached_list_libraries.assert_not_called()
arctic._cache.set_caching_state(enabled=True)
    # Should use cached data again.
with patch('arctic.arctic.Arctic._list_libraries', return_value=[lib]) as uncached_list_libraries_e:
with patch('arctic.arctic.Arctic._list_libraries_cached', return_value=[lib]) as cached_list_libraries_e:
arctic.list_libraries()
uncached_list_libraries_e.assert_not_called()
cached_list_libraries_e.assert_called()
|
import numpy as np
import pandas as pd
from hypertools.tools import format_data
from hypertools.plot.plot import plot
def test_np_array():
data = np.random.rand(100,10)
assert isinstance(format_data(data), list)
assert isinstance(format_data(data)[0], np.ndarray)
def test_df():
data = pd.DataFrame(np.random.rand(100,10))
assert isinstance(format_data(data), list)
assert isinstance(format_data(data)[0], np.ndarray)
def test_text():
data = ['here is some test text', 'and a little more', 'and more']
assert isinstance(format_data(data), list)
assert isinstance(format_data(data)[0], np.ndarray)
def test_str():
res = format_data('here is some test text')
assert isinstance(res, list)
assert isinstance(res[0], np.ndarray)
def test_mixed_list():
mat = np.random.rand(3,20)
df = pd.DataFrame(np.random.rand(3,20))
text = ['here is some test text', 'and a little more', 'and more']
string = 'a string'
res = format_data([mat, df, text, string])
assert isinstance(res, list)
assert all(map(lambda x: isinstance(x, np.ndarray), res))
def test_geo():
geo = plot(np.random.rand(100,10), show=False)
assert isinstance(format_data(geo), list)
assert isinstance(format_data(geo)[0], np.ndarray)
def test_missing_data():
data = np.random.rand(100,10)
data[0][0]=np.nan
geo = plot(data, show=False)
assert isinstance(format_data(geo), list)
assert isinstance(format_data(geo)[0], np.ndarray)
def test_force_align():
mat = np.random.rand(4,3)
df = pd.DataFrame(np.random.rand(4,3))
text = ['here is some test text', 'and a little more', 'and more', 'just a bit more']
res = format_data([mat, df, text])
assert isinstance(res, list)
assert all(map(lambda x: isinstance(x, np.ndarray), res))
assert all(map(lambda x: x.shape[1]==100, res))
|
from qutebrowser.api import cmdutils, apitypes
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_next_line(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
tab.caret.move_to_next_line(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_prev_line(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
tab.caret.move_to_prev_line(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_next_char(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the next char.
Args:
        count: How many chars to move.
"""
tab.caret.move_to_next_char(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_prev_char(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
tab.caret.move_to_prev_char(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_end_of_word(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
tab.caret.move_to_end_of_word(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_next_word(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
tab.caret.move_to_next_word(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_prev_word(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
tab.caret.move_to_prev_word(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def move_to_start_of_line(tab: apitypes.Tab) -> None:
"""Move the cursor or selection to the start of the line."""
tab.caret.move_to_start_of_line()
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def move_to_end_of_line(tab: apitypes.Tab) -> None:
"""Move the cursor or selection to the end of line."""
tab.caret.move_to_end_of_line()
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_start_of_next_block(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
tab.caret.move_to_start_of_next_block(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_start_of_prev_block(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
tab.caret.move_to_start_of_prev_block(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_end_of_next_block(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
tab.caret.move_to_end_of_next_block(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def move_to_end_of_prev_block(tab: apitypes.Tab, count: int = 1) -> None:
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
tab.caret.move_to_end_of_prev_block(count)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def move_to_start_of_document(tab: apitypes.Tab) -> None:
"""Move the cursor or selection to the start of the document."""
tab.caret.move_to_start_of_document()
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def move_to_end_of_document(tab: apitypes.Tab) -> None:
"""Move the cursor or selection to the end of the document."""
tab.caret.move_to_end_of_document()
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def toggle_selection(tab: apitypes.Tab, line: bool = False) -> None:
"""Toggle caret selection mode.
Args:
line: Enables line-selection.
"""
tab.caret.toggle_selection(line)
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def drop_selection(tab: apitypes.Tab) -> None:
"""Drop selection and keep selection mode enabled."""
tab.caret.drop_selection()
@cmdutils.register()
@cmdutils.argument('tab_obj', value=cmdutils.Value.cur_tab)
def follow_selected(tab_obj: apitypes.Tab, *, tab: bool = False) -> None:
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
tab_obj.caret.follow_selected(tab=tab)
except apitypes.WebTabError as e:
raise cmdutils.CommandError(str(e))
@cmdutils.register(modes=[cmdutils.KeyMode.caret])
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def reverse_selection(tab: apitypes.Tab) -> None:
"""Swap the stationary and moving end of the current selection."""
tab.caret.reverse_selection()
|
import logging
import pytest
class LogFailHandler(logging.Handler):
"""A logging handler which makes tests fail on unexpected messages."""
def __init__(self, level=logging.NOTSET, min_level=logging.WARNING):
self._min_level = min_level
super().__init__(level)
def emit(self, record):
logger = logging.getLogger(record.name)
root_logger = logging.getLogger()
if logger.name == 'messagemock':
return
if (logger.level == record.levelno or
root_logger.level == record.levelno):
# caplog.at_level(...) was used with the level of this message,
# i.e. it was expected.
return
if record.levelno < self._min_level:
return
pytest.fail("Got logging message on logger {} with level {}: "
"{}!".format(record.name, record.levelname,
record.getMessage()))
@pytest.fixture(scope='session', autouse=True)
def fail_on_logging():
handler = LogFailHandler()
logging.getLogger().addHandler(handler)
yield
logging.getLogger().removeHandler(handler)
handler.close()
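# Hedged illustration (hypothetical test, not part of the original file): with
# the autouse fail_on_logging fixture above, an *expected* warning must be
# wrapped in caplog.at_level(...) so the logger's level matches the record and
# LogFailHandler.emit() returns early instead of failing the test.
def test_expected_warning_does_not_fail(caplog):
    with caplog.at_level(logging.WARNING, logger='demo'):
        logging.getLogger('demo').warning("this warning is expected")
    assert "expected" in caplog.text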
|
import unittest
import lightgbm as lgb
import pandas as pd
from common import gpu_test
class TestLightgbm(unittest.TestCase):
# Based on the "simple_example" from their documentation:
# https://github.com/Microsoft/LightGBM/blob/master/examples/python-guide/simple_example.py
def test_cpu(self):
lgb_train, lgb_eval = self.load_datasets()
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'auc'},
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
# Run only one round for faster test
gbm = lgb.train(params,
lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
early_stopping_rounds=1)
self.assertEqual(1, gbm.best_iteration)
@gpu_test
def test_gpu(self):
lgb_train, lgb_eval = self.load_datasets()
params = {
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'auc',
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 1,
'device': 'gpu'
}
# Run only one round for faster test
gbm = lgb.train(params,
lgb_train,
num_boost_round=1,
valid_sets=lgb_eval,
early_stopping_rounds=1)
self.assertEqual(1, gbm.best_iteration)
def load_datasets(self):
df_train = pd.read_csv('/input/tests/data/lgb_train.csv', header=None, sep='\t')
df_test = pd.read_csv('/input/tests/data/lgb_test.csv', header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
return (lgb_train, lgb_eval)
|
import io
import os
import pytest
from nikola import __main__
from .helper import cd
from .test_demo_build import prepare_demo_site
from .test_empty_build import ( # NOQA
test_avoid_double_slash_in_rss,
test_index_in_sitemap,
)
def test_check_links_fail(build, output_dir, target_dir):
os.unlink(os.path.join(output_dir, "archive.html"))
with cd(target_dir):
result = __main__.main(["check", "-l"])
assert result != 0
def test_check_files_fail(build, output_dir, target_dir):
manually_added_file = os.path.join(output_dir, "foobar")
with io.open(manually_added_file, "w+", encoding="utf8") as outf:
outf.write("foo")
with cd(target_dir):
result = __main__.main(["check", "-f"])
assert result != 0
@pytest.fixture(scope="module")
def build(target_dir):
"""Fill the site with demo content and build it."""
prepare_demo_site(target_dir)
with cd(target_dir):
__main__.main(["build"])
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from entropy import EntropyStatCollector
##########################################################################
class TestEntropyStatCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('EntropyStatCollector', {
})
self.collector = EntropyStatCollector(config, None)
def test_import(self):
self.assertTrue(EntropyStatCollector)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from datetime import timedelta
from homeassistant.components.canary.const import DOMAIN, MANUFACTURER
from homeassistant.components.canary.sensor import (
ATTR_AIR_QUALITY,
STATE_AIR_QUALITY_ABNORMAL,
STATE_AIR_QUALITY_NORMAL,
STATE_AIR_QUALITY_VERY_ABNORMAL,
)
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from . import mock_device, mock_location, mock_reading
from tests.async_mock import patch
from tests.common import async_fire_time_changed, mock_device_registry, mock_registry
async def test_sensors_pro(hass, canary) -> None:
"""Test the creation and values of the sensors for Canary Pro."""
await async_setup_component(hass, "persistent_notification", {})
registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
online_device_at_home = mock_device(20, "Dining Room", True, "Canary Pro")
instance = canary.return_value
instance.get_locations.return_value = [
mock_location(100, "Home", True, devices=[online_device_at_home]),
]
instance.get_latest_readings.return_value = [
mock_reading("temperature", "21.12"),
mock_reading("humidity", "50.46"),
mock_reading("air_quality", "0.59"),
]
config = {DOMAIN: {"username": "test-username", "password": "test-password"}}
with patch("homeassistant.components.canary.PLATFORMS", ["sensor"]):
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
sensors = {
"home_dining_room_temperature": (
"20_temperature",
"21.12",
TEMP_CELSIUS,
DEVICE_CLASS_TEMPERATURE,
None,
),
"home_dining_room_humidity": (
"20_humidity",
"50.46",
PERCENTAGE,
DEVICE_CLASS_HUMIDITY,
None,
),
"home_dining_room_air_quality": (
"20_air_quality",
"0.59",
None,
None,
"mdi:weather-windy",
),
}
for (sensor_id, data) in sensors.items():
entity_entry = registry.async_get(f"sensor.{sensor_id}")
assert entity_entry
assert entity_entry.device_class == data[3]
assert entity_entry.unique_id == data[0]
assert entity_entry.original_icon == data[4]
state = hass.states.get(f"sensor.{sensor_id}")
assert state
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == data[2]
assert state.state == data[1]
device = device_registry.async_get_device({(DOMAIN, "20")}, set())
assert device
assert device.manufacturer == MANUFACTURER
assert device.name == "Dining Room"
assert device.model == "Canary Pro"
async def test_sensors_attributes_pro(hass, canary) -> None:
"""Test the creation and values of the sensors attributes for Canary Pro."""
await async_setup_component(hass, "persistent_notification", {})
online_device_at_home = mock_device(20, "Dining Room", True, "Canary Pro")
instance = canary.return_value
instance.get_locations.return_value = [
mock_location(100, "Home", True, devices=[online_device_at_home]),
]
instance.get_latest_readings.return_value = [
mock_reading("temperature", "21.12"),
mock_reading("humidity", "50.46"),
mock_reading("air_quality", "0.59"),
]
config = {DOMAIN: {"username": "test-username", "password": "test-password"}}
with patch("homeassistant.components.canary.PLATFORMS", ["sensor"]):
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
entity_id = "sensor.home_dining_room_air_quality"
state = hass.states.get(entity_id)
assert state
assert state.attributes[ATTR_AIR_QUALITY] == STATE_AIR_QUALITY_ABNORMAL
instance.get_latest_readings.return_value = [
mock_reading("temperature", "21.12"),
mock_reading("humidity", "50.46"),
mock_reading("air_quality", "0.4"),
]
future = utcnow() + timedelta(seconds=30)
async_fire_time_changed(hass, future)
await hass.helpers.entity_component.async_update_entity(entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.attributes[ATTR_AIR_QUALITY] == STATE_AIR_QUALITY_VERY_ABNORMAL
instance.get_latest_readings.return_value = [
mock_reading("temperature", "21.12"),
mock_reading("humidity", "50.46"),
mock_reading("air_quality", "1.0"),
]
future += timedelta(seconds=30)
async_fire_time_changed(hass, future)
await hass.helpers.entity_component.async_update_entity(entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state
assert state.attributes[ATTR_AIR_QUALITY] == STATE_AIR_QUALITY_NORMAL
async def test_sensors_flex(hass, canary) -> None:
"""Test the creation and values of the sensors for Canary Flex."""
await async_setup_component(hass, "persistent_notification", {})
registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
online_device_at_home = mock_device(20, "Dining Room", True, "Canary Flex")
instance = canary.return_value
instance.get_locations.return_value = [
mock_location(100, "Home", True, devices=[online_device_at_home]),
]
instance.get_latest_readings.return_value = [
mock_reading("battery", "70.4567"),
mock_reading("wifi", "-57"),
]
config = {DOMAIN: {"username": "test-username", "password": "test-password"}}
with patch("homeassistant.components.canary.PLATFORMS", ["sensor"]):
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
sensors = {
"home_dining_room_battery": (
"20_battery",
"70.46",
PERCENTAGE,
DEVICE_CLASS_BATTERY,
None,
),
"home_dining_room_wifi": (
"20_wifi",
"-57.0",
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
DEVICE_CLASS_SIGNAL_STRENGTH,
None,
),
}
for (sensor_id, data) in sensors.items():
entity_entry = registry.async_get(f"sensor.{sensor_id}")
assert entity_entry
assert entity_entry.device_class == data[3]
assert entity_entry.unique_id == data[0]
assert entity_entry.original_icon == data[4]
state = hass.states.get(f"sensor.{sensor_id}")
assert state
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == data[2]
assert state.state == data[1]
device = device_registry.async_get_device({(DOMAIN, "20")}, set())
assert device
assert device.manufacturer == MANUFACTURER
assert device.name == "Dining Room"
assert device.model == "Canary Flex"
|
try:
import json
except ImportError:
import simplejson as json
import urllib2
import diamond.collector
class PhpFpmCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(PhpFpmCollector, self).get_default_config_help()
config_help.update({
'uri': 'Path part of the URL, with or without the leading /',
'host': 'Host part of the URL',
'port': 'Port part of the URL',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PhpFpmCollector, self).get_default_config()
config.update({
'host': 'localhost',
'port': 80,
'uri': 'fpm-status',
'byte_unit': ['byte'],
'path': 'phpfpm',
})
return config
def collect(self):
        # If there is a leading '/', remove it.
if self.config['uri'][0] == '/':
self.config['uri'] = self.config['uri'][1:]
try:
response = urllib2.urlopen("http://%s:%s/%s?json" % (
self.config['host'], int(self.config['port']),
self.config['uri']))
except Exception as e:
            self.log.error("Couldn't connect to php-fpm status page: %s", e)
return {}
try:
j = json.loads(response.read())
except Exception as e:
            self.log.error("Couldn't parse JSON: %s", e)
return {}
valid_metrics = [
'accepted_conn',
'listen_queue',
'max_listen_queue',
'listen_queue_len',
'idle_processes',
'active_processes',
'total_processes',
'max_active_processes',
'max_children_reached',
'slow_requests'
]
for k, v in j.items():
            # php-fpm has spaces in its keys, so replace all spaces with '_'.
k = k.replace(" ", "_")
if k in valid_metrics:
self.publish(k, v)
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if (
cur_state.state == state.state
and cur_state.attributes.get(ATTR_CURRENT_POSITION)
== state.attributes.get(ATTR_CURRENT_POSITION)
and cur_state.attributes.get(ATTR_CURRENT_TILT_POSITION)
== state.attributes.get(ATTR_CURRENT_TILT_POSITION)
):
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
service_data_tilting = {ATTR_ENTITY_ID: state.entity_id}
if not (
cur_state.state == state.state
and cur_state.attributes.get(ATTR_CURRENT_POSITION)
== state.attributes.get(ATTR_CURRENT_POSITION)
):
# Open/Close
if state.state in [STATE_CLOSED, STATE_CLOSING]:
service = SERVICE_CLOSE_COVER
elif state.state in [STATE_OPEN, STATE_OPENING]:
if (
ATTR_CURRENT_POSITION in cur_state.attributes
and ATTR_CURRENT_POSITION in state.attributes
):
service = SERVICE_SET_COVER_POSITION
service_data[ATTR_POSITION] = state.attributes[ATTR_CURRENT_POSITION]
else:
service = SERVICE_OPEN_COVER
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
if (
ATTR_CURRENT_TILT_POSITION in state.attributes
and ATTR_CURRENT_TILT_POSITION in cur_state.attributes
and cur_state.attributes.get(ATTR_CURRENT_TILT_POSITION)
!= state.attributes.get(ATTR_CURRENT_TILT_POSITION)
):
# Tilt position
if state.attributes.get(ATTR_CURRENT_TILT_POSITION) == 100:
service_tilting = SERVICE_OPEN_COVER_TILT
elif state.attributes.get(ATTR_CURRENT_TILT_POSITION) == 0:
service_tilting = SERVICE_CLOSE_COVER_TILT
else:
service_tilting = SERVICE_SET_COVER_TILT_POSITION
service_data_tilting[ATTR_TILT_POSITION] = state.attributes[
ATTR_CURRENT_TILT_POSITION
]
await hass.services.async_call(
DOMAIN,
service_tilting,
service_data_tilting,
context=context,
blocking=True,
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Cover states."""
# Reproduce states in parallel.
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
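# Hedged usage sketch (hypothetical, not part of the original module):
# reproducing a single saved cover state could look like the call below, where
# "cover.living_room" and the 50% position are made-up values and `hass` is a
# running Home Assistant instance supplied by the caller.
#
#     from homeassistant.core import State
#     await async_reproduce_states(
#         hass,
#         [State("cover.living_room", STATE_OPEN, {ATTR_CURRENT_POSITION: 50})],
#     )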
|
from unittest import mock
import pytest
from meld.misc import all_same, calc_syncpoint, merge_intervals
@pytest.mark.parametrize("intervals, expected", [
# Dominated by a single range
([(1, 5), (5, 9), (10, 11), (0, 20)], [(0, 20)]),
# No overlap
([(1, 5), (6, 9), (10, 11)], [(1, 5), (6, 9), (10, 11)]),
# Two overlap points between ranges
([(1, 5), (5, 9), (10, 12), (11, 20)], [(1, 9), (10, 20)]),
# Two overlap points between ranges, out of order
([(5, 9), (1, 5), (11, 20), (10, 12)], [(1, 9), (10, 20)]),
# Two equal ranges
([(1, 5), (7, 8), (1, 5)], [(1, 5), (7, 8)]),
# Three ranges overlap
([(1, 5), (4, 10), (9, 15)], [(1, 15)])
])
def test_merge_intervals(intervals, expected):
merged = merge_intervals(intervals)
assert merged == expected
@pytest.mark.parametrize("value, page_size, lower, upper, expected", [
# Boring top
(0, 100, 0, 1000, 0.0),
# Above the top!
(0, 100, 100, 1000, 0.0),
# Normal top scaling
(25, 100, 0, 1000, 0.25),
(50, 100, 0, 1000, 0.5),
# Scaling with a lower offset
(25, 100, 25, 1000, 0.0),
(50, 100, 25, 1000, 0.25),
# Somewhere in the middle
(500, 100, 0, 1000, 0.5),
# Normal bottom scaling
(850, 100, 0, 1000, 0.5),
(875, 100, 0, 1000, 0.75),
# Boring bottom
(900, 100, 0, 1000, 1.0),
# Below the bottom!
(1100, 100, 0, 1000, 1.0),
])
def test_calc_syncpoint(value, page_size, lower, upper, expected):
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
adjustment = Gtk.Adjustment()
adjustment.configure(value, lower, upper, 1, 1, page_size)
syncpoint = calc_syncpoint(adjustment)
assert syncpoint == expected
@pytest.mark.parametrize("lst, expected", [
(None, True),
([], True),
([0], True),
([1], True),
([0, 0], True),
([0, 1], False),
([1, 0], False),
([1, 1], True),
([0, 0, 0], True),
([0, 0, 1], False),
([0, 1, 0], False),
([0, 1, 1], False),
([1, 0, 0], False),
([1, 0, 1], False),
([1, 1, 0], False),
([1, 1, 1], True)
])
def test_all_same(lst, expected):
assert all_same(lst) == expected
@pytest.mark.parametrize("os_name, paths, expected", [
('posix', ['/tmp/foo1', '/tmp/foo2'], ['foo1', 'foo2']),
('posix', ['/tmp/foo1', '/tmp/foo2', '/tmp/foo3'], ['foo1', 'foo2', 'foo3']),
('posix', ['/tmp/bar/foo1', '/tmp/woo/foo2'], ['foo1', 'foo2']),
('posix', ['/tmp/bar/foo1', '/tmp/woo/foo1'], ['[bar] foo1', '[woo] foo1']),
('posix', ['/tmp/bar/foo1', '/tmp/woo/foo1', '/tmp/ree/foo1'], ['[bar] foo1', '[woo] foo1', '[ree] foo1']),
('posix', ['/tmp/bar/deep/deep', '/tmp/bar/shallow'], ['deep', 'shallow']),
('posix', ['/tmp/bar/deep/deep/foo1', '/tmp/bar/shallow/foo1'], ['[deep] foo1', '[shallow] foo1']),
# This case doesn't actually make much sense, so it's not that bad
# that our output is... somewhat unclear.
('posix', ['/tmp/bar/subdir/subsub', '/tmp/bar/'], ['subsub', 'bar']),
('nt', ['C:\\Users\\hmm\\bar', 'C:\\Users\\hmm\\foo'], ['bar', 'foo']),
('nt', ['C:\\Users\\bar\\hmm', 'C:\\Users\\foo\\hmm'], ['[bar] hmm', '[foo] hmm']),
# Check that paths with no commonality are handled
('posix', ['nothing in', 'common'], ['nothing in', 'common']),
('posix', ['<unnamed>', '/tmp/real/path'], ['<unnamed>', '/tmp/real/path']),
])
def test_shorten_names(os_name, paths, expected):
from meld.misc import shorten_names
with mock.patch('os.name', os_name):
assert shorten_names(*paths) == expected
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from compare_gan import utils
from compare_gan.architectures import arch_ops
from compare_gan.architectures import resnet_biggan_deep
import tensorflow as tf
class ResNet5BigGanDeepTest(tf.test.TestCase):
def testNumberOfParameters(self):
with tf.Graph().as_default():
batch_size = 2
z = tf.zeros((batch_size, 128))
y = tf.one_hot(tf.ones((batch_size,), dtype=tf.int32), 1000)
generator = resnet_biggan_deep.Generator(
image_shape=(128, 128, 3),
batch_norm_fn=arch_ops.conditional_batch_norm)
fake_images = generator(z, y=y, is_training=True, reuse=False)
self.assertEqual(fake_images.shape.as_list(), [batch_size, 128, 128, 3])
discriminator = resnet_biggan_deep.Discriminator()
predictions = discriminator(fake_images, y, is_training=True)
self.assertLen(predictions, 3)
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if "generator" in var.name]
d_vars = [var for var in t_vars if "discriminator" in var.name]
g_param_overview = utils.get_parameter_overview(g_vars, limit=None)
d_param_overview = utils.get_parameter_overview(d_vars, limit=None)
g_param_overview = g_param_overview.split("\n")
logging.info("Generator variables:")
for i in range(0, len(g_param_overview), 80):
logging.info("\n%s", "\n".join(g_param_overview[i:i + 80]))
logging.info("Discriminator variables:\n%s", d_param_overview)
g_num_weights = sum([v.get_shape().num_elements() for v in g_vars])
self.assertEqual(g_num_weights, 50244484)
d_num_weights = sum([v.get_shape().num_elements() for v in d_vars])
self.assertEqual(d_num_weights, 34590210)
if __name__ == "__main__":
tf.test.main()
|
import argparse
import numpy as np
import chainer
from chainer.links import Convolution2D
from chainer import serializers
from chainercv.experimental.links import YOLOv2Tiny
from chainercv.links import Conv2DBNActiv
from chainercv.links import YOLOv2
from chainercv.links import YOLOv3
def load_param(file, param):
if isinstance(param, chainer.Variable):
param = param.array
param[:] = np.fromfile(file, dtype=np.float32, count=param.size) \
.reshape(param.shape)
def load_link(file, link):
if isinstance(link, Convolution2D):
load_param(file, link.b)
load_param(file, link.W)
elif isinstance(link, Conv2DBNActiv):
load_param(file, link.bn.beta)
load_param(file, link.bn.gamma)
load_param(file, link.bn.avg_mean)
load_param(file, link.bn.avg_var)
load_param(file, link.conv.W)
elif isinstance(link, chainer.ChainList):
for l in link:
load_link(file, l)
def reorder_loc(conv, n_fg_class):
# darknet stores (x, y, w, h); swap to (y, x, h, w)
for data in (conv.W.array, conv.b.array):
data = data.reshape(
(-1, 4 + 1 + n_fg_class) + data.shape[1:])
data[:, [1, 0, 3, 2]] = data[:, :4].copy()
def load_yolo_v2(file, model):
load_link(file, model.extractor)
load_link(file, model.subnet)
reorder_loc(model.subnet, model.n_fg_class)
def load_yolo_v3(file, model):
for i, link in enumerate(model.extractor):
load_link(file, link)
if i in {33, 39, 45}:
subnet = model.subnet[(i - 33) // 6]
load_link(file, subnet)
for subnet in model.subnet:
reorder_loc(subnet[-1], model.n_fg_class)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('yolo_v2', 'yolo_v2_tiny', 'yolo_v3'),
default='yolo_v2')
parser.add_argument('--n-fg-class', type=int, default=80)
parser.add_argument('darknetmodel')
parser.add_argument('output')
args = parser.parse_args()
if args.model == 'yolo_v2':
model = YOLOv2(n_fg_class=args.n_fg_class)
elif args.model == 'yolo_v2_tiny':
model = YOLOv2Tiny(n_fg_class=args.n_fg_class)
elif args.model == 'yolo_v3':
model = YOLOv3(n_fg_class=args.n_fg_class)
with chainer.using_config('train', False):
model(np.empty((1, 3, model.insize, model.insize), dtype=np.float32))
with open(args.darknetmodel, mode='rb') as f:
major = np.fromfile(f, dtype=np.int32, count=1)
minor = np.fromfile(f, dtype=np.int32, count=1)
np.fromfile(f, dtype=np.int32, count=1) # revision
if major * 10 + minor >= 2 and major < 1000 and minor < 1000:
np.fromfile(f, dtype=np.int64, count=1) # seen
else:
np.fromfile(f, dtype=np.int32, count=1) # seen
if args.model == 'yolo_v2':
load_yolo_v2(f, model)
elif args.model == 'yolo_v2_tiny':
load_yolo_v2(f, model)
elif args.model == 'yolo_v3':
load_yolo_v3(f, model)
serializers.save_npz(args.output, model)
if __name__ == '__main__':
main()
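# Illustrative invocation (the script filename and weight/output file names are
# assumed): converting pretrained darknet weights to a chainer .npz file might
# look like
#
#   $ python darknet2npz.py --model yolo_v3 --n-fg-class 80 \
#         yolov3.weights yolo_v3_coco.npz
#
# The dummy forward pass in main() initializes all links so that their
# parameters exist before the raw float32 stream is copied into them.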
|
import os
from qutebrowser.browser.webkit.network import networkreply
from qutebrowser.utils import jinja
def get_file_list(basedir, all_files, filterfunc):
"""Get a list of files filtered by a filter function and sorted by name.
Args:
basedir: The parent directory of all files.
all_files: The list of files to filter and sort.
filterfunc: The filter function.
Return:
A list of dicts. Each dict contains the name and absname keys.
"""
items = []
for filename in all_files:
absname = os.path.join(basedir, filename)
if filterfunc(absname):
items.append({'name': filename, 'absname': absname})
return sorted(items, key=lambda v: v['name'].lower())
def is_root(directory):
"""Check if the directory is the root directory.
Args:
directory: The directory to check.
Return:
Whether the directory is a root directory or not.
"""
# If you're curious as to why this works:
# dirname('/') = '/'
# dirname('/home') = '/'
# dirname('/home/') = '/home'
# dirname('/home/foo') = '/home'
# basically, for files (no trailing slash) it removes the file part, and
# for directories, it removes the trailing slash, so the only way for this
# to be equal is if the directory is the root directory.
return os.path.dirname(directory) == directory
def parent_dir(directory):
"""Return the parent directory for the given directory.
Args:
directory: The path to the directory.
Return:
The path to the parent directory.
"""
return os.path.normpath(os.path.join(directory, os.pardir))
def dirbrowser_html(path):
"""Get the directory browser web page.
Args:
path: The directory path.
Return:
The HTML of the web page.
"""
title = "Browse directory: {}".format(path)
if is_root(path):
parent = None
else:
parent = parent_dir(path)
try:
all_files = os.listdir(path)
except OSError as e:
html = jinja.render('error.html',
title="Error while reading directory",
url='file:///{}'.format(path), error=str(e))
return html.encode('UTF-8', errors='xmlcharrefreplace')
files = get_file_list(path, all_files, os.path.isfile)
directories = get_file_list(path, all_files, os.path.isdir)
html = jinja.render('dirbrowser.html', title=title, url=path,
parent=parent, files=files, directories=directories)
return html.encode('UTF-8', errors='xmlcharrefreplace')
def handler(request, _operation, _current_url):
"""Handler for a file:// URL.
Args:
request: QNetworkRequest to answer to.
_operation: The HTTP operation being done.
_current_url: The page we're on currently.
Return:
A QNetworkReply for directories, None for files.
"""
path = request.url().toLocalFile()
try:
if os.path.isdir(path):
data = dirbrowser_html(path)
return networkreply.FixedDataNetworkReply(
request, data, 'text/html')
return None
except UnicodeEncodeError:
return None
|
import unittest
from perfkitbenchmarker import disk_iops_to_capacity
class DiskIOPSToCapacityTest(unittest.TestCase):
def testSetCPUCountAWS(self):
AWSconfig = disk_iops_to_capacity.DiskIOPSToCapacity(300, 'AWS')
self.assertEqual(AWSconfig.GetCPUCount(), 2)
def testSetCPUCountGCP(self):
GCPconfig = disk_iops_to_capacity.DiskIOPSToCapacity(300, 'GCP')
self.assertEqual(GCPconfig.GetCPUCount(), 1)
def testSetNumberDisksAWS(self):
AWSconfig1 = disk_iops_to_capacity.DiskIOPSToCapacity(300, 'AWS')
self.assertEqual(AWSconfig1.GetNumberDisks(), 1)
AWSconfig2 = disk_iops_to_capacity.DiskIOPSToCapacity(25000, 'AWS')
self.assertEqual(AWSconfig2.GetNumberDisks(), 3)
AWSconfig3 = disk_iops_to_capacity.DiskIOPSToCapacity(20000, 'AWS')
self.assertEqual(AWSconfig3.GetNumberDisks(), 2)
def testSetNumberDisksGCP(self):
GCPconfig1 = disk_iops_to_capacity.DiskIOPSToCapacity(50, 'GCP')
self.assertEqual(GCPconfig1.GetNumberDisks(), 1)
def testSetStorageSizeAWS(self):
AWSconfig1 = disk_iops_to_capacity.DiskIOPSToCapacity(50, 'AWS')
self.assertEqual(AWSconfig1.GetSize(), 2)
AWSconfig2 = disk_iops_to_capacity.DiskIOPSToCapacity(100, 'AWS')
self.assertEqual(AWSconfig2.GetSize(), 2)
AWSconfig3 = disk_iops_to_capacity.DiskIOPSToCapacity(300, 'AWS')
self.assertEqual(AWSconfig3.GetSize(), 300 * 3)
AWSconfig4 = disk_iops_to_capacity.DiskIOPSToCapacity(9999, 'AWS')
self.assertEqual(AWSconfig4.GetSize(), 9999 * 3)
AWSconfig4 = disk_iops_to_capacity.DiskIOPSToCapacity(10000, 'AWS')
self.assertEqual(AWSconfig4.GetSize(), 3580)
def testSetStorageSizeGCP(self):
GCPconfig1 = disk_iops_to_capacity.DiskIOPSToCapacity(1, 'GCP')
self.assertEqual(GCPconfig1.GetSize(), 1)
GCPconfig1 = disk_iops_to_capacity.DiskIOPSToCapacity(300, 'GCP')
self.assertEqual(GCPconfig1.GetSize(), 10)
GCPconfig2 = disk_iops_to_capacity.DiskIOPSToCapacity(30000, 'GCP')
self.assertEqual(GCPconfig2.GetSize(), 1000)
def testValidateProvider(self):
self.assertRaises(disk_iops_to_capacity.InvalidProviderError,
disk_iops_to_capacity.DiskIOPSToCapacity, 300,
'NONPROVIDER')
def testValidateIOPS(self):
self.assertRaises(disk_iops_to_capacity.InvalidIOPSError,
disk_iops_to_capacity.DiskIOPSToCapacity, 0, 'AWS')
self.assertRaises(disk_iops_to_capacity.InvalidIOPSError,
disk_iops_to_capacity.DiskIOPSToCapacity, 50000, 'GCP')
self.assertRaises(disk_iops_to_capacity.InvalidIOPSError,
disk_iops_to_capacity.DiskIOPSToCapacity, 90000, 'AWS')
def testValidateStorageType(self):
self.assertRaises(disk_iops_to_capacity.InvalidStorageTypeError,
disk_iops_to_capacity.DiskIOPSToCapacity, 100, 'AWS',
'ebs-piops')
self.assertRaises(disk_iops_to_capacity.InvalidStorageTypeError,
disk_iops_to_capacity.DiskIOPSToCapacity, 100, 'GCP',
'pd-hhd')
if __name__ == '__main__':
unittest.main()
|
import numpy as np
from tensornetwork.network_components import Node, contract_between
from tensornetwork.network_operations import split_node_full_svd
from tensornetwork.linalg.node_linalg import conj
from tensornetwork.backends import backend_factory
from functools import partial
from tensornetwork.backends.decorators import jit
import warnings
from tensornetwork.ncon_interface import ncon
from tensornetwork.backend_contextmanager import get_default_backend
from tensornetwork.backends.abstract_backend import AbstractBackend
from typing import Any, List, Optional, Text, Type, Union, Dict, Sequence
Tensor = Any
class BaseMPS:
"""The base class for MPS. All MPS should be derived from BaseMPS `BaseMPS`
is an infinite matrix product state with a finite unitcell.
Important attributes:
* `BaseMPS.tensors`: A list of mps tensors.
* `BaseMPS.center_position`: The location of the orthogonality site
* `BaseMPS.connector_matrix`: A rank-2 `Tensor` stored in a `Node`.
`BaseMPS.connector_matrix` connects unit cells back to themselves.
To stack different unit cells, the `BaseMPS.connector_matrix` is
absorbed into the rightmost (by convention) mps tensor prior
to stacking.
To obtain a sequence of `Tensor` objects `[tensor_1,...,tensor_N]`
which can be arbitrarily stacked, i.e.
`stacked_tensors=[tensor_1,...,tensor_N, tensor_1, ..., tensor_N,...]`
use the `BaseMPS.get_tensor` function. This function automatically
absorbs `BaseMPS.connector_matrix` into the correct `Tensor` object
to ensure that the `Tensor`s (i.e. the mps tensors) can be consistently
stacked without gauge jumps.
The orthogonality center can be shifted using the
`BaseMPS.position` method, which uses QR and RQ decompositions to shift
`center_position`.
"""
def __init__(self,
tensors: List[Tensor],
center_position: Optional[int] = None,
connector_matrix: Optional[Tensor] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> None:
"""Initialize a BaseMPS.
Args:
tensors: A list of `Tensor` objects.
center_position: The initial position of the center site.
connector_matrix: A `Tensor` of rank 2 connecting
different unitcells. A value `None` is equivalent to an identity
`connector_matrix`.
backend: The name of the backend that should be used to perform
contractions. Available backends are currently 'numpy', 'tensorflow',
'pytorch', 'jax'
"""
if (center_position is not None) and (center_position < 0 or
center_position >= len(tensors)):
raise ValueError("`center_position = {}` is different from `None` and "
"not between 0 <= center_position < {}".format(
center_position, len(tensors)))
if backend is None:
backend = get_default_backend()
if isinstance(backend, AbstractBackend):
self.backend = backend
else:
self.backend = backend_factory.get_backend(backend)
# the dtype is deduced from the tensor object.
self.tensors = [self.backend.convert_to_tensor(t) for t in tensors]
if not all(
[self.tensors[0].dtype == tensor.dtype for tensor in self.tensors]):
raise TypeError('not all dtypes in BaseMPS.tensors are the same')
self.connector_matrix = connector_matrix
self.center_position = center_position
########################################################################
########## define functions for jitted operations ##########
########################################################################
@partial(jit, backend=self.backend, static_argnums=(1,))
def svd(tensor, max_singular_values=None):
return self.backend.svd(tensor=tensor, pivot_axis=2,
max_singular_values=max_singular_values)
self.svd = svd
@partial(jit, backend=self.backend)
def qr(tensor):
return self.backend.qr(tensor, 2)
self.qr = qr
@partial(jit, backend=self.backend)
def rq(tensor):
return self.backend.rq(tensor, 1)
self.rq = rq
self.norm = self.backend.jit(self.backend.norm)
########################################################################
########################################################################
########################################################################
def left_transfer_operator(self, A, l, Abar):
return ncon([A, l, Abar], [[1, 2, -1], [1, 3], [3, 2, -2]],
backend=self.backend.name)
def right_transfer_operator(self, B, r, Bbar):
return ncon([B, r, Bbar], [[-1, 2, 1], [1, 3], [-2, 2, 3]],
backend=self.backend.name)
def __len__(self) -> int:
return len(self.tensors)
def position(self, site: int, normalize: Optional[bool] = True) -> np.number:
"""Shift `center_position` to `site`.
Args:
site: The site to which FiniteMPS.center_position should be shifted
normalize: If `True`, normalize matrices when shifting.
Returns:
`Tensor`: The norm of the tensor at `FiniteMPS.center_position`
Raises:
ValueError: If `center_position` is `None`.
"""
if self.center_position is None:
raise ValueError(
"BaseMPS.center_position is `None`, cannot shift `center_position`."
"Reset `center_position` manually or use `canonicalize`")
#`site` has to be between 0 and len(mps) - 1
if site >= len(self.tensors) or site < 0:
raise ValueError('site = {} not between values'
' 0 <= site < N = {}'.format(site, len(self)))
#nothing to do
if site == self.center_position:
Z = self.norm(self.tensors[self.center_position])
if normalize:
self.tensors[self.center_position] /= Z
return Z
#shift center_position to the right using QR decomposition
if site > self.center_position:
n = self.center_position
for n in range(self.center_position, site):
Q, R = self.qr(self.tensors[n])
self.tensors[n] = Q
self.tensors[n + 1] = ncon([R, self.tensors[n + 1]],
[[-1, 1], [1, -2, -3]],
backend=self.backend.name)
Z = self.norm(self.tensors[n + 1])
# for an mps with > O(10) sites one needs to normalize to avoid
# over or underflow errors; this takes care of the normalization
if normalize:
self.tensors[n + 1] /= Z
self.center_position = site
#shift center_position to the left using RQ decomposition
else:
for n in reversed(range(site + 1, self.center_position + 1)):
R, Q = self.rq(self.tensors[n])
# for an mps with > O(10) sites one needs to normalize to avoid
# over or underflow errors; this takes care of the normalization
self.tensors[n] = Q #Q is a right-isometric tensor of rank 3
self.tensors[n - 1] = ncon([self.tensors[n - 1], R],
[[-1, -2, 1], [1, -3]],
backend=self.backend.name)
Z = self.norm(self.tensors[n - 1])
if normalize:
self.tensors[n - 1] /= Z
self.center_position = site
#return the norm of the last R tensor (useful for checks)
return Z
@property
def dtype(self) -> Type[np.number]:
if not all(
[self.tensors[0].dtype == tensor.dtype for tensor in self.tensors]):
raise TypeError('not all dtypes in BaseMPS.tensors are the same')
return self.tensors[0].dtype
def save(self, path: str):
raise NotImplementedError()
@property
def bond_dimensions(self) -> List:
"""A list of bond dimensions of `BaseMPS`"""
return [self.tensors[0].shape[0]] + [t.shape[2] for t in self.tensors]
@property
def physical_dimensions(self) -> List:
"""A list of physical Hilbert-space dimensions of `BaseMPS`"""
return [t.shape[1] for t in self.tensors]
def right_envs(self, sites: Sequence[int]) -> Dict:
raise NotImplementedError()
def left_envs(self, sites: Sequence[int]) -> Dict:
raise NotImplementedError()
def apply_transfer_operator(self, site: int, direction: Union[Text, int],
matrix: Tensor) -> Tensor:
"""Compute the action of the MPS transfer-operator at site `site`.
Args:
site: A site of the MPS
direction:
* if `1, 'l'` or `'left'`: compute the left-action
of the MPS transfer-operator at `site` on the input `matrix`.
* if `-1, 'r'` or `'right'`: compute the right-action
of the MPS transfer-operator at `site` on the input `matrix`
matrix: A rank-2 tensor or matrix.
Returns:
`Tensor`: The result of applying the MPS transfer-operator to `matrix`
"""
if direction in (1, 'l', 'left'):
return self.left_transfer_operator(self.tensors[site], matrix,
self.backend.conj(self.tensors[site]))
if direction in (-1, 'r', 'right'):
return self.right_transfer_operator(self.tensors[site], matrix,
self.backend.conj(self.tensors[site]))
raise ValueError(f'unknown value {direction} for direction')
def measure_local_operator(self, ops: List[Tensor],
sites: Sequence[int]) -> List:
"""Measure the expectation value of local operators `ops` site `sites`.
Args:
ops: A list of Tensors of rank 2; the local operators to be measured.
sites: Sites where `ops` act.
Returns:
List: measurements :math:`\\langle` `ops[n]`:math:`\\rangle`
for n in `sites`
Raises:
ValueError if `len(ops) != len(sites)`
"""
if not len(ops) == len(sites):
raise ValueError('measure_local_operator: len(ops) has to equal len(sites)!')
right_envs = self.right_envs(sites)
left_envs = self.left_envs(sites)
res = []
for n, site in enumerate(sites):
O = Node(ops[n], backend=self.backend)
R = Node(right_envs[site], backend=self.backend)
L = Node(left_envs[site], backend=self.backend)
A = Node(self.tensors[site], backend=self.backend)
conj_A = conj(A)
O[1] ^ A[1]
O[0] ^ conj_A[1]
R[0] ^ A[2]
R[1] ^ conj_A[2]
L[0] ^ A[0]
L[1] ^ conj_A[0]
result = L @ A @ O @ conj_A @ R
res.append(self.backend.item(result.tensor))
return res
def measure_two_body_correlator(self, op1: Tensor, op2: Tensor, site1: int,
sites2: Sequence[int]) -> List:
"""
Compute the correlator
:math:`\\langle` `op1[site1], op2[s]`:math:`\\rangle`
between `site1` and all sites `s` in `sites2`. If `s == site1`,
`op2[s]` will be applied first.
Args:
op1: Tensor of rank 2; the local operator at `site1`.
op2: Tensor of rank 2; the local operator at `sites2`.
site1: The site where `op1` acts
sites2: Sites where operator `op2` acts.
Returns:
List: Correlator :math:`\\langle` `op1[site1], op2[s]`:math:`\\rangle`
for `s` :math:`\\in` `sites2`.
Raises:
ValueError if `site1` is out of range
"""
N = len(self)
if site1 < 0:
raise ValueError(
"Site site1 out of range: {} not between 0 <= site < N = {}.".format(
site1, N))
sites2 = np.array(sites2) #enable logical indexing
# we break the computation into two parts:
# first we get all correlators <op2(site2) op1(site1)> with site2 < site1
# then all correlators <op1(site1) op2(site2)> with site2 >= site1
# get all sites smaller than site1
left_sites = np.sort(sites2[sites2 < site1])
# get all sites larger than site1
right_sites = np.sort(sites2[sites2 > site1])
# compute all necessary right reduced
# density matrices in one go. This is
# more efficient than calling right_envs
# for each site individually
rs = self.right_envs(
np.append(site1, np.mod(right_sites, N)).astype(np.int64))
ls = self.left_envs(
np.append(np.mod(left_sites, N), site1).astype(np.int64))
c = []
if len(left_sites) > 0:
A = Node(self.tensors[site1], backend=self.backend)
O1 = Node(op1, backend=self.backend)
conj_A = conj(A)
R = Node(rs[site1], backend=self.backend)
R[0] ^ A[2]
R[1] ^ conj_A[2]
A[1] ^ O1[1]
conj_A[1] ^ O1[0]
R = ((R @ A) @ O1) @ conj_A
n1 = np.min(left_sites)
# -- A--------
# | |
# compute op1(site1) |
# | |
# -- A*-------
# and evolve it to the left by contracting tensors at site2 < site1
# if site2 is in `sites2`, calculate the observable
#
# ---A--........-- A--------
# | | | |
# | op2(site2) op1(site1)|
# | | | |
# ---A--........-- A*-------
for n in range(site1 - 1, n1 - 1, -1):
if n in left_sites:
A = Node(self.tensors[n % N], backend=self.backend)
conj_A = conj(A)
O2 = Node(op2, backend=self.backend)
L = Node(ls[n % N], backend=self.backend)
L[0] ^ A[0]
L[1] ^ conj_A[0]
O2[0] ^ conj_A[1]
O2[1] ^ A[1]
R[0] ^ A[2]
R[1] ^ conj_A[2]
res = (((L @ A) @ O2) @ conj_A) @ R
c.append(res.tensor)
if n > n1:
R = Node(
self.apply_transfer_operator(n % N, 'right', R.tensor),
backend=self.backend)
c = list(reversed(c))
# compute <op2(site1)op1(site1)>
if site1 in sites2:
O1 = Node(op1, backend=self.backend)
O2 = Node(op2, backend=self.backend)
L = Node(ls[site1], backend=self.backend)
R = Node(rs[site1], backend=self.backend)
A = Node(self.tensors[site1], backend=self.backend)
conj_A = conj(A)
O1[1] ^ O2[0]
L[0] ^ A[0]
L[1] ^ conj_A[0]
R[0] ^ A[2]
R[1] ^ conj_A[2]
A[1] ^ O2[1]
conj_A[1] ^ O1[0]
O = O1 @ O2
res = (((L @ A) @ O) @ conj_A) @ R
c.append(res.tensor)
# compute <op1(site1) op2(site2)> for site1 < site2
if len(right_sites) > 0:
A = Node(self.tensors[site1], backend=self.backend)
conj_A = conj(A)
L = Node(ls[site1], backend=self.backend)
O1 = Node(op1, backend=self.backend)
L[0] ^ A[0]
L[1] ^ conj_A[0]
A[1] ^ O1[1]
conj_A[1] ^ O1[0]
L = L @ A @ O1 @ conj_A
n2 = np.max(right_sites)
# -- A--
# | |
# compute | op1(site1)
# | |
# -- A*--
# and evolve it to the right by contracting tensors at site2 > site1
# if site2 is in `sites2`, calculate the observable
#
# ---A--........-- A--------
# | | | |
# | op1(site1) op2(site2)|
# | | | |
# ---A--........-- A*-------
for n in range(site1 + 1, n2 + 1):
if n in right_sites:
R = Node(rs[n % N], backend=self.backend)
A = Node(self.tensors[n % N], backend=self.backend)
conj_A = conj(A)
O2 = Node(op2, backend=self.backend)
A[0] ^ L[0]
conj_A[0] ^ L[1]
O2[0] ^ conj_A[1]
O2[1] ^ A[1]
R[0] ^ A[2]
R[1] ^ conj_A[2]
res = L @ A @ O2 @ conj_A @ R
c.append(res.tensor)
if n < n2:
L = Node(
self.apply_transfer_operator(n % N, 'left', L.tensor),
backend=self.backend)
return [self.backend.item(o) for o in c]
def apply_two_site_gate(self,
gate: Tensor,
site1: int,
site2: int,
max_singular_values: Optional[int] = None,
max_truncation_err: Optional[float] = None) -> Tensor:
"""Apply a two-site gate to an MPS. This routine will in general destroy
any canonical form of the state. If a canonical form is needed, the user
can restore it using `FiniteMPS.position`.
Args:
gate: A two-body gate.
site1: The first site where the gate acts.
site2: The second site where the gate acts.
max_singular_values: The maximum number of singular values to keep.
max_truncation_err: The maximum allowed truncation error.
Returns:
`Tensor`: A scalar tensor containing the truncated weight of the
truncation.
"""
if len(gate.shape) != 4:
raise ValueError('rank of gate is {} but has to be 4'.format(
len(gate.shape)))
if site1 < 0 or site1 >= len(self) - 1:
raise ValueError(
'site1 = {} is not between 0 <= site < N - 1 = {}'.format(
site1, len(self)))
if site2 < 1 or site2 >= len(self):
raise ValueError('site2 = {} is not between 1 <= site < N = {}'.format(
site2, len(self)))
if site2 <= site1:
raise ValueError('site2 = {} has to be larger than site1 = {}'.format(
site2, site1))
if site2 != site1 + 1:
raise ValueError("Found site2 ={}, site1={}. Only nearest "
"neighbor gates are currently"
"supported".format(site2, site1))
if (max_singular_values or
max_truncation_err) and self.center_position not in (site1, site2):
raise ValueError(
'center_position = {}, but gate is applied at sites {}, {}. '
'Truncation should only be done if the gate '
'is applied at the center position of the MPS'.format(
self.center_position, site1, site2))
gate_node = Node(gate, backend=self.backend)
node1 = Node(self.tensors[site1], backend=self.backend)
node2 = Node(self.tensors[site2], backend=self.backend)
node1[2] ^ node2[0]
gate_node[2] ^ node1[1]
gate_node[3] ^ node2[1]
left_edges = [node1[0], gate_node[0]]
right_edges = [gate_node[1], node2[2]]
result = node1 @ node2 @ gate_node
U, S, V, tw = split_node_full_svd(
result,
left_edges=left_edges,
right_edges=right_edges,
max_singular_values=max_singular_values,
max_truncation_err=max_truncation_err,
left_name=node1.name,
right_name=node2.name)
V.reorder_edges([S[1]] + right_edges)
left_edges = left_edges + [S[1]]
res = contract_between(U, S, name=U.name).reorder_edges(left_edges)
self.tensors[site1] = res.tensor
self.tensors[site2] = V.tensor
return tw
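# Usage note (illustrative, names and truncation value assumed): truncation is
# only allowed when the orthogonality center sits on one of the two sites, so a
# typical call sequence is
#
#   mps.position(site)                      # move the center onto the gate
#   tw = mps.apply_two_site_gate(gate, site, site + 1, max_singular_values=64)
#   mps.position(site)                      # optionally restore canonical form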
def apply_one_site_gate(self, gate: Tensor, site: int) -> None:
"""Apply a one-site gate to an MPS. This routine will in general destroy
any canonical form of the state. If a canonical form is needed, the user
can restore it using `FiniteMPS.position`
Args:
gate: a one-body gate
site: the site where the gate should be applied
"""
if len(gate.shape) != 2:
raise ValueError('rank of gate is {} but has to be 2'.format(
len(gate.shape)))
if site < 0 or site >= len(self):
raise ValueError('site = {} is not between 0 <= site < N={}'.format(
site, len(self)))
self.tensors[site] = ncon([gate, self.tensors[site]],
[[-2, 1], [-1, 1, -3]],
backend=self.backend.name)
def check_orthonormality(self, which: Text, site: int) -> Tensor:
"""Check orthonormality of tensor at site `site`.
Args:
which: * if `'l'` or `'left'`: check left orthogonality
* if `'r'` or `'right'`: check right orthogonality
site: The site of the tensor.
Returns:
scalar `Tensor`: The L2 norm of the deviation from identity.
Raises:
ValueError: If which is different from 'l','left', 'r' or 'right'.
"""
if which not in ('l', 'left', 'r', 'right'):
raise ValueError(
"Wrong value `which`={}. "
"`which` as to be 'l','left', 'r' or 'right.".format(which))
n1 = Node(
self.get_tensor(site),
backend=self.backend) #we need to absorb the connector_matrix
n2 = conj(n1)
if which in ('l', 'left'):
n1[0] ^ n2[0]
n1[1] ^ n2[1]
else:
n1[2] ^ n2[2]
n1[1] ^ n2[1]
result = (n1 @ n2).tensor
tmp = result - self.backend.eye(
N=self.backend.sparse_shape(result)[0],
M=self.backend.sparse_shape(result)[1],
dtype=self.dtype)
return self.backend.sqrt(
ncon([tmp, self.backend.conj(tmp)], [[1, 2], [1, 2]],
backend=self.backend))
# pylint: disable=inconsistent-return-statements
def check_canonical(self) -> Any:
"""Check whether the MPS is in a canonical form.
If `center_position` is `None`, no check is performed.
Returns:
The L2 norm of the vector of local deviations.
"""
if self.center_position is None:
warnings.warn(
"BaseMPS.center_position is `None`. Skipping `check_canonical`")
return
deviations = []
for site in range(len(self.tensors)):
if site < self.center_position:
deviation = self.check_orthonormality('l', site)
elif site > self.center_position:
deviation = self.check_orthonormality('r', site)
else:
continue
deviations.append(deviation**2)
return self.backend.sqrt(sum(deviations[1:], deviations[0]))
def get_tensor(self, site: int) -> Tensor:
"""Returns the `Tensor` object at `site`.
If `site==len(self) - 1` `BaseMPS.connector_matrix`
is absorbed from the right-hand side into the returned
`Tensor` object.
Args:
site: The site for which to return the `Node`.
Returns:
`Tensor`: The tensor at `site`.
"""
if site >= len(self):
raise IndexError(
'index `site` = {} is out of range for len(mps)= {}'.format(
site, len(self)))
if site < 0:
raise ValueError(
'index `site` has to be non-negative (found `site`={}).'.format(
site))
if (site == len(self) - 1) and (self.connector_matrix is not None):
return ncon([self.tensors[site], self.connector_matrix],
[[-1, -2, 1], [1, -3]],
backend=self.backend.name)
return self.tensors[site]
def canonicalize(self, *args, **kwargs) -> np.number:
raise NotImplementedError()
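# Illustrative sketch (assumes a concrete subclass providing
# `left_envs`/`right_envs`): the tensors handed to `BaseMPS.__init__` are
# rank-3 arrays indexed as (left bond, physical, right bond), which is what
# `bond_dimensions`, `physical_dimensions` and the ncon patterns above expect.
# A bond-dimension-1 product state over four sites could therefore be described
# by plain numpy arrays such as
#
#   tensors = [np.ones((1, 2, 1)) / np.sqrt(2) for _ in range(4)]
#
# and passed to such a subclass together with a `center_position`.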
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
scope='Conv2d_0c_7x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
scope='Conv2d_0c_3x1')
mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionResnetV2'):
"""Creates the Inception Resnet V2 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
end_points['Conv2d_1a_3x3'] = net
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding='VALID',
scope='Conv2d_2a_3x3')
end_points['Conv2d_2a_3x3'] = net
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
end_points['Conv2d_2b_3x3'] = net
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_3a_3x3')
end_points['MaxPool_3a_3x3'] = net
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding='VALID',
scope='Conv2d_3b_1x1')
end_points['Conv2d_3b_1x1'] = net
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding='VALID',
scope='Conv2d_4a_3x3')
end_points['Conv2d_4a_3x3'] = net
# 35 x 35 x 192
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_5a_3x3')
end_points['MaxPool_5a_3x3'] = net
# 35 x 35 x 320
with tf.variable_scope('Mixed_5b'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
scope='Conv2d_0b_5x5')
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
scope='AvgPool_0a_3x3')
tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
scope='Conv2d_0b_1x1')
net = tf.concat(axis=3, values=[tower_conv, tower_conv1_1,
tower_conv2_2, tower_pool_1])
end_points['Mixed_5b'] = net
net = slim.repeat(net, 10, block35, scale=0.17)
# 17 x 17 x 1088
with tf.variable_scope('Mixed_6a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 384, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
scope='Conv2d_0b_3x3')
tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[tower_conv, tower_conv1_2, tower_pool])
end_points['Mixed_6a'] = net
net = slim.repeat(net, 20, block17, scale=0.10)
# Auxiliary tower
with tf.variable_scope('AuxLogits'):
aux = slim.avg_pool2d(net, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
aux = slim.conv2d(aux, 768, aux.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a_5x5')
aux = slim.flatten(aux)
aux = slim.fully_connected(aux, num_classes, activation_fn=None,
scope='Logits')
end_points['AuxLogits'] = aux
with tf.variable_scope('Mixed_7a'):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[tower_conv_1, tower_conv1_1,
tower_conv2_2, tower_pool])
end_points['Mixed_7a'] = net
net = slim.repeat(net, 9, block8, scale=0.20)
net = block8(net, activation_fn=None)
net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
end_points['Conv2d_7b_1x1'] = net
with tf.variable_scope('Logits'):
end_points['PrePool'] = net
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_resnet_v2.default_image_size = 299
def inception_resnet_v2_arg_scope(weight_decay=0.00004,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001):
"""Yields the scope with the default parameters for inception_resnet_v2.
Args:
weight_decay: the weight decay for weights variables.
batch_norm_decay: decay for the moving average of batch_norm momentums.
batch_norm_epsilon: small float added to variance to avoid dividing by zero.
Returns:
an arg_scope with the parameters needed for inception_resnet_v2.
"""
# Set weight_decay for weights in conv2d and fully_connected layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_regularizer=slim.l2_regularizer(weight_decay)):
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
}
# Set activation_fn and parameters for batch_norm.
with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as scope:
return scope
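# Illustrative TF1-style usage (input placeholder shape assumed): the arg_scope
# returned above is meant to wrap the model construction, e.g.
#
#   images = tf.placeholder(tf.float32, [None, 299, 299, 3])
#   with slim.arg_scope(inception_resnet_v2_arg_scope()):
#       logits, end_points = inception_resnet_v2(images, num_classes=1001,
#                                                is_training=False)
#
# so that every conv2d/fully_connected layer picks up the weight decay and
# batch-norm settings defined here.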
|
import logging
from pyControl4.account import C4Account
from pyControl4.director import C4Director
from pyControl4.error_handling import BadToken
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from .const import (
CONF_ACCOUNT,
CONF_CONTROLLER_UNIQUE_ID,
CONF_DIRECTOR,
CONF_DIRECTOR_TOKEN_EXPIRATION,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
async def director_update_data(
hass: HomeAssistant, entry: ConfigEntry, var: str
) -> dict:
"""Retrieve data from the Control4 director for update_coordinator."""
# possibly implement usage of director_token_expiration to start
# token refresh without waiting for error to occur
try:
director = hass.data[DOMAIN][entry.entry_id][CONF_DIRECTOR]
data = await director.getAllItemVariableValue(var)
except BadToken:
_LOGGER.info("Updating Control4 director token")
await refresh_tokens(hass, entry)
director = hass.data[DOMAIN][entry.entry_id][CONF_DIRECTOR]
data = await director.getAllItemVariableValue(var)
return {key["id"]: key for key in data}
async def refresh_tokens(hass: HomeAssistant, entry: ConfigEntry):
"""Store updated authentication and director tokens in hass.data."""
config = entry.data
account_session = aiohttp_client.async_get_clientsession(hass)
account = C4Account(config[CONF_USERNAME], config[CONF_PASSWORD], account_session)
await account.getAccountBearerToken()
controller_unique_id = config[CONF_CONTROLLER_UNIQUE_ID]
director_token_dict = await account.getDirectorBearerToken(controller_unique_id)
director_session = aiohttp_client.async_get_clientsession(hass, verify_ssl=False)
director = C4Director(
config[CONF_HOST], director_token_dict[CONF_TOKEN], director_session
)
director_token_expiry = director_token_dict["token_expiration"]
_LOGGER.debug("Saving new tokens in hass data")
entry_data = hass.data[DOMAIN][entry.entry_id]
entry_data[CONF_ACCOUNT] = account
entry_data[CONF_DIRECTOR] = director
entry_data[CONF_DIRECTOR_TOKEN_EXPIRATION] = director_token_expiry
|
from django.db import migrations
def create_index(apps, schema_editor):
if schema_editor.connection.vendor != "postgresql":
return
schema_editor.execute("DROP INDEX memory_source_fulltext")
schema_editor.execute(
"CREATE INDEX memory_source_trgm ON memory_memory USING GIN (source gin_trgm_ops)"
)
def drop_index(apps, schema_editor):
if schema_editor.connection.vendor != "postgresql":
return
schema_editor.execute("DROP INDEX memory_source_trgm")
schema_editor.execute(
"CREATE INDEX memory_source_fulltext ON memory_memory "
"USING GIN (to_tsvector('english', source))"
)
class Migration(migrations.Migration):
dependencies = [
("memory", "0006_memory_update"),
("trans", "0064_fulltext_index"),
]
operations = [
migrations.RunPython(create_index, drop_index, elidable=False, atomic=False)
]
|
import enum
from typing import Optional, Type
from .. import data_manager
from .base import IdentifierData, BaseDriver, ConfigCategory
from .json import JsonDriver
from .postgres import PostgresDriver
__all__ = [
"get_driver",
"ConfigCategory",
"IdentifierData",
"BaseDriver",
"JsonDriver",
"PostgresDriver",
"BackendType",
]
class BackendType(enum.Enum):
"""Represents storage backend type."""
#: JSON storage backend.
JSON = "JSON"
#: Postgres storage backend.
POSTGRES = "Postgres"
# Dead drivers below retained for error handling.
MONGOV1 = "MongoDB"
MONGO = "MongoDBV2"
_DRIVER_CLASSES = {BackendType.JSON: JsonDriver, BackendType.POSTGRES: PostgresDriver}
def _get_driver_class_include_old(storage_type: Optional[BackendType] = None) -> Type[BaseDriver]:
"""
ONLY for use in CLI for moving data away from a no longer supported backend
"""
if storage_type and storage_type == BackendType.MONGO:
from ._mongo import MongoDriver
return MongoDriver
else:
return get_driver_class(storage_type)
def get_driver_class(storage_type: Optional[BackendType] = None) -> Type[BaseDriver]:
"""Get the driver class for the given storage type.
Parameters
----------
storage_type : Optional[BackendType]
The backend you want a driver class for. Omit to try to obtain
the backend from data manager.
Returns
-------
Type[BaseDriver]
A subclass of `BaseDriver`.
Raises
------
ValueError
If there is no driver for the given storage type.
"""
if storage_type is None:
storage_type = BackendType(data_manager.storage_type())
try:
return _DRIVER_CLASSES[storage_type]
except KeyError:
raise ValueError(f"No driver found for storage type {storage_type}") from None
def get_driver(
cog_name: str,
identifier: str,
storage_type: Optional[BackendType] = None,
*,
allow_old: bool = False,
**kwargs,
):
"""Get a driver instance.
Parameters
----------
cog_name : str
The cog's name.
identifier : str
The cog's discriminator.
storage_type : Optional[BackendType]
The backend you want a driver for. Omit to try to obtain the
backend from data manager.
**kwargs
Driver-specific keyword arguments.
Returns
-------
BaseDriver
A driver instance.
Raises
------
RuntimeError
If the storage type is MongoV1, Mongo, or invalid.
"""
if storage_type is None:
try:
storage_type = BackendType(data_manager.storage_type())
except RuntimeError:
storage_type = BackendType.JSON
try:
if not allow_old:
driver_cls: Type[BaseDriver] = get_driver_class(storage_type)
else:
driver_cls: Type[BaseDriver] = _get_driver_class_include_old(storage_type)
except ValueError:
if storage_type in (BackendType.MONGOV1, BackendType.MONGO):
raise RuntimeError(
"Please convert to JSON first to continue using the bot."
"Mongo support was removed in 3.2."
) from None
else:
raise RuntimeError(f"Invalid driver type: '{storage_type}'") from None
return driver_cls(cog_name, identifier, **kwargs)
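# Illustrative usage (cog name and identifier are made up): a cog that wants
# explicit control over the backend could do
#
#   driver = get_driver("MyCog", "1234567890", storage_type=BackendType.JSON)
#
# Omitting storage_type defers the choice to data_manager, falling back to the
# JSON backend when no storage type has been configured yet.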
|
from six.moves import cPickle as pickle
from socket import gethostbyname, gethostname
import os
def determine_master(port=4000):
"""Determine address of master so that workers
can connect to it. If the environment variable
SPARK_LOCAL_IP is set, that address will be used.
:param port: port on which the application runs
:return: Master address
Example usage:
SPARK_LOCAL_IP=127.0.0.1 spark-submit --master \
local[8] examples/mllib_mlp.py
"""
if os.environ.get('SPARK_LOCAL_IP'):
return os.environ['SPARK_LOCAL_IP'] + ":" + str(port)
else:
return gethostbyname(gethostname()) + ":" + str(port)
def _receive_all(socket, num_bytes):
"""Reads `num_bytes` bytes from the specified socket.
:param socket: open socket instance
:param num_bytes: number of bytes to read
:return: received data
"""
buffer = b''
buffer_size = 0
bytes_left = num_bytes
while buffer_size < num_bytes:
data = socket.recv(bytes_left)
delta = len(data)
buffer_size += delta
bytes_left -= delta
buffer += data
return buffer
def receive(socket, num_bytes=20):
"""Receive data frame from open socket.
:param socket: open socket instance
:param num_bytes: number of bytes of the length header to read
:return: received data
"""
length = int(_receive_all(socket, num_bytes).decode())
serialized_data = _receive_all(socket, length)
return pickle.loads(serialized_data)
def send(socket, data, num_bytes=20):
"""Send data to specified socket.
:param socket: open socket instance
:param data: data to send
:param num_bytes: width of the zero-padded length header in bytes
:return: None
"""
pickled_data = pickle.dumps(data, -1)
length = str(len(pickled_data)).zfill(num_bytes)
socket.sendall(length.encode())
socket.sendall(pickled_data)
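# Minimal local round-trip sketch over a socketpair (illustrative payload),
# showing the zero-padded 20-byte length header that frames every pickled
# message exchanged by send() and receive().
if __name__ == '__main__':
    import socket as _socket
    a, b = _socket.socketpair()
    send(a, {'weights': [1.0, 2.0, 3.0]})
    print(receive(b))  # -> {'weights': [1.0, 2.0, 3.0]}
    a.close()
    b.close()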
|
import os
import pytest
import kombu
from .common import (
BasicFunctionality, BaseExchangeTypes,
BaseTimeToLive, BasePriority, BaseFailover
)
def get_connection(hostname, port, vhost):
return kombu.Connection(f'pyamqp://{hostname}:{port}')
def get_failover_connection(hostname, port, vhost):
return kombu.Connection(
f'pyamqp://localhost:12345;pyamqp://{hostname}:{port}'
)
@pytest.fixture()
def invalid_connection():
return kombu.Connection('pyamqp://localhost:12345')
@pytest.fixture()
def connection(request):
return get_connection(
hostname=os.environ.get('RABBITMQ_HOST', 'localhost'),
port=os.environ.get('RABBITMQ_5672_TCP', '5672'),
vhost=getattr(
request.config, "slaveinput", {}
).get("slaveid", None),
)
@pytest.fixture()
def failover_connection(request):
return get_failover_connection(
hostname=os.environ.get('RABBITMQ_HOST', 'localhost'),
port=os.environ.get('RABBITMQ_5672_TCP', '5672'),
vhost=getattr(
request.config, "slaveinput", {}
).get("slaveid", None),
)
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPBasicFunctionality(BasicFunctionality):
pass
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPBaseExchangeTypes(BaseExchangeTypes):
pass
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPTimeToLive(BaseTimeToLive):
pass
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPPriority(BasePriority):
pass
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPFailover(BaseFailover):
pass
|
revision = "f2383bf08fbc"
down_revision = "c87cb989af04"
import sqlalchemy as sa
from alembic import op
def upgrade():
op.create_index(
"ix_certificates_id_desc",
"certificates",
[sa.text("id DESC")],
unique=True,
postgresql_using="btree",
)
def downgrade():
op.drop_index("ix_certificates_id_desc", table_name="certificates")
|
from typing import Optional
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import (
ATTR_DEFAULT_ENABLED,
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_MEASUREMENT,
ATTR_NAME,
ATTR_SECTION,
ATTR_UNIT_OF_MEASUREMENT,
DOMAIN,
SENSOR_ENTITIES,
)
from .coordinator import ToonDataUpdateCoordinator
from .models import (
ToonBoilerDeviceEntity,
ToonDisplayDeviceEntity,
ToonElectricityMeterDeviceEntity,
ToonEntity,
ToonGasMeterDeviceEntity,
ToonSolarDeviceEntity,
ToonWaterMeterDeviceEntity,
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up Toon sensors based on a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
sensors = [
ToonElectricityMeterDeviceSensor(coordinator, key=key)
for key in (
"power_average_daily",
"power_average",
"power_daily_cost",
"power_daily_value",
"power_meter_reading_low",
"power_meter_reading",
"power_value",
"solar_meter_reading_low_produced",
"solar_meter_reading_produced",
)
]
sensors.extend(
[ToonDisplayDeviceSensor(coordinator, key="current_display_temperature")]
)
sensors.extend(
[
ToonGasMeterDeviceSensor(coordinator, key=key)
for key in (
"gas_average_daily",
"gas_average",
"gas_daily_cost",
"gas_daily_usage",
"gas_meter_reading",
"gas_value",
)
]
)
sensors.extend(
[
ToonWaterMeterDeviceSensor(coordinator, key=key)
for key in (
"water_average_daily",
"water_average",
"water_daily_cost",
"water_daily_usage",
"water_meter_reading",
"water_value",
)
]
)
if coordinator.data.agreement.is_toon_solar:
sensors.extend(
[
ToonSolarDeviceSensor(coordinator, key=key)
for key in [
"solar_value",
"solar_maximum",
"solar_produced",
"solar_average_produced",
"power_usage_day_produced_solar",
"power_usage_day_from_grid_usage",
"power_usage_day_to_grid_usage",
"power_usage_current_covered_by_solar",
]
]
)
if coordinator.data.thermostat.have_opentherm_boiler:
sensors.extend(
[
ToonBoilerDeviceSensor(
coordinator, key="thermostat_info_current_modulation_level"
)
]
)
async_add_entities(sensors, True)
class ToonSensor(ToonEntity):
"""Defines a Toon sensor."""
def __init__(self, coordinator: ToonDataUpdateCoordinator, *, key: str) -> None:
"""Initialize the Toon sensor."""
self.key = key
super().__init__(
coordinator,
enabled_default=SENSOR_ENTITIES[key][ATTR_DEFAULT_ENABLED],
icon=SENSOR_ENTITIES[key][ATTR_ICON],
name=SENSOR_ENTITIES[key][ATTR_NAME],
)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
agreement_id = self.coordinator.data.agreement.agreement_id
# This unique ID is a bit ugly and contains unneeded information.
# It is here for legacy / backward compatible reasons.
return f"{DOMAIN}_{agreement_id}_sensor_{self.key}"
@property
def state(self) -> Optional[str]:
"""Return the state of the sensor."""
section = getattr(
self.coordinator.data, SENSOR_ENTITIES[self.key][ATTR_SECTION]
)
return getattr(section, SENSOR_ENTITIES[self.key][ATTR_MEASUREMENT])
@property
def unit_of_measurement(self) -> Optional[str]:
"""Return the unit this state is expressed in."""
return SENSOR_ENTITIES[self.key][ATTR_UNIT_OF_MEASUREMENT]
@property
def device_class(self) -> Optional[str]:
"""Return the device class."""
return SENSOR_ENTITIES[self.key][ATTR_DEVICE_CLASS]
class ToonElectricityMeterDeviceSensor(ToonSensor, ToonElectricityMeterDeviceEntity):
"""Defines a Electricity Meter sensor."""
class ToonGasMeterDeviceSensor(ToonSensor, ToonGasMeterDeviceEntity):
"""Defines a Gas Meter sensor."""
class ToonWaterMeterDeviceSensor(ToonSensor, ToonWaterMeterDeviceEntity):
"""Defines a Water Meter sensor."""
class ToonSolarDeviceSensor(ToonSensor, ToonSolarDeviceEntity):
"""Defines a Solar sensor."""
class ToonBoilerDeviceSensor(ToonSensor, ToonBoilerDeviceEntity):
"""Defines a Boiler sensor."""
class ToonDisplayDeviceSensor(ToonSensor, ToonDisplayDeviceEntity):
"""Defines a Display sensor."""
|
import functools
import os
import sys
from collections import defaultdict
import blinker
import natsort
from nikola.plugin_categories import SignalHandler
from nikola import utils, hierarchy_utils
class TaxonomiesClassifier(SignalHandler):
"""Classify posts and pages by taxonomies."""
name = "classify_taxonomies"
def _do_classification(self, site):
# Needed to avoid strange errors during tests
if site is not self.site:
return
# Get list of enabled taxonomy plugins and initialize data structures
taxonomies = site.taxonomy_plugins.values()
site.posts_per_classification = {}
for taxonomy in taxonomies:
site.posts_per_classification[taxonomy.classification_name] = {
lang: defaultdict(set) for lang in site.config['TRANSLATIONS'].keys()
}
# Classify posts
for post in site.timeline:
# Do classify pages, but don’t classify posts that are hidden
# (draft/private/future)
if post.is_post and not post.use_in_feeds:
continue
for taxonomy in taxonomies:
if taxonomy.apply_to_posts if post.is_post else taxonomy.apply_to_pages:
classifications = {}
for lang in site.config['TRANSLATIONS'].keys():
# Extract classifications for this language
classifications[lang] = taxonomy.classify(post, lang)
if not taxonomy.more_than_one_classifications_per_post and len(classifications[lang]) > 1:
raise ValueError("Too many {0} classifications for post {1}".format(taxonomy.classification_name, post.source_path))
# Add post to sets
for classification in classifications[lang]:
while True:
site.posts_per_classification[taxonomy.classification_name][lang][classification].add(post)
if not taxonomy.include_posts_from_subhierarchies or not taxonomy.has_hierarchy:
break
classification_path = taxonomy.extract_hierarchy(classification)
if len(classification_path) <= 1:
if len(classification_path) == 0 or not taxonomy.include_posts_into_hierarchy_root:
break
classification = taxonomy.recombine_classification_from_hierarchy(classification_path[:-1])
# Sort everything.
site.page_count_per_classification = {}
site.hierarchy_per_classification = {}
site.flat_hierarchy_per_classification = {}
site.hierarchy_lookup_per_classification = {}
for taxonomy in taxonomies:
site.page_count_per_classification[taxonomy.classification_name] = {}
# Sort post lists
for lang, posts_per_classification in site.posts_per_classification[taxonomy.classification_name].items():
# Ensure implicit classifications are inserted
for classification in taxonomy.get_implicit_classifications(lang):
if classification not in posts_per_classification:
posts_per_classification[classification] = []
site.page_count_per_classification[taxonomy.classification_name][lang] = {}
# Convert sets to lists and sort them
for classification in list(posts_per_classification.keys()):
posts = list(posts_per_classification[classification])
posts = self.site.sort_posts_chronologically(posts, lang)
taxonomy.sort_posts(posts, classification, lang)
posts_per_classification[classification] = posts
# Create hierarchy information
if taxonomy.has_hierarchy:
site.hierarchy_per_classification[taxonomy.classification_name] = {}
site.flat_hierarchy_per_classification[taxonomy.classification_name] = {}
site.hierarchy_lookup_per_classification[taxonomy.classification_name] = {}
for lang, posts_per_classification in site.posts_per_classification[taxonomy.classification_name].items():
# Compose hierarchy
hierarchy = {}
for classification in posts_per_classification.keys():
hier = taxonomy.extract_hierarchy(classification)
node = hierarchy
for he in hier:
if he not in node:
node[he] = {}
node = node[he]
hierarchy_lookup = {}
def create_hierarchy(hierarchy, parent=None, level=0):
"""Create hierarchy."""
result = {}
for name, children in hierarchy.items():
node = hierarchy_utils.TreeNode(name, parent)
node.children = create_hierarchy(children, node, level + 1)
node.classification_path = [pn.name for pn in node.get_path()]
node.classification_name = taxonomy.recombine_classification_from_hierarchy(node.classification_path)
hierarchy_lookup[node.classification_name] = node
result[node.name] = node
classifications = natsort.natsorted(result.keys(), alg=natsort.ns.F | natsort.ns.IC)
taxonomy.sort_classifications(classifications, lang, level=level)
return [result[classification] for classification in classifications]
root_list = create_hierarchy(hierarchy)
if '' in posts_per_classification:
node = hierarchy_utils.TreeNode('', parent=None)
node.children = root_list
node.classification_path = []
node.classification_name = ''
hierarchy_lookup[node.name] = node
root_list = [node]
flat_hierarchy = hierarchy_utils.flatten_tree_structure(root_list)
# Store result
site.hierarchy_per_classification[taxonomy.classification_name][lang] = root_list
site.flat_hierarchy_per_classification[taxonomy.classification_name][lang] = flat_hierarchy
site.hierarchy_lookup_per_classification[taxonomy.classification_name][lang] = hierarchy_lookup
taxonomy.postprocess_posts_per_classification(site.posts_per_classification[taxonomy.classification_name],
site.flat_hierarchy_per_classification[taxonomy.classification_name],
site.hierarchy_lookup_per_classification[taxonomy.classification_name])
else:
taxonomy.postprocess_posts_per_classification(site.posts_per_classification[taxonomy.classification_name])
# Check for valid paths and for collisions
taxonomy_outputs = {lang: dict() for lang in site.config['TRANSLATIONS'].keys()}
quit = False
for taxonomy in taxonomies:
# Check for collisions (per language)
for lang in site.config['TRANSLATIONS'].keys():
if not taxonomy.is_enabled(lang):
continue
for classification, posts in site.posts_per_classification[taxonomy.classification_name][lang].items():
# Do we actually generate this classification page?
filtered_posts = [x for x in posts if self.site.config["SHOW_UNTRANSLATED_POSTS"] or x.is_translation_available(lang)]
generate_list = taxonomy.should_generate_classification_page(classification, filtered_posts, lang)
if not generate_list:
continue
# Obtain path as tuple
path = site.path_handlers[taxonomy.classification_name](classification, lang)
# Check that path is OK
for path_element in path:
if len(path_element) == 0:
utils.LOGGER.error("{0} {1} yields invalid path '{2}'!".format(taxonomy.classification_name.title(), classification, '/'.join(path)))
quit = True
# Combine path
path = os.path.join(*[os.path.normpath(p) for p in path if p != '.'])
# Determine collisions
if path in taxonomy_outputs[lang]:
other_classification_name, other_classification, other_posts = taxonomy_outputs[lang][path]
if other_classification_name == taxonomy.classification_name and other_classification == classification:
taxonomy_outputs[lang][path][2].extend(filtered_posts)
else:
utils.LOGGER.error('You have classifications that are too similar: {0} "{1}" and {2} "{3}" both result in output path {4} for language {5}.'.format(
taxonomy.classification_name, classification, other_classification_name, other_classification, path, lang))
utils.LOGGER.error('{0} "{1}" is used in: {2}'.format(
taxonomy.classification_name.title(), classification, ', '.join(sorted([p.source_path for p in filtered_posts]))))
utils.LOGGER.error('{0} "{1}" is used in: {2}'.format(
other_classification_name.title(), other_classification, ', '.join(sorted([p.source_path for p in other_posts]))))
quit = True
else:
taxonomy_outputs[lang][path] = (taxonomy.classification_name, classification, list(posts))
if quit:
sys.exit(1)
blinker.signal('taxonomies_classified').send(site)
def _get_filtered_list(self, taxonomy, classification, lang):
"""Return the filtered list of posts for this classification and language."""
post_list = self.site.posts_per_classification[taxonomy.classification_name][lang].get(classification, [])
if self.site.config["SHOW_UNTRANSLATED_POSTS"]:
return post_list
else:
return [x for x in post_list if x.is_translation_available(lang)]
@staticmethod
def _compute_number_of_pages(filtered_posts, posts_count):
"""Given a list of posts and the maximal number of posts per page, computes the number of pages needed."""
        return max(1, (len(filtered_posts) + posts_count - 1) // posts_count)
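        # For example (hypothetical numbers): 25 posts with 10 posts per page
        # yield (25 + 10 - 1) // 10 == 3 pages; an empty post list still counts
        # as a single page.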
def _postprocess_path(self, path, lang, append_index='auto', dest_type='page', page_info=None, alternative_path=False):
"""Postprocess a generated path.
Takes the path `path` for language `lang`, and postprocesses it.
It appends `site.config['INDEX_FILE']` depending on `append_index`
(which can have the values `'always'`, `'never'` and `'auto'`) and
`site.config['PRETTY_URLS']`.
        It also modifies/adds the extension of the last path element (or of
        `site.config['INDEX_FILE']`), depending on `dest_type`, which can be
        `'feed'`, `'rss'` or `'page'` (other values are handled like `'page'`,
        but without page numbers).
If `dest_type` is `'page'`, `page_info` can be `None` or a tuple
of two integers: the page number and the number of pages. This will
be used to append the correct page number by calling
`utils.adjust_name_for_index_path_list` and
`utils.get_displayed_page_number`.
If `alternative_path` is set to `True`, `utils.adjust_name_for_index_path_list`
is called with `force_addition=True`, resulting in an alternative path for the
first page of an index or Atom feed by including the page number into the path.
"""
# Forcing extension for Atom feeds and RSS feeds
force_extension = None
if dest_type == 'feed':
force_extension = self.site.config['ATOM_EXTENSION']
elif dest_type == 'rss':
force_extension = self.site.config['RSS_EXTENSION']
# Determine how to extend path
path = [_f for _f in path if _f]
if force_extension is not None:
if len(path) == 0 and dest_type == 'rss':
path = [self.site.config['RSS_FILENAME_BASE'](lang)]
elif len(path) == 0 and dest_type == 'feed':
path = [self.site.config['ATOM_FILENAME_BASE'](lang)]
elif len(path) == 0 or append_index == 'always':
path = path + [os.path.splitext(self.site.config['INDEX_FILE'])[0]]
elif len(path) > 0 and append_index == 'never':
path[-1] = os.path.splitext(path[-1])[0]
path[-1] += force_extension
elif (self.site.config['PRETTY_URLS'] and append_index != 'never') or len(path) == 0 or append_index == 'always':
path = path + [self.site.config['INDEX_FILE']]
elif append_index != 'never':
path[-1] += '.html'
# Create path
result = [_f for _f in [self.site.config['TRANSLATIONS'][lang]] + path if _f]
if page_info is not None and dest_type in ('page', 'feed'):
result = utils.adjust_name_for_index_path_list(result,
page_info[0],
utils.get_displayed_page_number(page_info[0], page_info[1], self.site),
lang,
self.site, force_addition=alternative_path, extension=force_extension)
return result
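    # Illustrative behaviour (config values assumed, not taken from a real site):
    # with PRETTY_URLS enabled and append_index='auto', ['categories', 'python']
    # becomes ['categories', 'python', 'index.html'], prefixed with the language's
    # TRANSLATIONS entry when that is non-empty; with dest_type='rss' and an empty
    # path it becomes [RSS_FILENAME_BASE(lang) + RSS_EXTENSION], e.g. ['rss.xml']
    # with typical defaults.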
@staticmethod
def _parse_path_result(result):
"""Interpret the return values of taxonomy.get_path() and taxonomy.get_overview_path() as if all three return values were given."""
if not isinstance(result[0], (list, tuple)):
# The result must be a list or tuple of strings. Wrap into a tuple
result = (result, )
path = result[0]
append_index = result[1] if len(result) > 1 else 'auto'
page_info = result[2] if len(result) > 2 else None
return path, append_index, page_info
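    # Accepted return shapes (illustrative values):
    #   ['tags', 'python']                     -> (['tags', 'python'], 'auto', None)
    #   (['tags', 'python'], 'never')          -> (['tags', 'python'], 'never', None)
    #   (['tags', 'python'], 'always', (0, 3)) -> (['tags', 'python'], 'always', (0, 3))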
def _taxonomy_index_path(self, name, lang, taxonomy):
"""Return path to the classification overview."""
result = taxonomy.get_overview_path(lang)
path, append_index, _ = self._parse_path_result(result)
return self._postprocess_path(path, lang, append_index=append_index, dest_type='list')
def _taxonomy_path(self, name, lang, taxonomy, dest_type='page', page=None, alternative_path=False):
"""Return path to a classification."""
if taxonomy.has_hierarchy:
result = taxonomy.get_path(taxonomy.extract_hierarchy(name), lang, dest_type=dest_type)
else:
result = taxonomy.get_path(name, lang, dest_type=dest_type)
path, append_index, page_ = self._parse_path_result(result)
if page is not None:
page = int(page)
else:
page = page_
page_info = None
if taxonomy.show_list_as_index and page is not None:
number_of_pages = self.site.page_count_per_classification[taxonomy.classification_name][lang].get(name)
if number_of_pages is None:
number_of_pages = self._compute_number_of_pages(self._get_filtered_list(taxonomy, name, lang), self.site.config['INDEX_DISPLAY_POST_COUNT'])
self.site.page_count_per_classification[taxonomy.classification_name][lang][name] = number_of_pages
page_info = (page, number_of_pages)
return self._postprocess_path(path, lang, append_index=append_index, dest_type=dest_type, page_info=page_info)
def _taxonomy_atom_path(self, name, lang, taxonomy, page=None, alternative_path=False):
"""Return path to a classification Atom feed."""
return self._taxonomy_path(name, lang, taxonomy, dest_type='feed', page=page, alternative_path=alternative_path)
def _taxonomy_rss_path(self, name, lang, taxonomy):
"""Return path to a classification RSS feed."""
return self._taxonomy_path(name, lang, taxonomy, dest_type='rss')
def _register_path_handlers(self, taxonomy):
functions = (
('{0}_index', self._taxonomy_index_path),
('{0}', self._taxonomy_path),
('{0}_atom', self._taxonomy_atom_path),
('{0}_rss', self._taxonomy_rss_path),
)
for name, function in functions:
name = name.format(taxonomy.classification_name)
p = functools.partial(function, taxonomy=taxonomy)
doc = taxonomy.path_handler_docstrings[name]
if doc is not False:
p.__doc__ = doc
self.site.register_path_handler(name, p)
def set_site(self, site):
"""Set site, which is a Nikola instance."""
super().set_site(site)
# Add hook for after post scanning
blinker.signal("scanned").connect(self._do_classification)
# Register path handlers
for taxonomy in site.taxonomy_plugins.values():
self._register_path_handlers(taxonomy)
|
import numpy as np
from tensornetwork.block_sparse.index import (Index, fuse_index_pair,
fuse_indices)
from tensornetwork.block_sparse.charge import (U1Charge, BaseCharge,
fuse_charges)
import pytest
def test_index():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
i = Index(charges=[q1, q2], flow=[False, True])
assert len(i) == D**2
assert i.dim == D**2
def test_index_eq():
q1 = U1Charge(np.array([-1, -2, 0, 8, 7]))
q2 = U1Charge(np.array([-1, -2, 0, 8, 7]))
q3 = U1Charge(np.array([-1, 0, 8, 7]))
i1 = Index(charges=q1, flow=False)
i2 = Index(charges=q2, flow=False)
i3 = Index(charges=q3, flow=False)
i4 = Index(charges=[q1, q2], flow=[False, True])
i5 = Index(charges=[q1, q2], flow=[False, True])
i6 = Index(charges=[q1, q2], flow=[False, False])
assert i1 == i2
assert i1 != i3
assert i1 != i4
assert i4 == i5
assert i5 != i6
def test_index_flip_flow():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
i = Index(charges=[q1, q2], flow=[False, True])
np.testing.assert_allclose(i.flip_flow().flow, [True, False])
def test_index_charges():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
i = Index(charges=[q1, q2], flow=[False, True])
fused = fuse_charges([q1, q2], [False, True])
np.testing.assert_allclose(i.charges.charges, fused.charges)
def test_index_fusion_mul():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
D).astype(dtype)) #quantum numbers on leg 1
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
                                  D).astype(dtype)) #quantum numbers on leg 2
charges = [q1, q2]
i1 = Index(charges=q1, flow=False) #index on leg 1
i2 = Index(charges=q2, flow=False) #index on leg 2
i12 = i1 * i2
for n in range(i12.charges.charges.shape[1]):
assert np.all(i12._charges[n].charges == charges[n].charges)
assert np.all(i12.charges.charges == (q1 + q2).charges)
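# Note (illustrative): for U1 charges, `i1 * i2` fuses the two legs; the fused
# index carries D**2 charge entries formed by adding the quantum numbers of q1
# and q2 over their outer product, which is what the asserts above verify.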
def test_fuse_indices():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
D).astype(dtype)) #quantum numbers on leg 1
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
                                  D).astype(dtype)) #quantum numbers on leg 2
charges = [q1, q2]
i1 = Index(charges=q1, flow=False) #index on leg 1
i2 = Index(charges=q2, flow=False) #index on leg 2
i12 = fuse_indices([i1, i2])
for n in range(i12.charges.charges.shape[1]):
assert np.all(i12._charges[n].charges == charges[n].charges)
assert np.all(i12.charges.charges == (q1 + q2).charges)
def test_index_copy():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
i = Index(charges=[q1, q2], flow=[False, True])
icopy = i.copy()
assert not np.any([a is b for a, b in zip(i._charges, icopy._charges)])
assert i.flow is not icopy.flow
def test_index_copy_2():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
D).astype(dtype)) #quantum numbers on leg 1
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1,
                                  D).astype(dtype)) #quantum numbers on leg 2
i1 = Index(charges=q1, flow=False)
i2 = Index(charges=q2, flow=False)
i3 = Index(charges=q1, flow=True)
i4 = Index(charges=q2, flow=True)
i12 = i1 * i2
i34 = i3 * i4
i1234 = i12 * i34
i1234_copy = i1234.copy()
flat1234 = i1234_copy.flat_charges
assert flat1234[0] is not i1.flat_charges[0]
assert flat1234[1] is not i2.flat_charges[0]
assert flat1234[2] is not i3.flat_charges[0]
assert flat1234[3] is not i4.flat_charges[0]
def test_index_raises():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
with pytest.raises(TypeError):
Index(charges=[q1, q2], flow=[2, True])
def test_repr():
D = 10
B = 4
dtype = np.int16
np.random.seed(10)
q1 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
q2 = U1Charge(np.random.randint(-B // 2, B // 2 + 1, D).astype(dtype))
index = Index(charges=[q1, q2], flow=[False, True])
dense_shape = f"Dimension: {str(index.dim)} \n"
charge_str = str(index._charges).replace('\n,', ',\n')
charge_str = charge_str.replace('\n', '\n ')
charges = f"Charges: {charge_str} \n"
flow_info = f"Flows: {str(index.flow)} \n"
res = f"Index:\n {dense_shape} {charges} {flow_info} "
assert res == index.__repr__()
|
from __future__ import division
import itertools
import numpy as np
import chainer
from chainercv import utils
class MultiboxCoder(object):
"""A helper class to encode/decode bounding boxes.
This class encodes :obj:`(bbox, label)` to :obj:`(mb_loc, mb_label)`
and decodes :obj:`(mb_loc, mb_conf)` to :obj:`(bbox, label, score)`.
    This encoding/decoding is used in Single Shot Multibox Detector [#]_.
* :obj:`mb_loc`: An array representing offsets and scales \
from the default bounding boxes. \
Its shape is :math:`(K, 4)`, where :math:`K` is the number of \
the default bounding boxes. \
        The second axis is composed of \
:math:`(\Delta y, \Delta x, \Delta h, \Delta w)`. \
These values are computed by the following formulas.
* :math:`\Delta y = (b_y - m_y) / (m_h * v_0)`
* :math:`\Delta x = (b_x - m_x) / (m_w * v_0)`
* :math:`\Delta h = log(b_h / m_h) / v_1`
* :math:`\Delta w = log(b_w / m_w) / v_1`
    :math:`(m_y, m_x)` and :math:`(m_h, m_w)` are \
    center coordinates and size of a default bounding box. \
    :math:`(b_y, b_x)` and :math:`(b_h, b_w)` are \
    center coordinates and size of \
    a given bounding box that is assigned to the default bounding box. \
:math:`(v_0, v_1)` are coefficients that can be set \
by argument :obj:`variance`.
* :obj:`mb_label`: An array representing classes of \
ground truth bounding boxes. Its shape is :math:`(K,)`.
* :obj:`mb_conf`: An array representing classes of \
predicted bounding boxes. Its shape is :math:`(K, n\_fg\_class + 1)`.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
grids (iterable of ints): An iterable of integers.
Each integer indicates the size of a feature map.
aspect_ratios (iterable of tuples of ints):
An iterable of tuples of integers
used to compute the default bounding boxes.
Each tuple indicates the aspect ratios of
            the default bounding boxes at each feature map.
The length of this iterable should be :obj:`len(grids)`.
steps (iterable of floats): The step size for each feature map.
The length of this iterable should be :obj:`len(grids)`.
sizes (iterable of floats): The base size of default bounding boxes
for each feature map.
The length of this iterable should be :obj:`len(grids) + 1`.
variance (tuple of floats): Two coefficients for encoding/decoding
the locations of bounding boxes. The first value is used to
encode/decode coordinates of the centers.
The second value is used to encode/decode the sizes of
bounding boxes.
"""
def __init__(self, grids, aspect_ratios, steps, sizes, variance):
if not len(aspect_ratios) == len(grids):
raise ValueError('The length of aspect_ratios is wrong.')
if not len(steps) == len(grids):
raise ValueError('The length of steps is wrong.')
if not len(sizes) == len(grids) + 1:
raise ValueError('The length of sizes is wrong.')
default_bbox = []
for k, grid in enumerate(grids):
for v, u in itertools.product(range(grid), repeat=2):
cy = (v + 0.5) * steps[k]
cx = (u + 0.5) * steps[k]
s = sizes[k]
default_bbox.append((cy, cx, s, s))
s = np.sqrt(sizes[k] * sizes[k + 1])
default_bbox.append((cy, cx, s, s))
s = sizes[k]
for ar in aspect_ratios[k]:
default_bbox.append(
(cy, cx, s / np.sqrt(ar), s * np.sqrt(ar)))
default_bbox.append(
(cy, cx, s * np.sqrt(ar), s / np.sqrt(ar)))
# (center_y, center_x, height, width)
self._default_bbox = np.stack(default_bbox)
self._variance = variance
@property
def xp(self):
return chainer.backends.cuda.get_array_module(self._default_bbox)
def to_cpu(self):
self._default_bbox = chainer.backends.cuda.to_cpu(self._default_bbox)
def to_gpu(self, device=None):
self._default_bbox = chainer.backends.cuda.to_gpu(
self._default_bbox, device=device)
def encode(self, bbox, label, iou_thresh=0.5):
"""Encodes coordinates and classes of bounding boxes.
This method encodes :obj:`bbox` and :obj:`label` to :obj:`mb_loc`
and :obj:`mb_label`, which are used to compute multibox loss.
Args:
bbox (array): A float array of shape :math:`(R, 4)`,
where :math:`R` is the number of bounding boxes in an image.
Each bounding box is organized by
:math:`(y_{min}, x_{min}, y_{max}, x_{max})`
in the second axis.
label (array) : An integer array of shape :math:`(R,)`.
Each value indicates the class of the bounding box.
            iou_thresh (float): The threshold value used to determine whether
                a default bounding box is assigned to a ground truth
                or not. The default value is :obj:`0.5`.
Returns:
tuple of two arrays:
This method returns a tuple of two arrays,
:obj:`(mb_loc, mb_label)`.
* **mb_loc**: A float array of shape :math:`(K, 4)`, \
where :math:`K` is the number of default bounding boxes.
* **mb_label**: An integer array of shape :math:`(K,)`.
"""
xp = self.xp
if len(bbox) == 0:
return (
xp.zeros(self._default_bbox.shape, dtype=np.float32),
xp.zeros(self._default_bbox.shape[0], dtype=np.int32))
iou = utils.bbox_iou(
xp.hstack((
self._default_bbox[:, :2] - self._default_bbox[:, 2:] / 2,
self._default_bbox[:, :2] + self._default_bbox[:, 2:] / 2)),
bbox)
index = xp.empty(len(self._default_bbox), dtype=int)
# -1 is for background
index[:] = -1
masked_iou = iou.copy()
while True:
i, j = xp.unravel_index(masked_iou.argmax(), masked_iou.shape)
if masked_iou[i, j] <= 1e-6:
break
index[i] = j
masked_iou[i, :] = 0
masked_iou[:, j] = 0
mask = xp.logical_and(index < 0, iou.max(axis=1) >= iou_thresh)
index[mask] = iou[mask].argmax(axis=1)
mb_bbox = bbox[index].copy()
# (y_min, x_min, y_max, x_max) -> (y_min, x_min, height, width)
mb_bbox[:, 2:] -= mb_bbox[:, :2]
# (y_min, x_min, height, width) -> (center_y, center_x, height, width)
mb_bbox[:, :2] += mb_bbox[:, 2:] / 2
mb_loc = xp.empty_like(mb_bbox)
mb_loc[:, :2] = (mb_bbox[:, :2] - self._default_bbox[:, :2]) / \
(self._variance[0] * self._default_bbox[:, 2:])
mb_loc[:, 2:] = xp.log(mb_bbox[:, 2:] / self._default_bbox[:, 2:]) / \
self._variance[1]
# [0, n_fg_class - 1] -> [1, n_fg_class]
mb_label = label[index] + 1
# 0 is for background
mb_label[index < 0] = 0
return mb_loc.astype(np.float32), mb_label.astype(np.int32)
def decode(self, mb_loc, mb_conf, nms_thresh=0.45, score_thresh=0.6):
"""Decodes back to coordinates and classes of bounding boxes.
This method decodes :obj:`mb_loc` and :obj:`mb_conf` returned
by a SSD network back to :obj:`bbox`, :obj:`label` and :obj:`score`.
Args:
mb_loc (array): A float array whose shape is
                :math:`(K, 4)`, where :math:`K` is the number of
default bounding boxes.
mb_conf (array): A float array whose shape is
:math:`(K, n\_fg\_class + 1)`.
nms_thresh (float): The threshold value
for :func:`~chainercv.utils.non_maximum_suppression`.
The default value is :obj:`0.45`.
score_thresh (float): The threshold value for confidence score.
                If the confidence score of a bounding box is lower than
                this value, the bounding box will be suppressed.
The default value is :obj:`0.6`.
Returns:
tuple of three arrays:
This method returns a tuple of three arrays,
:obj:`(bbox, label, score)`.
* **bbox**: A float array of shape :math:`(R, 4)`, \
                where :math:`R` is the number of bounding boxes in an image. \
Each bounding box is organized by \
:math:`(y_{min}, x_{min}, y_{max}, x_{max})` \
in the second axis.
* **label** : An integer array of shape :math:`(R,)`. \
Each value indicates the class of the bounding box.
* **score** : A float array of shape :math:`(R,)`. \
Each value indicates how confident the prediction is.
"""
xp = self.xp
# (center_y, center_x, height, width)
mb_bbox = self._default_bbox.copy()
mb_bbox[:, :2] += mb_loc[:, :2] * self._variance[0] \
* self._default_bbox[:, 2:]
mb_bbox[:, 2:] *= xp.exp(mb_loc[:, 2:] * self._variance[1])
# (center_y, center_x, height, width) -> (y_min, x_min, height, width)
mb_bbox[:, :2] -= mb_bbox[:, 2:] / 2
# (center_y, center_x, height, width) -> (y_min, x_min, y_max, x_max)
mb_bbox[:, 2:] += mb_bbox[:, :2]
# softmax
mb_score = xp.exp(mb_conf)
mb_score /= mb_score.sum(axis=1, keepdims=True)
bbox = []
label = []
score = []
for l in range(mb_conf.shape[1] - 1):
bbox_l = mb_bbox
            # the l-th class corresponds to the (l + 1)-th column.
score_l = mb_score[:, l + 1]
mask = score_l >= score_thresh
bbox_l = bbox_l[mask]
score_l = score_l[mask]
if nms_thresh is not None:
indices = utils.non_maximum_suppression(
bbox_l, nms_thresh, score_l)
bbox_l = bbox_l[indices]
score_l = score_l[indices]
bbox.append(bbox_l)
label.append(xp.array((l,) * len(bbox_l)))
score.append(score_l)
bbox = xp.vstack(bbox).astype(np.float32)
label = xp.hstack(label).astype(np.int32)
score = xp.hstack(score).astype(np.float32)
return bbox, label, score
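# A minimal usage sketch (hedged; the grids/aspect_ratios/steps/sizes/variance
# below are illustrative values, not the defaults of any particular SSD model):
#
#   coder = MultiboxCoder(
#       grids=(38, 19), aspect_ratios=((2,), (2, 3)),
#       steps=(8 / 300, 16 / 300), sizes=(30 / 300, 60 / 300, 111 / 300),
#       variance=(0.1, 0.2))
#   bbox = np.array([[10 / 300, 10 / 300, 100 / 300, 120 / 300]], dtype=np.float32)
#   label = np.array([0], dtype=np.int32)
#   mb_loc, mb_label = coder.encode(bbox, label)          # training targets
#   # ... a network predicts mb_loc and mb_conf of shape (K, 4) / (K, n_fg_class + 1)
#   # bbox, label, score = coder.decode(mb_loc, mb_conf)  # back to boxes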
|
__docformat__ = "restructuredtext en"
import sys
from warnings import warn
from logilab.common.changelog import Version
class DeprecationWrapper(object):
"""proxy to print a warning on access to any attribute of the wrapped object
"""
def __init__(self, proxied, msg=None):
self._proxied = proxied
self._msg = msg
def __getattr__(self, attr):
warn(self._msg, DeprecationWarning, stacklevel=2)
return getattr(self._proxied, attr)
def __setattr__(self, attr, value):
if attr in ('_proxied', '_msg'):
self.__dict__[attr] = value
else:
warn(self._msg, DeprecationWarning, stacklevel=2)
setattr(self._proxied, attr, value)
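# Typical usage (a sketch, not part of this module's API documentation): wrap a
# module object so that any attribute access through the old name emits a
# DeprecationWarning, e.g.
#   sys.modules['old_pkg.old_mod'] = DeprecationWrapper(
#       new_mod, 'old_pkg.old_mod is deprecated, use new_pkg.new_mod instead')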
class DeprecationManager(object):
"""Manage the deprecation message handling. Messages are dropped for
versions more recent than the 'compatible' version. Example::
deprecator = deprecation.DeprecationManager("module_name")
deprecator.compatibility('1.3')
deprecator.warn('1.2', "message.")
@deprecator.deprecated('1.2', 'Message')
def any_func():
pass
class AnyClass(object):
__metaclass__ = deprecator.class_deprecated('1.2')
"""
def __init__(self, module_name=None):
"""
"""
self.module_name = module_name
self.compatible_version = None
def compatibility(self, compatible_version):
"""Set the compatible version.
"""
self.compatible_version = Version(compatible_version)
def deprecated(self, version=None, reason=None, stacklevel=2, name=None, doc=None):
"""Display a deprecation message only if the version is older than the
compatible version.
"""
def decorator(func):
message = reason or 'The function "%s" is deprecated'
if '%s' in message:
message %= func.__name__
def wrapped(*args, **kwargs):
self.warn(version, message, stacklevel+1)
return func(*args, **kwargs)
return wrapped
return decorator
def class_deprecated(self, version=None):
class metaclass(type):
"""metaclass to print a warning on instantiation of a deprecated class"""
def __call__(cls, *args, **kwargs):
msg = getattr(cls, "__deprecation_warning__",
"%(cls)s is deprecated") % {'cls': cls.__name__}
self.warn(version, msg, stacklevel=3)
return type.__call__(cls, *args, **kwargs)
return metaclass
def moved(self, version, modpath, objname):
        """Use to tell that a callable has been moved to a new module.
        It returns a callable wrapper, so that when it is called a warning is printed
        telling where the object can be found; the import is done lazily (and not
        before) and the actual object is called.
        NOTE: the usage is somewhat limited on classes since it will fail if the
        wrapper is used in a class ancestors list; use the `class_moved` function
        instead (which has no lazy import feature though).
"""
def callnew(*args, **kwargs):
from logilab.common.modutils import load_module_from_name
message = "object %s has been moved to module %s" % (objname, modpath)
self.warn(version, message)
m = load_module_from_name(modpath)
return getattr(m, objname)(*args, **kwargs)
return callnew
def class_renamed(self, version, old_name, new_class, message=None):
clsdict = {}
if message is None:
message = '%s is deprecated, use %s' % (old_name, new_class.__name__)
clsdict['__deprecation_warning__'] = message
try:
# new-style class
return self.class_deprecated(version)(old_name, (new_class,), clsdict)
except (NameError, TypeError):
# old-style class
warn = self.warn
class DeprecatedClass(new_class):
"""FIXME: There might be a better way to handle old/new-style class
"""
def __init__(self, *args, **kwargs):
warn(version, message, stacklevel=3)
new_class.__init__(self, *args, **kwargs)
return DeprecatedClass
def class_moved(self, version, new_class, old_name=None, message=None):
"""nice wrapper around class_renamed when a class has been moved into
another module
"""
if old_name is None:
old_name = new_class.__name__
if message is None:
message = 'class %s is now available as %s.%s' % (
old_name, new_class.__module__, new_class.__name__)
return self.class_renamed(version, old_name, new_class, message)
def warn(self, version=None, reason="", stacklevel=2):
"""Display a deprecation message only if the version is older than the
compatible version.
"""
if (self.compatible_version is None
or version is None
or Version(version) < self.compatible_version):
if self.module_name and version:
reason = '[%s %s] %s' % (self.module_name, version, reason)
elif self.module_name:
reason = '[%s] %s' % (self.module_name, reason)
elif version:
reason = '[%s] %s' % (version, reason)
warn(reason, DeprecationWarning, stacklevel=stacklevel)
_defaultdeprecator = DeprecationManager()
def deprecated(reason=None, stacklevel=2, name=None, doc=None):
return _defaultdeprecator.deprecated(None, reason, stacklevel, name, doc)
class_deprecated = _defaultdeprecator.class_deprecated()
def moved(modpath, objname):
return _defaultdeprecator.moved(None, modpath, objname)
moved.__doc__ = _defaultdeprecator.moved.__doc__
def class_renamed(old_name, new_class, message=None):
"""automatically creates a class which fires a DeprecationWarning
when instantiated.
>>> Set = class_renamed('Set', set, 'Set is now replaced by set')
>>> s = Set()
sample.py:57: DeprecationWarning: Set is now replaced by set
s = Set()
>>>
"""
return _defaultdeprecator.class_renamed(None, old_name, new_class, message)
def class_moved(new_class, old_name=None, message=None):
return _defaultdeprecator.class_moved(None, new_class, old_name, message)
class_moved.__doc__ = _defaultdeprecator.class_moved.__doc__
|
import json
import logging
import subprocess
import sys
import time
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_streams', 1, 'Number of netperf processes to run')
flags.DEFINE_string('netperf_cmd', None,
'netperf command to run')
flags.DEFINE_integer('port_start', None,
'Starting port for netperf command and data ports')
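# Illustrative invocation (script name, host and flag values are hypothetical):
# the --netperf_cmd template must contain '{command_port}' and '{data_port}'
# placeholders, which are filled in per stream below, e.g.
#   python netperf_runner.py --num_streams=4 --port_start=20000 \
#       --netperf_cmd='netperf -p {command_port} -H 10.0.0.2 -t TCP_RR -- -P ,{data_port}'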
def Main():
# Parse command-line flags
try:
FLAGS(sys.argv)
except flags.Error as e:
logging.error('%s\nUsage: %s ARGS\n%s', e, sys.argv[0], FLAGS)
sys.exit(1)
netperf_cmd = FLAGS.netperf_cmd
num_streams = FLAGS.num_streams
port_start = FLAGS.port_start
assert netperf_cmd
assert num_streams >= 1
assert port_start
stdouts = [None] * num_streams
stderrs = [None] * num_streams
return_codes = [None] * num_streams
processes = [None] * num_streams
# Start all of the netperf processes
begin_starting_processes = time.time()
for i in range(num_streams):
command_port = port_start + i * 2
data_port = port_start + i * 2 + 1
cmd = netperf_cmd.format(command_port=command_port, data_port=data_port)
processes[i] = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
end_starting_processes = time.time()
# Wait for all of the netperf processes to finish and save their return codes
for i, process in enumerate(processes):
stdouts[i], stderrs[i] = process.communicate()
return_codes[i] = process.returncode
# Dump the stdouts, stderrs, and return_codes to stdout in json form
print(json.dumps((stdouts, stderrs, return_codes,
begin_starting_processes, end_starting_processes)))
if __name__ == '__main__':
sys.exit(Main())
|
import argparse
import matplotlib.pyplot as plt
import chainer
from chainercv.datasets import voc_bbox_label_names
from chainercv.experimental.links import YOLOv2Tiny
from chainercv.links import YOLOv2
from chainercv.links import YOLOv3
from chainercv import utils
from chainercv.visualizations import vis_bbox
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('yolo_v2', 'yolo_v2_tiny', 'yolo_v3'),
default='yolo_v2')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model')
parser.add_argument(
'--dataset', choices=('voc',), default='voc')
parser.add_argument('image')
args = parser.parse_args()
if args.model == 'yolo_v2':
cls = YOLOv2
elif args.model == 'yolo_v2_tiny':
cls = YOLOv2Tiny
elif args.model == 'yolo_v3':
cls = YOLOv3
if args.dataset == 'voc':
if args.pretrained_model is None:
args.pretrained_model = 'voc0712'
label_names = voc_bbox_label_names
model = cls(n_fg_class=len(label_names),
pretrained_model=args.pretrained_model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
img = utils.read_image(args.image, color=True)
bboxes, labels, scores = model.predict([img])
bbox, label, score = bboxes[0], labels[0], scores[0]
vis_bbox(
img, bbox, label, score, label_names=label_names)
plt.show()
if __name__ == '__main__':
main()
|
import numpy as np
import pytest
from hypertools.tools.cluster import cluster
from hypertools.plot.plot import plot
cluster1 = np.random.multivariate_normal(np.zeros(3), np.eye(3), size=100)
cluster2 = np.random.multivariate_normal(np.zeros(3)+100, np.eye(3), size=100)
data = np.vstack([cluster1, cluster2])
labels = cluster(data, n_clusters=2)
def test_cluster_n_clusters():
assert len(set(labels))==2
def test_cluster_returns_list():
assert type(labels) is list
def test_cluster_hdbscan():
try:
from hdbscan import HDBSCAN
_has_hdbscan = True
    except ImportError:
_has_hdbscan = False
if _has_hdbscan:
hdbscan_labels = cluster(data, cluster='HDBSCAN')
assert len(set(hdbscan_labels)) == 2
else:
with pytest.raises(ImportError):
hdbscan_labels = cluster(data, cluster='HDBSCAN')
|
import datetime
from homeassistant.components.zwave import binary_sensor, const
from tests.async_mock import patch
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
def test_get_device_detects_none(mock_openzwave):
"""Test device is not returned."""
node = MockNode()
value = MockValue(data=False, node=node)
values = MockEntityValues(primary=value)
device = binary_sensor.get_device(node=node, values=values, node_config={})
assert device is None
def test_get_device_detects_trigger_sensor(mock_openzwave):
"""Test device is a trigger sensor."""
node = MockNode(manufacturer_id="013c", product_type="0002", product_id="0002")
value = MockValue(data=False, node=node)
values = MockEntityValues(primary=value)
device = binary_sensor.get_device(node=node, values=values, node_config={})
assert isinstance(device, binary_sensor.ZWaveTriggerSensor)
assert device.device_class == "motion"
def test_get_device_detects_workaround_sensor(mock_openzwave):
"""Test that workaround returns a binary sensor."""
node = MockNode(manufacturer_id="010f", product_type="0b00")
value = MockValue(
data=False, node=node, command_class=const.COMMAND_CLASS_SENSOR_ALARM
)
values = MockEntityValues(primary=value)
device = binary_sensor.get_device(node=node, values=values, node_config={})
assert isinstance(device, binary_sensor.ZWaveBinarySensor)
def test_get_device_detects_sensor(mock_openzwave):
"""Test that device returns a binary sensor."""
node = MockNode()
value = MockValue(
data=False, node=node, command_class=const.COMMAND_CLASS_SENSOR_BINARY
)
values = MockEntityValues(primary=value)
device = binary_sensor.get_device(node=node, values=values, node_config={})
assert isinstance(device, binary_sensor.ZWaveBinarySensor)
def test_binary_sensor_value_changed(mock_openzwave):
"""Test value changed for binary sensor."""
node = MockNode()
value = MockValue(
data=False, node=node, command_class=const.COMMAND_CLASS_SENSOR_BINARY
)
values = MockEntityValues(primary=value)
device = binary_sensor.get_device(node=node, values=values, node_config={})
assert not device.is_on
value.data = True
value_changed(value)
assert device.is_on
async def test_trigger_sensor_value_changed(hass, mock_openzwave):
"""Test value changed for trigger sensor."""
node = MockNode(manufacturer_id="013c", product_type="0002", product_id="0002")
value = MockValue(data=False, node=node)
value_off_delay = MockValue(data=15, node=node)
values = MockEntityValues(primary=value, off_delay=value_off_delay)
device = binary_sensor.get_device(node=node, values=values, node_config={})
assert not device.is_on
value.data = True
await hass.async_add_executor_job(value_changed, value)
assert device.invalidate_after is None
device.hass = hass
value.data = True
await hass.async_add_executor_job(value_changed, value)
assert device.is_on
test_time = device.invalidate_after - datetime.timedelta(seconds=1)
with patch("homeassistant.util.dt.utcnow", return_value=test_time):
assert device.is_on
test_time = device.invalidate_after
with patch("homeassistant.util.dt.utcnow", return_value=test_time):
assert not device.is_on
|
import os
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import nvidia_driver
from perfkitbenchmarker.linux_packages import shoc_benchmark_suite
flags.DEFINE_integer(
'stencil2d_iterations', 5, 'number of iterations to run', lower_bound=1)
flag_util.DEFINE_integerlist(
'stencil2d_problem_sizes',
flag_util.IntegerList([4096]),
'problem sizes to run. Can specify a single '
'number, like --stencil2d_problem_sizes=4096 '
'or a list like --stencil2d_problem_sizes='
'1024,4096',
on_nonincreasing=flag_util.IntegerListParser.WARN, module_name=__name__)
FLAGS = flags.FLAGS
MACHINEFILE = 'machinefile'
BENCHMARK_NAME = 'stencil2d'
BENCHMARK_VERSION = '0.25'
BENCHMARK_CONFIG = """
stencil2d:
description: Runs Stencil2D from SHOC Benchmark Suite.\
Specify the number of VMs with --num_vms
vm_groups:
default:
vm_spec:
GCP:
image: ubuntu-1604-xenial-v20170330
image_project: ubuntu-os-cloud
machine_type: n1-standard-4
gpu_type: k80
gpu_count: 1
zone: us-east1-d
boot_disk_size: 200
AWS:
image: ami-d15a75c7
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
image: Canonical:UbuntuServer:16.04.0-LTS:latest
machine_type: Standard_NC6
zone: eastus
vm_count: null
"""
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
return config
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
pass
def _InstallAndAuthenticateVm(vm):
"""Install SHOC, ensure correct GPU state, and authenticate the VM for ssh.
Args:
vm: vm to operate on.
"""
vm.Install('shoc_benchmark_suite')
nvidia_driver.SetAndConfirmGpuClocks(vm)
vm.AuthenticateVm() # Configure ssh between vms for MPI
def Prepare(benchmark_spec):
"""Install SHOC and push the machinefile.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm_util.RunThreaded(_InstallAndAuthenticateVm, benchmark_spec.vms)
master_vm = benchmark_spec.vms[0]
benchmark_spec.num_gpus = nvidia_driver.QueryNumberOfGpus(master_vm)
hpc_util.CreateMachineFile(benchmark_spec.vms,
lambda _: benchmark_spec.num_gpus,
MACHINEFILE)
def _CreateMedianStencilOutputSample(stencil2d_output, sample_name, pretty_name,
metadata):
"""Extract the specified sample from the stencil2d output.
Args:
stencil2d_output: output from a Stencil2D run
sample_name: the name of the sample, as it appears in stencil2d_output
pretty_name: the name to use for the created sample
    metadata: metadata to add to the sample
Returns:
A sample.Sample with the name, pretty_name, containing the value and units
as parsed from stencil2d_output, along with metadata.
"""
results = [
x for x in stencil2d_output.splitlines() if x.find(sample_name) != -1
][0].split()
units = results[2]
value = float(results[3])
return sample.Sample(pretty_name, value, units, metadata)
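# Hypothetical example of the line format this parser assumes (whitespace
# separated; the real SHOC output may differ):
#   DP_Sten2D(median)  4096x4096  GFLOPS  123.456  ...
# -> results[2] == 'GFLOPS' (units) and results[3] == '123.456' (value).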
def _MakeSamplesFromStencilOutput(stdout, metadata):
"""Make and return a list of samples, parsed from the Stencil2D output.
Args:
stdout: Stencil2D output
metadata: metadata to append to the returned samples
Returns:
A list of sample.Samples
"""
results = [
_CreateMedianStencilOutputSample(stdout, 'DP_Sten2D(median)',
'Stencil2D DP median', metadata),
_CreateMedianStencilOutputSample(stdout, 'DP_Sten2D(stddev)',
'Stencil2D DP stddev', metadata),
_CreateMedianStencilOutputSample(stdout, 'SP_Sten2D(median)',
'Stencil2D SP median', metadata),
_CreateMedianStencilOutputSample(stdout, 'SP_Sten2D(stddev)',
'Stencil2D SP stddev',
metadata) # pyformat: disable
]
return results
def _RunSingleIteration(master_vm, problem_size, num_processes, num_iterations,
metadata):
"""Run a single iteration of Stencil2D and return a list of samples.
Args:
master_vm: master VM which will start the MPI job
problem_size: a single dimension of the Stencil2D problem_size
num_processes: total number of MPI processes to launch (number of GPUs)
num_iterations: number of Stencil2D iterations to run
metadata: metadata to append to returned samples
Returns:
A list of sample.Samples
"""
stencil2d_path = os.path.join(shoc_benchmark_suite.SHOC_BIN_DIR, 'TP', 'CUDA',
'Stencil2D')
current_problem_size = '%s,%s' % (problem_size, problem_size)
run_as_root = '--allow-run-as-root' if FLAGS.mpirun_allow_run_as_root else ''
run_command = ('mpirun --hostfile %s -np %s %s %s --customSize %s -n %s' %
(MACHINEFILE, num_processes, run_as_root, stencil2d_path,
current_problem_size, num_iterations))
metadata['run_command'] = run_command
metadata['problem_size'] = current_problem_size
stdout, _ = master_vm.RemoteCommand(run_command, should_log=True)
return _MakeSamplesFromStencilOutput(stdout, metadata.copy())
def Run(benchmark_spec):
"""Runs the Stencil2D benchmark. GPU clock speeds must be set already.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
num_gpus = benchmark_spec.num_gpus
master_vm = vms[0]
num_iterations = FLAGS.stencil2d_iterations
problem_sizes = FLAGS.stencil2d_problem_sizes
num_processes = len(vms) * num_gpus
metadata = {}
metadata.update(cuda_toolkit.GetMetadata(master_vm))
metadata['benchmark_version'] = BENCHMARK_VERSION
metadata['num_iterations'] = num_iterations
metadata['num_nodes'] = len(vms)
metadata['num_processes'] = num_processes
results = []
for problem_size in problem_sizes:
results.extend(
_RunSingleIteration(master_vm, problem_size, num_processes,
num_iterations, metadata))
return results
def Cleanup(benchmark_spec):
pass
|
from pkg_resources import EntryPoint, iter_entry_points
from subliminal.extensions import RegistrableExtensionManager, provider_manager, default_providers, disabled_providers
def test_registrable_extension_manager_all_extensions():
manager = RegistrableExtensionManager('subliminal.providers', [
'de7cidda = subliminal.providers.addic7ed:Addic7edProvider'
])
extensions = sorted(e.name for e in manager)
assert len(extensions) == 9
assert extensions == ['addic7ed', 'argenteam', 'de7cidda', 'legendastv', 'opensubtitles', 'podnapisi', 'shooter',
'thesubdb', 'tvsubtitles']
def test_registrable_extension_manager_internal_extension():
manager = RegistrableExtensionManager('subliminal.test_providers', [
'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider',
'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider',
'thesubdb = subliminal.providers.thesubdb:TheSubDBProvider',
'tvsubtitles = subliminal.providers.tvsubtitles:TVsubtitlesProvider'
])
assert len(list(manager)) == 5
assert len(manager.internal_extensions) == 5
def test_registrable_extension_manager_register():
manager = RegistrableExtensionManager('subliminal.test_providers', [
'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider'
])
assert len(list(manager)) == 2
manager.register('de7cidda = subliminal.providers.addic7ed:Addic7edProvider')
assert len(list(manager)) == 3
assert 'de7cidda' in manager.names()
def test_registrable_extension_manager_unregister():
manager = RegistrableExtensionManager('subliminal.test_providers', [
'thesubdb = subliminal.providers.thesubdb:TheSubDBProvider',
'tvsubtitles = subliminal.providers.tvsubtitles:TVsubtitlesProvider'
])
assert len(list(manager)) == 2
manager.register('de7cidda = subliminal.providers.addic7ed:Addic7edProvider')
manager.unregister('de7cidda = subliminal.providers.addic7ed:Addic7edProvider')
assert len(list(manager)) == 2
assert set(manager.names()) == {'thesubdb', 'tvsubtitles'}
def test_provider_manager():
setup_names = {ep.name for ep in iter_entry_points(provider_manager.namespace)}
internal_names = {EntryPoint.parse(iep).name for iep in provider_manager.internal_extensions}
enabled_names = set(default_providers)
disabled_names = set(disabled_providers)
assert setup_names == enabled_names
assert internal_names == enabled_names | disabled_names
|
from collections import defaultdict
import unittest
from unittest import mock
import pytest
import voluptuous as vol
from homeassistant.components.blackbird.const import DOMAIN, SERVICE_SETALLZONES
from homeassistant.components.blackbird.media_player import (
DATA_BLACKBIRD,
PLATFORM_SCHEMA,
setup_platform,
)
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import STATE_OFF, STATE_ON
import tests.common
class AttrDict(dict):
"""Helper class for mocking attributes."""
def __setattr__(self, name, value):
"""Set attribute."""
self[name] = value
def __getattr__(self, item):
"""Get attribute."""
return self[item]
class MockBlackbird:
"""Mock for pyblackbird object."""
def __init__(self):
"""Init mock object."""
self.zones = defaultdict(lambda: AttrDict(power=True, av=1))
def zone_status(self, zone_id):
"""Get zone status."""
status = self.zones[zone_id]
status.zone = zone_id
return AttrDict(status)
def set_zone_source(self, zone_id, source_idx):
"""Set source for zone."""
self.zones[zone_id].av = source_idx
def set_zone_power(self, zone_id, power):
"""Turn zone on/off."""
self.zones[zone_id].power = power
def set_all_zone_source(self, source_idx):
"""Set source for all zones."""
self.zones[3].av = source_idx
class TestBlackbirdSchema(unittest.TestCase):
"""Test Blackbird schema."""
def test_valid_serial_schema(self):
"""Test valid schema."""
valid_schema = {
"platform": "blackbird",
"port": "/dev/ttyUSB0",
"zones": {
1: {"name": "a"},
2: {"name": "a"},
3: {"name": "a"},
4: {"name": "a"},
5: {"name": "a"},
6: {"name": "a"},
7: {"name": "a"},
8: {"name": "a"},
},
"sources": {
1: {"name": "a"},
2: {"name": "a"},
3: {"name": "a"},
4: {"name": "a"},
5: {"name": "a"},
6: {"name": "a"},
7: {"name": "a"},
8: {"name": "a"},
},
}
PLATFORM_SCHEMA(valid_schema)
def test_valid_socket_schema(self):
"""Test valid schema."""
valid_schema = {
"platform": "blackbird",
"host": "192.168.1.50",
"zones": {
1: {"name": "a"},
2: {"name": "a"},
3: {"name": "a"},
4: {"name": "a"},
5: {"name": "a"},
},
"sources": {
1: {"name": "a"},
2: {"name": "a"},
3: {"name": "a"},
4: {"name": "a"},
},
}
PLATFORM_SCHEMA(valid_schema)
def test_invalid_schemas(self):
"""Test invalid schemas."""
schemas = (
{}, # Empty
None, # None
# Port and host used concurrently
{
"platform": "blackbird",
"port": "/dev/ttyUSB0",
"host": "192.168.1.50",
"name": "Name",
"zones": {1: {"name": "a"}},
"sources": {1: {"name": "b"}},
},
# Port or host missing
{
"platform": "blackbird",
"name": "Name",
"zones": {1: {"name": "a"}},
"sources": {1: {"name": "b"}},
},
# Invalid zone number
{
"platform": "blackbird",
"port": "/dev/ttyUSB0",
"name": "Name",
"zones": {11: {"name": "a"}},
"sources": {1: {"name": "b"}},
},
# Invalid source number
{
"platform": "blackbird",
"port": "/dev/ttyUSB0",
"name": "Name",
"zones": {1: {"name": "a"}},
"sources": {9: {"name": "b"}},
},
# Zone missing name
{
"platform": "blackbird",
"port": "/dev/ttyUSB0",
"name": "Name",
"zones": {1: {}},
"sources": {1: {"name": "b"}},
},
# Source missing name
{
"platform": "blackbird",
"port": "/dev/ttyUSB0",
"name": "Name",
"zones": {1: {"name": "a"}},
"sources": {1: {}},
},
)
for value in schemas:
with pytest.raises(vol.MultipleInvalid):
PLATFORM_SCHEMA(value)
class TestBlackbirdMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self):
"""Set up the test case."""
self.blackbird = MockBlackbird()
self.hass = tests.common.get_test_home_assistant()
self.hass.start()
# Note, source dictionary is unsorted!
with mock.patch(
"homeassistant.components.blackbird.media_player.get_blackbird",
new=lambda *a: self.blackbird,
):
setup_platform(
self.hass,
{
"platform": "blackbird",
"port": "/dev/ttyUSB0",
"zones": {3: {"name": "Zone name"}},
"sources": {
1: {"name": "one"},
3: {"name": "three"},
2: {"name": "two"},
},
},
lambda *args, **kwargs: None,
{},
)
self.hass.block_till_done()
self.media_player = self.hass.data[DATA_BLACKBIRD]["/dev/ttyUSB0-3"]
self.media_player.hass = self.hass
self.media_player.entity_id = "media_player.zone_3"
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Tear down the test case."""
self.hass.stop()
def test_setup_platform(self, *args):
"""Test setting up platform."""
# One service must be registered
assert self.hass.services.has_service(DOMAIN, SERVICE_SETALLZONES)
assert len(self.hass.data[DATA_BLACKBIRD]) == 1
assert self.hass.data[DATA_BLACKBIRD]["/dev/ttyUSB0-3"].name == "Zone name"
def test_setallzones_service_call_with_entity_id(self):
"""Test set all zone source service call with entity id."""
self.media_player.update()
assert "Zone name" == self.media_player.name
assert STATE_ON == self.media_player.state
assert "one" == self.media_player.source
# Call set all zones service
self.hass.services.call(
DOMAIN,
SERVICE_SETALLZONES,
{"entity_id": "media_player.zone_3", "source": "three"},
blocking=True,
)
# Check that source was changed
assert 3 == self.blackbird.zones[3].av
self.media_player.update()
assert "three" == self.media_player.source
def test_setallzones_service_call_without_entity_id(self):
"""Test set all zone source service call without entity id."""
self.media_player.update()
assert "Zone name" == self.media_player.name
assert STATE_ON == self.media_player.state
assert "one" == self.media_player.source
# Call set all zones service
self.hass.services.call(
DOMAIN, SERVICE_SETALLZONES, {"source": "three"}, blocking=True
)
# Check that source was changed
assert 3 == self.blackbird.zones[3].av
self.media_player.update()
assert "three" == self.media_player.source
def test_update(self):
"""Test updating values from blackbird."""
assert self.media_player.state is None
assert self.media_player.source is None
self.media_player.update()
assert STATE_ON == self.media_player.state
assert "one" == self.media_player.source
def test_name(self):
"""Test name property."""
assert "Zone name" == self.media_player.name
def test_state(self):
"""Test state property."""
assert self.media_player.state is None
self.media_player.update()
assert STATE_ON == self.media_player.state
self.blackbird.zones[3].power = False
self.media_player.update()
assert STATE_OFF == self.media_player.state
def test_supported_features(self):
"""Test supported features property."""
assert (
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
== self.media_player.supported_features
)
def test_source(self):
"""Test source property."""
assert self.media_player.source is None
self.media_player.update()
assert "one" == self.media_player.source
def test_media_title(self):
"""Test media title property."""
assert self.media_player.media_title is None
self.media_player.update()
assert "one" == self.media_player.media_title
def test_source_list(self):
"""Test source list property."""
# Note, the list is sorted!
assert ["one", "two", "three"] == self.media_player.source_list
def test_select_source(self):
"""Test source selection methods."""
self.media_player.update()
assert "one" == self.media_player.source
self.media_player.select_source("two")
assert 2 == self.blackbird.zones[3].av
self.media_player.update()
assert "two" == self.media_player.source
# Trying to set unknown source.
self.media_player.select_source("no name")
assert 2 == self.blackbird.zones[3].av
self.media_player.update()
assert "two" == self.media_player.source
def test_turn_on(self):
"""Testing turning on the zone."""
self.blackbird.zones[3].power = False
self.media_player.update()
assert STATE_OFF == self.media_player.state
self.media_player.turn_on()
assert self.blackbird.zones[3].power
self.media_player.update()
assert STATE_ON == self.media_player.state
def test_turn_off(self):
"""Testing turning off the zone."""
self.blackbird.zones[3].power = True
self.media_player.update()
assert STATE_ON == self.media_player.state
self.media_player.turn_off()
assert not self.blackbird.zones[3].power
self.media_player.update()
assert STATE_OFF == self.media_player.state
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from perfkitbenchmarker import linux_packages
from six.moves import range
flags.DEFINE_integer('redis_total_num_processes', 1,
'Total number of redis server processes.',
lower_bound=1)
flags.DEFINE_boolean('redis_enable_aof', False,
'Enable append-only file (AOF) with appendfsync always.')
flags.DEFINE_string('redis_server_version', '5.0.5',
'Version of redis server to use.')
REDIS_FIRST_PORT = 6379
REDIS_PID_FILE = 'redis.pid'
FLAGS = flags.FLAGS
REDIS_GIT = 'https://github.com/antirez/redis.git'
def _GetRedisTarName():
return 'redis-%s.tar.gz' % FLAGS.redis_server_version
def GetRedisDir():
return '%s/redis' % linux_packages.INSTALL_DIR
def _Install(vm):
"""Installs the redis package on the VM."""
vm.Install('build_tools')
vm.Install('wget')
vm.RemoteCommand('cd %s; git clone %s' %
(linux_packages.INSTALL_DIR, REDIS_GIT))
vm.RemoteCommand('cd %s && git checkout %s && make' % (
GetRedisDir(), FLAGS.redis_server_version))
def YumInstall(vm):
"""Installs the redis package on the VM."""
vm.InstallPackages('tcl-devel')
_Install(vm)
def AptInstall(vm):
"""Installs the redis package on the VM."""
vm.InstallPackages('tcl-dev')
_Install(vm)
def Configure(vm):
"""Configure redis server."""
sed_cmd = (
r"sed -i -e '/^save /d' -e 's/# *save \"\"/save \"\"/' "
"{0}/redis.conf").format(GetRedisDir())
vm.RemoteCommand(
'sudo sed -i "s/bind/#bind/g" {0}/redis.conf'.format(GetRedisDir()))
vm.RemoteCommand(
'sudo sed -i "s/protected-mode yes/protected-mode no/g" {0}/redis.conf'.
format(GetRedisDir()))
vm.RemoteCommand(sed_cmd)
if FLAGS.redis_enable_aof:
vm.RemoteCommand(
r'sed -i -e "s/appendonly no/appendonly yes/g" {0}/redis.conf'.format(
GetRedisDir()))
vm.RemoteCommand((
r'sed -i -e "s/appendfsync everysec/# appendfsync everysec/g" '
r'{0}/redis.conf'
).format(GetRedisDir()))
vm.RemoteCommand((
r'sed -i -e "s/# appendfsync always/appendfsync always/g" '
r'{0}/redis.conf'
).format(GetRedisDir()))
for i in range(FLAGS.redis_total_num_processes):
port = REDIS_FIRST_PORT + i
vm.RemoteCommand(
('cp {0}/redis.conf {0}/redis-{1}.conf').format(GetRedisDir(), port))
vm.RemoteCommand(
r'sed -i -e "s/port %d/port %d/g" %s/redis-%d.conf' %
(REDIS_FIRST_PORT, port, GetRedisDir(), port))
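    # Illustrative outcome (using the defaults computed above): with
    # --redis_total_num_processes=3 this loop produces redis-6379.conf,
    # redis-6380.conf and redis-6381.conf, differing only in their 'port'
    # line; Start() below launches one redis-server per generated config.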
def Start(vm):
"""Start redis server process."""
for i in range(FLAGS.redis_total_num_processes):
port = REDIS_FIRST_PORT + i
vm.RemoteCommand(
('nohup sudo {0}/src/redis-server {0}/redis-{1}.conf '
'&> /dev/null & echo $! > {0}/{2}-{1}').format(
GetRedisDir(), port, REDIS_PID_FILE))
def Cleanup(vm):
"""Remove redis."""
vm.RemoteCommand('sudo pkill redis-server')
|
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import locative
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.locative import DOMAIN, TRACKER_UPDATE
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_OK, HTTP_UNPROCESSABLE_ENTITY
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
# pylint: disable=redefined-outer-name
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
"""Mock device tracker config loading."""
pass
@pytest.fixture
async def locative_client(loop, hass, hass_client):
"""Locative mock client."""
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
with patch("homeassistant.components.device_tracker.legacy.update_config"):
return await hass_client()
@pytest.fixture
async def webhook_id(hass, locative_client):
"""Initialize the Geofency component and get the webhook_id."""
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
result = await hass.config_entries.flow.async_init(
"locative", context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
return result["result"].data["webhook_id"]
async def test_missing_data(locative_client, webhook_id):
"""Test missing data."""
url = f"/api/webhook/{webhook_id}"
data = {
"latitude": 1.0,
"longitude": 1.1,
"device": "123",
"id": "Home",
"trigger": "enter",
}
# No data
req = await locative_client.post(url)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No latitude
copy = data.copy()
del copy["latitude"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No device
copy = data.copy()
del copy["device"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No location
copy = data.copy()
del copy["id"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No trigger
copy = data.copy()
del copy["trigger"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# Test message
copy = data.copy()
copy["trigger"] = "test"
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_OK
# Test message, no location
copy = data.copy()
copy["trigger"] = "test"
del copy["id"]
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_OK
# Unknown trigger
copy = data.copy()
copy["trigger"] = "foobar"
req = await locative_client.post(url, data=copy)
assert req.status == HTTP_UNPROCESSABLE_ENTITY
async def test_enter_and_exit(hass, locative_client, webhook_id):
"""Test when there is a known zone."""
url = f"/api/webhook/{webhook_id}"
data = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "123",
"id": "Home",
"trigger": "enter",
}
# Enter the Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "home"
data["id"] = "HOME"
data["trigger"] = "exit"
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "not_home"
data["id"] = "hOmE"
data["trigger"] = "enter"
# Enter Home again
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "home"
data["trigger"] = "exit"
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "not_home"
data["id"] = "work"
data["trigger"] = "enter"
# Enter Work
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
).state
assert state_name == "work"
async def test_exit_after_enter(hass, locative_client, webhook_id):
"""Test when an exit message comes after an enter message."""
url = f"/api/webhook/{webhook_id}"
data = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "123",
"id": "Home",
"trigger": "enter",
}
# Enter Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "home"
data["id"] = "Work"
# Enter Work
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "work"
data["id"] = "Home"
data["trigger"] = "exit"
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "work"
async def test_exit_first(hass, locative_client, webhook_id):
"""Test when an exit message is sent first on a new device."""
url = f"/api/webhook/{webhook_id}"
data = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "new_device",
"id": "Home",
"trigger": "exit",
}
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "not_home"
async def test_two_devices(hass, locative_client, webhook_id):
"""Test updating two different devices."""
url = f"/api/webhook/{webhook_id}"
data_device_1 = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "device_1",
"id": "Home",
"trigger": "exit",
}
# Exit Home
req = await locative_client.post(url, data=data_device_1)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["device"])
)
assert state.state == "not_home"
# Enter Home
data_device_2 = dict(data_device_1)
data_device_2["device"] = "device_2"
data_device_2["trigger"] = "enter"
req = await locative_client.post(url, data=data_device_2)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_2["device"])
)
assert state.state == "home"
state = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["device"])
)
assert state.state == "not_home"
@pytest.mark.xfail(
reason="The device_tracker component does not support unloading yet."
)
async def test_load_unload_entry(hass, locative_client, webhook_id):
"""Test that the appropriate dispatch signals are added and removed."""
url = f"/api/webhook/{webhook_id}"
data = {
"latitude": 40.7855,
"longitude": -111.7367,
"device": "new_device",
"id": "Home",
"trigger": "exit",
}
# Exit Home
req = await locative_client.post(url, data=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
assert state.state == "not_home"
assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
await locative.async_unload_entry(hass, entry)
await hass.async_block_till_done()
assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
|
import pytest
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.storage import default_storage
from shop.conf import app_settings
from shop.models.cart import CartModel
from shop.models.defaults.customer import Customer
from shop.modifiers.pool import CartModifiersPool
from shop.views.cart import CartViewSet, WatchViewSet
from shop.modifiers.pool import cart_modifiers_pool
from rest_framework.reverse import reverse
CartModifiersPool.USE_CACHE = False
@pytest.fixture(name='filled_cart')
@pytest.mark.django_db
def test_add_to_cart(commodity_factory, api_client, rf):
# add a product to the cart
product = commodity_factory()
data = {'quantity': 2, 'product': product.id}
response = api_client.post(reverse('shop:cart-list'), data)
assert response.status_code == 201
assert response.data['quantity'] == 2
assert response.data['unit_price'] == str(product.unit_price)
assert response.data['line_total'] == str(2 * product.unit_price)
# verify that the product is in the cart
request = rf.get('/my-cart')
request.session = api_client.session
request.user = AnonymousUser()
request.customer = Customer.objects.get_from_request(request)
filled_cart = CartModel.objects.get_from_request(request)
filled_cart.update(request)
assert filled_cart.num_items == 1
items = filled_cart.items.all()
assert items[0].product == product
assert items[0].quantity == 2
assert filled_cart.subtotal == 2 * product.unit_price
return filled_cart
@pytest.mark.django_db
def test_list_cart(api_rf, filled_cart):
request = api_rf.get('/shop/api/cart')
request.customer = filled_cart.customer
response = CartViewSet.as_view({'get': 'list'})(request)
assert response.status_code == 200
assert response.data['num_items'] == 1
assert response.data['total_quantity'] == 2
assert response.data['subtotal'] == str(filled_cart.subtotal)
assert response.data['total'] == str(filled_cart.total)
@pytest.mark.django_db
def test_unowned_cart(customer_factory, api_rf, filled_cart):
request = api_rf.get('/shop/api/cart')
request.customer = customer_factory()
response = CartViewSet.as_view({'get': 'list'})(request)
assert response.data['num_items'] == 0
@pytest.mark.django_db
def test_change_quantity(api_rf, filled_cart):
product = filled_cart.items.all()[0].product
data = {'quantity': 3, 'product': product.id}
request = api_rf.put('/shop/api/cart', data)
request.customer = filled_cart.customer
response = CartViewSet.as_view({'put': 'update'})(request, pk=product.id)
assert response.status_code == 200
filled_cart.refresh_from_db()
assert filled_cart.num_items == 1
assert filled_cart.items.all()[0].quantity == 3
@pytest.mark.django_db
def test_too_greedy(session, api_rf, filled_cart):
product = filled_cart.items.all()[0].product
data = {'quantity': 10, 'product': product.id}
request = api_rf.put('/shop/api/cart', data)
request.customer = filled_cart.customer
request.session = session
request._messages = default_storage(request)
response = CartViewSet.as_view({'put': 'update'})(request, pk=product.id)
assert response.status_code == 200
filled_cart.refresh_from_db()
assert filled_cart.num_items == 1
assert filled_cart.items.all()[0].quantity == 5 # not 10, as requested
@pytest.mark.django_db
def test_remove_item(api_rf, filled_cart):
product = filled_cart.items.all()[0].product
request = api_rf.delete(reverse('shop:cart-list'))
request.customer = filled_cart.customer
response = CartViewSet.as_view({'delete': 'destroy'})(request, pk=product.id)
assert response.status_code == 200
filled_cart.refresh_from_db()
assert filled_cart.num_items == 0
assert filled_cart.items.count() == 0
@pytest.fixture(name='watch_list')
@pytest.mark.django_db
def test_watch_cart_item(api_rf, filled_cart):
product = filled_cart.items.all()[0].product
data = {'quantity': 0, 'product': product.id}
request = api_rf.put('/shop/api/cart', data)
request.customer = filled_cart.customer
response = WatchViewSet.as_view({'put': 'update'})(request, pk=product.id)
assert response.status_code == 200
filled_cart.refresh_from_db()
assert filled_cart.num_items == 0
assert filled_cart.items.all()[0].quantity == 0
return filled_cart
@pytest.mark.django_db
def test_add_watch_item(api_rf, watch_list):
product = watch_list.items.all()[0].product
data = {'quantity': 1, 'product': product.id}
request = api_rf.put('/shop/api/cart', data)
request.customer = watch_list.customer
response = CartViewSet.as_view({'put': 'update'})(request, pk=product.id)
assert response.status_code == 200
watch_list.refresh_from_db()
assert watch_list.num_items == 1
assert watch_list.items.all()[0].quantity == 1
@pytest.mark.django_db
def test_include_tax_modifier(api_rf, filled_cart):
request = api_rf.get('/shop/api/cart')
request.customer = filled_cart.customer
response = CartViewSet.as_view({'get': 'list'})(request)
assert response.status_code == 200
assert response.data['subtotal'] == str(filled_cart.subtotal)
tax_rate = 1 + app_settings.SHOP_VALUE_ADDED_TAX / 100
assert response.data['total'] == str(filled_cart.subtotal * tax_rate)
@pytest.mark.django_db
def test_payment_modifiers_with_same_processors(api_rf, filled_cart):
for modifier_to_test in cart_modifiers_pool.get_payment_modifiers():
for modifier_for_id in cart_modifiers_pool.get_payment_modifiers():
if modifier_to_test.is_active(modifier_for_id.identifier):
assert modifier_for_id.identifier == modifier_to_test.identifier
|
from collections import namedtuple
import logging
from habitipy.aio import HabitipyAsync
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY,
CONF_NAME,
CONF_PATH,
CONF_SENSORS,
CONF_URL,
)
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
CONF_API_USER = "api_user"
DEFAULT_URL = "https://habitica.com"
DOMAIN = "habitica"
ST = SensorType = namedtuple("SensorType", ["name", "icon", "unit", "path"])
SENSORS_TYPES = {
"name": ST("Name", None, "", ["profile", "name"]),
"hp": ST("HP", "mdi:heart", "HP", ["stats", "hp"]),
"maxHealth": ST("max HP", "mdi:heart", "HP", ["stats", "maxHealth"]),
"mp": ST("Mana", "mdi:auto-fix", "MP", ["stats", "mp"]),
"maxMP": ST("max Mana", "mdi:auto-fix", "MP", ["stats", "maxMP"]),
"exp": ST("EXP", "mdi:star", "EXP", ["stats", "exp"]),
"toNextLevel": ST("Next Lvl", "mdi:star", "EXP", ["stats", "toNextLevel"]),
"lvl": ST("Lvl", "mdi:arrow-up-bold-circle-outline", "Lvl", ["stats", "lvl"]),
"gp": ST("Gold", "mdi:currency-usd-circle", "Gold", ["stats", "gp"]),
"class": ST("Class", "mdi:sword", "", ["stats", "class"]),
}
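# A hedged note on the mapping above: each SensorType.path is a chain of keys
# into the user object returned by the Habitica API, so a lookup for the "hp"
# entry would resolve roughly as
#
#   value = user["stats"]["hp"]      # path == ["stats", "hp"]
#
# (a sketch; the actual traversal is done by the sensor platform that consumes
# this table, not in this module).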
INSTANCE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_URL, default=DEFAULT_URL): cv.url,
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_API_USER): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_SENSORS, default=list(SENSORS_TYPES)): vol.All(
cv.ensure_list, vol.Unique(), [vol.In(list(SENSORS_TYPES))]
),
}
)
has_unique_values = vol.Schema(vol.Unique())
# because we want a handy alias
def has_all_unique_users(value):
"""Validate that all API users are unique."""
api_users = [user[CONF_API_USER] for user in value]
has_unique_values(api_users)
return value
def has_all_unique_users_names(value):
"""Validate that all user's names are unique and set if any is set."""
names = [user.get(CONF_NAME) for user in value]
if None in names and any(name is not None for name in names):
raise vol.Invalid("user names of all users must be set if any is set")
if not all(name is None for name in names):
has_unique_values(names)
return value
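# A quick sketch of how the two validators above behave (hypothetical inputs,
# only the relevant keys shown):
#
#   has_all_unique_users([{CONF_API_USER: "a"}, {CONF_API_USER: "b"}])  # ok, returns value
#   has_all_unique_users([{CONF_API_USER: "a"}, {CONF_API_USER: "a"}])  # raises vol.Invalid
#   has_all_unique_users_names([{CONF_NAME: "x"}, {}])                  # raises vol.Invalid
#   has_all_unique_users_names([{}, {}])                                # ok, names may all be unset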
INSTANCE_LIST_SCHEMA = vol.All(
cv.ensure_list, has_all_unique_users, has_all_unique_users_names, [INSTANCE_SCHEMA]
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: INSTANCE_LIST_SCHEMA}, extra=vol.ALLOW_EXTRA)
SERVICE_API_CALL = "api_call"
ATTR_NAME = CONF_NAME
ATTR_PATH = CONF_PATH
ATTR_ARGS = "args"
EVENT_API_CALL_SUCCESS = f"{DOMAIN}_{SERVICE_API_CALL}_success"
SERVICE_API_CALL_SCHEMA = vol.Schema(
{
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_PATH): vol.All(cv.ensure_list, [str]),
vol.Optional(ATTR_ARGS): dict,
}
)
async def async_setup(hass, config):
"""Set up the Habitica service."""
conf = config[DOMAIN]
data = hass.data[DOMAIN] = {}
websession = async_get_clientsession(hass)
class HAHabitipyAsync(HabitipyAsync):
"""Closure API class to hold session."""
def __call__(self, **kwargs):
return super().__call__(websession, **kwargs)
for instance in conf:
url = instance[CONF_URL]
username = instance[CONF_API_USER]
password = instance[CONF_API_KEY]
name = instance.get(CONF_NAME)
config_dict = {"url": url, "login": username, "password": password}
api = HAHabitipyAsync(config_dict)
user = await api.user.get()
if name is None:
name = user["profile"]["name"]
data[name] = api
if CONF_SENSORS in instance:
hass.async_create_task(
discovery.async_load_platform(
hass,
"sensor",
DOMAIN,
{"name": name, "sensors": instance[CONF_SENSORS]},
config,
)
)
async def handle_api_call(call):
name = call.data[ATTR_NAME]
path = call.data[ATTR_PATH]
api = hass.data[DOMAIN].get(name)
if api is None:
_LOGGER.error("API_CALL: User '%s' not configured", name)
return
try:
for element in path:
api = api[element]
except KeyError:
_LOGGER.error(
"API_CALL: Path %s is invalid for API on '{%s}' element", path, element
)
return
kwargs = call.data.get(ATTR_ARGS, {})
data = await api(**kwargs)
hass.bus.async_fire(
EVENT_API_CALL_SUCCESS, {"name": name, "path": path, "data": data}
)
hass.services.async_register(
DOMAIN, SERVICE_API_CALL, handle_api_call, schema=SERVICE_API_CALL_SCHEMA
)
return True
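# For illustration, a hypothetical habitica.api_call payload that
# handle_api_call() above would accept; `path` is walked element by element on
# the habitipy client and `args` is passed through as keyword arguments:
#
#   service: habitica.api_call
#   data:
#     name: MyHabiticaUser               # must match a configured instance name
#     path: ["tasks", "user", "post"]    # hypothetical API route
#     args: {"text": "Do the thing", "type": "todo"}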
|
import logging
import os.path
from . import criterions as cr
from . import cumulative_criterions as cum_cr
from ..Console import Plugin as ConsolePlugin
from ...common.interfaces import AbstractPlugin, AggregateResultListener, AbstractInfoWidget
logger = logging.getLogger(__name__)
class Plugin(AbstractPlugin, AggregateResultListener):
""" Plugin that accepts criterion classes and triggers autostop """
SECTION = 'autostop'
def __init__(self, core, cfg, name):
AbstractPlugin.__init__(self, core, cfg, name)
AggregateResultListener.__init__(self)
self.cause_criterion = None
self.imbalance_rps = 0
self._criterions = {}
self.custom_criterions = []
self.counting = []
self._stop_report_path = ''
@staticmethod
def get_key():
return __file__
def get_counting(self):
""" get criterions that are activated """
return self.counting
def add_counting(self, obj):
""" add criterion that activated """
self.counting += [obj]
def add_criterion_class(self, criterion_class):
""" add new criterion class """
self.custom_criterions += [criterion_class]
def get_available_options(self):
return ["autostop", "report_file"]
def configure(self):
aggregator = self.core.job.aggregator
aggregator.add_result_listener(self)
self._stop_report_path = os.path.join(
self.core.artifacts_dir,
self.get_option("report_file", 'autostop_report.txt'))
self.add_criterion_class(cr.AvgTimeCriterion)
self.add_criterion_class(cr.NetCodesCriterion)
self.add_criterion_class(cr.HTTPCodesCriterion)
self.add_criterion_class(cr.QuantileCriterion)
self.add_criterion_class(cr.SteadyCumulativeQuantilesCriterion)
self.add_criterion_class(cr.TimeLimitCriterion)
self.add_criterion_class(cum_cr.TotalFracTimeCriterion)
self.add_criterion_class(cum_cr.TotalHTTPCodesCriterion)
self.add_criterion_class(cum_cr.TotalNetCodesCriterion)
self.add_criterion_class(cum_cr.TotalNegativeHTTPCodesCriterion)
self.add_criterion_class(cum_cr.TotalNegativeNetCodesCriterion)
self.add_criterion_class(cum_cr.TotalHTTPTrendCriterion)
self.add_criterion_class(cum_cr.QuantileOfSaturationCriterion)
def prepare_test(self):
criterions = self.get_option("autostop")
for criterion_str in criterions:
if not criterion_str:
continue
self.log.debug("Criterion string: %s", criterion_str)
self._criterions[criterion_str] = self.__create_criterion(
criterion_str)
self.log.debug("Criterion objects: %s", self._criterions)
try:
console = self.core.get_plugin_of_type(ConsolePlugin)
except Exception as ex: # pylint: disable=W0703
self.log.debug("Console not found: %s", ex)
console = None
if console:
console.add_info_widget(AutostopWidget(self))
def is_test_finished(self):
if self.cause_criterion:
self.log.warning(
"Autostop criterion requested test stop: %s",
self.cause_criterion.explain())
return self.cause_criterion.get_rc()
else:
return -1
def __create_criterion(self, criterion_str):
""" instantiate criterion from config string """
parsed = criterion_str.split("(")
type_str = parsed[0].strip().lower()
parsed[1] = parsed[1].split(")")[0].strip()
for criterion_class in self.custom_criterions:
if criterion_class.get_type_string() == type_str:
return criterion_class(self, parsed[1])
raise ValueError(
"Unsupported autostop criterion type: %s" % criterion_str)
def on_aggregated_data(self, data, stat):
self.counting = []
if not self.cause_criterion:
for criterion_text, criterion in self._criterions.items():
if criterion.notify(data, stat):
self.cause_criterion = criterion
if self.cause_criterion.cause_second:
self.imbalance_rps = int(self.cause_criterion.cause_second[1]["metrics"]["reqps"])
if not self.imbalance_rps:
self.imbalance_rps = int(
self.cause_criterion.cause_second[0]["overall"]["interval_real"]["len"])
self.core.publish('autostop', 'rps', self.imbalance_rps)
self.core.publish('autostop', 'reason', criterion.explain())
self.log.warning(
"Autostop criterion requested test stop on %d rps: %s", self.imbalance_rps, criterion_text)
                    with open(self._stop_report_path, 'w') as report_file:
                        report_file.write(criterion_text)
self.core.add_artifact_file(self._stop_report_path)
class AutostopWidget(AbstractInfoWidget):
""" widget that displays counting criterions """
def __init__(self, sender):
AbstractInfoWidget.__init__(self)
self.owner = sender
def get_index(self):
return 25
def render(self, screen):
res = []
candidates = self.owner.get_counting()
for candidate in candidates:
text, perc = candidate.widget_explain()
if perc >= 0.95:
res += [screen.markup.RED_DARK + text + screen.markup.RESET]
elif perc >= 0.8:
res += [screen.markup.RED + text + screen.markup.RESET]
elif perc >= 0.5:
res += [screen.markup.YELLOW + text + screen.markup.RESET]
else:
res += [text]
if res:
return "Autostop:\n " + ("\n ".join(res))
else:
return ''
|
import unittest
import doctest
import urwid
import urwid.numedit
def load_tests(loader, tests, ignore):
module_doctests = [
urwid.widget,
urwid.wimp,
urwid.decoration,
urwid.display_common,
urwid.main_loop,
urwid.numedit,
urwid.monitored_list,
urwid.raw_display,
'urwid.split_repr', # override function with same name
urwid.util,
urwid.signals,
urwid.graphics,
]
for m in module_doctests:
tests.addTests(doctest.DocTestSuite(m,
optionflags=doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL))
return tests
|
import logging
import pytest
import voluptuous as vol
import homeassistant.components.demo.notify as demo
import homeassistant.components.notify as notify
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import assert_setup_component
CONFIG = {notify.DOMAIN: {"platform": "demo"}}
@pytest.fixture
def events(hass):
"""Fixture that catches notify events."""
events = []
hass.bus.async_listen(demo.EVENT_NOTIFY, callback(lambda e: events.append(e)))
yield events
@pytest.fixture
def calls():
"""Fixture to calls."""
return []
@pytest.fixture
def record_calls(calls):
"""Fixture to record calls."""
@callback
def record_calls(*args):
"""Record calls."""
calls.append(args)
return record_calls
@pytest.fixture(name="mock_demo_notify")
def mock_demo_notify_fixture():
"""Mock demo notify service."""
with patch("homeassistant.components.demo.notify.get_service", autospec=True) as ns:
yield ns
async def setup_notify(hass):
"""Test setup."""
with assert_setup_component(1, notify.DOMAIN) as config:
assert await async_setup_component(hass, notify.DOMAIN, CONFIG)
assert config[notify.DOMAIN]
await hass.async_block_till_done()
async def test_no_notify_service(hass, mock_demo_notify, caplog):
"""Test missing platform notify service instance."""
caplog.set_level(logging.ERROR)
mock_demo_notify.return_value = None
await setup_notify(hass)
await hass.async_block_till_done()
assert mock_demo_notify.called
assert "Failed to initialize notification service demo" in caplog.text
async def test_discover_notify(hass, mock_demo_notify):
"""Test discovery of notify demo platform."""
assert notify.DOMAIN not in hass.config.components
mock_demo_notify.return_value = None
await discovery.async_load_platform(
hass, "notify", "demo", {"test_key": "test_val"}, {"notify": {}}
)
await hass.async_block_till_done()
assert notify.DOMAIN in hass.config.components
assert mock_demo_notify.called
assert mock_demo_notify.mock_calls[0][1] == (
hass,
{},
{"test_key": "test_val"},
)
async def test_sending_none_message(hass, events):
"""Test send with None as message."""
await setup_notify(hass)
with pytest.raises(vol.Invalid):
await hass.services.async_call(
notify.DOMAIN, notify.SERVICE_NOTIFY, {notify.ATTR_MESSAGE: None}
)
await hass.async_block_till_done()
assert len(events) == 0
async def test_sending_templated_message(hass, events):
"""Send a templated message."""
await setup_notify(hass)
hass.states.async_set("sensor.temperature", 10)
data = {
notify.ATTR_MESSAGE: "{{states.sensor.temperature.state}}",
notify.ATTR_TITLE: "{{ states.sensor.temperature.name }}",
}
await hass.services.async_call(notify.DOMAIN, notify.SERVICE_NOTIFY, data)
await hass.async_block_till_done()
last_event = events[-1]
assert last_event.data[notify.ATTR_TITLE] == "temperature"
assert last_event.data[notify.ATTR_MESSAGE] == "10"
async def test_method_forwards_correct_data(hass, events):
"""Test that all data from the service gets forwarded to service."""
await setup_notify(hass)
data = {
notify.ATTR_MESSAGE: "my message",
notify.ATTR_TITLE: "my title",
notify.ATTR_DATA: {"hello": "world"},
}
await hass.services.async_call(notify.DOMAIN, notify.SERVICE_NOTIFY, data)
await hass.async_block_till_done()
assert len(events) == 1
data = events[0].data
assert {
"message": "my message",
"title": "my title",
"data": {"hello": "world"},
} == data
async def test_calling_notify_from_script_loaded_from_yaml_without_title(hass, events):
"""Test if we can call a notify from a script."""
await setup_notify(hass)
step = {
"service": "notify.notify",
"data": {
"data": {"push": {"sound": "US-EN-Morgan-Freeman-Roommate-Is-Arriving.wav"}}
},
"data_template": {"message": "Test 123 {{ 2 + 2 }}\n"},
}
await async_setup_component(
hass, "script", {"script": {"test": {"sequence": step}}}
)
await hass.services.async_call("script", "test")
await hass.async_block_till_done()
assert len(events) == 1
assert {
"message": "Test 123 4",
"data": {"push": {"sound": "US-EN-Morgan-Freeman-Roommate-Is-Arriving.wav"}},
} == events[0].data
async def test_calling_notify_from_script_loaded_from_yaml_with_title(hass, events):
"""Test if we can call a notify from a script."""
await setup_notify(hass)
step = {
"service": "notify.notify",
"data": {
"data": {"push": {"sound": "US-EN-Morgan-Freeman-Roommate-Is-Arriving.wav"}}
},
"data_template": {"message": "Test 123 {{ 2 + 2 }}\n", "title": "Test"},
}
await async_setup_component(
hass, "script", {"script": {"test": {"sequence": step}}}
)
await hass.services.async_call("script", "test")
await hass.async_block_till_done()
assert len(events) == 1
assert {
"message": "Test 123 4",
"title": "Test",
"data": {"push": {"sound": "US-EN-Morgan-Freeman-Roommate-Is-Arriving.wav"}},
} == events[0].data
async def test_targets_are_services(hass):
"""Test that all targets are exposed as individual services."""
await setup_notify(hass)
assert hass.services.has_service("notify", "demo") is not None
service = "demo_test_target_name"
assert hass.services.has_service("notify", service) is not None
async def test_messages_to_targets_route(hass, calls, record_calls):
"""Test message routing to specific target services."""
await setup_notify(hass)
hass.bus.async_listen_once("notify", record_calls)
await hass.services.async_call(
"notify",
"demo_test_target_name",
{"message": "my message", "title": "my title", "data": {"hello": "world"}},
)
await hass.async_block_till_done()
data = calls[0][0].data
assert {
"message": "my message",
"target": ["test target id"],
"title": "my title",
"data": {"hello": "world"},
} == data
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import logging
import posixpath
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.configs import option_decoders
import six
class CopyThroughputBenchmarkSpec(
benchmark_config_spec.BenchmarkConfigSpec):
def __init__(self, component_full_name, **kwargs):
self.data_size_in_mb = None
super(CopyThroughputBenchmarkSpec, self).__init__(
component_full_name, **kwargs)
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(CopyThroughputBenchmarkSpec,
cls)._GetOptionDecoderConstructions()
result.update({
'data_size_in_mb': (option_decoders.FloatDecoder, {'default': None}),
})
return result
flags.DEFINE_enum('copy_benchmark_mode', 'cp', ['cp', 'dd', 'scp'],
'Runs either cp, dd or scp tests.')
flags.DEFINE_integer('copy_benchmark_single_file_mb', None, 'If set, a '
'single file of the specified number of MB is used '
'instead of the normal cloud-storage-workload.sh basket '
'of files. Not supported when copy_benchmark_mode is dd')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'copy_throughput'
BENCHMARK_CONFIG = """
copy_throughput:
description: Get cp and scp performance between vms.
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
disk_count: 2
vm_count: 1
"""
BENCHMARK_CONFIG_SPEC_CLASS = CopyThroughputBenchmarkSpec
# Preferred SCP ciphers, in order of preference:
CIPHERS = ['aes128-cbc', 'aes128-ctr']
DATA_FILE = 'cloud-storage-workload.sh'
# size of default data
DEFAULT_DATA_SIZE_IN_MB = 256.1
# Unit for all benchmarks
UNIT = 'MB/sec'
def GetConfig(user_config):
"""Decide number of vms needed and return infomation for copy benchmark."""
if FLAGS.copy_benchmark_mode == 'dd' and FLAGS.copy_benchmark_single_file_mb:
raise errors.Setup.InvalidFlagConfigurationError(
'Flag copy_benchmark_single_file_mb is not supported when flag '
'copy_benchmark_mode is dd.')
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS.copy_benchmark_mode == 'scp':
config['vm_groups']['default']['vm_count'] = 2
config['vm_groups']['default']['disk_count'] = 1
if FLAGS.copy_benchmark_single_file_mb:
config['data_size_in_mb'] = FLAGS.copy_benchmark_single_file_mb
return config
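# For example (a sketch of the effect above): with --copy_benchmark_mode=scp
# the single two-disk VM declared in BENCHMARK_CONFIG becomes two single-disk
# VMs, since scp needs a separate sender and receiver.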
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Unused
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
del benchmark_config # unused
data.ResourcePath(DATA_FILE)
def PrepareDataFile(vm, data_size_in_mb):
"""Generate data file on vm to destination directory.
Args:
vm: The VM which needs the data file.
data_size_in_mb: The size of the data file in MB.
"""
file_path = data.ResourcePath(DATA_FILE)
vm.PushFile(file_path, '%s/' % vm.GetScratchDir(0))
if data_size_in_mb:
vm.RemoteCommand('cd %s/; bash cloud-storage-workload.sh single_file %s'
% (vm.GetScratchDir(0), data_size_in_mb))
else:
vm.RemoteCommand('cd %s/; bash cloud-storage-workload.sh'
% vm.GetScratchDir(0))
def PreparePrivateKey(vm):
vm.AuthenticateVm()
def Prepare(benchmark_spec):
"""Prepare vms with additional scratch disks and create vms for scp.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm_util.RunThreaded(PreparePrivateKey, vms)
args = [((vm, benchmark_spec.config.data_size_in_mb), {})
for vm in benchmark_spec.vms]
vm_util.RunThreaded(PrepareDataFile, args)
def RunCp(vms, data_size_in_mb, metadata):
"""Runs cp benchmarks and parses results.
Args:
vms: The VMs running cp benchmarks.
data_size_in_mb: The size of the data file in MB.
metadata: The base metadata to attach to the sample.
Returns:
A list of sample.Sample objects.
"""
cmd = ('rm -rf %s/*; sudo sync; sudo sysctl vm.drop_caches=3; '
'time cp %s/data/* %s/; ' %
(vms[0].GetScratchDir(1), vms[0].GetScratchDir(0),
vms[0].GetScratchDir(1)))
_, res = vms[0].RemoteCommand(cmd)
logging.info(res)
time_used = vm_util.ParseTimeCommandResult(res)
return [sample.Sample('cp throughput', data_size_in_mb / time_used, UNIT,
metadata=metadata)]
def RunDd(vms, data_size_in_mb, metadata):
"""Run dd benchmark and parses results.
Args:
vms: The VMs running dd benchmarks.
data_size_in_mb: The size of the data file in MB.
metadata: The metadata to attach to the sample.
Returns:
A list of samples. Each sample is a 4-tuple of (benchmark_name, value, unit,
metadata), as accepted by PerfKitBenchmarkerPublisher.AddSamples.
"""
vm = vms[0]
cmd = ('rm -rf %s/*; sudo sync; sudo sysctl vm.drop_caches=3; '
'time for i in {0..99}; do dd if=%s/data/file-$i.dat '
'of=%s/file-$i.dat bs=262144; done' %
(vm.GetScratchDir(1), vm.GetScratchDir(0),
vm.GetScratchDir(1)))
_, res = vm.RemoteCommand(cmd)
logging.info(res)
time_used = vm_util.ParseTimeCommandResult(res)
return [sample.Sample('dd throughput', data_size_in_mb / time_used, UNIT,
metadata=metadata)]
def AvailableCiphers(vm):
"""Returns the set of ciphers accepted by the vm's SSH server."""
ciphers, _ = vm.RemoteCommand('sshd -T | grep ^ciphers ')
return set(ciphers.split()[1].split(','))
def ChooseSshCipher(vms):
"""Returns the most-preferred cipher that's available to all vms."""
available = functools.reduce(lambda a, b: a & b,
[AvailableCiphers(vm) for vm in vms])
for cipher in CIPHERS:
if cipher in available:
return cipher
raise Exception('None of the preferred ciphers (%s) are available (%s).'
% (CIPHERS, available))
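# A small sketch of the selection above with hypothetical cipher sets:
#
#   functools.reduce(lambda a, b: a & b,
#                    [{'aes128-ctr', 'chacha20-poly1305'},
#                     {'aes128-ctr', 'aes128-cbc'}])
#   -> {'aes128-ctr'}
#
# The first entry of CIPHERS present in that intersection (here 'aes128-ctr')
# is what gets passed to scp via -c.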
def RunScp(vms, data_size_in_mb, metadata):
"""Run scp benchmark.
Args:
vms: The vms running scp commands.
data_size_in_mb: The size of the data file in MB.
metadata: The metadata to attach to the sample.
Returns:
A list of samples. Each sample is a 4-tuple of (benchmark_name, value, unit,
metadata), as accepted by PerfKitBenchmarkerPublisher.AddSamples.
"""
cipher = ChooseSshCipher(vms)
result = RunScpSingleDirection(
vms[0], vms[1], cipher, data_size_in_mb, metadata)
result += RunScpSingleDirection(
vms[1], vms[0], cipher, data_size_in_mb, metadata)
return result
MODE_FUNCTION_DICTIONARY = {
'cp': RunCp,
'dd': RunDd,
'scp': RunScp}
def RunScpSingleDirection(sending_vm, receiving_vm, cipher,
data_size_in_mb, base_metadata):
"""Run scp from sending_vm to receiving_vm and parse results.
If 'receiving_vm' is accessible via internal IP from 'sending_vm', throughput
over internal IP addresses will be tested in addition to external IP
addresses.
Args:
sending_vm: The originating VM for the scp command.
receiving_vm: The destination VM for the scp command.
cipher: Name of the SSH cipher to use.
data_size_in_mb: The size of the data file in MB.
base_metadata: The base metadata to attach to the sample.
Returns:
A list of sample.Sample objects.
"""
results = []
metadata = base_metadata.copy()
for vm_specifier, vm in ('receiving', receiving_vm), ('sending', sending_vm):
for k, v in six.iteritems(vm.GetResourceMetadata()):
metadata['{0}_{1}'.format(vm_specifier, k)] = v
cmd_template = ('sudo sync; sudo sysctl vm.drop_caches=3; '
'time /usr/bin/scp -o StrictHostKeyChecking=no -i %s -c %s '
'%s %s@%%s:%%s/;') % (
linux_virtual_machine.REMOTE_KEY_PATH, cipher,
'%s/data/*' % sending_vm.GetScratchDir(0),
receiving_vm.user_name)
def RunForIpAddress(ip_address, ip_type):
"""Run SCP benchmark against a destination IP address."""
target_dir = posixpath.join(receiving_vm.GetScratchDir(0), ip_type)
cmd = cmd_template % (ip_address, target_dir)
receiving_vm.RemoteCommand('mkdir %s' % target_dir)
meta = metadata.copy()
meta['ip_type'] = ip_type
_, res = sending_vm.RemoteCommand(cmd)
time_used = vm_util.ParseTimeCommandResult(res)
result = data_size_in_mb / time_used
receiving_vm.RemoteCommand('rm -rf %s' % target_dir)
return sample.Sample('scp throughput', result, UNIT, meta)
if vm_util.ShouldRunOnExternalIpAddress():
results.append(RunForIpAddress(receiving_vm.ip_address, 'external'))
if vm_util.ShouldRunOnInternalIpAddress(sending_vm, receiving_vm):
results.append(RunForIpAddress(receiving_vm.internal_ip, 'internal'))
return results
def Run(benchmark_spec):
"""Run cp/scp on target vms.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of throughput samples. Each sample contains
the sample metric (string), value (float), unit (string), and metadata
(dict).
"""
vms = benchmark_spec.vms
data_size_for_calculation = DEFAULT_DATA_SIZE_IN_MB
if benchmark_spec.config.data_size_in_mb:
data_size_for_calculation = benchmark_spec.config.data_size_in_mb
metadata = {'copy_benchmark_single_file_mb':
benchmark_spec.config.data_size_in_mb}
results = MODE_FUNCTION_DICTIONARY[FLAGS.copy_benchmark_mode](
vms, data_size_for_calculation, metadata)
return results
def Cleanup(benchmark_spec): # pylint: disable=unused-argument
"""Cleanup function.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
pass
|
import argparse
import matplotlib.pyplot as plt
import chainer
from chainercv.datasets import voc_bbox_label_names
from chainercv.links import FasterRCNNVGG16
from chainercv import utils
from chainercv.visualizations import vis_bbox
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model')
parser.add_argument(
'--dataset', choices=('voc',), default='voc')
parser.add_argument('image')
args = parser.parse_args()
if args.dataset == 'voc':
if args.pretrained_model is None:
args.pretrained_model = 'voc07'
label_names = voc_bbox_label_names
model = FasterRCNNVGG16(
n_fg_class=len(label_names),
pretrained_model=args.pretrained_model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
img = utils.read_image(args.image, color=True)
bboxes, labels, scores = model.predict([img])
bbox, label, score = bboxes[0], labels[0], scores[0]
vis_bbox(
img, bbox, label, score, label_names=label_names)
plt.show()
if __name__ == '__main__':
main()
|
from copy import deepcopy
import pytest
import homeassistant.components.nextbus.sensor as nextbus
import homeassistant.components.sensor as sensor
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import assert_setup_component
VALID_AGENCY = "sf-muni"
VALID_ROUTE = "F"
VALID_STOP = "5650"
VALID_AGENCY_TITLE = "San Francisco Muni"
VALID_ROUTE_TITLE = "F-Market & Wharves"
VALID_STOP_TITLE = "Market St & 7th St"
SENSOR_ID_SHORT = "sensor.sf_muni_f"
CONFIG_BASIC = {
"sensor": {
"platform": "nextbus",
"agency": VALID_AGENCY,
"route": VALID_ROUTE,
"stop": VALID_STOP,
}
}
CONFIG_INVALID_MISSING = {"sensor": {"platform": "nextbus"}}
BASIC_RESULTS = {
"predictions": {
"agencyTitle": VALID_AGENCY_TITLE,
"routeTitle": VALID_ROUTE_TITLE,
"stopTitle": VALID_STOP_TITLE,
"direction": {
"title": "Outbound",
"prediction": [
{"minutes": "1", "epochTime": "1553807371000"},
{"minutes": "2", "epochTime": "1553807372000"},
{"minutes": "3", "epochTime": "1553807373000"},
],
},
}
}
async def assert_setup_sensor(hass, config, count=1):
"""Set up the sensor and assert it's been created."""
with assert_setup_component(count):
assert await async_setup_component(hass, sensor.DOMAIN, config)
await hass.async_block_till_done()
@pytest.fixture
def mock_nextbus():
"""Create a mock py_nextbus module."""
with patch(
"homeassistant.components.nextbus.sensor.NextBusClient"
) as NextBusClient:
yield NextBusClient
@pytest.fixture
def mock_nextbus_predictions(mock_nextbus):
"""Create a mock of NextBusClient predictions."""
instance = mock_nextbus.return_value
instance.get_predictions_for_multi_stops.return_value = BASIC_RESULTS
yield instance.get_predictions_for_multi_stops
@pytest.fixture
def mock_nextbus_lists(mock_nextbus):
"""Mock all list functions in nextbus to test validate logic."""
instance = mock_nextbus.return_value
instance.get_agency_list.return_value = {
"agency": [{"tag": "sf-muni", "title": "San Francisco Muni"}]
}
instance.get_route_list.return_value = {
"route": [{"tag": "F", "title": "F - Market & Wharves"}]
}
instance.get_route_config.return_value = {
"route": {"stop": [{"tag": "5650", "title": "Market St & 7th St"}]}
}
async def test_valid_config(hass, mock_nextbus, mock_nextbus_lists):
"""Test that sensor is set up properly with valid config."""
await assert_setup_sensor(hass, CONFIG_BASIC)
async def test_invalid_config(hass, mock_nextbus, mock_nextbus_lists):
"""Checks that component is not setup when missing information."""
await assert_setup_sensor(hass, CONFIG_INVALID_MISSING, count=0)
async def test_validate_tags(hass, mock_nextbus, mock_nextbus_lists):
"""Test that additional validation against the API is successful."""
# with self.subTest('Valid everything'):
assert nextbus.validate_tags(mock_nextbus(), VALID_AGENCY, VALID_ROUTE, VALID_STOP)
# with self.subTest('Invalid agency'):
assert not nextbus.validate_tags(
mock_nextbus(), "not-valid", VALID_ROUTE, VALID_STOP
)
# with self.subTest('Invalid route'):
assert not nextbus.validate_tags(mock_nextbus(), VALID_AGENCY, "0", VALID_STOP)
# with self.subTest('Invalid stop'):
assert not nextbus.validate_tags(mock_nextbus(), VALID_AGENCY, VALID_ROUTE, 0)
async def test_verify_valid_state(
hass, mock_nextbus, mock_nextbus_lists, mock_nextbus_predictions
):
"""Verify all attributes are set from a valid response."""
await assert_setup_sensor(hass, CONFIG_BASIC)
mock_nextbus_predictions.assert_called_once_with(
[{"stop_tag": VALID_STOP, "route_tag": VALID_ROUTE}], VALID_AGENCY
)
state = hass.states.get(SENSOR_ID_SHORT)
assert state is not None
assert state.state == "2019-03-28T21:09:31+00:00"
assert state.attributes["agency"] == VALID_AGENCY_TITLE
assert state.attributes["route"] == VALID_ROUTE_TITLE
assert state.attributes["stop"] == VALID_STOP_TITLE
assert state.attributes["direction"] == "Outbound"
assert state.attributes["upcoming"] == "1, 2, 3"
async def test_message_dict(
hass, mock_nextbus, mock_nextbus_lists, mock_nextbus_predictions
):
"""Verify that a single dict message is rendered correctly."""
mock_nextbus_predictions.return_value = {
"predictions": {
"agencyTitle": VALID_AGENCY_TITLE,
"routeTitle": VALID_ROUTE_TITLE,
"stopTitle": VALID_STOP_TITLE,
"message": {"text": "Message"},
"direction": {
"title": "Outbound",
"prediction": [
{"minutes": "1", "epochTime": "1553807371000"},
{"minutes": "2", "epochTime": "1553807372000"},
{"minutes": "3", "epochTime": "1553807373000"},
],
},
}
}
await assert_setup_sensor(hass, CONFIG_BASIC)
state = hass.states.get(SENSOR_ID_SHORT)
assert state is not None
assert state.attributes["message"] == "Message"
async def test_message_list(
hass, mock_nextbus, mock_nextbus_lists, mock_nextbus_predictions
):
"""Verify that a list of messages are rendered correctly."""
mock_nextbus_predictions.return_value = {
"predictions": {
"agencyTitle": VALID_AGENCY_TITLE,
"routeTitle": VALID_ROUTE_TITLE,
"stopTitle": VALID_STOP_TITLE,
"message": [{"text": "Message 1"}, {"text": "Message 2"}],
"direction": {
"title": "Outbound",
"prediction": [
{"minutes": "1", "epochTime": "1553807371000"},
{"minutes": "2", "epochTime": "1553807372000"},
{"minutes": "3", "epochTime": "1553807373000"},
],
},
}
}
await assert_setup_sensor(hass, CONFIG_BASIC)
state = hass.states.get(SENSOR_ID_SHORT)
assert state is not None
assert state.attributes["message"] == "Message 1 -- Message 2"
async def test_direction_list(
hass, mock_nextbus, mock_nextbus_lists, mock_nextbus_predictions
):
"""Verify that a list of messages are rendered correctly."""
mock_nextbus_predictions.return_value = {
"predictions": {
"agencyTitle": VALID_AGENCY_TITLE,
"routeTitle": VALID_ROUTE_TITLE,
"stopTitle": VALID_STOP_TITLE,
"message": [{"text": "Message 1"}, {"text": "Message 2"}],
"direction": [
{
"title": "Outbound",
"prediction": [
{"minutes": "1", "epochTime": "1553807371000"},
{"minutes": "2", "epochTime": "1553807372000"},
{"minutes": "3", "epochTime": "1553807373000"},
],
},
{
"title": "Outbound 2",
"prediction": {"minutes": "0", "epochTime": "1553807374000"},
},
],
}
}
await assert_setup_sensor(hass, CONFIG_BASIC)
state = hass.states.get(SENSOR_ID_SHORT)
assert state is not None
assert state.state == "2019-03-28T21:09:31+00:00"
assert state.attributes["agency"] == VALID_AGENCY_TITLE
assert state.attributes["route"] == VALID_ROUTE_TITLE
assert state.attributes["stop"] == VALID_STOP_TITLE
assert state.attributes["direction"] == "Outbound, Outbound 2"
assert state.attributes["upcoming"] == "0, 1, 2, 3"
async def test_custom_name(
hass, mock_nextbus, mock_nextbus_lists, mock_nextbus_predictions
):
"""Verify that a custom name can be set via config."""
config = deepcopy(CONFIG_BASIC)
config["sensor"]["name"] = "Custom Name"
await assert_setup_sensor(hass, config)
state = hass.states.get("sensor.custom_name")
assert state is not None
async def test_no_predictions(
hass, mock_nextbus, mock_nextbus_predictions, mock_nextbus_lists
):
"""Verify there are no exceptions when no predictions are returned."""
mock_nextbus_predictions.return_value = {}
await assert_setup_sensor(hass, CONFIG_BASIC)
state = hass.states.get(SENSOR_ID_SHORT)
assert state is not None
assert state.state == "unknown"
async def test_verify_no_upcoming(
hass, mock_nextbus, mock_nextbus_lists, mock_nextbus_predictions
):
"""Verify attributes are set despite no upcoming times."""
mock_nextbus_predictions.return_value = {
"predictions": {
"agencyTitle": VALID_AGENCY_TITLE,
"routeTitle": VALID_ROUTE_TITLE,
"stopTitle": VALID_STOP_TITLE,
"direction": {"title": "Outbound", "prediction": []},
}
}
await assert_setup_sensor(hass, CONFIG_BASIC)
state = hass.states.get(SENSOR_ID_SHORT)
assert state is not None
assert state.state == "unknown"
assert state.attributes["upcoming"] == "No upcoming predictions"
|
import os
from urllib.request import urlopen, Request
import re
import base64
import mimetypes
from flexx import flx
_leaflet_url = 'https://cdnjs.cloudflare.com/ajax/libs/leaflet/'
_leaflet_version = '1.0.3'
_leaflet_icons = [
'marker-icon.png',
'marker-icon-2x.png',
'marker-shadow.png',
]
if 'LEAFLET_DIR' in os.environ:
_base_url = 'file://%s' % os.environ['LEAFLET_DIR']
else:
_base_url = '%s/%s' % (_leaflet_url, _leaflet_version)
mimetypes.init()
def _get_code(item):
""" Get a text item from _base_url
"""
url = '%s/%s' % (_base_url, item)
req = Request(url, headers={'User-Agent': 'flexx/%s' % flx.__version__})
return urlopen(req).read().decode()
def _get_data(item_or_url):
""" Get a binary item from url or _base_url
"""
if '://' in item_or_url:
url = item_or_url
else:
url = '%s/%s' % (_base_url, item_or_url)
req = Request(url, headers={'User-Agent': 'flexx/%s' % flx.__version__})
return urlopen(req).read()
def _embed_css_resources(css, types=('.png',)):
""" Replace urls in css with data urls
"""
    type_str = '|'.join(r'\%s' % t for t in types)
    rx = re.compile(r'(url\s*\(\s*(.*(%s))\s*\))' % type_str)
found = rx.findall(css)
for match, item, ext in found:
data = base64.b64encode(_get_data(item)).decode()
mime = mimetypes.types_map[ext]
repl = 'url(data:%s;base64,%s)' % (mime, data)
css = css.replace(match, repl)
return css
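# A hedged illustration of the rewrite above: a css fragment such as
#
#   .leaflet-marker-icon { background-image: url(images/marker-icon.png); }
#
# comes back with the url() replaced by an inline data URI, roughly
#
#   background-image: url(data:image/png;base64,iVBORw0KGgo...)
#
# so the stylesheet no longer needs a separate request for each icon.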
flx.assets.associate_asset(
__name__,
'leaflet.js',
lambda: _get_code('leaflet.js'),
)
flx.assets.associate_asset(
__name__,
'leaflet.css',
lambda: _embed_css_resources(_get_code('leaflet.css')),
)
for icon in _leaflet_icons:
flx.assets.add_shared_data(icon, _get_data('images/%s' % icon))
class LeafletWidget(flx.Widget):
""" A widget that shows a slippy/tile-map using Leaflet.
"""
layers = flx.ListProp([], doc="""
List of tilemap layer tuples: (url, 'Layer').
""")
zoom = flx.IntProp(8, settable=True, doc="""
Zoom level for the map.
""")
min_zoom = flx.IntProp(0, settable=True, doc="""
        Minimum zoom level for the map.
""")
max_zoom = flx.IntProp(18, settable=True, doc="""
Maximum zoom level for the map.
""")
center = flx.FloatPairProp((5.2, 5.5), settable=True, doc="""
The center of the map.
""")
show_layers = flx.BoolProp(False, settable=True, doc="""
Whether to show layers-icon on the top-right of the map.
""")
show_scale = flx.BoolProp(False, settable=True, doc="""
Whether to show scale at bottom-left of map.
""")
@flx.action
def add_layer(self, url, name=None):
""" Add a layer to the map.
"""
# Avoid duplicates
self.remove_layer(url)
if name:
self.remove_layer(name)
# Add layer
layers = self.layers + [(url, name or 'Layer')]
self._mutate_layers(layers)
@flx.action
def remove_layer(self, url_or_name):
""" Remove a layer from the map by url or name.
"""
layers = list(self.layers)
for i in reversed(range(len(layers))):
if url_or_name in layers[i]:
layers.pop(i)
self._mutate_layers(layers)
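    # A brief usage sketch for the two actions above (the tile URL is the one
    # used in the example below):
    #
    #   widget.add_layer('http://a.tile.openstreetmap.org/', 'OpenStreetMap')
    #   widget.remove_layer('OpenStreetMap')                      # by name
    #   widget.remove_layer('http://a.tile.openstreetmap.org/')   # or by url
    #
    # add_layer() first removes any existing layer with the same url or name,
    # so calling it twice does not create duplicates.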
def _create_dom(self):
global L, document
node = document.createElement('div')
self.mapnode = document.createElement('div')
node.appendChild(self.mapnode)
self.mapnode.id = 'maproot'
self.mapnode.style.position = 'absolute'
self.mapnode.style.top = '0px'
self.mapnode.style.left = '0px'
self.map = L.map(self.mapnode)
self.map.on('zoomend', self.map_handle_zoom)
self.map.on('moveend', self.map_handle_move)
self.map.on('click', self.map_handle_mouse)
self.map.on('dblclick', self.map_handle_mouse)
# Container to keep track of leaflet layer objects
self.layer_container = []
self.layer_control = L.control.layers()
self.scale = L.control.scale({'imperial': False, 'maxWidth': 200})
# Set the path for icon images
L.Icon.Default.prototype.options.imagePath = '_data/shared/'
return node
def map_handle_zoom(self, e):
global isNaN
zoom = self.map.getZoom()
if isNaN(zoom):
return
if zoom != self.zoom:
self.set_zoom(zoom)
def map_handle_move(self, e):
center_coord = self.map.getCenter()
center = center_coord.lat, center_coord.lng
if center != self.center:
self.set_center(center)
def map_handle_mouse(self, e):
latlng = [e.latlng.lat, e.latlng.lng]
xy = [e.layerPoint.x, e.layerPoint.y]
self.pointer_event(e.type, latlng, xy)
@flx.emitter
def pointer_event(self, event, latlng, xy):
return {'event': event, 'latlng': latlng, 'xy': xy}
@flx.reaction
def __handle_zoom(self):
self.map.setZoom(self.zoom)
@flx.reaction
def __handle_min_zoom(self):
self.map.setMinZoom(self.min_zoom)
@flx.reaction
def __handle_max_zoom(self):
self.map.setMaxZoom(self.max_zoom)
@flx.reaction
def __handle_center(self):
self.map.panTo(self.center)
@flx.reaction
def __handle_show_layers(self):
if self.show_layers:
self.map.addControl(self.layer_control)
else:
self.map.removeControl(self.layer_control)
@flx.reaction
def __handle_show_scale(self):
if self.show_scale:
self.map.addControl(self.scale)
else:
self.map.removeControl(self.scale)
@flx.reaction
def __size_changed(self):
size = self.size
if size[0] or size[1]:
self.mapnode.style.width = size[0] + 'px'
self.mapnode.style.height = size[1] + 'px'
# Notify the map that it's container's size changed
self.map.invalidateSize()
@flx.reaction
def __layers_changed(self):
global L
for layer in self.layer_container:
self.layer_control.removeLayer(layer)
if self.map.hasLayer(layer):
self.map.removeLayer(layer)
for layer_url, layer_name in self.layers:
if not layer_url.endswith('.png'):
if not layer_url.endswith('/'):
layer_url += '/'
layer_url += '{z}/{x}/{y}.png'
new_layer = L.tileLayer(layer_url)
self.layer_container.append(new_layer)
self.map.addLayer(new_layer)
self.layer_control.addOverlay(new_layer, layer_name)
class LeafletExample(flx.Widget):
def init(self):
with flx.HBox():
self.leaflet = LeafletWidget(
flex=1,
center=(52, 4.1),
zoom=12,
show_scale=lambda: self.cbs.checked,
show_layers=lambda: self.cbl.checked,
)
with flx.VBox():
self.btna = flx.Button(text='Add SeaMap')
self.btnr = flx.Button(text='Remove SeaMap')
self.cbs = flx.CheckBox(text='Show scale')
self.cbl = flx.CheckBox(text='Show layers')
self.list = flx.VBox()
flx.Widget(flex=1)
self.leaflet.add_layer('http://a.tile.openstreetmap.org/', 'OpenStreetMap')
@flx.reaction('btna.pointer_click')
def handle_seamap_add(self, *events):
self.leaflet.add_layer('http://t1.openseamap.org/seamark/', 'OpenSeaMap')
@flx.reaction('btnr.pointer_click')
def handle_seamap_remove(self, *events):
        self.leaflet.remove_layer('http://t1.openseamap.org/seamark/')
# @flx.reaction('cbs.checked', 'cbl.checked')
# def handle_checkboxes(self, *events):
    #     self.leaflet.set_show_scale(self.cbs.checked)
    #     self.leaflet.set_show_layers(self.cbl.checked)
@flx.reaction('leaflet.pointer_event')
def handle_leaflet_mouse(self, *events):
global L
ev = events[-1]
latlng = tuple(ev['latlng'])
flx.Label(text='%f, %f' % (int(100*latlng[0])/100, int(100*latlng[1])/100),
parent=self.list)
latlng = tuple(ev['latlng'])
if ev['event'] == 'click':
m = L.marker(ev['latlng'])
m.bindTooltip('%f, %f' % (latlng[0], latlng[1]))
m.addTo(self.leaflet.map)
if __name__ == '__main__':
flx.launch(LeafletExample, 'firefox')
flx.run()
|
import pytest
import sh
from molecule import config
from molecule.dependency import shell
@pytest.fixture
def _dependency_section_data():
return {
'dependency': {
'name': 'shell',
'command': 'ls -l -a /tmp',
'options': {
'foo': 'bar',
},
'env': {
'FOO': 'bar',
}
}
}
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(_dependency_section_data, patched_config_validate,
config_instance):
return shell.Shell(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance):
x = {}
assert x == _instance.default_options
def test_default_env_property(_instance):
assert 'MOLECULE_FILE' in _instance.default_env
assert 'MOLECULE_INVENTORY_FILE' in _instance.default_env
assert 'MOLECULE_SCENARIO_DIRECTORY' in _instance.default_env
assert 'MOLECULE_INSTANCE_CONFIG' in _instance.default_env
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_name_property(_instance):
assert 'shell' == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_options_property(_instance):
x = {'foo': 'bar'}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {}
x = {'foo': 'bar'}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_env_property(_instance):
assert 'bar' == _instance.env['FOO']
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_bake(_instance):
_instance.bake()
x = [
str(sh.ls),
'-l',
'-a',
'/tmp',
]
result = str(_instance._sh_command).split()
assert sorted(x) == sorted(result)
def test_execute(patched_run_command, patched_logger_success, _instance):
_instance._sh_command = 'patched-command'
_instance.execute()
patched_run_command.assert_called_once_with('patched-command', debug=False)
msg = 'Dependency completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute_when_disabled(
patched_run_command, patched_logger_warn, _instance):
_instance._config.config['dependency']['enabled'] = False
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, dependency is disabled.'
patched_logger_warn.assert_called_once_with(msg)
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_execute_bakes(patched_run_command, _instance):
_instance.execute()
assert _instance._sh_command is not None
assert 1 == patched_run_command.call_count
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_executes_catches_and_exits_return_code(patched_run_command,
_instance):
patched_run_command.side_effect = sh.ErrorReturnCode_1(sh.ls, b'', b'')
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
def test_has_command_configured(_instance):
assert _instance._has_command_configured()
|
from unittest import mock
from pyfritzhome import LoginError
import pytest
from requests.exceptions import HTTPError
from homeassistant.components.fritzbox.const import DOMAIN
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.const import CONF_DEVICES, CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from . import MOCK_CONFIG
from tests.async_mock import Mock, patch
MOCK_USER_DATA = MOCK_CONFIG[DOMAIN][CONF_DEVICES][0]
MOCK_SSDP_DATA = {
ATTR_SSDP_LOCATION: "https://fake_host:12345/test",
ATTR_UPNP_FRIENDLY_NAME: "fake_name",
ATTR_UPNP_UDN: "uuid:only-a-test",
}
@pytest.fixture(name="fritz")
def fritz_fixture() -> Mock:
"""Patch libraries."""
with patch("homeassistant.components.fritzbox.config_flow.Fritzhome") as fritz:
yield fritz
async def test_user(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert not result["result"].unique_id
async def test_user_auth_failed(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user with authentication failure."""
fritz().login.side_effect = [LoginError("Boom"), mock.DEFAULT]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"]["base"] == "invalid_auth"
async def test_user_not_successful(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user but no connection found."""
fritz().login.side_effect = OSError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "no_devices_found"
async def test_user_already_configured(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user when already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert not result["result"].unique_id
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_import(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by import."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "import"}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert not result["result"].unique_id
async def test_ssdp(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "fake_pass", CONF_USERNAME: "fake_user"},
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_name"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert result["result"].unique_id == "only-a-test"
async def test_ssdp_no_friendly_name(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery without friendly name."""
MOCK_NO_NAME = MOCK_SSDP_DATA.copy()
del MOCK_NO_NAME[ATTR_UPNP_FRIENDLY_NAME]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_NO_NAME
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "fake_pass", CONF_USERNAME: "fake_user"},
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert result["result"].unique_id == "only-a-test"
async def test_ssdp_auth_failed(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery with authentication failure."""
fritz().login.side_effect = LoginError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
assert result["errors"]["base"] == "invalid_auth"
async def test_ssdp_not_successful(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery but no device found."""
fritz().login.side_effect = OSError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == "abort"
assert result["reason"] == "no_devices_found"
async def test_ssdp_not_supported(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery with unsupported device."""
fritz().get_device_elements.side_effect = HTTPError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
async def test_ssdp_already_in_progress_unique_id(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery twice."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_ssdp_already_in_progress_host(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery twice."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
MOCK_NO_UNIQUE_ID = MOCK_SSDP_DATA.copy()
del MOCK_NO_UNIQUE_ID[ATTR_UPNP_UDN]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_NO_UNIQUE_ID
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_ssdp_already_configured(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery when already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert not result["result"].unique_id
result2 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
assert result["result"].unique_id == "only-a-test"
|
import os
from tempfile import TemporaryDirectory
from radicale import pathutils
class StorageCreateCollectionMixin:
def create_collection(self, href, items=None, props=None):
folder = self._get_collection_root_folder()
# Path should already be sanitized
sane_path = pathutils.strip_path(href)
filesystem_path = pathutils.path_to_filesystem(folder, sane_path)
if not props:
self._makedirs_synced(filesystem_path)
return self._collection_class(
self, pathutils.unstrip_path(sane_path, True))
parent_dir = os.path.dirname(filesystem_path)
self._makedirs_synced(parent_dir)
# Create a temporary directory with an unsafe name
with TemporaryDirectory(
prefix=".Radicale.tmp-", dir=parent_dir) as tmp_dir:
# The temporary directory itself can't be renamed
tmp_filesystem_path = os.path.join(tmp_dir, "collection")
os.makedirs(tmp_filesystem_path)
col = self._collection_class(
self, pathutils.unstrip_path(sane_path, True),
filesystem_path=tmp_filesystem_path)
col.set_meta(props)
if items is not None:
if props.get("tag") == "VCALENDAR":
col._upload_all_nonatomic(items, suffix=".ics")
elif props.get("tag") == "VADDRESSBOOK":
col._upload_all_nonatomic(items, suffix=".vcf")
if os.path.lexists(filesystem_path):
pathutils.rename_exchange(tmp_filesystem_path, filesystem_path)
else:
os.rename(tmp_filesystem_path, filesystem_path)
self._sync_directory(parent_dir)
return self._collection_class(
self, pathutils.unstrip_path(sane_path, True))
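# Minimal standalone sketch (plain os/tempfile, no Radicale internals) of the
# pattern create_collection() relies on: stage the new collection inside a
# temporary sibling directory, then move it into place with a single rename so
# readers never observe a half-written collection. Assumes the target path does
# not exist yet; the mixin above additionally handles replacement via
# pathutils.rename_exchange.
def _atomic_create_sketch(target_path, filenames):
    parent_dir = os.path.dirname(target_path)
    with TemporaryDirectory(prefix=".tmp-", dir=parent_dir) as tmp_dir:
        # The temporary directory itself is removed on exit, so the payload is
        # staged in a subdirectory that can be renamed away before cleanup.
        staged = os.path.join(tmp_dir, "collection")
        os.makedirs(staged)
        for name in filenames:
            with open(os.path.join(staged, name), "w") as f:
                f.write("")
        os.rename(staged, target_path)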
|
from homeassistant import __main__ as main
from homeassistant.const import REQUIRED_PYTHON_VER
from tests.async_mock import PropertyMock, patch
@patch("sys.exit")
def test_validate_python(mock_exit):
"""Test validate Python version method."""
with patch("sys.version_info", new_callable=PropertyMock(return_value=(2, 7, 8))):
main.validate_python()
assert mock_exit.called is True
mock_exit.reset_mock()
with patch("sys.version_info", new_callable=PropertyMock(return_value=(3, 2, 0))):
main.validate_python()
assert mock_exit.called is True
mock_exit.reset_mock()
with patch("sys.version_info", new_callable=PropertyMock(return_value=(3, 4, 2))):
main.validate_python()
assert mock_exit.called is True
mock_exit.reset_mock()
with patch("sys.version_info", new_callable=PropertyMock(return_value=(3, 5, 2))):
main.validate_python()
assert mock_exit.called is True
mock_exit.reset_mock()
with patch(
"sys.version_info",
new_callable=PropertyMock(
return_value=(REQUIRED_PYTHON_VER[0] - 1,) + REQUIRED_PYTHON_VER[1:]
),
):
main.validate_python()
assert mock_exit.called is True
mock_exit.reset_mock()
with patch(
"sys.version_info", new_callable=PropertyMock(return_value=REQUIRED_PYTHON_VER)
):
main.validate_python()
assert mock_exit.called is False
mock_exit.reset_mock()
with patch(
"sys.version_info",
new_callable=PropertyMock(
return_value=(REQUIRED_PYTHON_VER[:2]) + (REQUIRED_PYTHON_VER[2] + 1,)
),
):
main.validate_python()
assert mock_exit.called is False
mock_exit.reset_mock()
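# Pure-Python sketch (not Home Assistant code) of the rule the test above
# exercises: validate_python() is expected to call sys.exit() exactly when the
# interpreter's version tuple sorts below REQUIRED_PYTHON_VER.
def _should_exit_sketch(version_info, required=REQUIRED_PYTHON_VER):
    """Return True if a hypothetical validate_python() should exit."""
    return tuple(version_info) < tuple(required)
def test_should_exit_sketch():
    """Sanity-check the sketch against the cases used above."""
    assert _should_exit_sketch((2, 7, 8)) is True
    assert _should_exit_sketch(REQUIRED_PYTHON_VER) is False
    assert _should_exit_sketch(REQUIRED_PYTHON_VER[:2] + (REQUIRED_PYTHON_VER[2] + 1,)) is False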
|
import re
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.translation import gettext as _
from weblate.legal.models import Agreement
from weblate.utils import messages
class RequireTOSMiddleware:
"""Middleware to enforce TOS confirmation on certain requests."""
def __init__(self, get_response=None):
self.get_response = get_response
# Ignored paths regexp, mostly covers API and legal pages
self.matcher = re.compile(
r"^/(legal|about|contact|api|static|widgets|data|hooks)/"
)
def process_view(self, request, view_func, view_args, view_kwargs):
"""Check request whether user has agreed to TOS."""
# We intercept only GET requests for authenticated users
if request.method != "GET" or not request.user.is_authenticated:
return None
# Some paths are ignored
if self.matcher.match(request.path):
return None
# Check TOS agreement
agreement = Agreement.objects.get_or_create(user=request.user)[0]
if not agreement.is_current():
messages.info(
request,
_(
"We have new version of the Terms of Service document, "
"please read it and confirm that you agree with it."
),
)
return redirect(
"{}?{}".format(
reverse("legal:confirm"),
urlencode({"next": request.get_full_path()}),
)
)
# Explicitly return None for all non-matching requests
return None
def __call__(self, request):
return self.get_response(request)
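# Hypothetical wiring sketch (the weblate.legal dotted path is an assumption,
# not copied from Weblate's settings): a middleware with a process_view hook is
# enabled by listing it in Django's MIDDLEWARE setting, after the session and
# authentication middleware it relies on for request.user.
MIDDLEWARE_SKETCH = [
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "weblate.legal.middleware.RequireTOSMiddleware",  # assumed module path
]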
|
from django.core.management.base import CommandError
from django.db import transaction
from weblate.lang.models import Language
from weblate.trans.models import Component, Translation, Unit
from weblate.utils.management.base import BaseCommand
class WeblateComponentCommand(BaseCommand):
"""Command which accepts project/component/--all params to process."""
needs_repo = False
def add_arguments(self, parser):
parser.add_argument(
"--all",
action="store_true",
dest="all",
default=False,
help="process all components",
)
parser.add_argument(
"component",
nargs="*",
help="Slug <project/component> of component to process",
)
def get_units(self, **options):
"""Return list of units matching parameters."""
if options["all"]:
return Unit.objects.all()
return Unit.objects.filter(
translation__component__in=self.get_components(**options)
)
def iterate_units(self, **options):
"""Memory effective iteration over units."""
units = self.get_units(**options).order_by("pk")
count = units.count()
if not count:
return
current = 0
last = units.order_by("-pk")[0].pk
done = 0
step = 1000
# Iterate over chunks
while current < last:
self.stdout.write("Processing {:.1f}%".format(done * 100.0 / count))
with transaction.atomic():
step_units = units.filter(pk__gt=current)[:step].prefetch()
for unit in step_units:
current = unit.pk
done += 1
yield unit
self.stdout.write("Operation completed")
def get_translations(self, **options):
"""Return list of translations matching parameters."""
return Translation.objects.prefetch().filter(
component__in=self.get_components(**options)
)
def get_components(self, **options):
"""Return list of components matching parameters."""
if options["all"]:
# all components
if self.needs_repo:
result = Component.objects.exclude(repo__startswith="weblate:/")
else:
result = Component.objects.all()
elif not options["component"]:
            # no arguments to filter projects
self.stderr.write(
"Please specify either --all " "or at least one <project/component>"
)
raise CommandError("Nothing to process!")
else:
# start with none and add found
result = Component.objects.none()
# process arguments
for arg in options["component"]:
# do we have also component?
parts = arg.split("/")
# filter by project
found = Component.objects.filter(project__slug=parts[0])
# filter by component if available
if len(parts) == 2:
found = found.filter(slug=parts[1])
# warn on no match
if found.count() == 0:
self.stderr.write(f'"{arg}" did not match any components')
raise CommandError("Nothing to process!")
# merge results
result |= found
return result
def handle(self, *args, **options):
"""The actual logic of the command.
Subclasses must implement this method.
"""
raise NotImplementedError()
class WeblateLangCommand(WeblateComponentCommand):
"""Command accepting additional language parameter.
It can filter list of languages to process.
"""
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--lang",
action="store",
dest="lang",
default=None,
help="Limit only to given languages (comma separated list)",
)
def get_units(self, **options):
"""Return list of units matching parameters."""
units = super().get_units(**options)
if options["lang"] is not None:
units = units.filter(translation__language__code=options["lang"])
return units
def get_translations(self, **options):
"""Return list of translations matching parameters."""
result = super().get_translations(**options)
if options["lang"] is not None:
langs = options["lang"].split(",")
result = result.filter(language_code__in=langs)
return result
def handle(self, *args, **options):
"""The actual logic of the command.
Subclasses must implement this method.
"""
raise NotImplementedError()
class WeblateTranslationCommand(BaseCommand):
"""Command with target of one translation."""
def add_arguments(self, parser):
parser.add_argument("project", help="Slug of project")
parser.add_argument("component", help="Slug of component")
parser.add_argument("language", help="Slug of language")
def get_translation(self, **options):
"""Get translation object."""
try:
component = Component.objects.get(
project__slug=options["project"], slug=options["component"]
)
except Component.DoesNotExist:
raise CommandError("No matching translation component found!")
try:
return Translation.objects.get(
component=component, language__code=options["language"]
)
except Translation.DoesNotExist:
if "add" in options and options["add"]:
language = Language.objects.fuzzy_get(options["language"])
if component.add_new_language(language, None):
return Translation.objects.get(
component=component, language=language
)
raise CommandError("No matching translation project found!")
def handle(self, *args, **options):
"""The actual logic of the command.
Subclasses must implement this method.
"""
raise NotImplementedError()
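# Illustrative subclass (not shipped with Weblate) showing how the base classes
# above are meant to be used: reuse the project/component/--all and --lang
# filtering from WeblateLangCommand and implement only handle().
class ListTranslationsCommand(WeblateLangCommand):
    help = "List translations matching the given filters (sketch only)"
    def handle(self, *args, **options):
        for translation in self.get_translations(**options):
            self.stdout.write(str(translation))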
|
from Handler import Handler
from graphite import GraphiteHandler
from copy import deepcopy
class MultiGraphiteHandler(Handler):
"""
    Implements the abstract Handler class, sending data to multiple
    graphite servers by using one GraphiteHandler instance per configured host
"""
def __init__(self, config=None):
"""
Create a new instance of the MultiGraphiteHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
self.handlers = []
# Initialize Options
hosts = self.config['host']
for host in hosts:
config = deepcopy(self.config)
config['host'] = host
self.handlers.append(GraphiteHandler(config))
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MultiGraphiteHandler, self).get_default_config_help()
config.update({
'host': 'Hostname, Hostname, Hostname',
'port': 'Port',
'proto': 'udp or tcp',
'timeout': '',
'batch': 'How many to store before sending to the graphite server',
'max_backlog_multiplier': 'how many batches to store before trimming', # NOQA
'trim_backlog_multiplier': 'Trim down how many batches',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MultiGraphiteHandler, self).get_default_config()
config.update({
'host': ['localhost'],
'port': 2003,
'proto': 'tcp',
'timeout': 15,
'batch': 1,
'max_backlog_multiplier': 5,
'trim_backlog_multiplier': 4,
})
return config
def process(self, metric):
"""
Process a metric by passing it to GraphiteHandler
instances
"""
for handler in self.handlers:
handler.process(metric)
def flush(self):
"""Flush metrics in queue"""
for handler in self.handlers:
handler.flush()
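# Self-contained sketch (stub handler, no sockets, not Diamond code) of the
# fan-out pattern used above: the shared config is deep-copied once per host
# and every metric is forwarded to each per-host handler.
def _fan_out_sketch():
    class _StubHandler(object):
        def __init__(self, config):
            self.config = config
            self.seen = []
        def process(self, metric):
            self.seen.append((self.config['host'], metric))
    base_config = {'host': ['graphite-a', 'graphite-b'], 'port': 2003}
    handlers = []
    for host in base_config['host']:
        per_host = deepcopy(base_config)
        per_host['host'] = host
        handlers.append(_StubHandler(per_host))
    for handler in handlers:
        handler.process('servers.web01.cpu.total 1.0 1300000000')
    return handlers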
|
import numpy as np
from ..evoked import Evoked
from ..epochs import BaseEpochs
from ..io import BaseRaw
from ..event import find_events
from ..io.pick import _picks_to_idx
from ..utils import _check_preload, _check_option, fill_doc
def _get_window(start, end):
"""Return window which has length as much as parameter start - end."""
from scipy.signal import hann
window = 1 - np.r_[hann(4)[:2],
np.ones(np.abs(end - start) - 4),
hann(4)[-2:]].T
return window
def _fix_artifact(data, window, picks, first_samp, last_samp, mode):
"""Modify original data by using parameter data."""
from scipy.interpolate import interp1d
if mode == 'linear':
x = np.array([first_samp, last_samp])
f = interp1d(x, data[:, (first_samp, last_samp)][picks])
xnew = np.arange(first_samp, last_samp)
interp_data = f(xnew)
data[picks, first_samp:last_samp] = interp_data
if mode == 'window':
data[picks, first_samp:last_samp] = \
data[picks, first_samp:last_samp] * window[np.newaxis, :]
@fill_doc
def fix_stim_artifact(inst, events=None, event_id=None, tmin=0.,
tmax=0.01, mode='linear', stim_channel=None,
picks=None):
"""Eliminate stimulation's artifacts from instance.
.. note:: This function operates in-place, consider passing
``inst.copy()`` if this is not desired.
Parameters
----------
inst : instance of Raw or Epochs or Evoked
The data.
events : array, shape (n_events, 3)
The list of events. Required only when inst is Raw.
event_id : int
The id of the events generating the stimulation artifacts.
If None, read all events. Required only when inst is Raw.
tmin : float
Start time of the interpolation window in seconds.
tmax : float
End time of the interpolation window in seconds.
mode : 'linear' | 'window'
Way to fill the artifacted time interval.
'linear' does linear interpolation
'window' applies a (1 - hanning) window.
stim_channel : str | None
Stim channel to use.
%(picks_all_data)s
Returns
-------
inst : instance of Raw or Evoked or Epochs
Instance with modified data.
"""
_check_option('mode', mode, ['linear', 'window'])
s_start = int(np.ceil(inst.info['sfreq'] * tmin))
s_end = int(np.ceil(inst.info['sfreq'] * tmax))
if (mode == "window") and (s_end - s_start) < 4:
raise ValueError('Time range is too short. Use a larger interval '
'or set mode to "linear".')
window = None
if mode == 'window':
window = _get_window(s_start, s_end)
picks = _picks_to_idx(inst.info, picks, 'data', exclude=())
_check_preload(inst, 'fix_stim_artifact')
if isinstance(inst, BaseRaw):
if events is None:
events = find_events(inst, stim_channel=stim_channel)
if len(events) == 0:
raise ValueError('No events are found')
if event_id is None:
events_sel = np.arange(len(events))
else:
events_sel = (events[:, 2] == event_id)
event_start = events[events_sel, 0]
data = inst._data
for event_idx in event_start:
first_samp = int(event_idx) - inst.first_samp + s_start
last_samp = int(event_idx) - inst.first_samp + s_end
_fix_artifact(data, window, picks, first_samp, last_samp, mode)
elif isinstance(inst, BaseEpochs):
if inst.reject is not None:
raise RuntimeError('Reject is already applied. Use reject=None '
'in the constructor.')
e_start = int(np.ceil(inst.info['sfreq'] * inst.tmin))
first_samp = s_start - e_start
last_samp = s_end - e_start
data = inst._data
for epoch in data:
_fix_artifact(epoch, window, picks, first_samp, last_samp, mode)
elif isinstance(inst, Evoked):
first_samp = s_start - inst.first
last_samp = s_end - inst.first
data = inst.data
_fix_artifact(data, window, picks, first_samp, last_samp, mode)
else:
raise TypeError('Not a Raw or Epochs or Evoked (got %s).' % type(inst))
return inst
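# Self-contained sketch (numpy/scipy only, not part of the public MNE API) of
# what the two modes above do to a single channel: 'window' tapers the artifact
# span with a (1 - hann) window, 'linear' replaces it with a straight line
# between the boundary samples.
def _fix_artifact_sketch():
    from scipy.signal import hann
    sig = np.sin(np.linspace(0, 4 * np.pi, 200))
    start, stop = 90, 110
    taper = 1 - np.r_[hann(4)[:2], np.ones(stop - start - 4), hann(4)[-2:]]
    windowed = sig.copy()
    windowed[start:stop] *= taper  # 'window' mode
    linear = sig.copy()
    linear[start:stop] = np.linspace(sig[start], sig[stop],  # 'linear' mode
                                     stop - start, endpoint=False)
    return windowed, linear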
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.metrics import eval_task
import numpy as np
import tensorflow as tf
class GeneratorConditionNumberTask(eval_task.EvalTask):
"""Computes the generator condition number.
  Computes the condition number of the metric tensor of the generator Jacobian.
This condition number is computed locally for each z sample in a minibatch.
Returns the mean log condition number and standard deviation across the
minibatch.
Follows the methods in https://arxiv.org/abs/1802.08768.
"""
_CONDITION_NUMBER_COUNT = "log_condition_number_count"
_CONDITION_NUMBER_MEAN = "log_condition_number_mean"
_CONDITION_NUMBER_STD = "log_condition_number_std"
def metric_list(self):
return frozenset([
self._CONDITION_NUMBER_COUNT, self._CONDITION_NUMBER_MEAN,
self._CONDITION_NUMBER_STD
])
def run_in_session(self, options, sess, gan, real_images):
del options, real_images
result_dict = {}
result = compute_generator_condition_number(sess, gan)
result_dict[self._CONDITION_NUMBER_COUNT] = len(result)
result_dict[self._CONDITION_NUMBER_MEAN] = np.mean(result)
result_dict[self._CONDITION_NUMBER_STD] = np.std(result)
return result_dict
def compute_generator_condition_number(sess, gan):
"""Computes the generator condition number.
Computes the Jacobian of the generator in session, then postprocesses to get
the condition number.
Args:
sess: tf.Session object.
gan: AbstractGAN object, that is already present in the current tf.Graph.
Returns:
A list of length gan.batch_size. Each element is the condition number
computed at a single z sample within a minibatch.
"""
shape = gan.fake_images.get_shape().as_list()
flat_generator_output = tf.reshape(
gan.fake_images, [gan.batch_size, np.prod(shape[1:])])
tf_jacobian = compute_jacobian(
xs=gan.z, fx=flat_generator_output)
z_sample = gan.z_generator(gan.batch_size, gan.z_dim)
np_jacobian = sess.run(tf_jacobian, feed_dict={gan.z: z_sample})
result_dict = analyze_jacobian(np_jacobian)
return result_dict["metric_tensor"]["log_condition_number"]
def compute_jacobian(xs, fx):
"""Computes df/dx matrix.
We assume x and fx are both batched, so the shape of the Jacobian is:
[fx.shape[0]] + fx.shape[1:] + xs.shape[1:]
This function computes the grads inside a TF loop so that we don't
end up storing many extra copies of the function we are taking the
Jacobian of.
Args:
xs: input tensor(s) of arbitrary shape.
fx: f(x) tensor of arbitrary shape.
Returns:
df/dx tensor of shape [fx.shape[0], fx.shape[1], xs.shape[1]].
"""
# Declares an iterator and tensor array loop variables for the gradients.
n = fx.get_shape().as_list()[1]
loop_vars = [tf.constant(0, tf.int32), tf.TensorArray(xs.dtype, n)]
def accumulator(j, result):
return (j + 1, result.write(j, tf.gradients(fx[:, j], xs)[0]))
# Iterates over all elements of the gradient and computes all partial
# derivatives.
_, df_dxs = tf.while_loop(lambda j, _: j < n, accumulator, loop_vars)
df_dx = df_dxs.stack()
df_dx = tf.transpose(df_dx, perm=[1, 0, 2])
return df_dx
def _analyze_metric_tensor(metric_tensor):
"""Analyzes a metric tensor.
Args:
metric_tensor: A numpy array of shape [batch, dim, dim]
Returns:
    A dict containing spectral statistics.
"""
# eigenvalues will have shape [batch, dim].
eigenvalues, _ = np.linalg.eig(metric_tensor)
# Shape [batch,].
condition_number = np.linalg.cond(metric_tensor)
log_condition_number = np.log(condition_number)
(_, logdet) = np.linalg.slogdet(metric_tensor)
return {
"eigenvalues": eigenvalues,
"logdet": logdet,
"log_condition_number": log_condition_number
}
def analyze_jacobian(jacobian_array):
"""Computes eigenvalue statistics of the Jacobian.
Computes the eigenvalues and condition number of the metric tensor for the
Jacobian evaluated at each element of the batch and the mean metric tensor
across the batch.
Args:
jacobian_array: A numpy array holding the Jacobian.
Returns:
A dict of spectral statistics with two elements, one containing stats
for every metric tensor in the batch, another for the mean metric tensor.
"""
# Shape [batch, x_dim, fx_dim].
jacobian_transpose = np.transpose(jacobian_array, [0, 2, 1])
# Shape [batch, x_dim, x_dim].
metric_tensor = np.matmul(jacobian_transpose, jacobian_array)
mean_metric_tensor = np.mean(metric_tensor, 0)
# Reshapes to have a dummy batch dimension.
mean_metric_tensor = np.reshape(mean_metric_tensor,
(1,) + metric_tensor.shape[1:])
return {
"metric_tensor": _analyze_metric_tensor(metric_tensor),
"mean_metric_tensor": _analyze_metric_tensor(mean_metric_tensor)
}
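# Pure-numpy sketch (not part of the evaluation task) of the statistic reported
# above: for a toy Jacobian J of shape [batch, fx_dim, x_dim], the metric
# tensor is J^T J and the reported quantity is the log of its condition number.
def _condition_number_sketch():
  rng = np.random.RandomState(0)
  jacobian = rng.normal(size=(4, 8, 3))  # [batch, fx_dim, x_dim]
  jacobian_transpose = np.transpose(jacobian, [0, 2, 1])
  metric_tensor = np.matmul(jacobian_transpose, jacobian)  # [batch, 3, 3]
  return np.log(np.linalg.cond(metric_tensor))  # shape [batch]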
|
import copy
import datetime
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import spark_service
BENCHMARK_NAME = 'hadoop_terasort'
BENCHMARK_CONFIG = """
hadoop_terasort:
description: Run the Apache Hadoop MapReduce Terasort benchmark on a cluster.
spark_service:
service_type: managed
worker_group:
vm_spec:
GCP:
machine_type: n1-standard-4
boot_disk_size: 500
AWS:
machine_type: m4.xlarge
vm_count: 2
"""
TERAGEN = 'teragen'
TERASORT = 'terasort'
TERAVALIDATE = 'teravalidate'
flags.DEFINE_integer('terasort_num_rows', 10000,
'Number of 100-byte rows to generate.')
flags.DEFINE_string('terasort_unsorted_dir', 'tera_gen_data', 'Location of '
'the unsorted data. TeraGen writes here, and TeraSort '
'reads from here.')
flags.DEFINE_string('terasort_data_base', 'terasort_data/',
'The benchmark will append to this to create three '
'directories: one for the generated, unsorted data, '
'one for the sorted data, and one for the validate '
'data. If using a static cluster or if using object '
                    'storage buckets, you must clean up.')
flags.DEFINE_bool('terasort_append_timestamp', True, 'Append a timestamp to '
'the directories given by terasort_unsorted_dir, '
'terasort_sorted_dir, and terasort_validate_dir')
FLAGS = flags.FLAGS
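# Standalone sketch (no PerfKit objects) of how Run() derives the three stage
# directories from --terasort_data_base and the optional timestamp suffix
# controlled by --terasort_append_timestamp.
def _stage_dirs_sketch(base='terasort_data/', timestamp='20200101120000'):
  base_dir = base + timestamp + '/' if timestamp else base
  return (base_dir + 'unsorted', base_dir + 'sorted', base_dir + 'validate')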
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
pass
def Run(benchmark_spec):
"""Executes the given jar on the specified Spark cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
spark_cluster = benchmark_spec.spark_service
start = datetime.datetime.now()
terasort_jar = spark_cluster.GetExampleJar(spark_service.HADOOP_JOB_TYPE)
results = []
metadata = copy.copy(spark_cluster.GetMetadata())
logging.info('metadata %s ' % str(metadata))
base_dir = FLAGS.terasort_data_base
if FLAGS.terasort_append_timestamp:
    time_string = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
base_dir += time_string
base_dir += '/'
unsorted_dir = base_dir + 'unsorted'
sorted_dir = base_dir + 'sorted'
validate_dir = base_dir + 'validate'
metadata.update({'terasort_num_rows': FLAGS.terasort_num_rows,
'terasort_sorted_dir': sorted_dir,
'terasort_unsorted_dir': unsorted_dir,
'terasort_validate_dir': validate_dir})
gen_args = [TERAGEN, str(FLAGS.terasort_num_rows), unsorted_dir]
sort_args = [TERASORT, unsorted_dir, sorted_dir]
validate_args = [TERAVALIDATE, sorted_dir, validate_dir]
stages = [('generate', gen_args),
('sort', sort_args),
('validate', validate_args)]
cumulative_runtime = 0
for (label, args) in stages:
stats = spark_cluster.SubmitJob(terasort_jar,
None,
job_type=spark_service.HADOOP_JOB_TYPE,
job_arguments=args)
if not stats[spark_service.SUCCESS]:
raise Exception('Stage {0} unsuccessful'.format(label))
current_time = datetime.datetime.now()
wall_time = (current_time - start).total_seconds()
results.append(sample.Sample(label + '_wall_time',
wall_time,
'seconds', metadata))
start = current_time
if spark_service.RUNTIME in stats:
results.append(sample.Sample(label + '_runtime',
stats[spark_service.RUNTIME],
'seconds', metadata))
cumulative_runtime += stats[spark_service.RUNTIME]
if spark_service.WAITING in stats:
results.append(sample.Sample(label + '_pending_time',
stats[spark_service.WAITING],
'seconds', metadata))
results.append(sample.Sample('cumulative_runtime',
cumulative_runtime,
'seconds', metadata))
if not spark_cluster.user_managed:
create_time = (spark_cluster.resource_ready_time -
spark_cluster.create_start_time)
results.append(sample.Sample('cluster_create_time',
create_time,
'seconds', metadata))
return results
def Cleanup(benchmark_spec):
pass
|
import numpy as np
from hypertools.tools.align import align
from hypertools.tools.load import load
# weights = load('weights')
weights = [np.random.rand(10, 300) for i in range(3)]
geo = load('spiral')
data1 = geo.get_data()[0]
def test_procrustes():
rot = np.array([[-0.89433495, -0.44719485, -0.01348182],
[-0.43426149, 0.87492975, -0.21427761],
[-0.10761949, 0.18578133, 0.97667976]])
data2 = np.dot(data1, rot)
result = align([data1,data2])
assert np.allclose(result[0],result[1])
def test_hyper():
rot = np.array([[-0.89433495, -0.44719485, -0.01348182],
[-0.43426149, 0.87492975, -0.21427761],
[-0.10761949, 0.18578133, 0.97667976]])
data2 = np.dot(data1, rot)
result = align([data1,data2], align='hyper')
assert np.allclose(result[0],result[1], rtol=1) #note: this tolerance is probably too high, but fails at anything lower
def test_SRM():
rot = np.array([[-0.89433495, -0.44719485, -0.01348182],
[-0.43426149, 0.87492975, -0.21427761],
[-0.10761949, 0.18578133, 0.97667976]])
data2 = np.dot(data1, rot)
result = align([data1,data2], align='SRM')
assert np.allclose(result[0],result[1], rtol=1)
def test_align_shapes():
# Should return data with the same shape as input data
aligned = align(weights)
assert all(al.shape == wt.shape for al, wt in zip(aligned, weights))
def test_align_geo():
aligned = align(geo)
assert np.allclose(aligned[0], aligned[1])
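# Hypothetical extra check (not in the original suite): a random proper
# rotation built via QR decomposition should be recovered just like the
# hard-coded rotation used in the tests above.
def test_procrustes_random_rotation_sketch():
    rng = np.random.RandomState(0)
    rot, _ = np.linalg.qr(rng.randn(3, 3))
    if np.linalg.det(rot) < 0:  # flip one axis to get a rotation, not a reflection
        rot[:, 0] *= -1
    data2 = np.dot(data1, rot)
    result = align([data1, data2])
    assert np.allclose(result[0], result[1])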
|
from __future__ import absolute_import, unicode_literals
import os
import pytest
import kaptan
from tmuxp import config, exc
from . import example_dir
from .fixtures import config as fixtures
TMUXP_DIR = os.path.join(os.path.dirname(__file__), '.tmuxp')
def load_yaml(yaml):
return kaptan.Kaptan(handler='yaml').import_config(yaml).get()
def load_config(_file):
return kaptan.Kaptan().import_config(_file).get()
def test_export_json(tmpdir):
json_config_file = tmpdir.join('config.json')
configparser = kaptan.Kaptan()
configparser.import_config(fixtures.sampleconfig.sampleconfigdict)
json_config_data = configparser.export('json', indent=2)
json_config_file.write(json_config_data)
new_config = kaptan.Kaptan()
new_config_data = new_config.import_config(str(json_config_file)).get()
assert fixtures.sampleconfig.sampleconfigdict == new_config_data
def test_export_yaml(tmpdir):
yaml_config_file = tmpdir.join('config.yaml')
configparser = kaptan.Kaptan()
sampleconfig = config.inline(fixtures.sampleconfig.sampleconfigdict)
configparser.import_config(sampleconfig)
yaml_config_data = configparser.export('yaml', indent=2, default_flow_style=False)
yaml_config_file.write(yaml_config_data)
new_config_data = load_config(str(yaml_config_file))
assert fixtures.sampleconfig.sampleconfigdict == new_config_data
def test_scan_config(tmpdir):
configs = []
garbage_file = tmpdir.join('config.psd')
garbage_file.write('wat')
for r, d, f in os.walk(str(tmpdir)):
        for filela in (x for x in f if x.endswith(('.json', '.ini', '.yaml'))):
configs.append(str(tmpdir.join(filela)))
files = 0
if tmpdir.join('config.json').check():
files += 1
assert str(tmpdir.join('config.json')) in configs
if tmpdir.join('config.yaml').check():
files += 1
assert str(tmpdir.join('config.yaml')) in configs
if tmpdir.join('config.ini').check():
files += 1
assert str(tmpdir.join('config.ini')) in configs
assert len(configs) == files
def test_config_expand1():
"""Expand shell commands from string to list."""
test_config = config.expand(fixtures.expand1.before_config)
assert test_config == fixtures.expand1.after_config
def test_config_expand2():
"""Expand shell commands from string to list."""
unexpanded_dict = load_yaml(fixtures.expand2.unexpanded_yaml)
expanded_dict = load_yaml(fixtures.expand2.expanded_yaml)
assert config.expand(unexpanded_dict) == expanded_dict
"""Tests for :meth:`config.inline()`."""
ibefore_config = { # inline config
'session_name': 'sampleconfig',
'start_directory': '~',
'windows': [
{
'shell_command': ['top'],
'window_name': 'editor',
'panes': [{'shell_command': ['vim']}, {'shell_command': ['cowsay "hey"']}],
'layout': 'main-verticle',
},
{
'window_name': 'logging',
'panes': [{'shell_command': ['tail -F /var/log/syslog']}],
},
{'options': {'automatic-rename': True}, 'panes': [{'shell_command': ['htop']}]},
],
}
iafter_config = {
'session_name': 'sampleconfig',
'start_directory': '~',
'windows': [
{
'shell_command': 'top',
'window_name': 'editor',
'panes': ['vim', 'cowsay "hey"'],
'layout': 'main-verticle',
},
{'window_name': 'logging', 'panes': ['tail -F /var/log/syslog']},
{'options': {'automatic-rename': True}, 'panes': ['htop']},
],
}
def test_inline_config():
""":meth:`config.inline()` shell commands list to string."""
test_config = config.inline(ibefore_config)
assert test_config == iafter_config
"""Test config inheritance for the nested 'start_command'."""
inheritance_config_before = {
'session_name': 'sampleconfig',
'start_directory': '/',
'windows': [
{
'window_name': 'editor',
'start_directory': '~',
'panes': [{'shell_command': ['vim']}, {'shell_command': ['cowsay "hey"']}],
'layout': 'main-verticle',
},
{
'window_name': 'logging',
'panes': [{'shell_command': ['tail -F /var/log/syslog']}],
},
{'window_name': 'shufu', 'panes': [{'shell_command': ['htop']}]},
{'options': {'automatic-rename': True}, 'panes': [{'shell_command': ['htop']}]},
],
}
inheritance_config_after = {
'session_name': 'sampleconfig',
'start_directory': '/',
'windows': [
{
'window_name': 'editor',
'start_directory': '~',
'panes': [{'shell_command': ['vim']}, {'shell_command': ['cowsay "hey"']}],
'layout': 'main-verticle',
},
{
'window_name': 'logging',
'panes': [{'shell_command': ['tail -F /var/log/syslog']}],
},
{'window_name': 'shufu', 'panes': [{'shell_command': ['htop']}]},
{'options': {'automatic-rename': True}, 'panes': [{'shell_command': ['htop']}]},
],
}
def test_inheritance_config():
config = inheritance_config_before
# TODO: Look at verifying window_start_directory
# if 'start_directory' in config:
# session_start_directory = config['start_directory']
# else:
# session_start_directory = None
# for windowconfitem in config['windows']:
# window_start_directory = None
#
# if 'start_directory' in windowconfitem:
# window_start_directory = windowconfitem['start_directory']
# elif session_start_directory:
# window_start_directory = session_start_directory
#
# for paneconfitem in windowconfitem['panes']:
# if 'start_directory' in paneconfitem:
# pane_start_directory = paneconfitem['start_directory']
# elif window_start_directory:
# paneconfitem['start_directory'] = window_start_directory
# elif session_start_directory:
# paneconfitem['start_directory'] = session_start_directory
assert config == inheritance_config_after
def test_shell_command_before():
"""Config inheritance for the nested 'start_command'."""
test_config = fixtures.shell_command_before.config_unexpanded
test_config = config.expand(test_config)
assert test_config == fixtures.shell_command_before.config_expanded
test_config = config.trickle(test_config)
assert test_config == fixtures.shell_command_before.config_after
def test_in_session_scope():
sconfig = load_yaml(fixtures.shell_command_before_session.before)
config.validate_schema(sconfig)
assert config.expand(sconfig) == sconfig
assert config.expand(config.trickle(sconfig)) == load_yaml(
fixtures.shell_command_before_session.expected
)
def test_trickle_relative_start_directory():
test_config = config.trickle(fixtures.trickle.before)
assert test_config == fixtures.trickle.expected
def test_trickle_window_with_no_pane_config():
test_yaml = """
session_name: test_session
windows:
- window_name: test_1
panes:
- shell_command:
- ls -l
- window_name: test_no_panes
"""
sconfig = load_yaml(test_yaml)
config.validate_schema(sconfig)
assert config.expand(config.trickle(sconfig))['windows'][1]['panes'][0] == {
'shell_command': []
}
def test_expands_blank_panes():
"""Expand blank config into full form.
Handle ``NoneType`` and 'blank'::
# nothing, None, 'blank'
'panes': [
None,
'blank'
]
# should be blank
'panes': [
'shell_command': []
]
Blank strings::
panes: [
''
]
# should output to:
panes:
'shell_command': ['']
"""
yaml_config_file = os.path.join(example_dir, 'blank-panes.yaml')
test_config = load_config(yaml_config_file)
assert config.expand(test_config) == fixtures.expand_blank.expected
def test_no_session_name():
yaml_config = """
- window_name: editor
panes:
shell_command:
- tail -F /var/log/syslog
start_directory: /var/log
- window_name: logging
automatic-rename: true
panes:
- shell_command:
- htop
"""
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
with pytest.raises(exc.ConfigError) as excinfo:
config.validate_schema(sconfig)
    assert excinfo.match(r'requires "session_name"')
def test_no_windows():
yaml_config = """
session_name: test session
"""
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
with pytest.raises(exc.ConfigError) as excinfo:
config.validate_schema(sconfig)
assert excinfo.match(r'list of "windows"')
def test_no_window_name():
yaml_config = """
session_name: test session
windows:
- window_name: editor
panes:
shell_command:
- tail -F /var/log/syslog
start_directory: /var/log
- automatic-rename: true
panes:
- shell_command:
- htop
"""
sconfig = kaptan.Kaptan(handler='yaml')
sconfig = sconfig.import_config(yaml_config).get()
with pytest.raises(exc.ConfigError) as excinfo:
config.validate_schema(sconfig)
    assert excinfo.match('missing "window_name"')
def test_replaces_env_variables(monkeypatch):
env_key = "TESTHEY92"
env_val = "HEYO1"
yaml_config = """
start_directory: {TEST_VAR}/test
shell_command_before: {TEST_VAR}/test2
before_script: {TEST_VAR}/test3
session_name: hi - {TEST_VAR}
options:
default-command: {TEST_VAR}/lol
global_options:
default-shell: {TEST_VAR}/moo
windows:
- window_name: editor
panes:
- shell_command:
- tail -F /var/log/syslog
start_directory: /var/log
- window_name: logging @ {TEST_VAR}
automatic-rename: true
panes:
- shell_command:
- htop
""".format(
TEST_VAR="${%s}" % env_key
)
sconfig = load_yaml(yaml_config)
monkeypatch.setenv(str(env_key), str(env_val))
sconfig = config.expand(sconfig)
assert "%s/test" % env_val == sconfig['start_directory']
assert "%s/test2" % env_val in sconfig['shell_command_before']
assert "%s/test3" % env_val == sconfig['before_script']
assert "hi - %s" % env_val == sconfig['session_name']
assert "%s/moo" % env_val == sconfig['global_options']['default-shell']
assert "%s/lol" % env_val == sconfig['options']['default-command']
assert "logging @ %s" % env_val == sconfig['windows'][1]['window_name']
|
import time
import json
import arrow
from flask_script import Manager
from flask import current_app
from lemur.extensions import sentry
from lemur.constants import SUCCESS_METRIC_STATUS
from lemur.plugins import plugins
from lemur.plugins.lemur_acme.plugin import AcmeHandler
from lemur.plugins.lemur_aws import s3
manager = Manager(
usage="Handles all ACME related tasks"
)
@manager.option(
"-d",
"--domain",
dest="domain",
required=True,
help="Name of the Domain to store to (ex. \"_acme-chall.test.com\".",
)
@manager.option(
"-t",
"--token",
dest="token",
required=True,
help="Value of the Token to store in DNS as content.",
)
def dnstest(domain, token):
"""
Create, verify, and delete DNS TXT records using an autodetected provider.
"""
print("[+] Starting ACME Tests.")
change_id = (domain, token)
acme_handler = AcmeHandler()
acme_handler.autodetect_dns_providers(domain)
if not acme_handler.dns_providers_for_domain[domain]:
raise Exception(f"No DNS providers found for domain: {format(domain)}.")
# Create TXT Records
for dns_provider in acme_handler.dns_providers_for_domain[domain]:
dns_provider_plugin = acme_handler.get_dns_provider(dns_provider.provider_type)
dns_provider_options = json.loads(dns_provider.credentials)
account_number = dns_provider_options.get("account_id")
print(f"[+] Creating TXT Record in `{dns_provider.name}` provider")
change_id = dns_provider_plugin.create_txt_record(domain, token, account_number)
print("[+] Verifying TXT Record has propagated to DNS.")
print("[+] This step could take a while...")
time.sleep(10)
# Verify TXT Records
for dns_provider in acme_handler.dns_providers_for_domain[domain]:
dns_provider_plugin = acme_handler.get_dns_provider(dns_provider.provider_type)
dns_provider_options = json.loads(dns_provider.credentials)
account_number = dns_provider_options.get("account_id")
try:
dns_provider_plugin.wait_for_dns_change(change_id, account_number)
print(f"[+] Verified TXT Record in `{dns_provider.name}` provider")
except Exception:
sentry.captureException()
current_app.logger.debug(
f"Unable to resolve DNS challenge for change_id: {change_id}, account_id: "
f"{account_number}",
exc_info=True,
)
print(f"[+] Unable to Verify TXT Record in `{dns_provider.name}` provider")
time.sleep(10)
# Delete TXT Records
for dns_provider in acme_handler.dns_providers_for_domain[domain]:
dns_provider_plugin = acme_handler.get_dns_provider(dns_provider.provider_type)
dns_provider_options = json.loads(dns_provider.credentials)
account_number = dns_provider_options.get("account_id")
        # TODO(csine@): Add exception handling
dns_provider_plugin.delete_txt_record(change_id, account_number, domain, token)
print(f"[+] Deleted TXT Record in `{dns_provider.name}` provider")
status = SUCCESS_METRIC_STATUS
print("[+] Done with ACME Tests.")
@manager.option(
"-t",
"--token",
dest="token",
default="date: " + arrow.utcnow().format("YYYY-MM-DDTHH-mm-ss"),
required=False,
help="Value of the Token",
)
@manager.option(
"-n",
"--token_name",
dest="token_name",
default="Token-" + arrow.utcnow().format("YYYY-MM-DDTHH-mm-ss"),
required=False,
help="path",
)
@manager.option(
"-p",
"--prefix",
dest="prefix",
default="test/",
required=False,
help="S3 bucket prefix",
)
@manager.option(
"-a",
"--account_number",
dest="account_number",
required=True,
help="AWS Account",
)
@manager.option(
"-b",
"--bucket_name",
dest="bucket_name",
required=True,
help="Bucket Name",
)
def upload_acme_token_s3(token, token_name, prefix, account_number, bucket_name):
"""
    Test helper: uploads an ACME token to S3, fetches it back to verify the
    contents, and then deletes it.
:param token:
:param token_name:
:param prefix:
:param account_number:
:param bucket_name:
:return:
"""
additional_options = [
{
"name": "bucket",
"value": bucket_name,
"type": "str",
"required": True,
"validation": r"[0-9a-z.-]{3,63}",
"helpMessage": "Must be a valid S3 bucket name!",
},
{
"name": "accountNumber",
"type": "str",
"value": account_number,
"required": True,
"validation": r"[0-9]{12}",
"helpMessage": "A valid AWS account number with permission to access S3",
},
{
"name": "region",
"type": "str",
"default": "us-east-1",
"required": False,
"helpMessage": "Region bucket exists",
"available": ["us-east-1", "us-west-2", "eu-west-1"],
},
{
"name": "encrypt",
"type": "bool",
"value": False,
"required": False,
"helpMessage": "Enable server side encryption",
"default": True,
},
{
"name": "prefix",
"type": "str",
"value": prefix,
"required": False,
"helpMessage": "Must be a valid S3 object prefix!",
},
]
p = plugins.get("aws-s3")
p.upload_acme_token(token_name, token, additional_options)
    if not prefix.endswith("/"):
        prefix += "/"
token_res = s3.get(bucket_name, prefix + token_name, account_number=account_number)
assert(token_res == token)
s3.delete(bucket_name, prefix + token_name, account_number=account_number)
|
from __future__ import division
import argparse
import multiprocessing
import numpy as np
import PIL
import chainer
import chainer.functions as F
import chainer.links as L
from chainer.optimizer_hooks import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
import chainermn
from chainercv.chainer_experimental.datasets.sliceable import TransformDataset
from chainercv.chainer_experimental.training.extensions import make_shift
from chainercv.links.model.fpn.misc import scale_img
from chainercv import transforms
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.links import MaskRCNNFPNResNet101
from chainercv.links import MaskRCNNFPNResNet50
from chainercv.datasets import coco_bbox_label_names
from chainercv.datasets import COCOBboxDataset
from chainercv.links import FasterRCNNFPNResNet101
from chainercv.links import FasterRCNNFPNResNet50
from chainercv.links.model.fpn import bbox_head_loss_post
from chainercv.links.model.fpn import bbox_head_loss_pre
from chainercv.links.model.fpn import mask_head_loss_post
from chainercv.links.model.fpn import mask_head_loss_pre
from chainercv.links.model.fpn import rpn_loss
# https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator
try:
import cv2
cv2.setNumThreads(0)
except ImportError:
pass
class TrainChain(chainer.Chain):
def __init__(self, model):
super(TrainChain, self).__init__()
with self.init_scope():
self.model = model
def forward(self, imgs, bboxes, labels, masks=None):
B = len(imgs)
pad_size = np.array(
[im.shape[1:] for im in imgs]).max(axis=0)
pad_size = (
np.ceil(
pad_size / self.model.stride) * self.model.stride).astype(int)
x = np.zeros(
(len(imgs), 3, pad_size[0], pad_size[1]), dtype=np.float32)
for i, img in enumerate(imgs):
_, H, W = img.shape
x[i, :, :H, :W] = img
x = self.xp.array(x)
bboxes = [self.xp.array(bbox) for bbox in bboxes]
labels = [self.xp.array(label) for label in labels]
sizes = [img.shape[1:] for img in imgs]
with chainer.using_config('train', False):
hs = self.model.extractor(x)
rpn_locs, rpn_confs = self.model.rpn(hs)
anchors = self.model.rpn.anchors(h.shape[2:] for h in hs)
rpn_loc_loss, rpn_conf_loss = rpn_loss(
rpn_locs, rpn_confs, anchors, sizes, bboxes)
rois, roi_indices = self.model.rpn.decode(
rpn_locs, rpn_confs, anchors, x.shape)
rois = self.xp.vstack([rois] + bboxes)
roi_indices = self.xp.hstack(
[roi_indices]
+ [self.xp.array((i,) * len(bbox))
for i, bbox in enumerate(bboxes)])
rois, roi_indices = self.model.bbox_head.distribute(rois, roi_indices)
rois, roi_indices, head_gt_locs, head_gt_labels = bbox_head_loss_pre(
rois, roi_indices, self.model.bbox_head.std, bboxes, labels)
head_locs, head_confs = self.model.bbox_head(hs, rois, roi_indices)
head_loc_loss, head_conf_loss = bbox_head_loss_post(
head_locs, head_confs,
roi_indices, head_gt_locs, head_gt_labels, B)
mask_loss = 0
if masks is not None:
# For reducing unnecessary CPU/GPU copy, `masks` is kept in CPU.
pad_masks = [
np.zeros(
(mask.shape[0], pad_size[0], pad_size[1]), dtype=np.bool)
for mask in masks]
for i, mask in enumerate(masks):
_, H, W = mask.shape
pad_masks[i][:, :H, :W] = mask
masks = pad_masks
mask_rois, mask_roi_indices, gt_segms, gt_mask_labels =\
mask_head_loss_pre(
rois, roi_indices, masks, bboxes,
head_gt_labels, self.model.mask_head.segm_size)
n_roi = sum([len(roi) for roi in mask_rois])
if n_roi > 0:
segms = self.model.mask_head(hs, mask_rois, mask_roi_indices)
mask_loss = mask_head_loss_post(
segms, mask_roi_indices, gt_segms, gt_mask_labels, B)
else:
# Compute dummy variables to complete the computational graph
mask_rois[0] = self.xp.array([[0, 0, 1, 1]], dtype=np.float32)
mask_roi_indices[0] = self.xp.array([0], dtype=np.int32)
segms = self.model.mask_head(hs, mask_rois, mask_roi_indices)
mask_loss = 0 * F.sum(segms)
loss = (rpn_loc_loss + rpn_conf_loss +
head_loc_loss + head_conf_loss + mask_loss)
chainer.reporter.report({
'loss': loss,
'loss/rpn/loc': rpn_loc_loss, 'loss/rpn/conf': rpn_conf_loss,
'loss/bbox_head/loc': head_loc_loss,
'loss/bbox_head/conf': head_conf_loss,
'loss/mask_head': mask_loss},
self)
return loss
class Transform(object):
def __init__(self, min_size, max_size, mean):
self.min_size = min_size
self.max_size = max_size
self.mean = mean
def __call__(self, in_data):
if len(in_data) == 4:
img, mask, label, bbox = in_data
else:
img, bbox, label = in_data
# Flipping
img, params = transforms.random_flip(
img, x_random=True, return_param=True)
x_flip = params['x_flip']
bbox = transforms.flip_bbox(
bbox, img.shape[1:], x_flip=x_flip)
# Scaling and mean subtraction
img, scale = scale_img(
img, self.min_size, self.max_size)
img -= self.mean
bbox = bbox * scale
if len(in_data) == 4:
mask = transforms.flip(mask, x_flip=x_flip)
mask = transforms.resize(
mask.astype(np.float32),
img.shape[1:],
interpolation=PIL.Image.NEAREST).astype(np.bool)
return img, bbox, label, mask
else:
return img, bbox, label
def converter(batch, device=None):
# do not send data to gpu (device is ignored)
return tuple(list(v) for v in zip(*batch))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
choices=('mask_rcnn_fpn_resnet50', 'mask_rcnn_fpn_resnet101',
'faster_rcnn_fpn_resnet50', 'faster_rcnn_fpn_resnet101'),
default='faster_rcnn_fpn_resnet50')
parser.add_argument('--batchsize', type=int, default=16)
parser.add_argument('--iteration', type=int, default=90000)
parser.add_argument('--step', type=int, nargs='*', default=[60000, 80000])
parser.add_argument('--out', default='result')
parser.add_argument('--resume')
args = parser.parse_args()
# https://docs.chainer.org/en/stable/chainermn/tutorial/tips_faqs.html#using-multiprocessiterator
if hasattr(multiprocessing, 'set_start_method'):
multiprocessing.set_start_method('forkserver')
p = multiprocessing.Process()
p.start()
p.join()
comm = chainermn.create_communicator('pure_nccl')
device = comm.intra_rank
if args.model == 'faster_rcnn_fpn_resnet50':
mode = 'bbox'
model = FasterRCNNFPNResNet50(
n_fg_class=len(coco_bbox_label_names),
pretrained_model='imagenet')
elif args.model == 'faster_rcnn_fpn_resnet101':
mode = 'bbox'
model = FasterRCNNFPNResNet101(
n_fg_class=len(coco_bbox_label_names),
pretrained_model='imagenet')
elif args.model == 'mask_rcnn_fpn_resnet50':
mode = 'instance_segmentation'
model = MaskRCNNFPNResNet50(
n_fg_class=len(coco_instance_segmentation_label_names),
pretrained_model='imagenet')
elif args.model == 'mask_rcnn_fpn_resnet101':
mode = 'instance_segmentation'
model = MaskRCNNFPNResNet101(
n_fg_class=len(coco_instance_segmentation_label_names),
pretrained_model='imagenet')
model.use_preset('evaluate')
train_chain = TrainChain(model)
chainer.cuda.get_device_from_id(device).use()
train_chain.to_gpu()
if mode == 'bbox':
train = TransformDataset(
COCOBboxDataset(year='2017', split='train'),
('img', 'bbox', 'label'),
Transform(800, 1333, model.extractor.mean))
elif mode == 'instance_segmentation':
train = TransformDataset(
COCOInstanceSegmentationDataset(split='train', return_bbox=True),
('img', 'bbox', 'label', 'mask'),
Transform(800, 1333, model.extractor.mean))
if comm.rank == 0:
indices = np.arange(len(train))
else:
indices = None
indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
train = train.slice[indices]
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize // comm.size,
n_processes=args.batchsize // comm.size,
shared_mem=100 * 1000 * 1000 * 4)
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.MomentumSGD(), comm)
optimizer.setup(train_chain)
optimizer.add_hook(WeightDecay(0.0001))
model.extractor.base.conv1.disable_update()
model.extractor.base.res2.disable_update()
for link in model.links():
if isinstance(link, L.BatchNormalization):
link.disable_update()
n_iteration = args.iteration * 16 / args.batchsize
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=converter, device=device)
trainer = training.Trainer(
updater, (n_iteration, 'iteration'), args.out)
@make_shift('lr')
def lr_schedule(trainer):
base_lr = 0.02 * args.batchsize / 16
warm_up_duration = 500
warm_up_rate = 1 / 3
iteration = trainer.updater.iteration
if iteration < warm_up_duration:
rate = warm_up_rate \
+ (1 - warm_up_rate) * iteration / warm_up_duration
else:
rate = 1
for step in args.step:
if iteration >= step * 16 / args.batchsize:
rate *= 0.1
return base_lr * rate
trainer.extend(lr_schedule)
if comm.rank == 0:
log_interval = 10, 'iteration'
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'lr', 'main/loss',
'main/loss/rpn/loc', 'main/loss/rpn/conf',
'main/loss/bbox_head/loc', 'main/loss/bbox_head/conf',
'main/loss/mask_head'
]),
trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(extensions.snapshot(), trigger=(10000, 'iteration'))
trainer.extend(
extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'),
trigger=(n_iteration, 'iteration'))
if args.resume:
serializers.load_npz(args.resume, trainer, strict=False)
trainer.run()
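# Standalone sketch (defaults mirror the arguments above; not part of the
# training script's API) of the learning-rate schedule defined inside main():
# a linear warm-up from one third of the base rate over the first 500
# iterations, then a x0.1 decay at every (batch-size scaled) step boundary.
def _lr_schedule_sketch(iteration, batchsize=16, steps=(60000, 80000)):
    base_lr = 0.02 * batchsize / 16
    warm_up_duration = 500
    warm_up_rate = 1 / 3
    if iteration < warm_up_duration:
        rate = warm_up_rate + (1 - warm_up_rate) * iteration / warm_up_duration
    else:
        rate = 1
    for step in steps:
        if iteration >= step * 16 / batchsize:
            rate *= 0.1
    return base_lr * rate  # e.g. 0.02 at iteration 500, 0.002 after 60000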
if __name__ == '__main__':
main()
|
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import CONF_ADS_VAR, DATA_ADS, STATE_KEY_STATE, AdsEntity
DEFAULT_NAME = "ADS Switch"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ADS_VAR): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up switch platform for ADS."""
ads_hub = hass.data.get(DATA_ADS)
name = config[CONF_NAME]
ads_var = config[CONF_ADS_VAR]
add_entities([AdsSwitch(ads_hub, name, ads_var)])
class AdsSwitch(AdsEntity, SwitchEntity):
"""Representation of an ADS switch device."""
async def async_added_to_hass(self):
"""Register device notification."""
await self.async_initialize_device(self._ads_var, self._ads_hub.PLCTYPE_BOOL)
@property
def is_on(self):
"""Return True if the entity is on."""
return self._state_dict[STATE_KEY_STATE]
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._ads_hub.write_by_name(self._ads_var, True, self._ads_hub.PLCTYPE_BOOL)
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._ads_hub.write_by_name(self._ads_var, False, self._ads_hub.PLCTYPE_BOOL)
|
import base
from docker_registry.core import compat
json = compat.json
class TestTags(base.TestCase):
def test_simple(self, repos_name=None):
if repos_name is None:
repos_name = self.gen_random_string()
image_id = self.gen_hex_string()
layer_data = self.gen_random_string(1024)
self.upload_image(image_id, parent_id=None, layer=layer_data)
# test tags create
url = '/v1/repositories/foo/{0}/tags/latest'.format(repos_name)
headers = {'User-Agent':
'docker/0.7.2-dev go/go1.2 os/ostest arch/archtest'}
resp = self.http_client.put(url,
headers=headers,
data=json.dumps(image_id))
self.assertEqual(resp.status_code, 200, resp.data)
url = '/v1/repositories/foo/{0}/tags/test'.format(repos_name)
resp = self.http_client.put(url,
data=json.dumps(image_id))
self.assertEqual(resp.status_code, 200, resp.data)
# test tags read
url = '/v1/repositories/foo/{0}/tags/latest'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 200, resp.data)
# Note(dmp): unicode patch XXX not applied assume requests does the job
self.assertEqual(json.loads(resp.data), image_id, resp.data)
# test repository json
url = '/v1/repositories/foo/{0}/json'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 200, resp.data)
# Note(dmp): unicode patch XXX not applied assume requests does the job
props = json.loads(resp.data)
self.assertEqual(props['docker_version'], '0.7.2-dev')
self.assertEqual(props['docker_go_version'], 'go1.2')
self.assertEqual(props['os'], 'ostest')
self.assertEqual(props['arch'], 'archtest')
# test repository tags json
url = '/v1/repositories/foo/{0}/tags/latest/json'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 200, resp.data)
# Note(dmp): unicode patch XXX not applied assume requests does the job
props = json.loads(resp.data)
self.assertEqual(props['docker_version'], '0.7.2-dev')
self.assertEqual(props['docker_go_version'], 'go1.2')
self.assertEqual(props['os'], 'ostest')
self.assertEqual(props['arch'], 'archtest')
# test tags update
url = '/v1/repositories/foo/{0}/tags/latest'.format(repos_name)
headers = {'User-Agent':
'docker/0.7.2-dev go/go1.2 os/ostest arch/changedarch'}
resp = self.http_client.put(url,
headers=headers,
data=json.dumps(image_id))
self.assertEqual(resp.status_code, 200, resp.data)
url = '/v1/repositories/foo/{0}/tags/test'.format(repos_name)
resp = self.http_client.put(url,
headers=headers,
data=json.dumps(image_id))
self.assertEqual(resp.status_code, 200, resp.data)
# test repository latest tag json update
url = '/v1/repositories/foo/{0}/tags/latest/json'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 200, resp.data)
# Note(dmp): unicode patch XXX not applied assume requests does the job
props = json.loads(resp.data)
self.assertEqual(props['docker_version'], '0.7.2-dev')
self.assertEqual(props['docker_go_version'], 'go1.2')
self.assertEqual(props['os'], 'ostest')
self.assertEqual(props['arch'], 'changedarch')
# test repository test tag json update
url = '/v1/repositories/foo/{0}/tags/test/json'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 200, resp.data)
# Note(dmp): unicode patch XXX not applied assume requests does the job
props = json.loads(resp.data)
self.assertEqual(props['docker_version'], '0.7.2-dev')
self.assertEqual(props['docker_go_version'], 'go1.2')
self.assertEqual(props['os'], 'ostest')
self.assertEqual(props['arch'], 'changedarch')
# test tags list
url = '/v1/repositories/foo/{0}/tags'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 200, resp.data)
# Note(dmp): unicode patch XXX not applied assume requests does the job
self.assertEqual(len(json.loads(resp.data)), 2, resp.data)
# test tag delete
url = '/v1/repositories/foo/{0}/tags/latest'.format(repos_name)
resp = self.http_client.delete(url)
self.assertEqual(resp.status_code, 200, resp.data)
url = '/v1/repositories/foo/{0}/tags'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 200, resp.data)
url = '/v1/repositories/foo/{0}/tags/latest'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 404, resp.data)
# test whole delete
url = '/v1/repositories/foo/{0}/'.format(repos_name)
resp = self.http_client.delete(url)
self.assertEqual(resp.status_code, 200, resp.data)
url = '/v1/repositories/foo/{0}/tags'.format(repos_name)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 404, resp.data)
def test_notfound(self):
notexist = self.gen_random_string()
url = '/v1/repositories/{0}/bar/tags'.format(notexist)
resp = self.http_client.get(url)
self.assertEqual(resp.status_code, 404, resp.data)
def test_special_chars(self):
repos_name = '{0}%$_-test'.format(self.gen_random_string(5))
self.test_simple(repos_name)
def test_tag_name_validation(self):
repos_name = self.gen_random_string()
image_id = self.gen_hex_string()
layer_data = self.gen_random_string(1024)
self.upload_image(image_id, parent_id=None, layer=layer_data)
headers = {'User-Agent':
'docker/0.7.2-dev go/go1.2 os/ostest arch/archtest'}
        def url(tag):
            return '/v1/repositories/foo/{0}/tags/{1}'.format(repos_name, tag)
tag_name = '$<invalid>'
resp = self.http_client.put(
url(tag_name), headers=headers, data=json.dumps(image_id)
)
self.assertEqual(resp.status_code, 400)
tag_name = '.invalid'
resp = self.http_client.put(
url(tag_name), headers=headers, data=json.dumps(image_id)
)
self.assertEqual(resp.status_code, 400)
tag_name = '-invalid'
resp = self.http_client.put(
url(tag_name), headers=headers, data=json.dumps(image_id)
)
self.assertEqual(resp.status_code, 400)
tag_name = '_valid'
resp = self.http_client.put(
url(tag_name), headers=headers, data=json.dumps(image_id)
)
self.assertEqual(resp.status_code, 200, resp.data)
|
from homeassistant.components.mobile_app.const import DATA_DELETED_IDS, DOMAIN
from .const import CALL_SERVICE
from tests.common import async_mock_service
async def test_unload_unloads(hass, create_registrations, webhook_client):
"""Test we clean up when we unload."""
# Second config entry is the one without encryption
config_entry = hass.config_entries.async_entries("mobile_app")[1]
webhook_id = config_entry.data["webhook_id"]
calls = async_mock_service(hass, "test", "mobile_app")
# Test it works
await webhook_client.post(f"/api/webhook/{webhook_id}", json=CALL_SERVICE)
assert len(calls) == 1
await hass.config_entries.async_unload(config_entry.entry_id)
# Test it no longer works
await webhook_client.post(f"/api/webhook/{webhook_id}", json=CALL_SERVICE)
assert len(calls) == 1
async def test_remove_entry(hass, create_registrations):
"""Test we clean up when we remove entry."""
for config_entry in hass.config_entries.async_entries("mobile_app"):
await hass.config_entries.async_remove(config_entry.entry_id)
assert config_entry.data["webhook_id"] in hass.data[DOMAIN][DATA_DELETED_IDS]
dev_reg = await hass.helpers.device_registry.async_get_registry()
assert len(dev_reg.devices) == 0
ent_reg = await hass.helpers.entity_registry.async_get_registry()
assert len(ent_reg.entities) == 0
|
from __future__ import print_function, division
from abc import abstractmethod
from plumbum.lib import six, _setdoc
from plumbum import local
import os
try:
from configparser import ConfigParser, NoOptionError, NoSectionError # Py3
except ImportError:
from ConfigParser import ConfigParser, NoOptionError, NoSectionError # type: ignore
class ConfigBase(six.ABC):
"""Base class for Config parsers.
:param filename: The file to use
The ``with`` statement can be used to automatically try to read on entering and write if changed on exiting. Otherwise, use ``.read`` and ``.write`` as needed. Set and get the options using ``[]`` syntax.
Usage:
with Config("~/.myprog_rc") as conf:
value = conf.get("option", "default")
value2 = conf["option"] # shortcut for default=None
"""
__slots__ = "filename changed".split()
def __init__(self, filename):
self.filename = local.path(filename)
self.changed = False
def __enter__(self):
try:
self.read()
except FileNotFoundError:
pass
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.changed:
self.write()
@abstractmethod
def read(self):
'''Read in the linked file'''
pass
@abstractmethod
def write(self):
'''Write out the linked file'''
self.changed = False
@abstractmethod
def _get(self, option):
'''Internal get function for subclasses'''
pass
@abstractmethod
def _set(self, option, value):
'''Internal set function for subclasses. Must return the value that was set.'''
pass
def get(self, option, default=None):
"Get an item from the store, returns default if fails"
try:
return self._get(option)
except KeyError:
self.changed = True
return self._set(option, default)
def set(self, option, value):
"""Set an item, mark this object as changed"""
self.changed = True
self._set(option, value)
def __getitem__(self, option):
return self._get(option)
def __setitem__(self, option, value):
return self.set(option, value)
class ConfigINI(ConfigBase):
DEFAULT_SECTION = 'DEFAULT'
slots = "parser".split()
def __init__(self, filename):
super(ConfigINI, self).__init__(filename)
self.parser = ConfigParser()
@_setdoc(ConfigBase)
def read(self):
self.parser.read(self.filename)
super(ConfigINI, self).read()
@_setdoc(ConfigBase)
def write(self):
with open(self.filename, 'w') as f:
self.parser.write(f)
super(ConfigINI, self).write()
@classmethod
def _sec_opt(cls, option):
if '.' not in option:
sec = cls.DEFAULT_SECTION
else:
sec, option = option.split('.', 1)
return sec, option
@_setdoc(ConfigBase)
def _get(self, option):
sec, option = self._sec_opt(option)
try:
return self.parser.get(sec, option)
except (NoSectionError, NoOptionError):
raise KeyError("{sec}:{option}".format(sec=sec, option=option))
@_setdoc(ConfigBase)
def _set(self, option, value):
sec, option = self._sec_opt(option)
try:
self.parser.set(sec, option, str(value))
except NoSectionError:
self.parser.add_section(sec)
self.parser.set(sec, option, str(value))
return str(value)
Config = ConfigINI
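# A minimal usage sketch (illustrative; the path and option names are made up).
# Keys without a dot land in the DEFAULT section, while "sec.opt" addresses
# option "opt" inside section "sec":
#
#   with Config("~/.myprog_rc") as conf:
#       editor = conf.get("ui.editor", "vim")  # written back (and saved on exit) if missing
#       conf["ui.color"] = "auto"              # marks the config as changed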
|
import asyncio
from datetime import timedelta
import logging
from brother import Brother, SnmpError, UnsupportedModel
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_TYPE, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Config, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DOMAIN
PLATFORMS = ["sensor"]
SCAN_INTERVAL = timedelta(seconds=30)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: Config):
"""Set up the Brother component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Brother from a config entry."""
host = entry.data[CONF_HOST]
kind = entry.data[CONF_TYPE]
coordinator = BrotherDataUpdateCoordinator(hass, host=host, kind=kind)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id).shutdown()
return unload_ok
class BrotherDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Brother data from the printer."""
def __init__(self, hass, host, kind):
"""Initialize."""
self.brother = Brother(host, kind=kind)
self._unsub_stop = hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, self._handle_ha_stop
)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=SCAN_INTERVAL,
)
async def _async_update_data(self):
"""Update data via library."""
# Race condition on shutdown. Stop all the fetches.
if self._unsub_stop is None:
return None
try:
await self.brother.async_update()
except (ConnectionError, SnmpError, UnsupportedModel) as error:
raise UpdateFailed(error) from error
return self.brother.data
def shutdown(self):
"""Shutdown the Brother coordinator."""
self._unsub_stop()
self._unsub_stop = None
self.brother.shutdown()
def _handle_ha_stop(self, _):
"""Handle Home Assistant stopping."""
self.shutdown()
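# Illustrative sketch (not part of this integration's source): a platform set
# up via async_forward_entry_setup would typically fetch the coordinator back
# out of hass.data and read coordinator.data, e.g.
#
#   coordinator = hass.data[DOMAIN][entry.entry_id]
#   printer_status = coordinator.data.get("status")  # "status" is a hypothetical key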
|
import argparse
import json
import sys
from collections import defaultdict
from paasta_tools.api.client import get_paasta_oapi_client
from paasta_tools.utils import load_system_paasta_config
def parse_capacity_check_options():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"type", choices=["cpus", "mem", "disk"], type=str, help="The resource to check."
)
parser.add_argument(
"--warn",
"-w",
dest="warn",
type=float,
default=80,
help="Level to emit a warning status.",
)
parser.add_argument(
"--crit",
"-c",
dest="crit",
type=float,
default=90,
help="Level to emit a critical status.",
)
parser.add_argument(
"--overrides",
dest="overrides",
type=str,
help="json file of per-attribute overrides.\n"
"In the format [{groupings: {attribute: value, ...}, warn: {cpus: num, disk: num, mem: num}, "
"crit: {cpus: num, disk: num, mem: num}}, ...]",
)
parser.add_argument(
"--cluster",
dest="cluster",
type=str,
help="Cluster to check. Defaults to looking for the current cluster.",
)
parser.add_argument(
"--attributes",
dest="attributes",
type=str,
default="pool",
help="Comma separated list of attributes to check.\n"
"Checks combinations of attributes",
)
options = parser.parse_args()
return options
def calc_percent_usage(resource_item, value_to_check):
values = resource_item[value_to_check]
if values["total"] == 0:
return 0
return 100 * (values["used"] / values["total"])
def error_message(failures, level, cluster, value_to_check):
result = f"{level} cluster {cluster} {value_to_check} usage:\n"
results = []
for f in failures:
attrs = ", ".join(["{}: {}".format(e["attr"], e["value"]) for e in f["attrs"]])
results.append(
" {} is at {:.2f} percent {}, maximum {:.2f} percent".format(
attrs, f["current"], value_to_check, f["maximum"]
)
)
result += "\n".join(results)
return result
def get_check_from_overrides(overrides, default_check, groupings):
"""Get the overrides dict from overrides with the same groupings as groupings,
or return the default"""
checks = [o for o in overrides if o["groupings"] == groupings]
if len(checks) == 0:
return default_check
elif len(checks) == 1:
return checks[0]
else:
group_string = ", ".join([f"{k}: {v}" for k, v in groupings.items()])
print("UNKNOWN Multiple overrides specified for %s" % group_string)
sys.exit(3)
def read_overrides(override_file):
if override_file:
with open(override_file, "r") as f:
return json.loads(f.read())
else:
return {}
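# Illustrative overrides file matching the format documented in
# parse_capacity_check_options (values are made up): raise the thresholds for
# the "default" pool only.
#
#   [
#     {
#       "groupings": {"pool": "default"},
#       "warn": {"cpus": 90, "mem": 90, "disk": 90},
#       "crit": {"cpus": 95, "mem": 95, "disk": 95}
#     }
#   ]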
def run_capacity_check():
options = parse_capacity_check_options()
system_paasta_config = load_system_paasta_config()
cluster = (
options.cluster
if options.cluster is not None
else system_paasta_config.get_cluster()
)
value_to_check = options.type
client = get_paasta_oapi_client(cluster=cluster)
if client is None:
print("UNKNOWN Failed to load paasta api client")
sys.exit(3)
overrides = read_overrides(options.overrides)
attributes = options.attributes.split(",")
try:
resource_use = client.resources.resources(groupings=attributes)
except client.api_error as e:
print(f"UNKNOWN received exception from paasta api:\n\t%s{e}")
sys.exit(3)
default_check = {
"warn": {"cpus": options.warn, "mem": options.warn, "disk": options.warn},
"crit": {"cpus": options.crit, "mem": options.crit, "disk": options.crit},
}
failures = defaultdict(list)
for usage_value in resource_use.value:
check = get_check_from_overrides(
overrides, default_check, usage_value["groupings"]
)
usage_percent = calc_percent_usage(usage_value, value_to_check)
for c in ["crit", "warn"]:
if usage_percent > check[c][value_to_check]:
failures[c].append(
{
"attrs": [
{"attr": a, "value": v}
for a, v in usage_value["groupings"].items()
],
"maximum": check[c][value_to_check],
"current": usage_percent,
}
)
break
return_value = [0]
if len(failures["crit"]) > 0:
result = error_message(failures["crit"], "CRITICAL", cluster, value_to_check)
print(result)
return_value.append(2)
if len(failures["warn"]) > 0:
result = error_message(failures["warn"], "WARNING", cluster, value_to_check)
print(result)
return_value.append(1)
if max(return_value) == 0:
print(f"OK cluster {cluster} is below critical capacity in {value_to_check}")
sys.exit(max(return_value))
if __name__ == "__main__":
run_capacity_check()
|
import datetime
import logging
from pyeconet.api import PyEcoNet
import voluptuous as vol
from homeassistant.components.water_heater import (
PLATFORM_SCHEMA,
STATE_ECO,
STATE_ELECTRIC,
STATE_GAS,
STATE_HEAT_PUMP,
STATE_HIGH_DEMAND,
STATE_OFF,
STATE_PERFORMANCE,
SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE,
WaterHeaterEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_PASSWORD,
CONF_USERNAME,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SERVICE_ADD_VACATION, SERVICE_DELETE_VACATION
_LOGGER = logging.getLogger(__name__)
ATTR_VACATION_START = "next_vacation_start_date"
ATTR_VACATION_END = "next_vacation_end_date"
ATTR_ON_VACATION = "on_vacation"
ATTR_TODAYS_ENERGY_USAGE = "todays_energy_usage"
ATTR_IN_USE = "in_use"
ATTR_START_DATE = "start_date"
ATTR_END_DATE = "end_date"
ATTR_LOWER_TEMP = "lower_temp"
ATTR_UPPER_TEMP = "upper_temp"
ATTR_IS_ENABLED = "is_enabled"
SUPPORT_FLAGS_HEATER = SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE
ADD_VACATION_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_START_DATE): cv.positive_int,
vol.Required(ATTR_END_DATE): cv.positive_int,
}
)
DELETE_VACATION_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
ECONET_DATA = "econet"
ECONET_STATE_TO_HA = {
"Energy Saver": STATE_ECO,
"gas": STATE_GAS,
"High Demand": STATE_HIGH_DEMAND,
"Off": STATE_OFF,
"Performance": STATE_PERFORMANCE,
"Heat Pump Only": STATE_HEAT_PUMP,
"Electric-Only": STATE_ELECTRIC,
"Electric": STATE_ELECTRIC,
"Heat Pump": STATE_HEAT_PUMP,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
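# Minimal YAML this platform accepts (illustrative credentials; the platform
# key is assumed to be "econet"):
#
#   water_heater:
#     - platform: econet
#       username: [email protected]
#       password: !secret econet_password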
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the EcoNet water heaters."""
hass.data[ECONET_DATA] = {}
hass.data[ECONET_DATA]["water_heaters"] = []
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
econet = PyEcoNet(username, password)
water_heaters = econet.get_water_heaters()
hass_water_heaters = [
EcoNetWaterHeater(water_heater) for water_heater in water_heaters
]
add_entities(hass_water_heaters)
hass.data[ECONET_DATA]["water_heaters"].extend(hass_water_heaters)
def service_handle(service):
"""Handle the service calls."""
entity_ids = service.data.get("entity_id")
all_heaters = hass.data[ECONET_DATA]["water_heaters"]
_heaters = [
x for x in all_heaters if not entity_ids or x.entity_id in entity_ids
]
for _water_heater in _heaters:
if service.service == SERVICE_ADD_VACATION:
start = service.data.get(ATTR_START_DATE)
end = service.data.get(ATTR_END_DATE)
_water_heater.add_vacation(start, end)
if service.service == SERVICE_DELETE_VACATION:
for vacation in _water_heater.water_heater.vacations:
vacation.delete()
_water_heater.schedule_update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_ADD_VACATION, service_handle, schema=ADD_VACATION_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_DELETE_VACATION, service_handle, schema=DELETE_VACATION_SCHEMA
)
class EcoNetWaterHeater(WaterHeaterEntity):
"""Representation of an EcoNet water heater."""
def __init__(self, water_heater):
"""Initialize the water heater."""
self.water_heater = water_heater
self.supported_modes = self.water_heater.supported_modes
self.econet_state_to_ha = {}
self.ha_state_to_econet = {}
for mode in ECONET_STATE_TO_HA:
if mode in self.supported_modes:
self.econet_state_to_ha[mode] = ECONET_STATE_TO_HA.get(mode)
for key, value in self.econet_state_to_ha.items():
self.ha_state_to_econet[value] = key
for mode in self.supported_modes:
if mode not in ECONET_STATE_TO_HA:
error = f"Invalid operation mode mapping. {mode} doesn't map. Please report this."
_LOGGER.error(error)
@property
def name(self):
"""Return the device name."""
return self.water_heater.name
@property
def available(self):
"""Return if the the device is online or not."""
return self.water_heater.is_connected
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def device_state_attributes(self):
"""Return the optional device state attributes."""
data = {}
vacations = self.water_heater.get_vacations()
if vacations:
data[ATTR_VACATION_START] = vacations[0].start_date
data[ATTR_VACATION_END] = vacations[0].end_date
data[ATTR_ON_VACATION] = self.water_heater.is_on_vacation
todays_usage = self.water_heater.total_usage_for_today
if todays_usage:
data[ATTR_TODAYS_ENERGY_USAGE] = todays_usage
data[ATTR_IN_USE] = self.water_heater.in_use
if self.water_heater.lower_temp is not None:
data[ATTR_LOWER_TEMP] = round(self.water_heater.lower_temp, 2)
if self.water_heater.upper_temp is not None:
data[ATTR_UPPER_TEMP] = round(self.water_heater.upper_temp, 2)
if self.water_heater.is_enabled is not None:
data[ATTR_IS_ENABLED] = self.water_heater.is_enabled
return data
@property
def current_operation(self):
"""
Return current operation as one of the following.
["eco", "heat_pump", "high_demand", "electric_only"]
"""
current_op = self.econet_state_to_ha.get(self.water_heater.mode)
return current_op
@property
def operation_list(self):
"""List of available operation modes."""
op_list = []
for mode in self.supported_modes:
ha_mode = self.econet_state_to_ha.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
return op_list
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATER
def set_temperature(self, **kwargs):
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
if target_temp is not None:
self.water_heater.set_target_set_point(target_temp)
else:
_LOGGER.error("A target temperature must be provided")
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
op_mode_to_set = self.ha_state_to_econet.get(operation_mode)
if op_mode_to_set is not None:
self.water_heater.set_mode(op_mode_to_set)
else:
_LOGGER.error("An operation mode must be provided")
def add_vacation(self, start, end):
"""Add a vacation to this water heater."""
if not start:
start = datetime.datetime.now()
else:
start = datetime.datetime.fromtimestamp(start)
end = datetime.datetime.fromtimestamp(end)
self.water_heater.set_vacation_mode(start, end)
def update(self):
"""Get the latest date."""
self.water_heater.update_state()
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.water_heater.set_point
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.water_heater.min_set_point
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.water_heater.max_set_point
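# Illustrative service payload for SERVICE_ADD_VACATION (entity id and Unix
# timestamps are made up, and DOMAIN is assumed to resolve to "econet"; see
# ADD_VACATION_SCHEMA and add_vacation above):
#
#   service: econet.add_vacation
#   data:
#     entity_id: water_heater.heat_pump_water_heater  # hypothetical entity
#     start_date: 1609459200
#     end_date: 1610064000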
|
from weblate.trans.management.commands import WeblateComponentCommand
from weblate.trans.tasks import perform_update
class Command(WeblateComponentCommand):
help = "updates git repos"
needs_repo = True
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--foreground",
action="store_true",
default=False,
help="Perform load in foreground (by default backgroud task is used)",
)
def handle(self, *args, **options):
if options["foreground"]:
updater = perform_update
else:
updater = perform_update.delay
for component in self.get_components(*args, **options):
updater("Component", component.pk)
|
from __future__ import unicode_literals
import os
import re
from lib.fun.fun import cool, charanger
from lib.data.data import pystrs, pyoptions
def confmatcher(resource):
configures = []
if not os.path.isfile(resource):
matches = re.findall(pyoptions.confpattern, resource.strip())
for match in matches:
for m in match:
configures.append(m.strip())
else:
with open(resource) as f:
for item in f.readlines():
confline = item.strip()
if len(confline) >= 1 and confline[0] == pyoptions.annotator:
pass
else:
matches = re.findall(pyoptions.confpattern, confline)
for match in matches:
for m in match:
configures.append(m.strip())
if configures:
if len(configures) // 5 > 10:
            exit(pyoptions.CRLF + cool.red('[-] At most 10 parsers are supported'))
else:
return configures
else:
        exit(pyoptions.CRLF + cool.red('[-] No configuration matched in the given resource'))
def elementparser(configures):
dicts = {pystrs.conf_head: [], pystrs.conf_char: [], pystrs.conf_minlen: [], pystrs.conf_maxlen: [],
pystrs.conf_encode: [], pystrs.conf_tail: []}
for x in range(1, len(configures) + 1):
count = x - 1
x %= 5
if x == 1:
dicts[pystrs.conf_head].append(configures[count].strip())
elif x == 2:
dicts[pystrs.conf_char].append(charanger(configures[count]))
elif x == 3:
dicts[pystrs.conf_minlen].append(configures[count].split(pyoptions.length_split)[0])
dicts[pystrs.conf_maxlen].append(configures[count].split(pyoptions.length_split)[1])
elif x == 4:
dicts[pystrs.conf_encode].append(configures[count])
elif x == 0:
dicts[pystrs.conf_tail].append(configures[count].strip())
return dicts
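# Illustrative walk-through (made-up values, and pyoptions.length_split is
# assumed to be ":"): a single five-element configures list
#
#   ["prefix", "[0-9]", "2:4", "none", "suffix"]
#
# is grouped by position modulo five into
#   conf_head   -> ["prefix"]
#   conf_char   -> [charanger("[0-9]")]
#   conf_minlen -> ["2"], conf_maxlen -> ["4"]
#   conf_encode -> ["none"]
#   conf_tail   -> ["suffix"]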
|
import pytest
from decimal import Decimal
from django.core.exceptions import ValidationError
from shop.money.money_maker import MoneyMaker
from shop.money.fields import MoneyField
EUR = MoneyMaker('EUR')
def test_deconstruct():
f1 = MoneyField(currency='EUR', default=EUR(0))
name, path, args, kwargs = f1.deconstruct()
f2 = MoneyField(*args, **kwargs)
assert f1.currency_code == f2.currency_code
assert f1.decimal_places == f2.decimal_places
assert f1.default == f2.default
def test_to_python():
f = MoneyField(currency='EUR', null=True)
assert f.to_python(3) == EUR('3')
assert f.to_python('3.14') == EUR('3.14')
assert f.to_python(None) == EUR()
assert f.to_python(EUR(3)) == EUR('3')
with pytest.raises(ValidationError):
f.to_python('abc')
def test_get_prep_value():
f = MoneyField(currency='EUR', null=True)
assert f.get_prep_value(EUR('3')) == Decimal('3')
def test_from_db_value():
f = MoneyField(currency='EUR', null=True)
assert f.from_db_value(Decimal('3'), None, None) == EUR('3')
assert f.from_db_value(3.45, None, None) == EUR('3.45')
assert f.from_db_value(None, None, None) is None
def test_get_default():
OneEuro = EUR(1)
f = MoneyField(currency='EUR', null=True)
assert f.get_default() is None
f = MoneyField(currency='EUR', null=True, default=EUR())
assert f.get_default() == EUR()
f = MoneyField(currency='EUR', null=False, default=OneEuro)
assert f.get_default() == OneEuro
|
import unittest
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import scimark2_benchmark
TEST_OUTPUT_C = """;;; C small
** **
** SciMark2 Numeric Benchmark, see http://math.nist.gov/scimark **
** for details. (Results can be submitted to [email protected]) **
** **
Using 2.00 seconds min time per kenel.
Composite Score: 1596.04
FFT Mflops: 1568.64 (N=1024)
SOR Mflops: 1039.98 (100 x 100)
MonteCarlo: Mflops: 497.64
Sparse matmult Mflops: 1974.39 (N=1000, nz=5000)
LU Mflops: 2899.56 (M=100, N=100)
"""
TEST_OUTPUT_JAVA = """;;; Java small
SciMark 2.0a
Composite Score: 1716.3662351463677
FFT (1024): 1000.1380057152871
SOR (100x100): 1353.1987180103354
Monte Carlo : 727.7138820888014
Sparse matmult (N=1000, nz=5000): 1495.40225150659
LU (100x100): 4005.3783184108247
java.vendor: Oracle Corporation
java.version: 1.7.0_75
os.arch: amd64
os.name: Linux
os.version: 3.16.0-25-generic
"""
EXPECTED_C_METADATA = {
'benchmark_language': 'C',
'benchmark_size': 'small',
}
EXPECTED_JAVA_METADATA = {
'benchmark_language': 'Java',
'benchmark_size': 'small',
'java.vendor': 'Oracle Corporation',
'os.version': '3.16.0-25-generic',
'os.arch': 'amd64',
'os.name': 'Linux',
'java.version': '1.7.0_75',
}
EXPECTED_RESULT_C = [
sample.Sample(metric='Composite Score', value=1596.04,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='FFT (N=1024)', value=1568.64,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='SOR (100 x 100)', value=1039.98,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='MonteCarlo', value=497.64,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='Sparse matmult (N=1000, nz=5000)', value=1974.39,
unit='Mflops', metadata=EXPECTED_C_METADATA),
sample.Sample(metric='LU (M=100, N=100)', value=2899.56,
unit='Mflops', metadata=EXPECTED_C_METADATA),
]
EXPECTED_RESULT_JAVA = [
sample.Sample(metric='Composite Score', value=1716.3662351463677,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='FFT (1024)', value=1000.1380057152871,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='SOR (100x100)', value=1353.1987180103354,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='Monte Carlo', value=727.7138820888014,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='Sparse matmult (N=1000, nz=5000)',
value=1495.40225150659, unit='Mflops',
metadata=EXPECTED_JAVA_METADATA),
sample.Sample(metric='LU (100x100)', value=4005.3783184108247,
unit='Mflops', metadata=EXPECTED_JAVA_METADATA),
]
class Scimark2BenchmarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
def testParseResultsC(self):
samples = scimark2_benchmark.ParseResults(TEST_OUTPUT_C)
self.assertSampleListsEqualUpToTimestamp(samples, EXPECTED_RESULT_C)
def testParseResultsJava(self):
samples = scimark2_benchmark.ParseResults(TEST_OUTPUT_JAVA)
self.assertSampleListsEqualUpToTimestamp(samples, EXPECTED_RESULT_JAVA)
def testParseResultsCombined(self):
samples = scimark2_benchmark.ParseResults(TEST_OUTPUT_C + TEST_OUTPUT_JAVA)
self.assertSampleListsEqualUpToTimestamp(
samples,
EXPECTED_RESULT_C + EXPECTED_RESULT_JAVA)
if __name__ == '__main__':
unittest.main()
|
from datetime import timedelta
import logging
from fritzconnection.core.exceptions import FritzConnectionException
from fritzconnection.lib.fritzstatus import FritzStatus
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, CONF_NAME, STATE_UNAVAILABLE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_DEFAULT_NAME = "fritz_netmonitor"
CONF_DEFAULT_IP = "169.254.1.1" # This IP is valid for all FRITZ!Box routers.
ATTR_BYTES_RECEIVED = "bytes_received"
ATTR_BYTES_SENT = "bytes_sent"
ATTR_TRANSMISSION_RATE_UP = "transmission_rate_up"
ATTR_TRANSMISSION_RATE_DOWN = "transmission_rate_down"
ATTR_EXTERNAL_IP = "external_ip"
ATTR_IS_CONNECTED = "is_connected"
ATTR_IS_LINKED = "is_linked"
ATTR_MAX_BYTE_RATE_DOWN = "max_byte_rate_down"
ATTR_MAX_BYTE_RATE_UP = "max_byte_rate_up"
ATTR_UPTIME = "uptime"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
STATE_ONLINE = "online"
STATE_OFFLINE = "offline"
ICON = "mdi:web"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=CONF_DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=CONF_DEFAULT_IP): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the FRITZ!Box monitor sensors."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
try:
fstatus = FritzStatus(address=host)
except (ValueError, TypeError, FritzConnectionException):
fstatus = None
if fstatus is None:
_LOGGER.error("Failed to establish connection to FRITZ!Box: %s", host)
        return
_LOGGER.info("Successfully connected to FRITZ!Box")
add_entities([FritzboxMonitorSensor(name, fstatus)], True)
class FritzboxMonitorSensor(Entity):
"""Implementation of a fritzbox monitor sensor."""
def __init__(self, name, fstatus):
"""Initialize the sensor."""
self._name = name
self._fstatus = fstatus
self._state = STATE_UNAVAILABLE
self._is_linked = self._is_connected = None
self._external_ip = self._uptime = None
self._bytes_sent = self._bytes_received = None
self._transmission_rate_up = None
self._transmission_rate_down = None
self._max_byte_rate_up = self._max_byte_rate_down = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name.rstrip()
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def state_attributes(self):
"""Return the device state attributes."""
# Don't return attributes if FritzBox is unreachable
if self._state == STATE_UNAVAILABLE:
return {}
return {
ATTR_IS_LINKED: self._is_linked,
ATTR_IS_CONNECTED: self._is_connected,
ATTR_EXTERNAL_IP: self._external_ip,
ATTR_UPTIME: self._uptime,
ATTR_BYTES_SENT: self._bytes_sent,
ATTR_BYTES_RECEIVED: self._bytes_received,
ATTR_TRANSMISSION_RATE_UP: self._transmission_rate_up,
ATTR_TRANSMISSION_RATE_DOWN: self._transmission_rate_down,
ATTR_MAX_BYTE_RATE_UP: self._max_byte_rate_up,
ATTR_MAX_BYTE_RATE_DOWN: self._max_byte_rate_down,
}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Retrieve information from the FritzBox."""
try:
self._is_linked = self._fstatus.is_linked
self._is_connected = self._fstatus.is_connected
self._external_ip = self._fstatus.external_ip
self._uptime = self._fstatus.uptime
self._bytes_sent = self._fstatus.bytes_sent
self._bytes_received = self._fstatus.bytes_received
transmission_rate = self._fstatus.transmission_rate
self._transmission_rate_up = transmission_rate[0]
self._transmission_rate_down = transmission_rate[1]
self._max_byte_rate_up = self._fstatus.max_byte_rate[0]
self._max_byte_rate_down = self._fstatus.max_byte_rate[1]
self._state = STATE_ONLINE if self._is_connected else STATE_OFFLINE
except RequestException as err:
self._state = STATE_UNAVAILABLE
_LOGGER.warning("Could not reach FRITZ!Box: %s", err)
|
import asyncio
from functools import wraps
import logging
import secrets
from aiohttp.web import HTTPBadRequest, Request, Response, json_response
from nacl.secret import SecretBox
import voluptuous as vol
from homeassistant.components import notify as hass_notify, tag
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_CLASSES,
)
from homeassistant.components.camera import SUPPORT_STREAM as CAMERA_SUPPORT_STREAM
from homeassistant.components.device_tracker import (
ATTR_BATTERY,
ATTR_GPS,
ATTR_GPS_ACCURACY,
ATTR_LOCATION_NAME,
)
from homeassistant.components.frontend import MANIFEST_JSON
from homeassistant.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES
from homeassistant.components.zone.const import DOMAIN as ZONE_DOMAIN
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
ATTR_SUPPORTED_FEATURES,
CONF_WEBHOOK_ID,
HTTP_BAD_REQUEST,
HTTP_CREATED,
)
from homeassistant.core import EventOrigin
from homeassistant.exceptions import HomeAssistantError, ServiceNotFound
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
template,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.decorator import Registry
from .const import (
ATTR_ALTITUDE,
ATTR_APP_DATA,
ATTR_APP_VERSION,
ATTR_CAMERA_ENTITY_ID,
ATTR_COURSE,
ATTR_DEVICE_ID,
ATTR_DEVICE_NAME,
ATTR_EVENT_DATA,
ATTR_EVENT_TYPE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_VERSION,
ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS,
ATTR_SENSOR_ICON,
ATTR_SENSOR_NAME,
ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE,
ATTR_SENSOR_TYPE_BINARY_SENSOR,
ATTR_SENSOR_TYPE_SENSOR,
ATTR_SENSOR_UNIQUE_ID,
ATTR_SENSOR_UOM,
ATTR_SPEED,
ATTR_SUPPORTS_ENCRYPTION,
ATTR_TEMPLATE,
ATTR_TEMPLATE_VARIABLES,
ATTR_VERTICAL_ACCURACY,
ATTR_WEBHOOK_DATA,
ATTR_WEBHOOK_ENCRYPTED,
ATTR_WEBHOOK_ENCRYPTED_DATA,
ATTR_WEBHOOK_TYPE,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DATA_STORE,
DOMAIN,
ERR_ENCRYPTION_ALREADY_ENABLED,
ERR_ENCRYPTION_NOT_AVAILABLE,
ERR_ENCRYPTION_REQUIRED,
ERR_INVALID_FORMAT,
ERR_SENSOR_NOT_REGISTERED,
SIGNAL_LOCATION_UPDATE,
SIGNAL_SENSOR_UPDATE,
)
from .helpers import (
_decrypt_payload,
empty_okay_response,
error_response,
registration_context,
safe_registration,
savable_state,
supports_encryption,
webhook_response,
)
_LOGGER = logging.getLogger(__name__)
DELAY_SAVE = 10
WEBHOOK_COMMANDS = Registry()
COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES)
SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR]
WEBHOOK_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_WEBHOOK_TYPE): cv.string,
vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list),
vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean,
vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string,
}
)
def validate_schema(schema):
"""Decorate a webhook function with a schema."""
if isinstance(schema, dict):
schema = vol.Schema(schema)
def wrapper(func):
"""Wrap function so we validate schema."""
@wraps(func)
async def validate_and_run(hass, config_entry, data):
"""Validate input and call handler."""
try:
data = schema(data)
except vol.Invalid as ex:
err = vol.humanize.humanize_error(data, ex)
_LOGGER.error("Received invalid webhook payload: %s", err)
return empty_okay_response()
return await func(hass, config_entry, data)
return validate_and_run
return wrapper
async def handle_webhook(
hass: HomeAssistantType, webhook_id: str, request: Request
) -> Response:
"""Handle webhook callback."""
if webhook_id in hass.data[DOMAIN][DATA_DELETED_IDS]:
return Response(status=410)
config_entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id]
device_name = config_entry.data[ATTR_DEVICE_NAME]
try:
req_data = await request.json()
except ValueError:
_LOGGER.warning("Received invalid JSON from mobile_app device: %s", device_name)
return empty_okay_response(status=HTTP_BAD_REQUEST)
if (
ATTR_WEBHOOK_ENCRYPTED not in req_data
and config_entry.data[ATTR_SUPPORTS_ENCRYPTION]
):
_LOGGER.warning(
"Refusing to accept unencrypted webhook from %s",
device_name,
)
return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required")
try:
req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data)
except vol.Invalid as ex:
err = vol.humanize.humanize_error(req_data, ex)
_LOGGER.error(
"Received invalid webhook from %s with payload: %s", device_name, err
)
return empty_okay_response()
webhook_type = req_data[ATTR_WEBHOOK_TYPE]
webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {})
if req_data[ATTR_WEBHOOK_ENCRYPTED]:
enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA]
webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data)
if webhook_type not in WEBHOOK_COMMANDS:
_LOGGER.error(
"Received invalid webhook from %s of type: %s", device_name, webhook_type
)
return empty_okay_response()
_LOGGER.debug(
"Received webhook payload from %s for type %s: %s",
device_name,
webhook_type,
webhook_payload,
)
# Shield so we make sure we finish the webhook, even if sender hangs up.
return await asyncio.shield(
WEBHOOK_COMMANDS[webhook_type](hass, config_entry, webhook_payload)
)
@WEBHOOK_COMMANDS.register("call_service")
@validate_schema(
{
vol.Required(ATTR_DOMAIN): cv.string,
vol.Required(ATTR_SERVICE): cv.string,
vol.Optional(ATTR_SERVICE_DATA, default={}): dict,
}
)
async def webhook_call_service(hass, config_entry, data):
"""Handle a call service webhook."""
try:
await hass.services.async_call(
data[ATTR_DOMAIN],
data[ATTR_SERVICE],
data[ATTR_SERVICE_DATA],
blocking=True,
context=registration_context(config_entry.data),
)
except (vol.Invalid, ServiceNotFound, Exception) as ex:
_LOGGER.error(
"Error when calling service during mobile_app "
"webhook (device name: %s): %s",
config_entry.data[ATTR_DEVICE_NAME],
ex,
)
raise HTTPBadRequest() from ex
return empty_okay_response()
@WEBHOOK_COMMANDS.register("fire_event")
@validate_schema(
{
vol.Required(ATTR_EVENT_TYPE): cv.string,
vol.Optional(ATTR_EVENT_DATA, default={}): dict,
}
)
async def webhook_fire_event(hass, config_entry, data):
"""Handle a fire event webhook."""
event_type = data[ATTR_EVENT_TYPE]
hass.bus.async_fire(
event_type,
data[ATTR_EVENT_DATA],
EventOrigin.remote,
context=registration_context(config_entry.data),
)
return empty_okay_response()
@WEBHOOK_COMMANDS.register("stream_camera")
@validate_schema({vol.Required(ATTR_CAMERA_ENTITY_ID): cv.string})
async def webhook_stream_camera(hass, config_entry, data):
"""Handle a request to HLS-stream a camera."""
camera = hass.states.get(data[ATTR_CAMERA_ENTITY_ID])
if camera is None:
return webhook_response(
{"success": False},
registration=config_entry.data,
status=HTTP_BAD_REQUEST,
)
resp = {"mjpeg_path": "/api/camera_proxy_stream/%s" % (camera.entity_id)}
if camera.attributes[ATTR_SUPPORTED_FEATURES] & CAMERA_SUPPORT_STREAM:
try:
resp["hls_path"] = await hass.components.camera.async_request_stream(
camera.entity_id, "hls"
)
except HomeAssistantError:
resp["hls_path"] = None
else:
resp["hls_path"] = None
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("render_template")
@validate_schema(
{
str: {
vol.Required(ATTR_TEMPLATE): cv.string,
vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict,
}
}
)
async def webhook_render_template(hass, config_entry, data):
"""Handle a render template webhook."""
resp = {}
for key, item in data.items():
try:
tpl = template.Template(item[ATTR_TEMPLATE], hass)
resp[key] = tpl.async_render(item.get(ATTR_TEMPLATE_VARIABLES))
except template.TemplateError as ex:
resp[key] = {"error": str(ex)}
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("update_location")
@validate_schema(
{
vol.Optional(ATTR_LOCATION_NAME): cv.string,
vol.Required(ATTR_GPS): cv.gps,
vol.Required(ATTR_GPS_ACCURACY): cv.positive_int,
vol.Optional(ATTR_BATTERY): cv.positive_int,
vol.Optional(ATTR_SPEED): cv.positive_int,
vol.Optional(ATTR_ALTITUDE): vol.Coerce(float),
vol.Optional(ATTR_COURSE): cv.positive_int,
vol.Optional(ATTR_VERTICAL_ACCURACY): cv.positive_int,
}
)
async def webhook_update_location(hass, config_entry, data):
"""Handle an update location webhook."""
hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_LOCATION_UPDATE.format(config_entry.entry_id), data
)
return empty_okay_response()
@WEBHOOK_COMMANDS.register("update_registration")
@validate_schema(
{
vol.Optional(ATTR_APP_DATA, default={}): dict,
vol.Required(ATTR_APP_VERSION): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_MANUFACTURER): cv.string,
vol.Required(ATTR_MODEL): cv.string,
vol.Optional(ATTR_OS_VERSION): cv.string,
}
)
async def webhook_update_registration(hass, config_entry, data):
"""Handle an update registration webhook."""
new_registration = {**config_entry.data, **data}
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, config_entry.data[ATTR_DEVICE_ID])},
manufacturer=new_registration[ATTR_MANUFACTURER],
model=new_registration[ATTR_MODEL],
name=new_registration[ATTR_DEVICE_NAME],
sw_version=new_registration[ATTR_OS_VERSION],
)
hass.config_entries.async_update_entry(config_entry, data=new_registration)
await hass_notify.async_reload(hass, DOMAIN)
return webhook_response(
safe_registration(new_registration),
registration=new_registration,
)
@WEBHOOK_COMMANDS.register("enable_encryption")
async def webhook_enable_encryption(hass, config_entry, data):
"""Handle a encryption enable webhook."""
if config_entry.data[ATTR_SUPPORTS_ENCRYPTION]:
_LOGGER.warning(
"Refusing to enable encryption for %s because it is already enabled!",
config_entry.data[ATTR_DEVICE_NAME],
)
return error_response(
ERR_ENCRYPTION_ALREADY_ENABLED, "Encryption already enabled"
)
if not supports_encryption():
_LOGGER.warning(
"Unable to enable encryption for %s because libsodium is unavailable!",
config_entry.data[ATTR_DEVICE_NAME],
)
return error_response(ERR_ENCRYPTION_NOT_AVAILABLE, "Encryption is unavailable")
secret = secrets.token_hex(SecretBox.KEY_SIZE)
data = {**config_entry.data, ATTR_SUPPORTS_ENCRYPTION: True, CONF_SECRET: secret}
hass.config_entries.async_update_entry(config_entry, data=data)
return json_response({"secret": secret})
@WEBHOOK_COMMANDS.register("register_sensor")
@validate_schema(
{
vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
vol.Optional(ATTR_SENSOR_DEVICE_CLASS): vol.All(
vol.Lower, vol.In(COMBINED_CLASSES)
),
vol.Required(ATTR_SENSOR_NAME): cv.string,
vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
vol.Optional(ATTR_SENSOR_UOM): cv.string,
vol.Optional(ATTR_SENSOR_STATE, default=None): vol.Any(
None, bool, str, int, float
),
vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
}
)
async def webhook_register_sensor(hass, config_entry, data):
"""Handle a register sensor webhook."""
entity_type = data[ATTR_SENSOR_TYPE]
unique_id = data[ATTR_SENSOR_UNIQUE_ID]
device_name = config_entry.data[ATTR_DEVICE_NAME]
unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
existing_sensor = unique_store_key in hass.data[DOMAIN][entity_type]
data[CONF_WEBHOOK_ID] = config_entry.data[CONF_WEBHOOK_ID]
# If sensor already is registered, update current state instead
if existing_sensor:
_LOGGER.debug(
"Re-register for %s of existing sensor %s", device_name, unique_id
)
entry = hass.data[DOMAIN][entity_type][unique_store_key]
data = {**entry, **data}
hass.data[DOMAIN][entity_type][unique_store_key] = data
hass.data[DOMAIN][DATA_STORE].async_delay_save(
lambda: savable_state(hass), DELAY_SAVE
)
if existing_sensor:
async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE, data)
else:
register_signal = f"{DOMAIN}_{data[ATTR_SENSOR_TYPE]}_register"
async_dispatcher_send(hass, register_signal, data)
return webhook_response(
{"success": True},
registration=config_entry.data,
status=HTTP_CREATED,
)
@WEBHOOK_COMMANDS.register("update_sensor_states")
@validate_schema(
vol.All(
cv.ensure_list,
[
            # Partial schema, just enough to identify each sensor entry.
            # We don't validate everything here because otherwise one invalid
            # sensor would invalidate all sensors.
vol.Schema(
{
vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
],
)
)
async def webhook_update_sensor_states(hass, config_entry, data):
"""Handle an update sensor states webhook."""
sensor_schema_full = vol.Schema(
{
vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
vol.Required(ATTR_SENSOR_STATE): vol.Any(None, bool, str, int, float),
vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
}
)
device_name = config_entry.data[ATTR_DEVICE_NAME]
resp = {}
for sensor in data:
entity_type = sensor[ATTR_SENSOR_TYPE]
unique_id = sensor[ATTR_SENSOR_UNIQUE_ID]
unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
if unique_store_key not in hass.data[DOMAIN][entity_type]:
_LOGGER.error(
"Refusing to update %s non-registered sensor: %s",
device_name,
unique_store_key,
)
err_msg = f"{entity_type} {unique_id} is not registered"
resp[unique_id] = {
"success": False,
"error": {"code": ERR_SENSOR_NOT_REGISTERED, "message": err_msg},
}
continue
entry = hass.data[DOMAIN][entity_type][unique_store_key]
try:
sensor = sensor_schema_full(sensor)
except vol.Invalid as err:
err_msg = vol.humanize.humanize_error(sensor, err)
_LOGGER.error(
"Received invalid sensor payload from %s for %s: %s",
device_name,
unique_id,
err_msg,
)
resp[unique_id] = {
"success": False,
"error": {"code": ERR_INVALID_FORMAT, "message": err_msg},
}
continue
new_state = {**entry, **sensor}
hass.data[DOMAIN][entity_type][unique_store_key] = new_state
async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE, new_state)
resp[unique_id] = {"success": True}
hass.data[DOMAIN][DATA_STORE].async_delay_save(
lambda: savable_state(hass), DELAY_SAVE
)
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_zones")
async def webhook_get_zones(hass, config_entry, data):
"""Handle a get zones webhook."""
zones = [
hass.states.get(entity_id)
for entity_id in sorted(hass.states.async_entity_ids(ZONE_DOMAIN))
]
return webhook_response(zones, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_config")
async def webhook_get_config(hass, config_entry, data):
"""Handle a get config webhook."""
hass_config = hass.config.as_dict()
resp = {
"latitude": hass_config["latitude"],
"longitude": hass_config["longitude"],
"elevation": hass_config["elevation"],
"unit_system": hass_config["unit_system"],
"location_name": hass_config["location_name"],
"time_zone": hass_config["time_zone"],
"components": hass_config["components"],
"version": hass_config["version"],
"theme_color": MANIFEST_JSON["theme_color"],
}
if CONF_CLOUDHOOK_URL in config_entry.data:
resp[CONF_CLOUDHOOK_URL] = config_entry.data[CONF_CLOUDHOOK_URL]
try:
resp[CONF_REMOTE_UI_URL] = hass.components.cloud.async_remote_ui_url()
except hass.components.cloud.CloudNotAvailable:
pass
return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("scan_tag")
@validate_schema({vol.Required("tag_id"): cv.string})
async def webhook_scan_tag(hass, config_entry, data):
"""Handle a fire event webhook."""
await tag.async_scan_tag(
hass,
data["tag_id"],
config_entry.data[ATTR_DEVICE_ID],
registration_context(config_entry.data),
)
return empty_okay_response()
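# Illustrative webhook body for the "call_service" handler above (entity and
# service are made up, and ATTR_WEBHOOK_TYPE / ATTR_WEBHOOK_DATA are assumed to
# map to "type" / "data"). An unencrypted registration would POST this JSON to
# /api/webhook/<webhook_id>:
#
#   {
#     "type": "call_service",
#     "data": {
#       "domain": "light",
#       "service": "turn_on",
#       "service_data": {"entity_id": "light.kitchen"}
#     }
#   }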
|
import logging
import pyatmo
from homeassistant.components.light import LightEntity
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
DATA_HANDLER,
DOMAIN,
EVENT_TYPE_LIGHT_MODE,
MANUFACTURER,
SIGNAL_NAME,
)
from .data_handler import CAMERA_DATA_CLASS_NAME, NetatmoDataHandler
from .netatmo_entity_base import NetatmoBase
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Netatmo camera light platform."""
if "access_camera" not in entry.data["token"]["scope"]:
_LOGGER.info(
"Cameras are currently not supported with this authentication method"
)
return
data_handler = hass.data[DOMAIN][entry.entry_id][DATA_HANDLER]
async def get_entities():
"""Retrieve Netatmo entities."""
await data_handler.register_data_class(
CAMERA_DATA_CLASS_NAME, CAMERA_DATA_CLASS_NAME, None
)
entities = []
all_cameras = []
if CAMERA_DATA_CLASS_NAME not in data_handler.data:
raise PlatformNotReady
try:
for home in data_handler.data[CAMERA_DATA_CLASS_NAME].cameras.values():
for camera in home.values():
all_cameras.append(camera)
except pyatmo.NoDevice:
_LOGGER.debug("No cameras found")
for camera in all_cameras:
if camera["type"] == "NOC":
if not data_handler.webhook:
raise PlatformNotReady
_LOGGER.debug("Adding camera light %s %s", camera["id"], camera["name"])
entities.append(
NetatmoLight(
data_handler,
camera["id"],
camera["type"],
camera["home_id"],
)
)
return entities
async_add_entities(await get_entities(), True)
class NetatmoLight(NetatmoBase, LightEntity):
"""Representation of a Netatmo Presence camera light."""
def __init__(
self,
data_handler: NetatmoDataHandler,
camera_id: str,
camera_type: str,
home_id: str,
):
"""Initialize a Netatmo Presence camera light."""
LightEntity.__init__(self)
super().__init__(data_handler)
self._data_classes.append(
{"name": CAMERA_DATA_CLASS_NAME, SIGNAL_NAME: CAMERA_DATA_CLASS_NAME}
)
self._id = camera_id
self._home_id = home_id
self._model = camera_type
self._device_name = self._data.get_camera(camera_id).get("name")
self._name = f"{MANUFACTURER} {self._device_name}"
self._is_on = False
self._unique_id = f"{self._id}-light"
async def async_added_to_hass(self) -> None:
"""Entity created."""
await super().async_added_to_hass()
self._listeners.append(
async_dispatcher_connect(
self.hass,
f"signal-{DOMAIN}-webhook-{EVENT_TYPE_LIGHT_MODE}",
self.handle_event,
)
)
@callback
def handle_event(self, event):
"""Handle webhook events."""
data = event["data"]
if not data.get("camera_id"):
return
if (
data["home_id"] == self._home_id
and data["camera_id"] == self._id
and data["push_type"] == "NOC-light_mode"
):
self._is_on = bool(data["sub_type"] == "on")
self.async_write_ha_state()
return
@property
def is_on(self):
"""Return true if light is on."""
return self._is_on
def turn_on(self, **kwargs):
"""Turn camera floodlight on."""
_LOGGER.debug("Turn camera '%s' on", self._name)
self._data.set_state(
home_id=self._home_id,
camera_id=self._id,
floodlight="on",
)
def turn_off(self, **kwargs):
"""Turn camera floodlight into auto mode."""
_LOGGER.debug("Turn camera '%s' to auto mode", self._name)
self._data.set_state(
home_id=self._home_id,
camera_id=self._id,
floodlight="auto",
)
@callback
def async_update_callback(self):
"""Update the entity's state."""
self._is_on = bool(self._data.get_light_state(self._id) == "on")
|
from datetime import datetime
import pytest
from homeassistant.components.sensor import (
DEVICE_CLASS_CURRENT,
DOMAIN as SENSOR_DOMAIN,
)
from homeassistant.components.wled.const import (
ATTR_LED_COUNT,
ATTR_MAX_POWER,
CURRENT_MA,
DOMAIN,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DATA_BYTES,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
)
from homeassistant.core import HomeAssistant
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
from tests.components.wled import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_sensors(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the creation and values of the WLED sensors."""
entry = await init_integration(hass, aioclient_mock, skip_setup=True)
registry = await hass.helpers.entity_registry.async_get_registry()
# Pre-create registry entries for disabled by default sensors
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_uptime",
suggested_object_id="wled_rgb_light_uptime",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_free_heap",
suggested_object_id="wled_rgb_light_free_memory",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_wifi_signal",
suggested_object_id="wled_rgb_light_wifi_signal",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_wifi_rssi",
suggested_object_id="wled_rgb_light_wifi_rssi",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_wifi_channel",
suggested_object_id="wled_rgb_light_wifi_channel",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"aabbccddeeff_wifi_bssid",
suggested_object_id="wled_rgb_light_wifi_bssid",
disabled_by=None,
)
# Setup
test_time = datetime(2019, 11, 11, 9, 10, 32, tzinfo=dt_util.UTC)
with patch("homeassistant.components.wled.sensor.utcnow", return_value=test_time):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("sensor.wled_rgb_light_estimated_current")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:power"
assert state.attributes.get(ATTR_LED_COUNT) == 30
assert state.attributes.get(ATTR_MAX_POWER) == 850
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == CURRENT_MA
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_CURRENT
assert state.state == "470"
entry = registry.async_get("sensor.wled_rgb_light_estimated_current")
assert entry
assert entry.unique_id == "aabbccddeeff_estimated_current"
state = hass.states.get("sensor.wled_rgb_light_uptime")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:clock-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.state == "2019-11-11T09:10:00+00:00"
entry = registry.async_get("sensor.wled_rgb_light_uptime")
assert entry
assert entry.unique_id == "aabbccddeeff_uptime"
state = hass.states.get("sensor.wled_rgb_light_free_memory")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:memory"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_BYTES
assert state.state == "14600"
entry = registry.async_get("sensor.wled_rgb_light_free_memory")
assert entry
assert entry.unique_id == "aabbccddeeff_free_heap"
state = hass.states.get("sensor.wled_rgb_light_wifi_signal")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:wifi"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "76"
entry = registry.async_get("sensor.wled_rgb_light_wifi_signal")
assert entry
assert entry.unique_id == "aabbccddeeff_wifi_signal"
state = hass.states.get("sensor.wled_rgb_light_wifi_rssi")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:wifi"
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
assert state.state == "-62"
entry = registry.async_get("sensor.wled_rgb_light_wifi_rssi")
assert entry
assert entry.unique_id == "aabbccddeeff_wifi_rssi"
state = hass.states.get("sensor.wled_rgb_light_wifi_channel")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:wifi"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.state == "11"
entry = registry.async_get("sensor.wled_rgb_light_wifi_channel")
assert entry
assert entry.unique_id == "aabbccddeeff_wifi_channel"
state = hass.states.get("sensor.wled_rgb_light_wifi_bssid")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:wifi"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.state == "AA:AA:AA:AA:AA:BB"
entry = registry.async_get("sensor.wled_rgb_light_wifi_bssid")
assert entry
assert entry.unique_id == "aabbccddeeff_wifi_bssid"
@pytest.mark.parametrize(
"entity_id",
(
"sensor.wled_rgb_light_uptime",
"sensor.wled_rgb_light_free_memory",
"sensor.wled_rgb_light_wi_fi_signal",
"sensor.wled_rgb_light_wi_fi_rssi",
"sensor.wled_rgb_light_wi_fi_channel",
"sensor.wled_rgb_light_wi_fi_bssid",
),
)
async def test_disabled_by_default_sensors(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, entity_id: str
) -> None:
"""Test the disabled by default WLED sensors."""
await init_integration(hass, aioclient_mock)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get(entity_id)
assert state is None
entry = registry.async_get(entity_id)
assert entry
assert entry.disabled
assert entry.disabled_by == "integration"
|
import functools
import logging
import os
import posixpath
import re
import time
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
flags.DEFINE_string('hadoop_version', '3.2.1', 'Version of hadoop.')
DATA_FILES = ['hadoop/core-site.xml.j2', 'hadoop/yarn-site.xml.j2',
'hadoop/hdfs-site.xml', 'hadoop/mapred-site.xml.j2',
'hadoop/hadoop-env.sh.j2', 'hadoop/workers.j2']
START_HADOOP_SCRIPT = 'hadoop/start-hadoop.sh.j2'
HADOOP_DIR = posixpath.join(linux_packages.INSTALL_DIR, 'hadoop')
HADOOP_BIN = posixpath.join(HADOOP_DIR, 'bin')
HADOOP_SBIN = posixpath.join(HADOOP_DIR, 'sbin')
HADOOP_CONF_DIR = posixpath.join(HADOOP_DIR, 'etc', 'hadoop')
HADOOP_PRIVATE_KEY = posixpath.join(HADOOP_CONF_DIR, 'hadoop_keyfile')
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
for resource in DATA_FILES + [START_HADOOP_SCRIPT]:
data.ResourcePath(resource)
def _Install(vm):
vm.Install('openjdk')
vm.Install('curl')
hadoop_url = ('https://www-us.apache.org/dist/hadoop/common/hadoop-{0}/'
'hadoop-{0}.tar.gz').format(FLAGS.hadoop_version)
vm.RemoteCommand(('mkdir {0} && curl -L {1} | '
'tar -C {0} --strip-components=1 -xzf -').format(
HADOOP_DIR, hadoop_url))
def YumInstall(vm):
"""Installs Hadoop on the VM."""
vm.InstallPackages('snappy')
_Install(vm)
def AptInstall(vm):
"""Installs Hadoop on the VM."""
libsnappy = 'libsnappy1'
if not vm.HasPackage(libsnappy):
    # libsnappy is named libsnappy1v5 on Ubuntu 16.04, so try that instead.
libsnappy = 'libsnappy1v5'
vm.InstallPackages(libsnappy)
_Install(vm)
# Scheduling constants.
# Give 90% of VM memory to YARN for scheduling.
# This is roughly consistent with Dataproc 2.0+.
YARN_MEMORY_FRACTION = 0.9
# Give 80% of the memory YARN schedules to the JVM Heap space.
# This is probably conservative on machines with more memory, but it is a
# traditional rule of thumb.
HEAP_MEMORY_RATIO = 0.8
# Schedule slightly more tasks than vCPUs. This was found to be optimal for
# sorting 240 GB on standard GCE virtual machines with sufficient disk, using
# a grid search.
# TODO(pclay): Confirm results generalize to larger data sizes.
MAP_SLOTS_PER_CORE = 1.5
REDUCE_SLOTS_PER_CORE = 4 / 3
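# Illustrative sizing (hypothetical worker with 16 vCPUs and 64 GiB of RAM,
# i.e. 65536 MB; not measured output) showing how _RenderConfig below turns
# these constants into per-task memory:
#   yarn_memory_mb   = int(65536 * 0.9)  = 58982
#   usable_memory_mb = 58982 - 1024      = 57958
#   maps_per_node    = int(16 * 1.5)     = 24
#   map_memory_mb    = 57958 // 24       = 2414
#   map_heap_mb      = int(2414 * 0.8)   = 1931
#   reduces_per_node = int(16 * 4 / 3)   = 21
#   reduce_memory_mb = 57958 // 21       = 2759
#   reduce_heap_mb   = int(2759 * 0.8)   = 2207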
def _RenderConfig(
vm,
master,
workers,
memory_fraction=YARN_MEMORY_FRACTION):
"""Load Hadoop Condfiguration on VM."""
# Use first worker to get worker configuration
worker = workers[0]
num_workers = len(workers)
worker_cores = worker.NumCpusForBenchmark()
yarn_memory_mb = int((vm.total_memory_kb / 1024) * memory_fraction)
# Reserve 1 GB per worker for AppMaster containers.
usable_memory_mb = yarn_memory_mb - 1024
  # YARN generally schedules based on memory (and ignores cores). We invert
  # this by calculating memory in terms of cores. This means that changing
  # machine memory will not change scheduling; it simply changes the memory
  # given to each task.
maps_per_node = int(worker_cores * MAP_SLOTS_PER_CORE)
map_memory_mb = usable_memory_mb // maps_per_node
map_heap_mb = int(map_memory_mb * HEAP_MEMORY_RATIO)
reduces_per_node = int(worker_cores * REDUCE_SLOTS_PER_CORE)
reduce_memory_mb = usable_memory_mb // reduces_per_node
reduce_heap_mb = int(reduce_memory_mb * HEAP_MEMORY_RATIO)
# This property is only used for generating data like teragen.
  # Divide by 2 to avoid tiny files on large clusters.
num_map_tasks = maps_per_node * num_workers
# This determines the number of reduce tasks in Terasort and is critical to
# scale with the cluster.
num_reduce_tasks = reduces_per_node * num_workers
if vm.scratch_disks:
# TODO(pclay): support multiple scratch disks. A current suboptimal
# workaround is RAID0 local_ssds with --num_striped_disks.
scratch_dir = posixpath.join(vm.GetScratchDir(), 'hadoop')
else:
scratch_dir = posixpath.join('/tmp/pkb/local_scratch', 'hadoop')
context = {
'master_ip': master.internal_ip,
'worker_ips': [vm.internal_ip for vm in workers],
'scratch_dir': scratch_dir,
'worker_vcpus': worker_cores,
'hadoop_private_key': HADOOP_PRIVATE_KEY,
'user': vm.user_name,
'yarn_memory_mb': yarn_memory_mb,
'map_memory_mb': map_memory_mb,
'map_heap_mb': map_heap_mb,
'num_map_tasks': num_map_tasks,
'reduce_memory_mb': reduce_memory_mb,
'reduce_heap_mb': reduce_heap_mb,
'num_reduce_tasks': num_reduce_tasks,
}
for file_name in DATA_FILES:
file_path = data.ResourcePath(file_name)
    # Hadoop 2.x names this file 'slaves' instead of 'workers'.
    if (file_name == 'hadoop/workers.j2' and
        int(FLAGS.hadoop_version.split('.')[0]) < 3):
file_name = 'hadoop/slaves.j2'
remote_path = posixpath.join(HADOOP_CONF_DIR,
os.path.basename(file_name))
if file_name.endswith('.j2'):
vm.RenderTemplate(file_path, os.path.splitext(remote_path)[0], context)
else:
vm.RemoteCopy(file_path, remote_path)
def _GetHDFSOnlineNodeCount(master):
cmd = '{0} dfsadmin -report'.format(posixpath.join(HADOOP_BIN, 'hdfs'))
stdout = master.RemoteCommand(cmd)[0]
avail_str = regex_util.ExtractGroup(r'Live datanodes\s+\((\d+)\):', stdout)
return int(avail_str)
def _GetYARNOnlineNodeCount(master):
cmd = '{0} node -list -all'.format(posixpath.join(HADOOP_BIN, 'yarn'))
stdout = master.RemoteCommand(cmd)[0]
return len(re.findall(r'RUNNING', stdout))
def ConfigureAndStart(master, workers, start_yarn=True):
"""Configure hadoop on a cluster.
Args:
master: VM. Master VM - will be the HDFS NameNode, YARN ResourceManager.
workers: List of VMs. Each VM will run an HDFS DataNode, YARN node.
start_yarn: bool. Start YARN and JobHistory server? Set to False if HDFS is
the only service required. Default: True.
"""
vms = [master] + workers
  # If there are no workers, set up pseudo-distributed mode, where the master
  # node runs the worker daemons.
workers = workers or [master]
fn = functools.partial(
_RenderConfig,
master=master,
workers=workers)
vm_util.RunThreaded(fn, vms)
master.RemoteCommand(
"rm -f {0} && ssh-keygen -q -t rsa -N '' -f {0}".format(
HADOOP_PRIVATE_KEY))
public_key = master.RemoteCommand('cat {0}.pub'.format(HADOOP_PRIVATE_KEY))[0]
def AddKey(vm):
vm.RemoteCommand('echo "{0}" >> ~/.ssh/authorized_keys'.format(public_key))
vm_util.RunThreaded(AddKey, vms)
context = {'hadoop_dir': HADOOP_DIR,
'vm_ips': [vm.internal_ip for vm in vms],
'start_yarn': start_yarn}
# HDFS setup and formatting, YARN startup
script_path = posixpath.join(HADOOP_DIR, 'start-hadoop.sh')
master.RenderTemplate(data.ResourcePath(START_HADOOP_SCRIPT),
script_path, context=context)
master.RemoteCommand('bash {0}'.format(script_path), should_log=True)
logging.info('Sleeping 10s for Hadoop nodes to join.')
time.sleep(10)
logging.info('Checking HDFS status.')
hdfs_online_count = _GetHDFSOnlineNodeCount(master)
if hdfs_online_count != len(workers):
raise ValueError('Not all nodes running HDFS: {0} < {1}'.format(
hdfs_online_count, len(workers)))
else:
logging.info('HDFS running on all %d workers', len(workers))
if start_yarn:
logging.info('Checking YARN status.')
yarn_online_count = _GetYARNOnlineNodeCount(master)
if yarn_online_count != len(workers):
raise ValueError('Not all nodes running YARN: {0} < {1}'.format(
yarn_online_count, len(workers)))
else:
logging.info('YARN running on all %d workers', len(workers))
def StopYARN(master):
"""Stop YARN on all nodes."""
master.RemoteCommand(posixpath.join(HADOOP_SBIN, 'stop-yarn.sh'))
def StopHDFS(master):
"""Stop HDFS on all nodes."""
master.RemoteCommand(posixpath.join(HADOOP_SBIN, 'stop-dfs.sh'))
def StopHistoryServer(master):
"""Stop the MapReduce JobHistory daemon."""
master.RemoteCommand('{0} stop historyserver'.format(
posixpath.join(HADOOP_SBIN, 'mr-jobhistory-daemon.sh')))
def StopAll(master):
"""Stop HDFS and YARN.
Args:
master: VM. HDFS NameNode/YARN ResourceManager.
"""
StopHistoryServer(master)
StopYARN(master)
StopHDFS(master)
def CleanDatanode(vm):
"""Delete Hadoop data from 'vm'."""
vm.RemoteCommand('rm -rf {0}'.format(
posixpath.join(vm.GetScratchDir(), 'hadoop')))
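# Minimal end-to-end sketch for this module (the `master`/`workers` VM objects
# are assumed to come from a PerfKit benchmark; the job-submission step is a
# placeholder, not a command this module provides):
#
#     for vm in [master] + workers:
#       vm.Install('hadoop')  # dispatches to YumInstall/AptInstall above
#     ConfigureAndStart(master, workers)
#     # ... run jobs via posixpath.join(HADOOP_BIN, 'hadoop') ...
#     StopAll(master)
#     vm_util.RunThreaded(CleanDatanode, workers)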
|
from scattertext.diachronic.DiachronicVisualizer import DiachronicVisualizer
class BubbleDiachronicVisualization(DiachronicVisualizer):
@staticmethod
def visualize(display_df):
        import altair as alt
        viridis = ['#440154', '#472c7a', '#3b518b', '#2c718e', '#21908d',
                   '#27ad81', '#5cc863', '#aadc32', '#fde725']
color_scale = alt.Scale(
domain=(display_df.dropna().trending.min(),
0,
display_df.dropna().trending.max()),
range=[viridis[0], viridis[len(viridis) // 2], viridis[-1]]
)
return alt.Chart(display_df).mark_circle().encode(
alt.X('variable'),
alt.Y('term'),
size='frequency',
color=alt.Color('trending:Q', scale=color_scale),
)
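# Minimal usage sketch (illustrative only): the column names mirror what
# `visualize` reads above (`term`, `variable`, `frequency`, `trending`), but
# the data values and output filename are made up for this example.
if __name__ == '__main__':
    import pandas as pd
    demo_df = pd.DataFrame({
        'term': ['economy', 'economy', 'health', 'health'],
        'variable': ['2019', '2020', '2019', '2020'],
        'frequency': [120, 180, 60, 95],
        'trending': [-0.4, 0.8, 0.0, 0.3],
    })
    chart = BubbleDiachronicVisualization.visualize(demo_df)
    chart.save('diachronic_bubbles.html')  # self-contained HTML via Altair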
|