from __future__ import division
import numbers
import numpy as np
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
def _outsize(x):
if isinstance(x, chainer.utils.collections_abc.Iterable):
if len(x) == 2:
return (None, ) + x
else:
return x
return None, x, x
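# For reference (illustrative values, assuming tuple inputs as in the checks
# below): _outsize(7) -> (None, 7, 7); _outsize((7, 7)) -> (None, 7, 7);
# _outsize((2, 7, 7)) -> (2, 7, 7).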
class PSROIAveragePooling2D(function.Function):
def __init__(self, outsize, spatial_scale, group_size):
out_c, out_h, out_w = _outsize(outsize)
if out_c is not None and \
not (isinstance(out_c, numbers.Integral) and out_c > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(out_c), out_c))
if not (isinstance(out_h, numbers.Integral) and out_h > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(out_h), out_h))
if not (isinstance(out_w, numbers.Integral) and out_w > 0):
raise TypeError(
'outsize[2] must be positive integer: {}, {}'
.format(type(out_w), out_w))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real)
and spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
if not (isinstance(group_size, numbers.Integral)
and group_size > 0):
raise TypeError(
'group_size must be positive integer: {}, {}'
.format(type(group_size), group_size))
self.out_c, self.out_h, self.out_w = out_c, out_h, out_w
self.spatial_scale = spatial_scale
self.group_size = group_size
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == np.float32,
x_type.ndim == 4,
roi_type.dtype == np.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == np.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0]
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
                    'input channel must be divisible by group_size * group_size: '
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = np.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
for i in six.moves.range(top_data.size):
n, ctop, ph, pw = np.unravel_index(i, top_data.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
hstart = int(np.floor(ph * bin_size_h + roi_start_h))
wstart = int(np.floor(pw * bin_size_w + roi_start_w))
hend = int(np.ceil((ph + 1) * bin_size_h + roi_start_h))
wend = int(np.ceil((pw + 1) * bin_size_w + roi_start_w))
hstart = min(max(hstart, 0), height)
wstart = min(max(wstart, 0), width)
hend = min(max(hend, 0), height)
wend = min(max(wend, 0), width)
gh = int(np.floor(ph * group_size / pooled_height))
gw = int(np.floor(pw * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
if hstart >= hend or wstart >= wend:
top_data[n, ctop, ph, pw] = 0
continue
top_data[n, ctop, ph, pw] = np.mean(
bottom_data[roi_batch_ind, c, hstart:hend, wstart:wend])
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
                    'input channel must be divisible by group_size * group_size: '
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = cuda.cupy.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois,
raw int32 bottom_roi_indices,
T spatial_scale, int32 channel,
int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size
''',
'T top_data',
'''
// pos in output filter
int ph = (i / pooled_width) % pooled_height;
int pw = i % pooled_width;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw) * bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
wstart = min(max(wstart, 0), width);
hend = min(max(hend, 0), height);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int data_offset = (roi_batch_ind * channel + c) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
out_sum += bottom_data[data_offset + bottom_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
top_data = is_empty? (T) 0. : out_sum / bin_area;
''', 'ps_roi_average_pooling_2d_fwd'
)(bottom_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, self.out_h, self.out_w, self.group_size,
top_data)
return top_data,
def backward_cpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
top_diff = gy[0]
height, width = self._bottom_data_shape[2:]
bottom_diff = np.zeros(self._bottom_data_shape, np.float32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
for i in six.moves.range(top_diff.size):
n, ctop, ph, pw = np.unravel_index(i, top_diff.shape)
roi_batch_ind = int(bottom_roi_indices[n])
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
hstart = int(np.floor(ph * bin_size_h + roi_start_h))
wstart = int(np.floor(pw * bin_size_w + roi_start_w))
hend = int(np.ceil((ph + 1) * bin_size_h + roi_start_h))
wend = int(np.ceil((pw + 1) * bin_size_w + roi_start_w))
hstart = min(max(hstart, 0), height)
wstart = min(max(wstart, 0), width)
hend = min(max(hend, 0), height)
wend = min(max(wend, 0), width)
gh = int(np.floor(ph * group_size / pooled_height))
gw = int(np.floor(pw * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
if (hstart >= hend) or (wstart >= wend):
continue
count = (hend - hstart) * (wend - wstart)
diff_val = top_diff[n, ctop, ph, pw] / count
bottom_diff[roi_batch_ind, c, hstart:hend, wstart:wend] += diff_val
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
channel, height, width = self._bottom_data_shape[1:]
out_c, out_h, out_w = gy[0].shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, np.float32)
cuda.elementwise(
'''
raw T top_diff, raw T bottom_rois,
raw int32 bottom_roi_indices,
T spatial_scale, int32 channel, int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size
''',
'raw T bottom_diff',
'''
int ph = (i / pooled_width) % pooled_height;
int pw = i % pooled_width;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
// [start, end) interval for spatial sampling
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw) * bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1.0) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1.0) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
wstart = min(max(wstart, 0), width);
hend = min(max(hend, 0), height);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_diff_offset = (roi_batch_ind * channel + c);
bottom_diff_offset = bottom_diff_offset * height * width;
int top_offset =
(n * pooled_dim + ctop) * pooled_height * pooled_width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? (T) 0. :
top_diff[top_offset + ph * pooled_width + pw] / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
atomicAdd(
&bottom_diff[bottom_diff_offset + bottom_index], diff_val);
}
}
''', 'ps_roi_average_pooling_2d_bwd'
)(gy[0], bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, out_h, out_w, self.group_size, bottom_diff,
size=gy[0].size)
return bottom_diff, None, None
def ps_roi_average_pooling_2d(
x, rois, roi_indices, outsize,
spatial_scale, group_size
):
"""Position Sensitive Region of Interest (ROI) Average pooling function.
    This function computes the position sensitive average of an input spatial
    patch over the given regions of interest. Each ROI is split into
    :math:`(group\_size, group\_size)` regions, and a position sensitive
    average is computed for each of these regions.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (array): Input roi. The shape is expected to
be :math:`(R, 4)`, and each datum is set as below:
(y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
roi_indices (array): Input roi indices. The shape is expected to
be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
outsize ((int, int, int) or (int, int) or int): Expected output size
            after pooling: (channel, height, width) or (height, width)
            or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
            The channel parameter is used to assert the input shape.
        spatial_scale (float): Scale by which the roi coordinates are resized.
group_size (int): Position sensitive group size.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing PSROIPooling:
`R-FCN <https://arxiv.org/abs/1605.06409>`_.
"""
return PSROIAveragePooling2D(outsize, spatial_scale,
group_size)(x, rois, roi_indices)
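if __name__ == '__main__':
    # A minimal usage sketch, not part of the original module: the shapes below
    # are illustrative assumptions chosen so that the input channel equals
    # outsize[0] * group_size * group_size (2 * 3 * 3 = 18).
    _x = np.random.randn(1, 18, 16, 16).astype(np.float32)
    _rois = np.array([[0., 0., 8., 8.]], dtype=np.float32)  # (y_min, x_min, y_max, x_max)
    _roi_indices = np.array([0], dtype=np.int32)
    _y = ps_roi_average_pooling_2d(_x, _rois, _roi_indices, outsize=(2, 3, 3),
                                   spatial_scale=1.0, group_size=3)
    assert _y.shape == (1, 2, 3, 3)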
|
import os
import tempfile
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, QObject, QProcess,
QFileSystemWatcher)
from qutebrowser.config import config
from qutebrowser.utils import message, log
from qutebrowser.misc import guiprocess
from qutebrowser.qt import sip
class ExternalEditor(QObject):
"""Class to simplify editing a text in an external editor.
Attributes:
_text: The current text before the editor is opened.
_filename: The name of the file to be edited.
_remove_file: Whether the file should be removed when the editor is
closed.
_proc: The GUIProcess of the editor.
_watcher: A QFileSystemWatcher to watch the edited file for changes.
Only set if watch=True.
_content: The last-saved text of the editor.
Signals:
file_updated: The text in the edited file was updated.
arg: The new text.
editing_finished: The editor process was closed.
"""
file_updated = pyqtSignal(str)
editing_finished = pyqtSignal()
def __init__(self, parent=None, watch=False):
super().__init__(parent)
self._filename = None
self._proc = None
self._remove_file = None
self._watcher = QFileSystemWatcher(parent=self) if watch else None
self._content = None
def _cleanup(self):
"""Clean up temporary files after the editor closed."""
assert self._remove_file is not None
if (self._watcher is not None and
not sip.isdeleted(self._watcher) and
self._watcher.files()):
failed = self._watcher.removePaths(self._watcher.files())
if failed:
log.procs.error("Failed to unwatch paths: {}".format(failed))
if self._filename is None or not self._remove_file:
# Could not create initial file.
return
assert self._proc is not None
try:
if self._proc.exit_status() != QProcess.CrashExit:
os.remove(self._filename)
except OSError as e:
# NOTE: Do not replace this with "raise CommandError" as it's
# executed async.
message.error("Failed to delete tempfile... ({})".format(e))
@pyqtSlot(int, QProcess.ExitStatus)
def _on_proc_closed(self, _exitcode, exitstatus):
"""Write the editor text into the form field and clean up tempfile.
Callback for QProcess when the editor was closed.
"""
if sip.isdeleted(self): # pragma: no cover
log.procs.debug("Ignoring _on_proc_closed for deleted editor")
return
log.procs.debug("Editor closed")
if exitstatus != QProcess.NormalExit:
# No error/cleanup here, since we already handle this in
# on_proc_error.
return
# do a final read to make sure we don't miss the last signal
self._on_file_changed(self._filename)
self.editing_finished.emit()
self._cleanup()
@pyqtSlot(QProcess.ProcessError)
def _on_proc_error(self, _err):
self._cleanup()
def edit(self, text, caret_position=None):
"""Edit a given text.
Args:
text: The initial text to edit.
caret_position: The position of the caret in the text.
"""
if self._filename is not None:
raise ValueError("Already editing a file!")
try:
self._filename = self._create_tempfile(text, 'qutebrowser-editor-')
except OSError as e:
message.error("Failed to create initial file: {}".format(e))
return
self._remove_file = True
line, column = self._calc_line_and_column(text, caret_position)
self._start_editor(line=line, column=column)
def backup(self):
"""Create a backup if the content has changed from the original."""
if not self._content:
return
try:
fname = self._create_tempfile(self._content,
'qutebrowser-editor-backup-')
message.info('Editor backup at {}'.format(fname))
except OSError as e:
message.error('Failed to create editor backup: {}'.format(e))
def _create_tempfile(self, text, prefix):
# Close while the external process is running, as otherwise systems
# with exclusive write access (e.g. Windows) may fail to update
# the file from the external editor, see
# https://github.com/qutebrowser/qutebrowser/issues/1767
with tempfile.NamedTemporaryFile(
mode='w', prefix=prefix,
encoding=config.val.editor.encoding,
delete=False) as fobj:
if text:
fobj.write(text)
return fobj.name
@pyqtSlot(str)
def _on_file_changed(self, path):
try:
with open(path, 'r', encoding=config.val.editor.encoding) as f:
text = f.read()
except OSError as e:
# NOTE: Do not replace this with "raise CommandError" as it's
# executed async.
message.error("Failed to read back edited file: {}".format(e))
return
log.procs.debug("Read back: {}".format(text))
if self._content != text:
self._content = text
self.file_updated.emit(text)
def edit_file(self, filename):
"""Edit the file with the given filename."""
if not os.path.exists(filename):
with open(filename, 'w', encoding='utf-8'):
pass
self._filename = filename
self._remove_file = False
self._start_editor()
def _start_editor(self, line=1, column=1):
"""Start the editor with the file opened as self._filename.
Args:
line: the line number to pass to the editor
column: the column number to pass to the editor
"""
self._proc = guiprocess.GUIProcess(what='editor', parent=self)
self._proc.finished.connect(self._on_proc_closed)
self._proc.error.connect(self._on_proc_error)
editor = config.val.editor.command
executable = editor[0]
if self._watcher:
assert self._filename is not None
ok = self._watcher.addPath(self._filename)
if not ok:
log.procs.error("Failed to watch path: {}"
.format(self._filename))
self._watcher.fileChanged.connect( # type: ignore[attr-defined]
self._on_file_changed)
args = [self._sub_placeholder(arg, line, column) for arg in editor[1:]]
log.procs.debug("Calling \"{}\" with args {}".format(executable, args))
self._proc.start(executable, args)
def _calc_line_and_column(self, text, caret_position):
r"""Calculate line and column numbers given a text and caret position.
Both line and column are 1-based indexes, because that's what most
editors use as line and column starting index. By "most" we mean at
least vim, nvim, gvim, emacs, atom, sublimetext, notepad++, brackets,
visual studio, QtCreator and so on.
To find the line we just count how many newlines there are before the
caret and add 1.
To find the column we calculate the difference between the caret and
the last newline before the caret.
For example in the text `aaa\nbb|bbb` (| represents the caret):
caret_position = 6
text[:caret_position] = `aaa\nbb`
text[:caret_position].count('\n') = 1
caret_position - text[:caret_position].rfind('\n') = 3
Thus line, column = 2, 3, and the caret is indeed in the second
line, third column
Args:
text: the text for which the numbers must be calculated
caret_position: the position of the caret in the text, or None
Return:
A (line, column) tuple of (int, int)
"""
if caret_position is None:
return 1, 1
line = text[:caret_position].count('\n') + 1
column = caret_position - text[:caret_position].rfind('\n')
return line, column
def _sub_placeholder(self, arg, line, column):
"""Substitute a single placeholder.
If the `arg` input to this function is a valid placeholder it will
be substituted with the appropriate value, otherwise it will be left
unchanged.
Args:
arg: an argument of editor.command.
line: the previously-calculated line number for the text caret.
column: the previously-calculated column number for the text caret.
Return:
The substituted placeholder or the original argument.
"""
replacements = {
'{}': self._filename,
'{file}': self._filename,
'{line}': str(line),
'{line0}': str(line-1),
'{column}': str(column),
'{column0}': str(column-1)
}
for old, new in replacements.items():
arg = arg.replace(old, new)
return arg
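# A hedged usage sketch (not part of qutebrowser; the slot name below is an
# illustrative assumption). A caller typically connects to file_updated and
# then hands the current text to edit():
#
#     editor = ExternalEditor(parent=some_qobject, watch=True)
#     editor.file_updated.connect(on_text_edited)  # on_text_edited(new_text)
#     editor.edit("initial text", caret_position=0)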
|
import logging
import re
import serial
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_FILENAME,
CONF_NAME,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_TIMEOUT = "timeout"
CONF_WRITE_TIMEOUT = "write_timeout"
DEFAULT_NAME = "Acer Projector"
DEFAULT_TIMEOUT = 1
DEFAULT_WRITE_TIMEOUT = 1
ECO_MODE = "ECO Mode"
ICON = "mdi:projector"
INPUT_SOURCE = "Input Source"
LAMP = "Lamp"
LAMP_HOURS = "Lamp Hours"
MODEL = "Model"
# Commands known to the projector
CMD_DICT = {
LAMP: "* 0 Lamp ?\r",
LAMP_HOURS: "* 0 Lamp\r",
INPUT_SOURCE: "* 0 Src ?\r",
ECO_MODE: "* 0 IR 052\r",
MODEL: "* 0 IR 035\r",
STATE_ON: "* 0 IR 001\r",
STATE_OFF: "* 0 IR 002\r",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_FILENAME): cv.isdevice,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(
CONF_WRITE_TIMEOUT, default=DEFAULT_WRITE_TIMEOUT
): cv.positive_int,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Connect with serial port and return Acer Projector."""
serial_port = config[CONF_FILENAME]
name = config[CONF_NAME]
timeout = config[CONF_TIMEOUT]
write_timeout = config[CONF_WRITE_TIMEOUT]
add_entities([AcerSwitch(serial_port, name, timeout, write_timeout)], True)
class AcerSwitch(SwitchEntity):
"""Represents an Acer Projector as a switch."""
def __init__(self, serial_port, name, timeout, write_timeout, **kwargs):
"""Init of the Acer projector."""
self.ser = serial.Serial(
port=serial_port, timeout=timeout, write_timeout=write_timeout, **kwargs
)
self._serial_port = serial_port
self._name = name
self._state = False
self._available = False
self._attributes = {
LAMP_HOURS: STATE_UNKNOWN,
INPUT_SOURCE: STATE_UNKNOWN,
ECO_MODE: STATE_UNKNOWN,
}
def _write_read(self, msg):
"""Write to the projector and read the return."""
ret = ""
        # Sometimes the projector won't answer for no apparent reason, or it
        # was disconnected during runtime. This way the projector can be
        # reconnected and will still work.
try:
if not self.ser.is_open:
self.ser.open()
msg = msg.encode("utf-8")
self.ser.write(msg)
            # The size is an empirical value; AFAIK there is no real limit and
            # no end character, so we will usually need to wait for the
            # timeout.
ret = self.ser.read_until(size=20).decode("utf-8")
except serial.SerialException:
_LOGGER.error("Problem communicating with %s", self._serial_port)
self.ser.close()
return ret
def _write_read_format(self, msg):
"""Write msg, obtain answer and format output."""
# answers are formatted as ***\answer\r***
awns = self._write_read(msg)
match = re.search(r"\r(.+)\r", awns)
if match:
return match.group(1)
return STATE_UNKNOWN
@property
def available(self):
"""Return if projector is available."""
return self._available
@property
def name(self):
"""Return name of the projector."""
return self._name
@property
def is_on(self):
"""Return if the projector is turned on."""
return self._state
@property
def state_attributes(self):
"""Return state attributes."""
return self._attributes
def update(self):
"""Get the latest state from the projector."""
msg = CMD_DICT[LAMP]
awns = self._write_read_format(msg)
if awns == "Lamp 1":
self._state = True
self._available = True
elif awns == "Lamp 0":
self._state = False
self._available = True
else:
self._available = False
for key in self._attributes:
msg = CMD_DICT.get(key)
if msg:
awns = self._write_read_format(msg)
self._attributes[key] = awns
def turn_on(self, **kwargs):
"""Turn the projector on."""
msg = CMD_DICT[STATE_ON]
self._write_read(msg)
        self._state = True
def turn_off(self, **kwargs):
"""Turn the projector off."""
msg = CMD_DICT[STATE_OFF]
self._write_read(msg)
        self._state = False
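if __name__ == "__main__":
    # Hedged illustration of the reply parsing done in _write_read_format();
    # the raw reply below is a made-up example, not captured from a projector.
    sample_reply = "*000\rLamp 1\r"
    match = re.search(r"\r(.+)\r", sample_reply)
    print(match.group(1) if match else STATE_UNKNOWN)  # prints "Lamp 1"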
|
import shutil
import tempfile
from radicale import Application, config
from radicale.tests import BaseTest
class TestBaseWebRequests(BaseTest):
"""Test web plugin."""
def setup(self):
self.configuration = config.load()
self.colpath = tempfile.mkdtemp()
self.configuration.update({
"storage": {"filesystem_folder": self.colpath,
# Disable syncing to disk for better performance
"_filesystem_fsync": "False"}},
"test", privileged=True)
self.application = Application(self.configuration)
def teardown(self):
shutil.rmtree(self.colpath)
def test_internal(self):
status, headers, _ = self.request("GET", "/.web")
assert status == 302
assert headers.get("Location") == ".web/"
_, answer = self.get("/.web/")
assert answer
self.post("/.web", check=405)
def test_none(self):
self.configuration.update({"web": {"type": "none"}}, "test")
self.application = Application(self.configuration)
_, answer = self.get("/.web")
assert answer
self.get("/.web/", check=404)
self.post("/.web", check=405)
def test_custom(self):
"""Custom web plugin."""
self.configuration.update({
"web": {"type": "radicale.tests.custom.web"}}, "test")
self.application = Application(self.configuration)
_, answer = self.get("/.web")
assert answer == "custom"
_, answer = self.post("/.web", "body content")
assert answer == "echo:body content"
|
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.translation import gettext as _
from weblate.lang.models import Language
from weblate.trans.models import Change
from weblate.utils.views import get_component, get_project, get_translation
class ChangesFeed(Feed):
"""Generic RSS feed for Weblate changes."""
def get_object(self, request, *args, **kwargs):
return request.user
def title(self):
return _("Recent changes in %s") % settings.SITE_TITLE
def description(self):
return _("All recent changes made using Weblate in %s.") % (settings.SITE_TITLE)
def link(self):
return reverse("home")
def items(self, obj):
return Change.objects.last_changes(obj)[:10]
def item_title(self, item):
return item.get_action_display()
def item_description(self, item):
return str(item)
def item_author_name(self, item):
return item.get_user_display(False)
def item_pubdate(self, item):
return item.timestamp
class TranslationChangesFeed(ChangesFeed):
"""RSS feed for changes in translation."""
# Arguments number differs from overridden method
# pylint: disable=arguments-differ
def get_object(self, request, project, component, lang):
return get_translation(request, project, component, lang)
def title(self, obj):
return _("Recent changes in %s") % obj
def description(self, obj):
return _("All recent changes made using Weblate in %s.") % obj
def link(self, obj):
return obj.get_absolute_url()
def items(self, obj):
return Change.objects.prefetch().filter(translation=obj).order()[:10]
class ComponentChangesFeed(TranslationChangesFeed):
"""RSS feed for changes in component."""
# Arguments number differs from overridden method
# pylint: disable=arguments-differ
def get_object(self, request, project, component):
return get_component(request, project, component)
def items(self, obj):
return Change.objects.prefetch().filter(component=obj).order()[:10]
class ProjectChangesFeed(TranslationChangesFeed):
"""RSS feed for changes in project."""
# Arguments number differs from overridden method
# pylint: disable=arguments-differ
def get_object(self, request, project):
return get_project(request, project)
def items(self, obj):
return Change.objects.prefetch().filter(project=obj).order()[:10]
class LanguageChangesFeed(TranslationChangesFeed):
"""RSS feed for changes in language."""
# Arguments number differs from overridden method
# pylint: disable=arguments-differ
def get_object(self, request, lang):
return get_object_or_404(Language, code=lang)
def items(self, obj):
return Change.objects.prefetch().filter(language=obj).order()[:10]
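# A hedged wiring sketch (illustrative only, not Weblate's actual URL
# configuration): Django syndication feeds like the ones above are typically
# exposed from a urls.py entry, for example:
#
#     from django.urls import path
#
#     urlpatterns = [
#         path("exports/rss/", ChangesFeed(), name="rss"),
#         path("exports/rss/<str:project>/", ProjectChangesFeed(), name="rss-project"),
#     ]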
|
from datetime import timedelta
import logging
import threading
import time
import mychevy.mychevy as mc
import voluptuous as vol
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.util import Throttle
DOMAIN = "mychevy"
UPDATE_TOPIC = DOMAIN
ERROR_TOPIC = f"{DOMAIN}_error"
MYCHEVY_SUCCESS = "success"
MYCHEVY_ERROR = "error"
NOTIFICATION_ID = "mychevy_website_notification"
NOTIFICATION_TITLE = "MyChevy website status"
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=30)
ERROR_SLEEP_TIME = timedelta(minutes=30)
CONF_COUNTRY = "country"
DEFAULT_COUNTRY = "us"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_COUNTRY, default=DEFAULT_COUNTRY): vol.All(
cv.string, vol.In(["us", "ca"])
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
class EVSensorConfig:
"""The EV sensor configuration."""
def __init__(
self, name, attr, unit_of_measurement=None, icon=None, extra_attrs=None
):
"""Create new sensor configuration."""
self.name = name
self.attr = attr
self.extra_attrs = extra_attrs or []
self.unit_of_measurement = unit_of_measurement
self.icon = icon
class EVBinarySensorConfig:
"""The EV binary sensor configuration."""
def __init__(self, name, attr, device_class=None):
"""Create new binary sensor configuration."""
self.name = name
self.attr = attr
self.device_class = device_class
def setup(hass, base_config):
"""Set up the mychevy component."""
config = base_config.get(DOMAIN)
email = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
country = config.get(CONF_COUNTRY)
hass.data[DOMAIN] = MyChevyHub(
mc.MyChevy(email, password, country), hass, base_config
)
hass.data[DOMAIN].start()
return True
class MyChevyHub(threading.Thread):
"""MyChevy Hub.
Connecting to the mychevy website is done through a selenium
webscraping process. That can only run synchronously. In order to
prevent blocking of other parts of Home Assistant the architecture
launches a polling loop in a thread.
When new data is received, sensors are updated, and hass is
signaled that there are updates. Sensors are not created until the
first update, which will be 60 - 120 seconds after the platform
starts.
"""
def __init__(self, client, hass, hass_config):
"""Initialize MyChevy Hub."""
super().__init__()
self._client = client
self.hass = hass
self.hass_config = hass_config
self.cars = []
self.status = None
self.ready = False
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update sensors from mychevy website.
This is a synchronous polling call that takes a very long time
(like 2 to 3 minutes long time)
"""
self._client.login()
self._client.get_cars()
self.cars = self._client.cars
if self.ready is not True:
discovery.load_platform(self.hass, "sensor", DOMAIN, {}, self.hass_config)
discovery.load_platform(
self.hass, "binary_sensor", DOMAIN, {}, self.hass_config
)
self.ready = True
self.cars = self._client.update_cars()
def get_car(self, vid):
"""Compatibility to work with one car."""
if self.cars:
for car in self.cars:
if car.vid == vid:
return car
return None
def run(self):
"""Thread run loop."""
# We add the status device first outside of the loop
# And then busy wait on threads
while True:
try:
_LOGGER.info("Starting mychevy loop")
self.update()
self.hass.helpers.dispatcher.dispatcher_send(UPDATE_TOPIC)
time.sleep(MIN_TIME_BETWEEN_UPDATES.seconds)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error updating mychevy data. "
"This probably means the OnStar link is down again"
)
self.hass.helpers.dispatcher.dispatcher_send(ERROR_TOPIC)
time.sleep(ERROR_SLEEP_TIME.seconds)
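if __name__ == "__main__":
    # Hedged illustration: validate a sample configuration dict against
    # CONFIG_SCHEMA. The credentials below are placeholders, not real values.
    _sample = {DOMAIN: {CONF_USERNAME: "user@example.com",
                        CONF_PASSWORD: "not-a-real-password"}}
    print(CONFIG_SCHEMA(_sample))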
|
import time
import atexit
import heapq
from threading import Thread
from plumbum.lib import IS_WIN32, six
try:
from queue import Queue, Empty as QueueEmpty
except ImportError:
from Queue import Queue, Empty as QueueEmpty # type: ignore
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO # type: ignore
#===================================================================================================
# utility functions
#===================================================================================================
def _check_process(proc, retcode, timeout, stdout, stderr):
proc.verify(retcode, timeout, stdout, stderr)
return proc.returncode, stdout, stderr
def _iter_lines_posix(proc, decode, linesize, line_timeout=None):
try:
from selectors import DefaultSelector, EVENT_READ
except ImportError:
# Pre Python 3.4 implementation
from select import select
def selector():
while True:
rlist, _, _ = select([proc.stdout, proc.stderr], [], [], line_timeout)
if not rlist and line_timeout:
raise ProcessLineTimedOut("popen line timeout expired", getattr(proc, "argv", None), getattr(proc, "machine", None))
for stream in rlist:
yield (stream is proc.stderr), decode(
stream.readline(linesize))
else:
# Python 3.4 implementation
def selector():
sel = DefaultSelector()
sel.register(proc.stdout, EVENT_READ, 0)
sel.register(proc.stderr, EVENT_READ, 1)
while True:
ready = sel.select(line_timeout)
if not ready and line_timeout:
raise ProcessLineTimedOut("popen line timeout expired", getattr(proc, "argv", None), getattr(proc, "machine", None))
for key, mask in ready:
yield key.data, decode(key.fileobj.readline(linesize))
for ret in selector():
yield ret
if proc.poll() is not None:
break
for line in proc.stdout:
yield 0, decode(line)
for line in proc.stderr:
yield 1, decode(line)
def _iter_lines_win32(proc, decode, linesize, line_timeout=None):
class Piper(Thread):
def __init__(self, fd, pipe):
super().__init__(name="PlumbumPiper%sThread" % fd)
self.pipe = pipe
self.fd = fd
self.empty = False
self.daemon = True
super().start()
def read_from_pipe(self):
return self.pipe.readline(linesize)
def run(self):
for line in iter(self.read_from_pipe, b''):
queue.put((self.fd, decode(line)))
# self.pipe.close()
if line_timeout is None:
line_timeout = float("inf")
queue = Queue()
pipers = [Piper(0, proc.stdout), Piper(1, proc.stderr)]
last_line_ts = time.time()
empty = True
while True:
try:
yield queue.get_nowait()
last_line_ts = time.time()
empty = False
except QueueEmpty:
empty = True
if time.time() - last_line_ts > line_timeout:
raise ProcessLineTimedOut("popen line timeout expired", getattr(proc, "argv", None), getattr(proc, "machine", None))
if proc.poll() is not None:
break
if empty:
time.sleep(0.1)
for piper in pipers:
piper.join()
while True:
try:
yield queue.get_nowait()
except QueueEmpty:
break
if IS_WIN32:
_iter_lines = _iter_lines_win32
else:
_iter_lines = _iter_lines_posix
#===================================================================================================
# Exceptions
#===================================================================================================
class ProcessExecutionError(EnvironmentError):
"""Represents the failure of a process. When the exit code of a terminated process does not
match the expected result, this exception is raised by :func:`run_proc
<plumbum.commands.run_proc>`. It contains the process' return code, stdout, and stderr, as
well as the command line used to create the process (``argv``)
"""
def __init__(self, argv, retcode, stdout, stderr, message=None):
Exception.__init__(self, argv, retcode, stdout, stderr)
self.message = message
self.argv = argv
self.retcode = retcode
if six.PY3 and isinstance(stdout, six.bytes):
stdout = six.ascii(stdout)
if six.PY3 and isinstance(stderr, six.bytes):
stderr = six.ascii(stderr)
self.stdout = stdout
self.stderr = stderr
def __str__(self):
# avoid an import cycle
from plumbum.commands.base import shquote_list
stdout = "\n | ".join(str(self.stdout).splitlines())
stderr = "\n | ".join(str(self.stderr).splitlines())
cmd = " ".join(shquote_list(self.argv))
lines = []
if self.message:
lines = [
self.message,
"\nReturn code: | ", str(self.retcode)]
else:
lines = ["Unexpected exit code: ", str(self.retcode)]
cmd = "\n | ".join(cmd.splitlines())
lines += ["\nCommand line: | ", cmd]
if stdout:
lines += ["\nStdout: | ", stdout]
if stderr:
lines += ["\nStderr: | ", stderr]
return "".join(lines)
class ProcessTimedOut(Exception):
"""Raises by :func:`run_proc <plumbum.commands.run_proc>` when a ``timeout`` has been
specified and it has elapsed before the process terminated"""
def __init__(self, msg, argv):
Exception.__init__(self, msg, argv)
self.argv = argv
class ProcessLineTimedOut(Exception):
"""Raises by :func:`iter_lines <plumbum.commands.iter_lines>` when a ``line_timeout`` has been
specified and it has elapsed before the process yielded another line"""
def __init__(self, msg, argv, machine):
Exception.__init__(self, msg, argv, machine)
self.argv = argv
self.machine = machine
class CommandNotFound(AttributeError):
"""Raised by :func:`local.which <plumbum.machines.local.LocalMachine.which>` and
:func:`RemoteMachine.which <plumbum.machines.remote.RemoteMachine.which>` when a
command was not found in the system's ``PATH``"""
def __init__(self, program, path):
Exception.__init__(self, program, path)
self.program = program
self.path = path
#===================================================================================================
# Timeout thread
#===================================================================================================
class MinHeap(object):
def __init__(self, items=()):
self._items = list(items)
heapq.heapify(self._items)
def __len__(self):
return len(self._items)
def push(self, item):
heapq.heappush(self._items, item)
def pop(self):
        return heapq.heappop(self._items)
def peek(self):
return self._items[0]
_timeout_queue = Queue()
_shutting_down = False
def _timeout_thread_func():
waiting = MinHeap()
try:
while not _shutting_down:
if waiting:
ttk, _ = waiting.peek()
timeout = max(0, ttk - time.time())
else:
timeout = None
try:
proc, time_to_kill = _timeout_queue.get(timeout=timeout)
if proc is SystemExit:
# terminate
return
waiting.push((time_to_kill, proc))
except QueueEmpty:
pass
now = time.time()
while waiting:
ttk, proc = waiting.peek()
if ttk > now:
break
waiting.pop()
try:
if proc.poll() is None:
proc.kill()
proc._timed_out = True
except EnvironmentError:
pass
except Exception:
if _shutting_down:
# to prevent all sorts of exceptions during interpreter shutdown
pass
else:
raise
bgthd = Thread(target=_timeout_thread_func, name="PlumbumTimeoutThread")
bgthd.daemon = True
bgthd.start()
def _register_proc_timeout(proc, timeout):
if timeout is not None:
_timeout_queue.put((proc, time.time() + timeout))
def _shutdown_bg_threads():
global _shutting_down
_shutting_down = True
# Make sure this still exists (don't throw error in atexit!)
if _timeout_queue:
_timeout_queue.put((SystemExit, 0))
# grace period
bgthd.join(0.1)
atexit.register(_shutdown_bg_threads)
#===================================================================================================
# run_proc
#===================================================================================================
def run_proc(proc, retcode, timeout=None):
"""Waits for the given process to terminate, with the expected exit code
:param proc: a running Popen-like object, with all the expected methods.
:param retcode: the expected return (exit) code of the process. It defaults to 0 (the
convention for success). If ``None``, the return code is ignored.
It may also be a tuple (or any object that supports ``__contains__``)
of expected return codes.
:param timeout: the number of seconds (a ``float``) to allow the process to run, before
                    forcefully terminating it. If ``None``, no timeout is imposed; otherwise
the process is expected to terminate within that timeout value, or it will
be killed and :class:`ProcessTimedOut <plumbum.cli.ProcessTimedOut>`
will be raised
:returns: A tuple of (return code, stdout, stderr)
"""
_register_proc_timeout(proc, timeout)
stdout, stderr = proc.communicate()
proc._end_time = time.time()
if not stdout:
stdout = six.b("")
if not stderr:
stderr = six.b("")
if getattr(proc, "custom_encoding", None):
stdout = stdout.decode(proc.custom_encoding, "ignore")
stderr = stderr.decode(proc.custom_encoding, "ignore")
return _check_process(proc, retcode, timeout, stdout, stderr)
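# A hedged usage sketch (assumes a plumbum machine whose popen() objects carry
# the verify() method used by _check_process, which is how plumbum itself calls
# run_proc; the command is illustrative):
#
#     from plumbum import local
#     rc, out, err = run_proc(local["ls"]["-l"].popen(), retcode=0, timeout=10)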
#===================================================================================================
# iter_lines
#===================================================================================================
BY_POSITION = object()
BY_TYPE = object()
DEFAULT_ITER_LINES_MODE = BY_POSITION
def iter_lines(proc,
retcode=0,
timeout=None,
linesize=-1,
line_timeout=None,
mode=None,
_iter_lines=_iter_lines,
):
"""Runs the given process (equivalent to run_proc()) and yields a tuples of (out, err) line pairs.
If the exit code of the process does not match the expected one, :class:`ProcessExecutionError
<plumbum.commands.ProcessExecutionError>` is raised.
:param retcode: The expected return code of this process (defaults to 0).
In order to disable exit-code validation, pass ``None``. It may also
be a tuple (or any iterable) of expected exit codes.
:param timeout: The maximal amount of time (in seconds) to allow the process to run.
``None`` means no timeout is imposed; otherwise, if the process hasn't
terminated after that many seconds, the process will be forcefully
                    terminated and an exception will be raised
:param linesize: Maximum number of characters to read from stdout/stderr at each iteration.
``-1`` (default) reads until a b'\\n' is encountered.
:param line_timeout: The maximal amount of time (in seconds) to allow between consecutive lines in either stream.
Raise an :class:`ProcessLineTimedOut <plumbum.commands.ProcessLineTimedOut>` if the timeout has
been reached. ``None`` means no timeout is imposed.
:returns: An iterator of (out, err) line tuples.
"""
if mode is None:
mode = DEFAULT_ITER_LINES_MODE
assert mode in (BY_POSITION, BY_TYPE)
encoding = getattr(proc, "custom_encoding", None)
if encoding:
decode = lambda s: s.decode(encoding).rstrip()
else:
decode = lambda s: s
_register_proc_timeout(proc, timeout)
buffers = [StringIO(), StringIO()]
for t, line in _iter_lines(proc, decode, linesize, line_timeout):
# verify that the proc hasn't timed out yet
proc.verify(timeout=timeout, retcode=None, stdout=None, stderr=None)
buffers[t].write(line + "\n")
if mode is BY_POSITION:
ret = [None, None]
ret[t] = line
yield tuple(ret)
elif mode is BY_TYPE:
yield (t + 1), line # 1=stdout, 2=stderr
# this will take care of checking return code and timeouts
_check_process(proc, retcode, timeout, *(s.getvalue() for s in buffers))
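# A hedged usage sketch for iter_lines (same assumptions as for run_proc above;
# the command is illustrative). In the default BY_POSITION mode each yielded
# tuple has exactly one non-None element:
#
#     from plumbum import local
#     for out, err in iter_lines(local["ls"]["-l"].popen()):
#         if out is not None:
#             print("stdout:", out)
#         if err is not None:
#             print("stderr:", err)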
|
import io
import json
import logging
import os
import re
from babelfish import Language, language_converters
from datetime import datetime, timedelta
from dogpile.cache.api import NO_VALUE
from guessit import guessit
import pytz
import rarfile
from rarfile import RarFile, is_rarfile
from rebulk.loose import ensure_list
from requests import Session
from zipfile import ZipFile, is_zipfile
from . import ParserBeautifulSoup, Provider
from ..cache import SHOW_EXPIRATION_TIME, region
from ..exceptions import AuthenticationError, ConfigurationError, ProviderError, ServiceUnavailable
from ..matches import guess_matches
from ..subtitle import SUBTITLE_EXTENSIONS, Subtitle, fix_line_ending
from ..utils import sanitize
from ..video import Episode, Movie
logger = logging.getLogger(__name__)
language_converters.register('legendastv = subliminal.converters.legendastv:LegendasTVConverter')
# Configure :mod:`rarfile` to use the same path separator as :mod:`zipfile`
rarfile.PATH_SEP = '/'
#: Conversion map for types
type_map = {'M': 'movie', 'S': 'episode', 'C': 'episode'}
#: BR title season parsing regex
season_re = re.compile(r' - (?P<season>\d+)(\xaa|a|st|nd|rd|th) (temporada|season)', re.IGNORECASE)
#: Downloads parsing regex
downloads_re = re.compile(r'(?P<downloads>\d+) downloads')
#: Rating parsing regex
rating_re = re.compile(r'nota (?P<rating>\d+)')
#: Timestamp parsing regex
timestamp_re = re.compile(r'(?P<day>\d+)/(?P<month>\d+)/(?P<year>\d+) - (?P<hour>\d+):(?P<minute>\d+)')
#: Title with year/country regex
title_re = re.compile(r'^(?P<series>.*?)(?: \((?:(?P<year>\d{4})|(?P<country>[A-Z]{2}))\))?$')
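# For illustration (hypothetical inputs): title_re.match('Breaking Bad (2008)')
# yields series='Breaking Bad', year='2008', country=None, while
# title_re.match('Lost (US)') yields series='Lost', year=None, country='US'.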
#: Cache key for releases
releases_key = __name__ + ':releases|{archive_id}|{archive_name}'
class LegendasTVArchive(object):
"""LegendasTV Archive.
:param str id: identifier.
:param str name: name.
:param bool pack: contains subtitles for multiple episodes.
    :param bool featured: featured.
:param str link: link.
:param int downloads: download count.
:param int rating: rating (0-10).
:param timestamp: timestamp.
:type timestamp: datetime.datetime
"""
def __init__(self, id, name, pack, featured, link, downloads=0, rating=0, timestamp=None):
#: Identifier
self.id = id
#: Name
self.name = name
#: Pack
self.pack = pack
#: Featured
self.featured = featured
#: Link
self.link = link
#: Download count
self.downloads = downloads
#: Rating (0-10)
self.rating = rating
#: Timestamp
self.timestamp = timestamp
#: Compressed content as :class:`rarfile.RarFile` or :class:`zipfile.ZipFile`
self.content = None
def __repr__(self):
return '<%s [%s] %r>' % (self.__class__.__name__, self.id, self.name)
class LegendasTVSubtitle(Subtitle):
"""LegendasTV Subtitle."""
provider_name = 'legendastv'
def __init__(self, language, type, title, year, imdb_id, season, archive, name):
super(LegendasTVSubtitle, self).__init__(language, page_link=archive.link)
self.type = type
self.title = title
self.year = year
self.imdb_id = imdb_id
self.season = season
self.archive = archive
self.name = name
@property
def id(self):
return '%s-%s' % (self.archive.id, self.name.lower())
@property
def info(self):
return self.name
def get_matches(self, video, hearing_impaired=False):
matches = guess_matches(video, {
'title': self.title,
'year': self.year
})
# episode
if isinstance(video, Episode) and self.type == 'episode':
# imdb_id
if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
matches.add('series_imdb_id')
# movie
elif isinstance(video, Movie) and self.type == 'movie':
# imdb_id
if video.imdb_id and self.imdb_id == video.imdb_id:
matches.add('imdb_id')
# name
matches |= guess_matches(video, guessit(self.name, {'type': self.type}))
return matches
class LegendasTVProvider(Provider):
"""LegendasTV Provider.
:param str username: username.
:param str password: password.
"""
languages = {Language.fromlegendastv(l) for l in language_converters['legendastv'].codes}
server_url = 'http://legendas.tv/'
subtitle_class = LegendasTVSubtitle
def __init__(self, username=None, password=None):
# Provider needs UNRAR installed. If not available raise ConfigurationError
try:
rarfile.custom_check([rarfile.UNRAR_TOOL], True)
except rarfile.RarExecError:
raise ConfigurationError('UNRAR tool not available')
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = self.user_agent
# login
if self.username and self.password:
logger.info('Logging in')
data = {'_method': 'POST', 'data[User][username]': self.username, 'data[User][password]': self.password}
r = self.session.post(self.server_url + 'login', data, allow_redirects=False, timeout=10)
raise_for_status(r)
soup = ParserBeautifulSoup(r.content, ['html.parser'])
if soup.find('div', {'class': 'alert-error'}, string=re.compile(u'Usuário ou senha inválidos')):
raise AuthenticationError(self.username)
logger.debug('Logged in')
self.logged_in = True
def terminate(self):
# logout
if self.logged_in:
logger.info('Logging out')
r = self.session.get(self.server_url + 'users/logout', allow_redirects=False, timeout=10)
raise_for_status(r)
logger.debug('Logged out')
self.logged_in = False
self.session.close()
@staticmethod
def is_valid_title(title, title_id, sanitized_title, season, year):
"""Check if is a valid title."""
sanitized_result = sanitize(title['title'])
if sanitized_result != sanitized_title:
logger.debug("Mismatched title, discarding title %d (%s)",
title_id, sanitized_result)
return
# episode type
if season:
# discard mismatches on type
if title['type'] != 'episode':
logger.debug("Mismatched 'episode' type, discarding title %d (%s)", title_id, sanitized_result)
return
# discard mismatches on season
if 'season' not in title or title['season'] != season:
logger.debug('Mismatched season %s, discarding title %d (%s)',
title.get('season'), title_id, sanitized_result)
return
# movie type
else:
# discard mismatches on type
if title['type'] != 'movie':
logger.debug("Mismatched 'movie' type, discarding title %d (%s)", title_id, sanitized_result)
return
# discard mismatches on year
if year is not None and 'year' in title and title['year'] != year:
logger.debug("Mismatched movie year, discarding title %d (%s)", title_id, sanitized_result)
return
return True
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
def search_titles(self, title, season, title_year):
"""Search for titles matching the `title`.
        For episodes, each season has its own title.
:param str title: the title to search for.
:param int season: season of the title
:param int title_year: year of the title
:return: found titles.
:rtype: dict
"""
titles = {}
sanitized_titles = [sanitize(title)]
ignore_characters = {'\'', '.'}
if any(c in title for c in ignore_characters):
sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters))
for sanitized_title in sanitized_titles:
# make the query
if season:
logger.info('Searching episode title %r for season %r', sanitized_title, season)
else:
logger.info('Searching movie title %r', sanitized_title)
r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10)
raise_for_status(r)
results = json.loads(r.text)
# loop over results
for result in results:
source = result['_source']
# extract id
title_id = int(source['id_filme'])
# extract type
title = {'type': type_map[source['tipo']]}
# extract title, year and country
name, year, country = title_re.match(source['dsc_nome']).groups()
title['title'] = name
# extract imdb_id
if source['id_imdb'] != '0':
if not source['id_imdb'].startswith('tt'):
title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
else:
title['imdb_id'] = source['id_imdb']
# extract season
if title['type'] == 'episode':
if source['temporada'] and source['temporada'].isdigit():
title['season'] = int(source['temporada'])
else:
match = season_re.search(source['dsc_nome_br'])
if match:
title['season'] = int(match.group('season'))
else:
logger.debug('No season detected for title %d (%s)', title_id, name)
# extract year
if year:
title['year'] = int(year)
elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
# year is based on season air date hence the adjustment
title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1
# add title only if is valid
# Check against title without ignored chars
if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year):
titles[title_id] = title
logger.debug('Found %d titles', len(titles))
return titles
@region.cache_on_arguments(expiration_time=timedelta(minutes=15).total_seconds())
def get_archives(self, title_id, language_code, title_type, season, episodes):
"""Get the archive list from a given `title_id`, `language_code`, `title_type`, `season` and `episode`.
:param int title_id: title id.
:param int language_code: language code.
:param str title_type: episode or movie
:param int season: season
:param list episodes: episodes
:return: the archives.
:rtype: list of :class:`LegendasTVArchive`
"""
archives = []
page = 0
while True:
# get the archive page
url = self.server_url + 'legenda/busca/-/{language}/-/{page}/{title}'.format(
language=language_code, page=page, title=title_id)
r = self.session.get(url)
raise_for_status(r)
# parse the results
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
for archive_soup in soup.select('div.list_element > article > div > div.f_left'):
# create archive
archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2],
archive_soup.a.text,
'pack' in archive_soup.parent['class'],
'destaque' in archive_soup.parent['class'],
self.server_url + archive_soup.a['href'][1:])
# clean name of path separators and pack flags
clean_name = archive.name.replace('/', '-')
if archive.pack and clean_name.startswith('(p)'):
clean_name = clean_name[3:]
# guess from name
guess = guessit(clean_name, {'type': title_type})
# episode
if season and episodes:
# discard mismatches on episode in non-pack archives
# Guessit may return int for single episode or list for multi-episode
# Check if archive name has multiple episodes releases on it
if not archive.pack and 'episode' in guess:
wanted_episode = set(episodes)
archive_episode = set(ensure_list(guess['episode']))
if not wanted_episode.intersection(archive_episode):
logger.debug('Mismatched episode %s, discarding archive: %s', guess['episode'], clean_name)
continue
# extract text containing downloads, rating and timestamp
data_text = archive_soup.find('p', class_='data').text
# match downloads
archive.downloads = int(downloads_re.search(data_text).group('downloads'))
# match rating
match = rating_re.search(data_text)
if match:
archive.rating = int(match.group('rating'))
# match timestamp and validate it
time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()}
archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data))
if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc):
raise ProviderError('Archive timestamp is in the future')
# add archive
logger.info('Found archive for title %d and language %d at page %s: %s',
title_id, language_code, page, archive)
archives.append(archive)
# stop on last page
if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None:
break
# increment page count
page += 1
logger.debug('Found %d archives', len(archives))
return archives
def download_archive(self, archive):
"""Download an archive's :attr:`~LegendasTVArchive.content`.
:param archive: the archive to download :attr:`~LegendasTVArchive.content` of.
:type archive: :class:`LegendasTVArchive`
"""
logger.info('Downloading archive %s', archive.id)
r = self.session.get(self.server_url + 'downloadarquivo/{}'.format(archive.id))
raise_for_status(r)
# open the archive
archive_stream = io.BytesIO(r.content)
if is_rarfile(archive_stream):
logger.debug('Identified rar archive')
archive.content = RarFile(archive_stream)
elif is_zipfile(archive_stream):
logger.debug('Identified zip archive')
archive.content = ZipFile(archive_stream)
else:
raise ValueError('Not a valid archive')
def query(self, language, title, season=None, episodes=None, year=None):
# search for titles
titles = self.search_titles(title, season, year)
subtitles = []
# iterate over titles
for title_id, t in titles.items():
logger.info('Getting archives for title %d and language %d', title_id, language.legendastv)
archives = self.get_archives(title_id, language.legendastv, t['type'], season, episodes or [])
if not archives:
logger.info('No archives found for title %d and language %d', title_id, language.legendastv)
# iterate over title's archives
for a in archives:
# compute an expiration time based on the archive timestamp
expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds()
# attempt to get the releases from the cache
cache_key = releases_key.format(archive_id=a.id, archive_name=a.name)
releases = region.get(cache_key, expiration_time=expiration_time)
# the releases are not in cache or cache is expired
if releases == NO_VALUE:
logger.info('Releases not found in cache')
# download archive
self.download_archive(a)
# extract the releases
releases = []
for name in a.content.namelist():
# discard the legendastv file
if name.startswith('Legendas.tv'):
continue
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
releases.append(name)
# cache the releases
region.set(cache_key, releases)
# iterate over releases
for r in releases:
subtitle = self.subtitle_class(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'),
t.get('season'), a, r)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
season = None
episodes = []
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
season = video.season
episodes = video.episodes
else:
titles = [video.title] + video.alternative_titles
for title in titles:
subtitles = [s for l in languages for s in
self.query(l, title, season=season, episodes=episodes, year=video.year)]
if subtitles:
return subtitles
return []
def download_subtitle(self, subtitle):
# download archive in case we previously hit the releases cache and didn't download it
if subtitle.archive.content is None:
self.download_archive(subtitle.archive)
# extract subtitle's content
subtitle.content = fix_line_ending(subtitle.archive.content.read(subtitle.name))
def raise_for_status(r):
    # The site replies with HTTP 200 even when it is down for maintenance,
    # so the body text has to be checked.
if 'Em breve estaremos de volta' in r.text:
raise ServiceUnavailable
else:
r.raise_for_status()
|
revision = "ac483cfeb230"
down_revision = "b29e2c4bf8c9"
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.alter_column(
"certificates",
"name",
existing_type=sa.VARCHAR(length=128),
type_=sa.String(length=256),
)
def downgrade():
op.alter_column(
"certificates",
"name",
existing_type=sa.VARCHAR(length=256),
type_=sa.String(length=128),
)
|
import unittest
from homeassistant.components.command_line import binary_sensor as command_line
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers import template
from tests.common import get_test_home_assistant
class TestCommandSensorBinarySensor(unittest.TestCase):
"""Test the Command line Binary sensor."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.hass.stop)
def test_setup(self):
"""Test sensor setup."""
config = {
"name": "Test",
"command": "echo 1",
"payload_on": "1",
"payload_off": "0",
"command_timeout": 15,
}
devices = []
def add_dev_callback(devs, update):
"""Add callback to add devices."""
for dev in devs:
devices.append(dev)
command_line.setup_platform(self.hass, config, add_dev_callback)
assert 1 == len(devices)
entity = devices[0]
entity.update()
assert "Test" == entity.name
assert STATE_ON == entity.state
def test_template(self):
"""Test setting the state with a template."""
data = command_line.CommandSensorData(self.hass, "echo 10", 15)
entity = command_line.CommandBinarySensor(
self.hass,
data,
"test",
None,
"1.0",
"0",
template.Template("{{ value | multiply(0.1) }}", self.hass),
)
entity.update()
assert STATE_ON == entity.state
def test_sensor_off(self):
"""Test setting the state with a template."""
data = command_line.CommandSensorData(self.hass, "echo 0", 15)
entity = command_line.CommandBinarySensor(
self.hass, data, "test", None, "1", "0", None
)
entity.update()
assert STATE_OFF == entity.state
|
import logging
import requests
from homeassistant.components.camera import Camera
from . import DOMAIN
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up access to BloomSky cameras."""
if discovery_info is not None:
return
bloomsky = hass.data[DOMAIN]
for device in bloomsky.devices.values():
add_entities([BloomSkyCamera(bloomsky, device)])
class BloomSkyCamera(Camera):
"""Representation of the images published from the BloomSky's camera."""
def __init__(self, bs, device):
"""Initialize access to the BloomSky camera images."""
super().__init__()
self._name = device["DeviceName"]
self._id = device["DeviceID"]
self._bloomsky = bs
self._url = ""
self._last_url = ""
# last_image will store images as they are downloaded so that the
# frequent updates in home-assistant don't keep poking the server
# to download the same image over and over.
self._last_image = ""
self._logger = logging.getLogger(__name__)
def camera_image(self):
"""Update the camera's image if it has changed."""
try:
self._url = self._bloomsky.devices[self._id]["Data"]["ImageURL"]
self._bloomsky.refresh_devices()
# If the URL hasn't changed then the image hasn't changed.
if self._url != self._last_url:
response = requests.get(self._url, timeout=10)
self._last_url = self._url
self._last_image = response.content
except requests.exceptions.RequestException as error:
self._logger.error("Error getting bloomsky image: %s", error)
return None
return self._last_image
@property
def unique_id(self):
"""Return a unique ID."""
return self._id
@property
def name(self):
"""Return the name of this BloomSky device."""
return self._name
|
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
import numpy as np
import unittest
from chainercv import functions
from tests.functions_tests.test_ps_roi_average_pooling_2d import _outsize
@testing.parameterize(*testing.product({
'sampling_ratio': [(np.int(1), np.int(2)), None, 1, 2, (None, 3), (1, 2)],
'spatial_scale': [np.float(0.6), np.int(1), 0.6, 1.0, 2.0],
'outsize': [(np.int(2), np.int(4), np.int(4)), (2, 4, 4), (4, 4), 4],
}))
class TestPSROIAverageAlign2D(unittest.TestCase):
def setUp(self):
self.N = 3
self.group_size = 2
self.out_c, self.out_h, self.out_w = _outsize(self.outsize)
if self.out_c is None:
self.out_c = 2
self.n_channels = self.group_size * self.group_size * self.out_c
self.x = np.arange(
self.N * self.n_channels * 10 * 12,
dtype=np.float32).reshape((self.N, self.n_channels, 10, 12))
np.random.shuffle(self.x)
self.x = 2 * self.x / self.x.size - 1
self.x = self.x.astype(np.float32)
self.rois = np.array(
[[0, 0, 7, 7],
[1, 0, 5, 12],
[0, 1, 10, 5],
[3, 3, 4, 4]],
dtype=np.float32
)
self.roi_indices = np.array([0, 2, 1, 0], dtype=np.int32)
self.n_roi = self.rois.shape[0]
self.out_h, self.out_w = 4, 4
self.gy = np.random.uniform(
-1, 1, (self.n_roi, self.out_c, self.out_h, self.out_w))
self.gy = self.gy.astype(np.float32)
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, x_data, roi_data, roi_index_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
y = functions.ps_roi_average_align_2d(
x, rois, roi_indices, self.outsize,
self.spatial_scale, self.group_size,
sampling_ratio=self.sampling_ratio)
self.assertEqual(y.data.dtype, np.float32)
y_data = cuda.to_cpu(y.data)
self.assertEqual(
(self.n_roi, self.out_c, self.out_h, self.out_w), y_data.shape)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.rois, self.roi_indices)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices))
def check_backward(self, x_data, roi_data, roi_index_data, y_grad_data):
def f(x, rois, roi_indices):
return functions.ps_roi_average_align_2d(
x, rois, roi_indices, self.outsize,
self.spatial_scale, self.group_size,
sampling_ratio=self.sampling_ratio)
gradient_check.check_backward(
f, (x_data, roi_data, roi_index_data), y_grad_data,
no_grads=[False, True, True], **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.rois, self.roi_indices, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
def apply_backward(self, x_data, roi_data, roi_index_data, y_grad_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
y = functions.ps_roi_average_align_2d(
x, rois, roi_indices, self.outsize,
self.spatial_scale, self.group_size,
sampling_ratio=self.sampling_ratio)
x.cleargrad()
y.grad = y_grad_data
y.backward()
return x, y
@attr.gpu
@condition.retry(3)
def test_consistency_with_gpu(self):
x_cpu, y_cpu = self.apply_backward(
self.x, self.rois, self.roi_indices, self.gy)
x_gpu, y_gpu = self.apply_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
testing.assert_allclose(y_cpu.data, y_gpu.data)
testing.assert_allclose(x_cpu.grad, x_gpu.grad)
@testing.parameterize(*testing.product({
'outsize': [(2, 4, 4), (4, 4), 4]
}))
class TestPSROIAverageAlign2DFailure(unittest.TestCase):
def setUp(self):
self.N = 3
self.group_size = 2
self.spatial_scale = 0.6
out_c, _, _ = _outsize(self.outsize)
if out_c is None:
self.n_channels = self.group_size * self.group_size * 2 - 1
else:
self.n_channels = self.group_size * self.group_size * (out_c + 1)
self.x = np.arange(
self.N * self.n_channels * 10 * 12,
dtype=np.float32).reshape((self.N, self.n_channels, 10, 12))
np.random.shuffle(self.x)
self.x = 2 * self.x / self.x.size - 1
self.x = self.x.astype(np.float32)
self.rois = np.array(
[[0, 0, 7, 7],
[1, 0, 5, 12],
[0, 1, 10, 5],
[3, 3, 4, 4]],
dtype=np.float32
)
self.roi_indices = np.array([0, 2, 1, 0], dtype=np.int32)
self.n_roi = self.rois.shape[0]
def check_forward(self, x_data, roi_data, roi_index_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
functions.ps_roi_average_align_2d(
x, rois, roi_indices, self.outsize,
self.spatial_scale, self.group_size)
@condition.retry(3)
def test_invalid_outsize_cpu(self):
with self.assertRaises(ValueError):
self.check_forward(self.x, self.rois, self.roi_indices)
@attr.gpu
@condition.retry(3)
def test_invalid_outsize_gpu(self):
with self.assertRaises(ValueError):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices))
testing.run_module(__name__, __file__)
|
import copy
import logging
import os
from perfkitbenchmarker import data
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import blaze
from perfkitbenchmarker.linux_packages import fortran
BLAZEMARK_FOLDER = 'blazemark'
BLAZEMARK_DIR = os.path.join(blaze.BLAZE_DIR, BLAZEMARK_FOLDER)
CONFIG_TEMPLATE = 'blazemark_config.j2'
CONFIG = 'config'
THROUGHPUT_HEADER_REGEX = (
r'(\w+[\w\- ]+\w+)\s*(\([0-9.]+% filled\))*\s*[\[\(]'
    r'([\w/]+)[\]\)]:([0-9\s.e\-+]+)')
THROUGHPUT_RESULT_REGEX = r'([0-9]+)\s*([0-9.e\-+]+)'
FILLED_REGEX = r'([0-9.]+)% filled'
LIBS = frozenset([
'C-like', 'Classic', 'Blaze', 'Boost uBLAS', 'Blitz++',
'GMM++', 'Armadillo', 'MTL', 'Eigen'])
BLAZEMARK_BINARIES = frozenset([
'cg', 'daxpy', 'dmatsvecmult', 'dvecdvecsub', 'mat3mat3mult',
'smatdmatmult', 'smattsmatadd', 'svectdvecmult', 'tdmattdmatmult',
'tmat3mat3mult', 'tsmatdmatmult', 'tsvecdmatmult', 'tvec6tmat6mult',
'complex1', 'dmatdmatadd', 'dmattdmatadd', 'dvecnorm', 'mat3tmat3mult',
'smatdvecmult', 'smattsmatmult', 'svectsvecmult', 'tdmattsmatadd',
'tmat3tmat3add', 'tsmatdvecmult', 'tsvecdvecmult', 'vec3vec3add',
'complex2', 'dmatdmatmult', 'dmattdmatmult', 'dvecscalarmult',
'mat3vec3mult', 'smatscalarmult', 'svecdvecadd', 'tdmatdmatadd',
'tdmattsmatmult', 'tmat3tmat3mult', 'tsmatsmatadd', 'tsvecsmatmult',
'vec6vec6add', 'complex3', 'dmatdmatsub', 'dmattrans', 'dvecsvecadd',
'mat6mat6add', 'smatsmatadd', 'svecdveccross', 'tdmatdmatmult',
'tdvecdmatmult', 'tmat3vec3mult', 'tsmatsmatmult', 'tsvecsvecmult',
'complex4', 'dmatdvecmult', 'dmattsmatadd', 'dvecsveccross', 'mat6mat6mult',
'smatsmatmult', 'svecdvecmult', 'tdmatdvecmult', 'tdvecdvecmult',
'tmat6mat6mult', 'tsmatsvecmult', 'tsvectdmatmult', 'complex5', 'dmatinv',
'dmattsmatmult', 'dvecsvecmult', 'mat6tmat6mult', 'smatsvecmult',
'svecscalarmult', 'tdmatsmatadd', 'tdvecsmatmult', 'tmat6tmat6add',
'tsmattdmatadd', 'tsvectsmatmult', 'complex6', 'dmatscalarmult',
'dvecdvecadd', 'dvectdvecmult', 'mat6vec6mult', 'smattdmatadd',
'svecsvecadd', 'tdmatsmatmult', 'tdvecsvecmult', 'tmat6tmat6mult',
'tsmattdmatmult', 'tvec3mat3mult', 'complex7', 'dmatsmatadd',
'dvecdveccross', 'dvectsvecmult', 'memorysweep', 'smattdmatmult',
'svecsveccross', 'tdmatsvecmult', 'tdvectdmatmult', 'tmat6vec6mult',
'tsmattsmatadd', 'tvec3tmat3mult', 'complex8', 'dmatsmatmult',
'dvecdvecmult', 'mat3mat3add', 'smatdmatadd', 'smattrans',
'svecsvecmult', 'tdmattdmatadd', 'tdvectsmatmult', 'tsmatdmatadd',
'tsmattsmatmult', 'tvec6mat6mult'])
def GetBinaries():
"""Find available blazemark binaries."""
return BLAZEMARK_BINARIES
def _SimplfyLibName(name):
"""Simply library name parsed from output.
Args:
name: string. Name parsed from blazemark output.
Returns:
A simplified name defined in LIBS.
"""
for lib in LIBS:
if lib in name:
return lib
return name
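# Illustrative behaviour (comment only, not executed): header names parsed
# from the output map onto the canonical names in LIBS, while unknown
# libraries pass through unchanged, e.g.
#   _SimplfyLibName('Classic operator overloading')  -> 'Classic'
#   _SimplfyLibName('Boost uBLAS')                   -> 'Boost uBLAS'
#   _SimplfyLibName('SomeOtherLib')                  -> 'SomeOtherLib'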
def _ParseResult(out, test):
"""Parse blazemark results.
Sample output:
https://bitbucket.org/blaze-lib/blaze/wiki/Blazemark#!command-line-parameters
Dense Vector/Dense Vector Addition:
C-like implementation [MFlop/s]:
100 1115.44
10000000 206.317
Classic operator overloading [MFlop/s]:
100 415.703
10000000 112.557
Blaze [MFlop/s]:
100 2602.56
10000000 292.569
Boost uBLAS [MFlop/s]:
100 1056.75
10000000 208.639
Blitz++ [MFlop/s]:
100 1011.1
10000000 207.855
GMM++ [MFlop/s]:
100 1115.42
10000000 207.699
Armadillo [MFlop/s]:
100 1095.86
10000000 208.658
MTL [MFlop/s]:
100 1018.47
10000000 209.065
Eigen [MFlop/s]:
100 2173.48
10000000 209.899
N=100, steps=55116257
C-like = 2.33322 (4.94123)
Classic = 6.26062 (13.2586)
Blaze = 1 (2.11777)
Boost uBLAS = 2.4628 (5.21565)
Blitz++ = 2.57398 (5.4511)
GMM++ = 2.33325 (4.94129)
Armadillo = 2.3749 (5.0295)
MTL = 2.55537 (5.41168)
Eigen = 1.19742 (2.53585)
N=10000000, steps=8
C-like = 1.41805 (0.387753)
Classic = 2.5993 (0.710753)
Blaze = 1 (0.27344)
Boost uBLAS = 1.40227 (0.383437)
Blitz++ = 1.40756 (0.384884)
GMM++ = 1.40862 (0.385172)
Armadillo = 1.40215 (0.383403)
MTL = 1.39941 (0.382656)
Eigen = 1.39386 (0.381136)
Args:
out: string. Blazemark output in raw string format.
test: string. Name of the test ran.
Returns:
    A list of samples. Each sample is a 4-tuple of (benchmark_name, value, unit,
metadata).
"""
matches = regex_util.ExtractAllMatches(THROUGHPUT_HEADER_REGEX, out)
results = []
for m in matches:
lib = _SimplfyLibName(m[0])
metadata = {}
filled = m[1]
if filled:
metadata['% filled'] = regex_util.ExtractFloat(FILLED_REGEX, filled)
unit = m[-2]
for v in regex_util.ExtractAllMatches(THROUGHPUT_RESULT_REGEX, m[-1]):
metadata['N'] = int(v[0])
results.append(sample.Sample(
'_'.join([test, lib, 'Throughput']), # Metric name
float(v[1]), # Value
unit, # Unit
copy.deepcopy(metadata))) # Metadata
logging.info('Results for %s:\n %s', test, results)
return results
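# A minimal sketch (illustrative only) of what the module regexes extract from
# one header block of the sample output above; the real parsing goes through
# regex_util inside _ParseResult:
#
#   >>> import re
#   >>> block = 'Blaze [MFlop/s]:\n   100 2602.56\n   10000000 292.569\n'
#   >>> name, _, unit, numbers = re.findall(THROUGHPUT_HEADER_REGEX, block)[0]
#   >>> name, unit
#   ('Blaze', 'MFlop/s')
#   >>> re.findall(THROUGHPUT_RESULT_REGEX, numbers)
#   [('100', '2602.56'), ('10000000', '292.569')]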
def RunTest(vm, test):
"""Run blazemark test on vm.
Args:
vm: VirtualMachine. The VM to run blazemark.
test: string. The test name to run.
Returns:
    A list of samples. Each sample is a 4-tuple of (benchmark_name, value, unit,
metadata).
"""
out, _ = vm.RemoteCommand(
'cd %s; export BLAZE_NUM_THREADS=%s; ./%s -only-blaze' % (
os.path.join(BLAZEMARK_DIR, 'bin'), vm.NumCpusForBenchmark(), test))
ret = []
try:
ret = _ParseResult(out, test)
except regex_util.NoMatchError:
logging.exception('Parsing failed for %s.\n', test)
return ret
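# For illustration, RunTest(vm, 'daxpy') on an 8-vCPU VM ends up running
# something like the following on the remote machine (paths depend on where
# blaze was installed):
#   cd <BLAZEMARK_DIR>/bin; export BLAZE_NUM_THREADS=8; ./daxpy -only-blaze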
def _Configure(vm):
"""Configure and build blazemark on vm."""
vm.RenderTemplate(
data.ResourcePath(CONFIG_TEMPLATE),
os.path.join(BLAZEMARK_DIR, CONFIG),
{'compiler': '"g++-5"',
'compiler_flags': (
'"-Wall -Wextra -Werror -Wshadow -Woverloaded-virtual -ansi -O3 '
'-mavx -DNDEBUG -fpermissive -ansi -O3 -DNDEBUG '
'-DBLAZE_USE_BOOST_THREADS --std=c++14"'),
'lapack_path': '"/tmp/pkb/lapack-3.6.1/lib"',
'lapack_libs': '"-llapack -lblas -L%s -lgfortran"'
% os.path.dirname(fortran.GetLibPath(vm))})
vm.RemoteCommand('cd %s; ./configure %s; make -j %s' % (
BLAZEMARK_DIR, CONFIG, vm.num_cpus))
def _Install(vm):
"""Install blazemark."""
for package in ['g++5', 'build_tools', 'boost', 'blaze', 'lapack']:
vm.Install(package)
_Configure(vm)
def YumInstall(vm):
"""Installs the blazemark package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the blazemark package on the VM."""
_Install(vm)
|
import asyncio
import logging
import queue
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import system_log
from homeassistant.core import callback
from tests.async_mock import MagicMock, patch
_LOGGER = logging.getLogger("test_logger")
BASIC_CONFIG = {"system_log": {"max_entries": 2}}
@pytest.fixture
def simple_queue():
"""Fixture that get the queue."""
simple_queue_fixed = queue.SimpleQueue()
with patch(
"homeassistant.components.system_log.queue.SimpleQueue",
return_value=simple_queue_fixed,
):
yield simple_queue_fixed
async def _async_block_until_queue_empty(hass, sq):
# Unfortunately we are stuck with polling
await hass.async_block_till_done()
while not sq.empty():
await asyncio.sleep(0.01)
hass.data[system_log.DOMAIN].acquire()
hass.data[system_log.DOMAIN].release()
await hass.async_block_till_done()
async def get_error_log(hass, hass_client, expected_count):
"""Fetch all entries from system_log via the API."""
client = await hass_client()
resp = await client.get("/api/error/all")
assert resp.status == 200
data = await resp.json()
assert len(data) == expected_count
return data
def _generate_and_log_exception(exception, log):
try:
raise Exception(exception)
except: # noqa: E722 pylint: disable=bare-except
_LOGGER.exception(log)
def assert_log(log, exception, message, level):
"""Assert that specified values are in a specific log entry."""
if not isinstance(message, list):
message = [message]
assert log["name"] == "test_logger"
assert exception in log["exception"]
assert message == log["message"]
assert level == log["level"]
assert "timestamp" in log
def get_frame(name):
"""Get log stack frame."""
return (name, 5, None, None)
async def test_normal_logs(hass, simple_queue, hass_client):
"""Test that debug and info are not logged."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
_LOGGER.debug("debug")
_LOGGER.info("info")
await _async_block_until_queue_empty(hass, simple_queue)
# Assert done by get_error_log
await get_error_log(hass, hass_client, 0)
async def test_exception(hass, simple_queue, hass_client):
"""Test that exceptions are logged and retrieved correctly."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
_generate_and_log_exception("exception message", "log message")
await _async_block_until_queue_empty(hass, simple_queue)
log = (await get_error_log(hass, hass_client, 1))[0]
assert_log(log, "exception message", "log message", "ERROR")
async def test_warning(hass, simple_queue, hass_client):
"""Test that warning are logged and retrieved correctly."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
_LOGGER.warning("warning message")
await _async_block_until_queue_empty(hass, simple_queue)
log = (await get_error_log(hass, hass_client, 1))[0]
assert_log(log, "", "warning message", "WARNING")
async def test_error(hass, simple_queue, hass_client):
"""Test that errors are logged and retrieved correctly."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
_LOGGER.error("error message")
await _async_block_until_queue_empty(hass, simple_queue)
log = (await get_error_log(hass, hass_client, 1))[0]
assert_log(log, "", "error message", "ERROR")
async def test_config_not_fire_event(hass, simple_queue):
"""Test that errors are not posted as events with default config."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
events = []
@callback
def event_listener(event):
"""Listen to events of type system_log_event."""
events.append(event)
hass.bus.async_listen(system_log.EVENT_SYSTEM_LOG, event_listener)
_LOGGER.error("error message")
await _async_block_until_queue_empty(hass, simple_queue)
assert len(events) == 0
async def test_error_posted_as_event(hass, simple_queue):
"""Test that error are posted as events."""
await async_setup_component(
hass, system_log.DOMAIN, {"system_log": {"max_entries": 2, "fire_event": True}}
)
events = []
@callback
def event_listener(event):
"""Listen to events of type system_log_event."""
events.append(event)
hass.bus.async_listen(system_log.EVENT_SYSTEM_LOG, event_listener)
_LOGGER.error("error message")
await _async_block_until_queue_empty(hass, simple_queue)
assert len(events) == 1
assert_log(events[0].data, "", "error message", "ERROR")
async def test_critical(hass, simple_queue, hass_client):
"""Test that critical are logged and retrieved correctly."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
_LOGGER.critical("critical message")
await _async_block_until_queue_empty(hass, simple_queue)
log = (await get_error_log(hass, hass_client, 1))[0]
assert_log(log, "", "critical message", "CRITICAL")
async def test_remove_older_logs(hass, simple_queue, hass_client):
"""Test that older logs are rotated out."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
_LOGGER.error("error message 1")
_LOGGER.error("error message 2")
_LOGGER.error("error message 3")
await _async_block_until_queue_empty(hass, simple_queue)
log = await get_error_log(hass, hass_client, 2)
assert_log(log[0], "", "error message 3", "ERROR")
assert_log(log[1], "", "error message 2", "ERROR")
def log_msg(nr=2):
"""Log an error at same line."""
_LOGGER.error("error message %s", nr)
async def test_dedupe_logs(hass, simple_queue, hass_client):
"""Test that duplicate log entries are dedupe."""
await async_setup_component(hass, system_log.DOMAIN, {})
_LOGGER.error("error message 1")
log_msg()
log_msg("2-2")
_LOGGER.error("error message 3")
await _async_block_until_queue_empty(hass, simple_queue)
log = await get_error_log(hass, hass_client, 3)
assert_log(log[0], "", "error message 3", "ERROR")
assert log[1]["count"] == 2
assert_log(log[1], "", ["error message 2", "error message 2-2"], "ERROR")
log_msg()
await _async_block_until_queue_empty(hass, simple_queue)
log = await get_error_log(hass, hass_client, 3)
assert_log(log[0], "", ["error message 2", "error message 2-2"], "ERROR")
assert log[0]["timestamp"] > log[0]["first_occurred"]
log_msg("2-3")
log_msg("2-4")
log_msg("2-5")
log_msg("2-6")
await _async_block_until_queue_empty(hass, simple_queue)
log = await get_error_log(hass, hass_client, 3)
assert_log(
log[0],
"",
[
"error message 2-2",
"error message 2-3",
"error message 2-4",
"error message 2-5",
"error message 2-6",
],
"ERROR",
)
async def test_clear_logs(hass, simple_queue, hass_client):
"""Test that the log can be cleared via a service call."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
_LOGGER.error("error message")
await _async_block_until_queue_empty(hass, simple_queue)
await hass.services.async_call(system_log.DOMAIN, system_log.SERVICE_CLEAR, {})
await _async_block_until_queue_empty(hass, simple_queue)
# Assert done by get_error_log
await get_error_log(hass, hass_client, 0)
async def test_write_log(hass):
"""Test that error propagates to logger."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
logger = MagicMock()
with patch("logging.getLogger", return_value=logger) as mock_logging:
await hass.services.async_call(
system_log.DOMAIN, system_log.SERVICE_WRITE, {"message": "test_message"}
)
await hass.async_block_till_done()
mock_logging.assert_called_once_with("homeassistant.components.system_log.external")
assert logger.method_calls[0] == ("error", ("test_message",))
async def test_write_choose_logger(hass):
"""Test that correct logger is chosen."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
with patch("logging.getLogger") as mock_logging:
await hass.services.async_call(
system_log.DOMAIN,
system_log.SERVICE_WRITE,
{"message": "test_message", "logger": "myLogger"},
)
await hass.async_block_till_done()
mock_logging.assert_called_once_with("myLogger")
async def test_write_choose_level(hass):
"""Test that correct logger is chosen."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
logger = MagicMock()
with patch("logging.getLogger", return_value=logger):
await hass.services.async_call(
system_log.DOMAIN,
system_log.SERVICE_WRITE,
{"message": "test_message", "level": "debug"},
)
await hass.async_block_till_done()
assert logger.method_calls[0] == ("debug", ("test_message",))
async def test_unknown_path(hass, simple_queue, hass_client):
"""Test error logged from unknown path."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
_LOGGER.findCaller = MagicMock(return_value=("unknown_path", 0, None, None))
_LOGGER.error("error message")
await _async_block_until_queue_empty(hass, simple_queue)
log = (await get_error_log(hass, hass_client, 1))[0]
assert log["source"] == ["unknown_path", 0]
async def async_log_error_from_test_path(hass, path, sq):
"""Log error while mocking the path."""
call_path = "internal_path.py"
with patch.object(
_LOGGER, "findCaller", MagicMock(return_value=(call_path, 0, None, None))
):
with patch(
"traceback.extract_stack",
MagicMock(
return_value=[
get_frame("main_path/main.py"),
get_frame(path),
get_frame(call_path),
get_frame("venv_path/logging/log.py"),
]
),
):
_LOGGER.error("error message")
await _async_block_until_queue_empty(hass, sq)
async def test_homeassistant_path(hass, simple_queue, hass_client):
"""Test error logged from Home Assistant path."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
with patch(
"homeassistant.components.system_log.HOMEASSISTANT_PATH",
new=["venv_path/homeassistant"],
):
await async_log_error_from_test_path(
hass, "venv_path/homeassistant/component/component.py", simple_queue
)
log = (await get_error_log(hass, hass_client, 1))[0]
assert log["source"] == ["component/component.py", 5]
async def test_config_path(hass, simple_queue, hass_client):
"""Test error logged from config path."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
with patch.object(hass.config, "config_dir", new="config"):
await async_log_error_from_test_path(
hass, "config/custom_component/test.py", simple_queue
)
log = (await get_error_log(hass, hass_client, 1))[0]
assert log["source"] == ["custom_component/test.py", 5]
|
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, HTTP_OK, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by ViaggiaTreno Data"
VIAGGIATRENO_ENDPOINT = (
"http://www.viaggiatreno.it/viaggiatrenonew/"
"resteasy/viaggiatreno/andamentoTreno/"
"{station_id}/{train_id}"
)
REQUEST_TIMEOUT = 5 # seconds
ICON = "mdi:train"
MONITORED_INFO = [
"categoria",
"compOrarioArrivoZeroEffettivo",
"compOrarioPartenzaZeroEffettivo",
"destinazione",
"numeroTreno",
"orarioArrivo",
"orarioPartenza",
"origine",
"subTitle",
]
DEFAULT_NAME = "Train {}"
CONF_NAME = "train_name"
CONF_STATION_ID = "station_id"
CONF_STATION_NAME = "station_name"
CONF_TRAIN_ID = "train_id"
ARRIVED_STRING = "Arrived"
CANCELLED_STRING = "Cancelled"
NOT_DEPARTED_STRING = "Not departed yet"
NO_INFORMATION_STRING = "No information for this train now"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TRAIN_ID): cv.string,
vol.Required(CONF_STATION_ID): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ViaggiaTreno platform."""
train_id = config.get(CONF_TRAIN_ID)
station_id = config.get(CONF_STATION_ID)
name = config.get(CONF_NAME)
if not name:
name = DEFAULT_NAME.format(train_id)
async_add_entities([ViaggiaTrenoSensor(train_id, station_id, name)])
async def async_http_request(hass, uri):
"""Perform actual request."""
try:
session = hass.helpers.aiohttp_client.async_get_clientsession(hass)
with async_timeout.timeout(REQUEST_TIMEOUT):
req = await session.get(uri)
if req.status != HTTP_OK:
return {"error": req.status}
json_response = await req.json()
return json_response
except (asyncio.TimeoutError, aiohttp.ClientError) as exc:
_LOGGER.error("Cannot connect to ViaggiaTreno API endpoint: %s", exc)
except ValueError:
_LOGGER.error("Received non-JSON data from ViaggiaTreno API endpoint")
class ViaggiaTrenoSensor(Entity):
"""Implementation of a ViaggiaTreno sensor."""
def __init__(self, train_id, station_id, name):
"""Initialize the sensor."""
self._state = None
self._attributes = {}
self._unit = ""
self._icon = ICON
self._station_id = station_id
self._name = name
self.uri = VIAGGIATRENO_ENDPOINT.format(
station_id=station_id, train_id=train_id
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def device_state_attributes(self):
"""Return extra attributes."""
self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
return self._attributes
@staticmethod
def has_departed(data):
"""Check if the train has actually departed."""
try:
first_station = data["fermate"][0]
if data["oraUltimoRilevamento"] or first_station["effettiva"]:
return True
except ValueError:
_LOGGER.error("Cannot fetch first station: %s", data)
return False
@staticmethod
def has_arrived(data):
"""Check if the train has already arrived."""
last_station = data["fermate"][-1]
if not last_station["effettiva"]:
return False
return True
@staticmethod
def is_cancelled(data):
"""Check if the train is cancelled."""
if data["tipoTreno"] == "ST" and data["provvedimento"] == 1:
return True
return False
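    # Minimal, hypothetical payload shapes for the helpers above (field names
    # come from the ViaggiaTreno responses used in this module; values are
    # invented for illustration):
    #   has_departed({"oraUltimoRilevamento": None,
    #                 "fermate": [{"effettiva": 1589900000000}]})  -> True
    #   is_cancelled({"tipoTreno": "ST", "provvedimento": 1})      -> True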
async def async_update(self):
"""Update state."""
uri = self.uri
res = await async_http_request(self.hass, uri)
if res.get("error", ""):
if res["error"] == 204:
self._state = NO_INFORMATION_STRING
self._unit = ""
else:
self._state = "Error: {}".format(res["error"])
self._unit = ""
else:
for i in MONITORED_INFO:
self._attributes[i] = res[i]
if self.is_cancelled(res):
self._state = CANCELLED_STRING
self._icon = "mdi:cancel"
self._unit = ""
elif not self.has_departed(res):
self._state = NOT_DEPARTED_STRING
self._unit = ""
elif self.has_arrived(res):
self._state = ARRIVED_STRING
self._unit = ""
else:
self._state = res.get("ritardo")
self._unit = TIME_MINUTES
self._icon = ICON
|
import diamond.collector
import urllib2
import json
class FluentdCollector(diamond.collector.Collector):
API_PATH = '/api/plugins.json'
def get_default_config_help(self):
config_help = super(FluentdCollector, self).get_default_config_help()
config_help.update({
'host': 'Fluentd host',
'port': 'Fluentd port',
'collect': 'Plugins and their metrics to collect'
})
return config_help
def get_default_config(self):
config = super(FluentdCollector, self).get_default_config()
config.update({
'host': 'localhost',
'port': '24220',
'path': 'fluentd',
'collect': {}
})
return config
def collect(self):
params = (self.config['host'], self.config['port'], self.API_PATH)
url = "http://%s:%s/%s" % params
res = urllib2.urlopen(url)
data = json.load(res)
result = self.parse_api_output(data)
for r in result:
self.publish(r[0], r[1])
def parse_api_output(self, status):
result = []
for p in status.get('plugins'):
if p['type'] in self.config['collect'].keys():
for m in self.config['collect'].get(p['type']):
tag = ".".join([p['type'], m])
result.append((tag, p.get(m)))
return result
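# Illustrative collector configuration (assumed shape; the plugin types and
# metric names depend on what fluentd's monitor_agent actually exposes):
#
#   collect:
#     forward:
#       - retry_count
#       - buffer_queue_length
#
# With that configuration, collect() publishes metrics such as
# 'forward.retry_count' and 'forward.buffer_queue_length'.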
|
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_ALIAS,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import PyNUTData, find_resources_in_config_entry
from .const import (
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
KEY_STATUS,
KEY_STATUS_DISPLAY,
SENSOR_NAME,
SENSOR_TYPES,
)
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
SENSOR_DICT = {
sensor_id: sensor_spec[SENSOR_NAME]
for sensor_id, sensor_spec in SENSOR_TYPES.items()
}
def _base_schema(discovery_info):
"""Generate base schema."""
base_schema = {}
if not discovery_info:
base_schema.update(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): str,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
}
)
base_schema.update(
{vol.Optional(CONF_USERNAME): str, vol.Optional(CONF_PASSWORD): str}
)
return vol.Schema(base_schema)
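# Note on the form above: when discovery_info is supplied (e.g. from zeroconf)
# the host and port are already known, so only the optional username/password
# fields are shown; manual setup asks for host and port as well.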
def _resource_schema_base(available_resources, selected_resources):
"""Resource selection schema."""
known_available_resources = {
sensor_id: sensor[SENSOR_NAME]
for sensor_id, sensor in SENSOR_TYPES.items()
if sensor_id in available_resources
}
if KEY_STATUS in known_available_resources:
known_available_resources[KEY_STATUS_DISPLAY] = SENSOR_TYPES[
KEY_STATUS_DISPLAY
][SENSOR_NAME]
return {
vol.Required(CONF_RESOURCES, default=selected_resources): cv.multi_select(
known_available_resources
)
}
def _ups_schema(ups_list):
"""UPS selection schema."""
return vol.Schema({vol.Required(CONF_ALIAS): vol.In(ups_list)})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from _base_schema with values provided by the user.
"""
host = data[CONF_HOST]
port = data[CONF_PORT]
alias = data.get(CONF_ALIAS)
username = data.get(CONF_USERNAME)
password = data.get(CONF_PASSWORD)
data = PyNUTData(host, port, alias, username, password)
await hass.async_add_executor_job(data.update)
status = data.status
if not status:
raise CannotConnect
return {"ups_list": data.ups_list, "available_resources": status}
def _format_host_port_alias(user_input):
"""Format a host, port, and alias so it can be used for comparison or display."""
host = user_input[CONF_HOST]
port = user_input[CONF_PORT]
alias = user_input.get(CONF_ALIAS)
if alias:
return f"{alias}@{host}:{port}"
return f"{host}:{port}"
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Network UPS Tools (NUT)."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the nut config flow."""
self.nut_config = {}
self.available_resources = {}
self.discovery_info = {}
self.ups_list = None
self.title = None
async def async_step_zeroconf(self, discovery_info):
"""Prepare configuration for a discovered nut device."""
self.discovery_info = discovery_info
await self._async_handle_discovery_without_unique_id()
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context["title_placeholders"] = {
CONF_PORT: discovery_info.get(CONF_PORT, DEFAULT_PORT),
CONF_HOST: discovery_info[CONF_HOST],
}
return await self.async_step_user()
async def async_step_import(self, user_input=None):
"""Handle the import."""
errors = {}
if user_input is not None:
if self._host_port_alias_already_configured(user_input):
return self.async_abort(reason="already_configured")
_, errors = await self._async_validate_or_error(user_input)
if not errors:
title = _format_host_port_alias(user_input)
return self.async_create_entry(title=title, data=user_input)
return self.async_show_form(
step_id="user", data_schema=_base_schema({}), errors=errors
)
async def async_step_user(self, user_input=None):
"""Handle the user input."""
errors = {}
if user_input is not None:
if self.discovery_info:
user_input.update(
{
CONF_HOST: self.discovery_info[CONF_HOST],
CONF_PORT: self.discovery_info.get(CONF_PORT, DEFAULT_PORT),
}
)
info, errors = await self._async_validate_or_error(user_input)
if not errors:
self.nut_config.update(user_input)
if len(info["ups_list"]) > 1:
self.ups_list = info["ups_list"]
return await self.async_step_ups()
if self._host_port_alias_already_configured(self.nut_config):
return self.async_abort(reason="already_configured")
self.available_resources.update(info["available_resources"])
return await self.async_step_resources()
return self.async_show_form(
step_id="user", data_schema=_base_schema(self.discovery_info), errors=errors
)
async def async_step_ups(self, user_input=None):
"""Handle the picking the ups."""
errors = {}
if user_input is not None:
self.nut_config.update(user_input)
if self._host_port_alias_already_configured(self.nut_config):
return self.async_abort(reason="already_configured")
info, errors = await self._async_validate_or_error(self.nut_config)
if not errors:
self.available_resources.update(info["available_resources"])
return await self.async_step_resources()
return self.async_show_form(
step_id="ups",
data_schema=_ups_schema(self.ups_list),
errors=errors,
)
async def async_step_resources(self, user_input=None):
"""Handle the picking the resources."""
if user_input is None:
return self.async_show_form(
step_id="resources",
data_schema=vol.Schema(
_resource_schema_base(self.available_resources, [])
),
)
self.nut_config.update(user_input)
title = _format_host_port_alias(self.nut_config)
return self.async_create_entry(title=title, data=self.nut_config)
def _host_port_alias_already_configured(self, user_input):
"""See if we already have a nut entry matching user input configured."""
existing_host_port_aliases = {
_format_host_port_alias(entry.data)
for entry in self._async_current_entries()
if CONF_HOST in entry.data
}
return _format_host_port_alias(user_input) in existing_host_port_aliases
async def _async_validate_or_error(self, config):
errors = {}
info = {}
try:
info = await validate_input(self.hass, config)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return info, errors
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for nut."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
resources = find_resources_in_config_entry(self.config_entry)
scan_interval = self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
)
info = await validate_input(self.hass, self.config_entry.data)
base_schema = _resource_schema_base(info["available_resources"], resources)
base_schema[
vol.Optional(CONF_SCAN_INTERVAL, default=scan_interval)
] = cv.positive_int
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(base_schema),
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
import pytest
from mne import open_docs, grade_to_tris
from mne.epochs import add_channels_epochs
from mne.utils import (copy_function_doc_to_method_doc, copy_doc,
linkcode_resolve, deprecated, deprecated_alias)
import webbrowser
@pytest.mark.parametrize('obj', (grade_to_tris, add_channels_epochs))
def test_doc_filling(obj):
"""Test that docs are filled properly."""
doc = obj.__doc__
assert 'verbose : ' in doc
if obj is add_channels_epochs:
assert 'keyword-argument only. Defaults to True if' in doc
def test_deprecated_alias():
"""Test deprecated_alias."""
def new_func():
"""Do something."""
pass
deprecated_alias('old_func', new_func)
assert old_func # noqa
assert 'has been deprecated in favor of new_func' in old_func.__doc__ # noqa
assert 'deprecated' not in new_func.__doc__
@deprecated('message')
def deprecated_func():
"""Do something."""
pass
@deprecated('message')
class deprecated_class(object):
def __init__(self):
pass
def test_deprecated():
"""Test deprecated function."""
pytest.deprecated_call(deprecated_func)
pytest.deprecated_call(deprecated_class)
def test_copy_doc():
"""Test decorator for copying docstrings."""
class A:
def m1():
"""Docstring for m1."""
pass
class B:
def m1():
pass
class C (A):
@copy_doc(A.m1)
def m1():
pass
assert C.m1.__doc__ == 'Docstring for m1.'
pytest.raises(ValueError, copy_doc(B.m1), C.m1)
def test_copy_function_doc_to_method_doc():
"""Test decorator for re-using function docstring as method docstrings."""
def f1(object, a, b, c):
"""Docstring for f1.
Parameters
----------
object : object
Some object. This description also has
blank lines in it.
a : int
Parameter a
b : int
Parameter b
"""
pass
def f2(object):
"""Docstring for f2.
Parameters
----------
object : object
Only one parameter
Returns
-------
nothing.
"""
pass
def f3(object):
"""Docstring for f3.
Parameters
----------
object : object
Only one parameter
"""
pass
def f4(object):
"""Docstring for f4."""
pass
def f5(object): # noqa: D410, D411, D414
"""Docstring for f5.
Parameters
----------
Returns
-------
nothing.
"""
pass
class A:
@copy_function_doc_to_method_doc(f1)
def method_f1(self, a, b, c):
pass
@copy_function_doc_to_method_doc(f2)
def method_f2(self):
"method_f3 own docstring"
pass
@copy_function_doc_to_method_doc(f3)
def method_f3(self):
pass
assert A.method_f1.__doc__ == """Docstring for f1.
Parameters
----------
a : int
Parameter a
b : int
Parameter b
"""
assert A.method_f2.__doc__ == """Docstring for f2.
Returns
-------
nothing.
method_f3 own docstring"""
assert A.method_f3.__doc__ == 'Docstring for f3.\n\n '
pytest.raises(ValueError, copy_function_doc_to_method_doc(f5), A.method_f1)
def myfun(x):
"""Check url."""
assert 'mne.tools' in x
def test_open_docs():
"""Test doc launching."""
old_tab = webbrowser.open_new_tab
try:
# monkey patch temporarily to prevent tabs from actually spawning
webbrowser.open_new_tab = myfun
open_docs()
open_docs('tutorials', 'dev')
open_docs('examples', 'stable')
pytest.raises(ValueError, open_docs, 'foo')
pytest.raises(ValueError, open_docs, 'api', 'foo')
finally:
webbrowser.open_new_tab = old_tab
def test_linkcode_resolve():
"""Test linkcode resolving."""
ex = '#L'
url = linkcode_resolve('py', dict(module='mne', fullname='Epochs'))
assert '/mne/epochs.py' + ex in url
url = linkcode_resolve('py', dict(module='mne',
fullname='compute_covariance'))
assert '/mne/cov.py' + ex in url
url = linkcode_resolve('py', dict(module='mne',
fullname='convert_forward_solution'))
assert '/mne/forward/forward.py' + ex in url
url = linkcode_resolve('py', dict(module='mne',
fullname='datasets.sample.data_path'))
assert '/mne/datasets/sample/sample.py' + ex in url
|
from __future__ import division
from bs4 import BeautifulSoup
from django.utils.functional import cached_property
from django.utils.html import strip_tags
from django.utils.text import Truncator
from zinnia.settings import PREVIEW_MAX_WORDS
from zinnia.settings import PREVIEW_MORE_STRING
from zinnia.settings import PREVIEW_SPLITTERS
class HTMLPreview(object):
"""
    Build an HTML preview of HTML content.
"""
def __init__(self, content, lead='',
splitters=PREVIEW_SPLITTERS,
max_words=PREVIEW_MAX_WORDS,
more_string=PREVIEW_MORE_STRING):
self._preview = None
self.lead = lead
self.content = content
self.splitters = splitters
self.max_words = max_words
self.more_string = more_string
@property
def preview(self):
"""
The preview is a cached property.
"""
if self._preview is None:
self._preview = self.build_preview()
return self._preview
@property
def has_more(self):
"""
Boolean telling if the preview has hidden content.
"""
return bool(self.content and self.preview != self.content)
def __str__(self):
"""
Method used to render the preview in templates.
"""
return str(self.preview)
def build_preview(self):
"""
Build the preview by:
        - Returning the lead attribute if it is not empty.
        - Otherwise, splitting the content on the first split marker found
          and building the preview from the part before the marker.
        - Otherwise, truncating the content to a fixed number of words.
"""
if self.lead:
return self.lead
for splitter in self.splitters:
if splitter in self.content:
return self.split(splitter)
return self.truncate()
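    # Illustrative behaviour (assuming the default PREVIEW_* settings, where
    # an HTML comment such as '<!-- more -->' acts as a split marker):
    #   HTMLPreview('<p>short</p>', lead='Intro').preview
    #       -> 'Intro' (a non-empty lead always wins)
    #   HTMLPreview('<p>before <!-- more --> after</p>').preview
    #       -> the markup before the marker, with more_string appended
    #   HTMLPreview('<p>%s</p>' % ('word ' * 500)).preview
    #       -> the content truncated to max_words words plus more_string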
def truncate(self):
"""
Truncate the content with the Truncator object.
"""
return Truncator(self.content).words(
self.max_words, self.more_string, html=True)
def split(self, splitter):
"""
Split the HTML content with a marker
without breaking closing markups.
"""
soup = BeautifulSoup(self.content.split(splitter)[0],
'html.parser')
last_string = soup.find_all(text=True)[-1]
last_string.replace_with(last_string.string + self.more_string)
return soup
@cached_property
def total_words(self):
"""
        Return the total number of words contained
in the content and in the lead.
"""
return len(strip_tags('%s %s' % (self.lead, self.content)).split())
@cached_property
def displayed_words(self):
"""
Return the number of words displayed in the preview.
"""
return (len(strip_tags(self.preview).split()) -
(len(self.more_string.split()) * int(not bool(self.lead))))
@cached_property
def remaining_words(self):
"""
Return the number of words remaining after the preview.
"""
return self.total_words - self.displayed_words
@cached_property
def displayed_percent(self):
"""
Return the percentage of the content displayed in the preview.
"""
return (self.displayed_words / self.total_words) * 100
@cached_property
def remaining_percent(self):
"""
Return the percentage of the content remaining after the preview.
"""
return (self.remaining_words / self.total_words) * 100
|
from html5lib.treebuilders import _base, etree as etree_builders
from lxml import html, etree
class DocumentType(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
class Document(object):
def __init__(self):
self._elementTree = None
self.childNodes = []
def appendChild(self, element):
self._elementTree.getroot().addnext(element._element)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
def __init__(self, *args, **kwargs):
html_builder = etree_builders.getETreeModule(html, fullTree=False)
etree_builder = etree_builders.getETreeModule(etree, fullTree=False)
self.elementClass = html_builder.Element
self.commentClass = etree_builder.Comment
_base.TreeBuilder.__init__(self, *args, **kwargs)
def reset(self):
_base.TreeBuilder.reset(self)
self.rootInserted = False
self.initialComments = []
self.doctype = None
def getDocument(self):
return self.document._elementTree
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(element.getchildren())
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, name, publicId, systemId):
doctype = self.doctypeClass(name, publicId, systemId)
self.doctype = doctype
def insertComment(self, data, parent=None):
if not self.rootInserted:
self.initialComments.append(data)
else:
_base.TreeBuilder.insertComment(self, data, parent)
def insertRoot(self, name):
buf = []
if self.doctype and self.doctype.name:
buf.append('<!DOCTYPE %s' % self.doctype.name)
if self.doctype.publicId is not None or self.doctype.systemId is not None:
buf.append(' PUBLIC "%s" "%s"' % (self.doctype.publicId,
self.doctype.systemId))
buf.append('>')
buf.append('<html></html>')
root = html.fromstring(''.join(buf))
# Append the initial comments:
for comment in self.initialComments:
root.addprevious(etree.Comment(comment))
# Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Add the root element to the internal child/open data structures
root_element = self.elementClass(name)
root_element._element = root
self.document.childNodes.append(root_element)
self.openElements.append(root_element)
self.rootInserted = True
|
from __future__ import division, print_function
from urwid.compat import PYTHON3, xrange
def _call_modified(fn):
def call_modified_wrapper(self, *args, **kwargs):
rval = fn(self, *args, **kwargs)
self._modified()
return rval
return call_modified_wrapper
class MonitoredList(list):
"""
This class can trigger a callback any time its contents are changed
with the usual list operations append, extend, etc.
"""
def _modified(self):
pass
def set_modified_callback(self, callback):
"""
Assign a callback function with no parameters that is called any
time the list is modified. Callback's return value is ignored.
>>> import sys
>>> ml = MonitoredList([1,2,3])
>>> ml.set_modified_callback(lambda: sys.stdout.write("modified\\n"))
>>> ml
MonitoredList([1, 2, 3])
>>> ml.append(10)
modified
>>> len(ml)
4
>>> ml += [11, 12, 13]
modified
>>> ml[:] = ml[:2] + ml[-2:]
modified
>>> ml
MonitoredList([1, 2, 12, 13])
"""
self._modified = callback
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, list(self))
__add__ = _call_modified(list.__add__)
__delitem__ = _call_modified(list.__delitem__)
if not PYTHON3:
__delslice__ = _call_modified(list.__delslice__)
__iadd__ = _call_modified(list.__iadd__)
__imul__ = _call_modified(list.__imul__)
__rmul__ = _call_modified(list.__rmul__)
__setitem__ = _call_modified(list.__setitem__)
if not PYTHON3:
__setslice__ = _call_modified(list.__setslice__)
append = _call_modified(list.append)
extend = _call_modified(list.extend)
insert = _call_modified(list.insert)
pop = _call_modified(list.pop)
remove = _call_modified(list.remove)
reverse = _call_modified(list.reverse)
sort = _call_modified(list.sort)
if hasattr(list, 'clear'):
clear = _call_modified(list.clear)
class MonitoredFocusList(MonitoredList):
"""
This class can trigger a callback any time its contents are modified,
before and/or after modification, and any time the focus index is changed.
"""
def __init__(self, *argl, **argd):
"""
This is a list that tracks one item as the focus item. If items
are inserted or removed it will update the focus.
>>> ml = MonitoredFocusList([10, 11, 12, 13, 14], focus=3)
>>> ml
MonitoredFocusList([10, 11, 12, 13, 14], focus=3)
>>> del(ml[1])
>>> ml
MonitoredFocusList([10, 12, 13, 14], focus=2)
>>> ml[:2] = [50, 51, 52, 53]
>>> ml
MonitoredFocusList([50, 51, 52, 53, 13, 14], focus=4)
>>> ml[4] = 99
>>> ml
MonitoredFocusList([50, 51, 52, 53, 99, 14], focus=4)
>>> ml[:] = []
>>> ml
MonitoredFocusList([], focus=None)
"""
focus = argd.pop('focus', 0)
super(MonitoredFocusList, self).__init__(*argl, **argd)
self._focus = focus
self._focus_modified = lambda ml, indices, new_items: None
def __repr__(self):
return "%s(%r, focus=%r)" % (
self.__class__.__name__, list(self), self.focus)
def _get_focus(self):
"""
Return the index of the item "in focus" or None if
the list is empty.
>>> MonitoredFocusList([1,2,3], focus=2)._get_focus()
2
>>> MonitoredFocusList()._get_focus()
"""
if not self:
return None
return self._focus
def _set_focus(self, index):
"""
        index -- index into this list; any index out of range will
        raise an IndexError, except when the list is empty, in which
        case the index passed is ignored.
This function may call self._focus_changed when the focus
is modified, passing the new focus position to the
callback just before changing the old focus setting.
That method may be overridden on the
instance with set_focus_changed_callback().
>>> ml = MonitoredFocusList([9, 10, 11])
>>> ml._set_focus(2); ml._get_focus()
2
>>> ml._set_focus(0); ml._get_focus()
0
>>> ml._set_focus(-2)
Traceback (most recent call last):
...
IndexError: focus index is out of range: -2
"""
if not self:
self._focus = 0
return
if index < 0 or index >= len(self):
raise IndexError('focus index is out of range: %s' % (index,))
if index != int(index):
raise IndexError('invalid focus index: %s' % (index,))
index = int(index)
if index != self._focus:
self._focus_changed(index)
self._focus = index
focus = property(_get_focus, _set_focus, doc="""
Get/set the focus index. This value is read as None when the list
is empty, and may only be set to a value between 0 and len(self)-1
or an IndexError will be raised.
""")
def _focus_changed(self, new_focus):
pass
def set_focus_changed_callback(self, callback):
"""
Assign a callback to be called when the focus index changes
for any reason. The callback is in the form:
callback(new_focus)
new_focus -- new focus index
>>> import sys
>>> ml = MonitoredFocusList([1,2,3], focus=1)
>>> ml.set_focus_changed_callback(lambda f: sys.stdout.write("focus: %d\\n" % (f,)))
>>> ml
MonitoredFocusList([1, 2, 3], focus=1)
>>> ml.append(10)
>>> ml.insert(1, 11)
focus: 2
>>> ml
MonitoredFocusList([1, 11, 2, 3, 10], focus=2)
>>> del ml[:2]
focus: 0
>>> ml[:0] = [12, 13, 14]
focus: 3
>>> ml.focus = 5
focus: 5
>>> ml
MonitoredFocusList([12, 13, 14, 2, 3, 10], focus=5)
"""
self._focus_changed = callback
def _validate_contents_modified(self, indices, new_items):
return None
def set_validate_contents_modified(self, callback):
"""
Assign a callback function to handle validating changes to the list.
This may raise an exception if the change should not be performed.
It may also return an integer position to be the new focus after the
list is modified, or None to use the default behaviour.
The callback is in the form:
callback(indices, new_items)
indices -- a (start, stop, step) tuple whose range covers the
items being modified
        new_items -- an iterable of items replacing those at range(*indices);
            empty if items are being removed; if step==1 this list may
            contain any number of items
"""
self._validate_contents_modified = callback
def _adjust_focus_on_contents_modified(self, slc, new_items=()):
"""
Default behaviour is to move the focus to the item following
any removed items, unless that item was simply replaced.
        Failing that, choose the last item in the list.
        Returns the focus position to use after the change is applied.
"""
num_new_items = len(new_items)
start, stop, step = indices = slc.indices(len(self))
num_removed = len(list(xrange(*indices)))
focus = self._validate_contents_modified(indices, new_items)
if focus is not None:
return focus
focus = self._focus
if step == 1:
if start + num_new_items <= focus < stop:
focus = stop
# adjust for added/removed items
if stop <= focus:
focus += num_new_items - (stop - start)
else:
if not num_new_items:
# extended slice being removed
if focus in xrange(start, stop, step):
focus += 1
# adjust for removed items
focus -= len(list(xrange(start, min(focus, stop), step)))
return min(focus, len(self) + num_new_items - num_removed -1)
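    # Illustrative case (comment only): with [0, 1, 2, 3, 4] and focus=2,
    # deleting the slice 1:3 removes items 1 and 2; the focus moves to the
    # item that followed the removed range (the value 3), which now sits at
    # index 1, so the result is MonitoredFocusList([0, 3, 4], focus=1).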
# override all the list methods that modify the list
def __delitem__(self, y):
"""
>>> ml = MonitoredFocusList([0,1,2,3,4], focus=2)
>>> del ml[3]; ml
MonitoredFocusList([0, 1, 2, 4], focus=2)
>>> del ml[-1]; ml
MonitoredFocusList([0, 1, 2], focus=2)
>>> del ml[0]; ml
MonitoredFocusList([1, 2], focus=1)
>>> del ml[1]; ml
MonitoredFocusList([1], focus=0)
>>> del ml[0]; ml
MonitoredFocusList([], focus=None)
>>> ml = MonitoredFocusList([5,4,6,4,5,4,6,4,5], focus=4)
>>> del ml[1::2]; ml
MonitoredFocusList([5, 6, 5, 6, 5], focus=2)
>>> del ml[::2]; ml
MonitoredFocusList([6, 6], focus=1)
>>> ml = MonitoredFocusList([0,1,2,3,4,6,7], focus=2)
>>> del ml[-2:]; ml
MonitoredFocusList([0, 1, 2, 3, 4], focus=2)
>>> del ml[-4:-2]; ml
MonitoredFocusList([0, 3, 4], focus=1)
>>> del ml[:]; ml
MonitoredFocusList([], focus=None)
"""
if isinstance(y, slice):
focus = self._adjust_focus_on_contents_modified(y)
else:
focus = self._adjust_focus_on_contents_modified(slice(y,
y+1 or None))
rval = super(MonitoredFocusList, self).__delitem__(y)
self._set_focus(focus)
return rval
def __setitem__(self, i, y):
"""
>>> def modified(indices, new_items):
... print("range%r <- %r" % (indices, new_items))
>>> ml = MonitoredFocusList([0,1,2,3], focus=2)
>>> ml.set_validate_contents_modified(modified)
>>> ml[0] = 9
range(0, 1, 1) <- [9]
>>> ml[2] = 6
range(2, 3, 1) <- [6]
>>> ml.focus
2
>>> ml[-1] = 8
range(3, 4, 1) <- [8]
>>> ml
MonitoredFocusList([9, 1, 6, 8], focus=2)
>>> ml[1::2] = [12, 13]
range(1, 4, 2) <- [12, 13]
>>> ml[::2] = [10, 11]
range(0, 4, 2) <- [10, 11]
>>> ml[-3:-1] = [21, 22, 23]
range(1, 3, 1) <- [21, 22, 23]
>>> ml
MonitoredFocusList([10, 21, 22, 23, 13], focus=2)
>>> ml[:] = []
range(0, 5, 1) <- []
>>> ml
MonitoredFocusList([], focus=None)
"""
if isinstance(i, slice):
focus = self._adjust_focus_on_contents_modified(i, y)
else:
focus = self._adjust_focus_on_contents_modified(slice(i, i+1 or None), [y])
rval = super(MonitoredFocusList, self).__setitem__(i, y)
self._set_focus(focus)
return rval
if not PYTHON3:
def __delslice__(self, i, j):
return self.__delitem__(slice(i,j))
def __setslice__(self, i, j, y):
return self.__setitem__(slice(i, j), y)
def __imul__(self, n):
"""
>>> def modified(indices, new_items):
... print("range%r <- %r" % (indices, list(new_items)))
>>> ml = MonitoredFocusList([0,1,2], focus=2)
>>> ml.set_validate_contents_modified(modified)
>>> ml *= 3
range(3, 3, 1) <- [0, 1, 2, 0, 1, 2]
>>> ml
MonitoredFocusList([0, 1, 2, 0, 1, 2, 0, 1, 2], focus=2)
>>> ml *= 0
range(0, 9, 1) <- []
>>> print(ml.focus)
None
"""
if n > 0:
focus = self._adjust_focus_on_contents_modified(
slice(len(self), len(self)), list(self)*(n-1))
else: # all contents are being removed
focus = self._adjust_focus_on_contents_modified(slice(0, len(self)))
rval = super(MonitoredFocusList, self).__imul__(n)
self._set_focus(focus)
return rval
def append(self, item):
"""
>>> def modified(indices, new_items):
... print("range%r <- %r" % (indices, new_items))
>>> ml = MonitoredFocusList([0,1,2], focus=2)
>>> ml.set_validate_contents_modified(modified)
>>> ml.append(6)
range(3, 3, 1) <- [6]
"""
focus = self._adjust_focus_on_contents_modified(
slice(len(self), len(self)), [item])
rval = super(MonitoredFocusList, self).append(item)
self._set_focus(focus)
return rval
def extend(self, items):
"""
>>> def modified(indices, new_items):
... print("range%r <- %r" % (indices, list(new_items)))
>>> ml = MonitoredFocusList([0,1,2], focus=2)
>>> ml.set_validate_contents_modified(modified)
>>> ml.extend((6,7,8))
range(3, 3, 1) <- [6, 7, 8]
"""
focus = self._adjust_focus_on_contents_modified(
slice(len(self), len(self)), items)
rval = super(MonitoredFocusList, self).extend(items)
self._set_focus(focus)
return rval
def insert(self, index, item):
"""
>>> ml = MonitoredFocusList([0,1,2,3], focus=2)
>>> ml.insert(-1, -1); ml
MonitoredFocusList([0, 1, 2, -1, 3], focus=2)
>>> ml.insert(0, -2); ml
MonitoredFocusList([-2, 0, 1, 2, -1, 3], focus=3)
>>> ml.insert(3, -3); ml
MonitoredFocusList([-2, 0, 1, -3, 2, -1, 3], focus=4)
"""
focus = self._adjust_focus_on_contents_modified(slice(index, index),
[item])
rval = super(MonitoredFocusList, self).insert(index, item)
self._set_focus(focus)
return rval
def pop(self, index=-1):
"""
>>> ml = MonitoredFocusList([-2,0,1,-3,2,3], focus=4)
>>> ml.pop(3); ml
-3
MonitoredFocusList([-2, 0, 1, 2, 3], focus=3)
>>> ml.pop(0); ml
-2
MonitoredFocusList([0, 1, 2, 3], focus=2)
>>> ml.pop(-1); ml
3
MonitoredFocusList([0, 1, 2], focus=2)
>>> ml.pop(2); ml
2
MonitoredFocusList([0, 1], focus=1)
"""
focus = self._adjust_focus_on_contents_modified(slice(index,
index+1 or None))
rval = super(MonitoredFocusList, self).pop(index)
self._set_focus(focus)
return rval
def remove(self, value):
"""
>>> ml = MonitoredFocusList([-2,0,1,-3,2,-1,3], focus=4)
>>> ml.remove(-3); ml
MonitoredFocusList([-2, 0, 1, 2, -1, 3], focus=3)
>>> ml.remove(-2); ml
MonitoredFocusList([0, 1, 2, -1, 3], focus=2)
>>> ml.remove(3); ml
MonitoredFocusList([0, 1, 2, -1], focus=2)
"""
index = self.index(value)
focus = self._adjust_focus_on_contents_modified(slice(index,
index+1 or None))
rval = super(MonitoredFocusList, self).remove(value)
self._set_focus(focus)
return rval
def reverse(self):
"""
>>> ml = MonitoredFocusList([0,1,2,3,4], focus=1)
>>> ml.reverse(); ml
MonitoredFocusList([4, 3, 2, 1, 0], focus=3)
"""
rval = super(MonitoredFocusList, self).reverse()
self._set_focus(max(0, len(self) - self._focus - 1))
return rval
def sort(self, **kwargs):
"""
>>> ml = MonitoredFocusList([-2,0,1,-3,2,-1,3], focus=4)
>>> ml.sort(); ml
MonitoredFocusList([-3, -2, -1, 0, 1, 2, 3], focus=5)
"""
if not self:
return
value = self[self._focus]
rval = super(MonitoredFocusList, self).sort(**kwargs)
self._set_focus(self.index(value))
return rval
if hasattr(list, 'clear'):
def clear(self):
focus = self._adjust_focus_on_contents_modified(slice(0, 0))
rval = super(MonitoredFocusList, self).clear()
self._set_focus(focus)
return rval
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
import io
import logging
import os
import docutils.core
import docutils.nodes
import docutils.transforms
import docutils.utils
import docutils.io
import docutils.readers.standalone
import docutils.writers.html5_polyglot
import docutils.parsers.rst.directives
from docutils.parsers.rst import roles
from nikola.nikola import LEGAL_VALUES
from nikola.metadata_extractors import MetaCondition
from nikola.plugin_categories import PageCompiler
from nikola.utils import (
makedirs,
write_metadata,
LocaleBorg,
map_metadata
)
class CompileRest(PageCompiler):
"""Compile reStructuredText into HTML."""
name = "rest"
friendly_name = "reStructuredText"
demote_headers = True
logger = None
supports_metadata = True
metadata_conditions = [(MetaCondition.config_bool, "USE_REST_DOCINFO_METADATA")]
def read_metadata(self, post, lang=None):
"""Read the metadata from a post, and return a metadata dict."""
if lang is None:
lang = LocaleBorg().current_lang
source_path = post.translated_source_path(lang)
# Silence reST errors, some of which are due to a different
# environment. Real issues will be reported while compiling.
null_logger = logging.getLogger('NULL')
null_logger.setLevel(1000)
with io.open(source_path, 'r', encoding='utf-8-sig') as inf:
data = inf.read()
_, _, _, document = rst2html(data, logger=null_logger, source_path=source_path, transforms=self.site.rst_transforms)
meta = {}
if 'title' in document:
meta['title'] = document['title']
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
meta[name] = value
# Put 'authors' meta field contents in 'author', too
if 'authors' in meta and 'author' not in meta:
meta['author'] = '; '.join(meta['authors'])
# Map metadata from other platforms to names Nikola expects (Issue #2817)
map_metadata(meta, 'rest_docinfo', self.site.config)
return meta
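    # Sketch of the shape of the returned dict (illustrative values only): for a
    # post whose docinfo carries a ``:tags:`` field and an ``Authors`` list,
    # read_metadata returns something like
    #   {'title': 'My post', 'tags': 'python, rest', 'authors': [...], 'author': '...'}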
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
"""Compile reST into HTML strings."""
# If errors occur, this will be added to the line number reported by
# docutils so the line number matches the actual line number (off by
# 7 with default metadata, could be more or less depending on the post).
add_ln = 0
if not is_two_file:
m_data, data = self.split_metadata(data, post, lang)
add_ln = len(m_data.splitlines()) + 1
default_template_path = os.path.join(os.path.dirname(__file__), 'template.txt')
settings_overrides = {
'initial_header_level': 1,
'record_dependencies': True,
'stylesheet_path': None,
'link_stylesheet': True,
'syntax_highlight': 'short',
# This path is not used by Nikola, but we need something to silence
# warnings about it from reST.
'math_output': 'mathjax /assets/js/mathjax.js',
'template': default_template_path,
'language_code': LEGAL_VALUES['DOCUTILS_LOCALES'].get(LocaleBorg().current_lang, 'en'),
'doctitle_xform': self.site.config.get('USE_REST_DOCINFO_METADATA'),
'file_insertion_enabled': self.site.config.get('REST_FILE_INSERTION_ENABLED'),
}
from nikola import shortcodes as sc
new_data, shortcodes = sc.extract_shortcodes(data)
if self.site.config.get('HIDE_REST_DOCINFO', False):
self.site.rst_transforms.append(RemoveDocinfo)
output, error_level, deps, _ = rst2html(
new_data, settings_overrides=settings_overrides, logger=self.logger, source_path=source_path, l_add_ln=add_ln, transforms=self.site.rst_transforms)
if not isinstance(output, str):
# To prevent some weird bugs here or there.
# Original issue: empty files. `output` became a bytestring.
output = output.decode('utf-8')
output, shortcode_deps = self.site.apply_shortcodes_uuid(output, shortcodes, filename=source_path, extra_context={'post': post})
return output, error_level, deps, shortcode_deps
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
makedirs(os.path.dirname(dest))
error_level = 100
with io.open(dest, "w+", encoding="utf-8") as out_file:
with io.open(source, "r", encoding="utf-8-sig") as in_file:
data = in_file.read()
output, error_level, deps, shortcode_deps = self.compile_string(data, source, is_two_file, post, lang)
out_file.write(output)
if post is None:
if deps.list:
self.logger.error(
"Cannot save dependencies for post {0} (post unknown)",
source)
else:
post._depfile[dest] += deps.list
post._depfile[dest] += shortcode_deps
if error_level < 3:
return True
else:
return False
def create_post(self, path, **kw):
"""Create a new post."""
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
# is_page is not used by create_post as of now.
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with io.open(path, "w+", encoding="utf-8") as fd:
if onefile:
fd.write(write_metadata(metadata, comment_wrap=False, site=self.site, compiler=self))
fd.write(content)
def set_site(self, site):
"""Set Nikola site."""
super().set_site(site)
self.config_dependencies = []
for plugin_info in self.get_compiler_extensions():
self.config_dependencies.append(plugin_info.name)
plugin_info.plugin_object.short_help = plugin_info.description
if not site.debug:
self.logger.level = logging.WARNING
def get_observer(settings):
"""Return an observer for the docutils Reporter."""
def observer(msg):
"""Report docutils/rest messages to a Nikola user.
Error code mapping:
+----------+----------+
| docutils | logging |
+----------+----------+
| DEBUG | DEBUG |
| INFO | INFO |
| WARNING | WARNING |
| ERROR | ERROR |
| SEVERE | CRITICAL |
+----------+----------+
"""
errormap = {
docutils.utils.Reporter.DEBUG_LEVEL: logging.DEBUG,
docutils.utils.Reporter.INFO_LEVEL: logging.INFO,
docutils.utils.Reporter.WARNING_LEVEL: logging.WARNING,
docutils.utils.Reporter.ERROR_LEVEL: logging.ERROR,
docutils.utils.Reporter.SEVERE_LEVEL: logging.CRITICAL
}
text = docutils.nodes.Element.astext(msg)
line = msg['line'] + settings['add_ln'] if 'line' in msg else ''
out = '[{source}{colon}{line}] {text}'.format(
source=settings['source'], colon=(':' if line else ''),
line=line, text=text)
settings['logger'].log(errormap[msg['level']], out)
return observer
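# Minimal sketch of how the observer is attached (mirrors NikolaReader.new_document
# below; the settings keys are the ones documented in rst2html further down):
#   settings = {'logger': logging.getLogger('nikola'), 'source': 'post.rst', 'add_ln': 0}
#   document.reporter.attach_observer(get_observer(settings))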
class NikolaReader(docutils.readers.standalone.Reader):
"""Nikola-specific docutils reader."""
config_section = 'nikola'
def __init__(self, *args, **kwargs):
"""Initialize the reader."""
self.transforms = kwargs.pop('transforms', [])
self.logging_settings = kwargs.pop('nikola_logging_settings', {})
docutils.readers.standalone.Reader.__init__(self, *args, **kwargs)
def get_transforms(self):
"""Get docutils transforms."""
return docutils.readers.standalone.Reader(self).get_transforms() + self.transforms
def new_document(self):
"""Create and return a new empty document tree (root node)."""
document = docutils.utils.new_document(self.source.source_path, self.settings)
document.reporter.stream = False
document.reporter.attach_observer(get_observer(self.logging_settings))
return document
def shortcode_role(name, rawtext, text, lineno, inliner,
options={}, content=[]):
"""Return a shortcode role that passes through raw inline HTML."""
return [docutils.nodes.raw('', text, format='html')], []
roles.register_canonical_role('raw-html', shortcode_role)
roles.register_canonical_role('html', shortcode_role)
roles.register_canonical_role('sc', shortcode_role)
def add_node(node, visit_function=None, depart_function=None):
"""Register a Docutils node class.
    This function is completely optional. It is the same concept as
`Sphinx add_node function <http://sphinx-doc.org/extdev/appapi.html#sphinx.application.Sphinx.add_node>`_.
For example::
class Plugin(RestExtension):
name = "rest_math"
def set_site(self, site):
self.site = site
directives.register_directive('math', MathDirective)
add_node(MathBlock, visit_Math, depart_Math)
return super().set_site(site)
class MathDirective(Directive):
def run(self):
node = MathBlock()
return [node]
class Math(docutils.nodes.Element): pass
def visit_Math(self, node):
self.body.append(self.starttag(node, 'math'))
def depart_Math(self, node):
self.body.append('</math>')
    For a full example, you can refer to the `Microdata plugin <https://plugins.getnikola.com/#microdata>`_.
"""
docutils.nodes._add_node_class_names([node.__name__])
if visit_function:
setattr(docutils.writers.html5_polyglot.HTMLTranslator, 'visit_' + node.__name__, visit_function)
if depart_function:
setattr(docutils.writers.html5_polyglot.HTMLTranslator, 'depart_' + node.__name__, depart_function)
# Output <code> for ``double backticks``. (Code and extra logic based on html4css1 translator)
def visit_literal(self, node):
"""Output <code> for double backticks."""
# special case: "code" role
classes = node.get('classes', [])
if 'code' in classes:
# filter 'code' from class arguments
node['classes'] = [cls for cls in classes if cls != 'code']
self.body.append(self.starttag(node, 'code', ''))
return
self.body.append(
self.starttag(node, 'code', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.in_word_wrap_point.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
                self.body.append('&nbsp;' * (len(token) - 1) + ' ')
self.body.append('</code>')
# Content already processed:
raise docutils.nodes.SkipNode
setattr(docutils.writers.html5_polyglot.HTMLTranslator, 'visit_literal', visit_literal)
def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
destination_path=None, reader=None,
parser=None, parser_name='restructuredtext', writer=None,
writer_name='html5_polyglot', settings=None, settings_spec=None,
settings_overrides=None, config_section='nikola',
enable_exit_status=None, logger=None, l_add_ln=0, transforms=None):
"""Set up & run a ``Publisher``, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client. For programmatic use with string I/O.
For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's how::
publish_parts(..., settings_overrides={'input_encoding': 'unicode'})
For a description of the parameters, see `publish_programmatically`.
WARNING: `reader` should be None (or NikolaReader()) if you want Nikola to report
reStructuredText syntax errors.
"""
if reader is None:
# For our custom logging, we have special needs and special settings we
# specify here.
# logger a logger from Nikola
# source source filename (docutils gets a string)
# add_ln amount of metadata lines (see comment in CompileRest.compile above)
reader = NikolaReader(transforms=transforms,
nikola_logging_settings={
'logger': logger, 'source': source_path,
'add_ln': l_add_ln
})
pub = docutils.core.Publisher(reader, parser, writer, settings=settings,
source_class=source_class,
destination_class=docutils.io.StringOutput)
pub.set_components(None, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
pub.set_source(source, None)
pub.settings._nikola_source_path = source_path
pub.set_destination(None, destination_path)
pub.publish(enable_exit_status=enable_exit_status)
return pub.writer.parts['docinfo'] + pub.writer.parts['fragment'], pub.document.reporter.max_level, pub.settings.record_dependencies, pub.document
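# Minimal usage sketch (illustrative only): rendering a reST string directly.
# Note that `transforms` should be a list (the default None would break
# NikolaReader.get_transforms) and the logger receives any reST messages.
#   import logging
#   html, max_level, deps, doc = rst2html(
#       "Hello *world*!\n", logger=logging.getLogger("rest"), transforms=[])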
# Alignment helpers for extensions
_align_options_base = ('left', 'center', 'right')
def _align_choice(argument):
return docutils.parsers.rst.directives.choice(argument, _align_options_base + ("none", ""))
class RemoveDocinfo(docutils.transforms.Transform):
"""Remove docinfo nodes."""
default_priority = 870
def apply(self):
"""Remove docinfo nodes."""
for node in self.document.traverse(docutils.nodes.docinfo):
node.parent.remove(node)
|
from json import loads
from homeassistant.components.advantage_air.const import (
ADVANTAGE_AIR_STATE_OFF,
ADVANTAGE_AIR_STATE_ON,
)
from homeassistant.components.switch import (
DOMAIN as SWITCH_DOMAIN,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF
from tests.components.advantage_air import (
TEST_SET_RESPONSE,
TEST_SET_URL,
TEST_SYSTEM_DATA,
TEST_SYSTEM_URL,
add_mock_config,
)
async def test_switch_async_setup_entry(hass, aioclient_mock):
    """Test switch platform setup and turn on/off services."""
aioclient_mock.get(
TEST_SYSTEM_URL,
text=TEST_SYSTEM_DATA,
)
aioclient_mock.get(
TEST_SET_URL,
text=TEST_SET_RESPONSE,
)
await add_mock_config(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
assert len(aioclient_mock.mock_calls) == 1
# Test Switch Entity
entity_id = "switch.ac_one_fresh_air"
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac1-freshair"
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: [entity_id]},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 3
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac1"]["info"]["freshAirStatus"] == ADVANTAGE_AIR_STATE_ON
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: [entity_id]},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 5
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac1"]["info"]["freshAirStatus"] == ADVANTAGE_AIR_STATE_OFF
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
|
import os
import sys
import time
import random
import colors
prompt = 'user@machine:~/autobahn-python$ '
def interkey_interval():
"""in milliseconds"""
# return 0 # makes testing faster
return (random.lognormvariate(0.0, 0.5) * 30.0) / 1000.0
return float(random.randrange(10, 50)) / 1000.0
def type_it_out(line):
for c in line:
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(interkey_interval())
def do_commands(lines):
for line in lines:
sys.stdout.write(colors.blue(prompt))
type_it_out(line)
time.sleep(0.5)
        print('')
os.system(colors.strip_color(line))
commands = [
"clear",
colors.red('# Welcome! Here we set up and run one basic'),
colors.red('# http://crossbar.io/autobahn example'),
colors.red('# (Note there are many other examples to try)'),
colors.red('#'),
colors.red("# I presume yo've got a clone of https://github.com/crossbario/autobahn-python"),
colors.red("# in ~/autobahn-python"),
"sleep 5",
"clear",
colors.red("# first, we create a virtualenv:"),
"virtualenv venv-autobahn",
"./venv-autobahn/bin/" + colors.bold("pip install -q --editable ."),
colors.red("# we also need a WAMP router"),
colors.red("# so we will use http://crossbar.io"),
"./venv-autobahn/bin/" + colors.bold("pip install -q crossbar"),
"clear",
colors.red("# we have installed the AutobahnPython checkout, and crossbar."),
colors.red("# the examples have a suitable crossbar configuration"),
"./venv-autobahn/bin/" + colors.bold("crossbar start --cbdir examples/router/.crossbar &"),
"sleep 2",
colors.red('# now we run a simple "backend" which registers some callable methods'),
"./venv-autobahn/bin/" + colors.bold("python examples/twisted/wamp/rpc/arguments/backend.py &"),
"sleep 2",
colors.red('# ...and a frontend that calls those methods'),
"./venv-autobahn/bin/" + colors.bold("python examples/twisted/wamp/rpc/arguments/frontend.py"),
colors.red('# Thanks for watching!'),
colors.red('# https://autobahn-python.readthedocs.io/en/latest/wamp/examples.html'),
"sleep 5",
]
if __name__ == '__main__':
do_commands(commands)
|
from datetime import timedelta
import arrow
import boto3
import pytest
from freezegun import freeze_time
from moto import mock_ses
from lemur.tests.factories import AuthorityFactory, CertificateFactory, EndpointFactory
@mock_ses
def verify_sender_email():
ses_client = boto3.client("ses", region_name="us-east-1")
ses_client.verify_email_identity(EmailAddress="[email protected]")
def test_needs_notification(app, certificate, notification):
from lemur.notifications.messaging import needs_notification
assert not needs_notification(certificate)
with pytest.raises(Exception):
notification.options = [
{"name": "interval", "value": 10},
{"name": "unit", "value": "min"},
]
certificate.notifications.append(notification)
needs_notification(certificate)
certificate.notifications[0].options = [
{"name": "interval", "value": 10},
{"name": "unit", "value": "days"},
]
assert not needs_notification(certificate)
delta = certificate.not_after - timedelta(days=10)
with freeze_time(delta.datetime):
assert needs_notification(certificate)
def test_get_certificates(app, certificate, notification):
from lemur.notifications.messaging import get_certificates
certificate.not_after = arrow.utcnow() + timedelta(days=30)
delta = certificate.not_after - timedelta(days=2)
notification.options = [
{"name": "interval", "value": 2},
{"name": "unit", "value": "days"},
]
with freeze_time(delta.datetime):
# no notification
certs = len(get_certificates())
# with notification
certificate.notifications.append(notification)
assert len(get_certificates()) > certs
certificate.notify = False
assert len(get_certificates()) == certs
# expired
delta = certificate.not_after + timedelta(days=2)
with freeze_time(delta.datetime):
certificate.notifications.append(notification)
assert len(get_certificates()) == 0
def test_get_eligible_certificates(app, certificate, notification):
from lemur.notifications.messaging import get_eligible_certificates
certificate.notifications.append(notification)
certificate.notifications[0].options = [
{"name": "interval", "value": 10},
{"name": "unit", "value": "days"},
]
delta = certificate.not_after - timedelta(days=10)
with freeze_time(delta.datetime):
assert get_eligible_certificates() == {
certificate.owner: {notification.label: [(notification, certificate)]}
}
@mock_ses
def test_send_expiration_notification(certificate, notification, notification_plugin):
from lemur.notifications.messaging import send_expiration_notifications
verify_sender_email()
certificate.notifications.append(notification)
certificate.notifications[0].options = [
{"name": "interval", "value": 10},
{"name": "unit", "value": "days"},
]
delta = certificate.not_after - timedelta(days=10)
with freeze_time(delta.datetime):
# this will only send owner and security emails (no additional recipients),
# but it executes 3 successful send attempts
assert send_expiration_notifications([]) == (3, 0)
@mock_ses
def test_send_expiration_notification_with_no_notifications(
certificate, notification, notification_plugin
):
from lemur.notifications.messaging import send_expiration_notifications
delta = certificate.not_after - timedelta(days=10)
with freeze_time(delta.datetime):
assert send_expiration_notifications([]) == (0, 0)
@mock_ses
def test_send_expiration_summary_notification(certificate, notification, notification_plugin):
from lemur.notifications.messaging import send_security_expiration_summary
verify_sender_email()
# we don't actually test the email contents, but adding an assortment of certs here is useful for step debugging
# to confirm the produced email body looks like we expect
create_cert_that_expires_in_days(14)
create_cert_that_expires_in_days(12)
create_cert_that_expires_in_days(9)
create_cert_that_expires_in_days(7)
create_cert_that_expires_in_days(7)
create_cert_that_expires_in_days(2)
create_cert_that_expires_in_days(30)
create_cert_that_expires_in_days(15)
create_cert_that_expires_in_days(20)
create_cert_that_expires_in_days(1)
create_cert_that_expires_in_days(100)
assert send_security_expiration_summary([])
@mock_ses
def test_send_rotation_notification(notification_plugin, certificate):
from lemur.notifications.messaging import send_rotation_notification
verify_sender_email()
assert send_rotation_notification(certificate)
@mock_ses
def test_send_pending_failure_notification(notification_plugin, async_issuer_plugin, pending_certificate):
from lemur.notifications.messaging import send_pending_failure_notification
verify_sender_email()
assert send_pending_failure_notification(pending_certificate)
def test_get_authority_certificates():
from lemur.notifications.messaging import get_expiring_authority_certificates
certificate_1 = create_ca_cert_that_expires_in_days(180)
certificate_2 = create_ca_cert_that_expires_in_days(365)
create_ca_cert_that_expires_in_days(364)
create_ca_cert_that_expires_in_days(366)
create_ca_cert_that_expires_in_days(179)
create_ca_cert_that_expires_in_days(181)
create_ca_cert_that_expires_in_days(1)
assert set(get_expiring_authority_certificates()) == {certificate_1, certificate_2}
@mock_ses
def test_send_authority_expiration_notifications():
from lemur.notifications.messaging import send_authority_expiration_notifications
verify_sender_email()
create_ca_cert_that_expires_in_days(180)
create_ca_cert_that_expires_in_days(180) # two on the same day results in a single email
create_ca_cert_that_expires_in_days(365)
create_ca_cert_that_expires_in_days(364)
create_ca_cert_that_expires_in_days(366)
create_ca_cert_that_expires_in_days(179)
create_ca_cert_that_expires_in_days(181)
create_ca_cert_that_expires_in_days(1)
assert send_authority_expiration_notifications() == (2, 0)
def create_ca_cert_that_expires_in_days(days):
now = arrow.utcnow()
not_after = now + timedelta(days=days, hours=1) # a bit more than specified since we'll check in the future
authority = AuthorityFactory()
certificate = CertificateFactory()
certificate.not_after = not_after
certificate.notify = True
certificate.root_authority_id = authority.id
certificate.authority_id = None
return certificate
def create_cert_that_expires_in_days(days):
from random import randrange
now = arrow.utcnow()
not_after = now + timedelta(days=days, hours=1) # a bit more than specified since we'll check in the future
certificate = CertificateFactory()
certificate.not_after = not_after
certificate.notify = True
endpoints = []
for i in range(0, randrange(0, 5)):
endpoints.append(EndpointFactory())
certificate.endpoints = endpoints
return certificate
|
from homeassistant.exceptions import HomeAssistantError
from .const import API_TEMP_UNITS
class UnsupportedInterface(HomeAssistantError):
"""This entity does not support the requested Smart Home API interface."""
class UnsupportedProperty(HomeAssistantError):
"""This entity does not support the requested Smart Home API property."""
class NoTokenAvailable(HomeAssistantError):
"""There is no access token available."""
class AlexaError(Exception):
"""Base class for errors that can be serialized for the Alexa API.
A handler can raise subclasses of this to return an error to the request.
"""
namespace = None
error_type = None
def __init__(self, error_message, payload=None):
"""Initialize an alexa error."""
Exception.__init__(self)
self.error_message = error_message
        self.payload = payload
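# Sketch of the intended use (hypothetical handler code): raising one of the
# subclasses below lets the Alexa request handler serialize the error's
# namespace, error_type, error_message and payload into the API response, e.g.
#   raise AlexaTempRangeError(hass, 35, min_temp=10, max_temp=30)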
class AlexaInvalidEndpointError(AlexaError):
"""The endpoint in the request does not exist."""
namespace = "Alexa"
error_type = "NO_SUCH_ENDPOINT"
def __init__(self, endpoint_id):
"""Initialize invalid endpoint error."""
msg = f"The endpoint {endpoint_id} does not exist"
AlexaError.__init__(self, msg)
self.endpoint_id = endpoint_id
class AlexaInvalidValueError(AlexaError):
"""Class to represent InvalidValue errors."""
namespace = "Alexa"
error_type = "INVALID_VALUE"
class AlexaUnsupportedThermostatModeError(AlexaError):
"""Class to represent UnsupportedThermostatMode errors."""
namespace = "Alexa.ThermostatController"
error_type = "UNSUPPORTED_THERMOSTAT_MODE"
class AlexaTempRangeError(AlexaError):
"""Class to represent TempRange errors."""
namespace = "Alexa"
error_type = "TEMPERATURE_VALUE_OUT_OF_RANGE"
def __init__(self, hass, temp, min_temp, max_temp):
"""Initialize TempRange error."""
unit = hass.config.units.temperature_unit
temp_range = {
"minimumValue": {"value": min_temp, "scale": API_TEMP_UNITS[unit]},
"maximumValue": {"value": max_temp, "scale": API_TEMP_UNITS[unit]},
}
payload = {"validRange": temp_range}
msg = f"The requested temperature {temp} is out of range"
AlexaError.__init__(self, msg, payload)
class AlexaBridgeUnreachableError(AlexaError):
"""Class to represent BridgeUnreachable errors."""
namespace = "Alexa"
error_type = "BRIDGE_UNREACHABLE"
class AlexaSecurityPanelUnauthorizedError(AlexaError):
"""Class to represent SecurityPanelController Unauthorized errors."""
namespace = "Alexa.SecurityPanelController"
error_type = "UNAUTHORIZED"
class AlexaSecurityPanelAuthorizationRequired(AlexaError):
"""Class to represent SecurityPanelController AuthorizationRequired errors."""
namespace = "Alexa.SecurityPanelController"
error_type = "AUTHORIZATION_REQUIRED"
class AlexaAlreadyInOperationError(AlexaError):
"""Class to represent AlreadyInOperation errors."""
namespace = "Alexa"
error_type = "ALREADY_IN_OPERATION"
class AlexaInvalidDirectiveError(AlexaError):
"""Class to represent InvalidDirective errors."""
namespace = "Alexa"
error_type = "INVALID_DIRECTIVE"
class AlexaVideoActionNotPermittedForContentError(AlexaError):
"""Class to represent action not permitted for content errors."""
namespace = "Alexa.Video"
error_type = "ACTION_NOT_PERMITTED_FOR_CONTENT"
|
from collections import defaultdict
from serial import SerialException
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_VOLUME_LEVEL,
DOMAIN as MEDIA_PLAYER_DOMAIN,
SERVICE_SELECT_SOURCE,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.monoprice.const import (
CONF_NOT_FIRST_RUN,
CONF_SOURCES,
DOMAIN,
SERVICE_RESTORE,
SERVICE_SNAPSHOT,
)
from homeassistant.const import (
CONF_PORT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
)
from homeassistant.helpers.entity_component import async_update_entity
from tests.async_mock import patch
from tests.common import MockConfigEntry
MOCK_CONFIG = {CONF_PORT: "fake port", CONF_SOURCES: {"1": "one", "3": "three"}}
MOCK_OPTIONS = {CONF_SOURCES: {"2": "two", "4": "four"}}
ZONE_1_ID = "media_player.zone_11"
ZONE_2_ID = "media_player.zone_12"
ZONE_7_ID = "media_player.zone_21"
class AttrDict(dict):
"""Helper class for mocking attributes."""
def __setattr__(self, name, value):
"""Set attribute."""
self[name] = value
def __getattr__(self, item):
"""Get attribute."""
return self[item]
class MockMonoprice:
"""Mock for pymonoprice object."""
def __init__(self):
"""Init mock object."""
self.zones = defaultdict(
lambda: AttrDict(power=True, volume=0, mute=True, source=1)
)
def zone_status(self, zone_id):
"""Get zone status."""
status = self.zones[zone_id]
status.zone = zone_id
return AttrDict(status)
def set_source(self, zone_id, source_idx):
"""Set source for zone."""
self.zones[zone_id].source = source_idx
def set_power(self, zone_id, power):
"""Turn zone on/off."""
self.zones[zone_id].power = power
def set_mute(self, zone_id, mute):
"""Mute/unmute zone."""
self.zones[zone_id].mute = mute
def set_volume(self, zone_id, volume):
"""Set volume for zone."""
self.zones[zone_id].volume = volume
def restore_zone(self, zone):
"""Restore zone status."""
self.zones[zone.zone] = AttrDict(zone)
async def test_cannot_connect(hass):
"""Test connection error."""
with patch(
"homeassistant.components.monoprice.get_monoprice",
side_effect=SerialException,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(ZONE_1_ID) is None
async def _setup_monoprice(hass, monoprice):
with patch(
"homeassistant.components.monoprice.get_monoprice",
new=lambda *a: monoprice,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
async def _setup_monoprice_with_options(hass, monoprice):
with patch(
"homeassistant.components.monoprice.get_monoprice",
new=lambda *a: monoprice,
):
config_entry = MockConfigEntry(
domain=DOMAIN, data=MOCK_CONFIG, options=MOCK_OPTIONS
)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
async def _setup_monoprice_not_first_run(hass, monoprice):
with patch(
"homeassistant.components.monoprice.get_monoprice",
new=lambda *a: monoprice,
):
data = {**MOCK_CONFIG, CONF_NOT_FIRST_RUN: True}
config_entry = MockConfigEntry(domain=DOMAIN, data=data)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
async def _call_media_player_service(hass, name, data):
await hass.services.async_call(
MEDIA_PLAYER_DOMAIN, name, service_data=data, blocking=True
)
async def _call_homeassistant_service(hass, name, data):
await hass.services.async_call(
"homeassistant", name, service_data=data, blocking=True
)
async def _call_monoprice_service(hass, name, data):
await hass.services.async_call(DOMAIN, name, service_data=data, blocking=True)
async def test_service_calls_with_entity_id(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": ZONE_1_ID})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring other media player to its previous state
# The zone should not be restored
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_2_ID})
await hass.async_block_till_done()
# Checking that values were not (!) restored
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_1_ID})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_service_calls_with_all_entities(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": "all"})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": "all"})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_service_calls_without_relevant_entities(hass):
"""Test snapshot save/restore service calls."""
await _setup_monoprice(hass, MockMonoprice())
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
# Saving existing values
await _call_monoprice_service(hass, SERVICE_SNAPSHOT, {"entity_id": "all"})
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "three"}
)
# Restoring media player to its previous state
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": "light.demo"})
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
async def test_restore_without_snapshot(hass):
"""Test restore when snapshot wasn't called."""
await _setup_monoprice(hass, MockMonoprice())
with patch.object(MockMonoprice, "restore_zone") as method_call:
await _call_monoprice_service(hass, SERVICE_RESTORE, {"entity_id": ZONE_1_ID})
await hass.async_block_till_done()
assert not method_call.called
async def test_update(hass):
"""Test updating values from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 1.0
assert state.attributes[ATTR_INPUT_SOURCE] == "three"
async def test_failed_update(hass):
"""Test updating failure from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
with patch.object(MockMonoprice, "zone_status", side_effect=SerialException):
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_empty_update(hass):
"""Test updating with no state from monoprice."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
# Changing media player to new state
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
await _call_media_player_service(
hass, SERVICE_SELECT_SOURCE, {"entity_id": ZONE_1_ID, "source": "one"}
)
monoprice.set_source(11, 3)
monoprice.set_volume(11, 38)
with patch.object(MockMonoprice, "zone_status", return_value=None):
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.0
assert state.attributes[ATTR_INPUT_SOURCE] == "one"
async def test_supported_features(hass):
"""Test supported features property."""
await _setup_monoprice(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
assert (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
== state.attributes["supported_features"]
)
async def test_source_list(hass):
"""Test source list property."""
await _setup_monoprice(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
# Note, the list is sorted!
assert state.attributes[ATTR_INPUT_SOURCE_LIST] == ["one", "three"]
async def test_source_list_with_options(hass):
"""Test source list property."""
await _setup_monoprice_with_options(hass, MockMonoprice())
state = hass.states.get(ZONE_1_ID)
    # Note, the list is sorted by source number!
assert state.attributes[ATTR_INPUT_SOURCE_LIST] == ["two", "four"]
async def test_select_source(hass):
"""Test source selection methods."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass,
SERVICE_SELECT_SOURCE,
{"entity_id": ZONE_1_ID, ATTR_INPUT_SOURCE: "three"},
)
assert monoprice.zones[11].source == 3
# Trying to set unknown source
await _call_media_player_service(
hass,
SERVICE_SELECT_SOURCE,
{"entity_id": ZONE_1_ID, ATTR_INPUT_SOURCE: "no name"},
)
assert monoprice.zones[11].source == 3
async def test_unknown_source(hass):
"""Test behavior when device has unknown source."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
monoprice.set_source(11, 5)
await async_update_entity(hass, ZONE_1_ID)
await hass.async_block_till_done()
state = hass.states.get(ZONE_1_ID)
assert state.attributes.get(ATTR_INPUT_SOURCE) is None
async def test_turn_on_off(hass):
"""Test turning on the zone."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(hass, SERVICE_TURN_OFF, {"entity_id": ZONE_1_ID})
assert not monoprice.zones[11].power
await _call_media_player_service(hass, SERVICE_TURN_ON, {"entity_id": ZONE_1_ID})
assert monoprice.zones[11].power
async def test_mute_volume(hass):
"""Test mute functionality."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.5}
)
await _call_media_player_service(
hass, SERVICE_VOLUME_MUTE, {"entity_id": ZONE_1_ID, "is_volume_muted": False}
)
assert not monoprice.zones[11].mute
await _call_media_player_service(
hass, SERVICE_VOLUME_MUTE, {"entity_id": ZONE_1_ID, "is_volume_muted": True}
)
assert monoprice.zones[11].mute
async def test_volume_up_down(hass):
"""Test increasing volume by one."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 0.0}
)
assert monoprice.zones[11].volume == 0
await _call_media_player_service(
hass, SERVICE_VOLUME_DOWN, {"entity_id": ZONE_1_ID}
)
# should not go below zero
assert monoprice.zones[11].volume == 0
await _call_media_player_service(hass, SERVICE_VOLUME_UP, {"entity_id": ZONE_1_ID})
assert monoprice.zones[11].volume == 1
await _call_media_player_service(
hass, SERVICE_VOLUME_SET, {"entity_id": ZONE_1_ID, "volume_level": 1.0}
)
assert monoprice.zones[11].volume == 38
await _call_media_player_service(hass, SERVICE_VOLUME_UP, {"entity_id": ZONE_1_ID})
# should not go above 38
assert monoprice.zones[11].volume == 38
await _call_media_player_service(
hass, SERVICE_VOLUME_DOWN, {"entity_id": ZONE_1_ID}
)
assert monoprice.zones[11].volume == 37
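# Background for the numbers above (sketch, inferred from the asserts): the
# Monoprice amp uses an integer zone volume of 0..38 while media_player
# volume_level is 0.0..1.0, so the conversion is roughly
#   zone_volume  = int(volume_level * 38)   # 1.0 -> 38, 0.0 -> 0
#   volume_level = zone_volume / 38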
async def test_first_run_with_available_zones(hass):
"""Test first run with all zones available."""
monoprice = MockMonoprice()
await _setup_monoprice(hass, monoprice)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get(ZONE_7_ID)
assert not entry.disabled
async def test_first_run_with_failing_zones(hass):
"""Test first run with failed zones."""
monoprice = MockMonoprice()
with patch.object(MockMonoprice, "zone_status", side_effect=SerialException):
await _setup_monoprice(hass, monoprice)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get(ZONE_1_ID)
assert not entry.disabled
entry = registry.async_get(ZONE_7_ID)
assert entry.disabled
assert entry.disabled_by == "integration"
async def test_not_first_run_with_failing_zone(hass):
"""Test first run with failed zones."""
monoprice = MockMonoprice()
with patch.object(MockMonoprice, "zone_status", side_effect=SerialException):
await _setup_monoprice_not_first_run(hass, monoprice)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get(ZONE_1_ID)
assert not entry.disabled
entry = registry.async_get(ZONE_7_ID)
assert not entry.disabled
|
import itertools
from scattertext.ParsedCorpus import ParsedCorpus
class Doc2VecBuilder(object):
def __init__(self, model, term_from_token=lambda tok: tok.lower_):
self.model = model
self.term_from_token = term_from_token
        self.category2dvid = None
self.corpus = None
def train(self, corpus):
assert isinstance(corpus, ParsedCorpus)
tagged_docs = []
try:
import gensim
        except ImportError:
            raise Exception("Please install gensim before using Doc2VecCategoryProjector.")
for doc, tag in zip(corpus.get_parsed_docs(), corpus.get_category_names_by_row()):
words = list(itertools.chain(
*[[t.lower_ for t in sent if not t.is_punct if t.lower_.strip()] for sent in doc.sents]))
tagged_docs.append(gensim.models.doc2vec.TaggedDocument(words, [tag]))
self.model.build_vocab(tagged_docs)
        self.category2dvid = {}
        for i in range(corpus.get_num_categories()):
            self.category2dvid[self.model.docvecs.index_to_doctag(i)] = i
self.model.train(tagged_docs, total_examples=self.model.corpus_count, epochs=self.model.epochs)
self.corpus = corpus
return self.model
def project(self):
if self.corpus is None:
raise Exception("Please run train before project.")
return self.model.docvecs.vectors_docs[
            [self.category2dvid[category] for category in self.corpus.get_categories()]]
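# Minimal usage sketch (assumes gensim 3.x is installed, matching the docvecs
# API used above, and that `corpus` is a scattertext ParsedCorpus; the Doc2Vec
# hyperparameters are illustrative only):
#   import gensim
#   builder = Doc2VecBuilder(gensim.models.Doc2Vec(vector_size=100, epochs=10))
#   builder.train(corpus)
#   category_vectors = builder.project()  # one row per category in the corpus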
|
from pgmpy.base import UndirectedGraph
from pgmpy.tests import help_functions as hf
import unittest
class TestUndirectedGraphCreation(unittest.TestCase):
def setUp(self):
self.graph = UndirectedGraph()
def test_class_init_without_data(self):
self.assertIsInstance(self.graph, UndirectedGraph)
def test_class_init_with_data_string(self):
self.G = UndirectedGraph([("a", "b"), ("b", "c")])
self.assertListEqual(sorted(self.G.nodes()), ["a", "b", "c"])
self.assertListEqual(
hf.recursive_sorted(self.G.edges()), [["a", "b"], ["b", "c"]]
)
def test_add_node_string(self):
self.graph.add_node("a")
self.assertListEqual(list(self.graph.nodes()), ["a"])
def test_add_node_nonstring(self):
self.graph.add_node(1)
self.assertListEqual(list(self.graph.nodes()), [1])
def test_add_nodes_from_string(self):
self.graph.add_nodes_from(["a", "b", "c", "d"])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "c", "d"])
def test_add_node_with_weight(self):
self.graph.add_node("a")
self.graph.add_node("weight_a", weight=0.3)
self.assertEqual(self.graph.nodes["weight_a"]["weight"], 0.3)
self.assertEqual(self.graph.nodes["a"]["weight"], None)
def test_add_nodes_from_with_weight(self):
self.graph.add_node(1)
self.graph.add_nodes_from(["weight_b", "weight_c"], weights=[0.3, 0.5])
self.assertEqual(self.graph.nodes["weight_b"]["weight"], 0.3)
self.assertEqual(self.graph.nodes["weight_c"]["weight"], 0.5)
self.assertEqual(self.graph.nodes[1]["weight"], None)
def test_add_nodes_from_non_string(self):
self.graph.add_nodes_from([1, 2, 3, 4])
def test_add_edge_string(self):
self.graph.add_edge("d", "e")
self.assertListEqual(sorted(self.graph.nodes()), ["d", "e"])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()), [["d", "e"]])
self.graph.add_nodes_from(["a", "b", "c"])
self.graph.add_edge("a", "b")
self.assertListEqual(
hf.recursive_sorted(self.graph.edges()), [["a", "b"], ["d", "e"]]
)
def test_add_edge_nonstring(self):
self.graph.add_edge(1, 2)
def test_add_edges_from_string(self):
self.graph.add_edges_from([("a", "b"), ("b", "c")])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "c"])
self.assertListEqual(
hf.recursive_sorted(self.graph.edges()), [["a", "b"], ["b", "c"]]
)
self.graph.add_nodes_from(["d", "e", "f"])
self.graph.add_edges_from([("d", "e"), ("e", "f")])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "c", "d", "e", "f"])
self.assertListEqual(
hf.recursive_sorted(self.graph.edges()),
hf.recursive_sorted([("a", "b"), ("b", "c"), ("d", "e"), ("e", "f")]),
)
def test_add_edges_from_nonstring(self):
self.graph.add_edges_from([(1, 2), (2, 3)])
def test_number_of_neighbors(self):
self.graph.add_edges_from([("a", "b"), ("b", "c")])
self.assertEqual(len(list(self.graph.neighbors("b"))), 2)
def tearDown(self):
del self.graph
class TestUndirectedGraphMethods(unittest.TestCase):
def test_is_clique(self):
G = UndirectedGraph(
[
("A", "B"),
("C", "B"),
("B", "D"),
("B", "E"),
("D", "E"),
("E", "F"),
("D", "F"),
("B", "F"),
]
)
self.assertFalse(G.is_clique(nodes=["A", "B", "C", "D"]))
self.assertTrue(G.is_clique(nodes=["B", "D", "E", "F"]))
self.assertTrue(G.is_clique(nodes=["D", "E", "B"]))
def test_is_triangulated(self):
G = UndirectedGraph([("A", "B"), ("A", "C"), ("B", "D"), ("C", "D")])
self.assertFalse(G.is_triangulated())
G.add_edge("A", "D")
self.assertTrue(G.is_triangulated())
|
import asyncio
import json
import logging
import time
from aiohttp import ClientConnectionError, ClientResponseError
from hass_splunk import SplunkPayloadError, hass_splunk
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_TOKEN,
CONF_VERIFY_SSL,
EVENT_STATE_CHANGED,
)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import FILTER_SCHEMA
from homeassistant.helpers.json import JSONEncoder
_LOGGER = logging.getLogger(__name__)
DOMAIN = "splunk"
CONF_FILTER = "filter"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8088
DEFAULT_SSL = False
DEFAULT_NAME = "HASS"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_FILTER, default={}): FILTER_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
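# Example configuration.yaml entry (illustrative values; keys mirror the schema
# above, and `filter` accepts the usual entityfilter keys such as
# include_domains / exclude_entities):
#   splunk:
#     token: !secret splunk_token
#     host: splunk.local
#     port: 8088
#     ssl: false
#     filter:
#       include_domains:
#         - sensor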
async def async_setup(hass, config):
"""Set up the Splunk component."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
token = conf.get(CONF_TOKEN)
use_ssl = conf[CONF_SSL]
verify_ssl = conf.get(CONF_VERIFY_SSL)
name = conf.get(CONF_NAME)
entity_filter = conf[CONF_FILTER]
event_collector = hass_splunk(
session=async_get_clientsession(hass),
host=host,
port=port,
token=token,
use_ssl=use_ssl,
verify_ssl=verify_ssl,
)
if not await event_collector.check(connectivity=False, token=True, busy=False):
return False
payload = {
"time": time.time(),
"host": name,
"event": {
"domain": DOMAIN,
"meta": "Splunk integration has started",
},
}
await event_collector.queue(json.dumps(payload, cls=JSONEncoder), send=False)
async def splunk_event_listener(event):
"""Listen for new messages on the bus and sends them to Splunk."""
state = event.data.get("new_state")
if state is None or not entity_filter(state.entity_id):
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
payload = {
"time": event.time_fired.timestamp(),
"host": name,
"event": {
"domain": state.domain,
"entity_id": state.object_id,
"attributes": dict(state.attributes),
"value": _state,
},
}
try:
await event_collector.queue(json.dumps(payload, cls=JSONEncoder), send=True)
except SplunkPayloadError as err:
if err.status == 401:
_LOGGER.error(err)
else:
_LOGGER.warning(err)
except ClientConnectionError as err:
_LOGGER.warning(err)
except asyncio.TimeoutError:
_LOGGER.warning("Connection to %s:%s timed out", host, port)
except ClientResponseError as err:
_LOGGER.error(err.message)
hass.bus.async_listen(EVENT_STATE_CHANGED, splunk_event_listener)
return True
|
import gzip
from urllib.request import urlretrieve
def get_example_model(model):
"""
Fetches the specified model from bnlearn repository and returns a
pgmpy.model instance.
    Parameters
    ----------
model: str
Any model from bnlearn repository (http://www.bnlearn.com/bnrepository).
Discrete Bayesian Network Options:
Small Networks:
1. asia
2. cancer
3. earthquake
4. sachs
5. survey
Medium Networks:
1. alarm
2. barley
3. child
4. insurance
5. mildew
6. water
Large Networks:
1. hailfinder
2. hepar2
3. win95pts
Very Large Networks:
1. andes
2. diabetes
3. link
4. munin1
5. munin2
6. munin3
7. munin4
8. pathfinder
9. pigs
10. munin
Gaussian Bayesian Network Options:
1. ecoli70
2. magic-niab
3. magic-irri
4. arth150
Conditional Linear Gaussian Bayesian Network Options:
1. sangiovese
2. mehra
Example
-------
>>> from pgmpy.data import get_example_model
>>> model = get_example_model(model='asia')
>>> model
Returns
-------
pgmpy.models instance: An instance of one of the model classes in pgmpy.models
depending on the type of dataset.
"""
from pgmpy.readwrite import BIFReader
model_links = {
"asia": "http://www.bnlearn.com/bnrepository/asia/asia.bif.gz",
"cancer": "http://www.bnlearn.com/bnrepository/cancer/cancer.bif.gz",
"earthquake": "http://www.bnlearn.com/bnrepository/earthquake/earthquake.bif.gz",
"sachs": "http://www.bnlearn.com/bnrepository/sachs/sachs.bif.gz",
"survey": "http://www.bnlearn.com/bnrepository/survey/survey.bif.gz",
"alarm": "http://www.bnlearn.com/bnrepository/alarm/alarm.bif.gz",
"barley": "http://www.bnlearn.com/bnrepository/barley/barley.bif.gz",
"child": "http://www.bnlearn.com/bnrepository/child/child.bif.gz",
"insurance": "http://www.bnlearn.com/bnrepository/insurance/insurance.bif.gz",
"mildew": "http://www.bnlearn.com/bnrepository/mildew/mildew.bif.gz",
"water": "http://www.bnlearn.com/bnrepository/water/water.bif.gz",
"hailfinder": "http://www.bnlearn.com/bnrepository/hailfinder/hailfinder.bif.gz",
"hepar2": "http://www.bnlearn.com/bnrepository/hepar2/hepar2.bif.gz",
"win95pts": "http://www.bnlearn.com/bnrepository/win95pts/win95pts.bif.gz",
"andes": "http://www.bnlearn.com/bnrepository/andes/andes.bif.gz",
"diabetes": "http://www.bnlearn.com/bnrepository/diabetes/diabetes.bif.gz",
"link": "http://www.bnlearn.com/bnrepository/link/link.bif.gz",
"munin1": "http://www.bnlearn.com/bnrepository/munin4/munin1.bif.gz",
"munin2": "http://www.bnlearn.com/bnrepository/munin4/munin2.bif.gz",
"munin3": "http://www.bnlearn.com/bnrepository/munin4/munin3.bif.gz",
"munin4": "http://www.bnlearn.com/bnrepository/munin4/munin4.bif.gz",
"pathfinder": "http://www.bnlearn.com/bnrepository/pathfinder/pathfinder.bif.gz",
"pigs": "http://www.bnlearn.com/bnrepository/pigs/pigs.bif.gz",
"munin": "http://www.bnlearn.com/bnrepository/munin/munin.bif.gz",
"ecoli70": "",
"magic-niab": "",
"magic-irri": "",
"arth150": "",
"sangiovese": "",
"mehra": "",
}
if model not in model_links.keys():
raise ValueError("dataset should be one of the options")
if model_links[model] == "":
raise NotImplementedError("The specified dataset isn't supported")
filename, _ = urlretrieve(model_links[model])
with gzip.open(filename, "rb") as f:
content = f.read()
reader = BIFReader(string=content.decode("utf-8"), n_jobs=1)
return reader.get_model()
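# Quick usage sketch (requires network access to bnlearn.com; 'asia' is one of
# the small discrete networks listed in the docstring above):
#   model = get_example_model("asia")
#   print(sorted(model.nodes()))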
|
from homeassistant.core import callback
from .. import registries
from ..const import (
ATTR_ATTRIBUTE_ID,
ATTR_ATTRIBUTE_NAME,
ATTR_VALUE,
REPORT_CONFIG_ASAP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_MIN_INT,
SIGNAL_ATTR_UPDATED,
UNKNOWN,
)
from .base import ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(registries.SMARTTHINGS_HUMIDITY_CLUSTER)
class SmartThingsHumidity(ZigbeeChannel):
"""Smart Things Humidity channel."""
REPORT_CONFIG = [
{
"attr": "measured_value",
"config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 50),
}
]
@registries.CHANNEL_ONLY_CLUSTERS.register(0xFD00)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(0xFD00)
class OsramButton(ZigbeeChannel):
"""Osram button channel."""
REPORT_CONFIG = []
@registries.CHANNEL_ONLY_CLUSTERS.register(registries.PHILLIPS_REMOTE_CLUSTER)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(registries.PHILLIPS_REMOTE_CLUSTER)
class PhillipsRemote(ZigbeeChannel):
"""Phillips remote channel."""
REPORT_CONFIG = []
@registries.CHANNEL_ONLY_CLUSTERS.register(0xFCC0)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(0xFCC0)
class OppleRemote(ZigbeeChannel):
"""Opple button channel."""
REPORT_CONFIG = []
@registries.ZIGBEE_CHANNEL_REGISTRY.register(
registries.SMARTTHINGS_ACCELERATION_CLUSTER
)
class SmartThingsAcceleration(ZigbeeChannel):
"""Smart Things Acceleration channel."""
REPORT_CONFIG = [
{"attr": "acceleration", "config": REPORT_CONFIG_ASAP},
{"attr": "x_axis", "config": REPORT_CONFIG_ASAP},
{"attr": "y_axis", "config": REPORT_CONFIG_ASAP},
{"attr": "z_axis", "config": REPORT_CONFIG_ASAP},
]
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
if attrid == self.value_attribute:
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
attrid,
self._cluster.attributes.get(attrid, [UNKNOWN])[0],
value,
)
return
self.zha_send_event(
SIGNAL_ATTR_UPDATED,
{
ATTR_ATTRIBUTE_ID: attrid,
ATTR_ATTRIBUTE_NAME: self._cluster.attributes.get(attrid, [UNKNOWN])[0],
ATTR_VALUE: value,
},
)
|
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from paasta_tools.paastaapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from paasta_tools.paastaapi.model.marathon_dashboard_item import MarathonDashboardItem
globals()['MarathonDashboardItem'] = MarathonDashboardItem
class MarathonDashboardCluster(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'value': ([MarathonDashboardItem],),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""MarathonDashboardCluster - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] ([MarathonDashboardItem]): List of all the MarathonDashboardItems for a cluster. # noqa: E501
Keyword Args:
value ([MarathonDashboardItem]): List of all the MarathonDashboardItems for a cluster. # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
        # Pop the bookkeeping kwargs first; _path_to_item is needed by the
        # error raised below when no value is supplied.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
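# Hypothetical usage sketch (not part of the generated file): constructing the
# model directly. An empty list is used so the example does not depend on any
# MarathonDashboardItem instances; real responses carry populated items
# deserialized by the API client.
if __name__ == "__main__":
    cluster = MarathonDashboardCluster(value=[])
    print(cluster.value)  # -> []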
|
import copy
import chainer
class PickableSequentialChain(chainer.Chain):
"""A sequential chain that can pick intermediate layers.
Callable objects, such as :class:`chainer.Link` and
:class:`chainer.Function`, can be registered to this chain with
:meth:`init_scope`.
This chain keeps the order of registrations and :meth:`forward`
executes callables in that order.
A :class:`chainer.Link` object in the sequence will be added as
a child link of this link.
    :meth:`forward` returns one or more layers that are picked up
    during the forward computation.
These layers can be specified by :obj:`pick`, which contains
the names of the layers that are collected.
    When :obj:`pick` is a string, a single layer is returned.
When :obj:`pick` is an iterable of strings, a tuple of layers
is returned. The order of the layers is the same as the order of
the strings in :obj:`pick`.
When :obj:`pick` is :obj:`None`, the last layer is returned.
Examples:
>>> import chainer.functions as F
>>> import chainer.links as L
>>> model = PickableSequentialChain()
>>> with model.init_scope():
>>> model.l1 = L.Linear(None, 1000)
>>> model.l1_relu = F.relu
>>> model.l2 = L.Linear(None, 1000)
>>> model.l2_relu = F.relu
>>> model.l3 = L.Linear(None, 10)
>>> # This is layer l3
>>> layer3 = model(x)
>>> # The layers to be collected can be changed.
>>> model.pick = ('l2_relu', 'l1_relu')
>>> # These are layers l2_relu and l1_relu.
>>> layer2, layer1 = model(x)
Parameters:
pick (string or iterable of strings):
Names of layers that are collected during
the forward pass.
layer_names (iterable of strings):
Names of layers that can be collected from
this chain. The names are ordered in the order
of computation.
"""
def __init__(self):
super(PickableSequentialChain, self).__init__()
self.layer_names = []
# Two attributes are initialized by the setter of pick.
# self._pick -> None
# self._return_tuple -> False
self.pick = None
def __setattr__(self, name, value):
super(PickableSequentialChain, self).__setattr__(name, value)
if self.within_init_scope and callable(value):
self.layer_names.append(name)
def __delattr__(self, name):
if self._pick and name in self._pick:
raise AttributeError(
'layer {:s} is registered to pick.'.format(name))
super(PickableSequentialChain, self).__delattr__(name)
try:
self.layer_names.remove(name)
except ValueError:
pass
@property
def pick(self):
if self._pick is None:
return None
if self._return_tuple:
return self._pick
else:
return self._pick[0]
@pick.setter
def pick(self, pick):
if pick is None:
self._return_tuple = False
self._pick = None
return
if (not isinstance(pick, str) and
all(isinstance(name, str) for name in pick)):
return_tuple = True
else:
return_tuple = False
pick = (pick,)
for name in pick:
if name not in self.layer_names:
raise ValueError('Invalid layer name ({:s})'.format(name))
self._return_tuple = return_tuple
self._pick = tuple(pick)
def remove_unused(self):
"""Delete all layers that are not needed for the forward pass.
"""
if self._pick is None:
return
# The biggest index among indices of the layers that are included
# in pick.
last_index = max(self.layer_names.index(name) for name in self._pick)
for name in self.layer_names[last_index + 1:]:
delattr(self, name)
def forward(self, x):
"""Forward this model.
Args:
x (chainer.Variable or array): Input to the model.
Returns:
chainer.Variable or tuple of chainer.Variable:
The returned layers are determined by :obj:`pick`.
"""
if self._pick is None:
pick = (self.layer_names[-1],)
else:
pick = self._pick
# The biggest index among indices of the layers that are included
# in pick.
last_index = max(self.layer_names.index(name) for name in pick)
layers = {}
h = x
for name in self.layer_names[:last_index + 1]:
h = self[name](h)
if name in pick:
layers[name] = h
if self._return_tuple:
layers = tuple(layers[name] for name in pick)
else:
layers = list(layers.values())[0]
return layers
def copy(self, *args, **kargs):
copied = super(PickableSequentialChain, self).copy(*args, **kargs)
copied.layer_names = copy.copy(self.layer_names)
copied._pick = copy.copy(self._pick)
copied._return_tuple = copy.copy(self._return_tuple)
return copied
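# A short, self-contained sketch (assumptions: ``chainer`` and ``numpy`` are
# importable; the layer sizes are arbitrary) showing how ``pick`` and
# ``remove_unused`` interact: after picking an intermediate layer, the layers
# after it can be dropped to save memory.
if __name__ == "__main__":
    import numpy as np
    import chainer.functions as F
    import chainer.links as L

    model = PickableSequentialChain()
    with model.init_scope():
        model.l1 = L.Linear(None, 8)
        model.relu1 = F.relu
        model.l2 = L.Linear(None, 4)

    model.pick = 'relu1'
    model.remove_unused()  # l2 is no longer needed and gets deleted
    x = np.zeros((1, 5), dtype=np.float32)
    h = model(x)  # output of relu1, shape (1, 8)
    print(model.layer_names, h.shape)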
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import mock
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.openstack import swift
class SwiftTest(unittest.TestCase):
def setUp(self):
super(SwiftTest, self).setUp()
p = mock.patch(swift.__name__ + '.FLAGS')
self.mock_flags = p.start()
self.addCleanup(p.stop)
self.mock_flags.openstack_swift_insecure = False
@mock.patch.dict(os.environ, {'OS_AUTH_URL': 'OS_AUTH_URL',
'OS_TENANT_NAME': 'OS_TENANT_NAME',
'OS_USERNAME': 'OS_USERNAME',
'OS_PASSWORD': 'OS_PASSWORD'})
def testMakeBucket(self):
swift_storage_service = swift.SwiftStorageService()
swift_storage_service.PrepareService('location')
with mock.patch(vm_util.__name__ + '.IssueCommand',
return_value=('stdout', 'stderr', 0)) as mock_util:
swift_storage_service.MakeBucket('new_bucket')
mock_util.assert_called_with(['swift',
'--os-auth-url', 'OS_AUTH_URL',
'--os-tenant-name', 'OS_TENANT_NAME',
'--os-username', 'OS_USERNAME',
'--os-password', 'OS_PASSWORD',
'post',
'new_bucket'],
raise_on_failure=False)
if __name__ == '__main__':
unittest.main()
|
from plumbum import cli, colors
colors.use_color = 3
def make_app():
class SimpleApp(cli.Application):
PROGNAME = colors.green
VERSION = colors.red | "1.0.3"
@cli.switch(["a"])
def spam(self):
print("!!a")
def main(self, *args):
print("lalala")
return SimpleApp
class TestSimpleApp:
def test_runs(self):
SimpleApp = make_app()
_, rc = SimpleApp.run(['SimpleApp'], exit = False)
assert rc == 0
def test_colorless_run(self, capsys):
colors.use_color = 0
SimpleApp = make_app()
_, rc = SimpleApp.run(["SimpleApp"], exit = False)
assert capsys.readouterr()[0] == 'lalala\n'
def test_colorful_run(self, capsys):
colors.use_color = 4
SimpleApp = make_app()
_, rc = SimpleApp.run(["SimpleApp"], exit = False)
assert capsys.readouterr()[0] == 'lalala\n'
def test_colorless_output(self, capsys):
colors.use_color = 0
SimpleApp = make_app()
_, rc = SimpleApp.run(["SimpleApp", "-h"], exit = False)
output = capsys.readouterr()[0]
assert 'SimpleApp 1.0.3' in output
assert 'SimpleApp [SWITCHES] args...' in output
def test_colorful_help(self, capsys):
colors.use_color = 4
SimpleApp = make_app()
_, rc = SimpleApp.run(["SimpleApp", "-h"], exit = False)
output = capsys.readouterr()[0]
assert 'SimpleApp 1.0.3' not in output
assert SimpleApp.PROGNAME | 'SimpleApp' in output
class TestNSApp:
def test_colorful_output(self, capsys):
colors.use_color = 4
class NotSoSimpleApp(cli.Application):
PROGNAME = colors.blue | "NSApp"
VERSION = "1.2.3"
COLOR_GROUPS = {'Switches': colors.green}
@cli.switch(["b"], help="this is a bacon switch")
def bacon(self):
print("Oooooh, I love BACON!")
@cli.switch(["c"], help=colors.red | "crunchy")
def crunchy(self):
print("Crunchy...")
def main(self):
print("Eating!")
_, rc = NotSoSimpleApp.run(["NotSoSimpleApp", "-h"], exit=False)
output = capsys.readouterr()[0]
assert rc == 0
expected = str((colors.blue | "NSApp") + " 1.2.3")
assert "-b" in output
assert str(colors.red | "crunchy") in output
assert expected in output
if __name__ == "__main__":
    # NotSoSimpleApp is only defined inside the test above; run the suite instead.
    import pytest
    pytest.main([__file__])
|
import dbus as _dbus
from openrazer.client.devices import RazerDevice as __RazerDevice, BaseDeviceFactory as __BaseDeviceFactory
from openrazer.client.devices.mousemat import RazerMousemat as __RazerMousemat
from openrazer.client.devices.keyboard import RazerKeyboardFactory as __RazerKeyboardFactory
from openrazer.client.devices.mice import RazerMouse as __RazerMouse
DEVICE_MAP = {
'mousemat': __RazerMousemat,
'keyboard': __RazerKeyboardFactory,
'mouse': __RazerMouse,
'keypad': __RazerKeyboardFactory,
'default': __RazerDevice
}
class RazerDeviceFactory(__BaseDeviceFactory):
"""
Simple factory to return an object for a given device
"""
@staticmethod
def get_device(serial, vid_pid=None, daemon_dbus=None):
"""
        Factory for turning a serial number into a device class.
        Returns a class fit for the device in question. The DEVICE_MAP mapping above
        can contain a device_type => DeviceClass or DeviceFactory; this allows us to specify raw device
        classes when there is only one model (like the Firefly) or a factory for the keyboards (so we can
        differentiate between old BlackWidows and Chromas). If the device is not in the device mapping,
        the factory defaults to a raw RazerDevice.
:param serial: Device serial
:type serial: str
:param vid_pid: Device VID, PID
:type vid_pid: list of int
:param daemon_dbus: Daemon DBus object
:type daemon_dbus: object or None
:return: RazerDevice object (or subclass)
:rtype: RazerDevice
"""
if daemon_dbus is None:
session_bus = _dbus.SessionBus()
daemon_dbus = session_bus.get_object("org.razer", "/org/razer/device/{0}".format(serial))
device_dbus = _dbus.Interface(daemon_dbus, "razer.device.misc")
device_type = device_dbus.getDeviceType()
device_vid_pid = device_dbus.getVidPid()
if device_type in DEVICE_MAP:
# Have device mapping
device_class = DEVICE_MAP[device_type]
if hasattr(device_class, 'get_device'):
# DeviceFactory
device = device_class.get_device(serial, vid_pid=device_vid_pid, daemon_dbus=daemon_dbus)
else:
# DeviceClass
device = device_class(serial, vid_pid=device_vid_pid, daemon_dbus=daemon_dbus)
else:
# No mapping, default to RazerDevice
device = DEVICE_MAP['default'](serial, vid_pid=device_vid_pid, daemon_dbus=daemon_dbus)
return device
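# Hypothetical usage sketch: resolving a device class for a given serial.
# This requires a running openrazer daemon on the session bus; the serial
# string below is a placeholder, not a real device.
if __name__ == "__main__":
    device = RazerDeviceFactory.get_device("PM0000000000001")
    print(type(device).__name__)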
|
from pyisy.constants import (
COMMAND_FRIENDLY_NAME,
EMPTY_TIME,
EVENT_PROPS_IGNORED,
PROTO_GROUP,
PROTO_ZWAVE,
)
from pyisy.helpers import NodeProperty
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import Dict
from .const import _LOGGER, DOMAIN
class ISYEntity(Entity):
"""Representation of an ISY994 device."""
_name: str = None
def __init__(self, node) -> None:
"""Initialize the insteon device."""
self._node = node
self._attrs = {}
self._change_handler = None
self._control_handler = None
async def async_added_to_hass(self) -> None:
"""Subscribe to the node change events."""
self._change_handler = self._node.status_events.subscribe(self.on_update)
if hasattr(self._node, "control_events"):
self._control_handler = self._node.control_events.subscribe(self.on_control)
def on_update(self, event: object) -> None:
"""Handle the update event from the ISY994 Node."""
self.schedule_update_ha_state()
def on_control(self, event: NodeProperty) -> None:
"""Handle a control event from the ISY994 Node."""
event_data = {
"entity_id": self.entity_id,
"control": event.control,
"value": event.value,
"formatted": event.formatted,
"uom": event.uom,
"precision": event.prec,
}
if event.control not in EVENT_PROPS_IGNORED:
# New state attributes may be available, update the state.
self.schedule_update_ha_state()
self.hass.bus.fire("isy994_control", event_data)
@property
def device_info(self):
"""Return the device_info of the device."""
if hasattr(self._node, "protocol") and self._node.protocol == PROTO_GROUP:
# not a device
return None
uuid = self._node.isy.configuration["uuid"]
node = self._node
basename = self.name
if hasattr(self._node, "parent_node") and self._node.parent_node is not None:
# This is not the parent node, get the parent node.
node = self._node.parent_node
basename = node.name
device_info = {
"name": basename,
"identifiers": {},
"model": "Unknown",
"manufacturer": "Unknown",
"via_device": (DOMAIN, uuid),
}
if hasattr(node, "address"):
device_info["name"] += f" ({node.address})"
if hasattr(node, "primary_node"):
device_info["identifiers"] = {(DOMAIN, f"{uuid}_{node.address}")}
# ISYv5 Device Types
if hasattr(node, "node_def_id") and node.node_def_id is not None:
device_info["model"] = node.node_def_id
# Numerical Device Type
if hasattr(node, "type") and node.type is not None:
device_info["model"] += f" {node.type}"
if hasattr(node, "protocol"):
device_info["manufacturer"] = node.protocol
if node.protocol == PROTO_ZWAVE:
# Get extra information for Z-Wave Devices
device_info["manufacturer"] += f" MfrID:{node.zwave_props.mfr_id}"
device_info["model"] += (
f" Type:{node.zwave_props.devtype_gen} "
f"ProductTypeID:{node.zwave_props.prod_type_id} "
f"ProductID:{node.zwave_props.product_id}"
)
# Note: sw_version is not exposed by the ISY for the individual devices.
return device_info
@property
def unique_id(self) -> str:
"""Get the unique identifier of the device."""
if hasattr(self._node, "address"):
return f"{self._node.isy.configuration['uuid']}_{self._node.address}"
return None
@property
def old_unique_id(self) -> str:
"""Get the old unique identifier of the device."""
if hasattr(self._node, "address"):
return self._node.address
return None
@property
def name(self) -> str:
"""Get the name of the device."""
return self._name or str(self._node.name)
@property
def should_poll(self) -> bool:
"""No polling required since we're using the subscription."""
return False
class ISYNodeEntity(ISYEntity):
"""Representation of a ISY Nodebase (Node/Group) entity."""
@property
def device_state_attributes(self) -> Dict:
"""Get the state attributes for the device.
The 'aux_properties' in the pyisy Node class are combined with the
other attributes which have been picked up from the event stream and
        the combined result is returned as the device state attributes.
"""
attr = {}
if hasattr(self._node, "aux_properties"):
# Cast as list due to RuntimeError if a new property is added while running.
for name, value in list(self._node.aux_properties.items()):
attr_name = COMMAND_FRIENDLY_NAME.get(name, name)
attr[attr_name] = str(value.formatted).lower()
# If a Group/Scene, set a property if the entire scene is on/off
if hasattr(self._node, "group_all_on"):
attr["group_all_on"] = STATE_ON if self._node.group_all_on else STATE_OFF
self._attrs.update(attr)
return self._attrs
def send_node_command(self, command):
"""Respond to an entity service command call."""
if not hasattr(self._node, command):
_LOGGER.error(
"Invalid Service Call %s for device %s", command, self.entity_id
)
return
getattr(self._node, command)()
def send_raw_node_command(
self, command, value=None, unit_of_measurement=None, parameters=None
):
"""Respond to an entity service raw command call."""
if not hasattr(self._node, "send_cmd"):
_LOGGER.error(
"Invalid Service Call %s for device %s", command, self.entity_id
)
return
self._node.send_cmd(command, value, unit_of_measurement, parameters)
class ISYProgramEntity(ISYEntity):
"""Representation of an ISY994 program base."""
def __init__(self, name: str, status, actions=None) -> None:
"""Initialize the ISY994 program-based entity."""
super().__init__(status)
self._name = name
self._actions = actions
@property
def device_state_attributes(self) -> Dict:
"""Get the state attributes for the device."""
attr = {}
if self._actions:
attr["actions_enabled"] = self._actions.enabled
if self._actions.last_finished != EMPTY_TIME:
attr["actions_last_finished"] = self._actions.last_finished
if self._actions.last_run != EMPTY_TIME:
attr["actions_last_run"] = self._actions.last_run
if self._actions.last_update != EMPTY_TIME:
attr["actions_last_update"] = self._actions.last_update
attr["ran_else"] = self._actions.ran_else
attr["ran_then"] = self._actions.ran_then
attr["run_at_startup"] = self._actions.run_at_startup
attr["running"] = self._actions.running
attr["status_enabled"] = self._node.enabled
if self._node.last_finished != EMPTY_TIME:
attr["status_last_finished"] = self._node.last_finished
if self._node.last_run != EMPTY_TIME:
attr["status_last_run"] = self._node.last_run
if self._node.last_update != EMPTY_TIME:
attr["status_last_update"] = self._node.last_update
return attr
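# Hypothetical sketch (not part of the integration): ``device_info`` only
# inspects attributes on the wrapped node, so a lightweight stand-in object is
# enough to see how the device registry payload is assembled. Every value
# below is a made-up placeholder.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_node = SimpleNamespace(
        isy=SimpleNamespace(configuration={"uuid": "ab:cd:ef"}),
        name="Lamp",
        address="12 34 56 1",
        primary_node="12 34 56 1",
        node_def_id="DimmerLampSwitch",
        type="1.32.65.0",
        protocol="insteon",
    )
    entity = ISYNodeEntity(fake_node)
    print(entity.device_info)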
|
from __future__ import division
import logging
import numpy as np
import scipy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import assert_all_finite
from sklearn.utils.validation import NotFittedError
__all__ = [
"SRM", "DetSRM"
]
logger = logging.getLogger(__name__)
def _init_w_transforms(data, features):
"""Initialize the mappings (Wi) for the SRM with random orthogonal matrices.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
features : int
The number of features in the model.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The initialized orthogonal transforms (mappings) :math:`W_i` for each
subject.
voxels : list of int
A list with the number of voxels per subject.
Note
----
This function assumes that the numpy random number generator was
initialized.
Not thread safe.
"""
w = []
subjects = len(data)
voxels = np.empty(subjects, dtype=int)
# Set Wi to a random orthogonal voxels by features matrix
for subject in range(subjects):
voxels[subject] = data[subject].shape[0]
rnd_matrix = np.random.random((voxels[subject], features))
q, r = np.linalg.qr(rnd_matrix)
w.append(q)
return w, voxels
class SRM(BaseEstimator, TransformerMixin):
"""Probabilistic Shared Response Model (SRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
sigma_s_ : array, shape=[features, features]
The covariance of the shared response Normal distribution.
mu_ : list of array, element i has shape=[voxels_i]
The voxel means over the samples for each subject.
rho2_ : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
Note
----
The number of voxels may be different between subjects. However, the
number of samples must be the same across subjects.
The probabilistic Shared Response Model is approximated using the
Expectation Maximization (EM) algorithm proposed in [Chen2015]_. The
implementation follows the optimizations published in [Anderson2016]_.
This is a single node version.
The run-time complexity is :math:`O(I (V T K + V K^2 + K^3))` and the
memory complexity is :math:`O(V T)` with I - the number of iterations,
V - the sum of voxels from all subjects, T - the number of samples, and
K - the number of features (typically, :math:`V \\gg T \\gg K`).
"""
def __init__(self, n_iter=10, features=50, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.rand_seed = rand_seed
return
def fit(self, X, y=None):
"""Compute the probabilistic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Probabilistic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X)
return self
def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
        # Check if the model exists
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def _init_structures(self, data, subjects):
"""Initializes data structures for SRM and preprocess the data.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
subjects : int
The total number of subjects in `data`.
Returns
-------
x : list of array, element i has shape=[voxels_i, samples]
Demeaned data for each subject.
mu : list of array, element i has shape=[voxels_i]
Voxel means over samples, per subject.
rho2 : array, shape=[subjects]
Noise variance :math:`\\rho^2` per subject.
trace_xtx : array, shape=[subjects]
The squared Frobenius norm of the demeaned data in `x`.
"""
x = []
mu = []
rho2 = np.zeros(subjects)
trace_xtx = np.zeros(subjects)
for subject in range(subjects):
mu.append(np.mean(data[subject], 1))
rho2[subject] = 1
trace_xtx[subject] = np.sum(data[subject] ** 2)
x.append(data[subject] - mu[subject][:, np.newaxis])
return x, mu, rho2, trace_xtx
def _likelihood(self, chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples):
"""Calculate the log-likelihood function
Parameters
----------
chol_sigma_s_rhos : array, shape=[features, features]
Cholesky factorization of the matrix (Sigma_S + sum_i(1/rho_i^2)
* I)
log_det_psi : float
Determinant of diagonal matrix Psi (containing the rho_i^2 value
voxels_i times).
chol_sigma_s : array, shape=[features, features]
Cholesky factorization of the matrix Sigma_S
trace_xt_invsigma2_x : float
Trace of :math:`\\sum_i (||X_i||_F^2/\\rho_i^2)`
inv_sigma_s_rhos : array, shape=[features, features]
Inverse of :math:`(\\Sigma_S + \\sum_i(1/\\rho_i^2) * I)`
wt_invpsi_x : array, shape=[features, samples]
samples : int
The total number of samples in the data.
Returns
-------
        loglikelihood : float
            The log-likelihood value.
        """
        log_det = (np.log(np.diag(chol_sigma_s_rhos) ** 2).sum() + log_det_psi
                   + np.log(np.diag(chol_sigma_s) ** 2).sum())
        loglikelihood = -0.5 * samples * log_det - 0.5 * trace_xt_invsigma2_x
        loglikelihood += 0.5 * np.trace(
            wt_invpsi_x.T.dot(inv_sigma_s_rhos).dot(wt_invpsi_x))
        # + const --> -0.5*nTR*nvoxel*subjects*math.log(2*math.pi)
        return loglikelihood
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response.
"""
samples = data[0].shape[1]
subjects = len(data)
np.random.seed(self.rand_seed)
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject, and trace_xtx with
# the ||X_i||_F^2 of each subject.
w, voxels = _init_w_transforms(data, self.features)
x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
shared_response = np.zeros((self.features, samples))
sigma_s = np.identity(self.features)
        # Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# E-step:
            # Sum the inverted rho2 elements for computing W^T * Psi^-1 * W
rho0 = (1 / rho2).sum()
# Invert Sigma_s using Cholesky factorization
(chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
sigma_s, check_finite=False)
inv_sigma_s = scipy.linalg.cho_solve(
(chol_sigma_s, lower_sigma_s), np.identity(self.features),
check_finite=False)
# Invert (Sigma_s + rho_0 * I) using Cholesky factorization
sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
(chol_sigma_s_rhos, lower_sigma_s_rhos) = scipy.linalg.cho_factor(
sigma_s_rhos, check_finite=False)
inv_sigma_s_rhos = scipy.linalg.cho_solve(
(chol_sigma_s_rhos, lower_sigma_s_rhos),
np.identity(self.features), check_finite=False)
# Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
# of X_i^T * rho_i^-2 * X_i
wt_invpsi_x = np.zeros((self.features, samples))
trace_xt_invsigma2_x = 0.0
for subject in range(subjects):
wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject]
trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]
log_det_psi = np.sum(np.log(rho2) * voxels)
# Update the shared response
shared_response = sigma_s.dot(
np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot(
wt_invpsi_x)
# M-step
# Update Sigma_s and compute its trace
sigma_s = (inv_sigma_s_rhos
+ shared_response.dot(shared_response.T) / samples)
trace_sigma_s = samples * np.trace(sigma_s)
# Update each subject's mapping transform W_i and error variance
# rho_i^2
for subject in range(subjects):
a_subject = x[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, s_subject, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
rho2[subject] = trace_xtx[subject]
                rho2[subject] += -2 * np.sum(w[subject] * a_subject)
rho2[subject] += trace_sigma_s
rho2[subject] /= samples * voxels[subject]
if logger.isEnabledFor(logging.INFO):
# Calculate and log the current log-likelihood for checking
# convergence
loglike = self._likelihood(
chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples)
logger.info('Objective function %f' % loglike)
return sigma_s, w, mu, rho2, shared_response
class DetSRM(BaseEstimator, TransformerMixin):
"""Deterministic Shared Response Model (DetSRM)
Given multi-subject data, factorize it as a shared response S among all
subjects and an orthogonal transform W per subject:
.. math:: X_i \\approx W_i S, \\forall i=1 \\dots N
Parameters
----------
n_iter : int, default: 10
Number of iterations to run the algorithm.
features : int, default: 50
Number of features to compute.
rand_seed : int, default: 0
Seed for initializing the random number generator.
Attributes
----------
w_ : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) for each subject.
s_ : array, shape=[features, samples]
The shared response.
Note
----
The number of voxels may be different between subjects. However, the
number of samples must be the same across subjects.
The Deterministic Shared Response Model is approximated using the
Block Coordinate Descent (BCD) algorithm proposed in [Chen2015]_.
This is a single node version.
The run-time complexity is :math:`O(I (V T K + V K^2))` and the memory
complexity is :math:`O(V T)` with I - the number of iterations, V - the
sum of voxels from all subjects, T - the number of samples, K - the
number of features (typically, :math:`V \\gg T \\gg K`), and N - the
number of subjects.
"""
def __init__(self, n_iter=10, features=50, rand_seed=0):
self.n_iter = n_iter
self.features = features
self.rand_seed = rand_seed
return
def fit(self, X, y=None):
"""Compute the Deterministic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Deterministic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.w_, self.s_ = self._srm(X)
return self
def transform(self, X, y=None):
"""Use the model to transform data to the Shared Response subspace
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject.
y : not used
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
        # Check if the model exists
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
s[subject] = self.w_[subject].T.dot(X[subject])
return s
def _objective_function(self, data, w, s):
"""Calculate the objective function
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response
Returns
-------
objective : float
The objective function value.
"""
subjects = len(data)
objective = 0.0
for m in range(subjects):
objective += \
np.linalg.norm(data[m] - w[m].dot(s), 'fro')**2
return objective * 0.5 / data[0].shape[1]
def _compute_shared_response(self, data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
subjects = len(data)
np.random.seed(self.rand_seed)
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject.
w, _ = _init_w_transforms(data, self.features)
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
# Main loop of the algorithm
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update each subject's mapping transform W_i:
for subject in range(subjects):
a_subject = data[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, _, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
# Update the shared response:
shared_response = self._compute_shared_response(data, w)
if logger.isEnabledFor(logging.INFO):
# Calculate the current objective function value
objective = self._objective_function(data, w, shared_response)
logger.info('Objective function %f' % objective)
return w, shared_response
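# A minimal end-to-end sketch on synthetic data (assumptions: three "subjects"
# with different voxel counts but the same number of samples; the sizes are
# arbitrary). It fits the deterministic model, checks that each learned
# mapping is approximately orthogonal, and projects the data into the shared
# space.
if __name__ == "__main__":
    data = [np.random.RandomState(i).randn(v, 40) for i, v in enumerate((30, 25, 35))]
    model = DetSRM(n_iter=5, features=5, rand_seed=0)
    model.fit(data)
    for w in model.w_:
        # W_i has orthonormal columns: W_i^T W_i ~= I
        assert np.allclose(w.T.dot(w), np.eye(5), atol=1e-8)
    shared = model.transform(data)
    print([s.shape for s in shared])  # -> [(5, 40), (5, 40), (5, 40)]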
|
import asyncio
from datetime import timedelta
import logging
import async_timeout
import coronavirus
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client, entity_registry, update_coordinator
from .const import DOMAIN
PLATFORMS = ["sensor"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Coronavirus component."""
# Make sure coordinator is initialized.
await get_coordinator(hass)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Coronavirus from a config entry."""
if isinstance(entry.data["country"], int):
hass.config_entries.async_update_entry(
entry, data={**entry.data, "country": entry.title}
)
@callback
def _async_migrator(entity_entry: entity_registry.RegistryEntry):
"""Migrate away from unstable ID."""
country, info_type = entity_entry.unique_id.rsplit("-", 1)
if not country.isnumeric():
return None
return {"new_unique_id": f"{entry.title}-{info_type}"}
await entity_registry.async_migrate_entries(
hass, entry.entry_id, _async_migrator
)
if not entry.unique_id:
hass.config_entries.async_update_entry(entry, unique_id=entry.data["country"])
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
return unload_ok
async def get_coordinator(hass):
"""Get the data update coordinator."""
if DOMAIN in hass.data:
return hass.data[DOMAIN]
async def async_get_cases():
with async_timeout.timeout(10):
return {
case.country: case
for case in await coronavirus.get_cases(
aiohttp_client.async_get_clientsession(hass)
)
}
hass.data[DOMAIN] = update_coordinator.DataUpdateCoordinator(
hass,
logging.getLogger(__name__),
name=DOMAIN,
update_method=async_get_cases,
update_interval=timedelta(hours=1),
)
await hass.data[DOMAIN].async_refresh()
return hass.data[DOMAIN]
|
import pytest
class Helpers:
@staticmethod
def assert_order_lists_equal(orders_1, orders_2):
"""
Carries out Order-wise comparison on all Order attributes
        with the exception of the generated ID, in order to determine
if two order lists are equal.
Parameters
----------
orders_1 : `List[Order]`
The first order list.
orders_2 : `List[Order]`
The second order list.
"""
for order_1, order_2 in zip(orders_1, orders_2):
assert order_1._order_attribs_equal(order_2)
@pytest.fixture
def helpers():
return Helpers
|
import re
import numpy as np
from scipy import linalg
from ...io.pick import _picks_to_idx
from ...utils import fill_doc
@fill_doc
def source_detector_distances(info, picks=None):
r"""Determine the distance between NIRS source and detectors.
Parameters
----------
info : Info
The measurement info.
%(picks_all)s
Returns
-------
dists : array of float
Array containing distances in meters.
Of shape equal to number of channels, or shape of picks if supplied.
"""
dist = [linalg.norm(ch['loc'][3:6] - ch['loc'][6:9])
for ch in info['chs']]
picks = _picks_to_idx(info, picks, exclude=[])
return np.array(dist, float)[picks]
def short_channels(info, threshold=0.01):
r"""Determine which NIRS channels are short.
Channels with a source to detector distance of less than
``threshold`` are reported as short. The default threshold is 0.01 m.
Parameters
----------
info : Info
The measurement info.
threshold : float
The threshold distance for what is considered short in meters.
Returns
-------
short : array of bool
Array indicating which channels are short.
Of shape equal to number of channels.
"""
return source_detector_distances(info) < threshold
def _channel_frequencies(raw):
"""Return the light frequency for each channel."""
picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
freqs = np.empty(picks.size, int)
for ii in picks:
freqs[ii] = raw.info['chs'][ii]['loc'][9]
return freqs
def _check_channels_ordered(raw, freqs):
"""Check channels followed expected fNIRS format."""
# Every second channel should be same SD pair
# and have the specified light frequencies.
picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
if len(picks) % 2 != 0:
raise ValueError(
'NIRS channels not ordered correctly. An even number of NIRS '
'channels is required. %d channels were provided: %r'
% (len(raw.ch_names), raw.ch_names))
for ii in picks[::2]:
ch1_name_info = re.match(r'S(\d+)_D(\d+) (\d+)',
raw.info['chs'][ii]['ch_name'])
ch2_name_info = re.match(r'S(\d+)_D(\d+) (\d+)',
raw.info['chs'][ii + 1]['ch_name'])
if raw.info['chs'][ii]['loc'][9] != \
float(ch1_name_info.groups()[2]) or \
raw.info['chs'][ii + 1]['loc'][9] != \
float(ch2_name_info.groups()[2]):
raise ValueError(
'NIRS channels not ordered correctly. Channel name and NIRS'
' frequency do not match: %s -> %s & %s -> %s'
% (raw.info['chs'][ii]['ch_name'],
raw.info['chs'][ii]['loc'][9],
raw.info['chs'][ii + 1]['ch_name'],
raw.info['chs'][ii + 1]['loc'][9]))
if (ch1_name_info.groups()[0] != ch2_name_info.groups()[0]) or \
(ch1_name_info.groups()[1] != ch2_name_info.groups()[1]) or \
(int(ch1_name_info.groups()[2]) != freqs[0]) or \
(int(ch2_name_info.groups()[2]) != freqs[1]):
raise ValueError(
'NIRS channels not ordered correctly. Channels must be ordered'
' as source detector pairs with frequencies: %d & %d'
% (freqs[0], freqs[1]))
return picks
def _fnirs_check_bads(raw):
"""Check consistent labeling of bads across fnirs optodes."""
# For an optode pair, if one component (light frequency or chroma) is
# marked as bad then they all should be. This function checks that all
# optodes are marked bad consistently.
picks = _picks_to_idx(raw.info, 'fnirs', exclude=[])
for ii in picks[::2]:
bad_opto = set(raw.info['bads']).intersection(raw.ch_names[ii:ii + 2])
if len(bad_opto) == 1:
raise RuntimeError('NIRS bad labelling is not consistent')
def _fnirs_spread_bads(raw):
"""Spread bad labeling across fnirs channels."""
# For an optode if any component (light frequency or chroma) is marked
# as bad, then they all should be. This function will find any pairs marked
# as bad and spread the bad marking to all components of the optode pair.
picks = _picks_to_idx(raw.info, 'fnirs', exclude=[])
new_bads = list()
for ii in picks[::2]:
bad_opto = set(raw.info['bads']).intersection(raw.ch_names[ii:ii + 2])
if len(bad_opto) > 0:
new_bads.extend(raw.ch_names[ii:ii + 2])
raw.info['bads'] = new_bads
return raw
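# Hypothetical usage sketch (assumes MNE-Python with an fNIRS recording on
# disk; the path below is a placeholder). The source-detector distances and
# the short-channel mask are derived purely from the channel locations stored
# in ``raw.info``.
if __name__ == "__main__":
    import mne

    raw = mne.io.read_raw_nirx("/path/to/nirx_recording")  # placeholder path
    dists = source_detector_distances(raw.info)
    print("mean source-detector distance (m):", dists.mean())
    print("number of short channels:", short_channels(raw.info).sum())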
|
from django.conf import settings
from django.utils.translation import get_language
import weblate
# Enabled languages in the docs
LANGMAP = {
"zh-hans": "zh_CN",
"pt-br": "pt_BR",
"uk": "uk",
"ru": "ru",
"es": "es",
"pt": "pt",
"nb": "no",
"ja": "ja",
"fr": "fr",
}
def get_doc_url(page, anchor="", user=None):
"""Return URL to documentation."""
# Should we use tagged release or latest version
if "-dev" in weblate.VERSION or (
(user is None or not user.is_authenticated) and settings.HIDE_VERSION
):
version = "latest"
else:
version = f"weblate-{weblate.VERSION}"
# Language variant
code = LANGMAP.get(get_language(), "en")
# Generate URL
url = f"https://docs.weblate.org/{code}/{version}/{page}.html"
# Optionally append anchor
if anchor != "":
url += "#{}".format(anchor.replace("_", "-"))
return url
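# A dependency-free sketch of the URL construction above (assumptions: the
# resolved language code is "en" and the tagged version is "weblate-4.9";
# both stand in for whatever get_language() and weblate.VERSION yield).
if __name__ == "__main__":
    page, anchor = "admin/projects", "component_settings"
    code, version = "en", "weblate-4.9"
    url = f"https://docs.weblate.org/{code}/{version}/{page}.html"
    if anchor != "":
        url += "#{}".format(anchor.replace("_", "-"))
    print(url)  # https://docs.weblate.org/en/weblate-4.9/admin/projects.html#component-settings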
|
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_ENTITY_NAMESPACE, CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import DEFAULT_ENTITY_NAMESPACE, DOMAIN as SKYBELL_DOMAIN, SkybellDevice
SCAN_INTERVAL = timedelta(seconds=30)
# Sensor types: Name, icon
SENSOR_TYPES = {"chime_level": ["Chime Level", "bell-ring"]}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_ENTITY_NAMESPACE, default=DEFAULT_ENTITY_NAMESPACE
): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform for a Skybell device."""
skybell = hass.data.get(SKYBELL_DOMAIN)
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
for device in skybell.get_devices():
sensors.append(SkybellSensor(device, sensor_type))
add_entities(sensors, True)
class SkybellSensor(SkybellDevice):
"""A sensor implementation for Skybell devices."""
def __init__(self, device, sensor_type):
"""Initialize a sensor for a Skybell device."""
super().__init__(device)
self._sensor_type = sensor_type
self._icon = "mdi:{}".format(SENSOR_TYPES[self._sensor_type][1])
self._name = "{} {}".format(
self._device.name, SENSOR_TYPES[self._sensor_type][0]
)
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
def update(self):
"""Get the latest data and updates the state."""
super().update()
if self._sensor_type == "chime_level":
self._state = self._device.outdoor_chime_level
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from darner import DarnerCollector
###############################################################################
class TestDarnerCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('DarnerCollector', {
'interval': 10,
'hosts': ['localhost:22133'],
})
self.collector = DarnerCollector(config, None)
def test_import(self):
self.assertTrue(DarnerCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_raw_stats1 = patch.object(
DarnerCollector,
'get_raw_stats',
Mock(return_value=self.getFixture(
'stats1').getvalue()))
patch_raw_stats2 = patch.object(
DarnerCollector,
'get_raw_stats',
Mock(return_value=self.getFixture(
'stats2').getvalue()))
patch_raw_stats1.start()
self.collector.collect()
patch_raw_stats1.stop()
self.assertPublishedMany(publish_mock, {})
patch_raw_stats2.start()
self.collector.collect()
patch_raw_stats2.stop()
metrics = {
'localhost.uptime': 2422175,
'localhost.total_items': 20,
'localhost.curr_connections': 2,
'localhost.total_connections': 15,
'localhost.cmd_get': 100,
'localhost.cmd_set': 150,
'localhost.queues.test1.items': 2,
'localhost.queues.test1.waiters': 4,
'localhost.queues.test1.open_transactions': 8,
'localhost.queues.test_2.items': 16,
'localhost.queues.test_2.waiters': 32,
'localhost.queues.test_2.open_transactions': 64,
'localhost.queues.test_3_bar.items': 128,
'localhost.queues.test_3_bar.waiters': 256,
'localhost.queues.test_3_bar.open_transactions': 512,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
import pytest
from homeassistant.helpers import singleton
from tests.async_mock import Mock
@pytest.fixture
def mock_hass():
"""Mock hass fixture."""
return Mock(data={})
async def test_singleton_async(mock_hass):
"""Test singleton with async function."""
@singleton.singleton("test_key")
async def something(hass):
return object()
result1 = await something(mock_hass)
result2 = await something(mock_hass)
assert result1 is result2
assert "test_key" in mock_hass.data
assert mock_hass.data["test_key"] is result1
def test_singleton(mock_hass):
"""Test singleton with function."""
@singleton.singleton("test_key")
def something(hass):
return object()
result1 = something(mock_hass)
result2 = something(mock_hass)
assert result1 is result2
assert "test_key" in mock_hass.data
assert mock_hass.data["test_key"] is result1
|
import asyncio
import datetime
from decimal import Decimal
from itertools import chain, repeat
from homeassistant.components.dsmr.const import DOMAIN
from homeassistant.components.dsmr.sensor import DerivativeDSMREntity
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import ENERGY_KILO_WATT_HOUR, TIME_HOURS, VOLUME_CUBIC_METERS
from homeassistant.setup import async_setup_component
from tests.async_mock import DEFAULT, MagicMock
from tests.common import MockConfigEntry, patch
async def test_setup_platform(hass, dsmr_connection_fixture):
"""Test setup of platform."""
async_add_entities = MagicMock()
entry_data = {
"platform": DOMAIN,
"port": "/dev/ttyUSB0",
"dsmr_version": "2.2",
"precision": 4,
"reconnect_interval": 30,
}
serial_data = {"serial_id": "1234", "serial_id_gas": "5678"}
with patch("homeassistant.components.dsmr.async_setup", return_value=True), patch(
"homeassistant.components.dsmr.async_setup_entry", return_value=True
), patch(
"homeassistant.components.dsmr.config_flow._validate_dsmr_connection",
return_value=serial_data,
):
assert await async_setup_component(
hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: entry_data}
)
await hass.async_block_till_done()
assert not async_add_entities.called
# Check config entry
conf_entries = hass.config_entries.async_entries(DOMAIN)
assert len(conf_entries) == 1
entry = conf_entries[0]
assert entry.state == "loaded"
assert entry.data == {**entry_data, **serial_data}
async def test_default_setup(hass, dsmr_connection_fixture):
"""Test the default setup."""
(connection_factory, transport, protocol) = dsmr_connection_fixture
from dsmr_parser.obis_references import (
CURRENT_ELECTRICITY_USAGE,
ELECTRICITY_ACTIVE_TARIFF,
GAS_METER_READING,
)
from dsmr_parser.objects import CosemObject, MBusObject
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "2.2",
"precision": 4,
"reconnect_interval": 30,
"serial_id": "1234",
"serial_id_gas": "5678",
}
telegram = {
CURRENT_ELECTRICITY_USAGE: CosemObject(
[{"value": Decimal("0.0"), "unit": ENERGY_KILO_WATT_HOUR}]
),
ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0001", "unit": ""}]),
GAS_METER_READING: MBusObject(
[
{"value": datetime.datetime.fromtimestamp(1551642213)},
{"value": Decimal(745.695), "unit": VOLUME_CUBIC_METERS},
]
),
}
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.power_consumption")
assert entry
assert entry.unique_id == "1234_Power_Consumption"
entry = registry.async_get("sensor.gas_consumption")
assert entry
assert entry.unique_id == "5678_Gas_Consumption"
telegram_callback = connection_factory.call_args_list[0][0][2]
# make sure entities have been created and return 'unknown' state
power_consumption = hass.states.get("sensor.power_consumption")
assert power_consumption.state == "unknown"
assert power_consumption.attributes.get("unit_of_measurement") is None
# simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
telegram_callback(telegram)
# after receiving telegram entities need to have the chance to update
await asyncio.sleep(0)
# ensure entities have new state value after incoming telegram
power_consumption = hass.states.get("sensor.power_consumption")
assert power_consumption.state == "0.0"
assert (
power_consumption.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
)
# tariff should be translated in human readable and have no unit
power_tariff = hass.states.get("sensor.power_tariff")
assert power_tariff.state == "low"
assert power_tariff.attributes.get("unit_of_measurement") == ""
# check if gas consumption is parsed correctly
gas_consumption = hass.states.get("sensor.gas_consumption")
assert gas_consumption.state == "745.695"
assert gas_consumption.attributes.get("unit_of_measurement") == VOLUME_CUBIC_METERS
async def test_setup_only_energy(hass, dsmr_connection_fixture):
"""Test the default setup."""
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "2.2",
"precision": 4,
"reconnect_interval": 30,
"serial_id": "1234",
}
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.power_consumption")
assert entry
assert entry.unique_id == "1234_Power_Consumption"
entry = registry.async_get("sensor.gas_consumption")
assert not entry
async def test_derivative():
"""Test calculation of derivative value."""
from dsmr_parser.objects import MBusObject
config = {"platform": "dsmr"}
entity = DerivativeDSMREntity("test", "test_device", "5678", "1.0.0", config)
await entity.async_update()
assert entity.state is None, "initial state not unknown"
entity.telegram = {
"1.0.0": MBusObject(
[
{"value": datetime.datetime.fromtimestamp(1551642213)},
{"value": Decimal(745.695), "unit": VOLUME_CUBIC_METERS},
]
)
}
await entity.async_update()
assert entity.state is None, "state after first update should still be unknown"
entity.telegram = {
"1.0.0": MBusObject(
[
{"value": datetime.datetime.fromtimestamp(1551642543)},
{"value": Decimal(745.698), "unit": VOLUME_CUBIC_METERS},
]
)
}
await entity.async_update()
assert (
abs(entity.state - 0.033) < 0.00001
), "state should be hourly usage calculated from first and second update"
assert entity.unit_of_measurement == f"{VOLUME_CUBIC_METERS}/{TIME_HOURS}"
async def test_v4_meter(hass, dsmr_connection_fixture):
"""Test if v4 meter is correctly parsed."""
(connection_factory, transport, protocol) = dsmr_connection_fixture
from dsmr_parser.obis_references import (
ELECTRICITY_ACTIVE_TARIFF,
HOURLY_GAS_METER_READING,
)
from dsmr_parser.objects import CosemObject, MBusObject
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "4",
"precision": 4,
"reconnect_interval": 30,
"serial_id": "1234",
"serial_id_gas": "5678",
}
telegram = {
HOURLY_GAS_METER_READING: MBusObject(
[
{"value": datetime.datetime.fromtimestamp(1551642213)},
{"value": Decimal(745.695), "unit": VOLUME_CUBIC_METERS},
]
),
ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0001", "unit": ""}]),
}
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
telegram_callback = connection_factory.call_args_list[0][0][2]
# simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
telegram_callback(telegram)
# after receiving telegram entities need to have the chance to update
await asyncio.sleep(0)
# tariff should be translated in human readable and have no unit
power_tariff = hass.states.get("sensor.power_tariff")
assert power_tariff.state == "low"
assert power_tariff.attributes.get("unit_of_measurement") == ""
# check if gas consumption is parsed correctly
gas_consumption = hass.states.get("sensor.gas_consumption")
assert gas_consumption.state == "745.695"
assert gas_consumption.attributes.get("unit_of_measurement") == VOLUME_CUBIC_METERS
async def test_v5_meter(hass, dsmr_connection_fixture):
"""Test if v5 meter is correctly parsed."""
(connection_factory, transport, protocol) = dsmr_connection_fixture
from dsmr_parser.obis_references import (
ELECTRICITY_ACTIVE_TARIFF,
HOURLY_GAS_METER_READING,
)
from dsmr_parser.objects import CosemObject, MBusObject
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "5",
"precision": 4,
"reconnect_interval": 30,
"serial_id": "1234",
"serial_id_gas": "5678",
}
telegram = {
HOURLY_GAS_METER_READING: MBusObject(
[
{"value": datetime.datetime.fromtimestamp(1551642213)},
{"value": Decimal(745.695), "unit": VOLUME_CUBIC_METERS},
]
),
ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0001", "unit": ""}]),
}
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
telegram_callback = connection_factory.call_args_list[0][0][2]
# simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
telegram_callback(telegram)
# after receiving telegram entities need to have the chance to update
await asyncio.sleep(0)
    # tariff should be translated into a human-readable value and have no unit
power_tariff = hass.states.get("sensor.power_tariff")
assert power_tariff.state == "low"
assert power_tariff.attributes.get("unit_of_measurement") == ""
# check if gas consumption is parsed correctly
gas_consumption = hass.states.get("sensor.gas_consumption")
assert gas_consumption.state == "745.695"
assert gas_consumption.attributes.get("unit_of_measurement") == VOLUME_CUBIC_METERS
async def test_belgian_meter(hass, dsmr_connection_fixture):
"""Test if Belgian meter is correctly parsed."""
(connection_factory, transport, protocol) = dsmr_connection_fixture
from dsmr_parser.obis_references import (
BELGIUM_HOURLY_GAS_METER_READING,
ELECTRICITY_ACTIVE_TARIFF,
)
from dsmr_parser.objects import CosemObject, MBusObject
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "5B",
"precision": 4,
"reconnect_interval": 30,
"serial_id": "1234",
"serial_id_gas": "5678",
}
telegram = {
BELGIUM_HOURLY_GAS_METER_READING: MBusObject(
[
{"value": datetime.datetime.fromtimestamp(1551642213)},
{"value": Decimal(745.695), "unit": VOLUME_CUBIC_METERS},
]
),
ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0001", "unit": ""}]),
}
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
telegram_callback = connection_factory.call_args_list[0][0][2]
# simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
telegram_callback(telegram)
# after receiving telegram entities need to have the chance to update
await asyncio.sleep(0)
    # tariff should be translated into a human-readable value and have no unit
power_tariff = hass.states.get("sensor.power_tariff")
assert power_tariff.state == "normal"
assert power_tariff.attributes.get("unit_of_measurement") == ""
# check if gas consumption is parsed correctly
gas_consumption = hass.states.get("sensor.gas_consumption")
assert gas_consumption.state == "745.695"
assert gas_consumption.attributes.get("unit_of_measurement") == VOLUME_CUBIC_METERS
async def test_belgian_meter_low(hass, dsmr_connection_fixture):
"""Test if Belgian meter is correctly parsed."""
(connection_factory, transport, protocol) = dsmr_connection_fixture
from dsmr_parser.obis_references import ELECTRICITY_ACTIVE_TARIFF
from dsmr_parser.objects import CosemObject
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "5B",
"precision": 4,
"reconnect_interval": 30,
"serial_id": "1234",
"serial_id_gas": "5678",
}
telegram = {ELECTRICITY_ACTIVE_TARIFF: CosemObject([{"value": "0002", "unit": ""}])}
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
telegram_callback = connection_factory.call_args_list[0][0][2]
# simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
telegram_callback(telegram)
# after receiving telegram entities need to have the chance to update
await asyncio.sleep(0)
    # tariff should be translated into a human-readable value and have no unit
power_tariff = hass.states.get("sensor.power_tariff")
assert power_tariff.state == "low"
assert power_tariff.attributes.get("unit_of_measurement") == ""
async def test_tcp(hass, dsmr_connection_fixture):
"""If proper config provided TCP connection should be made."""
(connection_factory, transport, protocol) = dsmr_connection_fixture
entry_data = {
"host": "localhost",
"port": "1234",
"dsmr_version": "2.2",
"precision": 4,
"reconnect_interval": 30,
"serial_id": "1234",
"serial_id_gas": "5678",
}
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
assert connection_factory.call_args_list[0][0][0] == "localhost"
assert connection_factory.call_args_list[0][0][1] == "1234"
async def test_connection_errors_retry(hass, dsmr_connection_fixture):
"""Connection should be retried on error during setup."""
(connection_factory, transport, protocol) = dsmr_connection_fixture
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "2.2",
"precision": 4,
"reconnect_interval": 0,
"serial_id": "1234",
"serial_id_gas": "5678",
}
# override the mock to have it fail the first time and succeed after
first_fail_connection_factory = MagicMock(
return_value=(transport, protocol),
side_effect=chain([TimeoutError], repeat(DEFAULT)),
)
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
with patch(
"homeassistant.components.dsmr.sensor.create_dsmr_reader",
first_fail_connection_factory,
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
# wait for sleep to resolve
await hass.async_block_till_done()
assert first_fail_connection_factory.call_count >= 2, "connecting not retried"
async def test_reconnect(hass, dsmr_connection_fixture):
"""If transport disconnects, the connection should be retried."""
(connection_factory, transport, protocol) = dsmr_connection_fixture
entry_data = {
"port": "/dev/ttyUSB0",
"dsmr_version": "2.2",
"precision": 4,
"reconnect_interval": 0,
"serial_id": "1234",
"serial_id_gas": "5678",
}
# mock waiting coroutine while connection lasts
closed = asyncio.Event()
# Handshake so that `hass.async_block_till_done()` doesn't cycle forever
closed2 = asyncio.Event()
async def wait_closed():
await closed.wait()
closed2.set()
protocol.wait_closed = wait_closed
mock_entry = MockConfigEntry(
domain="dsmr", unique_id="/dev/ttyUSB0", data=entry_data
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
assert connection_factory.call_count == 1
# indicate disconnect, release wait lock and allow reconnect to happen
closed.set()
# wait for lock set to resolve
await closed2.wait()
closed2.clear()
closed.clear()
await hass.async_block_till_done()
assert connection_factory.call_count >= 2, "connecting not retried"
    # set the event again so teardown can complete successfully
closed.set()
await hass.config_entries.async_unload(mock_entry.entry_id)
assert mock_entry.state == "not_loaded"
|
from pysmartthings import Attribute, Capability
from pysmartthings.device import Status
import pytest
from homeassistant.components.climate.const import (
ATTR_CURRENT_HUMIDITY,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_IDLE,
DOMAIN as CLIMATE_DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_TEMPERATURE,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.components.smartthings import climate
from homeassistant.components.smartthings.const import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_UNKNOWN,
)
from .conftest import setup_platform
@pytest.fixture(name="legacy_thermostat")
def legacy_thermostat_fixture(device_factory):
"""Fixture returns a legacy thermostat."""
device = device_factory(
"Legacy Thermostat",
capabilities=[Capability.thermostat],
status={
Attribute.cooling_setpoint: 74,
Attribute.heating_setpoint: 68,
Attribute.thermostat_fan_mode: "auto",
Attribute.supported_thermostat_fan_modes: ["auto", "on"],
Attribute.thermostat_mode: "auto",
Attribute.supported_thermostat_modes: climate.MODE_TO_STATE.keys(),
Attribute.thermostat_operating_state: "idle",
},
)
device.status.attributes[Attribute.temperature] = Status(70, "F", None)
return device
@pytest.fixture(name="basic_thermostat")
def basic_thermostat_fixture(device_factory):
"""Fixture returns a basic thermostat."""
device = device_factory(
"Basic Thermostat",
capabilities=[
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
Capability.thermostat_heating_setpoint,
Capability.thermostat_mode,
],
status={
Attribute.cooling_setpoint: 74,
Attribute.heating_setpoint: 68,
Attribute.thermostat_mode: "off",
Attribute.supported_thermostat_modes: ["off", "auto", "heat", "cool"],
},
)
device.status.attributes[Attribute.temperature] = Status(70, "F", None)
return device
@pytest.fixture(name="thermostat")
def thermostat_fixture(device_factory):
"""Fixture returns a fully-featured thermostat."""
device = device_factory(
"Thermostat",
capabilities=[
Capability.temperature_measurement,
Capability.relative_humidity_measurement,
Capability.thermostat_cooling_setpoint,
Capability.thermostat_heating_setpoint,
Capability.thermostat_mode,
Capability.thermostat_operating_state,
Capability.thermostat_fan_mode,
],
status={
Attribute.cooling_setpoint: 74,
Attribute.heating_setpoint: 68,
Attribute.thermostat_fan_mode: "on",
Attribute.supported_thermostat_fan_modes: ["auto", "on"],
Attribute.thermostat_mode: "heat",
Attribute.supported_thermostat_modes: [
"auto",
"heat",
"cool",
"off",
"eco",
],
Attribute.thermostat_operating_state: "idle",
Attribute.humidity: 34,
},
)
device.status.attributes[Attribute.temperature] = Status(70, "F", None)
return device
@pytest.fixture(name="buggy_thermostat")
def buggy_thermostat_fixture(device_factory):
"""Fixture returns a buggy thermostat."""
device = device_factory(
"Buggy Thermostat",
capabilities=[
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
Capability.thermostat_heating_setpoint,
Capability.thermostat_mode,
],
status={
Attribute.thermostat_mode: "heating",
Attribute.cooling_setpoint: 74,
Attribute.heating_setpoint: 68,
},
)
device.status.attributes[Attribute.temperature] = Status(70, "F", None)
return device
@pytest.fixture(name="air_conditioner")
def air_conditioner_fixture(device_factory):
"""Fixture returns a air conditioner."""
device = device_factory(
"Air Conditioner",
capabilities=[
Capability.air_conditioner_mode,
Capability.demand_response_load_control,
Capability.air_conditioner_fan_mode,
Capability.power_consumption_report,
Capability.switch,
Capability.temperature_measurement,
Capability.thermostat_cooling_setpoint,
],
status={
Attribute.air_conditioner_mode: "auto",
Attribute.supported_ac_modes: [
"cool",
"dry",
"wind",
"auto",
"heat",
"fanOnly",
],
Attribute.drlc_status: {
"duration": 0,
"drlcLevel": -1,
"start": "1970-01-01T00:00:00Z",
"override": False,
},
Attribute.fan_mode: "medium",
Attribute.supported_ac_fan_modes: [
"auto",
"low",
"medium",
"high",
"turbo",
],
Attribute.power_consumption: {
"start": "2019-02-24T21:03:04Z",
"power": 0,
"energy": 500,
"end": "2019-02-26T02:05:55Z",
},
Attribute.switch: "on",
Attribute.cooling_setpoint: 23,
},
)
device.status.attributes[Attribute.temperature] = Status(24, "C", None)
return device
async def test_legacy_thermostat_entity_state(hass, legacy_thermostat):
"""Tests the state attributes properly match the thermostat type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[legacy_thermostat])
state = hass.states.get("climate.legacy_thermostat")
assert state.state == HVAC_MODE_HEAT_COOL
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_FAN_MODE
| SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_TARGET_TEMPERATURE
)
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
assert sorted(state.attributes[ATTR_HVAC_MODES]) == [
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
]
assert state.attributes[ATTR_FAN_MODE] == "auto"
assert state.attributes[ATTR_FAN_MODES] == ["auto", "on"]
assert state.attributes[ATTR_TARGET_TEMP_LOW] == 20 # celsius
assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 23.3 # celsius
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.1 # celsius
async def test_basic_thermostat_entity_state(hass, basic_thermostat):
"""Tests the state attributes properly match the thermostat type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[basic_thermostat])
state = hass.states.get("climate.basic_thermostat")
assert state.state == HVAC_MODE_OFF
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_TARGET_TEMPERATURE_RANGE | SUPPORT_TARGET_TEMPERATURE
)
assert ATTR_HVAC_ACTION not in state.attributes
assert sorted(state.attributes[ATTR_HVAC_MODES]) == [
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
]
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.1 # celsius
async def test_thermostat_entity_state(hass, thermostat):
"""Tests the state attributes properly match the thermostat type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
state = hass.states.get("climate.thermostat")
assert state.state == HVAC_MODE_HEAT
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_FAN_MODE
| SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_TARGET_TEMPERATURE
)
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
assert sorted(state.attributes[ATTR_HVAC_MODES]) == [
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
]
assert state.attributes[ATTR_FAN_MODE] == "on"
assert state.attributes[ATTR_FAN_MODES] == ["auto", "on"]
assert state.attributes[ATTR_TEMPERATURE] == 20 # celsius
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.1 # celsius
assert state.attributes[ATTR_CURRENT_HUMIDITY] == 34
async def test_buggy_thermostat_entity_state(hass, buggy_thermostat):
"""Tests the state attributes properly match the thermostat type."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[buggy_thermostat])
state = hass.states.get("climate.buggy_thermostat")
assert state.state == STATE_UNKNOWN
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_TARGET_TEMPERATURE_RANGE | SUPPORT_TARGET_TEMPERATURE
)
assert state.state is STATE_UNKNOWN
assert state.attributes[ATTR_TEMPERATURE] is None
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.1 # celsius
assert state.attributes[ATTR_HVAC_MODES] == []
async def test_buggy_thermostat_invalid_mode(hass, buggy_thermostat):
"""Tests when an invalid operation mode is included."""
buggy_thermostat.status.update_attribute_value(
Attribute.supported_thermostat_modes, ["heat", "emergency heat", "other"]
)
await setup_platform(hass, CLIMATE_DOMAIN, devices=[buggy_thermostat])
state = hass.states.get("climate.buggy_thermostat")
assert state.attributes[ATTR_HVAC_MODES] == [HVAC_MODE_HEAT]
async def test_air_conditioner_entity_state(hass, air_conditioner):
"""Tests when an invalid operation mode is included."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_HEAT_COOL
assert (
state.attributes[ATTR_SUPPORTED_FEATURES]
== SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE
)
assert sorted(state.attributes[ATTR_HVAC_MODES]) == [
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
]
assert state.attributes[ATTR_FAN_MODE] == "medium"
assert sorted(state.attributes[ATTR_FAN_MODES]) == [
"auto",
"high",
"low",
"medium",
"turbo",
]
assert state.attributes[ATTR_TEMPERATURE] == 23
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 24
assert state.attributes["drlc_status_duration"] == 0
assert state.attributes["drlc_status_level"] == -1
assert state.attributes["drlc_status_start"] == "1970-01-01T00:00:00Z"
assert state.attributes["drlc_status_override"] is False
assert state.attributes["power_consumption_start"] == "2019-02-24T21:03:04Z"
assert state.attributes["power_consumption_power"] == 0
assert state.attributes["power_consumption_energy"] == 500
assert state.attributes["power_consumption_end"] == "2019-02-26T02:05:55Z"
async def test_set_fan_mode(hass, thermostat, air_conditioner):
"""Test the fan mode is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat, air_conditioner])
entity_ids = ["climate.thermostat", "climate.air_conditioner"]
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: entity_ids, ATTR_FAN_MODE: "auto"},
blocking=True,
)
for entity_id in entity_ids:
state = hass.states.get(entity_id)
assert state.attributes[ATTR_FAN_MODE] == "auto", entity_id
async def test_set_hvac_mode(hass, thermostat, air_conditioner):
"""Test the hvac mode is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat, air_conditioner])
entity_ids = ["climate.thermostat", "climate.air_conditioner"]
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: entity_ids, ATTR_HVAC_MODE: HVAC_MODE_COOL},
blocking=True,
)
for entity_id in entity_ids:
state = hass.states.get(entity_id)
assert state.state == HVAC_MODE_COOL, entity_id
async def test_ac_set_hvac_mode_from_off(hass, air_conditioner):
"""Test setting HVAC mode when the unit is off."""
air_conditioner.status.update_attribute_value(
Attribute.air_conditioner_mode, "heat"
)
air_conditioner.status.update_attribute_value(Attribute.switch, "off")
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{
ATTR_ENTITY_ID: "climate.air_conditioner",
ATTR_HVAC_MODE: HVAC_MODE_HEAT_COOL,
},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_HEAT_COOL
async def test_ac_set_hvac_mode_off(hass, air_conditioner):
"""Test the AC HVAC mode can be turned off set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state != HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.air_conditioner", ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_OFF
async def test_set_temperature_heat_mode(hass, thermostat):
"""Test the temperature is set successfully when in heat mode."""
thermostat.status.thermostat_mode = "heat"
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_TEMPERATURE: 21},
blocking=True,
)
state = hass.states.get("climate.thermostat")
assert state.state == HVAC_MODE_HEAT
assert state.attributes[ATTR_TEMPERATURE] == 21
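    # 21 °C corresponds to 69.8 °F; the fixture device stores its setpoints in Fahrenheit.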
assert thermostat.status.heating_setpoint == 69.8
async def test_set_temperature_cool_mode(hass, thermostat):
"""Test the temperature is set successfully when in cool mode."""
thermostat.status.thermostat_mode = "cool"
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_TEMPERATURE: 21},
blocking=True,
)
state = hass.states.get("climate.thermostat")
assert state.attributes[ATTR_TEMPERATURE] == 21
async def test_set_temperature(hass, thermostat):
"""Test the temperature is set successfully."""
thermostat.status.thermostat_mode = "auto"
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.thermostat",
ATTR_TARGET_TEMP_HIGH: 25.5,
ATTR_TARGET_TEMP_LOW: 22.2,
},
blocking=True,
)
state = hass.states.get("climate.thermostat")
assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 25.5
assert state.attributes[ATTR_TARGET_TEMP_LOW] == 22.2
async def test_set_temperature_ac(hass, air_conditioner):
"""Test the temperature is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.air_conditioner", ATTR_TEMPERATURE: 27},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.attributes[ATTR_TEMPERATURE] == 27
async def test_set_temperature_ac_with_mode(hass, air_conditioner):
"""Test the temperature is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.air_conditioner",
ATTR_TEMPERATURE: 27,
ATTR_HVAC_MODE: HVAC_MODE_COOL,
},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.attributes[ATTR_TEMPERATURE] == 27
assert state.state == HVAC_MODE_COOL
async def test_set_temperature_ac_with_mode_from_off(hass, air_conditioner):
"""Test the temp and mode is set successfully when the unit is off."""
air_conditioner.status.update_attribute_value(
Attribute.air_conditioner_mode, "heat"
)
air_conditioner.status.update_attribute_value(Attribute.switch, "off")
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
assert hass.states.get("climate.air_conditioner").state == HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.air_conditioner",
ATTR_TEMPERATURE: 27,
ATTR_HVAC_MODE: HVAC_MODE_COOL,
},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.attributes[ATTR_TEMPERATURE] == 27
assert state.state == HVAC_MODE_COOL
async def test_set_temperature_ac_with_mode_to_off(hass, air_conditioner):
"""Test the temp and mode is set successfully to turn off the unit."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
assert hass.states.get("climate.air_conditioner").state != HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.air_conditioner",
ATTR_TEMPERATURE: 27,
ATTR_HVAC_MODE: HVAC_MODE_OFF,
},
blocking=True,
)
state = hass.states.get("climate.air_conditioner")
assert state.attributes[ATTR_TEMPERATURE] == 27
assert state.state == HVAC_MODE_OFF
async def test_set_temperature_with_mode(hass, thermostat):
"""Test the temperature and mode is set successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.thermostat",
ATTR_TARGET_TEMP_HIGH: 25.5,
ATTR_TARGET_TEMP_LOW: 22.2,
ATTR_HVAC_MODE: HVAC_MODE_HEAT_COOL,
},
blocking=True,
)
state = hass.states.get("climate.thermostat")
assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 25.5
assert state.attributes[ATTR_TARGET_TEMP_LOW] == 22.2
assert state.state == HVAC_MODE_HEAT_COOL
async def test_set_turn_off(hass, air_conditioner):
"""Test the a/c is turned off successfully."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_HEAT_COOL
await hass.services.async_call(
CLIMATE_DOMAIN, SERVICE_TURN_OFF, {"entity_id": "all"}, blocking=True
)
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_OFF
async def test_set_turn_on(hass, air_conditioner):
"""Test the a/c is turned on successfully."""
air_conditioner.status.update_attribute_value(Attribute.switch, "off")
await setup_platform(hass, CLIMATE_DOMAIN, devices=[air_conditioner])
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_OFF
await hass.services.async_call(
CLIMATE_DOMAIN, SERVICE_TURN_ON, {"entity_id": "all"}, blocking=True
)
state = hass.states.get("climate.air_conditioner")
assert state.state == HVAC_MODE_HEAT_COOL
async def test_entity_and_device_attributes(hass, thermostat):
"""Test the attributes of the entries are correct."""
await setup_platform(hass, CLIMATE_DOMAIN, devices=[thermostat])
entity_registry = await hass.helpers.entity_registry.async_get_registry()
device_registry = await hass.helpers.device_registry.async_get_registry()
entry = entity_registry.async_get("climate.thermostat")
assert entry
assert entry.unique_id == thermostat.device_id
entry = device_registry.async_get_device({(DOMAIN, thermostat.device_id)}, [])
assert entry
assert entry.name == thermostat.label
assert entry.model == thermostat.device_type_name
assert entry.manufacturer == "Unavailable"
|
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.binary_sensor import DEVICE_CLASSES, DOMAIN
from homeassistant.components.binary_sensor.device_condition import ENTITY_CONDITIONS
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
MockConfigEntry,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a binary_sensor."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": condition["type"],
"device_id": device_entry.id,
"entity_id": platform.ENTITIES[device_class].entity_id,
}
for device_class in DEVICE_CLASSES
for condition in ENTITY_CONDITIONS[device_class]
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert conditions == expected_conditions
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
"""Test we get the expected capabilities from a binary_sensor condition."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
for condition in conditions:
capabilities = await async_get_device_automation_capabilities(
hass, "condition", condition
)
assert capabilities == expected_capabilities
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
sensor1 = platform.ENTITIES["battery"]
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_bat_low",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_not_bat_low",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_on event - test_event1"
hass.states.async_set(sensor1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_off event - test_event2"
async def test_if_fires_on_for_condition(hass, calls):
"""Test for firing if condition is on with delay."""
point1 = dt_util.utcnow()
point2 = point1 + timedelta(seconds=10)
point3 = point2 + timedelta(seconds=10)
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
sensor1 = platform.ENTITIES["battery"]
with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
mock_utcnow.return_value = point1
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": sensor1.entity_id,
"type": "is_not_bat_low",
"for": {"seconds": 5},
},
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
("platform", "event.event_type")
)
},
},
}
]
},
)
await hass.async_block_till_done()
assert hass.states.get(sensor1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
# Time travel 10 secs into the future
mock_utcnow.return_value = point2
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
hass.states.async_set(sensor1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 0
# Time travel 20 secs into the future
mock_utcnow.return_value = point3
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_off event - test_event1"
|
import logging
from pyrecswitch import RSNetwork, RSNetworkError
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "RecSwitch {0}"
DATA_RSN = "RSN"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MAC): vol.All(cv.string, vol.Upper),
vol.Optional(CONF_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the device."""
host = config[CONF_HOST]
mac_address = config[CONF_MAC]
device_name = config.get(CONF_NAME)
if not hass.data.get(DATA_RSN):
hass.data[DATA_RSN] = RSNetwork()
job = hass.data[DATA_RSN].create_datagram_endpoint()
hass.async_create_task(job)
device = hass.data[DATA_RSN].register_device(mac_address, host)
async_add_entities([RecSwitchSwitch(device, device_name, mac_address)])
class RecSwitchSwitch(SwitchEntity):
"""Representation of a recswitch device."""
def __init__(self, device, device_name, mac_address):
"""Initialize a recswitch device."""
self.gpio_state = False
self.device = device
self.device_name = device_name
self.mac_address = mac_address
if not self.device_name:
self.device_name = DEFAULT_NAME.format(self.mac_address)
@property
def unique_id(self):
"""Return the switch unique ID."""
return self.mac_address
@property
def name(self):
"""Return the switch name."""
return self.device_name
@property
def is_on(self):
"""Return true if switch is on."""
return self.gpio_state
async def async_turn_on(self, **kwargs):
"""Turn on the switch."""
await self.async_set_gpio_status(True)
async def async_turn_off(self, **kwargs):
"""Turn off the switch."""
await self.async_set_gpio_status(False)
async def async_set_gpio_status(self, status):
"""Set the switch status."""
try:
ret = await self.device.set_gpio_status(status)
self.gpio_state = ret.state
except RSNetworkError as error:
_LOGGER.error("Setting status to %s: %r", self.name, error)
async def async_update(self):
"""Update the current switch status."""
try:
ret = await self.device.get_gpio_status()
self.gpio_state = ret.state
except RSNetworkError as error:
_LOGGER.error("Reading status from %s: %r", self.name, error)
|
import os
from kaggle_web_client import KaggleWebClient
class UserSessionClient():
GET_SOURCE_ENDPOINT = '/requests/GetKernelRunSourceForCaipRequest'
def __init__(self):
self.web_client = KaggleWebClient()
def get_exportable_ipynb(self):
"""Fetch the .ipynb source of the current notebook session.
If Kaggle datasets are attached to the notebook, the source will
        include an additional cell with logic to download the datasets
outside the Kaggle platform.
"""
request_body = {
'UseDraft': True,
}
return self.web_client.make_post_request(request_body, self.GET_SOURCE_ENDPOINT)
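# Illustrative usage sketch (not part of the original module; assumes this runs
# inside a Kaggle notebook session where KaggleWebClient can authenticate):
#
#   client = UserSessionClient()
#   ipynb_source = client.get_exportable_ipynb()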
|
import os
import json
import logging
os.environ['GIT_WEBHOOK_CONFIG'] = 'config_test.py'
logging.basicConfig(level=logging.DEBUG)
TEST_DIR = os.path.dirname(__file__)
WEBHOOKDATA_DIR = os.path.join(TEST_DIR, 'webhookdata')
WEBHOOKDATA = {}
for filename in os.listdir(WEBHOOKDATA_DIR):
name = os.path.splitext(filename)[0]
with open(os.path.join(WEBHOOKDATA_DIR, filename)) as f:
data = json.load(f)
WEBHOOKDATA[name] = data
with open(os.path.join(TEST_DIR, '../docker/ssh/id_rsa')) as f:
RSA_PRIVATE_KEY = f.read()
def success(response):
if response.status_code != 200:
print(response.data)
if response.status_code == 200:
data = json.loads(response.data)
return data['success']
return False
def load_data(response):
data = json.loads(response.data)
return data['data']
|
from __future__ import division, print_function, absolute_import
import os
import platform
import warnings
from struct import Struct
def get_terminal_size(default=(80, 25)):
"""
    Get the width and height of the console; works on Linux, OS X, Windows and Cygwin.
Adapted from https://gist.github.com/jtriley/1108174
Originally from: http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
if current_os == 'Windows': # pragma: no cover
size = _get_terminal_size_windows()
if not size:
            # needed for Windows' Python running in Cygwin's xterm!
size = _get_terminal_size_tput()
elif current_os in ('Linux', 'Darwin', 'FreeBSD',
'SunOS') or current_os.startswith('CYGWIN'):
size = _get_terminal_size_linux()
else: # pragma: no cover
warnings.warn(
"Plumbum does not know the type of the current OS for term size, defaulting to UNIX"
)
size = _get_terminal_size_linux()
if size is None: # we'll assume the standard 80x25 if for any reason we don't know the terminal size
size = default
return size
def _get_terminal_size_windows(): # pragma: no cover
try:
from ctypes import windll, create_string_buffer # type: ignore
STDERR_HANDLE = -12
h = windll.kernel32.GetStdHandle(STDERR_HANDLE)
csbi_struct = Struct("hhhhHhhhhhh")
csbi = create_string_buffer(csbi_struct.size)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
_, _, _, _, _, left, top, right, bottom, _, _ = csbi_struct.unpack(
csbi.raw)
return right - left + 1, bottom - top + 1
return None
except Exception:
return None
def _get_terminal_size_tput(): # pragma: no cover
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
    try:
        # 'local' is plumbum's command accessor; without this import the name
        # is undefined here and the tput fallback can never succeed.
        from plumbum import local

        tput = local['tput']
cols = int(tput('cols'))
rows = int(tput('lines'))
return (cols, rows)
except Exception:
return None
def _ioctl_GWINSZ(fd):
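    # TIOCGWINSZ fills a winsize struct whose first two shorts are (rows, cols);
    # callers swap them to (cols, rows).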
yx = Struct("hh")
try:
import fcntl
import termios
return yx.unpack(fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except Exception:
return None
def _get_terminal_size_linux():
cr = _ioctl_GWINSZ(0) or _ioctl_GWINSZ(1) or _ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = _ioctl_GWINSZ(fd)
os.close(fd)
except Exception:
pass
if not cr:
try:
cr = (int(os.environ['LINES']), int(os.environ['COLUMNS']))
except Exception:
return None
return cr[1], cr[0]
|
import logging
from qbittorrent.client import Client, LoginRequired
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_URL,
CONF_USERNAME,
DATA_RATE_KILOBYTES_PER_SECOND,
STATE_IDLE,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPE_CURRENT_STATUS = "current_status"
SENSOR_TYPE_DOWNLOAD_SPEED = "download_speed"
SENSOR_TYPE_UPLOAD_SPEED = "upload_speed"
DEFAULT_NAME = "qBittorrent"
SENSOR_TYPES = {
SENSOR_TYPE_CURRENT_STATUS: ["Status", None],
SENSOR_TYPE_DOWNLOAD_SPEED: ["Down Speed", DATA_RATE_KILOBYTES_PER_SECOND],
SENSOR_TYPE_UPLOAD_SPEED: ["Up Speed", DATA_RATE_KILOBYTES_PER_SECOND],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_URL): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the qBittorrent sensors."""
try:
client = Client(config[CONF_URL])
client.login(config[CONF_USERNAME], config[CONF_PASSWORD])
except LoginRequired:
_LOGGER.error("Invalid authentication")
return
except RequestException as err:
_LOGGER.error("Connection failed")
raise PlatformNotReady from err
name = config.get(CONF_NAME)
dev = []
for sensor_type in SENSOR_TYPES:
sensor = QBittorrentSensor(sensor_type, client, name, LoginRequired)
dev.append(sensor)
add_entities(dev, True)
def format_speed(speed):
"""Return a bytes/s measurement as a human readable string."""
kb_spd = float(speed) / 1024
return round(kb_spd, 2 if kb_spd < 0.1 else 1)
class QBittorrentSensor(Entity):
"""Representation of an qBittorrent sensor."""
def __init__(self, sensor_type, qbittorrent_client, client_name, exception):
"""Initialize the qBittorrent sensor."""
self._name = SENSOR_TYPES[sensor_type][0]
self.client = qbittorrent_client
self.type = sensor_type
self.client_name = client_name
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._available = False
self._exception = exception
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return true if device is available."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from qBittorrent and updates the state."""
try:
data = self.client.sync_main_data()
self._available = True
except RequestException:
_LOGGER.error("Connection lost")
self._available = False
return
except self._exception:
_LOGGER.error("Invalid authentication")
return
if data is None:
return
download = data["server_state"]["dl_info_speed"]
upload = data["server_state"]["up_info_speed"]
if self.type == SENSOR_TYPE_CURRENT_STATUS:
if upload > 0 and download > 0:
self._state = "up_down"
elif upload > 0 and download == 0:
self._state = "seeding"
elif upload == 0 and download > 0:
self._state = "downloading"
else:
self._state = STATE_IDLE
elif self.type == SENSOR_TYPE_DOWNLOAD_SPEED:
self._state = format_speed(download)
elif self.type == SENSOR_TYPE_UPLOAD_SPEED:
self._state = format_speed(upload)
|
from redbot.core.commands import Context, permissions_check
from redbot.core.utils.mod import is_mod_or_superior, check_permissions
def check_self_permissions():
async def predicate(ctx: Context):
if not ctx.guild:
return True
if await check_permissions(ctx, {"manage_messages": True}) or await is_mod_or_superior(
ctx.bot, ctx.author
):
return True
return False
return permissions_check(predicate)
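# Illustrative usage sketch (not from the original module; assumes a standard Red
# cog where this decorator guards a command):
#
#   @commands.command()
#   @check_self_permissions()
#   async def cleanup(self, ctx: Context):
#       ...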
|
import pytest
import zigpy
from zigpy.application import ControllerApplication
import zigpy.config
import zigpy.group
import zigpy.types
from homeassistant.components.zha import DOMAIN
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.device as zha_core_device
from homeassistant.setup import async_setup_component
from .common import FakeDevice, FakeEndpoint, get_zha_gateway
from tests.async_mock import AsyncMock, MagicMock, PropertyMock, patch
from tests.common import MockConfigEntry
from tests.components.light.conftest import mock_light_profiles # noqa
FIXTURE_GRP_ID = 0x1001
FIXTURE_GRP_NAME = "fixture group"
@pytest.fixture
def zigpy_app_controller():
"""Zigpy ApplicationController fixture."""
app = MagicMock(spec_set=ControllerApplication)
app.startup = AsyncMock()
app.shutdown = AsyncMock()
groups = zigpy.group.Groups(app)
groups.add_group(FIXTURE_GRP_ID, FIXTURE_GRP_NAME, suppress_event=True)
app.configure_mock(groups=groups)
type(app).ieee = PropertyMock()
app.ieee.return_value = zigpy.types.EUI64.convert("00:15:8d:00:02:32:4f:32")
type(app).nwk = PropertyMock(return_value=zigpy.types.NWK(0x0000))
type(app).devices = PropertyMock(return_value={})
return app
@pytest.fixture(name="config_entry")
async def config_entry_fixture(hass):
"""Fixture representing a config entry."""
entry = MockConfigEntry(
version=2,
domain=zha_const.DOMAIN,
data={
zigpy.config.CONF_DEVICE: {zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB0"},
zha_const.CONF_RADIO_TYPE: "ezsp",
},
)
entry.add_to_hass(hass)
return entry
@pytest.fixture
def setup_zha(hass, config_entry, zigpy_app_controller):
"""Set up ZHA component."""
zha_config = {zha_const.CONF_ENABLE_QUIRKS: False}
p1 = patch(
"bellows.zigbee.application.ControllerApplication.new",
return_value=zigpy_app_controller,
)
async def _setup(config=None):
config = config or {}
with p1:
status = await async_setup_component(
hass, zha_const.DOMAIN, {zha_const.DOMAIN: {**zha_config, **config}}
)
assert status is True
await hass.async_block_till_done()
return _setup
@pytest.fixture
def channel():
"""Channel mock factory fixture."""
def channel(name: str, cluster_id: int, endpoint_id: int = 1):
ch = MagicMock()
ch.name = name
ch.generic_id = f"channel_0x{cluster_id:04x}"
ch.id = f"{endpoint_id}:0x{cluster_id:04x}"
ch.async_configure = AsyncMock()
ch.async_initialize = AsyncMock()
return ch
return channel
@pytest.fixture
def zigpy_device_mock(zigpy_app_controller):
"""Make a fake device using the specified cluster classes."""
def _mock_dev(
endpoints,
ieee="00:0d:6f:00:0a:90:69:e7",
manufacturer="FakeManufacturer",
model="FakeModel",
node_descriptor=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
nwk=0xB79C,
patch_cluster=True,
):
"""Make a fake device using the specified cluster classes."""
device = FakeDevice(
zigpy_app_controller, ieee, manufacturer, model, node_descriptor, nwk=nwk
)
for epid, ep in endpoints.items():
endpoint = FakeEndpoint(manufacturer, model, epid)
endpoint.device = device
device.endpoints[epid] = endpoint
endpoint.device_type = ep["device_type"]
profile_id = ep.get("profile_id")
if profile_id:
endpoint.profile_id = profile_id
for cluster_id in ep.get("in_clusters", []):
endpoint.add_input_cluster(cluster_id, _patch_cluster=patch_cluster)
for cluster_id in ep.get("out_clusters", []):
endpoint.add_output_cluster(cluster_id, _patch_cluster=patch_cluster)
return device
return _mock_dev
@pytest.fixture
def zha_device_joined(hass, setup_zha):
"""Return a newly joined ZHA device."""
async def _zha_device(zigpy_dev):
await setup_zha()
zha_gateway = get_zha_gateway(hass)
await zha_gateway.async_device_initialized(zigpy_dev)
await hass.async_block_till_done()
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture
def zha_device_restored(hass, zigpy_app_controller, setup_zha, hass_storage):
"""Return a restored ZHA device."""
async def _zha_device(zigpy_dev, last_seen=None):
zigpy_app_controller.devices[zigpy_dev.ieee] = zigpy_dev
if last_seen is not None:
hass_storage[f"{DOMAIN}.storage"] = {
"key": f"{DOMAIN}.storage",
"version": 1,
"data": {
"devices": [
{
"ieee": str(zigpy_dev.ieee),
"last_seen": last_seen,
"name": f"{zigpy_dev.manufacturer} {zigpy_dev.model}",
}
],
},
}
await setup_zha()
zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture(params=["zha_device_joined", "zha_device_restored"])
def zha_device_joined_restored(request):
"""Join or restore ZHA device."""
named_method = request.getfixturevalue(request.param)
named_method.name = request.param
return named_method
@pytest.fixture
def zha_device_mock(hass, zigpy_device_mock):
"""Return a zha Device factory."""
def _zha_device(
endpoints=None,
ieee="00:11:22:33:44:55:66:77",
manufacturer="mock manufacturer",
model="mock model",
node_desc=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
patch_cluster=True,
):
if endpoints is None:
endpoints = {
1: {
"in_clusters": [0, 1, 8, 768],
"out_clusters": [0x19],
"device_type": 0x0105,
},
2: {
"in_clusters": [0],
"out_clusters": [6, 8, 0x19, 768],
"device_type": 0x0810,
},
}
zigpy_device = zigpy_device_mock(
endpoints, ieee, manufacturer, model, node_desc, patch_cluster=patch_cluster
)
zha_device = zha_core_device.ZHADevice(hass, zigpy_device, MagicMock())
return zha_device
return _zha_device
@pytest.fixture
def hass_disable_services(hass):
"""Mock service register."""
with patch.object(hass.services, "async_register"), patch.object(
hass.services, "has_service", return_value=True
):
yield hass
|
import re
import bleach
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from weblate.checks.base import TargetCheck
from weblate.utils.html import extract_bleach
from weblate.utils.xml import parse_xml
BBCODE_MATCH = re.compile(
r"(?P<start>\[(?P<tag>[^]]+)(@[^]]*)?\])(.*?)(?P<end>\[\/(?P=tag)\])", re.MULTILINE
)
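# Example: in "[b]bold[/b]" the named groups are start="[b]", tag="b", end="[/b]".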
MD_LINK = re.compile(
r"!?\[("
r"(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*"
r")\]\("
r"""\s*(<)?([\s\S]*?)(?(2)>)(?:\s+['"]([\s\S]*?)['"])?\s*"""
r"\)"
)
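# Matches inline Markdown links and images such as [text](./path "title") or ![alt](img.png).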
MD_BROKEN_LINK = re.compile(r"\] +\(")
MD_REFLINK = re.compile(
r"!?\[(" # leading [
r"(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*" # link text
r")\]\s*\[([^^\]]*)\]" # trailing ] with optional target
)
MD_SYNTAX = re.compile(
r"(_{2})(?:[\s\S]+?)_{2}(?!_)" # __word__
r"|"
r"(\*{2})(?:[\s\S]+?)\*{2}(?!\*)" # **word**
r"|"
r"\b(_)(?:(?:__|[^_])+?)_\b" # _word_
r"|"
r"(\*)(?:(?:\*\*|[^\*])+?)\*(?!\*)" # *word*
r"|"
r"(`+)\s*(?:[\s\S]*?[^`])\s*\5(?!`)" # `code`
r"|"
r"(~~)(?=\S)(?:[\s\S]*?\S)~~" # ~~word~~
)
XML_MATCH = re.compile(r"<[^>]+>")
XML_ENTITY_MATCH = re.compile(r"&#?\w+;")
def strip_entities(text):
"""Strip all HTML entities (we don't care about them)."""
return XML_ENTITY_MATCH.sub(" ", text)
class BBCodeCheck(TargetCheck):
"""Check for matching bbcode tags."""
check_id = "bbcode"
name = _("BBcode markup")
description = _("BBcode in translation does not match source")
def check_single(self, source, target, unit):
# Parse source
src_match = BBCODE_MATCH.findall(source)
# Any BBCode in source?
if not src_match:
return False
# Parse target
tgt_match = BBCODE_MATCH.findall(target)
if len(src_match) != len(tgt_match):
return True
src_tags = {x[1] for x in src_match}
tgt_tags = {x[1] for x in tgt_match}
return src_tags != tgt_tags
def check_highlight(self, source, unit):
if self.should_skip(unit):
return []
ret = []
for match in BBCODE_MATCH.finditer(source):
for tag in ("start", "end"):
ret.append((match.start(tag), match.end(tag), match.group(tag)))
return ret
class BaseXMLCheck(TargetCheck):
def parse_xml(self, text, wrap=None):
"""Wrapper for parsing XML."""
if wrap is None:
# Detect whether wrapping is desired
try:
return self.parse_xml(text, True), True
except SyntaxError:
return self.parse_xml(text, False), False
text = strip_entities(text)
if wrap:
text = f"<weblate>{text}</weblate>"
return parse_xml(text.encode() if "encoding" in text else text)
def is_source_xml(self, flags, source):
"""Quick check if source looks like XML."""
if "xml-text" in flags:
return True
return "<" in source and len(XML_MATCH.findall(source))
def check_single(self, source, target, unit):
"""Check for single phrase, not dealing with plurals."""
raise NotImplementedError()
class XMLValidityCheck(BaseXMLCheck):
"""Check whether XML in target is valid."""
check_id = "xml-invalid"
name = _("XML syntax")
description = _("The translation is not valid XML")
def check_single(self, source, target, unit):
if not self.is_source_xml(unit.all_flags, source):
return False
# Check if source is XML
try:
wrap = self.parse_xml(source)[1]
except SyntaxError:
# Source is not valid XML, we give up
return False
# Check target
try:
self.parse_xml(target, wrap)
except SyntaxError:
# Target is not valid XML
return True
return False
class XMLTagsCheck(BaseXMLCheck):
"""Check whether XML in target matches source."""
check_id = "xml-tags"
name = _("XML markup")
description = _("XML tags in translation do not match source")
def check_single(self, source, target, unit):
if not self.is_source_xml(unit.all_flags, source):
return False
# Check if source is XML
try:
source_tree, wrap = self.parse_xml(source)
source_tags = [(x.tag, x.keys()) for x in source_tree.iter()]
except SyntaxError:
# Source is not valid XML, we give up
return False
# Check target
try:
target_tree = self.parse_xml(target, wrap)
target_tags = [(x.tag, x.keys()) for x in target_tree.iter()]
except SyntaxError:
# Target is not valid XML
return False
# Compare tags
return source_tags != target_tags
def check_highlight(self, source, unit):
if self.should_skip(unit):
return []
ret = []
try:
self.parse_xml(source)
except SyntaxError:
return ret
# Include XML markup
for match in XML_MATCH.finditer(source):
ret.append((match.start(), match.end(), match.group()))
# Add XML entities
skipranges = [x[:2] for x in ret]
skipranges.append((len(source), len(source)))
offset = 0
for match in XML_ENTITY_MATCH.finditer(source):
start = match.start()
end = match.end()
while skipranges[offset][1] < end:
offset += 1
# Avoid including entities inside markup
if start > skipranges[offset][0] and end < skipranges[offset][1]:
continue
ret.append((start, end, match.group()))
return ret
class MarkdownBaseCheck(TargetCheck):
default_disabled = True
def __init__(self):
super().__init__()
self.enable_string = "md-text"
class MarkdownRefLinkCheck(MarkdownBaseCheck):
check_id = "md-reflink"
name = _("Markdown references")
description = _("Markdown link references do not match source")
def check_single(self, source, target, unit):
src_match = MD_REFLINK.findall(source)
if not src_match:
return False
tgt_match = MD_REFLINK.findall(target)
src_tags = {x[1] for x in src_match}
tgt_tags = {x[1] for x in tgt_match}
return src_tags != tgt_tags
class MarkdownLinkCheck(MarkdownBaseCheck):
check_id = "md-link"
name = _("Markdown links")
description = _("Markdown links do not match source")
def check_single(self, source, target, unit):
src_match = MD_LINK.findall(source)
if not src_match:
return False
tgt_match = MD_LINK.findall(target)
# Check number of links
if len(src_match) != len(tgt_match):
return True
# We don't check actual remote link targets as those might
# be localized as well (consider links to Wikipedia).
# Instead we check only relative links and templated ones.
link_start = (".", "#", "{")
tgt_anchors = {x[2] for x in tgt_match if x[2] and x[2][0] in link_start}
src_anchors = {x[2] for x in src_match if x[2] and x[2][0] in link_start}
return tgt_anchors != src_anchors
def get_fixup(self, unit):
if MD_BROKEN_LINK.findall(unit.target):
return [(MD_BROKEN_LINK.pattern, "](")]
return None
class MarkdownSyntaxCheck(MarkdownBaseCheck):
check_id = "md-syntax"
name = _("Markdown syntax")
description = _("Markdown syntax does not match source")
@staticmethod
def extract_match(match):
for i in range(6):
if match[i]:
return match[i]
return None
def check_single(self, source, target, unit):
src_tags = {self.extract_match(x) for x in MD_SYNTAX.findall(source)}
tgt_tags = {self.extract_match(x) for x in MD_SYNTAX.findall(target)}
return src_tags != tgt_tags
def check_highlight(self, source, unit):
if self.should_skip(unit):
return []
ret = []
for match in MD_SYNTAX.finditer(source):
value = ""
for i in range(6):
value = match.group(i + 1)
if value:
break
start = match.start()
end = match.end()
ret.append((start, start + len(value), value))
ret.append((end - len(value), end, value))
return ret
class URLCheck(TargetCheck):
check_id = "url"
name = _("URL")
description = _("The translation does not contain an URL")
default_disabled = True
@cached_property
def validator(self):
return URLValidator()
def check_single(self, source, target, unit):
if not source:
return False
try:
self.validator(target)
return False
except ValidationError:
return True
class SafeHTMLCheck(TargetCheck):
check_id = "safe-html"
name = _("Unsafe HTML")
description = _("The translation uses unsafe HTML markup")
default_disabled = True
def check_single(self, source, target, unit):
return bleach.clean(target, **extract_bleach(source)) != target
|
import inspect
from inspect import getsource
import os.path as op
from pkgutil import walk_packages
import re
import sys
from unittest import SkipTest
import pytest
import mne
from mne.utils import run_tests_if_main, requires_numpydoc, _pl
public_modules = [
# the list of modules users need to access for all functionality
'mne',
'mne.baseline',
'mne.beamformer',
'mne.channels',
'mne.chpi',
'mne.connectivity',
'mne.cov',
'mne.cuda',
'mne.datasets',
'mne.datasets.brainstorm',
'mne.datasets.hf_sef',
'mne.datasets.sample',
'mne.decoding',
'mne.dipole',
'mne.filter',
'mne.forward',
'mne.inverse_sparse',
'mne.io',
'mne.io.kit',
'mne.minimum_norm',
'mne.preprocessing',
'mne.report',
'mne.simulation',
'mne.source_estimate',
'mne.source_space',
'mne.surface',
'mne.stats',
'mne.time_frequency',
'mne.time_frequency.tfr',
'mne.viz',
]
def _func_name(func, cls=None):
"""Get the name."""
parts = []
if cls is not None:
module = inspect.getmodule(cls)
else:
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
if cls is not None:
parts.append(cls.__name__)
parts.append(func.__name__)
return '.'.join(parts)
# functions to ignore args / docstring of
docstring_ignores = {
'mne.externals',
'mne.fixes',
'mne.io.write',
'mne.io.meas_info.Info',
'mne.utils.docs.deprecated',
}
char_limit = 800 # XX eventually we should probably get this lower
tab_ignores = [
'mne.externals.tqdm._tqdm.__main__',
'mne.externals.tqdm._tqdm.cli',
'mne.channels.tests.test_montage',
'mne.io.curry.tests.test_curry',
]
error_ignores = {
# These we do not live by:
'GL01', # Docstring should start in the line immediately after the quotes
'EX01', 'EX02', # examples failed (we test them separately)
'ES01', # no extended summary
'SA01', # no see also
'YD01', # no yields section
'SA04', # no description in See Also
'PR04', # Parameter "shape (n_channels" has no type
'RT02', # The first line of the Returns section should contain only the type, unless multiple values are being returned # noqa
# XXX should also verify that | is used rather than , to separate params
# XXX should maybe also restore the parameter-desc-length < 800 char check
}
error_ignores_specific = { # specific instances to skip
('regress_artifact', 'SS05'), # "Regress" is actually imperative
}
subclass_name_ignores = (
(dict, {'values', 'setdefault', 'popitems', 'keys', 'pop', 'update',
'copy', 'popitem', 'get', 'items', 'fromkeys', 'clear'}),
(list, {'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'sort'}),
(mne.fixes.BaseEstimator, {'get_params', 'set_params', 'fit_transform'}),
)
def check_parameters_match(func, cls=None):
"""Check docstring, return list of incorrect results."""
from numpydoc.validate import validate
name = _func_name(func, cls)
skip = (not name.startswith('mne.') or
any(re.match(d, name) for d in docstring_ignores) or
'deprecation_wrapped' in getattr(
getattr(func, '__code__', None), 'co_name', ''))
if skip:
return list()
if cls is not None:
for subclass, ignores in subclass_name_ignores:
if issubclass(cls, subclass) and name.split('.')[-1] in ignores:
return list()
incorrect = ['%s : %s : %s' % (name, err[0], err[1])
for err in validate(name)['errors']
if err[0] not in error_ignores and
(name.split('.')[-1], err[0]) not in error_ignores_specific]
return incorrect
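# Each entry in ``incorrect`` has the form "<qualified name> : <code> : <message>",
# e.g. (illustrative) "mne.filter.filter_data : PR01 : Parameters {...} not documented".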
@pytest.mark.slowtest
@requires_numpydoc
def test_docstring_parameters():
"""Test module docstring formatting."""
from numpydoc import docscrape
# skip modules that require mayavi if mayavi is not installed
public_modules_ = public_modules[:]
try:
import mayavi # noqa: F401 analysis:ignore
public_modules_.append('mne.gui')
except ImportError:
pass
incorrect = []
for name in public_modules_:
# Assert that by default we import all public names with `import mne`
if name not in ('mne', 'mne.gui'):
extra = name.split('.')[1]
assert hasattr(mne, extra)
with pytest.warns(None): # traits warnings
module = __import__(name, globals())
for submod in name.split('.')[1:]:
module = getattr(module, submod)
classes = inspect.getmembers(module, inspect.isclass)
for cname, cls in classes:
if cname.startswith('_'):
continue
incorrect += check_parameters_match(cls)
cdoc = docscrape.ClassDoc(cls)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
incorrect += check_parameters_match(method, cls=cls)
if hasattr(cls, '__call__') and \
'of type object' not in str(cls.__call__):
incorrect += check_parameters_match(cls.__call__, cls)
functions = inspect.getmembers(module, inspect.isfunction)
for fname, func in functions:
if fname.startswith('_'):
continue
incorrect += check_parameters_match(func)
incorrect = sorted(list(set(incorrect)))
msg = '\n' + '\n'.join(incorrect)
msg += '\n%d error%s' % (len(incorrect), _pl(incorrect))
if len(incorrect) > 0:
raise AssertionError(msg)
def test_tabs():
"""Test that there are no tabs in our source files."""
# avoid importing modules that require mayavi if mayavi is not installed
ignore = tab_ignores[:]
try:
import mayavi # noqa: F401 analysis:ignore
except ImportError:
ignore.extend('mne.gui.' + name for name in
('_coreg_gui', '_fiducials_gui', '_file_traits', '_help',
'_kit2fiff_gui', '_marker_gui', '_viewer'))
for _, modname, ispkg in walk_packages(mne.__path__, prefix='mne.'):
# because we don't import e.g. mne.tests w/mne
if not ispkg and modname not in ignore:
# mod = importlib.import_module(modname) # not py26 compatible!
try:
with pytest.warns(None):
__import__(modname)
except Exception: # can't import properly
continue
mod = sys.modules[modname]
try:
source = getsource(mod)
except IOError: # user probably should have run "make clean"
continue
assert '\t' not in source, ('"%s" has tabs, please remove them '
'or add it to the ignore list'
% modname)
documented_ignored_mods = (
'mne.fixes',
'mne.io.write',
'mne.utils',
'mne.viz.utils',
)
documented_ignored_names = """
BaseEstimator
ContainsMixin
CrossSpectralDensity
FilterMixin
GeneralizationAcrossTime
RawFIF
TimeMixin
ToDataFrameMixin
TransformerMixin
UpdateChannelsMixin
activate_proj
adjust_axes
apply_maxfilter
apply_trans
channel_type
check_n_jobs
combine_kit_markers
combine_tfr
combine_transforms
design_mne_c_filter
detrend
dir_tree_find
fast_cross_3d
fiff_open
find_source_space_hemi
find_tag
get_score_funcs
get_version
invert_transform
is_power2
is_fixed_orient
kit2fiff
label_src_vertno_sel
make_eeg_average_ref_proj
make_projector
mesh_dist
mesh_edges
next_fast_len
parallel_func
pick_channels_evoked
plot_epochs_psd
plot_epochs_psd_topomap
plot_raw_psd_topo
plot_source_spectrogram
prepare_inverse_operator
read_bad_channels
read_fiducials
read_tag
rescale
setup_proj
source_estimate_quantification
tddr
whiten_evoked
write_fiducials
write_info
""".split('\n')
def test_documented():
"""Test that public functions and classes are documented."""
# skip modules that require mayavi if mayavi is not installed
public_modules_ = public_modules[:]
try:
import mayavi # noqa: F401, analysis:ignore
except ImportError:
pass
else:
public_modules_.append('mne.gui')
doc_file = op.abspath(op.join(op.dirname(__file__), '..', '..', 'doc',
'python_reference.rst'))
if not op.isfile(doc_file):
raise SkipTest('Documentation file not found: %s' % doc_file)
known_names = list()
with open(doc_file, 'rb') as fid:
for line in fid:
line = line.decode('utf-8')
            if not line.startswith('  '):  # at least two spaces
continue
line = line.split()
if len(line) == 1 and line[0] != ':':
known_names.append(line[0].split('.')[-1])
known_names = set(known_names)
missing = []
for name in public_modules_:
with pytest.warns(None): # traits warnings
module = __import__(name, globals())
for submod in name.split('.')[1:]:
module = getattr(module, submod)
classes = inspect.getmembers(module, inspect.isclass)
functions = inspect.getmembers(module, inspect.isfunction)
checks = list(classes) + list(functions)
for name, cf in checks:
if not name.startswith('_') and name not in known_names:
from_mod = inspect.getmodule(cf).__name__
if (from_mod.startswith('mne') and
not from_mod.startswith('mne.externals') and
not any(from_mod.startswith(x)
for x in documented_ignored_mods) and
name not in documented_ignored_names):
missing.append('%s (%s.%s)' % (name, from_mod, name))
if len(missing) > 0:
raise AssertionError('\n\nFound new public members missing from '
'doc/python_reference.rst:\n\n* ' +
'\n* '.join(sorted(set(missing))))
run_tests_if_main()
|
import os
import shutil
import tempfile
import pytest
from radicale import Application, config, xmlutils
from radicale.tests import BaseTest
class TestBaseAuthRequests(BaseTest):
"""Tests basic requests with auth.
    We should set up auth for each type before creating the Application object.
"""
def setup(self):
self.configuration = config.load()
self.colpath = tempfile.mkdtemp()
self.configuration.update({
"storage": {"filesystem_folder": self.colpath,
# Disable syncing to disk for better performance
"_filesystem_fsync": "False"},
# Set incorrect authentication delay to a very low value
"auth": {"delay": "0.002"}}, "test", privileged=True)
def teardown(self):
shutil.rmtree(self.colpath)
def _test_htpasswd(self, htpasswd_encryption, htpasswd_content,
test_matrix="ascii"):
"""Test htpasswd authentication with user "tmp" and password "bepo" for
``test_matrix`` "ascii" or user "😀" and password "🔑" for
``test_matrix`` "unicode"."""
if htpasswd_encryption == "bcrypt":
try:
from passlib.exc import MissingBackendError
from passlib.hash import bcrypt
except ImportError:
pytest.skip("passlib[bcrypt] is not installed")
try:
bcrypt.hash("test-bcrypt-backend")
except MissingBackendError:
pytest.skip("bcrypt backend for passlib is not installed")
htpasswd_file_path = os.path.join(self.colpath, ".htpasswd")
encoding = self.configuration.get("encoding", "stock")
with open(htpasswd_file_path, "w", encoding=encoding) as f:
f.write(htpasswd_content)
self.configuration.update({
"auth": {"type": "htpasswd",
"htpasswd_filename": htpasswd_file_path,
"htpasswd_encryption": htpasswd_encryption}}, "test")
self.application = Application(self.configuration)
if test_matrix == "ascii":
test_matrix = (("tmp", "bepo", True), ("tmp", "tmp", False),
("tmp", "", False), ("unk", "unk", False),
("unk", "", False), ("", "", False))
elif test_matrix == "unicode":
test_matrix = (("😀", "🔑", True), ("😀", "🌹", False),
("😁", "🔑", False), ("😀", "", False),
("", "🔑", False), ("", "", False))
for user, password, valid in test_matrix:
self.propfind("/", check=207 if valid else 401,
login="%s:%s" % (user, password))
def test_htpasswd_plain(self):
self._test_htpasswd("plain", "tmp:bepo")
def test_htpasswd_plain_password_split(self):
self._test_htpasswd("plain", "tmp:be:po", (
("tmp", "be:po", True), ("tmp", "bepo", False)))
def test_htpasswd_plain_unicode(self):
self._test_htpasswd("plain", "😀:🔑", "unicode")
def test_htpasswd_md5(self):
self._test_htpasswd("md5", "tmp:$apr1$BI7VKCZh$GKW4vq2hqDINMr8uv7lDY/")
def test_htpasswd_md5_unicode(self):
self._test_htpasswd(
"md5", "😀:$apr1$w4ev89r1$29xO8EvJmS2HEAadQ5qy11", "unicode")
def test_htpasswd_bcrypt(self):
self._test_htpasswd("bcrypt", "tmp:$2y$05$oD7hbiQFQlvCM7zoalo/T.MssV3V"
"NTRI3w5KDnj8NTUKJNWfVpvRq")
def test_htpasswd_bcrypt_unicode(self):
self._test_htpasswd("bcrypt", "😀:$2y$10$Oyz5aHV4MD9eQJbk6GPemOs4T6edK"
"6U9Sqlzr.W1mMVCS8wJUftnW", "unicode")
def test_htpasswd_multi(self):
self._test_htpasswd("plain", "ign:ign\ntmp:bepo")
@pytest.mark.skipif(os.name == "nt", reason="leading and trailing "
"whitespaces not allowed in file names")
def test_htpasswd_whitespace_user(self):
for user in (" tmp", "tmp ", " tmp "):
self._test_htpasswd("plain", "%s:bepo" % user, (
(user, "bepo", True), ("tmp", "bepo", False)))
def test_htpasswd_whitespace_password(self):
for password in (" bepo", "bepo ", " bepo "):
self._test_htpasswd("plain", "tmp:%s" % password, (
("tmp", password, True), ("tmp", "bepo", False)))
def test_htpasswd_comment(self):
self._test_htpasswd("plain", "#comment\n #comment\n \ntmp:bepo\n\n")
def test_remote_user(self):
self.configuration.update({"auth": {"type": "remote_user"}}, "test")
self.application = Application(self.configuration)
_, responses = self.propfind("/", """\
<?xml version="1.0" encoding="utf-8"?>
<propfind xmlns="DAV:">
<prop>
<current-user-principal />
</prop>
</propfind>""", REMOTE_USER="test")
status, prop = responses["/"]["D:current-user-principal"]
assert status == 200
assert prop.find(xmlutils.make_clark("D:href")).text == "/test/"
def test_http_x_remote_user(self):
self.configuration.update(
{"auth": {"type": "http_x_remote_user"}}, "test")
self.application = Application(self.configuration)
_, responses = self.propfind("/", """\
<?xml version="1.0" encoding="utf-8"?>
<propfind xmlns="DAV:">
<prop>
<current-user-principal />
</prop>
</propfind>""", HTTP_X_REMOTE_USER="test")
status, prop = responses["/"]["D:current-user-principal"]
assert status == 200
assert prop.find(xmlutils.make_clark("D:href")).text == "/test/"
def test_custom(self):
"""Custom authentication."""
self.configuration.update(
{"auth": {"type": "radicale.tests.custom.auth"}}, "test")
self.application = Application(self.configuration)
self.propfind("/tmp/", login="tmp:")
|
import logging
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
DOMAIN = "logger"
SERVICE_SET_DEFAULT_LEVEL = "set_default_level"
SERVICE_SET_LEVEL = "set_level"
LOGSEVERITY = {
"CRITICAL": 50,
"FATAL": 50,
"ERROR": 40,
"WARNING": 30,
"WARN": 30,
"INFO": 20,
"DEBUG": 10,
"NOTSET": 0,
}
DEFAULT_LOGSEVERITY = "DEBUG"
LOGGER_DEFAULT = "default"
LOGGER_LOGS = "logs"
ATTR_LEVEL = "level"
_VALID_LOG_LEVEL = vol.All(vol.Upper, vol.In(LOGSEVERITY))
SERVICE_SET_DEFAULT_LEVEL_SCHEMA = vol.Schema({ATTR_LEVEL: _VALID_LOG_LEVEL})
SERVICE_SET_LEVEL_SCHEMA = vol.Schema({cv.string: _VALID_LOG_LEVEL})
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(LOGGER_DEFAULT): _VALID_LOG_LEVEL,
vol.Optional(LOGGER_LOGS): vol.Schema({cv.string: _VALID_LOG_LEVEL}),
}
)
},
extra=vol.ALLOW_EXTRA,
)
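# Example configuration.yaml entry (illustrative):
#
#   logger:
#     default: warning
#     logs:
#       homeassistant.components.mqtt: debug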
async def async_setup(hass, config):
"""Set up the logger component."""
hass.data[DOMAIN] = {}
logging.setLoggerClass(_get_logger_class(hass.data[DOMAIN]))
@callback
def set_default_log_level(level):
"""Set the default log level for components."""
_set_log_level(logging.getLogger(""), level)
@callback
def set_log_levels(logpoints):
"""Set the specified log levels."""
hass.data[DOMAIN].update(logpoints)
for key, value in logpoints.items():
_set_log_level(logging.getLogger(key), value)
# Set default log severity
set_default_log_level(config[DOMAIN].get(LOGGER_DEFAULT, DEFAULT_LOGSEVERITY))
if LOGGER_LOGS in config[DOMAIN]:
set_log_levels(config[DOMAIN][LOGGER_LOGS])
@callback
def async_service_handler(service):
"""Handle logger services."""
if service.service == SERVICE_SET_DEFAULT_LEVEL:
set_default_log_level(service.data.get(ATTR_LEVEL))
else:
set_log_levels(service.data)
hass.services.async_register(
DOMAIN,
SERVICE_SET_DEFAULT_LEVEL,
async_service_handler,
schema=SERVICE_SET_DEFAULT_LEVEL_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_SET_LEVEL,
async_service_handler,
schema=SERVICE_SET_LEVEL_SCHEMA,
)
return True
def _set_log_level(logger, level):
"""Set the log level.
    Any logger fetched before this integration is loaded will use the old class.
"""
getattr(logger, "orig_setLevel", logger.setLevel)(LOGSEVERITY[level])
def _get_logger_class(hass_overrides):
"""Create a logger subclass.
logging.setLoggerClass checks if it is a subclass of Logger and
so we cannot use partial to inject hass_overrides.
"""
class HassLogger(logging.Logger):
"""Home Assistant aware logger class."""
def setLevel(self, level) -> None:
"""Set the log level unless overridden."""
if self.name in hass_overrides:
return
super().setLevel(level)
# pylint: disable=invalid-name
def orig_setLevel(self, level) -> None:
"""Set the log level."""
super().setLevel(level)
return HassLogger
|
from unittest import mock
import pytest
import zigpy.profiles.zha
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.homeautomation as homeautomation
import zigpy.zcl.clusters.measurement as measurement
import zigpy.zcl.clusters.smartenergy as smartenergy
from homeassistant.components.sensor import DOMAIN
import homeassistant.config as config_util
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
PRESSURE_HPA,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import restore_state
from homeassistant.util import dt as dt_util
from .common import (
async_enable_traffic,
async_test_rejoin,
find_entity_id,
send_attribute_report,
send_attributes_report,
)
async def async_test_humidity(hass, cluster, entity_id):
"""Test humidity sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 1000, 2: 100})
assert_state(hass, entity_id, "10.0", PERCENTAGE)
async def async_test_temperature(hass, cluster, entity_id):
"""Test temperature sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 2900, 2: 100})
assert_state(hass, entity_id, "29.0", TEMP_CELSIUS)
async def async_test_pressure(hass, cluster, entity_id):
"""Test pressure sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 1000, 2: 10000})
assert_state(hass, entity_id, "1000", PRESSURE_HPA)
await send_attributes_report(hass, cluster, {0: 1000, 20: -1, 16: 10000})
assert_state(hass, entity_id, "1000", PRESSURE_HPA)
async def async_test_illuminance(hass, cluster, entity_id):
"""Test illuminance sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 10, 2: 20})
assert_state(hass, entity_id, "1.0", LIGHT_LUX)
async def async_test_metering(hass, cluster, entity_id):
"""Test metering sensor."""
await send_attributes_report(hass, cluster, {1025: 1, 1024: 12345, 1026: 100})
assert_state(hass, entity_id, "12345.0", "unknown")
async def async_test_electrical_measurement(hass, cluster, entity_id):
"""Test electrical measurement sensor."""
with mock.patch(
(
"homeassistant.components.zha.core.channels.homeautomation"
".ElectricalMeasurementChannel.divisor"
),
new_callable=mock.PropertyMock,
) as divisor_mock:
divisor_mock.return_value = 1
await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})
assert_state(hass, entity_id, "100", POWER_WATT)
await send_attributes_report(hass, cluster, {0: 1, 1291: 99, 10: 1000})
assert_state(hass, entity_id, "99", POWER_WATT)
divisor_mock.return_value = 10
await send_attributes_report(hass, cluster, {0: 1, 1291: 1000, 10: 5000})
assert_state(hass, entity_id, "100", POWER_WATT)
await send_attributes_report(hass, cluster, {0: 1, 1291: 99, 10: 5000})
assert_state(hass, entity_id, "9.9", POWER_WATT)
@pytest.mark.parametrize(
"cluster_id, test_func, report_count",
(
(measurement.RelativeHumidity.cluster_id, async_test_humidity, 1),
(measurement.TemperatureMeasurement.cluster_id, async_test_temperature, 1),
(measurement.PressureMeasurement.cluster_id, async_test_pressure, 1),
(measurement.IlluminanceMeasurement.cluster_id, async_test_illuminance, 1),
(smartenergy.Metering.cluster_id, async_test_metering, 1),
(
homeautomation.ElectricalMeasurement.cluster_id,
async_test_electrical_measurement,
1,
),
),
)
async def test_sensor(
hass,
zigpy_device_mock,
zha_device_joined_restored,
cluster_id,
test_func,
report_count,
):
"""Test zha sensor platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [cluster_id, general.Basic.cluster_id],
"out_cluster": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
}
)
cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]
zha_device = await zha_device_joined_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
await async_enable_traffic(hass, [zha_device], enabled=False)
await hass.async_block_till_done()
# ensure the sensor entity was created
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
    # test that the sensor now has a state of unknown
assert hass.states.get(entity_id).state == STATE_UNKNOWN
# test sensor associated logic
await test_func(hass, cluster, entity_id)
# test rejoin
await async_test_rejoin(hass, zigpy_device, [cluster], (report_count,))
def assert_state(hass, entity_id, state, unit_of_measurement):
"""Check that the state is what is expected.
This is used to ensure that the logic in each sensor class handled the
attribute report it received correctly.
"""
hass_state = hass.states.get(entity_id)
assert hass_state.state == state
assert hass_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == unit_of_measurement
@pytest.fixture
def hass_ms(hass):
"""Hass instance with measurement system."""
async def _hass_ms(meas_sys):
await config_util.async_process_ha_core_config(
hass, {CONF_UNIT_SYSTEM: meas_sys}
)
await hass.async_block_till_done()
return hass
return _hass_ms
@pytest.fixture
def core_rs(hass_storage):
"""Core.restore_state fixture."""
def _storage(entity_id, uom, state):
now = dt_util.utcnow().isoformat()
hass_storage[restore_state.STORAGE_KEY] = {
"version": restore_state.STORAGE_VERSION,
"key": restore_state.STORAGE_KEY,
"data": [
{
"state": {
"entity_id": entity_id,
"state": str(state),
"attributes": {ATTR_UNIT_OF_MEASUREMENT: uom},
"last_changed": now,
"last_updated": now,
"context": {
"id": "3c2243ff5f30447eb12e7348cfd5b8ff",
"user_id": None,
},
},
"last_seen": now,
}
],
}
return
return _storage
@pytest.mark.parametrize(
"uom, raw_temp, expected, restore",
[
(TEMP_CELSIUS, 2900, 29, False),
(TEMP_CELSIUS, 2900, 29, True),
(TEMP_FAHRENHEIT, 2900, 84, False),
(TEMP_FAHRENHEIT, 2900, 84, True),
],
)
async def test_temp_uom(
uom,
raw_temp,
expected,
restore,
hass_ms,
core_rs,
zigpy_device_mock,
zha_device_restored,
):
"""Test zha temperature sensor unit of measurement."""
entity_id = "sensor.fake1026_fakemodel1026_004f3202_temperature"
if restore:
core_rs(entity_id, uom, state=(expected - 2))
hass = await hass_ms(
CONF_UNIT_SYSTEM_METRIC if uom == TEMP_CELSIUS else CONF_UNIT_SYSTEM_IMPERIAL
)
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
measurement.TemperatureMeasurement.cluster_id,
general.Basic.cluster_id,
],
"out_cluster": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
}
)
cluster = zigpy_device.endpoints[1].temperature
zha_device = await zha_device_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
if not restore:
await async_enable_traffic(hass, [zha_device], enabled=False)
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
# test that the sensors now have a state of unknown
if not restore:
assert hass.states.get(entity_id).state == STATE_UNKNOWN
await send_attribute_report(hass, cluster, 0, raw_temp)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert round(float(state.state)) == expected
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == uom
async def test_electrical_measurement_init(
hass,
zigpy_device_mock,
zha_device_joined,
):
"""Test proper initialization of the electrical measurement cluster."""
cluster_id = homeautomation.ElectricalMeasurement.cluster_id
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [cluster_id, general.Basic.cluster_id],
"out_cluster": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
}
)
cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]
zha_device = await zha_device_joined(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
    # test that the sensor now has a state of unknown
assert hass.states.get(entity_id).state == STATE_UNKNOWN
await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})
assert int(hass.states.get(entity_id).state) == 100
channel = zha_device.channels.pools[0].all_channels["1:0x0b04"]
assert channel.divisor == 1
assert channel.multiplier == 1
# update power divisor
await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0403: 5, 10: 1000})
assert channel.divisor == 5
assert channel.multiplier == 1
assert hass.states.get(entity_id).state == "4.0"
await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0605: 10, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 1
assert hass.states.get(entity_id).state == "3.0"
# update power multiplier
await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0402: 6, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 6
assert hass.states.get(entity_id).state == "12.0"
await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0604: 20, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 20
assert hass.states.get(entity_id).state == "60.0"
|
import string
from importlib import import_module
import warnings
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, DEFAULT_DB_ALIAS
from django.db.models.fields import FieldDoesNotExist
from django.dispatch import receiver
from django.utils import timezone
from django.utils.functional import SimpleLazyObject
from django.utils.translation import gettext_lazy as _
from shop import deferred
from shop.models.fields import JSONField
from shop.signals import customer_recognized
from shop.models.fields import ChoiceEnum, ChoiceEnumField
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore()
class CustomerState(ChoiceEnum):
UNRECOGNIZED = 0, _("Unrecognized")
GUEST = 1, _("Guest")
REGISTERED = 2, ("Registered")
class CustomerQuerySet(models.QuerySet):
def _filter_or_exclude(self, negate, *args, **kwargs):
"""
Emulate filter queries on a Customer using attributes from the User object.
Example: Customer.objects.filter(last_name__icontains='simpson') will return
a queryset with customers whose last name contains "simpson".
"""
opts = self.model._meta
lookup_kwargs = {}
for key, lookup in kwargs.items():
try:
field_name = key[:key.index('__')]
except ValueError:
field_name = key
if field_name == 'pk':
field_name = opts.pk.name
try:
opts.get_field(field_name)
if isinstance(lookup, get_user_model()):
lookup.pk # force lazy object to resolve
lookup_kwargs[key] = lookup
except FieldDoesNotExist as fdne:
try:
get_user_model()._meta.get_field(field_name)
lookup_kwargs['user__' + key] = lookup
except FieldDoesNotExist:
raise fdne
except Exception as othex:
raise othex
result = super()._filter_or_exclude(negate, *args, **lookup_kwargs)
return result
class CustomerManager(models.Manager):
"""
    Manager for the Customer database model. This manager can also cope with customers which
    have an entity in the database but otherwise are considered anonymous. The username of
    these so-called unrecognized customers is a compact version of the session key.
"""
BASE64_ALPHABET = string.digits + string.ascii_uppercase + string.ascii_lowercase + '.@'
REVERSE_ALPHABET = dict((c, i) for i, c in enumerate(BASE64_ALPHABET))
BASE36_ALPHABET = string.digits + string.ascii_lowercase
_queryset_class = CustomerQuerySet
@classmethod
def encode_session_key(cls, session_key):
"""
Session keys have base 36 and length 32. Since the field ``username`` accepts only up
to 30 characters, the session key is converted to a base 64 representation, resulting
in a length of approximately 28.
"""
return cls._encode(int(session_key[:32], 36), cls.BASE64_ALPHABET)
@classmethod
def decode_session_key(cls, compact_session_key):
"""
Decode a compact session key back to its original length and base.
"""
base_length = len(cls.BASE64_ALPHABET)
n = 0
for c in compact_session_key:
n = n * base_length + cls.REVERSE_ALPHABET[c]
return cls._encode(n, cls.BASE36_ALPHABET).zfill(32)
@classmethod
def _encode(cls, n, base_alphabet):
base_length = len(base_alphabet)
s = []
while True:
n, r = divmod(n, base_length)
s.append(base_alphabet[r])
if n == 0:
break
return ''.join(reversed(s))
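    # Illustrative round trip (hypothetical session key, assuming a standard
    # 32-character base-36 Django session key):
    #
    #   key = "1k2j3h4g5f6d7s8a9p0o1i2u3y4t5r6e"
    #   compact = CustomerManager.encode_session_key(key)
    #   assert len(compact) <= 30
    #   assert CustomerManager.decode_session_key(compact) == key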
def get_queryset(self):
"""
Whenever we fetch from the Customer table, inner join with the User table to reduce the
number of presumed future queries to the database.
"""
qs = self._queryset_class(self.model, using=self._db).select_related('user')
return qs
def create(self, *args, **kwargs):
if 'user' in kwargs and kwargs['user'].is_authenticated:
kwargs.setdefault('recognized', CustomerState.REGISTERED)
customer = super().create(*args, **kwargs)
return customer
def _get_visiting_user(self, session_key):
"""
Since the Customer has a 1:1 relation with the User object, look for an entity of a
User object. As its ``username`` (which must be unique), use the given session key.
"""
username = self.encode_session_key(session_key)
try:
user = get_user_model().objects.get(username=username)
except get_user_model().DoesNotExist:
user = AnonymousUser()
return user
def get_from_request(self, request):
"""
        Return a Customer object for the current User object.
"""
if request.user.is_anonymous and request.session.session_key:
# the visitor is determined through the session key
user = self._get_visiting_user(request.session.session_key)
else:
user = request.user
try:
if user.customer:
return user.customer
except AttributeError:
pass
if request.user.is_authenticated:
customer, created = self.get_or_create(user=user)
            if created:  # `user` has been created by an app other than shop
customer.recognize_as_registered(request)
else:
customer = VisitingCustomer()
return customer
def get_or_create_from_request(self, request):
if request.user.is_authenticated:
user = request.user
recognized = CustomerState.REGISTERED
else:
if not request.session.session_key:
request.session.cycle_key()
assert request.session.session_key
username = self.encode_session_key(request.session.session_key)
# create or get a previously created inactive intermediate user,
            # which later can declare themselves as a guest, or register as a valid Django user
try:
user = get_user_model().objects.get(username=username)
except get_user_model().DoesNotExist:
user = get_user_model().objects.create_user(username)
user.is_active = False
user.save()
recognized = CustomerState.UNRECOGNIZED
customer, created = self.get_or_create(user=user, recognized=recognized)
return customer
class BaseCustomer(models.Model, metaclass=deferred.ForeignKeyBuilder):
"""
Base class for shop customers.
Customer is a profile model that extends
the django User model if a customer is authenticated. On checkout, a User
object is created for anonymous customers also (with unusable password).
"""
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
primary_key=True,
related_name='customer',
)
recognized = ChoiceEnumField(
_("Recognized as"),
enum_type=CustomerState,
help_text=_("Designates the state the customer is recognized as."),
)
last_access = models.DateTimeField(
_("Last accessed"),
default=timezone.now,
)
extra = JSONField(
editable=False,
verbose_name=_("Extra information about this customer"),
)
objects = CustomerManager()
class Meta:
abstract = True
def __str__(self):
return self.get_username()
def get_username(self):
return self.user.get_username()
def get_full_name(self):
return self.user.get_full_name()
@property
def first_name(self):
# pending deprecation: warnings.warn("Property first_name is deprecated and will be removed")
return self.user.first_name
@first_name.setter
def first_name(self, value):
# pending deprecation: warnings.warn("Property first_name is deprecated and will be removed")
self.user.first_name = value
@property
def last_name(self):
# pending deprecation: warnings.warn("Property last_name is deprecated and will be removed")
return self.user.last_name
@last_name.setter
def last_name(self, value):
# pending deprecation: warnings.warn("Property last_name is deprecated and will be removed")
self.user.last_name = value
@property
def email(self):
return self.user.email
@email.setter
def email(self, value):
self.user.email = value
@property
def date_joined(self):
return self.user.date_joined
@property
def last_login(self):
return self.user.last_login
@property
def groups(self):
return self.user.groups
@property
def is_anonymous(self):
return self.recognized in (CustomerState.UNRECOGNIZED, CustomerState.GUEST)
@property
def is_authenticated(self):
return self.recognized is CustomerState.REGISTERED
@property
def is_recognized(self):
"""
Return True if the customer is associated with a User account.
Unrecognized customers have accessed the shop, but did not register
an account nor declared themselves as guests.
"""
return self.recognized is not CustomerState.UNRECOGNIZED
@property
def is_guest(self):
"""
        Return True if the customer isn't associated with a valid User account, but declared
        themselves as a guest, leaving their email address.
"""
return self.recognized is CustomerState.GUEST
def recognize_as_guest(self, request=None, commit=True):
"""
Recognize the current customer as guest customer.
"""
if self.recognized != CustomerState.GUEST:
self.recognized = CustomerState.GUEST
if commit:
self.save(update_fields=['recognized'])
customer_recognized.send(sender=self.__class__, customer=self, request=request)
@property
def is_registered(self):
"""
        Return True if the customer has registered themselves.
"""
return self.recognized is CustomerState.REGISTERED
def recognize_as_registered(self, request=None, commit=True):
"""
Recognize the current customer as registered customer.
"""
if self.recognized != CustomerState.REGISTERED:
self.recognized = CustomerState.REGISTERED
if commit:
self.save(update_fields=['recognized'])
customer_recognized.send(sender=self.__class__, customer=self, request=request)
@property
def is_visitor(self):
"""
Always False for instantiated Customer objects.
"""
return False
@property
def is_expired(self):
"""
Return True if the session of an unrecognized customer expired or is not decodable.
Registered customers never expire.
Guest customers only expire, if they failed fulfilling the purchase.
"""
is_expired = False
if self.recognized is CustomerState.UNRECOGNIZED:
try:
session_key = CustomerManager.decode_session_key(self.user.username)
is_expired = not SessionStore.exists(session_key)
except KeyError:
msg = "Unable to decode username '{}' as session key"
warnings.warn(msg.format(self.user.username))
is_expired = True
return is_expired
def get_or_assign_number(self):
"""
        Hook to get or to assign the customer's number. It is invoked every time an Order
        object is created. Using a customer number which differs from the primary key is
        useful for merchants wishing to assign sequential numbers only to customers who
        actually bought something. Otherwise the customer number (primary key) is increased
        whenever a site visitor puts something into the cart. If they never proceed to
        checkout, that entity expires and may be deleted at any time in the future.
"""
return self.get_number()
def get_number(self):
"""
        Hook to get the customer's number. Customers who haven't purchased anything may
        return None.
"""
return str(self.user_id)
def save(self, **kwargs):
if 'update_fields' not in kwargs:
self.user.save(using=kwargs.get('using', DEFAULT_DB_ALIAS))
super().save(**kwargs)
def delete(self, *args, **kwargs):
if self.user.is_active and self.recognized is CustomerState.UNRECOGNIZED:
# invalid state of customer, keep the referred User
super().delete(*args, **kwargs)
else:
# also delete self through cascading
self.user.delete(*args, **kwargs)
CustomerModel = deferred.MaterializedModel(BaseCustomer)
class VisitingCustomer:
"""
    This dummy object is used for customers who just visit the site. Whenever a VisitingCustomer
    adds something to the cart, this object is replaced by a real Customer object.
"""
user = AnonymousUser()
def __str__(self):
return 'Visitor'
@property
def email(self):
return ''
@email.setter
def email(self, value):
pass
@property
def is_anonymous(self):
return True
@property
def is_authenticated(self):
return False
@property
def is_recognized(self):
return False
@property
def is_guest(self):
return False
@property
def is_registered(self):
return False
@property
def is_visitor(self):
return True
def save(self, **kwargs):
pass
@receiver(user_logged_in)
def handle_customer_login(sender, **kwargs):
"""
Update request.customer to an authenticated Customer
"""
try:
kwargs['request'].customer = kwargs['user'].customer
except (AttributeError, ObjectDoesNotExist):
kwargs['request'].customer = SimpleLazyObject(lambda: CustomerModel.objects.get_from_request(kwargs['request']))
@receiver(user_logged_out)
def handle_customer_logout(sender, **kwargs):
"""
Update request.customer to a visiting Customer
"""
# defer assignment to anonymous customer, since the session_key is not yet rotated
kwargs['request'].customer = SimpleLazyObject(lambda: CustomerModel.objects.get_from_request(kwargs['request']))
|
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.http import urlencode
from social_core.pipeline.partial import partial
from weblate.legal.models import Agreement
@partial
def tos_confirm(strategy, backend, user, current_partial, **kwargs):
"""Force authentication when adding new association."""
agreement = Agreement.objects.get_or_create(user=user)[0]
if not agreement.is_current():
if user:
strategy.request.session["tos_user"] = user.pk
url = "{}?partial_token={}".format(
reverse("social:complete", args=(backend.name,)), current_partial.token
)
return redirect(
"{}?{}".format(reverse("legal:confirm"), urlencode({"next": url}))
)
strategy.request.session.pop("tos_user", None)
return None
|
from collections import namedtuple
from typing import Optional, Sequence
from pysmartthings import Attribute, Capability
from homeassistant.const import (
AREA_SQUARE_METERS,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
VOLT,
)
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
Map = namedtuple("map", "attribute name default_unit device_class")
CAPABILITY_TO_SENSORS = {
Capability.activity_lighting_mode: [
Map(Attribute.lighting_mode, "Activity Lighting Mode", None, None)
],
Capability.air_conditioner_mode: [
Map(Attribute.air_conditioner_mode, "Air Conditioner Mode", None, None)
],
Capability.air_quality_sensor: [
Map(Attribute.air_quality, "Air Quality", "CAQI", None)
],
Capability.alarm: [Map(Attribute.alarm, "Alarm", None, None)],
Capability.audio_volume: [Map(Attribute.volume, "Volume", PERCENTAGE, None)],
Capability.battery: [
Map(Attribute.battery, "Battery", PERCENTAGE, DEVICE_CLASS_BATTERY)
],
Capability.body_mass_index_measurement: [
Map(
Attribute.bmi_measurement,
"Body Mass Index",
f"{MASS_KILOGRAMS}/{AREA_SQUARE_METERS}",
None,
)
],
Capability.body_weight_measurement: [
Map(Attribute.body_weight_measurement, "Body Weight", MASS_KILOGRAMS, None)
],
Capability.carbon_dioxide_measurement: [
Map(
Attribute.carbon_dioxide,
"Carbon Dioxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.carbon_monoxide_detector: [
Map(Attribute.carbon_monoxide, "Carbon Monoxide Detector", None, None)
],
Capability.carbon_monoxide_measurement: [
Map(
Attribute.carbon_monoxide_level,
"Carbon Monoxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.dishwasher_operating_state: [
Map(Attribute.machine_state, "Dishwasher Machine State", None, None),
Map(Attribute.dishwasher_job_state, "Dishwasher Job State", None, None),
Map(
Attribute.completion_time,
"Dishwasher Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
Capability.dryer_mode: [Map(Attribute.dryer_mode, "Dryer Mode", None, None)],
Capability.dryer_operating_state: [
Map(Attribute.machine_state, "Dryer Machine State", None, None),
Map(Attribute.dryer_job_state, "Dryer Job State", None, None),
Map(
Attribute.completion_time,
"Dryer Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
Capability.dust_sensor: [
Map(Attribute.fine_dust_level, "Fine Dust Level", None, None),
Map(Attribute.dust_level, "Dust Level", None, None),
],
Capability.energy_meter: [
Map(Attribute.energy, "Energy Meter", ENERGY_KILO_WATT_HOUR, None)
],
Capability.equivalent_carbon_dioxide_measurement: [
Map(
Attribute.equivalent_carbon_dioxide_measurement,
"Equivalent Carbon Dioxide Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.formaldehyde_measurement: [
Map(
Attribute.formaldehyde_level,
"Formaldehyde Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.illuminance_measurement: [
Map(Attribute.illuminance, "Illuminance", LIGHT_LUX, DEVICE_CLASS_ILLUMINANCE)
],
Capability.infrared_level: [
Map(Attribute.infrared_level, "Infrared Level", PERCENTAGE, None)
],
Capability.media_input_source: [
Map(Attribute.input_source, "Media Input Source", None, None)
],
Capability.media_playback_repeat: [
Map(Attribute.playback_repeat_mode, "Media Playback Repeat", None, None)
],
Capability.media_playback_shuffle: [
Map(Attribute.playback_shuffle, "Media Playback Shuffle", None, None)
],
Capability.media_playback: [
Map(Attribute.playback_status, "Media Playback Status", None, None)
],
Capability.odor_sensor: [Map(Attribute.odor_level, "Odor Sensor", None, None)],
Capability.oven_mode: [Map(Attribute.oven_mode, "Oven Mode", None, None)],
Capability.oven_operating_state: [
Map(Attribute.machine_state, "Oven Machine State", None, None),
Map(Attribute.oven_job_state, "Oven Job State", None, None),
Map(Attribute.completion_time, "Oven Completion Time", None, None),
],
Capability.oven_setpoint: [
Map(Attribute.oven_setpoint, "Oven Set Point", None, None)
],
Capability.power_meter: [Map(Attribute.power, "Power Meter", POWER_WATT, None)],
Capability.power_source: [Map(Attribute.power_source, "Power Source", None, None)],
Capability.refrigeration_setpoint: [
Map(
Attribute.refrigeration_setpoint,
"Refrigeration Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.relative_humidity_measurement: [
Map(
Attribute.humidity,
"Relative Humidity Measurement",
PERCENTAGE,
DEVICE_CLASS_HUMIDITY,
)
],
Capability.robot_cleaner_cleaning_mode: [
Map(
Attribute.robot_cleaner_cleaning_mode,
"Robot Cleaner Cleaning Mode",
None,
None,
)
],
Capability.robot_cleaner_movement: [
Map(Attribute.robot_cleaner_movement, "Robot Cleaner Movement", None, None)
],
Capability.robot_cleaner_turbo_mode: [
Map(Attribute.robot_cleaner_turbo_mode, "Robot Cleaner Turbo Mode", None, None)
],
Capability.signal_strength: [
Map(Attribute.lqi, "LQI Signal Strength", None, None),
Map(Attribute.rssi, "RSSI Signal Strength", None, None),
],
Capability.smoke_detector: [Map(Attribute.smoke, "Smoke Detector", None, None)],
Capability.temperature_measurement: [
Map(
Attribute.temperature,
"Temperature Measurement",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_cooling_setpoint: [
Map(
Attribute.cooling_setpoint,
"Thermostat Cooling Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_fan_mode: [
Map(Attribute.thermostat_fan_mode, "Thermostat Fan Mode", None, None)
],
Capability.thermostat_heating_setpoint: [
Map(
Attribute.heating_setpoint,
"Thermostat Heating Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_mode: [
Map(Attribute.thermostat_mode, "Thermostat Mode", None, None)
],
Capability.thermostat_operating_state: [
Map(
Attribute.thermostat_operating_state,
"Thermostat Operating State",
None,
None,
)
],
Capability.thermostat_setpoint: [
Map(
Attribute.thermostat_setpoint,
"Thermostat Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.three_axis: [],
Capability.tv_channel: [Map(Attribute.tv_channel, "Tv Channel", None, None)],
Capability.tvoc_measurement: [
Map(
Attribute.tvoc_level,
"Tvoc Measurement",
CONCENTRATION_PARTS_PER_MILLION,
None,
)
],
Capability.ultraviolet_index: [
Map(Attribute.ultraviolet_index, "Ultraviolet Index", None, None)
],
Capability.voltage_measurement: [
Map(Attribute.voltage, "Voltage Measurement", VOLT, None)
],
Capability.washer_mode: [Map(Attribute.washer_mode, "Washer Mode", None, None)],
Capability.washer_operating_state: [
Map(Attribute.machine_state, "Washer Machine State", None, None),
Map(Attribute.washer_job_state, "Washer Job State", None, None),
Map(
Attribute.completion_time,
"Washer Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
}
UNITS = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
THREE_AXIS_NAMES = ["X Coordinate", "Y Coordinate", "Z Coordinate"]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add binary sensors for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
sensors = []
for device in broker.devices.values():
for capability in broker.get_assigned(device.device_id, "sensor"):
if capability == Capability.three_axis:
sensors.extend(
[
SmartThingsThreeAxisSensor(device, index)
for index in range(len(THREE_AXIS_NAMES))
]
)
else:
maps = CAPABILITY_TO_SENSORS[capability]
sensors.extend(
[
SmartThingsSensor(
device, m.attribute, m.name, m.default_unit, m.device_class
)
for m in maps
]
)
async_add_entities(sensors)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
"""Return all capabilities supported if minimum required are present."""
return [
capability for capability in CAPABILITY_TO_SENSORS if capability in capabilities
]
class SmartThingsSensor(SmartThingsEntity):
"""Define a SmartThings Sensor."""
def __init__(
self, device, attribute: str, name: str, default_unit: str, device_class: str
):
"""Init the class."""
super().__init__(device)
self._attribute = attribute
self._name = name
self._device_class = device_class
self._default_unit = default_unit
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {self._name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{self._attribute}"
@property
def state(self):
"""Return the state of the sensor."""
return self._device.status.attributes[self._attribute].value
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
unit = self._device.status.attributes[self._attribute].unit
return UNITS.get(unit, unit) if unit else self._default_unit
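    # Example (illustrative): a temperature attribute reported with unit "C" is
    # mapped to TEMP_CELSIUS via UNITS, while an attribute without a reported
    # unit falls back to the capability's default (e.g. PERCENTAGE for battery).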
class SmartThingsThreeAxisSensor(SmartThingsEntity):
"""Define a SmartThings Three Axis Sensor."""
def __init__(self, device, index):
"""Init the class."""
super().__init__(device)
self._index = index
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return "{} {}".format(self._device.label, THREE_AXIS_NAMES[self._index])
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return "{}.{}".format(self._device.device_id, THREE_AXIS_NAMES[self._index])
@property
def state(self):
"""Return the state of the sensor."""
three_axis = self._device.status.attributes[Attribute.three_axis].value
try:
return three_axis[self._index]
except (TypeError, IndexError):
return None
|
import logging
from homeassistant import core
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from .auth import Auth
from .config import AbstractConfig
from .const import CONF_ENDPOINT, CONF_ENTITY_CONFIG, CONF_FILTER, CONF_LOCALE
from .smart_home import async_handle_message
from .state_report import async_enable_proactive_mode
_LOGGER = logging.getLogger(__name__)
SMART_HOME_HTTP_ENDPOINT = "/api/alexa/smart_home"
class AlexaConfig(AbstractConfig):
"""Alexa config."""
def __init__(self, hass, config):
"""Initialize Alexa config."""
super().__init__(hass)
self._config = config
if config.get(CONF_CLIENT_ID) and config.get(CONF_CLIENT_SECRET):
self._auth = Auth(hass, config[CONF_CLIENT_ID], config[CONF_CLIENT_SECRET])
else:
self._auth = None
@property
def supports_auth(self):
"""Return if config supports auth."""
return self._auth is not None
@property
def should_report_state(self):
"""Return if we should proactively report states."""
return self._auth is not None
@property
def endpoint(self):
"""Endpoint for report state."""
return self._config.get(CONF_ENDPOINT)
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def locale(self):
"""Return config locale."""
return self._config.get(CONF_LOCALE)
def should_expose(self, entity_id):
"""If an entity should be exposed."""
return self._config[CONF_FILTER](entity_id)
@core.callback
def async_invalidate_access_token(self):
"""Invalidate access token."""
self._auth.async_invalidate_access_token()
async def async_get_access_token(self):
"""Get an access token."""
return await self._auth.async_get_access_token()
async def async_accept_grant(self, code):
"""Accept a grant."""
return await self._auth.async_do_auth(code)
async def async_setup(hass, config):
"""Activate Smart Home functionality of Alexa component.
This is optional, triggered by having a `smart_home:` sub-section in the
alexa configuration.
Even if that's disabled, the functionality in this module may still be used
by the cloud component which will call async_handle_message directly.
"""
smart_home_config = AlexaConfig(hass, config)
hass.http.register_view(SmartHomeView(smart_home_config))
if smart_home_config.should_report_state:
await async_enable_proactive_mode(hass, smart_home_config)
class SmartHomeView(HomeAssistantView):
"""Expose Smart Home v3 payload interface via HTTP POST."""
url = SMART_HOME_HTTP_ENDPOINT
name = "api:alexa:smart_home"
def __init__(self, smart_home_config):
"""Initialize."""
self.smart_home_config = smart_home_config
async def post(self, request):
"""Handle Alexa Smart Home requests.
The Smart Home API requires the endpoint to be implemented in AWS
Lambda, which will need to forward the requests to here and pass back
the response.
"""
hass = request.app["hass"]
user = request["hass_user"]
message = await request.json()
_LOGGER.debug("Received Alexa Smart Home request: %s", message)
response = await async_handle_message(
hass, self.smart_home_config, message, context=core.Context(user_id=user.id)
)
_LOGGER.debug("Sending Alexa Smart Home response: %s", response)
return b"" if response is None else self.json(response)
|
try:
import pkg_resources
except ImportError:
pass
from threading import local as _local
from ._cperror import (
HTTPError, HTTPRedirect, InternalRedirect,
NotFound, CherryPyException,
)
from . import _cpdispatch as dispatch
from ._cptools import default_toolbox as tools, Tool
from ._helper import expose, popargs, url
from . import _cprequest, _cpserver, _cptree, _cplogging, _cpconfig
import cherrypy.lib.httputil as _httputil
from ._cptree import Application
from . import _cpwsgi as wsgi
from . import process
try:
from .process import win32
engine = win32.Win32Bus()
engine.console_control_handler = win32.ConsoleCtrlHandler(engine)
del win32
except ImportError:
engine = process.bus
from . import _cpchecker
__all__ = (
'HTTPError', 'HTTPRedirect', 'InternalRedirect',
'NotFound', 'CherryPyException',
'dispatch', 'tools', 'Tool', 'Application',
'wsgi', 'process', 'tree', 'engine',
'quickstart', 'serving', 'request', 'response', 'thread_data',
'log', 'expose', 'popargs', 'url', 'config',
)
__import__('cherrypy._cptools')
__import__('cherrypy._cprequest')
tree = _cptree.Tree()
try:
__version__ = pkg_resources.require('cherrypy')[0].version
except Exception:
__version__ = 'unknown'
engine.listeners['before_request'] = set()
engine.listeners['after_request'] = set()
engine.autoreload = process.plugins.Autoreloader(engine)
engine.autoreload.subscribe()
engine.thread_manager = process.plugins.ThreadManager(engine)
engine.thread_manager.subscribe()
engine.signal_handler = process.plugins.SignalHandler(engine)
class _HandleSignalsPlugin(object):
"""Handle signals from other processes.
Based on the configured platform handlers above.
"""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Add the handlers based on the platform."""
if hasattr(self.bus, 'signal_handler'):
self.bus.signal_handler.subscribe()
if hasattr(self.bus, 'console_control_handler'):
self.bus.console_control_handler.subscribe()
engine.signals = _HandleSignalsPlugin(engine)
server = _cpserver.Server()
server.subscribe()
def quickstart(root=None, script_name='', config=None):
"""Mount the given root, start the builtin server (and engine), then block.
root: an instance of a "controller class" (a collection of page handler
methods) which represents the root of the application.
script_name: a string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the URL
at which to mount the given root. For example, if root.index() will
handle requests to "http://www.example.com:8080/dept/app1/", then
the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the root
of the URI, it MUST be an empty string (not "/").
config: a file or dict containing application config. If this contains
a [global] section, those entries will be used in the global
(site-wide) config.
"""
if config:
_global_conf_alias.update(config)
tree.mount(root, script_name, config)
engine.signals.subscribe()
engine.start()
engine.block()
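# Minimal usage sketch (hypothetical Root class, not part of this module):
#
#   class Root:
#       @expose
#       def index(self):
#           return 'Hello world!'
#
#   quickstart(Root(), '/app', {'global': {'server.socket_port': 8080}})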
class _Serving(_local):
"""An interface for registering request and response objects.
Rather than have a separate "thread local" object for the request and
the response, this class works as a single threadlocal container for
both objects (and any others which developers wish to define). In this
way, we can easily dump those objects when we stop/start a new HTTP
conversation, yet still refer to them as module-level globals in a
thread-safe way.
"""
request = _cprequest.Request(_httputil.Host('127.0.0.1', 80),
_httputil.Host('127.0.0.1', 1111))
"""
The request object for the current thread. In the main thread,
and any threads which are not receiving HTTP requests, this is None."""
response = _cprequest.Response()
"""
The response object for the current thread. In the main thread,
and any threads which are not receiving HTTP requests, this is None."""
def load(self, request, response):
self.request = request
self.response = response
def clear(self):
"""Remove all attributes of self."""
self.__dict__.clear()
serving = _Serving()
class _ThreadLocalProxy(object):
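    """Proxy that forwards attribute, item, and length access to the per-thread
    object stored on ``serving`` under the configured attribute name (for
    example the current request or response)."""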
__slots__ = ['__attrname__', '__dict__']
def __init__(self, attrname):
self.__attrname__ = attrname
def __getattr__(self, name):
child = getattr(serving, self.__attrname__)
return getattr(child, name)
def __setattr__(self, name, value):
if name in ('__attrname__', ):
object.__setattr__(self, name, value)
else:
child = getattr(serving, self.__attrname__)
setattr(child, name, value)
def __delattr__(self, name):
child = getattr(serving, self.__attrname__)
delattr(child, name)
@property
def __dict__(self):
child = getattr(serving, self.__attrname__)
d = child.__class__.__dict__.copy()
d.update(child.__dict__)
return d
def __getitem__(self, key):
child = getattr(serving, self.__attrname__)
return child[key]
def __setitem__(self, key, value):
child = getattr(serving, self.__attrname__)
child[key] = value
def __delitem__(self, key):
child = getattr(serving, self.__attrname__)
del child[key]
def __contains__(self, key):
child = getattr(serving, self.__attrname__)
return key in child
def __len__(self):
child = getattr(serving, self.__attrname__)
return len(child)
def __nonzero__(self):
child = getattr(serving, self.__attrname__)
return bool(child)
# Python 3
__bool__ = __nonzero__
# Create request and response object (the same objects will be used
# throughout the entire life of the webserver, but will redirect
# to the "serving" object)
request = _ThreadLocalProxy('request')
response = _ThreadLocalProxy('response')
# Create thread_data object as a thread-specific all-purpose storage
class _ThreadData(_local):
"""A container for thread-specific data."""
thread_data = _ThreadData()
# Monkeypatch pydoc to allow help() to go through the threadlocal proxy.
# Jan 2007: no Googleable examples of anyone else replacing pydoc.resolve.
# The only other way would be to change what is returned from type(request)
# and that's not possible in pure Python (you'd have to fake ob_type).
def _cherrypy_pydoc_resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, _ThreadLocalProxy):
thing = getattr(serving, thing.__attrname__)
return _pydoc._builtin_resolve(thing, forceload)
try:
import pydoc as _pydoc
_pydoc._builtin_resolve = _pydoc.resolve
_pydoc.resolve = _cherrypy_pydoc_resolve
except ImportError:
pass
class _GlobalLogManager(_cplogging.LogManager):
"""A site-wide LogManager; routes to app.log or global log as appropriate.
This :class:`LogManager<cherrypy._cplogging.LogManager>` implements
cherrypy.log() and cherrypy.log.access(). If either
function is called during a request, the message will be sent to the
logger for the current Application. If they are called outside of a
request, the message will be sent to the site-wide logger.
"""
def __call__(self, *args, **kwargs):
"""Log the given message to the app.log or global log.
Log the given message to the app.log or global
log as appropriate.
"""
# Do NOT use try/except here. See
# https://github.com/cherrypy/cherrypy/issues/945
if hasattr(request, 'app') and hasattr(request.app, 'log'):
log = request.app.log
else:
log = self
return log.error(*args, **kwargs)
def access(self):
"""Log an access message to the app.log or global log.
Log the given message to the app.log or global
log as appropriate.
"""
try:
return request.app.log.access()
except AttributeError:
return _cplogging.LogManager.access(self)
log = _GlobalLogManager()
# Set a default screen handler on the global log.
log.screen = True
log.error_file = ''
# Using an access file makes CP about 10% slower. Leave off by default.
log.access_file = ''
@engine.subscribe('log')
def _buslog(msg, level):
log.error(msg, 'ENGINE', severity=level)
# Use _global_conf_alias so quickstart can use 'config' as an arg
# without shadowing cherrypy.config.
config = _global_conf_alias = _cpconfig.Config()
config.defaults = {
'tools.log_tracebacks.on': True,
'tools.log_headers.on': True,
'tools.trailing_slash.on': True,
'tools.encode.on': True
}
config.namespaces['log'] = lambda k, v: setattr(log, k, v)
config.namespaces['checker'] = lambda k, v: setattr(checker, k, v)
# Must reset to get our defaults applied.
config.reset()
checker = _cpchecker.Checker()
engine.subscribe('start', checker)
|
from copy import deepcopy
from unittest import mock
import axis as axislib
from axis.api_discovery import URL as API_DISCOVERY_URL
from axis.applications import URL_LIST as APPLICATIONS_URL
from axis.applications.vmd4 import URL as VMD4_URL
from axis.basic_device_info import URL as BASIC_DEVICE_INFO_URL
from axis.event_stream import OPERATION_INITIALIZED
from axis.light_control import URL as LIGHT_CONTROL_URL
from axis.mqtt import URL_CLIENT as MQTT_CLIENT_URL
from axis.param_cgi import (
BRAND as BRAND_URL,
INPUT as INPUT_URL,
IOPORT as IOPORT_URL,
OUTPUT as OUTPUT_URL,
PROPERTIES as PROPERTIES_URL,
STREAM_PROFILES as STREAM_PROFILES_URL,
)
from axis.port_management import URL as PORT_MANAGEMENT_URL
import pytest
from homeassistant import config_entries
from homeassistant.components import axis
from homeassistant.components.axis.const import (
CONF_EVENTS,
CONF_MODEL,
DOMAIN as AXIS_DOMAIN,
)
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.config_entries import SOURCE_ZEROCONF
from homeassistant.const import (
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
STATE_ON,
)
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import MockConfigEntry, async_fire_mqtt_message
MAC = "00408C12345"
MODEL = "model"
NAME = "name"
ENTRY_OPTIONS = {CONF_EVENTS: True}
ENTRY_CONFIG = {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "root",
CONF_PASSWORD: "pass",
CONF_PORT: 80,
CONF_MAC: MAC,
CONF_MODEL: MODEL,
CONF_NAME: NAME,
}
API_DISCOVERY_RESPONSE = {
"method": "getApiList",
"apiVersion": "1.0",
"data": {
"apiList": [
{"id": "api-discovery", "version": "1.0", "name": "API Discovery Service"},
{"id": "param-cgi", "version": "1.0", "name": "Legacy Parameter Handling"},
]
},
}
API_DISCOVERY_BASIC_DEVICE_INFO = {
"id": "basic-device-info",
"version": "1.1",
"name": "Basic Device Information",
}
API_DISCOVERY_MQTT = {"id": "mqtt-client", "version": "1.0", "name": "MQTT Client API"}
API_DISCOVERY_PORT_MANAGEMENT = {
"id": "io-port-management",
"version": "1.0",
"name": "IO Port Management",
}
APPLICATIONS_LIST_RESPONSE = """<reply result="ok">
<application Name="vmd" NiceName="AXIS Video Motion Detection" Vendor="Axis Communications" Version="4.2-0" ApplicationID="143440" License="None" Status="Running" ConfigurationPage="local/vmd/config.html" VendorHomePage="http://www.axis.com" />
</reply>"""
BASIC_DEVICE_INFO_RESPONSE = {
"apiVersion": "1.1",
"data": {
"propertyList": {
"ProdNbr": "M1065-LW",
"ProdType": "Network Camera",
"SerialNumber": "00408C12345",
"Version": "9.80.1",
}
},
}
LIGHT_CONTROL_RESPONSE = {
"apiVersion": "1.1",
"method": "getLightInformation",
"data": {
"items": [
{
"lightID": "led0",
"lightType": "IR",
"enabled": True,
"synchronizeDayNightMode": True,
"lightState": False,
"automaticIntensityMode": False,
"automaticAngleOfIlluminationMode": False,
"nrOfLEDs": 1,
"error": False,
"errorInfo": "",
}
]
},
}
MQTT_CLIENT_RESPONSE = {
"apiVersion": "1.0",
"context": "some context",
"method": "getClientStatus",
"data": {"status": {"state": "active", "connectionStatus": "Connected"}},
}
PORT_MANAGEMENT_RESPONSE = {
"apiVersion": "1.0",
"method": "getPorts",
"data": {
"numberOfPorts": 1,
"items": [
{
"port": "0",
"configurable": False,
"usage": "",
"name": "PIR sensor",
"direction": "input",
"state": "open",
"normalState": "open",
}
],
},
}
VMD4_RESPONSE = {
"apiVersion": "1.4",
"method": "getConfiguration",
"context": "Axis library",
"data": {
"cameras": [{"id": 1, "rotation": 0, "active": True}],
"profiles": [
{"filters": [], "camera": 1, "triggers": [], "name": "Profile 1", "uid": 1}
],
},
}
BRAND_RESPONSE = """root.Brand.Brand=AXIS
root.Brand.ProdFullName=AXIS M1065-LW Network Camera
root.Brand.ProdNbr=M1065-LW
root.Brand.ProdShortName=AXIS M1065-LW
root.Brand.ProdType=Network Camera
root.Brand.ProdVariant=
root.Brand.WebURL=http://www.axis.com
"""
PORTS_RESPONSE = """root.Input.NbrOfInputs=1
root.IOPort.I0.Configurable=no
root.IOPort.I0.Direction=input
root.IOPort.I0.Input.Name=PIR sensor
root.IOPort.I0.Input.Trig=closed
root.Output.NbrOfOutputs=0
"""
PROPERTIES_RESPONSE = """root.Properties.API.HTTP.Version=3
root.Properties.API.Metadata.Metadata=yes
root.Properties.API.Metadata.Version=1.0
root.Properties.EmbeddedDevelopment.Version=2.16
root.Properties.Firmware.BuildDate=Feb 15 2019 09:42
root.Properties.Firmware.BuildNumber=26
root.Properties.Firmware.Version=9.10.1
root.Properties.Image.Format=jpeg,mjpeg,h264
root.Properties.Image.NbrOfViews=2
root.Properties.Image.Resolution=1920x1080,1280x960,1280x720,1024x768,1024x576,800x600,640x480,640x360,352x240,320x240
root.Properties.Image.Rotation=0,180
root.Properties.System.SerialNumber=00408C12345
"""
STREAM_PROFILES_RESPONSE = """root.StreamProfile.MaxGroups=26
root.StreamProfile.S0.Description=profile_1_description
root.StreamProfile.S0.Name=profile_1
root.StreamProfile.S0.Parameters=videocodec=h264
root.StreamProfile.S1.Description=profile_2_description
root.StreamProfile.S1.Name=profile_2
root.StreamProfile.S1.Parameters=videocodec=h265
"""
async def vapix_request(self, session, url, **kwargs):
"""Return data based on url."""
if API_DISCOVERY_URL in url:
return API_DISCOVERY_RESPONSE
if APPLICATIONS_URL in url:
return APPLICATIONS_LIST_RESPONSE
if BASIC_DEVICE_INFO_URL in url:
return BASIC_DEVICE_INFO_RESPONSE
if LIGHT_CONTROL_URL in url:
return LIGHT_CONTROL_RESPONSE
if MQTT_CLIENT_URL in url:
return MQTT_CLIENT_RESPONSE
if PORT_MANAGEMENT_URL in url:
return PORT_MANAGEMENT_RESPONSE
if VMD4_URL in url:
return VMD4_RESPONSE
if BRAND_URL in url:
return BRAND_RESPONSE
if IOPORT_URL in url or INPUT_URL in url or OUTPUT_URL in url:
return PORTS_RESPONSE
if PROPERTIES_URL in url:
return PROPERTIES_RESPONSE
if STREAM_PROFILES_URL in url:
return STREAM_PROFILES_RESPONSE
async def setup_axis_integration(hass, config=ENTRY_CONFIG, options=ENTRY_OPTIONS):
"""Create the Axis device."""
config_entry = MockConfigEntry(
domain=AXIS_DOMAIN,
data=deepcopy(config),
connection_class=config_entries.CONN_CLASS_LOCAL_PUSH,
options=deepcopy(options),
version=2,
)
config_entry.add_to_hass(hass)
with patch("axis.vapix.Vapix.request", new=vapix_request), patch(
"axis.rtsp.RTSPClient.start",
return_value=True,
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return config_entry
async def test_device_setup(hass):
"""Successful setup."""
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
assert device.api.vapix.firmware_version == "9.10.1"
assert device.api.vapix.product_number == "M1065-LW"
assert device.api.vapix.product_type == "Network Camera"
assert device.api.vapix.serial_number == "00408C12345"
entry = device.config_entry
assert len(forward_entry_setup.mock_calls) == 4
assert forward_entry_setup.mock_calls[0][1] == (entry, "binary_sensor")
assert forward_entry_setup.mock_calls[1][1] == (entry, "camera")
assert forward_entry_setup.mock_calls[2][1] == (entry, "light")
assert forward_entry_setup.mock_calls[3][1] == (entry, "switch")
assert device.host == ENTRY_CONFIG[CONF_HOST]
assert device.model == ENTRY_CONFIG[CONF_MODEL]
assert device.name == ENTRY_CONFIG[CONF_NAME]
assert device.serial == ENTRY_CONFIG[CONF_MAC]
async def test_device_info(hass):
"""Verify other path of device information works."""
api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
api_discovery["data"]["apiList"].append(API_DISCOVERY_BASIC_DEVICE_INFO)
with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
assert device.api.vapix.firmware_version == "9.80.1"
assert device.api.vapix.product_number == "M1065-LW"
assert device.api.vapix.product_type == "Network Camera"
assert device.api.vapix.serial_number == "00408C12345"
async def test_device_support_mqtt(hass, mqtt_mock):
"""Successful setup."""
api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
api_discovery["data"]["apiList"].append(API_DISCOVERY_MQTT)
with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
await setup_axis_integration(hass)
mqtt_mock.async_subscribe.assert_called_with(f"{MAC}/#", mock.ANY, 0, "utf-8")
topic = f"{MAC}/event/tns:onvif/Device/tns:axis/Sensor/PIR/$source/sensor/0"
message = b'{"timestamp": 1590258472044, "topic": "onvif:Device/axis:Sensor/PIR", "message": {"source": {"sensor": "0"}, "key": {}, "data": {"state": "1"}}}'
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 0
async_fire_mqtt_message(hass, topic, message)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(BINARY_SENSOR_DOMAIN)) == 1
pir = hass.states.get(f"{BINARY_SENSOR_DOMAIN}.{NAME}_pir_0")
assert pir.state == STATE_ON
assert pir.name == f"{NAME} PIR 0"
async def test_update_address(hass):
"""Test update address works."""
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
assert device.api.config.host == "1.2.3.4"
with patch("axis.vapix.Vapix.request", new=vapix_request), patch(
"homeassistant.components.axis.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.config_entries.flow.async_init(
AXIS_DOMAIN,
data={
"host": "2.3.4.5",
"port": 80,
"hostname": "name",
"properties": {"macaddress": MAC},
},
context={"source": SOURCE_ZEROCONF},
)
await hass.async_block_till_done()
assert device.api.config.host == "2.3.4.5"
assert len(mock_setup_entry.mock_calls) == 1
async def test_device_unavailable(hass):
"""Successful setup."""
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
device.async_connection_status_callback(status=False)
assert not device.available
async def test_device_reset(hass):
"""Successfully reset device."""
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
result = await device.async_reset()
assert result is True
async def test_device_not_accessible(hass):
"""Failed setup schedules a retry of setup."""
with patch.object(axis.device, "get_device", side_effect=axis.errors.CannotConnect):
await setup_axis_integration(hass)
assert hass.data[AXIS_DOMAIN] == {}
async def test_device_unknown_error(hass):
"""Unknown errors are handled."""
with patch.object(axis.device, "get_device", side_effect=Exception):
await setup_axis_integration(hass)
assert hass.data[AXIS_DOMAIN] == {}
async def test_new_event_sends_signal(hass):
"""Make sure that new event send signal."""
entry = Mock()
entry.data = ENTRY_CONFIG
axis_device = axis.device.AxisNetworkDevice(hass, entry)
with patch.object(axis.device, "async_dispatcher_send") as mock_dispatch_send:
axis_device.async_event_callback(action=OPERATION_INITIALIZED, event_id="event")
await hass.async_block_till_done()
assert len(mock_dispatch_send.mock_calls) == 1
assert len(mock_dispatch_send.mock_calls[0]) == 3
async def test_shutdown():
"""Successful shutdown."""
hass = Mock()
entry = Mock()
entry.data = ENTRY_CONFIG
axis_device = axis.device.AxisNetworkDevice(hass, entry)
axis_device.api = Mock()
axis_device.api.vapix.close = AsyncMock()
await axis_device.shutdown(None)
assert len(axis_device.api.stream.stop.mock_calls) == 1
assert len(axis_device.api.vapix.close.mock_calls) == 1
async def test_get_device_fails(hass):
"""Device unauthorized yields authentication required error."""
with patch(
"axis.vapix.Vapix.request", side_effect=axislib.Unauthorized
), pytest.raises(axis.errors.AuthenticationRequired):
await axis.device.get_device(hass, host="", port="", username="", password="")
async def test_get_device_device_unavailable(hass):
"""Device unavailable yields cannot connect error."""
with patch(
"axis.vapix.Vapix.request", side_effect=axislib.RequestError
), pytest.raises(axis.errors.CannotConnect):
await axis.device.get_device(hass, host="", port="", username="", password="")
async def test_get_device_unknown_error(hass):
"""Device yield unknown error."""
with patch(
"axis.vapix.Vapix.request", side_effect=axislib.AxisException
), pytest.raises(axis.errors.AuthenticationRequired):
await axis.device.get_device(hass, host="", port="", username="", password="")
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from diskusage import DiskUsageCollector
##########################################################################
class TestDiskUsageCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('DiskUsageCollector', {
'interval': 10,
'sector_size': '512',
'byte_unit': 'kilobyte'
})
self.collector = DiskUsageCollector(config, None)
def test_config(self):
self.assertFalse(self.collector.config['send_zero'])
def test_import(self):
self.assertTrue(DiskUsageCollector)
@patch('os.access', Mock(return_value=True))
def test_get_disk_statistics(self):
patch_open = patch(
'__builtin__.open',
Mock(return_value=self.getFixture('diskstats')))
open_mock = patch_open.start()
result = self.collector.get_disk_statistics()
patch_open.stop()
open_mock.assert_called_once_with('/proc/diskstats')
self.assertEqual(
sorted(result.keys()),
[(8, 0), (8, 1), (8, 16), (8, 17), (8, 32),
(8, 33), (8, 48), (8, 49), (9, 0)])
return result
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture('proc_diskstats_1')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture('proc_diskstats_2')))
patch_time = patch('time.time', Mock(return_value=20))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults('test_should_work_with_real_data.pkl')
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_verify_supporting_vda_and_xvdb(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_1_vda_xvdb')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_2_vda_xvdb')))
patch_time = patch('time.time', Mock(return_value=20))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults(
'test_verify_supporting_vda_and_xvdb.pkl')
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_verify_supporting_md_dm(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_1_md_dm')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_2_md_dm')))
patch_time = patch('time.time', Mock(return_value=20))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults('test_verify_supporting_md_dm.pkl')
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_verify_supporting_disk(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_1_disk')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_2_disk')))
patch_time = patch('time.time', Mock(return_value=20))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults('test_verify_supporting_disk.pkl')
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_service_Time(self, publish_mock):
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_1_service_time')))
patch_time = patch('time.time', Mock(return_value=10))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
self.assertPublishedMany(publish_mock, {})
patch_open = patch(
'__builtin__.open',
Mock(
return_value=self.getFixture(
'proc_diskstats_2_service_time')))
patch_time = patch('time.time', Mock(return_value=70))
patch_open.start()
patch_time.start()
self.collector.collect()
patch_open.stop()
patch_time.stop()
metrics = self.getPickledResults('test_service_Time.pkl')
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from __future__ import print_function
import argparse
import sys
from six import string_types
from six.moves import input
from stashutils import mount_ctrl, mount_manager
from stashutils.fsi.interfaces import FILESYSTEM_TYPES
_stash = globals()["_stash"]
def list_mounts():
"""list all mounts"""
manager = mount_ctrl.get_manager()
if manager is None:
manager = mount_manager.MountManager()
mount_ctrl.set_manager(manager)
mounts = manager.get_mounts()
for p, fsi, readonly in mounts:
print("{f} on {p}".format(f=fsi.repr(), p=p))
if __name__ == "__main__":
if len(sys.argv) == 2 and ("-l" in sys.argv):
list_mounts()
sys.exit(0)
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--show-labels", action="store_true", dest="list", help="show also filesystem labels")
parser.add_argument("-v", "--verbose", action="store_true", dest="v", help="be more chatty")
parser.add_argument("-y", "--yes", action="store_true", dest="yes", help="enable the monkeypatches without asking")
parser.add_argument("-f", "--fake", action="store_false", dest="do_mount", help="dry run; do not mount fs")
parser.add_argument("-r", "--read-only", action="store_true", dest="readonly", help="mount the filesystem read-only")
parser.add_argument("-t", "--type", action="store", dest="type", default=None, help="Type of the filesystem to mount")
parser.add_argument("options", action="store", nargs="*", help="additional arguments for mounting the fs", default=[])
parser.add_argument("dir", action="store", help="dir to mount to")
ns = parser.parse_args()
if ns.type is None:
print(_stash.text_color("Error: no FS-Type specified!", "red"))
sys.exit(1)
manager = mount_ctrl.get_manager()
if manager is None:
manager = mount_manager.MountManager()
mount_ctrl.set_manager(manager)
if not manager.check_patches_enabled():
if not ns.yes:
print(_stash.text_color("WARNING: ", "red"))
print(_stash.text_color("The 'mount'-command needs to enable a few monkeypatches.", "yellow"))
print(_stash.text_color("Monkeypatches may make the system unstable.", "yellow"))
y = input(_stash.text_color("Do you want to enable these patches? (y/n)", "yellow")).upper() == "Y"
if not y:
print(_stash.text_color("Error: Monkeypatches not enabled!", "red"))
sys.exit(1)
manager.enable_patches()
else:
manager.enable_patches()
    if ns.type not in FILESYSTEM_TYPES:
print(_stash.text_color("Error: Unknown Filesystem-Type!", "red"))
sys.exit(1)
fsic = FILESYSTEM_TYPES[ns.type]
if ns.v:
logger = sys.stdout.write
print("Creating FSI...")
else:
logger = None
fsi = fsic(logger=logger)
if ns.v:
print("Connecting FSI...")
msg = fsi.connect(*tuple(ns.options))
if isinstance(msg, string_types):
print(_stash.text_color("Error: {m}".format(m=msg), "red"))
sys.exit(1)
if ns.do_mount:
try:
manager.mount_fsi(ns.dir, fsi, readonly=ns.readonly)
except mount_manager.MountError as e:
print(_stash.text_color("Error: {e}".format(e=e.message), "red"))
try:
if ns.v:
print("unmounting FSI...")
fsi.close()
except Exception as e:
print(_stash.text_color("Error unmounting FSI: {e}".format(e=e.message), "red"))
else:
if ns.v:
print("Finished cleanup.")
sys.exit(1)
else:
# close fs
fsi.close()
if ns.v:
print("Done.")
if ns.list:
list_mounts()
|
import json
import os
import shutil
import tempfile
import threading
from . import utils
class Persistor:
"""Persist stuff in a place.
This is an intentionally dumb implementation. It is *not* meant to be
fast, or useful for arbitrarily large data. Use lightly.
Intentionally it has no namespaces, sections, etc. Use as a
responsible adult.
"""
def __init__(self, path):
"""Where do you want it persisted."""
self._path = path
self._local = threading.local()
self._local.data = {}
def _set_site(self, site):
"""Set site and create path directory."""
self._site = site
utils.makedirs(os.path.dirname(self._path))
def get(self, key):
"""Get data stored in key."""
self._read()
return self._local.data.get(key)
def set(self, key, value):
"""Store value in key."""
self._read()
self._local.data[key] = value
self._save()
def delete(self, key):
"""Delete key and the value it contains."""
self._read()
if key in self._local.data:
self._local.data.pop(key)
self._save()
def _read(self):
if os.path.isfile(self._path):
with open(self._path) as inf:
self._local.data = json.load(inf)
def _save(self):
dname = os.path.dirname(self._path)
with tempfile.NamedTemporaryFile(dir=dname, delete=False, mode='w+', encoding='utf-8') as outf:
tname = outf.name
json.dump(self._local.data, outf, sort_keys=True, indent=2)
shutil.move(tname, self._path)
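# Minimal usage sketch (illustrative only; the temp-file path and the key
# "last_build" below are hypothetical and not used anywhere else).
def _persistor_example():  # pragma: no cover
    path = os.path.join(tempfile.gettempdir(), 'persistor_demo.json')
    store = Persistor(path)
    store.set('last_build', '2020-06-01')    # written through to disk as JSON
    assert store.get('last_build') == '2020-06-01'
    store.delete('last_build')               # removes the key and saves again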
|
from qutebrowser.mainwindow.statusbar import textbase
class Backforward(textbase.TextBase):
"""Shows navigation indicator (if you can go backward and/or forward)."""
def __init__(self, parent=None):
super().__init__(parent)
self.enabled = False
def on_tab_cur_url_changed(self, tabs):
"""Called on URL changes."""
tab = tabs.widget.currentWidget()
if tab is None: # pragma: no cover
self.setText('')
self.hide()
return
self.on_tab_changed(tab)
def on_tab_changed(self, tab):
"""Update the text based on the given tab."""
text = ''
if tab.history.can_go_back():
text += '<'
if tab.history.can_go_forward():
text += '>'
if text:
text = '[' + text + ']'
self.setText(text)
self.setVisible(bool(text) and self.enabled)
|
import unittest
import keras
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, LSTM
from keras.optimizers import RMSprop, SGD
from keras.utils.np_utils import to_categorical
from common import gpu_test
class TestKeras(unittest.TestCase):
def test_train(self):
train = pd.read_csv("/input/tests/data/train.csv")
x_train = train.iloc[:,1:].values.astype('float32')
y_train = to_categorical(train.iloc[:,0].astype('int32'))
model = Sequential()
model.add(Dense(units=10, input_dim=784, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer=RMSprop(lr=0.001),
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=1, batch_size=32)
    # Uses a convnet, which depends on libcudnn when running on a GPU
def test_conv2d(self):
# Generate dummy data
x_train = np.random.random((100, 100, 100, 3))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100, 3))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)
model = Sequential()
# input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # This throws if libcudnn is not properly installed when running on a GPU
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(x_train, y_train, batch_size=32, epochs=1)
model.evaluate(x_test, y_test, batch_size=32)
def test_lstm(self):
x_train = np.random.random((100, 100, 100))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=(100, 100)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(x_train, y_train, batch_size=32, epochs=1)
model.evaluate(x_test, y_test, batch_size=32)
|
import logging
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .subscriber import Subscriber
_LOGGER = logging.getLogger(__name__)
class DevoloDeviceEntity(Entity):
"""Abstract representation of a device within devolo Home Control."""
def __init__(self, homecontrol, device_instance, element_uid):
"""Initialize a devolo device entity."""
self._device_instance = device_instance
self._unique_id = element_uid
self._homecontrol = homecontrol
self._name = device_instance.settings_property["general_device_settings"].name
self._device_class = None
self._value = None
self._unit = None
self._enabled_default = True
# This is not doing I/O. It fetches an internal state of the API
self._available = device_instance.is_online()
# Get the brand and model information
self._brand = device_instance.brand
self._model = device_instance.name
self.subscriber = None
self.sync_callback = self._sync
async def async_added_to_hass(self) -> None:
"""Call when entity is added to hass."""
self.subscriber = Subscriber(self._name, callback=self.sync_callback)
self._homecontrol.publisher.register(
self._device_instance.uid, self.subscriber, self.sync_callback
)
async def async_will_remove_from_hass(self) -> None:
"""Call when entity is removed or disabled."""
self._homecontrol.publisher.unregister(
self._device_instance.uid, self.subscriber
)
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._unique_id
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self._device_instance.uid)},
"name": self._name,
"manufacturer": self._brand,
"model": self._model,
}
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the display name of this entity."""
return self._name
@property
def available(self) -> bool:
"""Return the online state."""
return self._available
def _sync(self, message):
"""Update the state."""
if message[0] == self._unique_id:
self._value = message[1]
else:
self._generic_message(message)
self.schedule_update_ha_state()
def _generic_message(self, message):
"""Handle generic messages."""
if len(message) == 3 and message[2] == "battery_level":
self._value = message[1]
elif len(message) == 3 and message[2] == "status":
            # The API may be telling us that the device went on- or offline.
self._available = self._device_instance.is_online()
else:
_LOGGER.debug("No valid message received: %s", message)
|
import os.path
from datetime import datetime
from core.admin import AuthorAdmin, BookAdmin, BookResource, CustomBookAdmin
from core.models import Author, Book, Category, EBook, Parent
from django.contrib.admin.models import LogEntry
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test.testcases import TestCase
from django.test.utils import override_settings
from django.utils.translation import gettext_lazy as _
from tablib import Dataset
from import_export.formats.base_formats import DEFAULT_FORMATS
class ImportExportAdminIntegrationTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', '[email protected]',
'password')
user.is_staff = True
user.is_superuser = True
user.save()
self.client.login(username='admin', password='password')
def test_import_export_template(self):
response = self.client.get('/admin/core/book/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'admin/import_export/change_list_import_export.html')
self.assertContains(response, _('Import'))
self.assertContains(response, _('Export'))
@override_settings(TEMPLATE_STRING_IF_INVALID='INVALID_VARIABLE')
def test_import(self):
# GET the import form
response = self.client.get('/admin/core/book/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books.csv')
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, Book._meta.verbose_name_plural)
)
@override_settings(TEMPLATE_STRING_IF_INVALID='INVALID_VARIABLE')
def test_import_mac(self):
# GET the import form
response = self.client.get('/admin/core/book/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books-mac.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books-mac.csv')
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, Book._meta.verbose_name_plural)
)
def test_export(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
data = {
'file_format': '0',
}
date_str = datetime.now().strftime('%Y-%m-%d')
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header("Content-Disposition"))
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(
response['Content-Disposition'],
'attachment; filename="Book-{}.csv"'.format(date_str)
)
def test_returns_xlsx_export(self):
response = self.client.get('/admin/core/book/export/')
self.assertEqual(response.status_code, 200)
for i, f in enumerate(DEFAULT_FORMATS):
if f().get_title() == 'xlsx':
xlsx_index = i
break
else:
self.fail('Unable to find xlsx format. DEFAULT_FORMATS: %r' % DEFAULT_FORMATS)
data = {'file_format': str(xlsx_index)}
response = self.client.post('/admin/core/book/export/', data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header("Content-Disposition"))
self.assertEqual(response['Content-Type'],
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
def test_import_export_buttons_visible_without_add_permission(self):
# issue 38 - Export button not visible when no add permission
original = BookAdmin.has_add_permission
BookAdmin.has_add_permission = lambda self, request: False
response = self.client.get('/admin/core/book/')
BookAdmin.has_add_permission = original
self.assertContains(response, _('Export'))
self.assertContains(response, _('Import'))
def test_import_buttons_visible_without_add_permission(self):
# When using ImportMixin, users should be able to see the import button
# without add permission (to be consistent with ImportExportMixin)
original = AuthorAdmin.has_add_permission
AuthorAdmin.has_add_permission = lambda self, request: False
response = self.client.get('/admin/core/author/')
AuthorAdmin.has_add_permission = original
self.assertContains(response, _('Import'))
self.assertTemplateUsed(response, 'admin/import_export/change_list.html')
def test_import_file_name_in_tempdir(self):
        # 65 - import_file_name form field can be used to access the filesystem
import_file_name = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
data = {
'input_format': "0",
'import_file_name': import_file_name,
'original_file_name': 'books.csv'
}
with self.assertRaises(FileNotFoundError):
self.client.post('/admin/core/book/process_import/', data)
def test_csrf(self):
response = self.client.get('/admin/core/book/process_import/')
self.assertEqual(response.status_code, 405)
def test_import_log_entry(self):
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
book = LogEntry.objects.latest('id')
self.assertEqual(book.object_repr, "Some book")
self.assertEqual(book.object_id, str(1))
def test_import_log_entry_with_fk(self):
Parent.objects.create(id=1234, name='Some Parent')
input_format = '0'
filename = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'exports',
'child.csv')
with open(filename, "rb") as f:
data = {
'input_format': input_format,
'import_file': f,
}
response = self.client.post('/admin/core/child/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/child/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
child = LogEntry.objects.latest('id')
self.assertEqual(child.object_repr, 'Some - child of Some Parent')
self.assertEqual(child.object_id, str(1))
def test_logentry_creation_with_import_obj_exception(self):
# from https://mail.python.org/pipermail/python-dev/2008-January/076194.html
def monkeypatch_method(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
# Cause an exception in import_row, but only after import is confirmed,
# so a failure only occurs when ImportMixin.process_import is called.
class R(BookResource):
def import_obj(self, obj, data, dry_run):
if dry_run:
super().import_obj(obj, data, dry_run)
else:
raise Exception
@monkeypatch_method(BookAdmin)
def get_resource_class(self):
return R
# Verify that when an exception occurs in import_row, when raise_errors is False,
# the returned row result has a correct import_type value,
# so generating log entries does not fail.
@monkeypatch_method(BookAdmin)
def process_dataset(self, dataset, confirm_form, request, *args, **kwargs):
resource = self.get_import_resource_class()(**self.get_import_resource_kwargs(request, *args, **kwargs))
return resource.import_data(dataset,
dry_run=False,
raise_errors=False,
file_name=confirm_form.cleaned_data['original_file_name'],
user=request.user,
**kwargs)
dataset = Dataset(headers=["id","name","author_email"])
dataset.append([1, "Test 1", "[email protected]"])
input_format = '0'
content = dataset.csv
f = SimpleUploadedFile("data.csv", content.encode(), content_type="text/csv")
data = {
"input_format": input_format,
"import_file": f,
}
response = self.client.post('/admin/core/book/import/', data)
self.assertEqual(response.status_code, 200)
confirm_form = response.context['confirm_form']
data = confirm_form.initial
response = self.client.post('/admin/core/book/process_import/', data,
follow=True)
self.assertEqual(response.status_code, 200)
def test_import_with_customized_forms(self):
"""Test if admin import works if forms are customized"""
# We reuse import scheme from `test_import` to import books.csv.
# We use customized BookAdmin (CustomBookAdmin) with modified import
# form, which requires Author to be selected (from available authors).
# Note that url is /admin/core/ebook/import (and not: ...book/import)!
# We need at least a single author in the db to select from in the
# admin import custom forms
Author.objects.create(id=11, name='Test Author')
# GET the import form
response = self.client.get('/admin/core/ebook/import/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/import_export/import.html')
self.assertContains(response, 'form action=""')
# POST the import form
input_format = '0'
filename = os.path.join(os.path.dirname(__file__),
os.path.pardir,
'exports',
'books.csv')
with open(filename, "rb") as fobj:
data = {'author': 11,
'input_format': input_format,
'import_file': fobj}
response = self.client.post('/admin/core/ebook/import/', data)
self.assertEqual(response.status_code, 200)
self.assertIn('result', response.context)
self.assertFalse(response.context['result'].has_errors())
self.assertIn('confirm_form', response.context)
confirm_form = response.context['confirm_form']
self.assertIsInstance(confirm_form,
CustomBookAdmin(EBook, 'ebook/import')
.get_confirm_import_form())
data = confirm_form.initial
self.assertEqual(data['original_file_name'], 'books.csv')
response = self.client.post('/admin/core/ebook/process_import/',
data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
_('Import finished, with {} new and {} updated {}.').format(
1, 0, EBook._meta.verbose_name_plural)
)
class ExportActionAdminIntegrationTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', '[email protected]',
'password')
user.is_staff = True
user.is_superuser = True
user.save()
self.cat1 = Category.objects.create(name='Cat 1')
self.cat2 = Category.objects.create(name='Cat 2')
self.client.login(username='admin', password='password')
def test_export(self):
data = {
'action': ['export_admin_action'],
'file_format': '0',
'_selected_action': [str(self.cat1.id)],
}
response = self.client.post('/admin/core/category/', data)
self.assertContains(response, self.cat1.name, status_code=200)
self.assertNotContains(response, self.cat2.name, status_code=200)
self.assertTrue(response.has_header("Content-Disposition"))
date_str = datetime.now().strftime('%Y-%m-%d')
self.assertEqual(
response['Content-Disposition'],
'attachment; filename="Category-{}.csv"'.format(date_str)
)
def test_export_no_format_selected(self):
data = {
'action': ['export_admin_action'],
'_selected_action': [str(self.cat1.id)],
}
response = self.client.post('/admin/core/category/', data)
self.assertEqual(response.status_code, 302)
|
import unittest
from absl import flags
import mock
from perfkitbenchmarker.providers.gcp import gcp_cloud_redis
from perfkitbenchmarker.providers.gcp import util
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class GcpCloudRedisTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(GcpCloudRedisTestCase, self).setUp()
FLAGS.project = 'project'
FLAGS.zones = ['us-central1-a']
mock_spec = mock.Mock()
self.redis = gcp_cloud_redis.CloudRedis(mock_spec)
def testCreate(self):
with mock.patch.object(util.GcloudCommand, 'Issue',
return_value=('{}', '', 0)) as gcloud:
self.redis._Create()
gcloud.assert_called_once_with(timeout=600)
self.assertTrue(self.redis._Exists())
def testDelete(self):
with mock.patch.object(util.GcloudCommand, 'Issue',
return_value=('{}', '', 0)) as gcloud:
self.redis._Delete()
gcloud.assert_called_once_with(raise_on_failure=False, timeout=600)
def testExistTrue(self):
with mock.patch.object(util.GcloudCommand, 'Issue',
return_value=('{}', '', 0)):
self.assertTrue(self.redis._Exists())
def testExistFalse(self):
with mock.patch.object(util.GcloudCommand, 'Issue',
return_value=('{}', '', 1)):
self.assertFalse(self.redis._Exists())
if __name__ == '__main__':
unittest.main()
|
import pytest
from redbot.pytest.rpc import *
from redbot.core.rpc import get_name
def test_get_name(cog):
assert get_name(cog.cofunc) == "COG__COFUNC"
assert get_name(cog.cofunc2) == "COG__COFUNC2"
assert get_name(cog.func) == "COG__FUNC"
def test_internal_methods_exist(rpc):
assert "GET_METHODS" in rpc._rpc.methods
def test_add_method(rpc, cog):
rpc.add_method(cog.cofunc)
assert get_name(cog.cofunc) in rpc._rpc.methods
def test_double_add(rpc, cog):
rpc.add_method(cog.cofunc)
count = len(rpc._rpc.methods)
rpc.add_method(cog.cofunc)
assert count == len(rpc._rpc.methods)
def test_add_notcoro_method(rpc, cog):
with pytest.raises(TypeError):
rpc.add_method(cog.func)
def test_add_multi(rpc, cog):
funcs = [cog.cofunc, cog.cofunc2, cog.cofunc3]
rpc.add_multi_method(*funcs)
names = [get_name(f) for f in funcs]
assert all(n in rpc._rpc.methods for n in names)
def test_add_multi_bad(rpc, cog):
funcs = [cog.cofunc, cog.cofunc2, cog.cofunc3, cog.func]
with pytest.raises(TypeError):
rpc.add_multi_method(*funcs)
names = [get_name(f) for f in funcs]
assert not any(n in rpc._rpc.methods for n in names)
def test_remove_method(rpc, existing_func):
before_count = len(rpc._rpc.methods)
rpc.remove_method(existing_func)
assert get_name(existing_func) not in rpc._rpc.methods
assert before_count - 1 == len(rpc._rpc.methods)
def test_remove_multi_method(rpc, existing_multi_func):
before_count = len(rpc._rpc.methods)
name = get_name(existing_multi_func[0])
prefix = name.split("__")[0]
rpc.remove_methods(prefix)
assert before_count - len(existing_multi_func) == len(rpc._rpc.methods)
names = [get_name(f) for f in existing_multi_func]
assert not any(n in rpc._rpc.methods for n in names)
def test_rpcmixin_register(rpcmixin, cog):
rpcmixin.register_rpc_handler(cog.cofunc)
    rpcmixin.rpc.add_method.assert_called_once_with(cog.cofunc)
name = get_name(cog.cofunc)
cogname = name.split("__")[0]
assert cogname in rpcmixin.rpc_handlers
def test_rpcmixin_unregister(rpcmixin, cog):
rpcmixin.register_rpc_handler(cog.cofunc)
rpcmixin.unregister_rpc_handler(cog.cofunc)
    rpcmixin.rpc.remove_method.assert_called_once_with(cog.cofunc)
name = get_name(cog.cofunc)
cogname = name.split("__")[0]
if cogname in rpcmixin.rpc_handlers:
assert cog.cofunc not in rpcmixin.rpc_handlers[cogname]
|
import os.path as op
import numpy as np
from pandas import read_csv
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.manifold import MDS
import mne
from mne.io import read_raw_fif, concatenate_raws
from mne.datasets import visual_92_categories
print(__doc__)
data_path = visual_92_categories.data_path()
# Define stimulus - trigger mapping
fname = op.join(data_path, 'visual_stimuli.csv')
conds = read_csv(fname)
print(conds.head(5))
##############################################################################
# Let's restrict the number of conditions to speed up computation
max_trigger = 24
conds = conds[:max_trigger] # take only the first 24 rows
##############################################################################
# Define stimulus - trigger mapping
conditions = []
for c in conds.values:
cond_tags = list(c[:2])
cond_tags += [('not-' if i == 0 else '') + conds.columns[k]
for k, i in enumerate(c[2:], 2)]
conditions.append('/'.join(map(str, cond_tags)))
print(conditions[:10])
##############################################################################
# Let's make the event_id dictionary
event_id = dict(zip(conditions, conds.trigger + 1))
event_id['0/human bodypart/human/not-face/animal/natural']
##############################################################################
# Read MEG data
n_runs = 4 # 4 for full data (use less to speed up computations)
fname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')
raws = [read_raw_fif(fname % block, verbose='error')
for block in range(n_runs)] # ignore filename warnings
raw = concatenate_raws(raws)
events = mne.find_events(raw, min_duration=.002)
events = events[events[:, 2] <= max_trigger]
##############################################################################
# Epoch data
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,
picks=picks, tmin=-.1, tmax=.500, preload=True)
##############################################################################
# Let's plot some conditions
epochs['face'].average().plot()
epochs['not-face'].average().plot()
##############################################################################
# Representational Similarity Analysis (RSA) is a neuroimaging-specific
# appellation for statistics applied to the confusion matrix,
# also referred to as the representational dissimilarity matrix (RDM).
#
# Compared to the approach of Cichy et al., we use a multiclass
# classifier (multinomial logistic regression), while the paper uses
# pairwise binary classification tasks to build the RDM.
# We also use ROC-AUC as the performance metric, whereas the
# paper uses accuracy. Finally, for the sake of time, we run
# RSA on a single window of data, while Cichy et al. ran it for every time
# instant separately.
# Classify using the average signal in the window 50ms to 300ms
# to focus the classifier on the time interval with best SNR.
clf = make_pipeline(StandardScaler(),
LogisticRegression(C=1, solver='liblinear',
multi_class='auto'))
X = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)
y = epochs.events[:, 2]
classes = set(y)
cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
# Compute confusion matrix for each cross-validation fold
y_pred = np.zeros((len(y), len(classes)))
for train, test in cv.split(X, y):
# Fit
clf.fit(X[train], y[train])
# Probabilistic prediction (necessary for ROC-AUC scoring metric)
y_pred[test] = clf.predict_proba(X[test])
##############################################################################
# Compute confusion matrix using ROC-AUC
confusion = np.zeros((len(classes), len(classes)))
for ii, train_class in enumerate(classes):
for jj in range(ii, len(classes)):
confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])
confusion[jj, ii] = confusion[ii, jj]
##############################################################################
# Plot
labels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6
fig, ax = plt.subplots(1)
im = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])
ax.set_yticks(range(len(classes)))
ax.set_yticklabels(labels)
ax.set_xticks(range(len(classes)))
ax.set_xticklabels(labels, rotation=40, ha='left')
ax.axhline(11.5, color='k')
ax.axvline(11.5, color='k')
plt.colorbar(im)
plt.tight_layout()
plt.show()
##############################################################################
# Confusion matrices related to mental representations have historically been
# summarized with dimensionality reduction using multi-dimensional scaling [1].
# See how the face samples cluster together.
fig, ax = plt.subplots(1)
mds = MDS(2, random_state=0, dissimilarity='precomputed')
chance = 0.5
summary = mds.fit_transform(chance - confusion)
cmap = plt.get_cmap('rainbow')
colors = ['r', 'b']
names = list(conds['condition'].values)
for color, name in zip(colors, set(names)):
sel = np.where([this_name == name for this_name in names])[0]
size = 500 if name == 'human face' else 100
ax.scatter(summary[sel, 0], summary[sel, 1], s=size,
facecolors=color, label=name, edgecolors='k')
ax.axis('off')
ax.legend(loc='lower right', scatterpoints=1, ncol=2)
plt.tight_layout()
plt.show()
##############################################################################
# References
# ----------
# .. footbibliography::
|
import pytest
import socket
import warnings
from unittest.mock import Mock, patch
from kombu import Connection
from kombu import pidbox
from kombu.exceptions import ContentDisallowed, InconsistencyError
from kombu.utils.uuid import uuid
def is_cast(message):
    # A cast carries only a method name and expects no reply.
    return message['method']
def is_call(message):
    # A call carries a method name and a reply_to destination.
    return message['method'] and message['reply_to']
class test_Mailbox:
class Mailbox(pidbox.Mailbox):
def _collect(self, *args, **kwargs):
return 'COLLECTED'
def setup(self):
self.mailbox = self.Mailbox('test_pidbox')
self.connection = Connection(transport='memory')
self.state = {'var': 1}
self.handlers = {'mymethod': self._handler}
self.bound = self.mailbox(self.connection)
self.default_chan = self.connection.channel()
self.node = self.bound.Node(
'test_pidbox',
state=self.state, handlers=self.handlers,
channel=self.default_chan,
)
def _handler(self, state):
        return self.state['var']
def test_broadcast_matcher_pattern_string_type(self):
mailbox = pidbox.Mailbox("test_matcher_str")(self.connection)
with pytest.raises(ValueError):
mailbox._broadcast("ping", pattern=1, matcher=2)
def test_publish_reply_ignores_InconsistencyError(self):
mailbox = pidbox.Mailbox('test_reply__collect')(self.connection)
with patch('kombu.pidbox.Producer') as Producer:
producer = Producer.return_value = Mock(name='producer')
producer.publish.side_effect = InconsistencyError()
mailbox._publish_reply(
{'foo': 'bar'}, mailbox.reply_exchange, mailbox.oid, 'foo',
)
producer.publish.assert_called()
def test_reply__collect(self):
mailbox = pidbox.Mailbox('test_reply__collect')(self.connection)
exchange = mailbox.reply_exchange.name
channel = self.connection.channel()
mailbox.reply_queue(channel).declare()
ticket = uuid()
mailbox._publish_reply({'foo': 'bar'}, exchange, mailbox.oid, ticket)
_callback_called = [False]
def callback(body):
_callback_called[0] = True
reply = mailbox._collect(ticket, limit=1,
callback=callback, channel=channel)
assert reply == [{'foo': 'bar'}]
assert _callback_called[0]
ticket = uuid()
mailbox._publish_reply({'biz': 'boz'}, exchange, mailbox.oid, ticket)
reply = mailbox._collect(ticket, limit=1, channel=channel)
assert reply == [{'biz': 'boz'}]
mailbox._publish_reply({'foo': 'BAM'}, exchange, mailbox.oid, 'doom',
serializer='pickle')
with pytest.raises(ContentDisallowed):
reply = mailbox._collect('doom', limit=1, channel=channel)
mailbox._publish_reply(
{'foo': 'BAMBAM'}, exchange, mailbox.oid, 'doom',
serializer='pickle',
)
reply = mailbox._collect('doom', limit=1, channel=channel,
accept=['pickle'])
assert reply[0]['foo'] == 'BAMBAM'
de = mailbox.connection.drain_events = Mock()
de.side_effect = socket.timeout
mailbox._collect(ticket, limit=1, channel=channel)
def test_reply__collect_uses_default_channel(self):
class ConsumerCalled(Exception):
pass
def fake_Consumer(channel, *args, **kwargs):
raise ConsumerCalled(channel)
ticket = uuid()
with patch('kombu.pidbox.Consumer') as Consumer:
mailbox = pidbox.Mailbox('test_reply__collect')(self.connection)
assert mailbox.connection.default_channel is not None
Consumer.side_effect = fake_Consumer
try:
mailbox._collect(ticket, limit=1)
except ConsumerCalled as c:
assert c.args[0] is not None
except Exception:
raise
else:
assert False, "Consumer not called"
def test__publish_uses_default_channel(self):
class QueueCalled(Exception):
pass
def queue__call__side(channel, *args, **kwargs):
raise QueueCalled(channel)
ticket = uuid()
with patch.object(pidbox.Queue, '__call__') as queue__call__:
mailbox = pidbox.Mailbox('test_reply__collect')(self.connection)
queue__call__.side_effect = queue__call__side
try:
mailbox._publish(ticket, {}, reply_ticket=ticket)
except QueueCalled as c:
assert c.args[0] is not None
except Exception:
raise
else:
assert False, "Queue not called"
def test_constructor(self):
assert self.mailbox.connection is None
assert self.mailbox.exchange.name
assert self.mailbox.reply_exchange.name
def test_bound(self):
bound = self.mailbox(self.connection)
assert bound.connection is self.connection
def test_Node(self):
assert self.node.hostname
assert self.node.state
assert self.node.mailbox is self.bound
assert self.handlers
# No initial handlers
node2 = self.bound.Node('test_pidbox2', state=self.state)
assert node2.handlers == {}
def test_Node_consumer(self):
consumer1 = self.node.Consumer()
assert consumer1.channel is self.default_chan
assert consumer1.no_ack
chan2 = self.connection.channel()
consumer2 = self.node.Consumer(channel=chan2, no_ack=False)
assert consumer2.channel is chan2
assert not consumer2.no_ack
def test_Node_consumer_multiple_listeners(self):
warnings.resetwarnings()
consumer = self.node.Consumer()
q = consumer.queues[0]
with warnings.catch_warnings(record=True) as log:
q.on_declared('foo', 1, 1)
assert log
assert 'already using this' in log[0].message.args[0]
with warnings.catch_warnings(record=True) as log:
q.on_declared('foo', 1, 0)
assert not log
def test_handler(self):
node = self.bound.Node('test_handler', state=self.state)
@node.handler
def my_handler_name(state):
return 42
assert 'my_handler_name' in node.handlers
def test_dispatch(self):
node = self.bound.Node('test_dispatch', state=self.state)
@node.handler
def my_handler_name(state, x=None, y=None):
return x + y
assert node.dispatch('my_handler_name',
arguments={'x': 10, 'y': 10}) == 20
def test_dispatch_raising_SystemExit(self):
node = self.bound.Node('test_dispatch_raising_SystemExit',
state=self.state)
@node.handler
def my_handler_name(state):
raise SystemExit
with pytest.raises(SystemExit):
node.dispatch('my_handler_name')
def test_dispatch_raising(self):
node = self.bound.Node('test_dispatch_raising', state=self.state)
@node.handler
def my_handler_name(state):
raise KeyError('foo')
res = node.dispatch('my_handler_name')
assert 'error' in res
assert 'KeyError' in res['error']
def test_dispatch_replies(self):
_replied = [False]
def reply(data, **options):
_replied[0] = True
node = self.bound.Node('test_dispatch', state=self.state)
node.reply = reply
@node.handler
def my_handler_name(state, x=None, y=None):
return x + y
node.dispatch('my_handler_name',
arguments={'x': 10, 'y': 10},
reply_to={'exchange': 'foo', 'routing_key': 'bar'})
assert _replied[0]
def test_reply(self):
        _replied = [(None, None, None, None)]
def publish_reply(data, exchange, routing_key, ticket, **kwargs):
_replied[0] = (data, exchange, routing_key, ticket)
mailbox = self.mailbox(self.connection)
mailbox._publish_reply = publish_reply
node = mailbox.Node('test_reply')
@node.handler
def my_handler_name(state):
return 42
node.dispatch('my_handler_name',
reply_to={'exchange': 'exchange',
'routing_key': 'rkey'},
ticket='TICKET')
data, exchange, routing_key, ticket = _replied[0]
assert data == {'test_reply': 42}
assert exchange == 'exchange'
assert routing_key == 'rkey'
assert ticket == 'TICKET'
def test_handle_message(self):
node = self.bound.Node('test_dispatch_from_message')
@node.handler
def my_handler_name(state, x=None, y=None):
return x * y
body = {'method': 'my_handler_name',
'arguments': {'x': 64, 'y': 64}}
assert node.handle_message(body, None) == 64 * 64
# message not for me should not be processed.
body['destination'] = ['some_other_node']
assert node.handle_message(body, None) is None
# message for me should be processed.
body['destination'] = ['test_dispatch_from_message']
assert node.handle_message(body, None) is not None
        # message with a pattern not matching my hostname should not be processed.
body.pop("destination")
body['matcher'] = 'glob'
body["pattern"] = "something*"
assert node.handle_message(body, None) is None
body["pattern"] = "test*"
assert node.handle_message(body, None) is not None
def test_handle_message_adjusts_clock(self):
node = self.bound.Node('test_adjusts_clock')
@node.handler
def my_handler_name(state):
return 10
body = {'method': 'my_handler_name',
'arguments': {}}
message = Mock(name='message')
message.headers = {'clock': 313}
node.adjust_clock = Mock(name='adjust_clock')
res = node.handle_message(body, message)
node.adjust_clock.assert_called_with(313)
assert res == 10
def test_listen(self):
consumer = self.node.listen()
assert consumer.callbacks[0] == self.node.handle_message
assert consumer.channel == self.default_chan
def test_cast(self):
self.bound.cast(['somenode'], 'mymethod')
consumer = self.node.Consumer()
assert is_cast(self.get_next(consumer))
def test_abcast(self):
self.bound.abcast('mymethod')
consumer = self.node.Consumer()
assert is_cast(self.get_next(consumer))
def test_call_destination_must_be_sequence(self):
with pytest.raises(ValueError):
self.bound.call('some_node', 'mymethod')
def test_call(self):
assert self.bound.call(['some_node'], 'mymethod') == 'COLLECTED'
consumer = self.node.Consumer()
assert is_call(self.get_next(consumer))
def test_multi_call(self):
assert self.bound.multi_call('mymethod') == 'COLLECTED'
consumer = self.node.Consumer()
assert is_call(self.get_next(consumer))
def get_next(self, consumer):
m = consumer.queues[0].get()
if m:
return m.payload
|
from homeassistant.components.abode import ATTR_DEVICE_ID
from homeassistant.components.abode.const import ATTRIBUTION
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_WINDOW,
DOMAIN as BINARY_SENSOR_DOMAIN,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
STATE_OFF,
)
from .common import setup_platform
async def test_entity_registry(hass):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, BINARY_SENSOR_DOMAIN)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get("binary_sensor.front_door")
assert entry.unique_id == "2834013428b6035fba7d4054aa7b25a3"
async def test_attributes(hass):
"""Test the binary sensor attributes are correct."""
await setup_platform(hass, BINARY_SENSOR_DOMAIN)
state = hass.states.get("binary_sensor.front_door")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_DEVICE_ID) == "RF:01430030"
assert not state.attributes.get("battery_low")
assert not state.attributes.get("no_response")
assert state.attributes.get("device_type") == "Door Contact"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "Front Door"
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_WINDOW
|
from os import path
import statistics
from homeassistant import config as hass_config
from homeassistant.components.min_max import DOMAIN
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
PERCENTAGE,
SERVICE_RELOAD,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
VALUES = [17, 20, 15.3]
COUNT = len(VALUES)
MIN_VALUE = min(VALUES)
MAX_VALUE = max(VALUES)
MEAN = round(sum(VALUES) / COUNT, 2)
MEAN_1_DIGIT = round(sum(VALUES) / COUNT, 1)
MEAN_4_DIGITS = round(sum(VALUES) / COUNT, 4)
MEDIAN = round(statistics.median(VALUES), 2)
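# Derived expectations for VALUES = [17, 20, 15.3]:
# MIN_VALUE == 15.3, MAX_VALUE == 20, MEAN == 17.43, MEAN_1_DIGIT == 17.4,
# MEAN_4_DIGITS == 17.4333 and MEDIAN == 17.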
async def test_min_sensor(hass):
"""Test the min sensor."""
config = {
"sensor": {
"platform": "min_max",
"name": "test_min",
"type": "min",
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
for entity_id, value in dict(zip(entity_ids, VALUES)).items():
hass.states.async_set(entity_id, value)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_min")
assert str(float(MIN_VALUE)) == state.state
assert entity_ids[2] == state.attributes.get("min_entity_id")
assert MAX_VALUE == state.attributes.get("max_value")
assert entity_ids[1] == state.attributes.get("max_entity_id")
assert MEAN == state.attributes.get("mean")
assert MEDIAN == state.attributes.get("median")
async def test_max_sensor(hass):
"""Test the max sensor."""
config = {
"sensor": {
"platform": "min_max",
"name": "test_max",
"type": "max",
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
for entity_id, value in dict(zip(entity_ids, VALUES)).items():
hass.states.async_set(entity_id, value)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_max")
assert str(float(MAX_VALUE)) == state.state
assert entity_ids[2] == state.attributes.get("min_entity_id")
assert MIN_VALUE == state.attributes.get("min_value")
assert entity_ids[1] == state.attributes.get("max_entity_id")
assert MEAN == state.attributes.get("mean")
assert MEDIAN == state.attributes.get("median")
async def test_mean_sensor(hass):
"""Test the mean sensor."""
config = {
"sensor": {
"platform": "min_max",
"name": "test_mean",
"type": "mean",
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
for entity_id, value in dict(zip(entity_ids, VALUES)).items():
hass.states.async_set(entity_id, value)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_mean")
assert str(float(MEAN)) == state.state
assert MIN_VALUE == state.attributes.get("min_value")
assert entity_ids[2] == state.attributes.get("min_entity_id")
assert MAX_VALUE == state.attributes.get("max_value")
assert entity_ids[1] == state.attributes.get("max_entity_id")
assert MEDIAN == state.attributes.get("median")
async def test_mean_1_digit_sensor(hass):
"""Test the mean with 1-digit precision sensor."""
config = {
"sensor": {
"platform": "min_max",
"name": "test_mean",
"type": "mean",
"round_digits": 1,
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
for entity_id, value in dict(zip(entity_ids, VALUES)).items():
hass.states.async_set(entity_id, value)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_mean")
assert str(float(MEAN_1_DIGIT)) == state.state
assert MIN_VALUE == state.attributes.get("min_value")
assert entity_ids[2] == state.attributes.get("min_entity_id")
assert MAX_VALUE == state.attributes.get("max_value")
assert entity_ids[1] == state.attributes.get("max_entity_id")
assert MEDIAN == state.attributes.get("median")
async def test_mean_4_digit_sensor(hass):
"""Test the mean with 1-digit precision sensor."""
config = {
"sensor": {
"platform": "min_max",
"name": "test_mean",
"type": "mean",
"round_digits": 4,
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
for entity_id, value in dict(zip(entity_ids, VALUES)).items():
hass.states.async_set(entity_id, value)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_mean")
assert str(float(MEAN_4_DIGITS)) == state.state
assert MIN_VALUE == state.attributes.get("min_value")
assert entity_ids[2] == state.attributes.get("min_entity_id")
assert MAX_VALUE == state.attributes.get("max_value")
assert entity_ids[1] == state.attributes.get("max_entity_id")
assert MEDIAN == state.attributes.get("median")
async def test_median_sensor(hass):
"""Test the median sensor."""
config = {
"sensor": {
"platform": "min_max",
"name": "test_median",
"type": "median",
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
for entity_id, value in dict(zip(entity_ids, VALUES)).items():
hass.states.async_set(entity_id, value)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_median")
assert str(float(MEDIAN)) == state.state
assert MIN_VALUE == state.attributes.get("min_value")
assert entity_ids[2] == state.attributes.get("min_entity_id")
assert MAX_VALUE == state.attributes.get("max_value")
assert entity_ids[1] == state.attributes.get("max_entity_id")
assert MEAN == state.attributes.get("mean")
async def test_not_enough_sensor_value(hass):
"""Test that there is nothing done if not enough values available."""
config = {
"sensor": {
"platform": "min_max",
"name": "test_max",
"type": "max",
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
hass.states.async_set(entity_ids[0], STATE_UNKNOWN)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_max")
assert STATE_UNKNOWN == state.state
assert state.attributes.get("min_entity_id") is None
assert state.attributes.get("min_value") is None
assert state.attributes.get("max_entity_id") is None
assert state.attributes.get("max_value") is None
assert state.attributes.get("median") is None
hass.states.async_set(entity_ids[1], VALUES[1])
await hass.async_block_till_done()
state = hass.states.get("sensor.test_max")
assert STATE_UNKNOWN != state.state
assert entity_ids[1] == state.attributes.get("min_entity_id")
assert VALUES[1] == state.attributes.get("min_value")
assert entity_ids[1] == state.attributes.get("max_entity_id")
assert VALUES[1] == state.attributes.get("max_value")
hass.states.async_set(entity_ids[2], STATE_UNKNOWN)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_max")
assert STATE_UNKNOWN != state.state
assert entity_ids[1] == state.attributes.get("min_entity_id")
assert VALUES[1] == state.attributes.get("min_value")
assert entity_ids[1] == state.attributes.get("max_entity_id")
assert VALUES[1] == state.attributes.get("max_value")
hass.states.async_set(entity_ids[1], STATE_UNAVAILABLE)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_max")
assert STATE_UNKNOWN == state.state
assert state.attributes.get("min_entity_id") is None
assert state.attributes.get("min_value") is None
assert state.attributes.get("max_entity_id") is None
assert state.attributes.get("max_value") is None
async def test_different_unit_of_measurement(hass):
"""Test for different unit of measurement."""
config = {
"sensor": {
"platform": "min_max",
"name": "test",
"type": "mean",
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
hass.states.async_set(
entity_ids[0], VALUES[0], {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert str(float(VALUES[0])) == state.state
assert state.attributes.get("unit_of_measurement") == TEMP_CELSIUS
hass.states.async_set(
entity_ids[1], VALUES[1], {ATTR_UNIT_OF_MEASUREMENT: TEMP_FAHRENHEIT}
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert STATE_UNKNOWN == state.state
assert state.attributes.get("unit_of_measurement") == "ERR"
hass.states.async_set(
entity_ids[2], VALUES[2], {ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE}
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert STATE_UNKNOWN == state.state
assert state.attributes.get("unit_of_measurement") == "ERR"
async def test_last_sensor(hass):
"""Test the last sensor."""
config = {
"sensor": {
"platform": "min_max",
"name": "test_last",
"type": "last",
"entity_ids": ["sensor.test_1", "sensor.test_2", "sensor.test_3"],
}
}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity_ids = config["sensor"]["entity_ids"]
for entity_id, value in dict(zip(entity_ids, VALUES)).items():
hass.states.async_set(entity_id, value)
await hass.async_block_till_done()
state = hass.states.get("sensor.test_last")
assert str(float(value)) == state.state
assert entity_id == state.attributes.get("last_entity_id")
assert MIN_VALUE == state.attributes.get("min_value")
assert MAX_VALUE == state.attributes.get("max_value")
assert MEAN == state.attributes.get("mean")
assert MEDIAN == state.attributes.get("median")
async def test_reload(hass):
"""Verify we can reload filter sensors."""
hass.states.async_set("sensor.test_1", 12345)
hass.states.async_set("sensor.test_2", 45678)
await async_setup_component(
hass,
"sensor",
{
"sensor": {
"platform": "min_max",
"name": "test",
"type": "mean",
"entity_ids": ["sensor.test_1", "sensor.test_2"],
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 3
assert hass.states.get("sensor.test")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"min_max/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 3
assert hass.states.get("sensor.test") is None
assert hass.states.get("sensor.second_test")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from tcp import TCPCollector
##########################################################################
class TestTCPCollector(CollectorTestCase):
def setUp(self, allowed_names=None, gauges=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('TCPCollector', {
'allowed_names': allowed_names,
'gauges': gauges,
'interval': 1
})
self.collector = TCPCollector(config, None)
def test_import(self):
self.assertTrue(TCPCollector)
@patch('os.access', Mock(return_value=True))
@patch('__builtin__.open')
@patch('diamond.collector.Collector.publish')
def test_should_open_proc_net_netstat(self, publish_mock, open_mock):
TCPCollector.PROC = ['/proc/net/netstat']
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/net/netstat')
@patch('os.access', Mock(return_value=True))
@patch('__builtin__.open')
@patch('diamond.collector.Collector.publish')
def test_should_work_with_synthetic_data(self, publish_mock, open_mock):
TCPCollector.PROC = ['/proc/net/netstat']
self.setUp(['A', 'C'])
open_mock.return_value = StringIO('''
TcpExt: A B C
TcpExt: 0 0 0
'''.strip())
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
open_mock.return_value = StringIO('''
TcpExt: A B C
TcpExt: 0 1 2
'''.strip())
self.collector.collect()
self.assertEqual(len(publish_mock.call_args_list), 2)
metrics = {
'A': 0,
'C': 2,
}
self.assertPublishedMany(publish_mock, metrics)
@patch('diamond.collector.Collector.publish')
def test_should_work_with_real_data(self, publish_mock):
self.setUp(['ListenOverflows', 'ListenDrops',
'TCPLoss', 'TCPTimeouts'])
TCPCollector.PROC = [self.getFixturePath('proc_net_netstat_1')]
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
TCPCollector.PROC = [self.getFixturePath('proc_net_netstat_2')]
self.collector.collect()
metrics = {
'ListenOverflows': 0,
'ListenDrops': 0,
'TCPLoss': 188,
'TCPTimeouts': 15265
}
self.assertPublishedMany(publish_mock, metrics)
@patch('diamond.collector.Collector.publish')
def test_should_work_with_all_data(self, publish_mock):
self.setUp([])
TCPCollector.PROC = [
self.getFixturePath('proc_net_netstat_1'),
self.getFixturePath('proc_net_snmp_1'),
]
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
TCPCollector.PROC = [
self.getFixturePath('proc_net_netstat_2'),
self.getFixturePath('proc_net_snmp_2'),
]
self.collector.collect()
metrics = {
'TCPMD5Unexpected': 0.0,
'ArpFilter': 0.0,
'TCPBacklogDrop': 0.0,
'TCPDSACKRecv': 1580.0,
'TCPDSACKIgnoredOld': 292.0,
'MaxConn': (-1.0),
'RcvPruned': 0.0,
'TCPSackMerged': 1121.0,
'OutOfWindowIcmps': 10.0,
'TCPDeferAcceptDrop': 0.0,
'TCPLossUndo': 6538.0,
'TCPHPHitsToUser': 5667.0,
'TCPTimeouts': 15265.0,
'TCPForwardRetrans': 41.0,
'TCPTSReorder': 0.0,
'RtoMin': 0.0,
'TCPAbortOnData': 143.0,
'TCPFullUndo': 0.0,
'TCPSackRecoveryFail': 13.0,
'InErrs': 0.0,
'TCPAbortOnClose': 38916.0,
'TCPAbortOnTimeout': 68.0,
'TCPFACKReorder': 0.0,
'LockDroppedIcmps': 4.0,
'RtoMax': 0.0,
'TCPSchedulerFailed': 0.0,
'EstabResets': 0.0,
'DelayedACKs': 125491.0,
'TCPSACKReneging': 0.0,
'PruneCalled': 0.0,
'OutRsts': 0.0,
'TCPRenoRecoveryFail': 0.0,
'TCPSackShifted': 2356.0,
'DelayedACKLocked': 144.0,
'TCPHPHits': 10361792.0,
'EmbryonicRsts': 0.0,
'TCPLossFailures': 7.0,
'TWKilled': 0.0,
'TCPSACKDiscard': 0.0,
'TCPAbortFailed': 0.0,
'TCPSackRecovery': 364.0,
'TCPDirectCopyFromBacklog': 35660.0,
'TCPFastRetrans': 1184.0,
'TCPPartialUndo': 0.0,
'TCPMinTTLDrop': 0.0,
'SyncookiesSent': 0.0,
'OutSegs': 0.0,
'TCPSackShiftFallback': 3091.0,
'RetransSegs': 0.0,
'IPReversePathFilter': 0.0,
'TCPRcvCollapsed': 0.0,
'TCPDSACKUndo': 2448.0,
'SyncookiesFailed': 9.0,
'TCPSACKReorder': 0.0,
'TCPDSACKOldSent': 10175.0,
'TCPAbortOnLinger': 0.0,
'TCPSpuriousRTOs': 9.0,
'TCPRenoRecovery': 0.0,
'TCPPrequeued': 114232.0,
'TCPLostRetransmit': 7.0,
'TCPLoss': 188.0,
'TCPHPAcks': 12673896.0,
'TCPDSACKOfoRecv': 0.0,
'TWRecycled': 0.0,
'TCPRenoFailures': 0.0,
'OfoPruned': 0.0,
'TCPMD5NotFound': 0.0,
'ActiveOpens': 0.0,
'TCPDSACKIgnoredNoUndo': 1025.0,
'TCPPrequeueDropped': 0.0,
'RtoAlgorithm': 0.0,
'TCPAbortOnMemory': 0.0,
'TCPTimeWaitOverflow': 0.0,
'TCPAbortOnSyn': 0.0,
'TCPDirectCopyFromPrequeue': 19340531.0,
'DelayedACKLost': 10118.0,
'PassiveOpens': 0.0,
'InSegs': 1.0,
'PAWSPassive': 0.0,
'TCPRenoReorder': 0.0,
'CurrEstab': 3.0,
'TW': 89479.0,
'AttemptFails': 0.0,
'PAWSActive': 0.0,
'ListenDrops': 0.0,
'SyncookiesRecv': 0.0,
'TCPDSACKOfoSent': 0.0,
'TCPSlowStartRetrans': 2540.0,
'TCPMemoryPressures': 0.0,
'PAWSEstab': 0.0,
'TCPSackFailures': 502.0,
'ListenOverflows': 0.0,
'TCPPureAcks': 1003528.0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import nltk
from .unit import Unit
class Lemmatization(Unit):
"""Process unit for token lemmatization."""
def transform(self, input_: list) -> list:
"""
        Lemmatize a sequence of tokens.
:param input_: list of tokens to be lemmatized.
        :return tokens: list of lemmatized tokens.
"""
lemmatizer = nltk.WordNetLemmatizer()
return [lemmatizer.lemmatize(token, pos='v') for token in input_]
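# Illustrative usage (a sketch, assuming the WordNet corpus has been fetched
# with ``nltk.download('wordnet')``; exact lemmas depend on the installed
# WordNet version):
#
#     Lemmatization().transform(['running', 'ran', 'studies'])
#     # -> ['run', 'run', 'study']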
|
import pytest
import homeassistant.components.media_player as mp
from homeassistant.components.yamaha import media_player as yamaha
from homeassistant.components.yamaha.const import DOMAIN
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock, PropertyMock, call, patch
CONFIG = {"media_player": {"platform": "yamaha", "host": "127.0.0.1"}}
def _create_zone_mock(name, url):
zone = MagicMock()
zone.ctrl_url = url
zone.zone = name
return zone
class FakeYamahaDevice:
"""A fake Yamaha device."""
def __init__(self, ctrl_url, name, zones=None):
"""Initialize the fake Yamaha device."""
self.ctrl_url = ctrl_url
self.name = name
self._zones = zones or []
def zone_controllers(self):
"""Return controllers for all available zones."""
return self._zones
@pytest.fixture(name="main_zone")
def main_zone_fixture():
"""Mock the main zone."""
return _create_zone_mock("Main zone", "http://main")
@pytest.fixture(name="device")
def device_fixture(main_zone):
"""Mock the yamaha device."""
device = FakeYamahaDevice("http://receiver", "Receiver", zones=[main_zone])
with patch("rxv.RXV", return_value=device):
yield device
async def test_setup_host(hass, device, main_zone):
"""Test set up integration with host."""
assert await async_setup_component(hass, mp.DOMAIN, CONFIG)
await hass.async_block_till_done()
state = hass.states.get("media_player.yamaha_receiver_main_zone")
assert state is not None
assert state.state == "off"
async def test_setup_no_host(hass, device, main_zone):
"""Test set up integration without host."""
with patch("rxv.find", return_value=[device]):
assert await async_setup_component(
hass, mp.DOMAIN, {"media_player": {"platform": "yamaha"}}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.yamaha_receiver_main_zone")
assert state is not None
assert state.state == "off"
async def test_setup_discovery(hass, device, main_zone):
"""Test set up integration via discovery."""
discovery_info = {
"name": "Yamaha Receiver",
"model_name": "Yamaha",
"control_url": "http://receiver",
"description_url": "http://receiver/description",
}
await async_load_platform(
hass, mp.DOMAIN, "yamaha", discovery_info, {mp.DOMAIN: {}}
)
await hass.async_block_till_done()
state = hass.states.get("media_player.yamaha_receiver_main_zone")
assert state is not None
assert state.state == "off"
async def test_setup_zone_ignore(hass, device, main_zone):
"""Test set up integration without host."""
assert await async_setup_component(
hass,
mp.DOMAIN,
{
"media_player": {
"platform": "yamaha",
"host": "127.0.0.1",
"zone_ignore": "Main zone",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("media_player.yamaha_receiver_main_zone")
assert state is None
async def test_enable_output(hass, device, main_zone):
"""Test enable output service."""
assert await async_setup_component(hass, mp.DOMAIN, CONFIG)
await hass.async_block_till_done()
port = "hdmi1"
enabled = True
data = {
"entity_id": "media_player.yamaha_receiver_main_zone",
"port": port,
"enabled": enabled,
}
await hass.services.async_call(DOMAIN, yamaha.SERVICE_ENABLE_OUTPUT, data, True)
assert main_zone.enable_output.call_count == 1
assert main_zone.enable_output.call_args == call(port, enabled)
async def test_select_scene(hass, device, main_zone, caplog):
"""Test select scene service."""
scene_prop = PropertyMock(return_value=None)
type(main_zone).scene = scene_prop
assert await async_setup_component(hass, mp.DOMAIN, CONFIG)
await hass.async_block_till_done()
scene = "TV Viewing"
data = {
"entity_id": "media_player.yamaha_receiver_main_zone",
"scene": scene,
}
await hass.services.async_call(DOMAIN, yamaha.SERVICE_SELECT_SCENE, data, True)
assert scene_prop.call_count == 1
assert scene_prop.call_args == call(scene)
scene = "BD/DVD Movie Viewing"
data["scene"] = scene
await hass.services.async_call(DOMAIN, yamaha.SERVICE_SELECT_SCENE, data, True)
assert scene_prop.call_count == 2
assert scene_prop.call_args == call(scene)
scene_prop.side_effect = AssertionError()
missing_scene = "Missing scene"
data["scene"] = missing_scene
await hass.services.async_call(DOMAIN, yamaha.SERVICE_SELECT_SCENE, data, True)
assert f"Scene '{missing_scene}' does not exist!" in caplog.text
|
import urllib2
import json
import diamond.collector
class HTTPJSONCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(HTTPJSONCollector, self).get_default_config_help()
config_help.update({
'url': 'Full URL',
'headers': 'Header variable if needed. '
'Will be added to every request',
})
return config_help
def get_default_config(self):
default_config = super(HTTPJSONCollector, self).get_default_config()
default_config.update({
'path': 'httpjson',
'url': 'http://localhost/stat',
'headers': {'User-Agent': 'Diamond HTTP collector'},
})
return default_config
def _json_to_flat_metrics(self, prefix, data):
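        """Recursively flatten nested JSON into dotted metric names.

        For example, ``{'gets': {'hits': 5}}`` with prefix ``'stats'``
        yields ``('stats.gets.hits', 5)``; non-numeric leaf values are
        reported as None.
        """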
for key, value in data.items():
if isinstance(value, dict):
for k, v in self._json_to_flat_metrics(
"%s.%s" % (prefix, key), value):
yield k, v
else:
                try:
                    # Validate that the leaf value is numeric; anything that
                    # cannot be interpreted as a number is reported as None.
                    float(value)
                except (ValueError, TypeError):
                    value = None
                finally:
                    yield ("%s.%s" % (prefix, key), value)
def collect(self):
url = self.config['url']
req = urllib2.Request(url, headers=self.config['headers'])
req.add_header('Content-type', 'application/json')
try:
resp = urllib2.urlopen(req)
except urllib2.URLError as e:
self.log.error("Can't open url %s. %s", url, e)
else:
content = resp.read()
try:
data = json.loads(content)
except ValueError as e:
self.log.error("Can't parse JSON object from %s. %s", url, e)
else:
for metric_name, metric_value in self._json_to_flat_metrics(
"", data):
self.publish(metric_name, metric_value)
|
import operator
import numpy as np
from ..fixes import rfft, irfft, rfftfreq
from ..parallel import parallel_func
from ..utils import sum_squared, warn, verbose, logger, _check_option
def dpss_windows(N, half_nbw, Kmax, low_bias=True, interp_from=None,
interp_kind='linear'):
"""Compute Discrete Prolate Spheroidal Sequences.
    Returns the DPSS windows of orders [0, Kmax-1] for a given
    frequency-spacing multiple NW and sequence length N.
.. note:: Copied from NiTime.
Parameters
----------
N : int
Sequence length.
half_nbw : float
Standardized half bandwidth corresponding to 2 * half_bw = BW*f0
= BW*N/dt but with dt taken as 1.
Kmax : int
Number of DPSS windows to return is Kmax (orders 0 through Kmax-1).
low_bias : bool
Keep only tapers with eigenvalues > 0.9.
interp_from : int (optional)
The dpss can be calculated using interpolation from a set of dpss
with the same NW and Kmax, but shorter N. This is the length of this
shorter set of dpss windows.
.. note:: If SciPy 1.1 or greater is available, interpolating
is likely not necessary as DPSS computations should be
sufficiently fast.
interp_kind : str (optional)
This input variable is passed to scipy.interpolate.interp1d and
specifies the kind of interpolation as a string ('linear', 'nearest',
        'zero', 'slinear', 'quadratic', 'cubic') or as an integer specifying the
order of the spline interpolator to use.
Returns
-------
v, e : tuple,
The v array contains DPSS windows shaped (Kmax, N).
e are the eigenvalues.
Notes
-----
Tridiagonal form of DPSS calculation from:
Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
    Volume 57 (1978), 1371-1430.
"""
from scipy import interpolate
from scipy.signal.windows import dpss as sp_dpss
from ..filter import next_fast_len
# This np.int32 business works around a weird Windows bug, see
# gh-5039 and https://github.com/scipy/scipy/pull/8608
Kmax = np.int32(operator.index(Kmax))
N = np.int32(operator.index(N))
W = float(half_nbw) / N
nidx = np.arange(N, dtype='d')
# In this case, we create the dpss windows of the smaller size
# (interp_from) and then interpolate to the larger size (N)
if interp_from is not None:
if interp_from > N:
e_s = 'In dpss_windows, interp_from is: %s ' % interp_from
e_s += 'and N is: %s. ' % N
e_s += 'Please enter interp_from smaller than N.'
raise ValueError(e_s)
dpss = []
d, e = dpss_windows(interp_from, half_nbw, Kmax, low_bias=False)
for this_d in d:
x = np.arange(this_d.shape[-1])
tmp = interpolate.interp1d(x, this_d, kind=interp_kind)
d_temp = tmp(np.linspace(0, this_d.shape[-1] - 1, N,
endpoint=False))
# Rescale:
d_temp = d_temp / np.sqrt(sum_squared(d_temp))
dpss.append(d_temp)
dpss = np.array(dpss)
else:
dpss = sp_dpss(N, half_nbw, Kmax)
# Now find the eigenvalues of the original spectral concentration problem
# Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
# compute autocorr using FFT (same as nitime.utils.autocorr(dpss) * N)
rxx_size = 2 * N - 1
n_fft = next_fast_len(rxx_size)
dpss_fft = rfft(dpss, n_fft)
dpss_rxx = irfft(dpss_fft * dpss_fft.conj(), n_fft)
dpss_rxx = dpss_rxx[:, :N]
r = 4 * W * np.sinc(2 * W * nidx)
r[0] = 2 * W
eigvals = np.dot(dpss_rxx, r)
if low_bias:
idx = (eigvals > 0.9)
if not idx.any():
warn('Could not properly use low_bias, keeping lowest-bias taper')
idx = [np.argmax(eigvals)]
dpss, eigvals = dpss[idx], eigvals[idx]
assert len(dpss) > 0 # should never happen
assert dpss.shape[1] == N # old nitime bug
return dpss, eigvals
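# Illustrative call (a sketch, not executed here): for a 1000-sample window
# with time half-bandwidth NW = 4, request the 2*NW - 1 = 7 lowest-order
# tapers:
#
#     tapers, eigvals = dpss_windows(1000, 4., 7)
#     # tapers.shape == (n_kept, 1000); with low_bias=True only tapers whose
#     # eigenvalue exceeds 0.9 are kept.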
def _psd_from_mt_adaptive(x_mt, eigvals, freq_mask, max_iter=150,
return_weights=False):
r"""Use iterative procedure to compute the PSD from tapered spectra.
.. note:: Modified from NiTime.
Parameters
----------
x_mt : array, shape=(n_signals, n_tapers, n_freqs)
The DFTs of the tapered sequences (only positive frequencies)
eigvals : array, length n_tapers
The eigenvalues of the DPSS tapers
freq_mask : array
Frequency indices to keep
max_iter : int
Maximum number of iterations for weight computation
return_weights : bool
Also return the weights
Returns
-------
psd : array, shape=(n_signals, np.sum(freq_mask))
The computed PSDs
weights : array shape=(n_signals, n_tapers, np.sum(freq_mask))
The weights used to combine the tapered spectra
Notes
-----
The weights to use for making the multitaper estimate, such that
:math:`S_{mt} = \sum_{k} |w_k|^2S_k^{mt} / \sum_{k} |w_k|^2`
"""
n_signals, n_tapers, n_freqs = x_mt.shape
if len(eigvals) != n_tapers:
raise ValueError('Need one eigenvalue for each taper')
if n_tapers < 3:
raise ValueError('Not enough tapers to compute adaptive weights.')
rt_eig = np.sqrt(eigvals)
# estimate the variance from an estimate with fixed weights
psd_est = _psd_from_mt(x_mt, rt_eig[np.newaxis, :, np.newaxis])
x_var = np.trapz(psd_est, dx=np.pi / n_freqs) / (2 * np.pi)
del psd_est
# allocate space for output
psd = np.empty((n_signals, np.sum(freq_mask)))
# only keep the frequencies of interest
x_mt = x_mt[:, :, freq_mask]
if return_weights:
weights = np.empty((n_signals, n_tapers, psd.shape[1]))
for i, (xk, var) in enumerate(zip(x_mt, x_var)):
# combine the SDFs in the traditional way in order to estimate
# the variance of the timeseries
# The process is to iteratively switch solving for the following
# two expressions:
# (1) Adaptive Multitaper SDF:
# S^{mt}(f) = [ sum |d_k(f)|^2 S_k(f) ]/ sum |d_k(f)|^2
#
# (2) Weights
# d_k(f) = [sqrt(lam_k) S^{mt}(f)] / [lam_k S^{mt}(f) + E{B_k(f)}]
#
# Where lam_k are the eigenvalues corresponding to the DPSS tapers,
# and the expected value of the broadband bias function
# E{B_k(f)} is replaced by its full-band integration
# (1/2pi) int_{-pi}^{pi} E{B_k(f)} = sig^2(1-lam_k)
# start with an estimate from incomplete data--the first 2 tapers
psd_iter = _psd_from_mt(xk[:2, :], rt_eig[:2, np.newaxis])
err = np.zeros_like(xk)
for n in range(max_iter):
d_k = (psd_iter / (eigvals[:, np.newaxis] * psd_iter +
(1 - eigvals[:, np.newaxis]) * var))
d_k *= rt_eig[:, np.newaxis]
# Test for convergence -- this is overly conservative, since
# iteration only stops when all frequencies have converged.
# A better approach is to iterate separately for each freq, but
# that is a nonvectorized algorithm.
# Take the RMS difference in weights from the previous iterate
# across frequencies. If the maximum RMS error across freqs is
# less than 1e-10, then we're converged
err -= d_k
if np.max(np.mean(err ** 2, axis=0)) < 1e-10:
break
# update the iterative estimate with this d_k
psd_iter = _psd_from_mt(xk, d_k)
err = d_k
if n == max_iter - 1:
warn('Iterative multi-taper PSD computation did not converge.')
psd[i, :] = psd_iter
if return_weights:
weights[i, :, :] = d_k
if return_weights:
return psd, weights
else:
return psd
def _psd_from_mt(x_mt, weights):
"""Compute PSD from tapered spectra.
Parameters
----------
x_mt : array
Tapered spectra
weights : array
Weights used to combine the tapered spectra
Returns
-------
psd : array
The computed PSD
"""
psd = weights * x_mt
psd *= psd.conj()
psd = psd.real.sum(axis=-2)
psd *= 2 / (weights * weights.conj()).real.sum(axis=-2)
return psd
def _csd_from_mt(x_mt, y_mt, weights_x, weights_y):
"""Compute CSD from tapered spectra.
Parameters
----------
x_mt : array
Tapered spectra for x
y_mt : array
Tapered spectra for y
weights_x : array
Weights used to combine the tapered spectra of x_mt
weights_y : array
Weights used to combine the tapered spectra of y_mt
Returns
-------
psd: array
The computed PSD
"""
csd = np.sum(weights_x * x_mt * (weights_y * y_mt).conj(), axis=-2)
denom = (np.sqrt((weights_x * weights_x.conj()).real.sum(axis=-2)) *
np.sqrt((weights_y * weights_y.conj()).real.sum(axis=-2)))
csd *= 2 / denom
return csd
def _mt_spectra(x, dpss, sfreq, n_fft=None):
"""Compute tapered spectra.
Parameters
----------
x : array, shape=(..., n_times)
Input signal
dpss : array, shape=(n_tapers, n_times)
The tapers
sfreq : float
The sampling frequency
n_fft : int | None
Length of the FFT. If None, the number of samples in the input signal
will be used.
Returns
-------
x_mt : array, shape=(..., n_tapers, n_times)
The tapered spectra
freqs : array
The frequency points in Hz of the spectra
"""
if n_fft is None:
n_fft = x.shape[-1]
# remove mean (do not use in-place subtraction as it may modify input x)
x = x - np.mean(x, axis=-1, keepdims=True)
# only keep positive frequencies
freqs = rfftfreq(n_fft, 1. / sfreq)
# The following is equivalent to this, but uses less memory:
# x_mt = fftpack.fft(x[:, np.newaxis, :] * dpss, n=n_fft)
n_tapers = dpss.shape[0] if dpss.ndim > 1 else 1
x_mt = np.zeros(x.shape[:-1] + (n_tapers, len(freqs)),
dtype=np.complex128)
for idx, sig in enumerate(x):
x_mt[idx] = rfft(sig[..., np.newaxis, :] * dpss, n=n_fft)
# Adjust DC and maybe Nyquist, depending on one-sided transform
x_mt[..., 0] /= np.sqrt(2.)
    if n_fft % 2 == 0:  # only an even-length FFT has a Nyquist bin
x_mt[..., -1] /= np.sqrt(2.)
return x_mt, freqs
@verbose
def _compute_mt_params(n_times, sfreq, bandwidth, low_bias, adaptive,
interp_from=None, verbose=None):
"""Triage windowing and multitaper parameters."""
# Compute standardized half-bandwidth
from scipy.signal import get_window
if isinstance(bandwidth, str):
logger.info(' Using standard spectrum estimation with "%s" window'
% (bandwidth,))
window_fun = get_window(bandwidth, n_times)[np.newaxis]
return window_fun, np.ones(1), False
if bandwidth is not None:
half_nbw = float(bandwidth) * n_times / (2. * sfreq)
else:
half_nbw = 4.
if half_nbw < 0.5:
raise ValueError(
'bandwidth value %s yields a normalized bandwidth of %s < 0.5, '
'use a value of at least %s'
% (bandwidth, half_nbw, sfreq / n_times))
# Compute DPSS windows
n_tapers_max = int(2 * half_nbw)
window_fun, eigvals = dpss_windows(n_times, half_nbw, n_tapers_max,
low_bias=low_bias,
interp_from=interp_from)
logger.info(' Using multitaper spectrum estimation with %d DPSS '
'windows' % len(eigvals))
if adaptive and len(eigvals) < 3:
warn('Not adaptively combining the spectral estimators due to a '
'low number of tapers (%s < 3).' % (len(eigvals),))
adaptive = False
return window_fun, eigvals, adaptive
@verbose
def psd_array_multitaper(x, sfreq, fmin=0, fmax=np.inf, bandwidth=None,
adaptive=False, low_bias=True, normalization='length',
n_jobs=1, verbose=None):
"""Compute power spectral density (PSD) using a multi-taper method.
Parameters
----------
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
%(n_jobs)s
%(verbose)s
Returns
-------
    psds : ndarray, shape (..., n_freqs)
The power spectral densities. All dimensions up to the last will
be the same as input.
freqs : array
The frequency points in Hz of the PSD.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
csd_multitaper
psd_multitaper
Notes
-----
.. versionadded:: 0.14.0
"""
_check_option('normalization', normalization, ['length', 'full'])
# Reshape data so its 2-D for parallelization
ndim_in = x.ndim
x = np.atleast_2d(x)
n_times = x.shape[-1]
dshape = x.shape[:-1]
x = x.reshape(-1, n_times)
dpss, eigvals, adaptive = _compute_mt_params(
n_times, sfreq, bandwidth, low_bias, adaptive)
# decide which frequencies to keep
freqs = rfftfreq(n_times, 1. / sfreq)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[freq_mask]
psd = np.zeros((x.shape[0], freq_mask.sum()))
# Let's go in up to 50 MB chunks of signals to save memory
n_chunk = max(50000000 // (len(freq_mask) * len(eigvals) * 16), n_jobs)
offsets = np.concatenate((np.arange(0, x.shape[0], n_chunk), [x.shape[0]]))
for start, stop in zip(offsets[:-1], offsets[1:]):
x_mt = _mt_spectra(x[start:stop], dpss, sfreq)[0]
if not adaptive:
weights = np.sqrt(eigvals)[np.newaxis, :, np.newaxis]
psd[start:stop] = _psd_from_mt(x_mt[:, :, freq_mask], weights)
else:
n_splits = min(stop - start, n_jobs)
parallel, my_psd_from_mt_adaptive, n_jobs = \
parallel_func(_psd_from_mt_adaptive, n_splits)
out = parallel(my_psd_from_mt_adaptive(x, eigvals, freq_mask)
for x in np.array_split(x_mt, n_splits))
psd[start:stop] = np.concatenate(out)
if normalization == 'full':
psd /= sfreq
# Combining/reshaping to original data shape
psd.shape = dshape + (-1,)
if ndim_in == 1:
psd = psd[0]
return psd, freqs
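# Illustrative call (a sketch using synthetic data): multitaper PSD of random
# signals sampled at 1 kHz, restricted to 1-40 Hz:
#
#     rng = np.random.RandomState(0)
#     data = rng.randn(2, 4, 1000)
#     psds, freqs = psd_array_multitaper(data, sfreq=1000., fmin=1., fmax=40.)
#     # psds.shape == (2, 4, len(freqs))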
@verbose
def tfr_array_multitaper(epoch_data, sfreq, freqs, n_cycles=7.0,
zero_mean=True, time_bandwidth=None, use_fft=True,
decim=1, output='complex', n_jobs=1,
verbose=None):
"""Compute Time-Frequency Representation (TFR) using DPSS tapers.
Same computation as `~mne.time_frequency.tfr_multitaper`, but operates on
:class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
sfreq : float | int
Sampling frequency of the data.
freqs : array-like of float, shape (n_freqs,)
The frequencies.
n_cycles : float | array of float
Number of cycles in the wavelet. Fixed number or one per
frequency. Defaults to 7.0.
zero_mean : bool
If True, make sure the wavelets have a mean of zero. Defaults to True.
time_bandwidth : float
If None, will be set to 4.0 (3 tapers). Time x (Full) Bandwidth
product. The number of good tapers (low-bias) is chosen automatically
based on this to equal floor(time_bandwidth - 1). Defaults to None.
use_fft : bool
Use the FFT for convolutions or not. Defaults to True.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition. Defaults to 1.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, default 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
%(n_jobs)s
The number of epochs to process at the same time. The parallelization
is implemented across channels. Defaults to 1.
%(verbose)s
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
n_times), else it is (n_chans, n_freqs, n_times). If output is
'avg_power_itc', the real values code for 'avg_power' and the
imaginary values code for the 'itc': out = avg_power + i * itc.
See Also
--------
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
Notes
-----
.. versionadded:: 0.14.0
"""
from .tfr import _compute_tfr
return _compute_tfr(epoch_data, freqs, sfreq=sfreq,
method='multitaper', n_cycles=n_cycles,
zero_mean=zero_mean, time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim, output=output,
n_jobs=n_jobs, verbose=verbose)
|
from flexx.util.testing import run_tests_if_main, raises
import time
import asyncio
import threading
from flexx import event
def test_in_thread2():
""" Test running a Component object in another thread.
"""
res = []
class MyComp1(event.Component):
foo = event.IntProp(0, settable=True)
@event.reaction('foo')
def on_foo(self, *events):
for ev in events:
res.append(ev.new_value)
def main():
# Create fresh ioloop and make flexx use it
# event.loop.reset()
loop = asyncio.new_event_loop()
event.loop.integrate(loop, reset=True)
# Create component and manipulate prop
component = MyComp1()
component.set_foo(3)
component.set_foo(4)
        # Run mainloop for one iteration
loop.call_later(0.2, loop.stop)
loop.run_forever()
t = threading.Thread(target=main)
t.start()
t.join()
event.loop.integrate(reset=True) # restore
assert res == [0, 3, 4]
def test_in_thread3():
""" Test hotswapping the loop to another thread.
"""
res = []
class MyComp1(event.Component):
foo = event.IntProp(0, settable=True)
@event.reaction('foo')
def on_foo(self, *events):
for ev in events:
res.append(ev.new_value)
def main():
# Create fresh ioloop and make flexx use it
# event.loop.reset()
loop = asyncio.new_event_loop()
event.loop.integrate(loop, reset=False) # no reset!
        # Run mainloop for one iteration
loop.call_later(0.2, loop.stop)
loop.run_forever()
# Create component and manipulate prop
event.loop.reset()
component = MyComp1()
component.set_foo(3)
component.set_foo(4)
t = threading.Thread(target=main)
t.start()
t.join()
event.loop.integrate(reset=True) # restore
assert res == [0, 3, 4]
def test_in_thread4():
""" Test invoking actions from another thread.
"""
res = []
class MyComp1(event.Component):
foo = event.IntProp(0, settable=True)
@event.reaction('foo')
def on_foo(self, *events):
for ev in events:
res.append(ev.new_value)
def main():
# Create fresh ioloop and make flexx use it
# event.loop.reset()
loop = asyncio.new_event_loop()
event.loop.integrate(loop, reset=False) # no reset!
# set foo
component.set_foo(3)
        # Run mainloop for one iteration
loop.call_later(0.4, loop.stop)
loop.run_forever()
# Create component and manipulate prop
event.loop.reset()
component = MyComp1()
t = threading.Thread(target=main)
t.start()
time.sleep(0.2)
component.set_foo(4) # invoke from main thread
t.join()
event.loop.integrate(reset=True) # restore
assert res == [0, 3, 4]
run_tests_if_main()
|
import asyncio
from base64 import b64encode
from collections import defaultdict
from datetime import timedelta
from itertools import product
import logging
from broadlink.exceptions import (
AuthorizationError,
BroadlinkException,
NetworkTimeoutError,
ReadError,
StorageError,
)
import voluptuous as vol
from homeassistant.components.remote import (
ATTR_ALTERNATIVE,
ATTR_COMMAND,
ATTR_DELAY_SECS,
ATTR_DEVICE,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
PLATFORM_SCHEMA,
SUPPORT_LEARN_COMMAND,
RemoteEntity,
)
from homeassistant.const import CONF_HOST, STATE_OFF
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.storage import Store
from homeassistant.util.dt import utcnow
from .const import DOMAIN
from .helpers import data_packet, import_device
_LOGGER = logging.getLogger(__name__)
LEARNING_TIMEOUT = timedelta(seconds=30)
CODE_STORAGE_VERSION = 1
FLAG_STORAGE_VERSION = 1
FLAG_SAVE_DELAY = 15
COMMAND_SCHEMA = vol.Schema(
{
vol.Required(ATTR_COMMAND): vol.All(
cv.ensure_list, [vol.All(cv.string, vol.Length(min=1))], vol.Length(min=1)
),
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SEND_SCHEMA = COMMAND_SCHEMA.extend(
{
vol.Optional(ATTR_DEVICE): vol.All(cv.string, vol.Length(min=1)),
vol.Optional(ATTR_DELAY_SECS, default=DEFAULT_DELAY_SECS): vol.Coerce(float),
}
)
SERVICE_LEARN_SCHEMA = COMMAND_SCHEMA.extend(
{
vol.Required(ATTR_DEVICE): vol.All(cv.string, vol.Length(min=1)),
vol.Optional(ATTR_ALTERNATIVE, default=False): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_HOST): cv.string}, extra=vol.ALLOW_EXTRA
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import the device and discontinue platform.
This is for backward compatibility.
Do not use this method.
"""
import_device(hass, config[CONF_HOST])
_LOGGER.warning(
"The remote platform is deprecated, please remove it from your configuration"
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a Broadlink remote."""
device = hass.data[DOMAIN].devices[config_entry.entry_id]
remote = BroadlinkRemote(
device,
Store(hass, CODE_STORAGE_VERSION, f"broadlink_remote_{device.unique_id}_codes"),
Store(hass, FLAG_STORAGE_VERSION, f"broadlink_remote_{device.unique_id}_flags"),
)
loaded = await remote.async_load_storage_files()
if not loaded:
_LOGGER.error("Failed to create '%s Remote' entity: Storage error", device.name)
return
async_add_entities([remote], False)
class BroadlinkRemote(RemoteEntity, RestoreEntity):
"""Representation of a Broadlink remote."""
def __init__(self, device, codes, flags):
"""Initialize the remote."""
self._device = device
self._coordinator = device.update_manager.coordinator
self._code_storage = codes
self._flag_storage = flags
self._codes = {}
self._flags = defaultdict(int)
self._state = True
@property
def name(self):
"""Return the name of the remote."""
return f"{self._device.name} Remote"
@property
def unique_id(self):
"""Return the unique id of the remote."""
return self._device.unique_id
@property
def is_on(self):
"""Return True if the remote is on."""
return self._state
@property
def available(self):
"""Return True if the remote is available."""
return self._device.update_manager.available
@property
def should_poll(self):
"""Return True if the remote has to be polled for state."""
return False
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_LEARN_COMMAND
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {(DOMAIN, self._device.unique_id)},
"manufacturer": self._device.api.manufacturer,
"model": self._device.api.model,
"name": self._device.name,
"sw_version": self._device.fw_version,
}
def get_code(self, command, device):
"""Return a code and a boolean indicating a toggle command.
If the command starts with `b64:`, extract the code from it.
Otherwise, extract the code from the dictionary, using the device
and command as keys.
You need to change the flag whenever a toggle command is sent
successfully. Use `self._flags[device] ^= 1`.
"""
if command.startswith("b64:"):
code, is_toggle_cmd = command[4:], False
else:
if device is None:
raise KeyError("You need to specify a device")
try:
code = self._codes[device][command]
except KeyError as err:
raise KeyError("Command not found") from err
# For toggle commands, alternate between codes in a list.
if isinstance(code, list):
code = code[self._flags[device]]
is_toggle_cmd = True
else:
is_toggle_cmd = False
try:
return data_packet(code), is_toggle_cmd
except ValueError as err:
raise ValueError("Invalid code") from err
@callback
def get_flags(self):
"""Return a dictionary of toggle flags.
A toggle flag indicates whether the remote should send an
alternative code.
"""
return self._flags
async def async_added_to_hass(self):
"""Call when the remote is added to hass."""
state = await self.async_get_last_state()
self._state = state is None or state.state != STATE_OFF
self.async_on_remove(
self._coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update the remote."""
await self._coordinator.async_request_refresh()
async def async_turn_on(self, **kwargs):
"""Turn on the remote."""
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn off the remote."""
self._state = False
self.async_write_ha_state()
async def async_load_storage_files(self):
"""Load codes and toggle flags from storage files."""
try:
self._codes.update(await self._code_storage.async_load() or {})
self._flags.update(await self._flag_storage.async_load() or {})
except HomeAssistantError:
return False
return True
async def async_send_command(self, command, **kwargs):
"""Send a list of commands to a device."""
kwargs[ATTR_COMMAND] = command
kwargs = SERVICE_SEND_SCHEMA(kwargs)
commands = kwargs[ATTR_COMMAND]
device = kwargs.get(ATTR_DEVICE)
repeat = kwargs[ATTR_NUM_REPEATS]
delay = kwargs[ATTR_DELAY_SECS]
if not self._state:
_LOGGER.warning(
"remote.send_command canceled: %s entity is turned off", self.entity_id
)
return
should_delay = False
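        # Sleep only between commands that were actually sent; a failed code
        # look-up resets the flag so the next command goes out immediately.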
for _, cmd in product(range(repeat), commands):
if should_delay:
await asyncio.sleep(delay)
try:
code, is_toggle_cmd = self.get_code(cmd, device)
except (KeyError, ValueError) as err:
_LOGGER.error("Failed to send '%s': %s", cmd, err)
should_delay = False
continue
try:
await self._device.async_request(self._device.api.send_data, code)
except (AuthorizationError, NetworkTimeoutError, OSError) as err:
_LOGGER.error("Failed to send '%s': %s", command, err)
break
except BroadlinkException as err:
_LOGGER.error("Failed to send '%s': %s", command, err)
should_delay = False
continue
should_delay = True
if is_toggle_cmd:
self._flags[device] ^= 1
self._flag_storage.async_delay_save(self.get_flags, FLAG_SAVE_DELAY)
async def async_learn_command(self, **kwargs):
"""Learn a list of commands from a remote."""
kwargs = SERVICE_LEARN_SCHEMA(kwargs)
commands = kwargs[ATTR_COMMAND]
device = kwargs[ATTR_DEVICE]
toggle = kwargs[ATTR_ALTERNATIVE]
if not self._state:
_LOGGER.warning(
"remote.learn_command canceled: %s entity is turned off", self.entity_id
)
return
should_store = False
for command in commands:
try:
code = await self._async_learn_command(command)
if toggle:
code = [code, await self._async_learn_command(command)]
except (AuthorizationError, NetworkTimeoutError, OSError) as err:
_LOGGER.error("Failed to learn '%s': %s", command, err)
break
except BroadlinkException as err:
_LOGGER.error("Failed to learn '%s': %s", command, err)
continue
self._codes.setdefault(device, {}).update({command: code})
should_store = True
if should_store:
await self._code_storage.async_save(self._codes)
async def _async_learn_command(self, command):
"""Learn a command from a remote."""
try:
await self._device.async_request(self._device.api.enter_learning)
except (BroadlinkException, OSError) as err:
_LOGGER.debug("Failed to enter learning mode: %s", err)
raise
self.hass.components.persistent_notification.async_create(
f"Press the '{command}' button.",
title="Learn command",
notification_id="learn_command",
)
try:
start_time = utcnow()
while (utcnow() - start_time) < LEARNING_TIMEOUT:
await asyncio.sleep(1)
try:
code = await self._device.async_request(self._device.api.check_data)
except (ReadError, StorageError):
continue
return b64encode(code).decode("utf8")
raise TimeoutError("No code received")
finally:
self.hass.components.persistent_notification.async_dismiss(
notification_id="learn_command"
)
|
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import ch_block_storage
BENCHMARK_NAME = 'ch_block_storage'
BENCHMARK_CONFIG = """
ch_block_storage:
description: Runs cloudharmony block storage tests.
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
"""
flags.DEFINE_multi_enum(
'ch_block_tests', ['iops'],
['iops', 'throughput', 'latency', 'wsat', 'hir'],
'A list of tests supported by CloudHarmony block storage benchmark.')
FLAGS = flags.FLAGS
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
disk_spec = config['vm_groups']['default']['disk_spec']
# Use raw device
# TODO(yuyanting): Support filesystem?
for cloud in disk_spec:
disk_spec[cloud]['mount_point'] = None
return config
def Prepare(benchmark_spec):
"""Prepares the cloudharmony block storage benchmark."""
vm = benchmark_spec.vms[0]
vm.Install('ch_block_storage')
def _PrepareDevicePath(vm, link):
"""Find device path and grant full permission.
Args:
vm: VirtualMachine object.
link: string. Represents device path.
Returns:
String represents the actual path to the device.
"""
path = vm.RemoteCommand('readlink -f %s' % link)[0][:-1]
vm.RemoteCommand('sudo chmod 777 %s' % path)
return path
def _LocateFioJson(vm, outdir, test):
"""Locate raw fio output.
Args:
vm: VirtualMachine object.
outdir: string. Output directory.
test: string. Test name.
Returns:
A list of strings representing fio output filename.
"""
fns, _ = vm.RemoteCommand('ls %s/fio-%s*.json' % (outdir, test))
return fns.split()
def Run(benchmark_spec):
"""Runs cloudharmony block storage and reports the results."""
vm = benchmark_spec.vms[0]
target = ' '.join(['--target=%s' % _PrepareDevicePath(vm, dev.GetDevicePath())
for dev in vm.scratch_disks])
tests = ' '.join(['--test=%s' % test for test in FLAGS.ch_block_tests])
args = ' '.join(['--%s' % param for param in FLAGS.ch_params])
outdir = vm_util.VM_TMP_DIR
cmd = ('{benchmark_dir}/run.sh '
'{target} {tests} '
'--output={outdir} --noreport {args} --verbose').format(
benchmark_dir=ch_block_storage.INSTALL_PATH,
target=target, outdir=outdir,
tests=tests, args=args)
vm.RobustRemoteCommand('sudo %s' % cmd)
results = []
for test in FLAGS.ch_block_tests:
metadata = {'ch_test': test}
result_json, _ = vm.RemoteCommand('cat %s/%s.json' % (outdir, test))
fns = _LocateFioJson(vm, outdir, test)
fio_json_list = [
vm.RemoteCommand('cat %s' % fn)[0] for fn in fns]
tmp_results = ch_block_storage.ParseOutput(result_json, fio_json_list)
for r in tmp_results:
r.metadata.update(metadata)
results += tmp_results
return results
def Cleanup(benchmark_spec):
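  """Cleans up the cloudharmony block storage benchmark."""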
vm = benchmark_spec.vms[0]
vm.RemoteCommand('rm -rf %s' % ch_block_storage.INSTALL_PATH)
|
import argparse
import itertools
from paasta_tools.marathon_tools import get_list_of_marathon_clients
def parse_args():
parser = argparse.ArgumentParser(
description=(
"Helper script to get the number of in-progress marathon deployments"
)
)
return parser.parse_args()
def get_deployments():
clients = get_list_of_marathon_clients()
return [
deployment
for deployment in itertools.chain.from_iterable(
c.list_deployments() for c in clients
)
]
def main():
parse_args()
print(len(get_deployments()))
if __name__ == "__main__":
main()
|
import copy
import logging
import socket
from typing import Any, Dict, Optional
from pyvizio import VizioAsync, async_guess_device_type
from pyvizio.const import APP_HOME
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.media_player import DEVICE_CLASS_SPEAKER, DEVICE_CLASS_TV
from homeassistant.config_entries import (
SOURCE_IGNORE,
SOURCE_IMPORT,
SOURCE_ZEROCONF,
ConfigEntry,
)
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_DEVICE_CLASS,
CONF_EXCLUDE,
CONF_HOST,
CONF_INCLUDE,
CONF_NAME,
CONF_PIN,
CONF_PORT,
CONF_TYPE,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import DiscoveryInfoType
from homeassistant.util.network import is_ip_address
from .const import (
CONF_APPS,
CONF_APPS_TO_INCLUDE_OR_EXCLUDE,
CONF_INCLUDE_OR_EXCLUDE,
CONF_VOLUME_STEP,
DEFAULT_DEVICE_CLASS,
DEFAULT_NAME,
DEFAULT_VOLUME_STEP,
DEVICE_ID,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
def _get_config_schema(input_dict: Dict[str, Any] = None) -> vol.Schema:
"""
Return schema defaults for init step based on user input/config dict.
Retain info already provided for future form views by setting them
as defaults in schema.
"""
if input_dict is None:
input_dict = {}
return vol.Schema(
{
vol.Required(
CONF_NAME, default=input_dict.get(CONF_NAME, DEFAULT_NAME)
): str,
vol.Required(CONF_HOST, default=input_dict.get(CONF_HOST)): str,
vol.Required(
CONF_DEVICE_CLASS,
default=input_dict.get(CONF_DEVICE_CLASS, DEFAULT_DEVICE_CLASS),
): vol.All(str, vol.Lower, vol.In([DEVICE_CLASS_TV, DEVICE_CLASS_SPEAKER])),
vol.Optional(
CONF_ACCESS_TOKEN, default=input_dict.get(CONF_ACCESS_TOKEN, "")
): str,
},
extra=vol.REMOVE_EXTRA,
)
def _get_pairing_schema(input_dict: Dict[str, Any] = None) -> vol.Schema:
"""
Return schema defaults for pairing data based on user input.
Retain info already provided for future form views by setting
them as defaults in schema.
"""
if input_dict is None:
input_dict = {}
return vol.Schema(
{vol.Required(CONF_PIN, default=input_dict.get(CONF_PIN, "")): str}
)
def _host_is_same(host1: str, host2: str) -> bool:
"""Check if host1 and host2 are the same."""
host1 = host1.split(":")[0]
host1 = host1 if is_ip_address(host1) else socket.gethostbyname(host1)
host2 = host2.split(":")[0]
host2 = host2 if is_ip_address(host2) else socket.gethostbyname(host2)
return host1 == host2
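# Note: zeroconf discovery stores CONF_HOST as "<ip>:<port>" (see
# async_step_zeroconf below), so the port is stripped before comparison and
# hostnames from configuration.yaml are resolved to IP addresses. For example,
# "vizio.local" and "192.168.1.20:7345" compare equal when the name resolves
# to that address (hypothetical values).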
class VizioOptionsConfigFlow(config_entries.OptionsFlow):
"""Handle Vizio options."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize vizio options flow."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Manage the vizio options."""
if user_input is not None:
if user_input.get(CONF_APPS_TO_INCLUDE_OR_EXCLUDE):
user_input[CONF_APPS] = {
user_input[CONF_INCLUDE_OR_EXCLUDE]: user_input[
CONF_APPS_TO_INCLUDE_OR_EXCLUDE
].copy()
}
user_input.pop(CONF_INCLUDE_OR_EXCLUDE)
user_input.pop(CONF_APPS_TO_INCLUDE_OR_EXCLUDE)
return self.async_create_entry(title="", data=user_input)
options = vol.Schema(
{
vol.Optional(
CONF_VOLUME_STEP,
default=self.config_entry.options.get(
CONF_VOLUME_STEP, DEFAULT_VOLUME_STEP
),
): vol.All(vol.Coerce(int), vol.Range(min=1, max=10))
}
)
if self.config_entry.data[CONF_DEVICE_CLASS] == DEVICE_CLASS_TV:
default_include_or_exclude = (
CONF_EXCLUDE
if self.config_entry.options
and CONF_EXCLUDE in self.config_entry.options.get(CONF_APPS, {})
else CONF_INCLUDE
)
options = options.extend(
{
vol.Optional(
CONF_INCLUDE_OR_EXCLUDE,
default=default_include_or_exclude.title(),
): vol.All(
vol.In([CONF_INCLUDE.title(), CONF_EXCLUDE.title()]), vol.Lower
),
vol.Optional(
CONF_APPS_TO_INCLUDE_OR_EXCLUDE,
default=self.config_entry.options.get(CONF_APPS, {}).get(
default_include_or_exclude, []
),
): cv.multi_select(
[
APP_HOME["name"],
*[
app["name"]
for app in self.hass.data[DOMAIN][CONF_APPS].data
],
]
),
}
)
return self.async_show_form(step_id="init", data_schema=options)
class VizioConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Vizio config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> VizioOptionsConfigFlow:
"""Get the options flow for this handler."""
return VizioOptionsConfigFlow(config_entry)
def __init__(self) -> None:
"""Initialize config flow."""
self._user_schema = None
self._must_show_form = None
self._ch_type = None
self._pairing_token = None
self._data = None
self._apps = {}
async def _create_entry(self, input_dict: Dict[str, Any]) -> Dict[str, Any]:
"""Create vizio config entry."""
# Remove extra keys that will not be used by entry setup
input_dict.pop(CONF_APPS_TO_INCLUDE_OR_EXCLUDE, None)
input_dict.pop(CONF_INCLUDE_OR_EXCLUDE, None)
if self._apps:
input_dict[CONF_APPS] = self._apps
return self.async_create_entry(title=input_dict[CONF_NAME], data=input_dict)
async def async_step_user(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by the user."""
assert self.hass
errors = {}
if user_input is not None:
# Store current values in case setup fails and user needs to edit
self._user_schema = _get_config_schema(user_input)
if self.unique_id is None:
unique_id = await VizioAsync.get_unique_id(
user_input[CONF_HOST],
user_input[CONF_DEVICE_CLASS],
session=async_get_clientsession(self.hass, False),
)
# Check if unique ID was found, set unique ID, and abort if a flow with
# the same unique ID is already in progress
if not unique_id:
errors[CONF_HOST] = "cannot_connect"
elif (
await self.async_set_unique_id(
unique_id=unique_id, raise_on_progress=True
)
is not None
):
errors[CONF_HOST] = "existing_config_entry_found"
if not errors:
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
if self._must_show_form and self.context["source"] == SOURCE_ZEROCONF:
# Discovery should always display the config form before trying to
# create entry so that user can update default config options
self._must_show_form = False
elif user_input[
CONF_DEVICE_CLASS
] == DEVICE_CLASS_SPEAKER or user_input.get(CONF_ACCESS_TOKEN):
# Ensure config is valid for a device
if not await VizioAsync.validate_ha_config(
user_input[CONF_HOST],
user_input.get(CONF_ACCESS_TOKEN),
user_input[CONF_DEVICE_CLASS],
session=async_get_clientsession(self.hass, False),
):
errors["base"] = "cannot_connect"
if not errors:
return await self._create_entry(user_input)
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
elif self._must_show_form and self.context["source"] == SOURCE_IMPORT:
# Import should always display the config form if CONF_ACCESS_TOKEN
# wasn't included but is needed so that the user can choose to update
# their configuration.yaml or to proceed with config flow pairing. We
                    # will also provide a contextual message to the user explaining why.
_LOGGER.warning(
"Couldn't complete configuration.yaml import: '%s' key is "
"missing. Either provide '%s' key in configuration.yaml or "
"finish setup by completing configuration via frontend",
CONF_ACCESS_TOKEN,
CONF_ACCESS_TOKEN,
)
self._must_show_form = False
else:
self._data = copy.deepcopy(user_input)
return await self.async_step_pair_tv()
schema = self._user_schema or _get_config_schema()
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
if errors and self.context["source"] == SOURCE_IMPORT:
# Log an error message if import config flow fails since otherwise failure is silent
_LOGGER.error(
"configuration.yaml import failure: %s", ", ".join(errors.values())
)
return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
async def async_step_import(self, import_config: Dict[str, Any]) -> Dict[str, Any]:
"""Import a config entry from configuration.yaml."""
# Check if new config entry matches any existing config entries
for entry in self.hass.config_entries.async_entries(DOMAIN):
            # If the source is "ignore", bypass the host check and continue through the loop
if entry.source == SOURCE_IGNORE:
continue
if await self.hass.async_add_executor_job(
_host_is_same, entry.data[CONF_HOST], import_config[CONF_HOST]
):
updated_options = {}
updated_data = {}
remove_apps = False
if entry.data[CONF_HOST] != import_config[CONF_HOST]:
updated_data[CONF_HOST] = import_config[CONF_HOST]
if entry.data[CONF_NAME] != import_config[CONF_NAME]:
updated_data[CONF_NAME] = import_config[CONF_NAME]
# Update entry.data[CONF_APPS] if import_config[CONF_APPS] differs, and
# pop entry.data[CONF_APPS] if import_config[CONF_APPS] is not specified
if entry.data.get(CONF_APPS) != import_config.get(CONF_APPS):
if not import_config.get(CONF_APPS):
remove_apps = True
else:
updated_options[CONF_APPS] = import_config[CONF_APPS]
if entry.data.get(CONF_VOLUME_STEP) != import_config[CONF_VOLUME_STEP]:
updated_options[CONF_VOLUME_STEP] = import_config[CONF_VOLUME_STEP]
if updated_options or updated_data or remove_apps:
new_data = entry.data.copy()
new_options = entry.options.copy()
if remove_apps:
new_data.pop(CONF_APPS)
new_options.pop(CONF_APPS)
if updated_data:
new_data.update(updated_data)
# options are stored in entry options and data so update both
if updated_options:
new_data.update(updated_options)
new_options.update(updated_options)
self.hass.config_entries.async_update_entry(
entry=entry, data=new_data, options=new_options
)
return self.async_abort(reason="updated_entry")
return self.async_abort(reason="already_configured_device")
self._must_show_form = True
# Store config key/value pairs that are not configurable in user step so they
# don't get lost on user step
if import_config.get(CONF_APPS):
self._apps = copy.deepcopy(import_config[CONF_APPS])
return await self.async_step_user(user_input=import_config)
async def async_step_zeroconf(
self, discovery_info: Optional[DiscoveryInfoType] = None
) -> Dict[str, Any]:
"""Handle zeroconf discovery."""
assert self.hass
discovery_info[
CONF_HOST
] = f"{discovery_info[CONF_HOST]}:{discovery_info[CONF_PORT]}"
# Set default name to discovered device name by stripping zeroconf service
# (`type`) from `name`
num_chars_to_strip = len(discovery_info[CONF_TYPE]) + 1
discovery_info[CONF_NAME] = discovery_info[CONF_NAME][:-num_chars_to_strip]
discovery_info[CONF_DEVICE_CLASS] = await async_guess_device_type(
discovery_info[CONF_HOST]
)
# Set unique ID early for discovery flow so we can abort if needed
unique_id = await VizioAsync.get_unique_id(
discovery_info[CONF_HOST],
discovery_info[CONF_DEVICE_CLASS],
session=async_get_clientsession(self.hass, False),
)
await self.async_set_unique_id(unique_id=unique_id, raise_on_progress=True)
self._abort_if_unique_id_configured()
# Form must be shown after discovery so user can confirm/update configuration
# before ConfigEntry creation.
self._must_show_form = True
return await self.async_step_user(user_input=discovery_info)
async def async_step_pair_tv(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""
Start pairing process for TV.
Ask user for PIN to complete pairing process.
"""
errors = {}
# Start pairing process if it hasn't already started
if not self._ch_type and not self._pairing_token:
dev = VizioAsync(
DEVICE_ID,
self._data[CONF_HOST],
self._data[CONF_NAME],
None,
self._data[CONF_DEVICE_CLASS],
session=async_get_clientsession(self.hass, False),
)
pair_data = await dev.start_pair()
if pair_data:
self._ch_type = pair_data.ch_type
self._pairing_token = pair_data.token
return await self.async_step_pair_tv()
return self.async_show_form(
step_id="user",
data_schema=_get_config_schema(self._data),
errors={"base": "cannot_connect"},
)
# Complete pairing process if PIN has been provided
if user_input and user_input.get(CONF_PIN):
dev = VizioAsync(
DEVICE_ID,
self._data[CONF_HOST],
self._data[CONF_NAME],
None,
self._data[CONF_DEVICE_CLASS],
session=async_get_clientsession(self.hass, False),
)
pair_data = await dev.pair(
self._ch_type, self._pairing_token, user_input[CONF_PIN]
)
if pair_data:
self._data[CONF_ACCESS_TOKEN] = pair_data.auth_token
self._must_show_form = True
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
if self.context["source"] == SOURCE_IMPORT:
# If user is pairing via config import, show different message
return await self.async_step_pairing_complete_import()
return await self.async_step_pairing_complete()
# If no data was retrieved, it's assumed that the pairing attempt was not
# successful
errors[CONF_PIN] = "complete_pairing_failed"
return self.async_show_form(
step_id="pair_tv",
data_schema=_get_pairing_schema(user_input),
errors=errors,
)
async def _pairing_complete(self, step_id: str) -> Dict[str, Any]:
"""Handle config flow completion."""
if not self._must_show_form:
return await self._create_entry(self._data)
self._must_show_form = False
return self.async_show_form(
step_id=step_id,
data_schema=vol.Schema({}),
description_placeholders={"access_token": self._data[CONF_ACCESS_TOKEN]},
)
async def async_step_pairing_complete(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""
Complete non-import sourced config flow.
Display final message to user confirming pairing.
"""
return await self._pairing_complete("pairing_complete")
async def async_step_pairing_complete_import(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""
Complete import sourced config flow.
Display final message to user confirming pairing and displaying
access token.
"""
return await self._pairing_complete("pairing_complete_import")
|
import pytest
from homeassistant.components.config import device_registry
from tests.common import mock_device_registry
@pytest.fixture
def client(hass, hass_ws_client):
"""Fixture that can interact with the config manager API."""
hass.loop.run_until_complete(device_registry.async_setup(hass))
yield hass.loop.run_until_complete(hass_ws_client(hass))
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
async def test_list_devices(hass, client, registry):
"""Test list entries."""
registry.async_get_or_create(
config_entry_id="1234",
connections={("ethernet", "12:34:56:78:90:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
registry.async_get_or_create(
config_entry_id="1234",
identifiers={("bridgeid", "1234")},
manufacturer="manufacturer",
model="model",
via_device=("bridgeid", "0123"),
entry_type="service",
)
await client.send_json({"id": 5, "type": "config/device_registry/list"})
msg = await client.receive_json()
dev1, dev2 = [entry.pop("id") for entry in msg["result"]]
assert msg["result"] == [
{
"config_entries": ["1234"],
"connections": [["ethernet", "12:34:56:78:90:AB:CD:EF"]],
"identifiers": [["bridgeid", "0123"]],
"manufacturer": "manufacturer",
"model": "model",
"name": None,
"sw_version": None,
"entry_type": None,
"via_device_id": None,
"area_id": None,
"name_by_user": None,
},
{
"config_entries": ["1234"],
"connections": [],
"identifiers": [["bridgeid", "1234"]],
"manufacturer": "manufacturer",
"model": "model",
"name": None,
"sw_version": None,
"entry_type": "service",
"via_device_id": dev1,
"area_id": None,
"name_by_user": None,
},
]
async def test_update_device(hass, client, registry):
"""Test update entry."""
device = registry.async_get_or_create(
config_entry_id="1234",
connections={("ethernet", "12:34:56:78:90:AB:CD:EF")},
identifiers={("bridgeid", "0123")},
manufacturer="manufacturer",
model="model",
)
assert not device.area_id
assert not device.name_by_user
await client.send_json(
{
"id": 1,
"device_id": device.id,
"area_id": "12345A",
"name_by_user": "Test Friendly Name",
"type": "config/device_registry/update",
}
)
msg = await client.receive_json()
assert msg["result"]["id"] == device.id
assert msg["result"]["area_id"] == "12345A"
assert msg["result"]["name_by_user"] == "Test Friendly Name"
assert len(registry.devices) == 1
|
import os
import mock
import redis
from scrapy import Request, Spider
from scrapy.settings import Settings
from scrapy.utils.test import get_crawler
from unittest import TestCase
from scrapy_redis import connection
from scrapy_redis.dupefilter import RFPDupeFilter
from scrapy_redis.queue import FifoQueue, LifoQueue, PriorityQueue
from scrapy_redis.scheduler import Scheduler
# allow test settings from environment
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = int(os.environ.get('REDIS_PORT', 6379))
def get_spider(*args, **kwargs):
crawler = get_crawler(spidercls=kwargs.pop('spidercls', None),
settings_dict=kwargs.pop('settings_dict', None))
return crawler._create_spider(*args, **kwargs)
class RedisTestMixin(object):
@property
def server(self):
if not hasattr(self, '_redis'):
self._redis = redis.Redis(REDIS_HOST, REDIS_PORT)
return self._redis
def clear_keys(self, prefix):
keys = self.server.keys(prefix + '*')
if keys:
self.server.delete(*keys)
class DupeFilterTest(RedisTestMixin, TestCase):
def setUp(self):
self.key = 'scrapy_redis:tests:dupefilter:'
self.df = RFPDupeFilter(self.server, self.key)
def tearDown(self):
self.clear_keys(self.key)
def test_dupe_filter(self):
req = Request('http://example.com')
self.assertFalse(self.df.request_seen(req))
self.assertTrue(self.df.request_seen(req))
self.df.close('nothing')
class QueueTestMixin(RedisTestMixin):
queue_cls = None
def setUp(self):
self.spider = get_spider(name='myspider')
self.key = 'scrapy_redis:tests:%s:queue' % self.spider.name
self.q = self.queue_cls(self.server, Spider('myspider'), self.key)
def tearDown(self):
self.clear_keys(self.key)
def test_clear(self):
self.assertEqual(len(self.q), 0)
for i in range(10):
            # XXX: we can't use the same url for all requests because the
            # priority queue is backed by a redis sorted set, so identical
            # serialized requests collapse into a single entry and the test
            # would fail. Note that this makes the priority queue act as a
            # request duplication filter whenever the serialized requests are
            # identical, which may be unwanted for repeated requests to the
            # same page even with the dont_filter=True flag. (See the
            # illustrative test below.)
req = Request('http://example.com/?page=%s' % i)
self.q.push(req)
self.assertEqual(len(self.q), 10)
self.q.clear()
self.assertEqual(len(self.q), 0)
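    def test_priority_queue_deduplicates_identical_requests(self):
        # Illustrative sketch (not part of the original suite) of the
        # behaviour noted in test_clear: PriorityQueue keeps serialized
        # requests in a redis sorted set, so pushing the very same request
        # twice leaves a single entry. The list-based FIFO/LIFO queues are
        # unaffected, hence the guard.
        if self.queue_cls is not PriorityQueue:
            return
        req = Request('http://example.com/duplicate')
        self.q.push(req)
        self.q.push(req)
        self.assertEqual(len(self.q), 1)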
class FifoQueueTest(QueueTestMixin, TestCase):
queue_cls = FifoQueue
def test_queue(self):
req1 = Request('http://example.com/page1')
req2 = Request('http://example.com/page2')
self.q.push(req1)
self.q.push(req2)
out1 = self.q.pop()
out2 = self.q.pop(timeout=1)
self.assertEqual(out1.url, req1.url)
self.assertEqual(out2.url, req2.url)
class PriorityQueueTest(QueueTestMixin, TestCase):
queue_cls = PriorityQueue
def test_queue(self):
req1 = Request('http://example.com/page1', priority=100)
req2 = Request('http://example.com/page2', priority=50)
req3 = Request('http://example.com/page2', priority=200)
self.q.push(req1)
self.q.push(req2)
self.q.push(req3)
out1 = self.q.pop()
out2 = self.q.pop(timeout=0)
out3 = self.q.pop(timeout=1)
self.assertEqual(out1.url, req3.url)
self.assertEqual(out2.url, req1.url)
self.assertEqual(out3.url, req2.url)
class LifoQueueTest(QueueTestMixin, TestCase):
queue_cls = LifoQueue
def test_queue(self):
req1 = Request('http://example.com/page1')
req2 = Request('http://example.com/page2')
self.q.push(req1)
self.q.push(req2)
out1 = self.q.pop()
out2 = self.q.pop(timeout=1)
self.assertEqual(out1.url, req2.url)
self.assertEqual(out2.url, req1.url)
class SchedulerTest(RedisTestMixin, TestCase):
def setUp(self):
self.key_prefix = 'scrapy_redis:tests:'
self.queue_key = self.key_prefix + '%(spider)s:requests'
self.dupefilter_key = self.key_prefix + '%(spider)s:dupefilter'
self.spider = get_spider(name='myspider', settings_dict={
'REDIS_HOST': REDIS_HOST,
'REDIS_PORT': REDIS_PORT,
'SCHEDULER_QUEUE_KEY': self.queue_key,
'SCHEDULER_DUPEFILTER_KEY': self.dupefilter_key,
'SCHEDULER_FLUSH_ON_START': False,
'SCHEDULER_PERSIST': False,
'SCHEDULER_SERIALIZER': 'pickle',
'DUPEFILTER_CLASS': 'scrapy_redis.dupefilter.RFPDupeFilter',
})
self.scheduler = Scheduler.from_crawler(self.spider.crawler)
def tearDown(self):
self.clear_keys(self.key_prefix)
def test_scheduler(self):
# default no persist
self.assertFalse(self.scheduler.persist)
self.scheduler.open(self.spider)
self.assertEqual(len(self.scheduler), 0)
req = Request('http://example.com')
self.scheduler.enqueue_request(req)
self.assertTrue(self.scheduler.has_pending_requests())
self.assertEqual(len(self.scheduler), 1)
# dupefilter in action
self.scheduler.enqueue_request(req)
self.assertEqual(len(self.scheduler), 1)
out = self.scheduler.next_request()
self.assertEqual(out.url, req.url)
self.assertFalse(self.scheduler.has_pending_requests())
self.assertEqual(len(self.scheduler), 0)
self.scheduler.close('finish')
def test_scheduler_persistent(self):
# TODO: Improve this test to avoid the need to check for log messages.
self.spider.log = mock.Mock(spec=self.spider.log)
self.scheduler.persist = True
self.scheduler.open(self.spider)
self.assertEqual(self.spider.log.call_count, 0)
self.scheduler.enqueue_request(Request('http://example.com/page1'))
self.scheduler.enqueue_request(Request('http://example.com/page2'))
self.assertTrue(self.scheduler.has_pending_requests())
self.scheduler.close('finish')
self.scheduler.open(self.spider)
self.spider.log.assert_has_calls([
mock.call("Resuming crawl (2 requests scheduled)"),
])
self.assertEqual(len(self.scheduler), 2)
self.scheduler.persist = False
self.scheduler.close('finish')
self.assertEqual(len(self.scheduler), 0)
class ConnectionTest(TestCase):
# We can get a connection from just REDIS_URL.
def test_redis_url(self):
settings = Settings({
'REDIS_URL': 'redis://foo:bar@localhost:9001/42',
})
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
self.assertEqual(connect_args['password'], 'bar')
self.assertEqual(connect_args['db'], 42)
# We can get a connection from REDIS_HOST/REDIS_PORT.
def test_redis_host_port(self):
settings = Settings({
'REDIS_HOST': 'localhost',
'REDIS_PORT': 9001,
})
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
# REDIS_URL takes precedence over REDIS_HOST/REDIS_PORT.
def test_redis_url_precedence(self):
settings = Settings(dict(
REDIS_HOST='baz',
REDIS_PORT=1337,
REDIS_URL='redis://foo:bar@localhost:9001/42'
))
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 9001)
self.assertEqual(connect_args['password'], 'bar')
self.assertEqual(connect_args['db'], 42)
    # We fall back to REDIS_HOST/REDIS_PORT if REDIS_URL is None.
def test_redis_host_port_fallback(self):
settings = Settings(dict(
REDIS_HOST='baz',
REDIS_PORT=1337,
REDIS_URL=None
))
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'baz')
self.assertEqual(connect_args['port'], 1337)
# We use default values for REDIS_HOST/REDIS_PORT.
def test_redis_default(self):
settings = Settings()
server = connection.from_settings(settings)
connect_args = server.connection_pool.connection_kwargs
self.assertEqual(connect_args['host'], 'localhost')
self.assertEqual(connect_args['port'], 6379)
|
import asyncio
import logging
import aiohttp
import async_timeout
import requests
from requests.auth import HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.camera import (
DEFAULT_CONTENT_TYPE,
PLATFORM_SCHEMA,
SUPPORT_STREAM,
Camera,
)
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
CONF_CONTENT_TYPE = "content_type"
CONF_LIMIT_REFETCH_TO_URL_CHANGE = "limit_refetch_to_url_change"
CONF_STILL_IMAGE_URL = "still_image_url"
CONF_STREAM_SOURCE = "stream_source"
CONF_FRAMERATE = "framerate"
DEFAULT_NAME = "Generic Camera"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STILL_IMAGE_URL): cv.template,
vol.Optional(CONF_STREAM_SOURCE): cv.template,
vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_LIMIT_REFETCH_TO_URL_CHANGE, default=False): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_CONTENT_TYPE, default=DEFAULT_CONTENT_TYPE): cv.string,
vol.Optional(CONF_FRAMERATE, default=2): vol.Any(
cv.small_float, cv.positive_int
),
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a generic IP Camera."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities([GenericCamera(hass, config)])
class GenericCamera(Camera):
"""A generic implementation of an IP camera."""
def __init__(self, hass, device_info):
"""Initialize a generic camera."""
super().__init__()
self.hass = hass
self._authentication = device_info.get(CONF_AUTHENTICATION)
self._name = device_info.get(CONF_NAME)
self._still_image_url = device_info[CONF_STILL_IMAGE_URL]
self._stream_source = device_info.get(CONF_STREAM_SOURCE)
self._still_image_url.hass = hass
if self._stream_source is not None:
self._stream_source.hass = hass
self._limit_refetch = device_info[CONF_LIMIT_REFETCH_TO_URL_CHANGE]
self._frame_interval = 1 / device_info[CONF_FRAMERATE]
self._supported_features = SUPPORT_STREAM if self._stream_source else 0
self.content_type = device_info[CONF_CONTENT_TYPE]
self.verify_ssl = device_info[CONF_VERIFY_SSL]
username = device_info.get(CONF_USERNAME)
password = device_info.get(CONF_PASSWORD)
if username and password:
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
self._auth = HTTPDigestAuth(username, password)
else:
self._auth = aiohttp.BasicAuth(username, password=password)
else:
self._auth = None
self._last_url = None
self._last_image = None
@property
def supported_features(self):
"""Return supported features for this camera."""
return self._supported_features
@property
def frame_interval(self):
"""Return the interval between frames of the mjpeg stream."""
return self._frame_interval
def camera_image(self):
"""Return bytes of camera image."""
return asyncio.run_coroutine_threadsafe(
self.async_camera_image(), self.hass.loop
).result()
async def async_camera_image(self):
"""Return a still image response from the camera."""
try:
url = self._still_image_url.async_render(parse_result=False)
except TemplateError as err:
_LOGGER.error("Error parsing template %s: %s", self._still_image_url, err)
return self._last_image
if url == self._last_url and self._limit_refetch:
return self._last_image
        # aiohttp doesn't support DigestAuth yet
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
def fetch():
"""Read image from a URL."""
try:
response = requests.get(
url, timeout=10, auth=self._auth, verify=self.verify_ssl
)
return response.content
except requests.exceptions.RequestException as error:
_LOGGER.error(
"Error getting new camera image from %s: %s", self._name, error
)
return self._last_image
self._last_image = await self.hass.async_add_executor_job(fetch)
# async
else:
try:
websession = async_get_clientsession(
self.hass, verify_ssl=self.verify_ssl
)
with async_timeout.timeout(10):
response = await websession.get(url, auth=self._auth)
self._last_image = await response.read()
except asyncio.TimeoutError:
_LOGGER.error("Timeout getting camera image from %s", self._name)
return self._last_image
except aiohttp.ClientError as err:
_LOGGER.error(
"Error getting new camera image from %s: %s", self._name, err
)
return self._last_image
self._last_url = url
return self._last_image
@property
def name(self):
"""Return the name of this device."""
return self._name
async def stream_source(self):
"""Return the source of the stream."""
if self._stream_source is None:
return None
try:
return self._stream_source.async_render(parse_result=False)
except TemplateError as err:
_LOGGER.error("Error parsing template %s: %s", self._stream_source, err)
return None
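# Example configuration.yaml entry for this platform (all values are
# placeholders):
#
#   camera:
#     - platform: generic
#       still_image_url: http://192.168.1.30/snapshot.jpg
#       stream_source: rtsp://192.168.1.30:554/stream
#       username: admin
#       password: !secret camera_password
#       framerate: 2
#       verify_ssl: false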
|
import collections
import gzip
import io
import logging
import struct
import numpy as np
_END_OF_WORD_MARKER = b'\x00'
# FastText dictionary data structure holds elements of type `entry` which can have `entry_type`
# either `word` (0 :: int8) or `label` (1 :: int8). Here we deal with unsupervised case only
# so we want `word` type.
# See https://github.com/facebookresearch/fastText/blob/master/src/dictionary.h
_DICT_WORD_ENTRY_TYPE_MARKER = b'\x00'
logger = logging.getLogger(__name__)
# Constants for FastText version and FastText file format magic (both int32)
# https://github.com/facebookresearch/fastText/blob/master/src/fasttext.cc#L25
_FASTTEXT_VERSION = np.int32(12)
_FASTTEXT_FILEFORMAT_MAGIC = np.int32(793712314)
# _NEW_HEADER_FORMAT is constructed on the basis of args::save method, see
# https://github.com/facebookresearch/fastText/blob/master/src/args.cc
_NEW_HEADER_FORMAT = [
('dim', 'i'),
('ws', 'i'),
('epoch', 'i'),
('min_count', 'i'),
('neg', 'i'),
('word_ngrams', 'i'), # Unused in loading
('loss', 'i'),
('model', 'i'),
('bucket', 'i'),
('minn', 'i'),
('maxn', 'i'),
('lr_update_rate', 'i'), # Unused in loading
('t', 'd'),
]
_OLD_HEADER_FORMAT = [
('epoch', 'i'),
('min_count', 'i'),
('neg', 'i'),
('word_ngrams', 'i'), # Unused in loading
('loss', 'i'),
('model', 'i'),
('bucket', 'i'),
('minn', 'i'),
('maxn', 'i'),
('lr_update_rate', 'i'), # Unused in loading
('t', 'd'),
]
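# fastText serializes vectors as native C floats, so pick the numpy dtype with
# the matching width; _load_matrix refuses to load if the size is unexpected.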
_FLOAT_SIZE = struct.calcsize('@f')
if _FLOAT_SIZE == 4:
_FLOAT_DTYPE = np.dtype(np.float32)
elif _FLOAT_SIZE == 8:
_FLOAT_DTYPE = np.dtype(np.float64)
else:
_FLOAT_DTYPE = None
def _yield_field_names():
for name, _ in _OLD_HEADER_FORMAT + _NEW_HEADER_FORMAT:
if not name.startswith('_'):
yield name
yield 'raw_vocab'
yield 'vocab_size'
yield 'nwords'
yield 'vectors_ngrams'
yield 'hidden_output'
yield 'ntokens'
_FIELD_NAMES = sorted(set(_yield_field_names()))
Model = collections.namedtuple('Model', _FIELD_NAMES)
"""Holds data loaded from the Facebook binary.
Parameters
----------
dim : int
The dimensionality of the vectors.
ws : int
The window size.
epoch : int
The number of training epochs.
neg : int
If non-zero, indicates that the model uses negative sampling.
loss : int
    If equal to 1, indicates that the model uses hierarchical softmax.
model : int
If equal to 2, indicates that the model uses skip-grams.
bucket : int
The number of buckets.
min_count : int
The threshold below which the model ignores terms.
t : float
The sample threshold.
minn : int
The minimum ngram length.
maxn : int
The maximum ngram length.
raw_vocab : collections.OrderedDict
A map from words (str) to their frequency (int). The order in the dict
corresponds to the order of the words in the Facebook binary.
nwords : int
The number of words.
vocab_size : int
The size of the vocabulary.
vectors_ngrams : numpy.array
This is a matrix that contains vectors learned by the model.
Each row corresponds to a vector.
The number of vectors is equal to the number of words plus the number of buckets.
The number of columns is equal to the vector dimensionality.
hidden_output : numpy.array
This is a matrix that contains the shallow neural network output.
This array has the same dimensions as vectors_ngrams.
May be None - in that case, it is impossible to continue training the model.
"""
def _struct_unpack(fin, fmt):
num_bytes = struct.calcsize(fmt)
return struct.unpack(fmt, fin.read(num_bytes))
def _load_vocab(fin, new_format, encoding='utf-8'):
"""Load a vocabulary from a FB binary.
Before the vocab is ready for use, call the prepare_vocab function and pass
in the relevant parameters from the model.
Parameters
----------
fin : file
An open file pointer to the binary.
new_format: boolean
True if the binary is of the newer format.
encoding : str
The encoding to use when decoding binary data into words.
Returns
-------
tuple
The loaded vocabulary. Keys are words, values are counts.
The vocabulary size.
The number of words.
The number of tokens.
"""
vocab_size, nwords, nlabels = _struct_unpack(fin, '@3i')
# Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
if nlabels > 0:
raise NotImplementedError("Supervised fastText models are not supported")
logger.info("loading %s words for fastText model from %s", vocab_size, fin.name)
ntokens = _struct_unpack(fin, '@q')[0] # number of tokens
if new_format:
pruneidx_size, = _struct_unpack(fin, '@q')
raw_vocab = collections.OrderedDict()
for i in range(vocab_size):
word_bytes = io.BytesIO()
char_byte = fin.read(1)
while char_byte != _END_OF_WORD_MARKER:
word_bytes.write(char_byte)
char_byte = fin.read(1)
word_bytes = word_bytes.getvalue()
try:
word = word_bytes.decode(encoding)
except UnicodeDecodeError:
word = word_bytes.decode(encoding, errors='backslashreplace')
logger.error(
'failed to decode invalid unicode bytes %r; replacing invalid characters, using %r',
word_bytes, word
)
count, _ = _struct_unpack(fin, '@qb')
raw_vocab[word] = count
if new_format:
for j in range(pruneidx_size):
_struct_unpack(fin, '@2i')
return raw_vocab, vocab_size, nwords, ntokens
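# Illustrative sketch (not part of the original module): the vocab layout
# described in the docstring above can be reproduced in memory and fed back
# through _load_vocab. Old format is assumed, so there is no pruneidx block.
def _example_vocab_roundtrip():
    class _NamedBytesIO(io.BytesIO):
        name = '<in-memory>'  # _load_vocab logs fin.name, so provide one

    buf = _NamedBytesIO()
    buf.write(struct.pack('@3i', 1, 1, 0))  # vocab_size, nwords, nlabels
    buf.write(struct.pack('@q', 3))  # ntokens
    buf.write(b'hello' + _END_OF_WORD_MARKER)
    buf.write(struct.pack('@qb', 3, 0))  # word count, entry type (word)
    buf.seek(0)
    raw_vocab, vocab_size, nwords, ntokens = _load_vocab(buf, new_format=False)
    assert raw_vocab == collections.OrderedDict([('hello', 3)])
    return vocab_size, nwords, ntokens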
def _load_matrix(fin, new_format=True):
"""Load a matrix from fastText native format.
Interprets the matrix dimensions and type from the file stream.
Parameters
----------
fin : file
A file handle opened for reading.
new_format : bool, optional
True if the quant_input variable precedes
the matrix declaration. Should be True for newer versions of fastText.
Returns
-------
:class:`numpy.array`
The vectors as an array.
Each vector will be a row in the array.
The number of columns of the array will correspond to the vector size.
"""
if _FLOAT_DTYPE is None:
raise ValueError('bad _FLOAT_SIZE: %r' % _FLOAT_SIZE)
if new_format:
_struct_unpack(fin, '@?') # bool quant_input in fasttext.cc
num_vectors, dim = _struct_unpack(fin, '@2q')
count = num_vectors * dim
#
# numpy.fromfile doesn't play well with gzip.GzipFile as input:
#
# - https://github.com/RaRe-Technologies/gensim/pull/2476
# - https://github.com/numpy/numpy/issues/13470
#
# Until they fix it, we have to apply a workaround. We only apply the
# workaround when it's necessary, because np.fromfile is heavily optimized
# and very efficient (when it works).
#
if isinstance(fin, gzip.GzipFile):
logger.warning(
'Loading model from a compressed .gz file. This can be slow. '
'This is a work-around for a bug in NumPy: https://github.com/numpy/numpy/issues/13470. '
'Consider decompressing your model file for a faster load. '
)
matrix = _fromfile(fin, _FLOAT_DTYPE, count)
else:
matrix = np.fromfile(fin, _FLOAT_DTYPE, count)
assert matrix.shape == (count,), 'expected (%r,), got %r' % (count, matrix.shape)
matrix = matrix.reshape((num_vectors, dim))
return matrix
def _batched_generator(fin, count, batch_size=1e6):
"""Read `count` floats from `fin`.
Batches up read calls to avoid I/O overhead. Keeps no more than batch_size
floats in memory at once.
Yields floats.
"""
while count > batch_size:
batch = _struct_unpack(fin, '@%df' % batch_size)
for f in batch:
yield f
count -= batch_size
batch = _struct_unpack(fin, '@%df' % count)
for f in batch:
yield f
def _fromfile(fin, dtype, count):
"""Reimplementation of numpy.fromfile."""
return np.fromiter(_batched_generator(fin, count), dtype=dtype)
def load(fin, encoding='utf-8', full_model=True):
"""Load a model from a binary stream.
Parameters
----------
fin : file
The readable binary stream.
encoding : str, optional
The encoding to use for decoding text
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class:`~gensim.models._fasttext_bin.Model`
The loaded model.
"""
if isinstance(fin, str):
fin = open(fin, 'rb')
magic, version = _struct_unpack(fin, '@2i')
new_format = magic == _FASTTEXT_FILEFORMAT_MAGIC
header_spec = _NEW_HEADER_FORMAT if new_format else _OLD_HEADER_FORMAT
model = {name: _struct_unpack(fin, fmt)[0] for (name, fmt) in header_spec}
if not new_format:
model.update(dim=magic, ws=version)
raw_vocab, vocab_size, nwords, ntokens = _load_vocab(fin, new_format, encoding=encoding)
model.update(raw_vocab=raw_vocab, vocab_size=vocab_size, nwords=nwords, ntokens=ntokens)
vectors_ngrams = _load_matrix(fin, new_format=new_format)
if not full_model:
hidden_output = None
else:
hidden_output = _load_matrix(fin, new_format=new_format)
assert fin.read() == b'', 'expected to reach EOF'
model.update(vectors_ngrams=vectors_ngrams, hidden_output=hidden_output)
model = {k: v for k, v in model.items() if k in _FIELD_NAMES}
return Model(**model)
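# Usage sketch (hypothetical path, not part of the original module): load a
# Facebook binary without the hidden output matrix and inspect its header.
def _example_load_usage():
    m = load('/tmp/crawl-vectors.bin', full_model=False)
    return m.dim, m.nwords, m.vectors_ngrams.shape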
def _backslashreplace_backport(ex):
"""Replace byte sequences that failed to decode with character escapes.
Does the same thing as errors="backslashreplace" from Python 3. Python 2
lacks this functionality out of the box, so we need to backport it.
Parameters
----------
ex: UnicodeDecodeError
contains arguments of the string and start/end indexes of the bad portion.
Returns
-------
text: unicode
The Unicode string corresponding to the decoding of the bad section.
end: int
The index from which to continue decoding.
Note
----
Works on Py2 only. Py3 already has backslashreplace built-in.
"""
#
# Based on:
# https://stackoverflow.com/questions/42860186/exact-equivalent-of-b-decodeutf-8-backslashreplace-in-python-2
#
bstr, start, end = ex.object, ex.start, ex.end
text = u''.join('\\x{:02x}'.format(ord(c)) for c in bstr[start:end])
return text, end
def _sign_model(fout):
"""
Write signature of the file in Facebook's native fastText `.bin` format
to the binary output stream `fout`. Signature includes magic bytes and version.
Name mimics original C++ implementation, see
[FastText::signModel](https://github.com/facebookresearch/fastText/blob/master/src/fasttext.cc)
Parameters
----------
fout: writeable binary stream
"""
fout.write(_FASTTEXT_FILEFORMAT_MAGIC.tobytes())
fout.write(_FASTTEXT_VERSION.tobytes())
def _conv_field_to_bytes(field_value, field_type):
"""
    Auxiliary function that converts `field_value` to bytes based on the requested `field_type`,
for saving to the binary file.
Parameters
----------
    field_value: numerical
        the numeric value to be converted to its binary representation
field_type: str
currently supported `field_types` are `i` for 32-bit integer and `d` for 64-bit float
"""
if field_type == 'i':
return (np.int32(field_value).tobytes())
elif field_type == 'd':
return (np.float64(field_value).tobytes())
else:
        raise NotImplementedError('Currently conversion to "%s" type is not implemented.' % field_type)
def _get_field_from_model(model, field):
"""
Extract `field` from `model`.
Parameters
----------
model: gensim.models.fasttext.FastText
model from which `field` is extracted
field: str
requested field name, fields are listed in the `_NEW_HEADER_FORMAT` list
"""
if field == 'bucket':
return model.wv.bucket
elif field == 'dim':
return model.vector_size
elif field == 'epoch':
return model.epochs
elif field == 'loss':
        # `loss` => hs: 1, ns: 2, softmax: 3, one-vs-all: 4
        # ns = negative sampling loss (default)
        # hs = hierarchical softmax loss
        # softmax = softmax loss
        # one-vs-all = one vs all loss (supervised)
        if model.hs == 1:
            return 1
        elif model.hs == 0 and model.negative == 0:
            return 1
        elif model.hs == 0:
            return 2
elif field == 'maxn':
return model.wv.max_n
elif field == 'minn':
return model.wv.min_n
elif field == 'min_count':
return model.min_count
elif field == 'model':
# `model` => cbow:1, sg:2, sup:3
        # cbow = continuous bag of words (default)
# sg = skip-gram
# sup = supervised
return 2 if model.sg == 1 else 1
elif field == 'neg':
return model.negative
elif field == 't':
return model.sample
elif field == 'word_ngrams':
        # This is not used when loading in gensim, so use the default from the FB C++ code
return 1
elif field == 'ws':
return model.window
elif field == 'lr_update_rate':
        # This is not used when loading in gensim, so use the default from the FB C++ code
return 100
else:
        msg = 'Extraction of header field "' + field + '" from Gensim FastText object not implemented.'
raise NotImplementedError(msg)
def _args_save(fout, model, fb_fasttext_parameters):
"""
Saves header with `model` parameters to the binary stream `fout` containing a model in the Facebook's
native fastText `.bin` format.
Name mimics original C++ implementation, see
[Args::save](https://github.com/facebookresearch/fastText/blob/master/src/args.cc)
Parameters
----------
fout: writeable binary stream
stream to which model is saved
model: gensim.models.fasttext.FastText
saved model
fb_fasttext_parameters: dictionary
        dictionary containing the parameters `lr_update_rate` and `word_ngrams`, which are
        unused by the gensim implementation, so they have to be provided externally
"""
for field, field_type in _NEW_HEADER_FORMAT:
if field in fb_fasttext_parameters:
field_value = fb_fasttext_parameters[field]
else:
field_value = _get_field_from_model(model, field)
fout.write(_conv_field_to_bytes(field_value, field_type))
def _dict_save(fout, model, encoding):
"""
    Saves the dictionary from `model` to the binary stream `fout` containing a model in Facebook's
native fastText `.bin` format.
Name mimics the original C++ implementation
[Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
Parameters
----------
fout: writeable binary stream
stream to which the dictionary from the model is saved
model: gensim.models.fasttext.FastText
the model that contains the dictionary to save
encoding: str
string encoding used in the output
"""
# In the FB format the dictionary can contain two types of entries, i.e.
# words and labels. The first two fields of the dictionary contain
# the dictionary size (size_) and the number of words (nwords_).
# In the unsupervised case we have only words (no labels). Hence both fields
# are equal.
fout.write(np.int32(len(model.wv)).tobytes())
fout.write(np.int32(len(model.wv)).tobytes())
# nlabels=0 <- no labels we are in unsupervised mode
fout.write(np.int32(0).tobytes())
fout.write(np.int64(model.corpus_total_words).tobytes())
    # prunedidx_size_=-1, the -1 value denotes no pruning index (pruning is only supported in supervised mode)
fout.write(np.int64(-1))
for word in model.wv.index_to_key:
word_count = model.wv.get_vecattr(word, 'count')
fout.write(word.encode(encoding))
fout.write(_END_OF_WORD_MARKER)
fout.write(np.int64(word_count).tobytes())
fout.write(_DICT_WORD_ENTRY_TYPE_MARKER)
# We are in unsupervised case, therefore pruned_idx is empty, so we do not need to write anything else
def _input_save(fout, model):
"""
Saves word and ngram vectors from `model` to the binary stream `fout` containing a model in
the Facebook's native fastText `.bin` format.
Corresponding C++ fastText code:
[DenseMatrix::save](https://github.com/facebookresearch/fastText/blob/master/src/densematrix.cc)
Parameters
----------
fout: writeable binary stream
stream to which the vectors are saved
model: gensim.models.fasttext.FastText
the model that contains the vectors to save
"""
vocab_n, vocab_dim = model.wv.vectors_vocab.shape
ngrams_n, ngrams_dim = model.wv.vectors_ngrams.shape
assert vocab_dim == ngrams_dim
assert vocab_n == len(model.wv)
assert ngrams_n == model.wv.bucket
fout.write(struct.pack('@2q', vocab_n + ngrams_n, vocab_dim))
fout.write(model.wv.vectors_vocab.tobytes())
fout.write(model.wv.vectors_ngrams.tobytes())
def _output_save(fout, model):
"""
Saves output layer of `model` to the binary stream `fout` containing a model in
the Facebook's native fastText `.bin` format.
Corresponding C++ fastText code:
[DenseMatrix::save](https://github.com/facebookresearch/fastText/blob/master/src/densematrix.cc)
Parameters
----------
fout: writeable binary stream
the model that contains the output layer to save
model: gensim.models.fasttext.FastText
saved model
"""
if model.hs:
hidden_output = model.syn1
if model.negative:
hidden_output = model.syn1neg
hidden_n, hidden_dim = hidden_output.shape
fout.write(struct.pack('@2q', hidden_n, hidden_dim))
fout.write(hidden_output.tobytes())
def _save_to_stream(model, fout, fb_fasttext_parameters, encoding):
"""
Saves word embeddings to binary stream `fout` using the Facebook's native fasttext `.bin` format.
Parameters
----------
fout: file name or writeable binary stream
stream to which the word embeddings are saved
model: gensim.models.fasttext.FastText
the model that contains the word embeddings to save
fb_fasttext_parameters: dictionary
        dictionary containing the parameters `lr_update_rate` and `word_ngrams`, which are
        unused by the gensim implementation, so they have to be provided externally
encoding: str
encoding used in the output file
"""
_sign_model(fout)
_args_save(fout, model, fb_fasttext_parameters)
_dict_save(fout, model, encoding)
fout.write(struct.pack('@?', False)) # Save 'quant_', which is False for unsupervised models
# Save words and ngrams vectors
_input_save(fout, model)
    fout.write(struct.pack('@?', False))  # Save 'qout_', which is False for unsupervised models
# Save output layers of the model
_output_save(fout, model)
def save(model, fout, fb_fasttext_parameters, encoding):
"""
Saves word embeddings to the Facebook's native fasttext `.bin` format.
Parameters
----------
fout: file name or writeable binary stream
stream to which model is saved
model: gensim.models.fasttext.FastText
saved model
fb_fasttext_parameters: dictionary
        dictionary containing the parameters `lr_update_rate` and `word_ngrams`, which are
        unused by the gensim implementation, so they have to be provided externally
encoding: str
encoding used in the output file
Notes
-----
    Unfortunately, there is no documentation of Facebook's native fasttext `.bin` format.
    This is just a reimplementation of
[FastText::saveModel](https://github.com/facebookresearch/fastText/blob/master/src/fasttext.cc)
Based on v0.9.1, more precisely commit da2745fcccb848c7a225a7d558218ee4c64d5333
Code follows the original C++ code naming.
"""
if isinstance(fout, str):
with open(fout, "wb") as fout_stream:
_save_to_stream(model, fout_stream, fb_fasttext_parameters, encoding)
else:
_save_to_stream(model, fout, fb_fasttext_parameters, encoding)
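# Usage sketch (not part of the original module): gensim does not track
# `lr_update_rate` or `word_ngrams`, so callers must supply them when writing
# a Facebook-compatible binary. `trained_model` stands for a hypothetical,
# already trained gensim.models.fasttext.FastText instance.
def _example_save_usage(trained_model):
    save(trained_model, '/tmp/model.bin',
         fb_fasttext_parameters={'lr_update_rate': 100, 'word_ngrams': 1},
         encoding='utf-8')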
|
from flask import Flask, jsonify, request
from flasgger import Schema, Swagger, SwaggerView, fields
app = Flask(__name__)
app.config['SWAGGER'] = {
"title": "API using Marshmallow",
"uiversion": 2
}
swag = Swagger(app)
class User(Schema):
username = fields.Str(required=True)
age = fields.Int(required=True, min=18)
tags = fields.List(fields.Str())
class UserPostView(SwaggerView):
parameters = User
# parameters = [
# {
# 'name': 'body',
# 'in': 'body',
# 'schema': User
# }
# ]
responses = {
200: {
'description': 'A single user',
'schema': User
}
}
# optional
# definitions = {
    #     'User': User  # if not informed, will be extracted from usage above
# }
tags = ['users']
# consumes = ['application/json']
# produces = ['application/json']
# schemes = ['http', 'https']
# security = []
# deprecated = False
# operationId = 'user'
# externalDocs = {'description': 'foo', 'url': 'bar.com'}
summary = "Will be overwritten by first line of docstring"
description = "will be overwritten by otehr lines"
def post(self):
"""
A simple post
Do it
---
# This value overwrites the attributes above
deprecated: true
"""
return jsonify(request.json)
app.add_url_rule(
'/user',
view_func=UserPostView.as_view('user'),
methods=['POST']
)
if __name__ == "__main__":
app.run(debug=True)
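# Example request against the view above, assuming the app is running locally
# on Flask's default port (values are placeholders):
#
#   curl -X POST http://localhost:5000/user \
#        -H "Content-Type: application/json" \
#        -d '{"username": "ana", "age": 21, "tags": ["vip"]}'
#
# The Swagger UI generated from the Marshmallow schema is served at /apidocs/.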
|
from django.conf import settings
from google.cloud.translate_v3 import TranslationServiceClient
from google.oauth2 import service_account
from weblate.machinery.base import MissingConfiguration
from weblate.machinery.google import GoogleBaseTranslation
class GoogleV3Translation(GoogleBaseTranslation):
"""Google Translate API v3 machine translation support."""
setup = None
name = "Google Translate API v3"
max_score = 90
def __init__(self):
"""Check configuration."""
super().__init__()
credentials = settings.MT_GOOGLE_CREDENTIALS
project = settings.MT_GOOGLE_PROJECT
location = settings.MT_GOOGLE_LOCATION
if credentials is None or project is None:
raise MissingConfiguration("Google Translate requires API key and project")
credentials = service_account.Credentials.from_service_account_file(credentials)
self.client = TranslationServiceClient(credentials=credentials)
self.parent = f"projects/{project}/locations/{location}"
def download_languages(self):
"""List of supported languages."""
response = self.client.get_supported_languages(request={"parent": self.parent})
return [language.language_code for language in response.languages]
def download_translations(
self,
source,
language,
text: str,
unit,
user,
search: bool,
threshold: int = 75,
):
"""Download list of possible translations from a service."""
request = {
"parent": self.parent,
"contents": [text],
"target_language_code": language,
"source_language_code": source,
}
response = self.client.translate_text(request)
yield {
"text": response.translations[0].translated_text,
"quality": self.max_score,
"service": self.name,
"source": text,
}
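# Example Django settings for enabling this backend (values are placeholders):
#
#   MT_GOOGLE_CREDENTIALS = "/etc/weblate/gcp-service-account.json"
#   MT_GOOGLE_PROJECT = "my-translation-project"
#   MT_GOOGLE_LOCATION = "global"
#
# MT_GOOGLE_CREDENTIALS points at a service account JSON file; the constructor
# above raises MissingConfiguration when credentials or project are missing.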
|
import diamond.collector
import os
import time
from diamond.collector import str_to_bool
try:
import psutil
except ImportError:
psutil = None
class CPUCollector(diamond.collector.Collector):
PROC = '/proc/stat'
INTERVAL = 1
MAX_VALUES = {
'user': diamond.collector.MAX_COUNTER,
'nice': diamond.collector.MAX_COUNTER,
'system': diamond.collector.MAX_COUNTER,
'idle': diamond.collector.MAX_COUNTER,
'iowait': diamond.collector.MAX_COUNTER,
'irq': diamond.collector.MAX_COUNTER,
'softirq': diamond.collector.MAX_COUNTER,
'steal': diamond.collector.MAX_COUNTER,
'guest': diamond.collector.MAX_COUNTER,
'guest_nice': diamond.collector.MAX_COUNTER,
}
def get_default_config_help(self):
config_help = super(CPUCollector, self).get_default_config_help()
config_help.update({
'percore': 'Collect metrics per cpu core or just total',
'simple': 'only return aggregate CPU% metric',
'normalize': 'for cpu totals, divide by the number of CPUs',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(CPUCollector, self).get_default_config()
config.update({
'path': 'cpu',
'percore': 'True',
'xenfix': None,
'simple': 'False',
'normalize': 'False',
})
return config
def collect(self):
"""
        Collect cpu stats
"""
def cpu_time_list():
"""
get cpu time list
"""
statFile = open(self.PROC, "r")
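            # The first line of /proc/stat looks like "cpu  user nice system
            # idle ..."; splitting on single spaces leaves an empty field
            # after "cpu", so [2:6] selects user, nice, system and idle.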
timeList = statFile.readline().split(" ")[2:6]
for i in range(len(timeList)):
timeList[i] = int(timeList[i])
statFile.close()
return timeList
def cpu_delta_time(interval):
"""
Get before and after cpu times for usage calc
"""
pre_check = cpu_time_list()
time.sleep(interval)
post_check = cpu_time_list()
for i in range(len(pre_check)):
post_check[i] -= pre_check[i]
return post_check
if os.access(self.PROC, os.R_OK):
# If simple only return aggregate CPU% metric
if str_to_bool(self.config['simple']):
dt = cpu_delta_time(self.INTERVAL)
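                # dt holds deltas for user, nice, system and idle; busy CPU%
                # is 100 minus the idle share of the sampled interval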
cpuPct = 100 - (dt[len(dt) - 1] * 100.00 / sum(dt))
self.publish('percent', str('%.4f' % cpuPct))
return True
results = {}
# Open file
file = open(self.PROC)
            ncpus = -1  # don't want to count the 'cpu' (total) line.
for line in file:
if not line.startswith('cpu'):
continue
ncpus += 1
elements = line.split()
cpu = elements[0]
if cpu == 'cpu':
cpu = 'total'
elif not str_to_bool(self.config['percore']):
continue
results[cpu] = {}
if len(elements) >= 2:
results[cpu]['user'] = elements[1]
if len(elements) >= 3:
results[cpu]['nice'] = elements[2]
if len(elements) >= 4:
results[cpu]['system'] = elements[3]
if len(elements) >= 5:
results[cpu]['idle'] = elements[4]
if len(elements) >= 6:
results[cpu]['iowait'] = elements[5]
if len(elements) >= 7:
results[cpu]['irq'] = elements[6]
if len(elements) >= 8:
results[cpu]['softirq'] = elements[7]
if len(elements) >= 9:
results[cpu]['steal'] = elements[8]
if len(elements) >= 10:
results[cpu]['guest'] = elements[9]
if len(elements) >= 11:
results[cpu]['guest_nice'] = elements[10]
# Close File
file.close()
metrics = {'cpu_count': ncpus}
for cpu in results.keys():
stats = results[cpu]
for s in stats.keys():
# Get Metric Name
metric_name = '.'.join([cpu, s])
# Get actual data
if ((str_to_bool(self.config['normalize']) and
cpu == 'total' and
ncpus > 0)):
metrics[metric_name] = self.derivative(
metric_name,
long(stats[s]),
self.MAX_VALUES[s]) / ncpus
else:
metrics[metric_name] = self.derivative(
metric_name,
long(stats[s]),
self.MAX_VALUES[s])
# Check for a bug in xen where the idle time is doubled for guest
# See https://bugzilla.redhat.com/show_bug.cgi?id=624756
if self.config['xenfix'] is None or self.config['xenfix'] is True:
if os.path.isdir('/proc/xen'):
total = 0
for metric_name in metrics.keys():
if 'cpu0.' in metric_name:
total += int(metrics[metric_name])
if total > 110:
self.config['xenfix'] = True
for mname in metrics.keys():
if '.idle' in mname:
metrics[mname] = float(metrics[mname]) / 2
elif total > 0:
self.config['xenfix'] = False
else:
self.config['xenfix'] = False
# Publish Metric Derivative
for metric_name in metrics.keys():
self.publish(metric_name,
metrics[metric_name],
precision=2)
return True
else:
if not psutil:
self.log.error('Unable to import psutil')
self.log.error('No cpu metrics retrieved')
return None
cpu_time = psutil.cpu_times(True)
cpu_count = len(cpu_time)
total_time = psutil.cpu_times()
for i in range(0, len(cpu_time)):
metric_name = 'cpu' + str(i)
self.publish(
metric_name + '.user',
self.derivative(metric_name + '.user',
cpu_time[i].user,
self.MAX_VALUES['user']),
precision=2)
if hasattr(cpu_time[i], 'nice'):
self.publish(
metric_name + '.nice',
self.derivative(metric_name + '.nice',
cpu_time[i].nice,
self.MAX_VALUES['nice']),
precision=2)
self.publish(
metric_name + '.system',
self.derivative(metric_name + '.system',
cpu_time[i].system,
self.MAX_VALUES['system']),
precision=2)
self.publish(
metric_name + '.idle',
self.derivative(metric_name + '.idle',
cpu_time[i].idle,
self.MAX_VALUES['idle']),
precision=2)
metric_name = 'total'
self.publish(
metric_name + '.user',
self.derivative(metric_name + '.user',
total_time.user,
self.MAX_VALUES['user']) / cpu_count,
precision=2)
if hasattr(total_time, 'nice'):
self.publish(
metric_name + '.nice',
self.derivative(metric_name + '.nice',
total_time.nice,
self.MAX_VALUES['nice']) / cpu_count,
precision=2)
self.publish(
metric_name + '.system',
self.derivative(metric_name + '.system',
total_time.system,
self.MAX_VALUES['system']) / cpu_count,
precision=2)
self.publish(
metric_name + '.idle',
self.derivative(metric_name + '.idle',
total_time.idle,
self.MAX_VALUES['idle']) / cpu_count,
precision=2)
self.publish('cpu_count', psutil.cpu_count())
return True
return None
|
from unittest import TestCase
from weblate.checks.tests.test_checks import MockLanguage, MockUnit
from weblate.trans.simplediff import html_diff
from weblate.trans.templatetags.translations import format_translation
class DiffTest(TestCase):
"""Testing of HTML diff function."""
def test_same(self):
self.assertEqual(html_diff("first text", "first text"), "first text")
def test_add(self):
self.assertEqual(
html_diff("first text", "first new text"), "first <ins>new </ins>text"
)
def test_unicode(self):
self.assertEqual(
html_diff("zkouška text", "zkouška nový text"),
"zkouška <ins>nový </ins>text",
)
def test_remove(self):
self.assertEqual(
html_diff("first old text", "first text"), "first <del>old </del>text"
)
def test_replace(self):
self.assertEqual(
html_diff("first old text", "first new text"),
"first <del>old</del><ins>new</ins> text",
)
def test_format_diff(self):
unit = MockUnit(source="Hello word!")
self.assertEqual(
format_translation(
unit.source,
unit.translation.component.source_language,
diff="Hello world!",
)["items"][0]["content"],
"Hello wor<del>l</del>d!",
)
def test_format_diff_whitespace(self):
unit = MockUnit(source="Hello world!")
self.assertEqual(
format_translation(
unit.source,
unit.translation.component.source_language,
diff="Hello world! ",
)["items"][0]["content"],
'Hello world!<del><span class="space-space">'
'<span class="sr-only"> </span></span></del>',
)
def test_format_entities(self):
unit = MockUnit(source="'word'")
self.assertEqual(
format_translation(
unit.source,
unit.translation.component.source_language,
diff='"word"',
)["items"][0]["content"],
"<del>"</del><ins>'</ins>word<del>"</del><ins>'</ins>",
)
def test_fmtsearchmatch(self):
self.assertEqual(
format_translation(
"Hello world!", MockLanguage("en"), search_match="hello"
)["items"][0]["content"],
'<span class="hlmatch">Hello</span> world!',
)
|
import json
import requests
import base64
import hmac
import hashlib
from flask import current_app
from lemur.common.utils import parse_certificate
from lemur.common.utils import get_authority_key
from lemur.constants import CRLReason
from lemur.plugins.bases import IssuerPlugin
from lemur.plugins import lemur_cfssl as cfssl
from lemur.extensions import metrics
class CfsslIssuerPlugin(IssuerPlugin):
title = "CFSSL"
slug = "cfssl-issuer"
description = "Enables the creation of certificates by CFSSL private CA"
version = cfssl.VERSION
author = "Charles Hendrie"
author_url = "https://github.com/netflix/lemur.git"
def __init__(self, *args, **kwargs):
self.session = requests.Session()
super(CfsslIssuerPlugin, self).__init__(*args, **kwargs)
def create_certificate(self, csr, issuer_options):
"""
Creates a CFSSL certificate.
:param csr:
:param issuer_options:
:return:
"""
current_app.logger.info(
"Requesting a new cfssl certificate with csr: {0}".format(csr)
)
url = "{0}{1}".format(current_app.config.get("CFSSL_URL"), "/api/v1/cfssl/sign")
data = {"certificate_request": csr}
data = json.dumps(data)
try:
hex_key = current_app.config.get("CFSSL_KEY")
key = bytes.fromhex(hex_key)
except (ValueError, NameError, TypeError):
# unable to find CFSSL_KEY in config, continue using normal sign method
pass
else:
data = data.encode()
token = base64.b64encode(
hmac.new(key, data, digestmod=hashlib.sha256).digest()
)
data = base64.b64encode(data)
data = json.dumps(
{"token": token.decode("utf-8"), "request": data.decode("utf-8")}
)
url = "{0}{1}".format(
current_app.config.get("CFSSL_URL"), "/api/v1/cfssl/authsign"
)
response = self.session.post(
url, data=data.encode(encoding="utf_8", errors="strict")
)
if response.status_code > 399:
metrics.send("cfssl_create_certificate_failure", "counter", 1)
raise Exception("Error creating cert. Please check your CFSSL API server")
response_json = json.loads(response.content.decode("utf_8"))
cert = response_json["result"]["certificate"]
parsed_cert = parse_certificate(cert)
metrics.send("cfssl_create_certificate_success", "counter", 1)
return (
cert,
current_app.config.get("CFSSL_INTERMEDIATE"),
parsed_cert.serial_number,
)
@staticmethod
def create_authority(options):
"""
Creates an authority, this authority is then used by Lemur to allow a user
to specify which Certificate Authority they want to sign their certificate.
:param options:
:return:
"""
role = {"username": "", "password": "", "name": "cfssl"}
return current_app.config.get("CFSSL_ROOT"), "", [role]
def revoke_certificate(self, certificate, reason):
"""Revoke a CFSSL certificate."""
base_url = current_app.config.get("CFSSL_URL")
create_url = "{0}/api/v1/cfssl/revoke".format(base_url)
crl_reason = CRLReason.unspecified
if "crl_reason" in reason:
crl_reason = CRLReason[reason["crl_reason"]]
data = (
'{"serial": "'
+ certificate.external_id
+ '","authority_key_id": "'
+ get_authority_key(certificate.body)
+ '", "reason": "'
            + crl_reason.name
+ '"}'
)
current_app.logger.debug("Revoking cert: {0}".format(data))
response = self.session.post(
create_url, data=data.encode(encoding="utf_8", errors="strict")
)
if response.status_code > 399:
metrics.send("cfssl_revoke_certificate_failure", "counter", 1)
raise Exception("Error revoking cert. Please check your CFSSL API server")
metrics.send("cfssl_revoke_certificate_success", "counter", 1)
return response.json()
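# Example Lemur configuration consumed by this plugin (placeholder values):
#
#   CFSSL_URL = "http://localhost:8888"
#   CFSSL_ROOT = "<PEM of the CA certificate>"
#   CFSSL_INTERMEDIATE = "<PEM of the intermediate chain>"
#   CFSSL_KEY = "<hex-encoded HMAC key>"  # optional; enables the authsign endpoint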
|
from lark import Token
from _json_parser import json_parser
def ignore_errors(e):
if e.token.type == 'COMMA':
# Skip comma
return True
elif e.token.type == 'SIGNED_NUMBER':
# Try to feed a comma and retry the number
e.puppet.feed_token(Token('COMMA', ','))
e.puppet.feed_token(e.token)
return True
# Unhandled error. Will stop parse and raise exception
return False
def main():
s = "[0 1, 2,, 3,,, 4, 5 6 ]"
res = json_parser.parse(s, on_error=ignore_errors)
print(res) # prints [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
main()
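# Note: with the LALR parser, `on_error` receives the UnexpectedToken error
# whenever the next token cannot be consumed. Returning True tells Lark the
# handler repaired the situation and parsing should continue; returning False
# re-raises the error. Here stray commas are simply skipped, while two
# adjacent numbers are repaired by feeding the missing comma through the
# parser puppet before re-feeding the number itself.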
|