import os
import shutil
import tempfile
import unittest
junitxml = None
# Basic test of xmlresult functionality of reading gtest xml files and
# summarizing their results into a new file.
class MockResult():
def __init__(self, directory, filename, suites=[], noSuitesRoot=False):
self.filename = os.path.join(directory, filename)
self.suites = suites
# whether to suppress <testsuites> root node
self.noSuitesRoot = noSuitesRoot
class MockSuite():
def __init__(self, cases, name, tests=0, errors=0, fail=0, time=1):
self.cases = cases
self.tests = tests
self.time = time
self.fail = fail
self.errors = errors
self.name = name
class MockCase():
def __init__(self, name, errorList=[], classname='', time=1):
self.classname = classname
self.name = name
self.time = time
self.errorList = errorList
class MockErrorType(Exception):
def __init__(self, value, etype=''):
self.value = value
self.__name__ = value
self.type = etype
def _writeMockResultFile(result):
"""writes a test result as a gtest compatible test runner would do"""
with open(result.filename, 'w') as f:
f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
if len(result.suites) > 1 or result.noSuitesRoot is False:
f.write('<testsuites>\n')
for suite in result.suites:
f.write('<testsuite tests="'+str(suite.tests)+'" failures="'+str(suite.fail)+'" time="'+str(suite.time)+'" errors="'+str(suite.errors)+'" name="'+suite.name+'">\n')
for case in suite.cases:
f.write('<testcase name="'+case.name+'" status="run" time="'+str(case.time)+'" classname="'+case.classname+'">\n')
for error in case.errorList:
f.write('<failure message="'+error.value+'" type="'+error.value+'"/>\n')
f.write('</testcase>\n')
f.write('</testsuite>\n')
if len(result.suites) > 1 or result.noSuitesRoot is False:
f.write('</testsuites>\n')
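# For illustration (not part of the original tests): a MockResult with a single
# suite holding one passing case, e.g.
# MockResult(d, 'succ1.xml', [MockSuite([MockCase('succCase')], 'succ1suite', 1, 0, 0, 1)]),
# is written out roughly as
#   <?xml version="1.0" encoding="UTF-8"?>
#   <testsuites>
#   <testsuite tests="1" failures="0" time="1" errors="0" name="succ1suite">
#   <testcase name="succCase" status="run" time="1" classname="">
#   </testcase>
#   </testsuite>
#   </testsuites>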
class XmlResultTestGeneration(unittest.TestCase):
def setUp(self):
global junitxml
if junitxml is None:
import rosunit.junitxml
junitxml = rosunit.junitxml
def tearDown(self):
pass
def testGenerateError(self):
error = junitxml.TestError('error_type', 'error_text')
error_str = error.xml()
self.assertEquals(b"""<error type="error_type"><![CDATA[
error_text
]]></error>""", error_str)
def testGenerateFailure(self):
failure = junitxml.TestFailure('failure_type', 'failure_text')
failure_str = failure.xml()
self.assertEquals(b"""<failure type="failure_type"><![CDATA[
failure_text
]]></failure>""", failure_str)
def testGenerateTestCaseResult(self):
testcase = junitxml.TestCaseResult('test_case')
error = junitxml.TestError('error_type', 'error_text')
error_str = error.xml()
failure = junitxml.TestFailure('failure_type', 'failure_text')
failure_str = failure.xml()
testcase.add_error(error)
testcase.add_failure(failure)
testcase_str = testcase.xml()
self.assertEquals(b"""<testcase classname="" name="test_case" time="0.0"><failure type="failure_type"><![CDATA[
failure_text
]]></failure><error type="error_type"><![CDATA[
error_text
]]></error></testcase>""", testcase_str)
class XmlResultTestRead(unittest.TestCase):
def setUp(self):
# lazy-import to get coverage
global junitxml
if junitxml is None:
import rosunit.junitxml
junitxml = rosunit.junitxml
self.directory = tempfile.mkdtemp()
# setting up mock results as dict so results can be checked individually
self.mockresults = {
'empty': MockResult(self.directory, 'empty.xml', []),
'emptysuite': MockResult(self.directory, 'emptysuite.xml', [MockSuite([], 'emptySuite', 0, 0, 0, 0)]),
'succ1': MockResult(self.directory, 'succ1.xml', [MockSuite([MockCase('succCase')], 'succ1suite', 1, 0, 0, 1)]),
'err1': MockResult(self.directory, 'err1.xml', [MockSuite([MockCase('errCase')], 'err1suite', 1, 1, 0, 1)]),
'fail1': MockResult(self.directory, 'fail1.xml', [MockSuite([MockCase('failCase')], 'fail1suite', 1, 0, 1, 1)]),
'noroot': MockResult(self.directory, 'succ1.xml', [MockSuite([MockCase('succCase')], 'succ1suite', 1, 0, 0, 1)], noSuitesRoot=True),
'multicase': MockResult(self.directory,
'multicase.xml',
[MockSuite([MockCase('succCase'),
MockCase('errCase'),
MockCase('failCase')],
'succ1suite', 3, 1, 1, time=3)]),
'multisuite': MockResult(self.directory,
'multisuite.xml',
[MockSuite([MockCase('succCase')], 'succ1suite', 1, 0, 0, 1),
MockSuite([MockCase('errCase')], 'err1suite', 1, 1, 0, 1),
MockSuite([MockCase('failCase')], 'fail1suite', 1, 0, 1, 1)])
}
for name, result in self.mockresults.items():
_writeMockResultFile(result)
def tearDown(self):
shutil.rmtree(self.directory)
# pass
def testReadNoSuites(self):
result = junitxml.read(self.mockresults['empty'].filename, 'fooname')
self.assert_(result is not None)
self.assertEquals(0.0, result.time)
self.assertEquals(0, result.num_tests)
self.assertEquals(0, result.num_errors)
self.assertEquals(0, result.num_failures)
def testReadEmptySuite(self):
result = junitxml.read(self.mockresults['emptysuite'].filename, 'fooname')
self.assert_(result is not None)
self.assertEquals(0.0, result.time)
self.assertEquals(0, result.num_tests)
self.assertEquals(0, result.num_errors)
self.assertEquals(0, result.num_failures)
def testReadSuccess(self):
result = junitxml.read(self.mockresults['succ1'].filename, 'fooname')
self.assert_(result is not None)
self.assertEquals(1.0, result.time)
self.assertEquals(1, result.num_tests)
self.assertEquals(0, result.num_errors)
self.assertEquals(0, result.num_failures)
def testReadError(self):
result = junitxml.read(self.mockresults['err1'].filename, 'fooname')
self.assert_(result is not None)
self.assertEquals(1.0, result.time)
self.assertEquals(1, result.num_tests)
self.assertEquals(1, result.num_errors)
self.assertEquals(0, result.num_failures)
def testReadFail(self):
result = junitxml.read(self.mockresults['fail1'].filename, 'fooname')
self.assert_(result is not None)
self.assertEquals(1.0, result.time)
self.assertEquals(1, result.num_tests)
self.assertEquals(0, result.num_errors)
self.assertEquals(1, result.num_failures)
def testReadMulticase(self):
result = junitxml.read(self.mockresults['multicase'].filename, 'fooname')
self.assert_(result is not None)
self.assertEquals(3.0, result.time)
self.assertEquals(3, result.num_tests)
self.assertEquals(1, result.num_errors)
self.assertEquals(1, result.num_failures)
def testReadMultisuite(self):
result = junitxml.read(self.mockresults['multisuite'].filename, 'fooname')
self.assert_(result is not None)
self.assertEquals(3.0, result.time)
self.assertEquals(3, result.num_tests)
self.assertEquals(1, result.num_errors)
self.assertEquals(1, result.num_failures)
|
import os
import unittest
from trashcli.fstab import volume_of
from trashcli.list_mount_points import os_mount_points
from trashcli.restore import RestoreCmd, TrashDirectories, TrashDirectory, \
TrashedFiles, TrashDirectories2
from .files import require_empty_dir
from trashcli.fs import remove_file, contents_of
from .fake_trash_dir import a_trashinfo
from .files import make_file
from unit_tests.myStringIO import StringIO
from trashcli import restore
class TestRestoreTrash(unittest.TestCase):
def test_it_does_nothing_when_no_file_have_been_found_in_current_dir(self):
self.user.chdir('/')
self.user.run_restore()
self.assertEqual("No files trashed from current dir ('/')\n",
self.user.stdout())
def test_gives_an_error_on_not_a_number_input(self):
self.user.having_a_trashed_file('/foo/bar')
self.user.chdir('/foo')
self.user.run_restore(with_user_typing='+@notanumber')
self.assertEqual('Invalid entry: not an index: +@notanumber\n',
self.user.stderr())
def test_it_gives_error_when_user_input_is_too_small(self):
self.user.having_a_trashed_file('/foo/bar')
self.user.chdir('/foo')
self.user.run_restore(with_user_typing='1')
self.assertEqual('Invalid entry: out of range 0..0: 1\n',
self.user.stderr())
def test_it_gives_error_when_user_input_is_too_large(self):
self.user.having_a_trashed_file('/foo/bar')
self.user.chdir('/foo')
self.user.run_restore(with_user_typing='1')
self.assertEqual('Invalid entry: out of range 0..0: 1\n',
self.user.stderr())
def test_it_shows_the_file_deleted_from_the_current_dir(self):
self.user.having_a_trashed_file('/foo/bar')
self.user.chdir('/foo')
self.user.run_restore(with_user_typing='')
self.assertEqual(' 0 2000-01-01 00:00:01 /foo/bar\n'
'Exiting\n', self.user.stdout())
self.assertEqual('', self.user.stderr())
def test_it_restores_the_file_selected_by_the_user(self):
self.user.having_a_file_trashed_from_current_dir('foo')
self.user.chdir(os.getcwd())
self.user.run_restore(with_user_typing='0')
self.file_should_have_been_restored('foo')
def test_it_refuses_overwriting_existing_file(self):
self.user.having_a_file_trashed_from_current_dir('foo')
self.user.chdir(os.getcwd())
make_file("foo")
self.user.run_restore(with_user_typing='0')
self.assertEqual('Refusing to overwrite existing file "foo".\n',
self.user.stderr())
def setUp(self):
require_empty_dir('XDG_DATA_HOME')
self.user = RestoreTrashUser('XDG_DATA_HOME')
def file_should_have_been_restored(self, filename):
assert os.path.exists(filename)
class RestoreTrashUser:
def __init__(self, XDG_DATA_HOME):
self.XDG_DATA_HOME = XDG_DATA_HOME
self.out = StringIO()
self.err = StringIO()
def chdir(self, dir):
self.current_dir = dir
def run_restore(self, with_user_typing=''):
environ = {'XDG_DATA_HOME': self.XDG_DATA_HOME}
trash_directories = TrashDirectories(volume_of, os.getuid, environ)
trash_directories2 = TrashDirectories2(volume_of, trash_directories)
trashed_files = TrashedFiles(trash_directories2, TrashDirectory(),
contents_of)
RestoreCmd(
stdout = self.out,
stderr = self.err,
exit = [].append,
input = lambda msg: with_user_typing,
curdir = lambda: self.current_dir,
trashed_files=trashed_files,
mount_points=os_mount_points,
fs = restore.FileSystem()
).run([])
def having_a_file_trashed_from_current_dir(self, filename):
self.having_a_trashed_file(os.path.join(os.getcwd(), filename))
remove_file(filename)
assert not os.path.exists(filename)
def having_a_trashed_file(self, path):
make_file('%s/info/foo.trashinfo' % self._trash_dir(),
a_trashinfo(path))
make_file('%s/files/foo' % self._trash_dir())
def _trash_dir(self):
return "%s/Trash" % self.XDG_DATA_HOME
def stdout(self):
return self.out.getvalue()
def stderr(self):
return self.err.getvalue()
|
import os
import re
files = ('README.rst', 'CONTRIBUTING.rst')
# replace GitHub external links by :doc: links
replacements = ((r'`([^`]*?) <(docs/)?(.*?)\.rst>`_', r':doc:`\g<1> <\g<3>>`'),)
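# For example (illustrative): a GitHub-style link such as
#   `Contributing <docs/contributing.rst>`_
# is rewritten to
#   :doc:`Contributing <contributing>`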
def read(fname: str) -> str:
return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
for file in files:
c = read('../' + file)
for r in replacements:
c = re.sub(r[0], r[1], c)
with open(file, 'w') as f:
f.write(c)
|
import argparse
import os
import sys
import time
from yapf.yapflib.yapf_api import FormatFile
try:
from .common import get_stash_dir
except (ImportError, ValueError):
from common import get_stash_dir
def apply_to_file(fp, sp, in_place=False):
"""
Apply the style to a file.
:param fp: path to file
:type fp: str
:param sp: path to style
:type sp: str
:param in_place: format code in-place
:type in_place: bool
:return: the reformatted code
:rtype: str or None
"""
rc, encoding, changed = FormatFile(fp, style_config=sp, verify=True, in_place=in_place)
return rc
def apply_to_dir(path, style, recursive=False, in_place=False, verbose=False, pyonly=True):
"""
Apply the style to all files in a directory.
:param path: path to directory
:type path: str
:param style: path to style file
:type style: str
:param recursive: also descend into subdirectories
:type recursive: bool
:param in_place: apply the changes directly to the file
:type in_place: bool
:param verbose: print additional information
:type verbose: bool
:param pyonly: only apply to .py files
:type pyonly: bool
"""
if verbose:
print("Applying style to directory '{}'...".format(path))
for fn in os.listdir(path):
fp = os.path.join(path, fn)
if os.path.isdir(fp) and recursive:
apply_to_dir(fp, style, recursive=recursive, in_place=in_place, verbose=verbose, pyonly=pyonly)
elif os.path.isfile(fp):
if (not fn.endswith(".py")) and pyonly:
if verbose:
print("Skipping '{}' (non-py)...".format(fp))
continue
if verbose:
print("Applying style to file '{}'...".format(fp))
res = apply_to_file(fp, style, in_place=in_place)
if not in_place:
print("# ======= {} =======".format(fp))
print(res)
def main():
"""the main function"""
parser = argparse.ArgumentParser(description="Reformat source to follow style rules")
parser.add_argument("action", help="action to perform", choices=["apply"])
parser.add_argument("-p", "--path", action="store", help="path to file/directory")
parser.add_argument("-s", "--style", action="store", help="path to style file")
parser.add_argument("-r", "--recursive", action="store_true", help="descend into subdirectories")
parser.add_argument("-v", "--verbose", action="store_true", help="be more verbose")
parser.add_argument("-i", "--inplace", action="store_true", help="apply the changes to the source")
parser.add_argument("-a", "--all", action="store_true", help="apply to all files (not just *.py files)")
ns = parser.parse_args()
if ns.path is not None:
path = ns.path
else:
path = get_stash_dir()
if ns.style is not None:
style = ns.style
else:
style = os.path.join(get_stash_dir(), "tools", "yapf.ini")
if ns.action == "apply":
start = time.time()
if not os.path.exists(path):
print("Error: path '{}' does not exist!".format(path))
sys.exit(1)
elif os.path.isdir(path):
apply_to_dir(path, style, in_place=ns.inplace, recursive=ns.recursive, pyonly=(not ns.all), verbose=ns.verbose)
else:
res = apply_to_file(path, style, in_place=ns.inplace)
if not ns.inplace:
print(res)
end = time.time()
if ns.verbose:
print("Done. Style applied in {}s".format(end - start))
if __name__ == "__main__":
main()
|
from __future__ import print_function
import os
import sys
from argparse import ArgumentParser
from fnmatch import fnmatch
def is_excluded(path, pattern):
if pattern:
while path != '':
prefix, tail = os.path.split(path)
if fnmatch(tail, pattern):
return True
else:
path = prefix
return False
else:
return False
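# Illustrative behaviour (paths are hypothetical): is_excluded('A/B', 'B')
# matches the trailing component 'B' and returns True, while
# is_excluded('A/B', 'C') walks up through 'B' and then 'A' without a match
# and returns False.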
def main(args):
ap = ArgumentParser(description='Summarize disk usage of the set of FILEs, recursively for directories.')
ap.add_argument('-s', '--summarize', action='store_true', help='display only a total for each argument')
ap.add_argument('--exclude', dest='exclude_pattern', metavar='PATTERN', help='exclude files that match PATTERN')
ap.add_argument('FILEs', nargs='*', default=['.'], help='files to summarize (defaults to the current working directory)')
ns = ap.parse_args(args)
exclude_pattern = ns.exclude_pattern if ns.exclude_pattern else None
sizeof_fmt = globals()['_stash'].libcore.sizeof_fmt
for path in ns.FILEs:
path_base = os.path.dirname(path)
# Use relative path because of the following facts:
# du A/B --exclude="B" -> no output
# du A/B --exclude="A" -> normal output
if is_excluded(os.path.relpath(path, path_base), exclude_pattern):
continue
if os.path.isdir(path):
dirs_dict = {}
# We need to walk the tree from the bottom up so that a directory can have easy
# access to the size of its subdirectories.
for root, dirs, files in os.walk(path, topdown=False):
# This is to make sure the directory is not excluded by one of its ancestors
if is_excluded(os.path.relpath(root, path_base), exclude_pattern):
continue
# Loop through every non-directory file in this directory and sum their sizes
size = sum(
os.path.getsize(os.path.join(root,
name)) for name in files if not is_excluded(name,
exclude_pattern)
)
# Look at all of the subdirectories and add up their sizes from the `dirs_dict`
subdir_size = sum(dirs_dict[os.path.join(root, d)] for d in dirs if not is_excluded(d, exclude_pattern))
# store the size of this directory (plus subdirectories) in a dict so we
# can access it later
my_size = dirs_dict[root] = size + subdir_size
if ns.summarize and root != path:
continue
print('%-8s %s' % (sizeof_fmt(my_size), root))
else:
print('%-8s %s' % (sizeof_fmt(os.path.getsize(path)), path))
if __name__ == '__main__':
main(sys.argv[1:])
|
from ... import event
from . import Widget
class Label(Widget):
""" Widget to show text/html.
The ``node`` of this widget is a
`<div> <https://developer.mozilla.org/docs/Web/HTML/Element/div>`_ with
CSS ``word-wrap`` and ``white-space`` set appropriately.
"""
DEFAULT_MIN_SIZE = 10, 24
CSS = """
.flx-Label {
border: 0px solid #454;
user-select: text;
-moz-user-select: text;
-webkit-user-select: text;
-ms-user-select: text;
}"""
text = event.StringProp('', doc="""
The text shown in the label (HTML is shown verbatim).
""")
html = event.StringProp('', doc="""
The html shown in the label.
Warning: there is a risk of XSS attacks when html that you do not
control (e.g. from user input) is inserted.
""")
wrap = event.IntProp(0, settable=True, doc="""
Whether the content is allowed to be wrapped on multiple
lines. Set to 0/False for no wrap (default), 1/True for word-wrap,
2 for character wrap.
""")
def init(self):
if self.text:
self.set_text(self.text)
elif self.html:
self.set_html(self.html)
@event.action
def set_text(self, text):
""" Setter for the text property.
"""
if not self.node:
self._mutate_text(text)
return
self.node.textContent = text
self._mutate_text(self.node.textContent)
self._mutate_html(self.node.innerHTML)
@event.action
def set_html(self, html):
""" Setter for the html property. Use with care.
"""
if not self.node:
self._mutate_html(html)
return
self.node.innerHTML = html
self._mutate_text(self.node.textContent)
self._mutate_html(self.node.innerHTML)
@event.reaction('wrap')
def _wrap_changed(self, *events):
wrap = self.wrap
if wrap < 0 or wrap > 2:
wrap = 0
self.node.style['word-wrap'] = ['normal', 'normal', 'break-word'][wrap]
self.node.style['white-space'] = ['nowrap', 'normal', 'normal'][wrap]
self.check_real_size(True)
|
from __future__ import division
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainercv.links.model.faster_rcnn.utils.loc2bbox import loc2bbox
from chainercv.transforms.image.resize import resize
from chainercv.utils.bbox.non_maximum_suppression import \
non_maximum_suppression
class LightHeadRCNN(chainer.Chain):
"""Base class for Light-Head R-CNN.
This is a base class for Light-Head R-CNN links supporting object detection
API [#]_. The following three stages constitute Light-Head R-CNN.
1. **Feature extraction**: Images are taken and their \
feature maps are calculated.
2. **Region Proposal Networks**: Given the feature maps calculated in \
the previous stage, produce a set of RoIs around objects.
3. **Localization and Classification Heads**: Using feature maps that \
belong to the proposed RoIs, classify the categories of the objects \
in the RoIs and improve localizations.
Each stage is carried out by one of the callable
:class:`chainer.Chain` objects :obj:`feature`, :obj:`rpn` and :obj:`head`.
There are two functions :meth:`predict` and :meth:`__call__` to conduct
object detection.
:meth:`predict` takes images and returns bounding boxes that are converted
to image coordinates. This will be useful for a scenario when
Light-Head R-CNN is treated as a black box function, for instance.
:meth:`__call__` is provided for a scenario when intermediate outputs
are needed, for instance, for training and debugging.
Links that support object detection API have method :meth:`predict` with
the same interface. Please refer to :meth:`predict` for
further details.
.. [#] Zeming Li, Chao Peng, Gang Yu, Xiangyu Zhang, Yangdong Deng, \
Jian Sun. Light-Head R-CNN: In Defense of Two-Stage Object Detector. \
arXiv preprint arXiv:1711.07264.
Args:
extractor (callable Chain): A callable that takes a BCHW image
array and returns feature maps.
rpn (callable Chain): A callable that has the same interface as
:class:`~chainercv.links.model.faster_rcnn.RegionProposalNetwork`.
Please refer to the documentation found there.
head (callable Chain): A callable that takes
a BCHW array, RoIs and batch indices for RoIs. This returns class
dependent localization parameters and class scores.
mean (numpy.ndarray): A value to be subtracted from an image
in :meth:`prepare`.
min_size (int): A preprocessing parameter for :meth:`prepare`. Please
refer to a docstring found for :meth:`prepare`.
max_size (int): A preprocessing parameter for :meth:`prepare`.
loc_normalize_mean (tuple of four floats): Mean values of
localization estimates.
loc_normalize_std (tuple of four floats): Standard deviation
of localization estimates.
"""
def __init__(
self, extractor, rpn, head, mean,
min_size, max_size, loc_normalize_mean, loc_normalize_std,
):
super(LightHeadRCNN, self).__init__()
with self.init_scope():
self.extractor = extractor
self.rpn = rpn
self.head = head
self.mean = mean
self.min_size = min_size
self.max_size = max_size
self.loc_normalize_mean = loc_normalize_mean
self.loc_normalize_std = loc_normalize_std
self.use_preset('visualize')
@property
def n_class(self):
# Total number of classes including the background.
return self.head.n_class
def __call__(self, x, scales=None):
"""Forward Light Head R-CNN.
Scaling parameter :obj:`scales` is used by RPN to determine the
threshold to select small objects, which are going to be
rejected irrespective of their confidence scores.
Here are notations used.
* :math:`N` is the batch size
* :math:`R'` is the total number of RoIs produced across batches. \
Given :math:`R_i` proposed RoIs from the :math:`i` th image, \
:math:`R' = \\sum _{i=1} ^ N R_i`.
* :math:`L` is the number of classes excluding the background.
Classes are ordered by the background, the first class, ..., and
the :math:`L` th class.
Args:
x (~chainer.Variable): 4D image variable.
scales (tuple of float): Amount of scaling applied to the raw
image during preprocessing.
Returns:
Variable, Variable, array, array:
Returns tuple of four values listed below.
* **roi_cls_locs**: Offsets and scalings for the proposed RoIs. \
Its shape is :math:`(R', (L + 1) \\times 4)`.
* **roi_scores**: Class predictions for the proposed RoIs. \
Its shape is :math:`(R', L + 1)`.
* **rois**: RoIs proposed by RPN. Its shape is \
:math:`(R', 4)`.
* **roi_indices**: Batch indices of RoIs. Its shape is \
:math:`(R',)`.
"""
img_size = x.shape[2:]
rpn_features, roi_features = self.extractor(x)
_, _, rois, roi_indices, _ = self.rpn(
rpn_features, img_size, scales)
roi_cls_locs, roi_scores = self.head(
roi_features, rois, roi_indices)
return roi_cls_locs, roi_scores, rois, roi_indices
def use_preset(self, preset):
"""Use the given preset during prediction.
This method changes values of :obj:`self.nms_thresh` and
:obj:`self.score_thresh`. These values are a threshold value
used for non maximum suppression and a threshold value
to discard low confidence proposals in :meth:`predict`,
respectively.
If the attributes need to be changed to something
other than the values provided in the presets, please modify
them by directly accessing the public attributes.
Args:
preset ({'visualize', 'evaluate'}): A string to determine the
preset to use.
"""
if preset == 'visualize':
self.nms_thresh = 0.5
self.score_thresh = 0.7
elif preset == 'evaluate':
self.nms_thresh = 0.5
self.score_thresh = 0.001
else:
raise ValueError('preset must be visualize or evaluate')
def prepare(self, img):
"""Preprocess an image for feature extraction.
The length of the shorter edge is scaled to :obj:`self.min_size`.
After the scaling, if the length of the longer edge is longer than
:obj:`self.max_size`, the image is scaled to fit the longer edge
to :obj:`self.max_size`.
After resizing the image, a mean image value :obj:`self.mean` is
subtracted from it.
Args:
img (~numpy.ndarray): An image. This is in CHW and RGB format.
The range of its value is :math:`[0, 255]`.
Returns:
~numpy.ndarray:
A preprocessed image.
"""
_, H, W = img.shape
scale = 1.
scale = self.min_size / min(H, W)
if scale * max(H, W) > self.max_size:
scale = self.max_size / max(H, W)
img = resize(img, (int(H * scale), int(W * scale)))
img = (img - self.mean).astype(np.float32, copy=False)
return img
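# Worked example for prepare() (illustrative; the sizes are assumptions, not
# class defaults): with min_size=600 and max_size=1000, a 480x640 CHW image
# gets scale = 600 / 480 = 1.25; since 640 * 1.25 = 800 <= 1000 the scale is
# kept, the image is resized to 600x800, and self.mean is then subtracted.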
def _suppress(self, raw_cls_bbox, raw_prob):
bbox = []
label = []
prob = []
# skip cls_id = 0 because it is the background class
for l in range(1, self.n_class):
cls_bbox_l = raw_cls_bbox.reshape((-1, self.n_class, 4))[:, l, :]
prob_l = raw_prob[:, l]
mask = prob_l > self.score_thresh
cls_bbox_l = cls_bbox_l[mask]
prob_l = prob_l[mask]
keep = non_maximum_suppression(
cls_bbox_l, self.nms_thresh, prob_l)
bbox.append(cls_bbox_l[keep])
# The labels are in [0, self.n_class - 2].
label.append((l - 1) * np.ones((len(keep),)))
prob.append(prob_l[keep])
bbox = np.concatenate(bbox, axis=0).astype(np.float32)
label = np.concatenate(label, axis=0).astype(np.int32)
prob = np.concatenate(prob, axis=0).astype(np.float32)
return bbox, label, prob
def predict(self, imgs):
"""Detect objects from images.
This method predicts objects for each image.
Args:
imgs (iterable of numpy.ndarray): Arrays holding images.
All images are in CHW and RGB format
and the range of their value is :math:`[0, 255]`.
Returns:
tuple of lists:
This method returns a tuple of three lists,
:obj:`(bboxes, labels, scores)`.
* **bboxes**: A list of float arrays of shape :math:`(R, 4)`, \
where :math:`R` is the number of bounding boxes in an image. \
Each bounding box is organized by \
:math:`(y_{min}, x_{min}, y_{max}, x_{max})` \
in the second axis.
* **labels** : A list of integer arrays of shape :math:`(R,)`. \
Each value indicates the class of the bounding box. \
Values are in range :math:`[0, L - 1]`, where :math:`L` is the \
number of the foreground classes.
* **scores** : A list of float arrays of shape :math:`(R,)`. \
Each value indicates how confident the prediction is.
"""
prepared_imgs = []
sizes = []
for img in imgs:
size = img.shape[1:]
img = self.prepare(img.astype(np.float32))
prepared_imgs.append(img)
sizes.append(size)
bboxes = []
labels = []
scores = []
for img, size in zip(prepared_imgs, sizes):
with chainer.using_config('train', False), \
chainer.function.no_backprop_mode():
img_var = chainer.Variable(self.xp.asarray(img[None]))
scale = img_var.shape[3] / size[1]
roi_cls_locs, roi_scores, rois, _ = self.__call__(
img_var, [scale])
# We are assuming that batch size is 1.
roi_cls_loc = roi_cls_locs.array
roi_score = roi_scores.array
roi = rois / scale
# Convert predictions to bounding boxes in image coordinates.
# Bounding boxes are scaled to the scale of the input images.
mean = self.xp.tile(self.xp.asarray(self.loc_normalize_mean),
self.n_class)
std = self.xp.tile(self.xp.asarray(self.loc_normalize_std),
self.n_class)
roi_cls_loc = (roi_cls_loc * std + mean).astype(np.float32)
roi_cls_loc = roi_cls_loc.reshape((-1, self.n_class, 4))
roi = self.xp.broadcast_to(roi[:, None], roi_cls_loc.shape)
cls_bbox = loc2bbox(roi.reshape((-1, 4)),
roi_cls_loc.reshape((-1, 4)))
cls_bbox = cls_bbox.reshape((-1, self.n_class * 4))
# clip bounding box
cls_bbox[:, 0::2] = self.xp.clip(cls_bbox[:, 0::2], 0, size[0])
cls_bbox[:, 1::2] = self.xp.clip(cls_bbox[:, 1::2], 0, size[1])
prob = F.softmax(roi_score).array
raw_cls_bbox = cuda.to_cpu(cls_bbox)
raw_prob = cuda.to_cpu(prob)
bbox, label, prob = self._suppress(raw_cls_bbox, raw_prob)
indices = np.argsort(prob)[::-1]
bbox = bbox[indices]
label = label[indices]
prob = prob[indices]
bboxes.append(bbox)
labels.append(label)
scores.append(prob)
return bboxes, labels, scores
|
from textwrap import dedent
def assert_line_in_text(line, text):
assert line in text.splitlines(), dedent('''\
Line not found in text
Line:
%s
Text:
---
%s---''') % (repr(line), text)
def assert_equals_with_unidiff(expected, actual):
def unidiff(expected, actual):
import difflib
expected = expected.splitlines(1)
actual = actual.splitlines(1)
diff = difflib.unified_diff(expected, actual,
fromfile='Expected', tofile='Actual',
lineterm='\n', n=10)
return ''.join(diff)
assert expected == actual, ("\n"
"Expected:%s\n" % repr(expected) +
" Actual:%s\n" % repr(actual) +
unidiff(expected, actual))
|
from contextlib import contextmanager
class ShNullResponder(object):
def handle(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
pass
def __getattribute__(self, item):
return object.__getattribute__(self, 'handle')
def __getitem__(self, item):
return object.__getattribute__(self, 'handle')
NULL_RESPONDER = ShNullResponder()
# noinspection PyAttributeOutsideInit,PyDocstring
class ShUserActionProxy(object):
"""
This proxy object provides a central place to register handlers for
any user actions trigger from the UI including typing, touching,
tap, etc. A centralized object makes it easier to substitute default
handlers by user-defined functions in command script, e.g. ssh.
:param StaSh stash:
"""
def __init__(self, stash):
self.stash = stash
self.reset()
# TextView delegate
class _TVDelegate(object):
@staticmethod
def textview_did_begin_editing(sender):
self.tv_responder.textview_did_begin_editing(sender)
@staticmethod
def textview_did_end_editing(sender):
self.tv_responder.textview_did_end_editing(sender)
@staticmethod
def textview_should_change(sender, rng, replacement):
return self.tv_responder.textview_should_change(sender, rng, replacement)
@staticmethod
def textview_did_change(sender):
self.tv_responder.textview_did_change(sender)
@staticmethod
def textview_did_change_selection(sender):
self.tv_responder.textview_did_change_selection(sender)
# Virtual key row swipe gesture
class _SVDelegate(object):
@staticmethod
def scrollview_did_scroll(sender):
if self.sv_responder:
self.sv_responder.scrollview_did_scroll(sender)
else:
sender.superview.scrollview_did_scroll(sender)
self.tv_delegate = _TVDelegate()
self.sv_delegate = _SVDelegate()
# The properties are used for late binding as the various components
# may not be ready when this class is initialized
@property
def vk_responder(self):
return self._vk_responder or self.stash.ui.vk_tapped
@vk_responder.setter
def vk_responder(self, value):
self._vk_responder = value
@property
def tv_responder(self):
return self._tv_responder or self.stash.terminal.tv_delegate
@tv_responder.setter
def tv_responder(self, value):
self._tv_responder = value
@property
def kc_responder(self):
return self._kc_responder or self.stash.terminal.kc_pressed
@kc_responder.setter
def kc_responder(self, value):
self._kc_responder = value
@contextmanager
def config(self, vk_responder=False, tv_responder=False, sv_responder=False, kc_responder=False):
try:
self._vk_responder = NULL_RESPONDER if vk_responder is False else vk_responder
self._tv_responder = NULL_RESPONDER if tv_responder is False else tv_responder
self.sv_responder = NULL_RESPONDER if sv_responder is False else sv_responder
self.kc_responder = NULL_RESPONDER if kc_responder is False else kc_responder
yield
finally:
self.reset()
def reset(self):
self._vk_responder = None
self._tv_responder = None
self.sv_responder = None
self._kc_responder = None # for keyCommands
# --------------------- Proxy ---------------------
# Buttons
def vk_tapped(self, sender):
self.vk_responder(sender)
# Keyboard shortcuts
def kc_pressed(self, key, modifierFlags):
self.kc_responder(key, modifierFlags)
|
import argparse
import contextlib
import os
import signal
import socket
import sys
from radicale import VERSION, config, log, server, storage
from radicale.log import logger
def run():
"""Run Radicale as a standalone server."""
exit_signal_numbers = [signal.SIGTERM, signal.SIGINT]
if os.name == "posix":
exit_signal_numbers.append(signal.SIGHUP)
exit_signal_numbers.append(signal.SIGQUIT)
elif os.name == "nt":
exit_signal_numbers.append(signal.SIGBREAK)
# Raise SystemExit when signal arrives to run cleanup code
# (like destructors, try-finally etc.), otherwise the process exits
# without running any of them
def exit_signal_handler(signal_number, stack_frame):
sys.exit(1)
for signal_number in exit_signal_numbers:
signal.signal(signal_number, exit_signal_handler)
log.setup()
# Get command-line arguments
parser = argparse.ArgumentParser(
prog="radicale", usage="%(prog)s [OPTIONS]", allow_abbrev=False)
parser.add_argument("--version", action="version", version=VERSION)
parser.add_argument("--verify-storage", action="store_true",
help="check the storage for errors and exit")
parser.add_argument(
"-C", "--config", help="use specific configuration files", nargs="*")
parser.add_argument("-D", "--debug", action="store_true",
help="print debug information")
groups = {}
for section, values in config.DEFAULT_CONFIG_SCHEMA.items():
if section.startswith("_"):
continue
group = parser.add_argument_group(section)
groups[group] = []
for option, data in values.items():
if option.startswith("_"):
continue
kwargs = data.copy()
long_name = "--%s-%s" % (section, option.replace("_", "-"))
args = list(kwargs.pop("aliases", ()))
args.append(long_name)
kwargs["dest"] = "%s_%s" % (section, option)
groups[group].append(kwargs["dest"])
del kwargs["value"]
with contextlib.suppress(KeyError):
del kwargs["internal"]
if kwargs["type"] == bool:
del kwargs["type"]
kwargs["action"] = "store_const"
kwargs["const"] = "True"
opposite_args = kwargs.pop("opposite", [])
opposite_args.append("--no%s" % long_name[1:])
group.add_argument(*args, **kwargs)
kwargs["const"] = "False"
kwargs["help"] = "do not %s (opposite of %s)" % (
kwargs["help"], long_name)
group.add_argument(*opposite_args, **kwargs)
else:
del kwargs["type"]
group.add_argument(*args, **kwargs)
args = parser.parse_args()
# Preliminary configure logging
if args.debug:
args.logging_level = "debug"
with contextlib.suppress(ValueError):
log.set_level(config.DEFAULT_CONFIG_SCHEMA["logging"]["level"]["type"](
args.logging_level))
# Update Radicale configuration according to arguments
arguments_config = {}
for group, actions in groups.items():
section = group.title
section_config = {}
for action in actions:
value = getattr(args, action)
if value is not None:
section_config[action.split('_', 1)[1]] = value
if section_config:
arguments_config[section] = section_config
try:
configuration = config.load(config.parse_compound_paths(
config.DEFAULT_CONFIG_PATH,
os.environ.get("RADICALE_CONFIG"),
os.pathsep.join(args.config) if args.config else None))
if arguments_config:
configuration.update(arguments_config, "arguments")
except Exception as e:
logger.fatal("Invalid configuration: %s", e, exc_info=True)
sys.exit(1)
# Configure logging
log.set_level(configuration.get("logging", "level"))
# Log configuration after logger is configured
for source, miss in configuration.sources():
logger.info("%s %s", "Skipped missing" if miss else "Loaded", source)
if args.verify_storage:
logger.info("Verifying storage")
try:
storage_ = storage.load(configuration)
with storage_.acquire_lock("r"):
if not storage_.verify():
logger.fatal("Storage verification failed")
sys.exit(1)
except Exception as e:
logger.fatal("An exception occurred during storage verification: "
"%s", e, exc_info=True)
sys.exit(1)
return
# Create a socket pair to notify the server of program shutdown
shutdown_socket, shutdown_socket_out = socket.socketpair()
# Shutdown server when signal arrives
def shutdown_signal_handler(signal_number, stack_frame):
shutdown_socket.close()
for signal_number in exit_signal_numbers:
signal.signal(signal_number, shutdown_signal_handler)
try:
server.serve(configuration, shutdown_socket_out)
except Exception as e:
logger.fatal("An exception occurred during server startup: %s", e,
exc_info=True)
sys.exit(1)
if __name__ == "__main__":
run()
|
import importlib
import os
import pkgutil
from homeassistant.config import _identify_config_schema
from homeassistant.scripts.check_config import color
def explore_module(package):
"""Explore the modules."""
module = importlib.import_module(package)
if not hasattr(module, "__path__"):
return []
for _, name, _ in pkgutil.iter_modules(module.__path__, f"{package}."):
yield name
def main():
"""Run the script."""
if not os.path.isfile("requirements_all.txt"):
print("Run this from HA root dir")
return
msg = {}
def add_msg(key, item):
"""Add a message."""
if key not in msg:
msg[key] = []
msg[key].append(item)
for package in explore_module("homeassistant.components"):
module = importlib.import_module(package)
module_name = getattr(module, "DOMAIN", module.__name__)
if hasattr(module, "PLATFORM_SCHEMA"):
if hasattr(module, "CONFIG_SCHEMA"):
add_msg(
"WARNING",
f"Module {module_name} contains PLATFORM and CONFIG schemas",
)
add_msg("PLATFORM SCHEMA", module_name)
continue
if not hasattr(module, "CONFIG_SCHEMA"):
add_msg("NO SCHEMA", module_name)
continue
schema_type, schema = _identify_config_schema(module)
add_msg(
f"CONFIG_SCHEMA {schema_type}",
f"{module_name} {color('cyan', str(schema)[:60])}",
)
for key in sorted(msg):
print("\n{}\n - {}".format(key, "\n - ".join(msg[key])))
if __name__ == "__main__":
main()
|
import itertools
import logging
import posixpath
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker.linux_packages import multichase
_CHASES_WITHOUT_ARGS = (
'simple', 'incr', 't0', 't1', 't2', 'nta', 'movdqa', 'movntdqa',
'parallel2', 'parallel3', 'parallel4', 'parallel5', 'parallel6',
'parallel7', 'parallel8', 'parallel9', 'parallel10')
_CHASES_WITH_ARGS = 'critword', 'critword2', 'work'
# Dict mapping possible chase types accepted by the multichase -c flag to a
# boolean indicating whether the chase requires an integer argument. For
# example, the 'simple' chase type does not require an argument and is specified
# as `multichase -c simple`, but the 'work' chase type requires an argument and
# is specified as `multichase -c work:N`.
_CHASES = {c: c in _CHASES_WITH_ARGS
for c in _CHASES_WITHOUT_ARGS + _CHASES_WITH_ARGS}
_BENCHMARK_SPECIFIC_VM_STATE_ATTR = 'multichase_vm_state'
_NOT_ENOUGH_CPUS = 'error: more threads than cpus available'
BENCHMARK_NAME = 'multichase'
BENCHMARK_CONFIG = """
multichase:
description: >
Run a benchmark from the multichase benchmark suite.
vm_groups:
default:
vm_spec: *default_single_core
"""
FLAGS = flags.FLAGS
class _MemorySizeParser(flag_util.UnitsParser):
syntactic_help = (
"An explicit memory size that must be convertible to an integer number "
"of bytes (e.g. '7.5 MiB') or a percentage of the total memory rounded "
"down to the next integer byte (e.g. '97.5%', which translates to "
"1046898278 bytes if a total of 1 GiB memory is available).")
def __init__(self):
super(_MemorySizeParser, self).__init__(convertible_to=(units.byte,
units.percent))
def parse(self, inp):
"""Parse the input.
Args:
inp: string or units.Quantity.
Returns:
A units.Quantity.
Raises:
ValueError: If the input cannot be parsed, or if it parses to a value that
does not meet the requirements described in self.syntactic_help.
"""
size = super(_MemorySizeParser, self).parse(inp)
if size.units != units.percent:
size_byte_count = size.to(units.byte).magnitude
if size_byte_count != int(size_byte_count):
raise ValueError(
'Expression {0!r} parses to memory size {1!r}, which is not '
'convertible to an integer number of bytes.'.format(inp, str(size)))
return size
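# Illustrative examples (assuming pint-style units.Quantity values): parsing
# '7.5 MiB' is accepted because it equals 7864320 bytes (a whole number of
# bytes), '97.5%' is returned unchanged as a percentage, and a value such as
# '0.3 byte' raises ValueError because it is not convertible to an integer
# number of bytes.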
_MEMORY_SIZE_PARSER = _MemorySizeParser()
_UNITS_SERIALIZER = flag_util.UnitsSerializer()
_DEFAULT_MEMORY_SIZE = units.Quantity('256 MiB')
_DEFAULT_STRIDE_SIZE = units.Quantity('256 bytes')
def _DefineMemorySizeFlag(name, default, help, flag_values=FLAGS, **kwargs):
flags.DEFINE(_MEMORY_SIZE_PARSER, name, default, help, flag_values,
_UNITS_SERIALIZER, **kwargs)
flags.DEFINE_enum(
'multichase_chase_type', 'simple', sorted(_CHASES),
'Chase type to use when executing multichase. Passed to multichase via its '
'-c flag.')
flags.DEFINE_integer(
'multichase_chase_arg', 1,
'Argument to refine the chase type specified with --multichase_chase_type. '
'Applicable for the following types: {0}.'.format(', '.join(
_CHASES_WITH_ARGS)))
flag_util.DEFINE_integerlist(
'multichase_thread_count', flag_util.IntegerList([1]),
'Number of threads (one per core), to use when executing multichase. '
'Passed to multichase via its -t flag.', module_name=__name__)
_DefineMemorySizeFlag(
'multichase_memory_size_min', _DEFAULT_MEMORY_SIZE,
'Memory size to use when executing multichase. Passed to multichase via '
'its -m flag. If it differs from multichase_memory_size_max, then '
'multichase is executed multiple times, starting with a memory size equal '
'to the min and doubling while the memory size does not exceed the max. '
'Can be specified as a percentage of the total memory on the machine.')
_DefineMemorySizeFlag(
'multichase_memory_size_max', _DEFAULT_MEMORY_SIZE,
'Memory size to use when executing multichase. Passed to multichase via '
'its -m flag. If it differs from multichase_memory_size_min, then '
'multichase is executed multiple times, starting with a memory size equal '
'to the min and doubling while the memory size does not exceed the max. '
'Can be specified as a percentage of the total memory on the machine.')
_DefineMemorySizeFlag(
'multichase_stride_size_min', _DEFAULT_STRIDE_SIZE,
'Stride size to use when executing multichase. Passed to multichase via '
'its -s flag. If it differs from multichase_stride_size_max, then '
'multichase is executed multiple times, starting with a stride size equal '
'to the min and doubling while the stride size does not exceed the max. '
'Can be specified as a percentage of the maximum memory (-m flag) of each '
'multichase execution.')
_DefineMemorySizeFlag(
'multichase_stride_size_max', _DEFAULT_STRIDE_SIZE,
'Stride size to use when executing multichase. Passed to multichase via '
'its -s flag. If it differs from multichase_stride_size_min, then '
'multichase is executed multiple times, starting with a stride size equal '
'to the min and doubling while the stride size does not exceed the max. '
'Can be specified as a percentage of the maximum memory (-m flag) of each '
'multichase execution.')
flags.DEFINE_string(
'multichase_numactl_options', None,
'If provided, numactl is used to control memory placement and process '
'CPU affinity. Examples: "--membind=0" or "--cpunodebind=0".')
flags.DEFINE_string(
'multichase_additional_flags', '',
"Additional flags to use when executing multichase. Example: '-O 16 -y'.")
def _TranslateMemorySize(get_total_memory, size):
"""Translates a value parsed from a memory size flag to a byte count.
Args:
get_total_memory: Function that accepts no arguments and returns an integer
specifying the total amount of memory available in bytes.
size: units.Quantity specifying either an explicit memory size in a unit
convertible to bytes or a percentage of the total memory.
Returns:
int expressing the specified memory size in bytes.
"""
if size.units == units.percent:
return int(get_total_memory() * size.magnitude / 100.)
return int(size.to(units.byte).magnitude)
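# Illustrative example (numbers are assumptions): a size of '25%' with a total
# memory of 8 GiB translates to int(8589934592 * 25 / 100.) == 2147483648
# bytes, while an explicit '512 MiB' translates to 536870912 bytes.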
def _IterMemorySizes(get_total_memory, min_size, max_size):
"""Iterates over a range of memory sizes determined by a min and max.
Args:
get_total_memory: Function that accepts no arguments and returns an integer
specifying the total amount of memory available.
min_size: units.Quantity specifying either an explicit memory size in a unit
convertible to bytes or a percentage of the total memory.
max_size: units.Quantity specifying either an explicit memory size in a unit
convertible to bytes or a percentage of the total memory.
Yields:
int expressing memory sizes in bytes. The first yielded value is the
specified minimum size, each subsequent yielded value is twice the previous,
and no yielded values are greater than the specified maximum size. If
max_size specifies a size that is less than min_size, no values are yielded.
"""
min_bytes = _TranslateMemorySize(get_total_memory, min_size)
max_bytes = _TranslateMemorySize(get_total_memory, max_size)
size = min_bytes
while size <= max_bytes:
yield size
size *= 2
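# Illustrative example (values are assumptions): with a total memory of
# 4 GiB, _IterMemorySizes(lambda: 4 * 1024 ** 3, units.Quantity('256 MiB'),
# units.Quantity('1 GiB')) yields 268435456, 536870912 and 1073741824 bytes
# (256 MiB, 512 MiB and 1 GiB); the next doubling would exceed the maximum,
# so iteration stops.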
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""Performs input verification checks.
Raises:
ValueError: If an invalid combination of flag values is specified.
"""
chase_type = FLAGS.multichase_chase_type
if not _CHASES[chase_type] and FLAGS['multichase_chase_arg'].present:
raise ValueError(
'Cannot use --multichase_chase_arg with chase type {0!r}. Chase type '
'{0!r} does not require an argument.'.format(chase_type))
class _MultichaseSpecificState(object):
"""State specific to this benchmark that must be preserved between PKB stages.
An instance of this class is attached to the VM as an attribute and is
therefore preserved as part of the pickled BenchmarkSpec between PKB stages.
Attributes:
dir: Optional string. Path of a directory on the remote machine where
files related to this benchmark are stored.
multichase_dir: Optional string. Path of a directory on the remote machine
where multichase files are stored. A subdirectory within dir.
"""
def __init__(self):
self.dir = None
self.multichase_dir = None
def Prepare(benchmark_spec):
"""Install multichase on the VM.
Args:
benchmark_spec: BenchmarkSpec.
"""
vm = benchmark_spec.vms[0]
vm_state = _MultichaseSpecificState()
setattr(vm, _BENCHMARK_SPECIFIC_VM_STATE_ATTR, vm_state)
vm.Install('multichase')
vm.Install('numactl')
remote_benchmark_dir = '_'.join(('pkb', FLAGS.run_uri, benchmark_spec.uid))
vm.RemoteCommand('mkdir ' + remote_benchmark_dir)
vm_state.dir = remote_benchmark_dir
vm_state.multichase_dir = posixpath.join(vm_state.dir, 'multichase')
vm.RemoteCommand('cp -ar {0} {1}'.format(
multichase.INSTALL_PATH, vm_state.multichase_dir))
def Run(benchmark_spec):
"""Run multichase on the VM.
Args:
benchmark_spec: BenchmarkSpec.
Returns:
A list of sample.Sample objects.
"""
samples = []
base_metadata = {'additional_flags': FLAGS.multichase_additional_flags,
'chase_type': FLAGS.multichase_chase_type,
'multichase_version': multichase.GIT_VERSION}
vm = benchmark_spec.vms[0]
vm_state = getattr(vm, _BENCHMARK_SPECIFIC_VM_STATE_ATTR)
max_thread_count = float('inf')
base_cmd = []
if FLAGS.multichase_numactl_options:
base_cmd.extend(('numactl', FLAGS.multichase_numactl_options))
base_metadata['numactl_options'] = FLAGS.multichase_numactl_options
multichase_path = posixpath.join(vm_state.multichase_dir, 'multichase')
base_cmd.extend((multichase_path, '-a', '-v'))
chase_type = FLAGS.multichase_chase_type
if _CHASES[chase_type]:
chase_type = '{0}:{1}'.format(chase_type, FLAGS.multichase_chase_arg)
base_metadata['chase_arg'] = FLAGS.multichase_chase_arg
base_cmd.extend(('-c', chase_type))
for thread_count in FLAGS.multichase_thread_count:
if thread_count > vm.NumCpusForBenchmark():
continue
memory_size_iterator = _IterMemorySizes(
lambda: vm.total_memory_kb * 1024, FLAGS.multichase_memory_size_min,
FLAGS.multichase_memory_size_max)
for memory_size in memory_size_iterator:
stride_size_iterator = _IterMemorySizes(
lambda: memory_size, FLAGS.multichase_stride_size_min,
FLAGS.multichase_stride_size_max)
for stride_size in stride_size_iterator:
if thread_count >= max_thread_count:
continue
cmd = ' '.join(str(s) for s in itertools.chain(base_cmd, (
'-m', memory_size, '-s', stride_size, '-t', thread_count,
FLAGS.multichase_additional_flags)))
stdout, stderr, retcode = vm.RemoteCommandWithReturnCode(
cmd, ignore_failure=True)
if retcode:
if _NOT_ENOUGH_CPUS in stderr:
logging.warning(
'Not enough CPUs to run %s threads. If you have more than that '
'number of CPUs on the system, it could be due to process '
'CPU affinity.', thread_count)
max_thread_count = min(max_thread_count, thread_count)
continue
else:
raise errors.VirtualMachine.RemoteCommandError(
'Multichase failed.\nSTDOUT: %s\nSTDERR: %s' % (stdout, stderr))
# Latency is printed in ns in the last line.
latency_ns = float(stdout.split()[-1])
# Generate one sample from one run of multichase.
metadata = base_metadata.copy()
metadata.update({'memory_size_bytes': memory_size,
'stride_size_bytes': stride_size,
'thread_count': thread_count})
samples.append(sample.Sample('latency', latency_ns, 'ns', metadata))
return samples
def Cleanup(benchmark_spec):
"""Remove multichase from the VM.
Args:
benchmark_spec: BenchmarkSpec.
"""
vm = benchmark_spec.vms[0]
vm_state = getattr(vm, _BENCHMARK_SPECIFIC_VM_STATE_ATTR)
if vm_state.dir:
# Directory has been created on the VM. Delete it.
vm.RemoteCommand('rm -rf ' + vm_state.dir)
|
import random
import string
import unittest
def gen_rand_name():
return str.join('', (random.choice(string.ascii_letters + string.digits) for _ in range(64)))
def gen_rand_id():
return str.join('', (random.choice(string.ascii_letters + string.digits + '-_')
for _ in range(22)))
def gen_rand_md5():
return str.join('', (random.choice(string.ascii_lowercase + string.digits) for _ in range(32)))
def gen_folder(folders: list=None):
folder = {
'createdBy': 'acd_cli_oa-<user>',
'createdDate': '2015-01-01T00:00:00.00Z',
'eTagResponse': 'AbCdEfGhI01',
'id': gen_rand_id(),
'isShared': False,
'kind': 'FOLDER',
'labels': [],
'modifiedDate': '2015-01-01T00:00:00.000Z',
'name': gen_rand_name(),
'parents': [],
'restricted': False,
'status': 'AVAILABLE' if not folders else random.choice(['TRASH', 'AVAILABLE']),
'version': random.randint(1, 20)
}
if not folders:
folder['name'] = None
folder['isRoot'] = True
elif len(folders) == 1:
folder['parents'] = [folders[0]['id']]
else:
folder['parents'] = [folders[random.randint(0, len(folders) - 1)]['id']]
return folder
def gen_file(folders: list):
file = {
'contentProperties': {'contentType': 'text/plain',
'extension': 'txt',
'md5': gen_rand_md5(),
'size': random.randint(0, 32 * 1024 ** 3),
'version': random.randint(1, 20)},
'createdBy': 'acd_cli_oa-<user>',
'createdDate': '2015-01-01T00:00:00.00Z',
'eTagResponse': 'AbCdEfGhI01',
'id': gen_rand_id(),
'isShared': False,
'kind': 'FILE',
'labels': [],
'modifiedDate': '2015-01-01T00:00:00.000Z',
'name': gen_rand_name(),
'parents': [folders[random.randint(0, len(folders) - 1)]['id']],
'restricted': False,
'status': random.choice(['AVAILABLE', 'TRASH']),
'version': random.randint(1, 20)
}
return file
def gen_bunch_of_nodes(count: int):
folders = []
files = []
for _ in range(int(count / 2)):
folders.append(gen_folder(folders))
for _ in range(int(count / 2)):
files.append(gen_file(folders))
return folders, files
class HelperTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testCreateRootFolder(self):
folder = gen_folder()
self.assertIn('isRoot', folder)
self.assertListEqual(folder['parents'], [])
def testCreateNonRootFolder(self):
root = gen_folder()
folder = gen_folder([root])
self.assertNotIn('isRoot', folder)
self.assertListEqual(folder['parents'], [root['id']])
def testMultiFolders(self):
folders = []
for _ in range(100):
folders.append(gen_folder(folders))
self.assertEqual(1, sum(f.get('isRoot', 0) for f in folders))
self.assertEqual(99, sum(len(f['parents']) for f in folders))
|
import math
import numpy as np
import torch
from tensornetwork.backends.pytorch import decompositions
def test_expected_shapes():
val = torch.zeros((2, 3, 4, 5))
u, s, vh, _ = decompositions.svd(torch, val, 2)
assert u.shape == (2, 3, 6)
assert s.shape == (6,)
np.testing.assert_allclose(s, np.zeros(6))
assert vh.shape == (6, 4, 5)
def test_expected_shapes_qr():
val = torch.zeros((2, 3, 4, 5))
for non_negative_diagonal in [True, False]:
q, r = decompositions.qr(torch, val, 2, non_negative_diagonal)
assert q.shape == (2, 3, 6)
assert r.shape == (6, 4, 5)
def test_expected_shapes_rq():
val = torch.zeros((2, 3, 4, 5))
for non_negative_diagonal in [True, False]:
r, q = decompositions.rq(torch, val, 2, non_negative_diagonal)
assert r.shape == (2, 3, 6)
assert q.shape == (6, 4, 5)
def test_rq():
random_matrix = torch.rand([10, 10], dtype=torch.float64)
for non_negative_diagonal in [True, False]:
r, q = decompositions.rq(torch, random_matrix, 1, non_negative_diagonal)
np.testing.assert_allclose(r.mm(q), random_matrix)
def test_qr():
random_matrix = torch.rand([10, 10], dtype=torch.float64)
for non_negative_diagonal in [True, False]:
q, r = decompositions.qr(torch, random_matrix, 1, non_negative_diagonal)
np.testing.assert_allclose(q.mm(r), random_matrix)
def test_max_singular_values():
np.random.seed(2018)
random_matrix = np.random.rand(10, 10)
unitary1, _, unitary2 = np.linalg.svd(random_matrix)
singular_values = np.array(range(10))
val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))
u, s, vh, trun = decompositions.svd(
torch, torch.tensor(val), 1, max_singular_values=7)
assert u.shape == (10, 7)
assert s.shape == (7,)
np.testing.assert_array_almost_equal(s, np.arange(9, 2, -1))
assert vh.shape == (7, 10)
np.testing.assert_array_almost_equal(trun, np.arange(2, -1, -1))
def test_max_truncation_error():
np.random.seed(2019)
random_matrix = np.random.rand(10, 10)
unitary1, _, unitary2 = np.linalg.svd(random_matrix)
singular_values = np.array(range(10))
val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))
u, s, vh, trun = decompositions.svd(
torch, torch.Tensor(val), 1, max_truncation_error=math.sqrt(5.1))
assert u.shape == (10, 7)
assert s.shape == (7,)
np.testing.assert_array_almost_equal(s, np.arange(9, 2, -1), decimal=5)
assert vh.shape == (7, 10)
np.testing.assert_array_almost_equal(trun, np.arange(2, -1, -1))
def test_max_truncation_error_relative():
absolute = np.diag([2.0, 1.0, 0.2, 0.1])
relative = np.diag([2.0, 1.0, 0.2, 0.1])
max_truncation_err = 0.2
_, _, _, trunc_sv_absolute = decompositions.svd(
torch,
torch.Tensor(absolute),
1,
max_truncation_error=max_truncation_err,
relative=False)
_, _, _, trunc_sv_relative = decompositions.svd(
torch,
torch.Tensor(relative),
1,
max_truncation_error=max_truncation_err,
relative=True)
np.testing.assert_almost_equal(trunc_sv_absolute, [0.1])
np.testing.assert_almost_equal(trunc_sv_relative, [0.2, 0.1])
|
from asyncio import TimeoutError as AsyncIOTimeoutError
from contextlib import nullcontext
from datetime import timedelta
from typing import Any, Dict, Optional
from homeassistant import core
from homeassistant.components.bond.const import DOMAIN as BOND_DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST, STATE_UNAVAILABLE
from homeassistant.setup import async_setup_component
from homeassistant.util import utcnow
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
def patch_setup_entry(domain: str, *, enabled: bool = True):
"""Patch async_setup_entry for specified domain."""
if not enabled:
return nullcontext()
return patch(f"homeassistant.components.bond.{domain}.async_setup_entry")
async def setup_bond_entity(
hass: core.HomeAssistant,
config_entry: MockConfigEntry,
*,
patch_version=False,
patch_device_ids=False,
patch_platforms=False,
):
"""Set up Bond entity."""
config_entry.add_to_hass(hass)
with patch_bond_version(enabled=patch_version), patch_bond_device_ids(
enabled=patch_device_ids
), patch_setup_entry("cover", enabled=patch_platforms), patch_setup_entry(
"fan", enabled=patch_platforms
), patch_setup_entry(
"light", enabled=patch_platforms
), patch_setup_entry(
"switch", enabled=patch_platforms
):
return await hass.config_entries.async_setup(config_entry.entry_id)
async def setup_platform(
hass: core.HomeAssistant,
platform: str,
discovered_device: Dict[str, Any],
*,
bond_device_id: str = "bond-device-id",
bond_version: Dict[str, Any] = None,
props: Dict[str, Any] = None,
state: Dict[str, Any] = None,
):
"""Set up the specified Bond platform."""
mock_entry = MockConfigEntry(
domain=BOND_DOMAIN,
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
mock_entry.add_to_hass(hass)
with patch("homeassistant.components.bond.PLATFORMS", [platform]):
with patch_bond_version(return_value=bond_version), patch_bond_device_ids(
return_value=[bond_device_id]
), patch_bond_device(
return_value=discovered_device
), patch_bond_device_properties(
return_value=props
), patch_bond_device_state(
return_value=state
):
assert await async_setup_component(hass, BOND_DOMAIN, {})
await hass.async_block_till_done()
return mock_entry
def patch_bond_version(
enabled: bool = True, return_value: Optional[dict] = None, side_effect=None
):
"""Patch Bond API version endpoint."""
if not enabled:
return nullcontext()
if return_value is None:
return_value = {"bondid": "test-bond-id"}
return patch(
"homeassistant.components.bond.Bond.version",
return_value=return_value,
side_effect=side_effect,
)
def patch_bond_device_ids(enabled: bool = True, return_value=None, side_effect=None):
"""Patch Bond API devices endpoint."""
if not enabled:
return nullcontext()
if return_value is None:
return_value = []
return patch(
"homeassistant.components.bond.Bond.devices",
return_value=return_value,
side_effect=side_effect,
)
def patch_bond_device(return_value=None):
"""Patch Bond API device endpoint."""
return patch(
"homeassistant.components.bond.Bond.device",
return_value=return_value,
)
def patch_bond_action():
"""Patch Bond API action endpoint."""
return patch("homeassistant.components.bond.Bond.action")
def patch_bond_device_properties(return_value=None):
"""Patch Bond API device properties endpoint."""
if return_value is None:
return_value = {}
return patch(
"homeassistant.components.bond.Bond.device_properties",
return_value=return_value,
)
def patch_bond_device_state(return_value=None, side_effect=None):
"""Patch Bond API device state endpoint."""
if return_value is None:
return_value = {}
return patch(
"homeassistant.components.bond.Bond.device_state",
return_value=return_value,
side_effect=side_effect,
)
async def help_test_entity_available(
hass: core.HomeAssistant, domain: str, device: Dict[str, Any], entity_id: str
):
"""Run common test to verify available property."""
await setup_platform(hass, domain, device)
assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
with patch_bond_device_state(side_effect=AsyncIOTimeoutError()):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
with patch_bond_device_state(return_value={}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get(entity_id).state != STATE_UNAVAILABLE
|
import pytest
from homeassistant.const import DATA_GIGABYTES
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
def mocked_exception(*args, **kwargs):
"""Mock exception thrown by requests.get."""
raise OSError
def mocked_requests_get(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
url = str(args[0])
if "api/calendar" in url:
return MockResponse(
[
{
"title": "Resident Evil",
"sortTitle": "resident evil final chapter",
"sizeOnDisk": 0,
"status": "announced",
"overview": "Alice, Jill, Claire, Chris, Leon, Ada, and...",
"inCinemas": "2017-01-25T00:00:00Z",
"physicalRelease": "2017-01-27T00:00:00Z",
"images": [
{
"coverType": "poster",
"url": (
"/radarr/MediaCover/12/poster.jpg"
"?lastWrite=636208663600000000"
),
},
{
"coverType": "banner",
"url": (
"/radarr/MediaCover/12/banner.jpg"
"?lastWrite=636208663600000000"
),
},
],
"website": "",
"downloaded": "false",
"year": 2017,
"hasFile": "false",
"youTubeTrailerId": "B5yxr7lmxhg",
"studio": "Impact Pictures",
"path": "/path/to/Resident Evil The Final Chapter (2017)",
"profileId": 3,
"monitored": "false",
"runtime": 106,
"lastInfoSync": "2017-01-24T14:52:40.315434Z",
"cleanTitle": "residentevilfinalchapter",
"imdbId": "tt2592614",
"tmdbId": 173897,
"titleSlug": "resident-evil-the-final-chapter-2017",
"genres": ["Action", "Horror", "Science Fiction"],
"tags": [],
"added": "2017-01-24T14:52:39.989964Z",
"ratings": {"votes": 363, "value": 4.3},
"alternativeTitles": ["Resident Evil: Rising"],
"qualityProfileId": 3,
"id": 12,
}
],
200,
)
if "api/command" in url:
return MockResponse(
[
{
"name": "RescanMovie",
"startedOn": "0001-01-01T00:00:00Z",
"stateChangeTime": "2014-02-05T05:09:09.2366139Z",
"sendUpdatesToClient": "true",
"state": "pending",
"id": 24,
}
],
200,
)
if "api/movie" in url:
return MockResponse(
[
{
"title": "Assassin's Creed",
"sortTitle": "assassins creed",
"sizeOnDisk": 0,
"status": "released",
"overview": "Lynch discovers he is a descendant of...",
"inCinemas": "2016-12-21T00:00:00Z",
"images": [
{
"coverType": "poster",
"url": (
"/radarr/MediaCover/1/poster.jpg"
"?lastWrite=636200219330000000"
),
},
{
"coverType": "banner",
"url": (
"/radarr/MediaCover/1/banner.jpg"
"?lastWrite=636200219340000000"
),
},
],
"website": "https://www.ubisoft.com/en-US/",
"downloaded": "false",
"year": 2016,
"hasFile": "false",
"youTubeTrailerId": "pgALJgMjXN4",
"studio": "20th Century Fox",
"path": "/path/to/Assassin's Creed (2016)",
"profileId": 6,
"monitored": "true",
"runtime": 115,
"lastInfoSync": "2017-01-23T22:05:32.365337Z",
"cleanTitle": "assassinscreed",
"imdbId": "tt2094766",
"tmdbId": 121856,
"titleSlug": "assassins-creed-121856",
"genres": ["Action", "Adventure", "Fantasy", "Science Fiction"],
"tags": [],
"added": "2017-01-14T20:18:52.938244Z",
"ratings": {"votes": 711, "value": 5.2},
"alternativeTitles": ["Assassin's Creed: The IMAX Experience"],
"qualityProfileId": 6,
"id": 1,
}
],
200,
)
if "api/diskspace" in url:
return MockResponse(
[
{
"path": "/data",
"label": "",
"freeSpace": 282500067328,
"totalSpace": 499738734592,
}
],
200,
)
if "api/system/status" in url:
return MockResponse(
{
"version": "0.2.0.210",
"buildTime": "2017-01-22T23:12:49Z",
"isDebug": "false",
"isProduction": "true",
"isAdmin": "false",
"isUserInteractive": "false",
"startupPath": "/path/to/radarr",
"appData": "/path/to/radarr/data",
"osVersion": "4.8.13.1",
"isMonoRuntime": "true",
"isMono": "true",
"isLinux": "true",
"isOsx": "false",
"isWindows": "false",
"branch": "develop",
"authentication": "forms",
"sqliteVersion": "3.16.2",
"urlBase": "",
"runtimeVersion": (
"4.6.1 (Stable 4.6.1.3/abb06f1 Mon Oct 3 07:57:59 UTC 2016)"
),
},
200,
)
return MockResponse({"error": "Unauthorized"}, 401)
async def test_diskspace_no_paths(hass):
"""Test getting all disk space."""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": [],
"monitored_conditions": ["diskspace"],
}
}
with patch(
"requests.get",
side_effect=mocked_requests_get,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_disk_space")
assert entity is not None
assert "263.10" == entity.state
assert "mdi:harddisk" == entity.attributes["icon"]
assert DATA_GIGABYTES == entity.attributes["unit_of_measurement"]
assert "Radarr Disk Space" == entity.attributes["friendly_name"]
assert "263.10/465.42GB (56.53%)" == entity.attributes["/data"]
async def test_diskspace_paths(hass):
"""Test getting diskspace for included paths."""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["diskspace"],
}
}
with patch(
"requests.get",
side_effect=mocked_requests_get,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_disk_space")
assert entity is not None
assert "263.10" == entity.state
assert "mdi:harddisk" == entity.attributes["icon"]
assert DATA_GIGABYTES == entity.attributes["unit_of_measurement"]
assert "Radarr Disk Space" == entity.attributes["friendly_name"]
assert "263.10/465.42GB (56.53%)" == entity.attributes["/data"]
async def test_commands(hass):
"""Test getting running commands."""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["commands"],
}
}
with patch(
"requests.get",
side_effect=mocked_requests_get,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_commands")
assert entity is not None
assert 1 == int(entity.state)
assert "mdi:code-braces" == entity.attributes["icon"]
assert "Commands" == entity.attributes["unit_of_measurement"]
assert "Radarr Commands" == entity.attributes["friendly_name"]
assert "pending" == entity.attributes["RescanMovie"]
async def test_movies(hass):
"""Test getting the number of movies."""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["movies"],
}
}
with patch(
"requests.get",
side_effect=mocked_requests_get,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_movies")
assert entity is not None
assert 1 == int(entity.state)
assert "mdi:television" == entity.attributes["icon"]
assert "Movies" == entity.attributes["unit_of_measurement"]
assert "Radarr Movies" == entity.attributes["friendly_name"]
assert "false" == entity.attributes["Assassin's Creed (2016)"]
async def test_upcoming_multiple_days(hass):
"""Test the upcoming movies for multiple days."""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["upcoming"],
}
}
with patch(
"requests.get",
side_effect=mocked_requests_get,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_upcoming")
assert entity is not None
assert 1 == int(entity.state)
assert "mdi:television" == entity.attributes["icon"]
assert "Movies" == entity.attributes["unit_of_measurement"]
assert "Radarr Upcoming" == entity.attributes["friendly_name"]
assert "2017-01-27T00:00:00Z" == entity.attributes["Resident Evil (2017)"]
@pytest.mark.skip
async def test_upcoming_today(hass):
"""Test filtering for a single day.
Radarr needs to respond with at least 2 days.
"""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "1",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["upcoming"],
}
}
with patch(
"requests.get",
side_effect=mocked_requests_get,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_upcoming")
assert 1 == int(entity.state)
assert "mdi:television" == entity.attributes["icon"]
assert "Movies" == entity.attributes["unit_of_measurement"]
assert "Radarr Upcoming" == entity.attributes["friendly_name"]
assert "2017-01-27T00:00:00Z" == entity.attributes["Resident Evil (2017)"]
async def test_system_status(hass):
"""Test the getting of the system status."""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "2",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["status"],
}
}
with patch(
"requests.get",
side_effect=mocked_requests_get,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_status")
assert entity is not None
assert "0.2.0.210" == entity.state
assert "mdi:information" == entity.attributes["icon"]
assert "Radarr Status" == entity.attributes["friendly_name"]
assert "4.8.13.1" == entity.attributes["osVersion"]
async def test_ssl(hass):
"""Test SSL being enabled."""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "1",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["upcoming"],
"ssl": "true",
}
}
with patch(
"requests.get",
side_effect=mocked_requests_get,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_upcoming")
assert entity is not None
assert 1 == int(entity.state)
assert "mdi:television" == entity.attributes["icon"]
assert "Movies" == entity.attributes["unit_of_measurement"]
assert "Radarr Upcoming" == entity.attributes["friendly_name"]
assert "2017-01-27T00:00:00Z" == entity.attributes["Resident Evil (2017)"]
async def test_exception_handling(hass):
"""Test exception being handled."""
config = {
"sensor": {
"platform": "radarr",
"api_key": "foo",
"days": "1",
"unit": DATA_GIGABYTES,
"include_paths": ["/data"],
"monitored_conditions": ["upcoming"],
}
}
with patch(
"requests.get",
side_effect=mocked_exception,
):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
entity = hass.states.get("sensor.radarr_upcoming")
assert entity is not None
assert "unavailable" == entity.state
|
import ipaddress
import logging
import re
from bravia_tv import BraviaRC
from bravia_tv.braviarc import NoIPControl
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_PIN
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import ( # pylint:disable=unused-import
ATTR_CID,
ATTR_MAC,
ATTR_MODEL,
BRAVIARC,
CLIENTID_PREFIX,
CONF_IGNORED_SOURCES,
DOMAIN,
NICKNAME,
)
_LOGGER = logging.getLogger(__name__)
def host_valid(host):
"""Return True if hostname or IP address is valid."""
try:
if ipaddress.ip_address(host).version in (4, 6):
return True
except ValueError:
disallowed = re.compile(r"[^a-zA-Z\d\-]")
return all(x and not disallowed.search(x) for x in host.split("."))
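# Hypothetical examples: host_valid("192.168.1.10") and host_valid("bravia.local")
# both return True, while host_valid("not valid!") returns False.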
class BraviaTVConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for BraviaTV integration."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize."""
self.braviarc = None
self.host = None
self.title = None
self.mac = None
async def init_device(self, pin):
"""Initialize Bravia TV device."""
await self.hass.async_add_executor_job(
self.braviarc.connect, pin, CLIENTID_PREFIX, NICKNAME
)
connected = await self.hass.async_add_executor_job(self.braviarc.is_connected)
if not connected:
raise CannotConnect()
system_info = await self.hass.async_add_executor_job(
self.braviarc.get_system_info
)
if not system_info:
raise ModelNotSupported()
await self.async_set_unique_id(system_info[ATTR_CID].lower())
self._abort_if_unique_id_configured()
self.title = system_info[ATTR_MODEL]
self.mac = system_info[ATTR_MAC]
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Bravia TV options callback."""
return BraviaTVOptionsFlowHandler(config_entry)
async def async_step_import(self, user_input=None):
"""Handle configuration by yaml file."""
self.host = user_input[CONF_HOST]
self.braviarc = BraviaRC(self.host)
try:
await self.init_device(user_input[CONF_PIN])
except CannotConnect:
_LOGGER.error("Import aborted, cannot connect to %s", self.host)
return self.async_abort(reason="cannot_connect")
except NoIPControl:
_LOGGER.error("IP Control is disabled in the TV settings")
return self.async_abort(reason="no_ip_control")
except ModelNotSupported:
_LOGGER.error("Import aborted, your TV is not supported")
return self.async_abort(reason="unsupported_model")
user_input[CONF_MAC] = self.mac
return self.async_create_entry(title=self.title, data=user_input)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if host_valid(user_input[CONF_HOST]):
self.host = user_input[CONF_HOST]
self.braviarc = BraviaRC(self.host)
return await self.async_step_authorize()
errors[CONF_HOST] = "invalid_host"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Required(CONF_HOST, default=""): str}),
errors=errors,
)
async def async_step_authorize(self, user_input=None):
"""Get PIN from the Bravia TV device."""
errors = {}
if user_input is not None:
try:
await self.init_device(user_input[CONF_PIN])
except CannotConnect:
errors["base"] = "cannot_connect"
except ModelNotSupported:
errors["base"] = "unsupported_model"
else:
user_input[CONF_HOST] = self.host
user_input[CONF_MAC] = self.mac
return self.async_create_entry(title=self.title, data=user_input)
# Connecting with the PIN "0000" to start the pairing process on the TV.
try:
await self.hass.async_add_executor_job(
self.braviarc.connect, "0000", CLIENTID_PREFIX, NICKNAME
)
except NoIPControl:
return self.async_abort(reason="no_ip_control")
return self.async_show_form(
step_id="authorize",
data_schema=vol.Schema({vol.Required(CONF_PIN, default=""): str}),
errors=errors,
)
class BraviaTVOptionsFlowHandler(config_entries.OptionsFlow):
"""Config flow options for Bravia TV."""
def __init__(self, config_entry):
"""Initialize Bravia TV options flow."""
self.braviarc = None
self.config_entry = config_entry
self.pin = config_entry.data[CONF_PIN]
self.ignored_sources = config_entry.options.get(CONF_IGNORED_SOURCES)
self.source_list = []
async def async_step_init(self, user_input=None):
"""Manage the options."""
self.braviarc = self.hass.data[DOMAIN][self.config_entry.entry_id][BRAVIARC]
connected = await self.hass.async_add_executor_job(self.braviarc.is_connected)
if not connected:
await self.hass.async_add_executor_job(
self.braviarc.connect, self.pin, CLIENTID_PREFIX, NICKNAME
)
content_mapping = await self.hass.async_add_executor_job(
self.braviarc.load_source_list
)
self.source_list = [*content_mapping]
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Optional(
CONF_IGNORED_SOURCES, default=self.ignored_sources
): cv.multi_select(self.source_list)
}
),
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class ModelNotSupported(exceptions.HomeAssistantError):
"""Error to indicate not supported model."""
|
import statsmodels.api as sm
from statsmodels.api import OLS, GLS, WLS
class LinearEstimator(object):
"""
A simple linear model built on statsmodels.
"""
def __init__(self, graph, estimator_type="linear", **kwargs):
self._supported_models = {"linear": OLS, "OLS": OLS, "GLS": GLS, "WLS": WLS}
if estimator_type not in self._supported_models.keys():
raise NotImplementedError(
"We currently only support OLS, GLS, and WLS. Please specify which you would like to use."
)
else:
self.estimator = self._supported_models[estimator_type]
def _model(self, X, Y, Z, data, **kwargs):
exog = sm.add_constant(data[[X] + list(Z)])
endog = data[Y]
return self.estimator(endog=endog, exog=exog, **kwargs)
def fit(self, X, Y, Z, data, **kwargs):
self.estimator = self._model(X, Y, Z, data, **kwargs).fit()
self.ate = self.estimator.params[X]
return self
def _get_ate(self):
return self.ate
def summary(self):
return self.estimator.summary()
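# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The synthetic data
# and column names "X", "Y", "Z1" are illustrative assumptions; `graph` is
# accepted by the constructor but not used by this estimator.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(0)
    n = 1000
    z = rng.normal(size=n)                 # observed confounder
    x = z + rng.normal(size=n)             # treatment
    y = 2.0 * x + z + rng.normal(size=n)   # outcome; true effect of X is 2
    data = pd.DataFrame({"X": x, "Y": y, "Z1": z})

    est = LinearEstimator(graph=None).fit("X", "Y", ["Z1"], data)
    print("Estimated ATE:", est._get_ate())  # expected to be close to 2.0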
|
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import DOMAIN
CONF_PINS = "pins"
CONF_TYPE = "digital"
CONF_NEGATE = "negate"
CONF_INITIAL = "initial"
PIN_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_INITIAL, default=False): cv.boolean,
vol.Optional(CONF_NEGATE, default=False): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_PINS, default={}): vol.Schema({cv.positive_int: PIN_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Arduino platform."""
board = hass.data[DOMAIN]
pins = config[CONF_PINS]
switches = []
for pinnum, pin in pins.items():
switches.append(ArduinoSwitch(pinnum, pin, board))
add_entities(switches)
class ArduinoSwitch(SwitchEntity):
"""Representation of an Arduino switch."""
def __init__(self, pin, options, board):
"""Initialize the Pin."""
self._pin = pin
self._name = options[CONF_NAME]
self.pin_type = CONF_TYPE
self.direction = "out"
self._state = options[CONF_INITIAL]
if options[CONF_NEGATE]:
self.turn_on_handler = board.set_digital_out_low
self.turn_off_handler = board.set_digital_out_high
else:
self.turn_on_handler = board.set_digital_out_high
self.turn_off_handler = board.set_digital_out_low
board.set_mode(self._pin, self.direction, self.pin_type)
(self.turn_on_handler if self._state else self.turn_off_handler)(pin)
@property
def name(self):
"""Get the name of the pin."""
return self._name
@property
def is_on(self):
"""Return true if pin is high/on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the pin to high/on."""
self._state = True
self.turn_on_handler(self._pin)
def turn_off(self, **kwargs):
"""Turn the pin to low/off."""
self._state = False
self.turn_off_handler(self._pin)
|
import contextlib
import logging
import os
import shlex
import signal
import subprocess
from radicale import pathutils
from radicale.log import logger
class CollectionLockMixin:
def _acquire_cache_lock(self, ns=""):
if self._storage._lock.locked == "w":
return contextlib.ExitStack()
cache_folder = os.path.join(self._filesystem_path, ".Radicale.cache")
self._storage._makedirs_synced(cache_folder)
lock_path = os.path.join(cache_folder,
".Radicale.lock" + (".%s" % ns if ns else ""))
lock = pathutils.RwLock(lock_path)
return lock.acquire("w")
class StorageLockMixin:
def __init__(self, configuration):
super().__init__(configuration)
folder = self.configuration.get("storage", "filesystem_folder")
lock_path = os.path.join(folder, ".Radicale.lock")
self._lock = pathutils.RwLock(lock_path)
@contextlib.contextmanager
def acquire_lock(self, mode, user=None):
with self._lock.acquire(mode):
yield
# execute hook
hook = self.configuration.get("storage", "hook")
if mode == "w" and hook:
folder = self.configuration.get("storage", "filesystem_folder")
debug = logger.isEnabledFor(logging.DEBUG)
popen_kwargs = dict(
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE if debug else subprocess.DEVNULL,
stderr=subprocess.PIPE if debug else subprocess.DEVNULL,
shell=True, universal_newlines=True, cwd=folder)
# Use new process group for child to prevent terminals
# from sending SIGINT etc.
if os.name == "posix":
# Process group is also used to identify child processes
popen_kwargs["preexec_fn"] = os.setpgrp
elif os.name == "nt":
popen_kwargs["creationflags"] = (
subprocess.CREATE_NEW_PROCESS_GROUP)
command = hook % {"user": shlex.quote(user or "Anonymous")}
logger.debug("Running storage hook")
p = subprocess.Popen(command, **popen_kwargs)
try:
stdout_data, stderr_data = p.communicate()
except BaseException: # e.g. KeyboardInterrupt or SystemExit
p.kill()
p.wait()
raise
finally:
if os.name == "posix":
# Kill remaining children identified by process group
with contextlib.suppress(OSError):
os.killpg(p.pid, signal.SIGKILL)
if stdout_data:
logger.debug("Captured stdout from hook:\n%s", stdout_data)
if stderr_data:
logger.debug("Captured stderr from hook:\n%s", stderr_data)
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, p.args)
|
import numpy as np
from ..core import indexing
from ..core.utils import Frozen, FrozenDict, close_on_error
from ..core.variable import Variable
from .common import AbstractDataStore, BackendArray, BackendEntrypoint
from .file_manager import CachingFileManager
from .locks import HDF5_LOCK, NETCDFC_LOCK, SerializableLock, combine_locks, ensure_lock
from .store import open_backend_dataset_store
# PyNIO can invoke netCDF libraries internally
# Add a dedicated lock in case NCL isn't thread-safe either.
NCL_LOCK = SerializableLock()
PYNIO_LOCK = combine_locks([HDF5_LOCK, NETCDFC_LOCK, NCL_LOCK])
class NioArrayWrapper(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
self.dtype = np.dtype(array.typecode())
def get_array(self, needs_lock=True):
ds = self.datastore._manager.acquire(needs_lock)
return ds.variables[self.variable_name]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.BASIC, self._getitem
)
def _getitem(self, key):
with self.datastore.lock:
array = self.get_array(needs_lock=False)
if key == () and self.ndim == 0:
return array.get_value()
return array[key]
class NioDataStore(AbstractDataStore):
"""Store for accessing datasets via PyNIO"""
def __init__(self, filename, mode="r", lock=None, **kwargs):
import Nio
if lock is None:
lock = PYNIO_LOCK
self.lock = ensure_lock(lock)
self._manager = CachingFileManager(
Nio.open_file, filename, lock=lock, mode=mode, kwargs=kwargs
)
# xarray provides its own support for FillValue,
# so turn off PyNIO's support for the same.
self.ds.set_option("MaskedArrayMode", "MaskedNever")
@property
def ds(self):
return self._manager.acquire()
def open_store_variable(self, name, var):
data = indexing.LazilyOuterIndexedArray(NioArrayWrapper(name, self))
return Variable(var.dimensions, data, var.attributes)
def get_variables(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
def get_attrs(self):
return Frozen(self.ds.attributes)
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def get_encoding(self):
return {
"unlimited_dims": {k for k in self.ds.dimensions if self.ds.unlimited(k)}
}
def close(self):
self._manager.close()
def open_backend_dataset_pynio(
filename_or_obj,
mask_and_scale=True,
decode_times=None,
concat_characters=None,
decode_coords=None,
drop_variables=None,
use_cftime=None,
decode_timedelta=None,
mode="r",
lock=None,
):
store = NioDataStore(
filename_or_obj,
mode=mode,
lock=lock,
)
with close_on_error(store):
ds = open_backend_dataset_store(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
pynio_backend = BackendEntrypoint(open_dataset=open_backend_dataset_pynio)
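# Typical use goes through xarray's public API rather than this module
# directly; a minimal sketch (assuming PyNIO is installed and a GRIB file
# named "data.grib" is present):
#
#     import xarray as xr
#     ds = xr.open_dataset("data.grib", engine="pynio")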
|
from typing import DefaultDict, List, NamedTuple, Set
import pyvera as pv
from homeassistant.components.scene import DOMAIN as SCENE_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.event import call_later
from .const import DOMAIN
class ControllerData(NamedTuple):
"""Controller data."""
controller: pv.VeraController
devices: DefaultDict[str, List[pv.VeraDevice]]
scenes: List[pv.VeraScene]
config_entry: ConfigEntry
def get_configured_platforms(controller_data: ControllerData) -> Set[str]:
"""Get configured platforms for a controller."""
platforms = []
for platform in controller_data.devices:
platforms.append(platform)
if controller_data.scenes:
platforms.append(SCENE_DOMAIN)
return set(platforms)
def get_controller_data(
hass: HomeAssistant, config_entry: ConfigEntry
) -> ControllerData:
"""Get controller data from hass data."""
return hass.data[DOMAIN][config_entry.entry_id]
def set_controller_data(
hass: HomeAssistant, config_entry: ConfigEntry, data: ControllerData
) -> None:
"""Set controller data in hass data."""
hass.data[DOMAIN][config_entry.entry_id] = data
class SubscriptionRegistry(pv.AbstractSubscriptionRegistry):
"""Manages polling for data from vera."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the object."""
super().__init__()
self._hass = hass
self._cancel_poll = None
def start(self) -> None:
"""Start polling for data."""
self.stop()
self._schedule_poll(1)
def stop(self) -> None:
"""Stop polling for data."""
if self._cancel_poll:
self._cancel_poll()
self._cancel_poll = None
def _schedule_poll(self, delay: float) -> None:
self._cancel_poll = call_later(self._hass, delay, self._run_poll_server)
def _run_poll_server(self, now) -> None:
delay = 1
# Long poll for changes. The downstream API instructs the endpoint to wait a
# minimum of 200ms before returning data and a maximum of 9s before timing out.
if not self.poll_server_once():
# If an error was encountered, wait a bit longer before trying again.
delay = 60
self._schedule_poll(delay)
|
from __future__ import (absolute_import, division)
from copy import deepcopy
import numpy as np
from numpy import dot, zeros, eye
from scipy.linalg import cholesky, qr, pinv
from filterpy.common import pretty_str
class SquareRootKalmanFilter(object):
"""
Create a Kalman filter which uses a square root implementation.
This uses the square root of the state covariance matrix, which doubles
the numerical precision of the filter, thereby reducing the effect
of round-off errors.
It is likely that you do not need to use this algorithm; we understand
divergence issues very well now. However, if you expect the covariance
matrix P to vary by 20 or more orders of magnitude then perhaps this
will be useful to you, as the square root will vary by 10 orders
of magnitude. From my point of view this is merely a 'reference'
algorithm; I have not used this code in real world software. Brown[1]
has a useful discussion of when you might need to use the square
root form of this algorithm.
You are responsible for setting the various state variables to
reasonable values; the defaults below will not give you a functional
filter.
Parameters
----------
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
This is used to set the default size of P, Q, and u
dim_z : int
Number of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
dim_u : int (optional)
size of the control input, if it is being used.
Default value of 0 indicates it is not used.
Attributes
----------
x : numpy.array(dim_x, 1)
State estimate
P : numpy.array(dim_x, dim_x)
State covariance matrix
x_prior : numpy.array(dim_x, 1)
Prior (predicted) state estimate. The *_prior and *_post attributes
are for convenience; they store the prior and posterior of the
current epoch. Read Only.
P_prior : numpy.array(dim_x, dim_x)
Prior (predicted) state covariance matrix. Read Only.
x_post : numpy.array(dim_x, 1)
Posterior (updated) state estimate. Read Only.
P_post : numpy.array(dim_x, dim_x)
Posterior (updated) state covariance matrix. Read Only.
z : numpy.array
Last measurement used in update(). Read only.
R : numpy.array(dim_z, dim_z)
Measurement noise matrix
Q : numpy.array(dim_x, dim_x)
Process noise matrix
F : numpy.array()
State Transition matrix
H : numpy.array(dim_z, dim_x)
Measurement function
y : numpy.array
Residual of the update step. Read only.
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
S : numpy.array
System uncertainty projected to measurement space. Read only.
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
References
----------
[1] Robert Grover Brown. Introduction to Random Signals and Applied
Kalman Filtering. Wiley and sons, 2012.
"""
def __init__(self, dim_x, dim_z, dim_u=0):
if dim_x < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_z < 1:
raise ValueError('dim_z must be 1 or greater')
if dim_u < 0:
raise ValueError('dim_u must be 0 or greater')
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.x = zeros((dim_x, 1)) # state
self._P = eye(dim_x) # uncertainty covariance
self._P1_2 = eye(dim_x) # sqrt uncertainty covariance
self._Q = eye(dim_x) # process uncertainty
self._Q1_2 = eye(dim_x) # sqrt process uncertainty
self.B = 0. # control transition matrix
self.F = np.eye(dim_x) # state transition matrix
self.H = np.zeros((dim_z, dim_x)) # Measurement function
self._R1_2 = eye(dim_z) # sqrt measurement uncertainty
self._R = eye(dim_z) # measurement uncertainty
self.z = np.array([[None]*self.dim_z]).T
self.K = 0.
self.S = 0.
# Residual is computed during the innovation (update) step. We
# save it so that in case you want to inspect it for various
# purposes
self.y = zeros((dim_z, 1))
# identity matrix.
self._I = np.eye(dim_x)
self.M = np.zeros((dim_z + dim_x, dim_z + dim_x))
# copy prior and posterior
self.x_prior = np.copy(self.x)
self._P1_2_prior = np.copy(self._P1_2)
self.x_post = np.copy(self.x)
self._P1_2_post = np.copy(self._P1_2)
def update(self, z, R2=None):
"""
Add a new measurement (z) to the kalman filter. If z is None, nothing
is changed.
Parameters
----------
z : np.array
measurement for this update.
R2 : np.array, scalar, or None
Sqrt of measurement noise. Optionally provide to override the
measurement noise for this one call, otherwise the filter's stored
sqrt measurement noise (R1_2) is used.
"""
if z is None:
self.z = np.array([[None]*self.dim_z]).T
self.x_post = self.x.copy()
self._P1_2_post = np.copy(self._P1_2)
return
if R2 is None:
R2 = self._R1_2
elif np.isscalar(R2):
R2 = eye(self.dim_z) * R2
# rename for convenience
dim_z = self.dim_z
M = self.M
M[0:dim_z, 0:dim_z] = R2.T
M[dim_z:, 0:dim_z] = dot(self.H, self._P1_2).T
M[dim_z:, dim_z:] = self._P1_2.T
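# QR-factorizing the stacked block matrix M propagates square roots directly:
# the upper-triangular factor S holds (transposed) the square root of the
# innovation covariance in its leading dim_z-by-dim_z block, and the block
# S[0:dim_z, dim_z:] carries the information used to form the Kalman gain.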
_, self.S = qr(M)
self.K = self.S[0:dim_z, dim_z:].T
N = self.S[0:dim_z, 0:dim_z].T
# y = z - Hx
# error (residual) between measurement and prediction
self.y = z - dot(self.H, self.x)
# x = x + Ky
# predict new x with residual scaled by the kalman gain
self.x += dot(self.K, pinv(N)).dot(self.y)
self._P1_2 = self.S[dim_z:, dim_z:].T
self.z = deepcopy(z)
self.x_post = self.x.copy()
self._P1_2_post = np.copy(self._P1_2)
def predict(self, u=0):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array, optional
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
"""
# x = Fx + Bu
self.x = dot(self.F, self.x) + dot(self.B, u)
# P = FPF' + Q
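# Instead of forming P explicitly, QR-factorize [F @ P1_2, Q1_2]^T; the
# transpose of the resulting upper-triangular factor is a square root of
# F P F' + Q.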
_, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T)
self._P1_2 = P2[:self.dim_x, :self.dim_x].T
# copy prior
self.x_prior = np.copy(self.x)
self._P1_2_prior = np.copy(self._P1_2)
def residual_of(self, z):
""" returns the residual for the given measurement (z). Does not alter
the state of the filter.
"""
return z - dot(self.H, self.x)
def measurement_of_state(self, x):
""" Helper function that converts a state into a measurement.
Parameters
----------
x : np.array
kalman state vector
Returns
-------
z : np.array
measurement corresponding to the given state
"""
return dot(self.H, x)
@property
def Q(self):
""" Process uncertainty"""
return dot(self._Q1_2.T, self._Q1_2)
@property
def Q1_2(self):
""" Sqrt Process uncertainty"""
return self._Q1_2
@Q.setter
def Q(self, value):
""" Process uncertainty"""
self._Q = value
self._Q1_2 = cholesky(self._Q, lower=True)
@property
def P(self):
""" covariance matrix"""
return dot(self._P1_2.T, self._P1_2)
@property
def P_prior(self):
""" covariance matrix of the prior"""
return dot(self._P1_2_prior.T, self._P1_2_prior)
@property
def P_post(self):
""" covariance matrix of the posterior"""
return dot(self._P1_2_post.T, self._P1_2_post)
@property
def P1_2(self):
""" sqrt of covariance matrix"""
return self._P1_2
@P.setter
def P(self, value):
""" covariance matrix"""
self._P = value
self._P1_2 = cholesky(self._P, lower=True)
@property
def R(self):
""" measurement uncertainty"""
return dot(self._R1_2.T, self._R1_2)
@property
def R1_2(self):
""" sqrt of measurement uncertainty"""
return self._R1_2
@R.setter
def R(self, value):
""" measurement uncertainty"""
self._R = value
self._R1_2 = cholesky(self._R, lower=True)
def __repr__(self):
return '\n'.join([
'SquareRootKalmanFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dim_u', self.dim_u),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('H', self.H),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('S', self.S),
pretty_str('M', self.M),
pretty_str('B', self.B),
])
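# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): a 1D constant-
# velocity track with noisy position measurements. All numbers are
# illustrative; setting P, Q and R through the properties fills in the
# square-root forms via Cholesky factorization.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    skf = SquareRootKalmanFilter(dim_x=2, dim_z=1)
    skf.x = np.array([[0.], [0.]])            # position and velocity
    skf.F = np.array([[1., 1.], [0., 1.]])    # constant-velocity transition
    skf.H = np.array([[1., 0.]])              # measure position only
    skf.P = np.eye(2) * 10.                   # initial covariance
    skf.Q = np.eye(2) * 0.001                 # process noise
    skf.R = np.array([[5.]])                  # measurement noise

    np.random.seed(0)
    for t in range(20):
        z = t + np.random.randn() * np.sqrt(5.)
        skf.predict()
        skf.update(z)
    print(skf.x)                              # estimate should track t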
|
from crispy_forms.layout import Div, Field
from crispy_forms.utils import TEMPLATE_PACK
from django import forms
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from weblate.trans.defines import USERNAME_LENGTH
from weblate.trans.filter import FILTERS
from weblate.trans.util import sort_unicode
from weblate.utils.validators import validate_username
class UsernameField(forms.CharField):
default_validators = [validate_username]
def __init__(self, *args, **kwargs):
params = {
"max_length": USERNAME_LENGTH,
"help_text": _(
"Username may only contain letters, "
"numbers or the following characters: @ . + - _"
),
"label": _("Username"),
"required": True,
}
params.update(kwargs)
self.valid = None
super().__init__(*args, **params)
class SortedSelectMixin:
"""Mixin for Select widgets to sort choices alphabetically."""
def optgroups(self, name, value, attrs=None):
groups = super().optgroups(name, value, attrs)
return sort_unicode(groups, lambda val: str(val[1][0]["label"]))
class ColorWidget(forms.RadioSelect):
def __init__(self, attrs=None, choices=()):
attrs = {**(attrs or {}), "class": "color_edit"}
super().__init__(attrs, choices)
class SortedSelectMultiple(SortedSelectMixin, forms.SelectMultiple):
"""Wrapper class to sort choices alphabetically."""
class SortedSelect(SortedSelectMixin, forms.Select):
"""Wrapper class to sort choices alphabetically."""
class ContextDiv(Div):
def __init__(self, *fields, **kwargs):
self.context = kwargs.pop("context", {})
super().__init__(*fields, **kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
template = self.get_template_name(template_pack)
return render_to_string(template, self.context)
class SearchField(Field):
def __init__(self, *args, **kwargs):
kwargs["template"] = "snippets/query-field.html"
super().__init__(*args, **kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
extra_context = {"custom_filter_list": self.get_search_query_choices()}
return super().render(
form, form_style, context, template_pack, extra_context, **kwargs
)
def get_search_query_choices(self):
"""Return all filtering choices for query field."""
filter_keys = [
"nottranslated",
"todo",
"translated",
"fuzzy",
"suggestions",
"variants",
"labels",
"context",
"nosuggestions",
"comments",
"allchecks",
"approved",
"unapproved",
]
result = [
(key, FILTERS.get_filter_name(key), FILTERS.get_filter_query(key))
for key in filter_keys
]
return result
class FilterForm(forms.Form):
project = forms.SlugField(required=False)
component = forms.SlugField(required=False)
lang = forms.SlugField(required=False)
user = UsernameField(required=False)
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy.random as random
import numpy as np
import matplotlib.pyplot as plt
from filterpy.common import Saver
from filterpy.kalman import KalmanFilter, InformationFilter
DO_PLOT = False
def test_1d_0P():
global inf
f = KalmanFilter(dim_x=2, dim_z=1)
inf = InformationFilter(dim_x=2, dim_z=1)
f.x = np.array([[2.],
[0.]]) # initial state (location and velocity)
f.F = (np.array([[1., 1.],
[0., 1.]])) # state transition matrix
f.H = np.array([[1., 0.]]) # Measurement function
f.R = np.array([[5.]]) # state uncertainty
f.Q = np.eye(2)*0.0001 # process uncertainty
f.P = np.diag([20., 20.])
inf.x = f.x.copy()
inf.F = f.F.copy()
inf.H = np.array([[1.,0.]]) # Measurement function
inf.R_inv *= 1./5 # state uncertainty
inf.Q = np.eye(2)*0.0001
inf.P_inv = 0.000000000000000000001
#inf.P_inv = inv(f.P)
m = []
r = []
r2 = []
zs = []
for t in range (50):
# create measurement = t plus white noise
z = t + random.randn()* np.sqrt(5)
zs.append(z)
# perform kalman filtering
f.predict()
f.update(z)
inf.predict()
inf.update(z)
try:
print(t, inf.P)
except Exception:
pass
# save data
r.append (f.x[0,0])
r2.append (inf.x[0,0])
m.append(z)
#assert np.allclose(f.x, inf.x), f'{t}: {f.x.T} {inf.x.T}'
if DO_PLOT:
plt.plot(m)
plt.plot(r)
plt.plot(r2)
def test_1d():
global inf
f = KalmanFilter(dim_x=2, dim_z=1)
inf = InformationFilter(dim_x=2, dim_z=1)
# ensure __repr__ doesn't assert
str(inf)
f.x = np.array([[2.],
[0.]]) # initial state (location and velocity)
inf.x = f.x.copy()
f.F = (np.array([[1.,1.],
[0.,1.]])) # state transition matrix
inf.F = f.F.copy()
f.H = np.array([[1.,0.]]) # Measurement function
inf.H = np.array([[1.,0.]]) # Measurement function
f.R *= 5 # state uncertainty
inf.R_inv *= 1./5 # state uncertainty
f.Q *= 0.0001 # process uncertainty
inf.Q *= 0.0001
m = []
r = []
r2 = []
zs = []
s = Saver(inf)
for t in range (100):
# create measurement = t plus white noise
z = t + random.randn()*20
zs.append(z)
# perform kalman filtering
f.update(z)
f.predict()
inf.update(z)
inf.predict()
# save data
r.append (f.x[0,0])
r2.append (inf.x[0,0])
m.append(z)
print(inf.y)
s.save()
assert abs(f.x[0,0] - inf.x[0,0]) < 1.e-12
if DO_PLOT:
plt.plot(m)
plt.plot(r)
plt.plot(r2)
def test_against_kf():
inv = np.linalg.inv
dt = 1.0
IM = np.eye(2)
Q = np.array([[.25, 0.5], [0.5, 1]])
F = np.array([[1, dt], [0, 1]])
#QI = inv(Q)
P = inv(IM)
from filterpy.kalman import InformationFilter
from filterpy.common import Q_discrete_white_noise
#f = IF2(2, 1)
r_std = .2
R = np.array([[r_std*r_std]])
RI = inv(R)
'''f.F = F.copy()
f.H = np.array([[1, 0.]])
f.RI = RI.copy()
f.Q = Q.copy()
f.IM = IM.copy()'''
kf = KalmanFilter(2, 1)
kf.F = F.copy()
kf.H = np.array([[1, 0.]])
kf.R = R.copy()
kf.Q = Q.copy()
f0 = InformationFilter(2, 1)
f0.F = F.copy()
f0.H = np.array([[1, 0.]])
f0.R_inv = RI.copy()
f0.Q = Q.copy()
#f.IM = np.zeros((2,2))
for i in range(1, 50):
z = i + (np.random.rand() * r_std)
f0.predict()
#f.predict()
kf.predict()
f0.update(z)
#f.update(z)
kf.update(z)
print(f0.x.T, kf.x.T)
assert np.allclose(f0.x, kf.x)
#assert np.allclose(f.x, kf.x)
if __name__ == "__main__":
DO_PLOT = True
#test_1d_0P()
test_1d()
test_against_kf()
|
from datetime import timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_NAME,
CONF_PIN,
CONF_RESOURCE,
HTTP_OK,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_PIN): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the aREST binary sensor."""
resource = config[CONF_RESOURCE]
pin = config[CONF_PIN]
device_class = config.get(CONF_DEVICE_CLASS)
try:
response = requests.get(resource, timeout=10).json()
except requests.exceptions.MissingSchema:
_LOGGER.error(
"Missing resource or schema in configuration. Add http:// to your URL"
)
return False
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to device at %s", resource)
return False
arest = ArestData(resource, pin)
add_entities(
[
ArestBinarySensor(
arest,
resource,
config.get(CONF_NAME, response[CONF_NAME]),
device_class,
pin,
)
],
True,
)
class ArestBinarySensor(BinarySensorEntity):
"""Implement an aREST binary sensor for a pin."""
def __init__(self, arest, resource, name, device_class, pin):
"""Initialize the aREST device."""
self.arest = arest
self._resource = resource
self._name = name
self._device_class = device_class
self._pin = pin
if self._pin is not None:
request = requests.get(f"{self._resource}/mode/{self._pin}/i", timeout=10)
if request.status_code != HTTP_OK:
_LOGGER.error("Can't set mode of %s", self._resource)
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return bool(self.arest.data.get("state"))
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
def update(self):
"""Get the latest data from aREST API."""
self.arest.update()
class ArestData:
"""Class for handling the data retrieval for pins."""
def __init__(self, resource, pin):
"""Initialize the aREST data object."""
self._resource = resource
self._pin = pin
self.data = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from aREST device."""
try:
response = requests.get(f"{self._resource}/digital/{self._pin}", timeout=10)
self.data = {"state": response.json()["return_value"]}
except requests.exceptions.ConnectionError:
_LOGGER.error("No route to device '%s'", self._resource)
|
import logging
from typing import Any, Dict, Optional
from aiohttp import ClientConnectionError, ClientResponseError
from bond_api import Bond
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_HOST,
CONF_NAME,
HTTP_UNAUTHORIZED,
)
from .const import CONF_BOND_ID
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA_USER = vol.Schema(
{vol.Required(CONF_HOST): str, vol.Required(CONF_ACCESS_TOKEN): str}
)
DATA_SCHEMA_DISCOVERY = vol.Schema({vol.Required(CONF_ACCESS_TOKEN): str})
async def _validate_input(data: Dict[str, Any]) -> str:
"""Validate the user input allows us to connect."""
try:
bond = Bond(data[CONF_HOST], data[CONF_ACCESS_TOKEN])
version = await bond.version()
# call to non-version API is needed to validate authentication
await bond.devices()
except ClientConnectionError as error:
raise InputValidationError("cannot_connect") from error
except ClientResponseError as error:
if error.status == HTTP_UNAUTHORIZED:
raise InputValidationError("invalid_auth") from error
raise InputValidationError("unknown") from error
except Exception as error:
_LOGGER.exception("Unexpected exception")
raise InputValidationError("unknown") from error
# Return unique ID from the hub to be stored in the config entry.
bond_id = version.get("bondid")
if not bond_id:
raise InputValidationError("old_firmware")
return bond_id
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Bond."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
_discovered: Optional[dict] = None
async def async_step_zeroconf(
self, discovery_info: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by zeroconf discovery."""
name: str = discovery_info[CONF_NAME]
host: str = discovery_info[CONF_HOST]
bond_id = name.partition(".")[0]
await self.async_set_unique_id(bond_id)
self._abort_if_unique_id_configured({CONF_HOST: host})
self._discovered = {
CONF_HOST: host,
CONF_BOND_ID: bond_id,
}
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context.update({"title_placeholders": self._discovered})
return await self.async_step_confirm()
async def async_step_confirm(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Handle confirmation flow for discovered bond hub."""
errors = {}
if user_input is not None:
data = user_input.copy()
data[CONF_HOST] = self._discovered[CONF_HOST]
try:
return await self._try_create_entry(data)
except InputValidationError as error:
errors["base"] = error.base
return self.async_show_form(
step_id="confirm",
data_schema=DATA_SCHEMA_DISCOVERY,
errors=errors,
description_placeholders=self._discovered,
)
async def async_step_user(
self, user_input: Dict[str, Any] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
try:
return await self._try_create_entry(user_input)
except InputValidationError as error:
errors["base"] = error.base
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA_USER, errors=errors
)
async def _try_create_entry(self, data: Dict[str, Any]) -> Dict[str, Any]:
bond_id = await _validate_input(data)
await self.async_set_unique_id(bond_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=bond_id, data=data)
class InputValidationError(exceptions.HomeAssistantError):
"""Error to indicate we cannot proceed due to invalid input."""
def __init__(self, base: str):
"""Initialize with error base."""
super().__init__()
self.base = base
|
import pytest
import numpy as np
import tensorflow as tf
import tensornetwork as tn
from examples.wavefunctions import wavefunctions
@pytest.mark.parametrize("num_sites", [2, 3, 4])
def test_expval(num_sites):
op = np.kron(np.array([[1.0, 0.0], [0.0, -1.0]]), np.eye(2)).reshape([2] * 4)
op = tf.convert_to_tensor(op)
for j in range(num_sites):
psi = np.zeros([2] * num_sites)
psi_vec = psi.reshape((2**num_sites,))
psi_vec[2**j] = 1.0
psi = tf.convert_to_tensor(psi)
for i in range(num_sites):
res = wavefunctions.expval(psi, op, i, pbc=True)
if i == num_sites - 1 - j:
np.testing.assert_allclose(res, -1.0)
else:
np.testing.assert_allclose(res, 1.0)
@pytest.mark.parametrize("num_sites", [2, 3, 4])
def test_apply_op(num_sites):
psi1 = np.zeros([2] * num_sites)
psi1_vec = psi1.reshape((2**num_sites,))
psi1_vec[0] = 1.0
psi1 = tf.convert_to_tensor(psi1)
for j in range(num_sites):
psi2 = np.zeros([2] * num_sites)
psi2_vec = psi2.reshape((2**num_sites,))
psi2_vec[2**j] = 1.0
psi2 = tf.convert_to_tensor(psi2)
opX = tf.convert_to_tensor(np.array([[0.0, 1.0], [1.0, 0.0]]))
psi2 = wavefunctions.apply_op(psi2, opX, num_sites - 1 - j)
res = wavefunctions.inner(psi1, psi2)
np.testing.assert_allclose(res, 1.0)
@pytest.mark.parametrize("num_sites,phys_dim,graph",
[(2, 3, False), (2, 3, True), (5, 2, False)])
def test_evolve_trotter(num_sites, phys_dim, graph):
tf.random.set_seed(10)
psi = tf.complex(
tf.random.normal([phys_dim] * num_sites, dtype=tf.float64),
tf.random.normal([phys_dim] * num_sites, dtype=tf.float64))
h = tf.complex(
tf.random.normal((phys_dim**2, phys_dim**2), dtype=tf.float64),
tf.random.normal((phys_dim**2, phys_dim**2), dtype=tf.float64))
h = 0.5 * (h + tf.linalg.adjoint(h))
h = tf.reshape(h, (phys_dim, phys_dim, phys_dim, phys_dim))
H = [h] * (num_sites - 1)
norm1 = wavefunctions.inner(psi, psi)
en1 = sum(wavefunctions.expval(psi, H[i], i) for i in range(num_sites - 1))
if graph:
psi, t = wavefunctions.evolve_trotter_defun(psi, H, 0.001, 10)
else:
psi, t = wavefunctions.evolve_trotter(psi, H, 0.001, 10)
norm2 = wavefunctions.inner(psi, psi)
en2 = sum(wavefunctions.expval(psi, H[i], i) for i in range(num_sites - 1))
np.testing.assert_allclose(t, 0.01)
np.testing.assert_almost_equal(norm1 / norm2, 1.0)
np.testing.assert_almost_equal(en1 / en2, 1.0, decimal=2)
@pytest.mark.parametrize("num_sites,phys_dim,graph",
[(2, 3, False), (2, 3, True), (5, 2, False)])
def test_evolve_trotter_euclidean(num_sites, phys_dim, graph):
tf.random.set_seed(10)
psi = tf.complex(
tf.random.normal([phys_dim] * num_sites, dtype=tf.float64),
tf.random.normal([phys_dim] * num_sites, dtype=tf.float64))
h = tf.complex(
tf.random.normal((phys_dim**2, phys_dim**2), dtype=tf.float64),
tf.random.normal((phys_dim**2, phys_dim**2), dtype=tf.float64))
h = 0.5 * (h + tf.linalg.adjoint(h))
h = tf.reshape(h, (phys_dim, phys_dim, phys_dim, phys_dim))
H = [h] * (num_sites - 1)
norm1 = wavefunctions.inner(psi, psi)
en1 = sum(wavefunctions.expval(psi, H[i], i) for i in range(num_sites - 1))
if graph:
psi, t = wavefunctions.evolve_trotter_defun(psi, H, 0.1, 10, euclidean=True)
else:
psi, t = wavefunctions.evolve_trotter(psi, H, 0.1, 10, euclidean=True)
norm2 = wavefunctions.inner(psi, psi)
en2 = sum(wavefunctions.expval(psi, H[i], i) for i in range(num_sites - 1))
np.testing.assert_allclose(t, 1.0)
np.testing.assert_almost_equal(norm2, 1.0)
assert en2.numpy() / norm2.numpy() < en1.numpy() / norm1.numpy()
|
import os
import inspect
import platform
import shutil
import unittest
from Tests.utils.utils import get_test_path
from kalliope.core.ConfigurationManager import SettingLoader
from kalliope.core.Models import Singleton
from kalliope.core.Models import Resources
from kalliope.core.Models.settings.Options import Options
from kalliope.core.Models.settings.Player import Player
from kalliope.core.Models.settings.RestAPI import RestAPI
from kalliope.core.Models.settings.Settings import Settings
from kalliope.core.Models.settings.Stt import Stt
from kalliope.core.Models.settings.Trigger import Trigger
from kalliope.core.Models.settings.Tts import Tts
class TestSettingLoader(unittest.TestCase):
def setUp(self):
self.settings_file_to_test = get_test_path("settings/settings_test.yml")
self.settings_dict = {
'rest_api':
{'allowed_cors_origin': False,
'active': True,
'login': 'admin',
'password_protected': True,
'password': 'secret', 'port': 5000},
'default_trigger': 'snowboy',
'triggers': [{'snowboy': {'pmdl_file': 'trigger/snowboy/resources/kalliope-FR-6samples.pmdl'}}],
'default_player': 'mplayer',
'players': [{'mplayer': {}}, {'pyalsaaudio': {"device": "default"}}],
'speech_to_text': [{'google': {'language': 'fr-FR'}}],
'cache_path': '/tmp/kalliope_tts_cache',
'resource_directory': {
'stt': '/tmp/kalliope/tests/kalliope_resources_dir/stt',
'tts': '/tmp/kalliope/tests/kalliope_resources_dir/tts',
'neuron': '/tmp/kalliope/tests/kalliope_resources_dir/neurons',
'trigger': '/tmp/kalliope/tests/kalliope_resources_dir/trigger'},
'default_text_to_speech': 'pico2wave',
'default_speech_to_text': 'google',
'text_to_speech': [
{'pico2wave': {'cache': True, 'language': 'fr-FR'}},
{'googletts': {'language': 'fr', 'cache': True}}
],
'var_files': ["../Tests/settings/variables.yml"],
'options': {'energy_threshold': 3000, 'deaf': True, 'mute': False},
'hooks': {'on_waiting_for_trigger': 'test',
'on_stop_listening': None,
'on_start_listening': None,
'on_order_found': None,
'on_start': ['on-start-synapse', 'bring-led-on'],
'on_undeaf': [],
'on_triggered': ['on-triggered-synapse'],
'on_deaf': [],
'on_mute': [],
'on_unmute': [],
'on_order_not_found': [
'order-not-found-synapse'],
'on_processed_synapses': None,
'on_start_speaking': None,
'on_stop_speaking': None
},
'send_anonymous_usage_stats': 0
}
# Init the folders, otherwise it raises an exception
os.makedirs("/tmp/kalliope/tests/kalliope_resources_dir/neurons")
os.makedirs("/tmp/kalliope/tests/kalliope_resources_dir/stt")
os.makedirs("/tmp/kalliope/tests/kalliope_resources_dir/tts")
os.makedirs("/tmp/kalliope/tests/kalliope_resources_dir/trigger")
def tearDown(self):
# Cleanup
shutil.rmtree('/tmp/kalliope/tests/kalliope_resources_dir')
Singleton._instances = {}
def test_singleton(self):
s1 = SettingLoader(file_path=self.settings_file_to_test)
s2 = SettingLoader(file_path=self.settings_file_to_test)
self.assertTrue(s1.settings is s2.settings)
def test_get_yaml_config(self):
sl = SettingLoader(file_path=self.settings_file_to_test)
self.maxDiff = None
self.assertDictEqual(sl.yaml_config, self.settings_dict)
def test_get_settings(self):
settings_object = Settings()
settings_object.default_tts_name = "pico2wave"
settings_object.default_stt_name = "google"
settings_object.default_trigger_name = "snowboy"
settings_object.default_player_name = "mplayer"
tts1 = Tts(name="pico2wave", parameters={'cache': True, 'language': 'fr-FR'})
tts2 = Tts(name="googletts", parameters={'language': 'fr', 'cache': True})
settings_object.ttss = [tts1, tts2]
stt = Stt(name="google", parameters={'language': 'fr-FR'})
settings_object.stts = [stt]
trigger1 = Trigger(name="snowboy",
parameters={'pmdl_file': 'trigger/snowboy/resources/kalliope-FR-6samples.pmdl'})
settings_object.triggers = [trigger1]
player1 = Player(name="mplayer", parameters={})
player2 = Player(name="pyalsaaudio", parameters={"device": "default"})
settings_object.players = [player1, player2]
settings_object.rest_api = RestAPI(password_protected=True, active=True,
login="admin", password="secret", port=5000,
allowed_cors_origin=False)
settings_object.cache_path = '/tmp/kalliope_tts_cache'
resources = Resources(neuron_folder="/tmp/kalliope/tests/kalliope_resources_dir/neurons",
stt_folder="/tmp/kalliope/tests/kalliope_resources_dir/stt",
tts_folder="/tmp/kalliope/tests/kalliope_resources_dir/tts",
trigger_folder="/tmp/kalliope/tests/kalliope_resources_dir/trigger")
settings_object.resources = resources
settings_object.variables = {
"author": "Lamonf",
"test_number": 60,
"test": "kalliope"
}
settings_object.options = Options(recognizer_multiplier=1.0,
recognizer_energy_ratio=1.5,
recognizer_recording_timeout=15.0,
recognizer_recording_timeout_with_silence=3.0,
deaf=True,
mute=False)
settings_object.machine = platform.machine()
settings_object.hooks = {'on_waiting_for_trigger': 'test',
'on_stop_listening': None,
'on_start_listening': None,
'on_order_found': None,
'on_start': ['on-start-synapse', 'bring-led-on'],
'on_undeaf': [],
'on_triggered': ['on-triggered-synapse'],
'on_deaf': [],
'on_mute': [],
'on_unmute': [],
'on_order_not_found': [
'order-not-found-synapse'],
'on_processed_synapses': None,
'on_start_speaking': None,
'on_stop_speaking': None,
}
settings_object.tracker_anonymous_usage_stats_id = "not_defined_id"
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(settings_object, sl.settings)
def test_get_default_speech_to_text(self):
expected_default_speech_to_text = "google"
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(expected_default_speech_to_text, sl._get_default_speech_to_text(self.settings_dict))
def test_get_default_text_to_speech(self):
expected_default_text_to_speech = "pico2wave"
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(expected_default_text_to_speech, sl._get_default_text_to_speech(self.settings_dict))
def test_get_default_trigger(self):
expected_default_trigger = "snowboy"
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(expected_default_trigger, sl._get_default_trigger(self.settings_dict))
def test_get_stts(self):
stt = Stt(name="google", parameters={'language': 'fr-FR'})
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual([stt], sl._get_stts(self.settings_dict))
def test_get_ttss(self):
tts1 = Tts(name="pico2wave", parameters={'cache': True, 'language': 'fr-FR'})
tts2 = Tts(name="googletts", parameters={'language': 'fr', 'cache': True})
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual([tts1, tts2], sl._get_ttss(self.settings_dict))
def test_get_triggers(self):
trigger1 = Trigger(name="snowboy",
parameters={'pmdl_file': 'trigger/snowboy/resources/kalliope-FR-6samples.pmdl'})
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual([trigger1], sl._get_triggers(self.settings_dict))
def test_get_players(self):
player1 = Player(name="mplayer",
parameters={})
player2 = Player(name="pyalsaaudio",
parameters={'device': 'default'})
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual([player1, player2], sl._get_players(self.settings_dict))
def test_get_rest_api(self):
expected_rest_api = RestAPI(password_protected=True, active=True,
login="admin", password="secret", port=5000,
allowed_cors_origin=False)
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(expected_rest_api, sl._get_rest_api(self.settings_dict))
def test_get_cache_path(self):
expected_cache_path = '/tmp/kalliope_tts_cache'
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(expected_cache_path, sl._get_cache_path(self.settings_dict))
def test_get_resources(self):
resources = Resources(neuron_folder="/tmp/kalliope/tests/kalliope_resources_dir/neurons",
stt_folder="/tmp/kalliope/tests/kalliope_resources_dir/stt",
tts_folder="/tmp/kalliope/tests/kalliope_resources_dir/tts",
trigger_folder="/tmp/kalliope/tests/kalliope_resources_dir/trigger")
expected_resource = resources
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(expected_resource, sl._get_resources(self.settings_dict))
def test_get_variables(self):
expected_result = {
"author": "Lamonf",
"test_number": 60,
"test": "kalliope"
}
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(expected_result,
sl._get_variables(self.settings_dict))
def test_get_options(self):
expected_result = Options(recognizer_multiplier=1.0,
recognizer_energy_ratio=1.5,
recognizer_recording_timeout=15.0,
recognizer_recording_timeout_with_silence=3.0,
deaf=True,
mute=False)
sl = SettingLoader(file_path=self.settings_file_to_test)
self.assertEqual(expected_result,
sl._get_options(self.settings_dict))
def test_get_hooks(self):
# test with only one hook set
settings = dict()
settings["hooks"] = {
"on_start": "test_synapse"
}
expected_dict = {
"on_start": "test_synapse",
"on_waiting_for_trigger": None,
"on_triggered": None,
"on_start_listening": None,
"on_stop_listening": None,
"on_order_found": None,
"on_order_not_found": None,
"on_deaf": None,
"on_undeaf": None,
"on_mute": None,
"on_unmute": None,
"on_start_speaking": None,
"on_stop_speaking": None
}
returned_dict = SettingLoader._get_hooks(settings)
self.assertEqual(returned_dict, expected_dict)
# test with no hook set
settings = dict()
expected_dict = {
"on_start": None,
"on_waiting_for_trigger": None,
"on_triggered": None,
"on_start_listening": None,
"on_stop_listening": None,
"on_order_found": None,
"on_order_not_found": None,
"on_deaf": None,
"on_undeaf": None,
"on_mute": None,
"on_unmute": None,
"on_start_speaking": None,
"on_stop_speaking": None
}
returned_dict = SettingLoader._get_hooks(settings)
self.assertEqual(returned_dict, expected_dict)
if __name__ == '__main__':
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(TestSettingLoader("test_get_hooks"))
# runner = unittest.TextTestRunner()
# runner.run(suite)
|
from datetime import timedelta
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.dt import utcnow
from .const import (
ATTR_BLACK_DRUM_COUNTER,
ATTR_BLACK_DRUM_REMAINING_LIFE,
ATTR_BLACK_DRUM_REMAINING_PAGES,
ATTR_CYAN_DRUM_COUNTER,
ATTR_CYAN_DRUM_REMAINING_LIFE,
ATTR_CYAN_DRUM_REMAINING_PAGES,
ATTR_DRUM_COUNTER,
ATTR_DRUM_REMAINING_LIFE,
ATTR_DRUM_REMAINING_PAGES,
ATTR_ICON,
ATTR_LABEL,
ATTR_MAGENTA_DRUM_COUNTER,
ATTR_MAGENTA_DRUM_REMAINING_LIFE,
ATTR_MAGENTA_DRUM_REMAINING_PAGES,
ATTR_MANUFACTURER,
ATTR_UNIT,
ATTR_UPTIME,
ATTR_YELLOW_DRUM_COUNTER,
ATTR_YELLOW_DRUM_REMAINING_LIFE,
ATTR_YELLOW_DRUM_REMAINING_PAGES,
DOMAIN,
SENSOR_TYPES,
)
ATTR_COUNTER = "counter"
ATTR_FIRMWARE = "firmware"
ATTR_MODEL = "model"
ATTR_REMAINING_PAGES = "remaining_pages"
ATTR_SERIAL = "serial"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add Brother entities from a config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
sensors = []
device_info = {
"identifiers": {(DOMAIN, coordinator.data[ATTR_SERIAL])},
"name": coordinator.data[ATTR_MODEL],
"manufacturer": ATTR_MANUFACTURER,
"model": coordinator.data[ATTR_MODEL],
"sw_version": coordinator.data.get(ATTR_FIRMWARE),
}
for sensor in SENSOR_TYPES:
if sensor in coordinator.data:
sensors.append(BrotherPrinterSensor(coordinator, sensor, device_info))
async_add_entities(sensors, False)
class BrotherPrinterSensor(CoordinatorEntity):
"""Define an Brother Printer sensor."""
def __init__(self, coordinator, kind, device_info):
"""Initialize."""
super().__init__(coordinator)
self._name = f"{coordinator.data[ATTR_MODEL]} {SENSOR_TYPES[kind][ATTR_LABEL]}"
self._unique_id = f"{coordinator.data[ATTR_SERIAL].lower()}_{kind}"
self._device_info = device_info
self.kind = kind
self._attrs = {}
@property
def name(self):
"""Return the name."""
return self._name
@property
def state(self):
"""Return the state."""
if self.kind == ATTR_UPTIME:
uptime = utcnow() - timedelta(seconds=self.coordinator.data.get(self.kind))
return uptime.replace(microsecond=0).isoformat()
return self.coordinator.data.get(self.kind)
@property
def device_class(self):
"""Return the class of this sensor."""
if self.kind == ATTR_UPTIME:
return DEVICE_CLASS_TIMESTAMP
return None
@property
def device_state_attributes(self):
"""Return the state attributes."""
remaining_pages = None
drum_counter = None
if self.kind == ATTR_DRUM_REMAINING_LIFE:
remaining_pages = ATTR_DRUM_REMAINING_PAGES
drum_counter = ATTR_DRUM_COUNTER
elif self.kind == ATTR_BLACK_DRUM_REMAINING_LIFE:
remaining_pages = ATTR_BLACK_DRUM_REMAINING_PAGES
drum_counter = ATTR_BLACK_DRUM_COUNTER
elif self.kind == ATTR_CYAN_DRUM_REMAINING_LIFE:
remaining_pages = ATTR_CYAN_DRUM_REMAINING_PAGES
drum_counter = ATTR_CYAN_DRUM_COUNTER
elif self.kind == ATTR_MAGENTA_DRUM_REMAINING_LIFE:
remaining_pages = ATTR_MAGENTA_DRUM_REMAINING_PAGES
drum_counter = ATTR_MAGENTA_DRUM_COUNTER
elif self.kind == ATTR_YELLOW_DRUM_REMAINING_LIFE:
remaining_pages = ATTR_YELLOW_DRUM_REMAINING_PAGES
drum_counter = ATTR_YELLOW_DRUM_COUNTER
if remaining_pages and drum_counter:
self._attrs[ATTR_REMAINING_PAGES] = self.coordinator.data.get(
remaining_pages
)
self._attrs[ATTR_COUNTER] = self.coordinator.data.get(drum_counter)
return self._attrs
@property
def icon(self):
"""Return the icon."""
return SENSOR_TYPES[self.kind][ATTR_ICON]
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return SENSOR_TYPES[self.kind][ATTR_UNIT]
@property
def device_info(self):
"""Return the device info."""
return self._device_info
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return True
|
import sys
from queue import Empty
import mock
import pytest
from paasta_tools.deployd.common import ServiceInstance
class FakePyinotify: # pragma: no cover
class ProcessEvent:
pass
@property
def WatchManager():
pass
@property
def EventsCodes():
pass
@property
def Notifier():
pass
class Event:
pass
# This module is only available on linux
# and we will be mocking it in the unit tests anyway
# so this just creates it as a dummy module to prevent
# the ImportError
sys.modules["pyinotify"] = FakePyinotify
from paasta_tools.deployd.master import DeployDaemon # noqa
from paasta_tools.deployd.master import main # noqa
class TestDeployDaemon:
@pytest.fixture(autouse=True)
def setUp(self):
with mock.patch(
"paasta_tools.deployd.master.PaastaQueue", autospec=True
), mock.patch(
"paasta_tools.deployd.master.DelayDeadlineQueue", autospec=True
), mock.patch(
"paasta_tools.deployd.master.get_metrics_interface", autospec=True
), mock.patch(
"paasta_tools.deployd.master.get_marathon_clients_from_config",
autospec=True,
), mock.patch(
"paasta_tools.deployd.master.load_system_paasta_config", autospec=True
) as mock_config_getter, mock.patch(
"paasta_tools.deployd.master.KazooClient", autospec=True
):
self.mock_config = mock.Mock(
get_deployd_log_level=mock.Mock(return_value="INFO"),
get_deployd_number_workers=mock.Mock(return_value=5),
get_deployd_big_bounce_rate=mock.Mock(return_value=10),
get_cluster=mock.Mock(return_value="westeros-prod"),
get_log_writer=mock.Mock(return_value={"driver": None}),
get_deployd_startup_oracle_enabled=mock.Mock(return_value=False),
get_deployd_use_zk_queue=mock.Mock(return_value=False),
)
mock_config_getter.return_value = self.mock_config
self.deployd = DeployDaemon()
def test_run(self):
with mock.patch(
"paasta_tools.deployd.master.ZookeeperPool", autospec=True
), mock.patch(
"paasta_tools.deployd.master.PaastaLeaderElection", autospec=True
) as mock_election_class:
mock_election = mock.Mock()
mock_election_class.return_value = mock_election
self.deployd.run()
assert mock_election_class.called
mock_election.run.assert_called_with(self.deployd.startup)
@pytest.mark.parametrize("use_zk_queue", [True, False])
def test_startup(self, use_zk_queue):
assert not hasattr(self.deployd, "is_leader")
assert not self.deployd.started
with mock.patch(
"paasta_tools.deployd.master.QueueAndWorkerMetrics", autospec=True
) as mock_q_metrics, mock.patch(
"paasta_tools.deployd.master.DeployDaemon.start_watchers", autospec=True
) as mock_start_watchers, mock.patch(
"paasta_tools.deployd.master.DeployDaemon.prioritise_bouncing_services",
autospec=True,
) as mock_prioritise_bouncing_services, mock.patch(
"paasta_tools.deployd.master.DeployDaemon.add_all_services", autospec=True
) as mock_add_all_services, mock.patch(
"paasta_tools.deployd.master.DeployDaemon.start_workers",
autospec=True,
side_effect=lambda self: setattr(
self, "workers", []
), # setattr because you can't do assignment in a lambda
) as mock_start_workers, mock.patch(
"paasta_tools.deployd.master.DeployDaemon.main_loop", autospec=True
) as mock_main_loop:
self.mock_config.get_deployd_use_zk_queue.return_value = use_zk_queue
self.deployd.startup()
assert self.deployd.started
assert self.deployd.is_leader
mock_q_metrics.assert_called_with(
self.deployd.instances_to_bounce,
self.deployd.workers,
"westeros-prod",
self.deployd.metrics,
)
assert mock_q_metrics.return_value.start.called
assert mock_start_watchers.called
assert mock_add_all_services.called == (not use_zk_queue)
assert not mock_prioritise_bouncing_services.called
assert mock_start_workers.called
assert mock_main_loop.called
self.deployd.config.get_deployd_startup_oracle_enabled = mock.Mock(
return_value=True
)
self.deployd.startup()
assert mock_prioritise_bouncing_services.called
def test_main_loop(self):
with mock.patch("time.sleep", autospec=True) as mock_sleep, mock.patch(
"paasta_tools.deployd.master.DeployDaemon.all_watchers_running",
autospec=True,
) as mock_all_watchers_running, mock.patch(
"paasta_tools.deployd.master.DeployDaemon.all_workers_dead", autospec=True
) as mock_all_workers_dead, mock.patch(
"paasta_tools.deployd.master.DeployDaemon.check_and_start_workers",
autospec=True,
) as mock_check_and_start_workers:
mock_all_workers_dead.return_value = False
mock_all_watchers_running.return_value = True
self.deployd.control.get.return_value = "ABORT"
self.deployd.main_loop()
assert not mock_sleep.called
assert not mock_check_and_start_workers.called
mock_all_workers_dead.return_value = True
mock_all_watchers_running.return_value = True
self.deployd.control.get.return_value = None
with pytest.raises(SystemExit):
self.deployd.main_loop()
assert not mock_sleep.called
assert not mock_check_and_start_workers.called
mock_all_workers_dead.return_value = False
mock_all_watchers_running.return_value = False
self.deployd.control.get.return_value = None
with pytest.raises(SystemExit):
self.deployd.main_loop()
assert not mock_sleep.called
assert not mock_check_and_start_workers.called
mock_all_workers_dead.return_value = False
mock_all_watchers_running.return_value = True
mock_sleep.side_effect = [None, LoopBreak]
self.deployd.control.get.side_effect = Empty
with pytest.raises(LoopBreak):
self.deployd.main_loop()
assert mock_sleep.call_count == 2
assert mock_check_and_start_workers.call_count == 2
def test_all_watchers_running(self):
mock_watchers = [
mock.Mock(is_alive=mock.Mock(return_value=True)),
mock.Mock(is_alive=mock.Mock(return_value=True)),
]
self.deployd.watcher_threads = mock_watchers
assert self.deployd.all_watchers_running()
mock_watchers = [
mock.Mock(is_alive=mock.Mock(return_value=True)),
mock.Mock(is_alive=mock.Mock(return_value=False)),
]
self.deployd.watcher_threads = mock_watchers
assert not self.deployd.all_watchers_running()
mock_watchers = [
mock.Mock(is_alive=mock.Mock(return_value=False)),
mock.Mock(is_alive=mock.Mock(return_value=False)),
]
self.deployd.watcher_threads = mock_watchers
assert not self.deployd.all_watchers_running()
def test_all_workers_dead(self):
mock_workers = [
mock.Mock(is_alive=mock.Mock(return_value=True)),
mock.Mock(is_alive=mock.Mock(return_value=True)),
]
self.deployd.workers = mock_workers
assert not self.deployd.all_workers_dead()
mock_workers = [
mock.Mock(is_alive=mock.Mock(return_value=True)),
mock.Mock(is_alive=mock.Mock(return_value=False)),
]
self.deployd.workers = mock_workers
assert not self.deployd.all_workers_dead()
mock_workers = [
mock.Mock(is_alive=mock.Mock(return_value=False)),
mock.Mock(is_alive=mock.Mock(return_value=False)),
]
self.deployd.workers = mock_workers
assert self.deployd.all_workers_dead()
def test_check_and_start_workers(self):
with mock.patch(
"paasta_tools.deployd.master.PaastaDeployWorker", autospec=True
) as mock_paasta_worker:
mock_worker_instance = mock.Mock()
mock_paasta_worker.return_value = mock_worker_instance
self.deployd.metrics = None
mock_alive_worker = mock.Mock(is_alive=mock.Mock(return_value=True))
mock_dead_worker = mock.Mock(is_alive=mock.Mock(return_value=False))
self.deployd.workers = [mock_alive_worker] * 5
self.deployd.check_and_start_workers()
assert not mock_paasta_worker.called
self.deployd.workers = [mock_alive_worker] * 3 + [mock_dead_worker] * 2
self.deployd.check_and_start_workers()
assert mock_paasta_worker.call_count == 2
assert mock_worker_instance.start.call_count == 2
def test_stop(self):
self.deployd.stop()
self.deployd.control.put.assert_called_with("ABORT")
def test_start_workers(self):
with mock.patch(
"paasta_tools.deployd.master.PaastaDeployWorker", autospec=True
) as mock_paasta_worker:
self.deployd.metrics = mock.Mock()
self.deployd.start_workers()
assert mock_paasta_worker.call_count == 5
def test_prioritise_bouncing_services(self):
with mock.patch(
"paasta_tools.deployd.master.get_service_instances_that_need_bouncing",
autospec=True,
) as mock_get_service_instances_that_need_bouncing, mock.patch(
"time.time", autospec=True, return_value=1
):
mock_changed_instances = (x for x in {"universe.c137", "universe.c138"})
mock_get_service_instances_that_need_bouncing.return_value = (
mock_changed_instances
)
self.deployd.prioritise_bouncing_services()
mock_get_service_instances_that_need_bouncing.assert_called_with(
self.deployd.marathon_clients, "/nail/etc/services"
)
calls = [
mock.call(
ServiceInstance(
service="universe",
instance="c138",
watcher="DeployDaemon",
bounce_by=1,
wait_until=1,
failures=0,
processed_count=0,
enqueue_time=1,
bounce_start_time=1,
)
),
mock.call(
ServiceInstance(
service="universe",
instance="c137",
watcher="DeployDaemon",
bounce_by=1,
wait_until=1,
failures=0,
processed_count=0,
enqueue_time=1,
bounce_start_time=1,
)
),
]
self.deployd.instances_to_bounce.put.assert_has_calls(calls, any_order=True)
def test_start_watchers(self):
class FakeWatchers: # pragma: no cover
class PaastaWatcher:
def __init__(self, *args, **kwargs):
pass
def start(self):
pass
@property
def is_ready(self):
return True
class FakeWatcher(PaastaWatcher):
pass
class FakeWatcher2(PaastaWatcher):
pass
with mock.patch(
"paasta_tools.deployd.master.watchers", autospec=False, new=FakeWatchers
), mock.patch("time.sleep", autospec=True):
mock_zk = mock.Mock()
self.deployd.zk = mock_zk
mock_start = mock.Mock()
FakeWatchers.PaastaWatcher.start = mock_start
self.deployd.config.get_disabled_watchers = mock.Mock()
self.deployd.config.get_disabled_watchers.return_value = []
self.deployd.start_watchers()
assert mock_start.call_count == 2
self.deployd.config.get_disabled_watchers.return_value = ["FakeWatcher2"]
self.deployd.start_watchers()
assert mock_start.call_count == 3
FakeWatchers.PaastaWatcher.is_ready = False
with pytest.raises(SystemExit):
self.deployd.start_watchers()
def test_main():
with mock.patch(
"paasta_tools.deployd.master.DeployDaemon", autospec=True
) as mock_deployd_class, mock.patch(
"time.sleep", autospec=True, side_effect=LoopBreak
):
mock_deployd = mock.Mock()
mock_deployd_class.return_value = mock_deployd
with pytest.raises(LoopBreak):
main()
assert mock_deployd_class.called
assert mock_deployd.start.called
class LoopBreak(Exception):
pass
|
from datetime import timedelta
import logging
import dovado
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
DEVICE_DEFAULT_NAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DOMAIN = "dovado"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT): cv.port,
}
)
},
extra=vol.ALLOW_EXTRA,
)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
"""Set up the Dovado component."""
hass.data[DOMAIN] = DovadoData(
dovado.Dovado(
config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD],
config[DOMAIN].get(CONF_HOST),
config[DOMAIN].get(CONF_PORT),
)
)
return True
class DovadoData:
"""Maintain a connection to the router."""
def __init__(self, client):
"""Set up a new Dovado connection."""
self._client = client
self.state = {}
@property
def name(self):
"""Name of the router."""
return self.state.get("product name", DEVICE_DEFAULT_NAME)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update device state."""
try:
self.state = self._client.state or {}
if not self.state:
return False
self.state.update(connected=self.state.get("modem status") == "CONNECTED")
_LOGGER.debug("Received: %s", self.state)
return True
except OSError as error:
_LOGGER.warning("Could not contact the router: %s", error)
@property
def client(self):
"""Dovado client instance."""
return self._client
|
import base64
import logging
import mimetypes
import requests
from requests.auth import HTTPBasicAuth
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_RESOURCE = "https://www.pushsafer.com/api"
_ALLOWED_IMAGES = ["image/gif", "image/jpeg", "image/png"]
CONF_DEVICE_KEY = "private_key"
CONF_TIMEOUT = 15
# Top level attributes in 'data'
ATTR_SOUND = "sound"
ATTR_VIBRATION = "vibration"
ATTR_ICON = "icon"
ATTR_ICONCOLOR = "iconcolor"
ATTR_URL = "url"
ATTR_URLTITLE = "urltitle"
ATTR_TIME2LIVE = "time2live"
ATTR_PRIORITY = "priority"
ATTR_RETRY = "retry"
ATTR_EXPIRE = "expire"
ATTR_ANSWER = "answer"
ATTR_PICTURE1 = "picture1"
# Attributes contained in picture1
ATTR_PICTURE1_URL = "url"
ATTR_PICTURE1_PATH = "path"
ATTR_PICTURE1_USERNAME = "username"
ATTR_PICTURE1_PASSWORD = "password"
ATTR_PICTURE1_AUTH = "auth"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_DEVICE_KEY): cv.string})
def get_service(hass, config, discovery_info=None):
"""Get the Pushsafer.com notification service."""
return PushsaferNotificationService(
config.get(CONF_DEVICE_KEY), hass.config.is_allowed_path
)
class PushsaferNotificationService(BaseNotificationService):
"""Implementation of the notification service for Pushsafer.com."""
def __init__(self, private_key, is_allowed_path):
"""Initialize the service."""
self._private_key = private_key
self.is_allowed_path = is_allowed_path
def send_message(self, message="", **kwargs):
"""Send a message to specified target."""
if kwargs.get(ATTR_TARGET) is None:
targets = ["a"]
_LOGGER.debug("No target specified. Sending push to all")
else:
targets = kwargs.get(ATTR_TARGET)
_LOGGER.debug("%s target(s) specified", len(targets))
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = kwargs.get(ATTR_DATA, {})
# Converting the specified image to base64
picture1 = data.get(ATTR_PICTURE1)
picture1_encoded = ""
if picture1 is not None:
_LOGGER.debug("picture1 is available")
url = picture1.get(ATTR_PICTURE1_URL, None)
local_path = picture1.get(ATTR_PICTURE1_PATH, None)
username = picture1.get(ATTR_PICTURE1_USERNAME)
password = picture1.get(ATTR_PICTURE1_PASSWORD)
auth = picture1.get(ATTR_PICTURE1_AUTH)
if url is not None:
_LOGGER.debug("Loading image from url %s", url)
picture1_encoded = self.load_from_url(url, username, password, auth)
elif local_path is not None:
_LOGGER.debug("Loading image from file %s", local_path)
picture1_encoded = self.load_from_file(local_path)
else:
_LOGGER.warning("missing url or local_path for picture1")
else:
_LOGGER.debug("picture1 is not specified")
payload = {
"k": self._private_key,
"t": title,
"m": message,
"s": data.get(ATTR_SOUND, ""),
"v": data.get(ATTR_VIBRATION, ""),
"i": data.get(ATTR_ICON, ""),
"c": data.get(ATTR_ICONCOLOR, ""),
"u": data.get(ATTR_URL, ""),
"ut": data.get(ATTR_URLTITLE, ""),
"l": data.get(ATTR_TIME2LIVE, ""),
"pr": data.get(ATTR_PRIORITY, ""),
"re": data.get(ATTR_RETRY, ""),
"ex": data.get(ATTR_EXPIRE, ""),
"a": data.get(ATTR_ANSWER, ""),
"p": picture1_encoded,
}
for target in targets:
payload["d"] = target
response = requests.post(_RESOURCE, data=payload, timeout=CONF_TIMEOUT)
if response.status_code != HTTP_OK:
_LOGGER.error("Pushsafer failed with: %s", response.text)
else:
_LOGGER.debug("Push send: %s", response.json())
@classmethod
def get_base64(cls, filebyte, mimetype):
"""Convert the image to the expected base64 string of pushsafer."""
if mimetype not in _ALLOWED_IMAGES:
_LOGGER.warning("%s is a not supported mimetype for images", mimetype)
return None
base64_image = base64.b64encode(filebyte).decode("utf8")
return f"data:{mimetype};base64,{base64_image}"
def load_from_url(self, url=None, username=None, password=None, auth=None):
"""Load image/document/etc from URL."""
if url is not None:
_LOGGER.debug("Downloading image from %s", url)
if username is not None and password is not None:
auth_ = HTTPBasicAuth(username, password)
response = requests.get(url, auth=auth_, timeout=CONF_TIMEOUT)
else:
response = requests.get(url, timeout=CONF_TIMEOUT)
return self.get_base64(response.content, response.headers["content-type"])
_LOGGER.warning("url not found in param")
return None
def load_from_file(self, local_path=None):
"""Load image/document/etc from a local path."""
try:
if local_path is not None:
_LOGGER.debug("Loading image from local path")
if self.is_allowed_path(local_path):
file_mimetype = mimetypes.guess_type(local_path)
_LOGGER.debug("Detected mimetype %s", file_mimetype)
with open(local_path, "rb") as binary_file:
data = binary_file.read()
return self.get_base64(data, file_mimetype[0])
else:
_LOGGER.warning("Local path not found in params!")
except OSError as error:
_LOGGER.error("Can't load from local path: %s", error)
return None
|
import ctypes
import multiprocessing
import sys
import signal
import markups
from os import devnull
from os.path import join
from ReText import datadirs, settings, globalSettings, app_version
from ReText import initializeDataDirs
from ReText.window import ReTextWindow
from PyQt5.QtCore import QCommandLineOption, QCommandLineParser, QFile, \
QFileInfo, QIODevice, QLibraryInfo, QTextStream, QTranslator, Qt
from PyQt5.QtWidgets import QApplication
from PyQt5.QtNetwork import QNetworkProxyFactory
from PyQt5.QtDBus import QDBusConnection, QDBusInterface
def canonicalize(option):
if option == '-':
return option
return QFileInfo(option).canonicalFilePath()
def main():
multiprocessing.set_start_method('spawn')
if markups.__version_tuple__ < (2, ):
sys.exit('Error: ReText needs PyMarkups 2.0 or newer to run.')
# If we're running on Windows without a console, then discard stdout
# and save stderr to a file to facilitate debugging in case of crashes.
if sys.executable.endswith('pythonw.exe'):
sys.stdout = open(devnull, 'w')
sys.stderr = open('stderr.log', 'w')
try:
# See https://github.com/retext-project/retext/issues/399
# and https://launchpad.net/bugs/941826
ctypes.CDLL('libGL.so.1', ctypes.RTLD_GLOBAL)
except OSError:
pass
# Needed for Qt WebEngine on Windows
QApplication.setAttribute(Qt.AA_ShareOpenGLContexts)
QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps);
app = QApplication(sys.argv)
app.setOrganizationName("ReText project")
app.setApplicationName("ReText")
app.setApplicationDisplayName("ReText")
app.setApplicationVersion(app_version)
app.setOrganizationDomain('mitya57.me')
if hasattr(app, 'setDesktopFileName'): # available since Qt 5.7
app.setDesktopFileName('me.mitya57.ReText.desktop')
QNetworkProxyFactory.setUseSystemConfiguration(True)
parser = QCommandLineParser()
parser.addHelpOption()
parser.addVersionOption()
previewOption = QCommandLineOption('preview',
QApplication.translate('main', 'Open the files in preview mode'))
newWindowOption = QCommandLineOption('new-window',
QApplication.translate('main', 'Create a new window even if there is an existing one'))
parser.addOption(previewOption)
parser.addOption(newWindowOption)
parser.addPositionalArgument('files',
QApplication.translate('main', 'List of files to open'),
'[files...]')
parser.process(app)
filesToOpen = parser.positionalArguments()
initializeDataDirs()
RtTranslator = QTranslator()
for path in datadirs:
if RtTranslator.load('retext_' + globalSettings.uiLanguage,
join(path, 'locale')):
break
QtTranslator = QTranslator()
QtTranslator.load("qtbase_" + globalSettings.uiLanguage,
QLibraryInfo.location(QLibraryInfo.TranslationsPath))
app.installTranslator(RtTranslator)
app.installTranslator(QtTranslator)
print('Using configuration file:', settings.fileName())
if globalSettings.appStyleSheet:
sheetfile = QFile(globalSettings.appStyleSheet)
sheetfile.open(QIODevice.ReadOnly)
app.setStyleSheet(QTextStream(sheetfile).readAll())
sheetfile.close()
window = ReTextWindow()
openInExistingWindow = (globalSettings.openFilesInExistingWindow
and not parser.isSet(newWindowOption))
connection = QDBusConnection.sessionBus()
if connection.isConnected() and openInExistingWindow:
connection.registerObject('/', window, QDBusConnection.ExportAllSlots)
serviceName = 'me.mitya57.ReText'
if not connection.registerService(serviceName) and filesToOpen:
print('Opening the file(s) in the existing window of ReText.')
iface = QDBusInterface(serviceName, '/', '', connection)
for fileName in filesToOpen:
iface.call('openFileWrapper', fileName)
qWidgetIface = QDBusInterface(serviceName, '/', 'org.qtproject.Qt.QWidget', connection)
qWidgetIface.call('raise')
sys.exit(0)
window.show()
# ReText can change directory when loading files, so we
# need to have a list of canonical names before loading
fileNames = list(map(canonicalize, filesToOpen))
readStdIn = False
if globalSettings.openLastFilesOnStartup:
window.restoreLastOpenedFiles()
for fileName in fileNames:
if QFile.exists(fileName):
window.openFileWrapper(fileName)
if parser.isSet(previewOption):
window.actionPreview.setChecked(True)
window.preview(True)
elif fileName == '-':
readStdIn = True
inputData = ''
if readStdIn and sys.stdin is not None:
if sys.stdin.isatty():
print('Reading stdin, press ^D to end...')
inputData = sys.stdin.read()
if inputData or not window.tabWidget.count():
window.createNew(inputData)
signal.signal(signal.SIGINT, lambda sig, frame: window.close())
sys.exit(app.exec())
if __name__ == '__main__':
main()
|
import numpy as np
import six
from chainercv.utils.testing.assertions.assert_is_image import assert_is_image
def assert_is_instance_segmentation_dataset(
dataset, n_fg_class, n_example=None
):
"""Checks if a dataset satisfies instance segmentation dataset APIs.
This function checks if a given dataset satisfies instance segmentation
dataset APIs or not.
    If the dataset does not satisfy the APIs, this function raises an
:class:`AssertionError`.
Args:
dataset: A dataset to be checked.
n_fg_class (int): The number of foreground classes.
n_example (int): The number of examples to be checked.
If this argument is specified, this function picks
            examples randomly and checks them. Otherwise,
this function checks all examples.
"""
assert len(dataset) > 0, 'The length of dataset must be greater than zero.'
if n_example:
for _ in six.moves.range(n_example):
i = np.random.randint(0, len(dataset))
_check_example(dataset[i], n_fg_class)
else:
for i in six.moves.range(len(dataset)):
_check_example(dataset[i], n_fg_class)
def _check_example(example, n_fg_class):
assert len(example) >= 3, \
        'Each example must have at least three elements: ' \
        'img, mask and label.'
img, mask, label = example[:3]
assert_is_image(img, color=True)
_, H, W = img.shape
R = mask.shape[0]
assert isinstance(mask, np.ndarray), \
'mask must be a numpy.ndarray.'
assert isinstance(label, np.ndarray), \
'label must be a numpy.ndarray.'
assert mask.dtype == np.bool, \
'The type of mask must be bool'
assert label.dtype == np.int32, \
'The type of label must be numpy.int32.'
assert mask.shape == (R, H, W), \
'The shape of mask must be (R, H, W).'
assert label.shape == (R,), \
'The shape of label must be (R, ).'
if len(label) > 0:
assert label.min() >= 0 and label.max() < n_fg_class, \
'The value of label must be in [0, n_fg_class - 1].'
|
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
# TODO adjust the data schema to the data that you need
STEP_USER_DATA_SCHEMA = vol.Schema({"host": str, "username": str, "password": str})
class PlaceholderHub:
"""Placeholder class to make tests pass.
TODO Remove this placeholder class and replace with things from your PyPI package.
"""
def __init__(self, host):
"""Initialize."""
self.host = host
async def authenticate(self, username, password) -> bool:
"""Test if we can authenticate with the host."""
return True
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.
"""
# TODO validate the data can be used to set up a connection.
# If your PyPI package is not built with async, pass your methods
# to the executor:
# await hass.async_add_executor_job(
# your_validate_func, data["username"], data["password"]
# )
hub = PlaceholderHub(data["host"])
if not await hub.authenticate(data["username"], data["password"]):
raise InvalidAuth
# If you cannot connect:
# throw CannotConnect
# If the authentication is wrong:
# InvalidAuth
# Return info that you want to store in the config entry.
return {"title": "Name of the device"}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for NEW_NAME."""
VERSION = 1
# TODO pick one of the available connection classes in homeassistant/config_entries.py
CONNECTION_CLASS = config_entries.CONN_CLASS_UNKNOWN
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA
)
errors = {}
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
import re
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_EMAIL, CONF_NAME, DEGREE
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
API_PATH = "/api/torque"
DEFAULT_NAME = "vehicle"
DOMAIN = "torque"
ENTITY_NAME_FORMAT = "{0} {1}"
SENSOR_EMAIL_FIELD = "eml"
SENSOR_NAME_KEY = r"userFullName(\w+)"
SENSOR_UNIT_KEY = r"userUnit(\w+)"
SENSOR_VALUE_KEY = r"k(\w+)"
NAME_KEY = re.compile(SENSOR_NAME_KEY)
UNIT_KEY = re.compile(SENSOR_UNIT_KEY)
VALUE_KEY = re.compile(SENSOR_VALUE_KEY)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_EMAIL): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def convert_pid(value):
"""Convert pid from hex string to integer."""
return int(value, 16)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Torque platform."""
vehicle = config.get(CONF_NAME)
email = config.get(CONF_EMAIL)
sensors = {}
hass.http.register_view(
TorqueReceiveDataView(email, vehicle, sensors, add_entities)
)
return True
class TorqueReceiveDataView(HomeAssistantView):
"""Handle data from Torque requests."""
url = API_PATH
name = "api:torque"
def __init__(self, email, vehicle, sensors, add_entities):
"""Initialize a Torque view."""
self.email = email
self.vehicle = vehicle
self.sensors = sensors
self.add_entities = add_entities
@callback
def get(self, request):
"""Handle Torque data request."""
hass = request.app["hass"]
data = request.query
if self.email is not None and self.email != data[SENSOR_EMAIL_FIELD]:
return
names = {}
units = {}
for key in data:
is_name = NAME_KEY.match(key)
is_unit = UNIT_KEY.match(key)
is_value = VALUE_KEY.match(key)
if is_name:
pid = convert_pid(is_name.group(1))
names[pid] = data[key]
elif is_unit:
pid = convert_pid(is_unit.group(1))
temp_unit = data[key]
if "\\xC2\\xB0" in temp_unit:
temp_unit = temp_unit.replace("\\xC2\\xB0", DEGREE)
units[pid] = temp_unit
elif is_value:
pid = convert_pid(is_value.group(1))
if pid in self.sensors:
self.sensors[pid].async_on_update(data[key])
for pid in names:
if pid not in self.sensors:
self.sensors[pid] = TorqueSensor(
ENTITY_NAME_FORMAT.format(self.vehicle, names[pid]), units.get(pid)
)
hass.async_add_job(self.add_entities, [self.sensors[pid]])
return "OK!"
class TorqueSensor(Entity):
"""Representation of a Torque sensor."""
def __init__(self, name, unit):
"""Initialize the sensor."""
self._name = name
self._unit = unit
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the default icon of the sensor."""
return "mdi:car"
@callback
def async_on_update(self, value):
"""Receive an update."""
self._state = value
self.async_write_ha_state()
|
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.graph import get_cycles, has_path, ordered_nodes, UnorderableGraph
class getCyclesTC(TestCase):
def test_known0(self):
self.assertEqual(get_cycles({1:[2], 2:[3], 3:[1]}), [[1, 2, 3]])
def test_known1(self):
self.assertEqual(get_cycles({1:[2], 2:[3], 3:[1, 4], 4:[3]}), [[1, 2, 3], [3, 4]])
def test_known2(self):
self.assertEqual(get_cycles({1:[2], 2:[3], 3:[0], 0:[]}), [])
class hasPathTC(TestCase):
def test_direct_connection(self):
self.assertEqual(has_path({'A': ['B'], 'B': ['A']}, 'A', 'B'), ['B'])
def test_indirect_connection(self):
self.assertEqual(has_path({'A': ['B'], 'B': ['A', 'C'], 'C': ['B']}, 'A', 'C'), ['B', 'C'])
def test_no_connection(self):
self.assertEqual(has_path({'A': ['B'], 'B': ['A']}, 'A', 'C'), None)
def test_cycle(self):
self.assertEqual(has_path({'A': ['A']}, 'A', 'B'), None)
class ordered_nodesTC(TestCase):
def test_one_item(self):
graph = {'a':[]}
ordered = ordered_nodes(graph)
self.assertEqual(ordered, ('a',))
def test_single_dependency(self):
graph = {'a':['b'], 'b':[]}
ordered = ordered_nodes(graph)
self.assertEqual(ordered, ('a','b'))
graph = {'a':[], 'b':['a']}
ordered = ordered_nodes(graph)
self.assertEqual(ordered, ('b','a'))
def test_two_items_no_dependency(self):
graph = {'a':[], 'b':[]}
ordered = ordered_nodes(graph)
self.assertEqual(ordered, ('a','b'))
def test_three_items_no_dependency(self):
graph = {'a':[], 'b':[], 'c':[]}
ordered = ordered_nodes(graph)
self.assertEqual(ordered, ('a', 'b', 'c'))
def test_three_items_one_dependency(self):
graph = {'a': ['c'], 'b': [], 'c':[]}
ordered = ordered_nodes(graph)
self.assertEqual(ordered, ('a', 'b', 'c'))
def test_three_items_two_dependencies(self):
graph = {'a': ['b'], 'b': ['c'], 'c':[]}
ordered = ordered_nodes(graph)
self.assertEqual(ordered, ('a', 'b', 'c'))
def test_bad_graph(self):
graph = {'a':['b']}
self.assertRaises(UnorderableGraph, ordered_nodes, graph)
if __name__ == "__main__":
unittest_main()
|
import typing
import tensorflow as tf
from keras.engine import Layer
class MatchingLayer(Layer):
"""
Layer that computes a matching matrix between samples in two tensors.
:param normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
:param matching_type: the similarity function for matching
:param kwargs: Standard layer keyword arguments.
Examples:
>>> import matchzoo as mz
>>> layer = mz.layers.MatchingLayer(matching_type='dot',
... normalize=True)
>>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
>>> layer.build([[num_batch, left_len, num_dim],
... [num_batch, right_len, num_dim]])
"""
def __init__(self, normalize: bool = False,
matching_type: str = 'dot', **kwargs):
""":class:`MatchingLayer` constructor."""
super().__init__(**kwargs)
self._normalize = normalize
self._validate_matching_type(matching_type)
self._matching_type = matching_type
self._shape1 = None
self._shape2 = None
@classmethod
def _validate_matching_type(cls, matching_type: str = 'dot'):
valid_matching_type = ['dot', 'mul', 'plus', 'minus', 'concat']
if matching_type not in valid_matching_type:
raise ValueError(f"{matching_type} is not a valid matching type, "
f"{valid_matching_type} expected.")
def build(self, input_shape: list):
"""
Build the layer.
:param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
"""
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingLayer` layer should be called '
'on a list of 2 inputs.')
self._shape1 = input_shape[0]
self._shape2 = input_shape[1]
for idx in 0, 2:
if self._shape1[idx] != self._shape2[idx]:
raise ValueError(
'Incompatible dimensions: '
f'{self._shape1[idx]} != {self._shape2[idx]}.'
f'Layer shapes: {self._shape1}, {self._shape2}.'
)
def call(self, inputs: list, **kwargs) -> typing.Any:
"""
The computation logic of MatchingLayer.
:param inputs: two input tensors.
"""
x1 = inputs[0]
x2 = inputs[1]
if self._matching_type == 'dot':
if self._normalize:
x1 = tf.math.l2_normalize(x1, axis=2)
x2 = tf.math.l2_normalize(x2, axis=2)
return tf.expand_dims(tf.einsum('abd,acd->abc', x1, x2), 3)
else:
if self._matching_type == 'mul':
def func(x, y):
return x * y
elif self._matching_type == 'plus':
def func(x, y):
return x + y
elif self._matching_type == 'minus':
def func(x, y):
return x - y
elif self._matching_type == 'concat':
def func(x, y):
return tf.concat([x, y], axis=3)
else:
raise ValueError(f"Invalid matching type."
f"{self._matching_type} received."
f"Mut be in `dot`, `mul`, `plus`, "
f"`minus` and `concat`.")
x1_exp = tf.stack([x1] * self._shape2[1], 2)
x2_exp = tf.stack([x2] * self._shape1[1], 1)
return func(x1_exp, x2_exp)
def compute_output_shape(self, input_shape: list) -> tuple:
"""
Calculate the layer output shape.
:param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
"""
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingLayer` layer should be called '
'on a list of 2 inputs.')
shape1 = list(input_shape[0])
shape2 = list(input_shape[1])
if len(shape1) != 3 or len(shape2) != 3:
raise ValueError('A `MatchingLayer` layer should be called '
'on 2 inputs with 3 dimensions.')
if shape1[0] != shape2[0] or shape1[2] != shape2[2]:
raise ValueError('A `MatchingLayer` layer should be called '
'on 2 inputs with same 0,2 dimensions.')
if self._matching_type in ['mul', 'plus', 'minus']:
return shape1[0], shape1[1], shape2[1], shape1[2]
elif self._matching_type == 'dot':
return shape1[0], shape1[1], shape2[1], 1
elif self._matching_type == 'concat':
return shape1[0], shape1[1], shape2[1], shape1[2] + shape2[2]
else:
raise ValueError(f"Invalid `matching_type`."
f"{self._matching_type} received."
f"Must be in `mul`, `plus`, `minus` "
f"`dot` and `concat`.")
def get_config(self) -> dict:
"""Get the config dict of MatchingLayer."""
config = {
'normalize': self._normalize,
'matching_type': self._matching_type,
}
base_config = super(MatchingLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
import math
from homeassistant.components.cover import (
ATTR_POSITION,
DOMAIN as COVER_DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
CoverEntity,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import SIGNAL_ADD_ENTITIES
from .insteon_entity import InsteonEntity
from .utils import async_add_insteon_entities
SUPPORTED_FEATURES = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Insteon covers from a config entry."""
def add_entities(discovery_info=None):
"""Add the Insteon entities for the platform."""
async_add_insteon_entities(
hass, COVER_DOMAIN, InsteonCoverEntity, async_add_entities, discovery_info
)
signal = f"{SIGNAL_ADD_ENTITIES}_{COVER_DOMAIN}"
async_dispatcher_connect(hass, signal, add_entities)
add_entities()
class InsteonCoverEntity(InsteonEntity, CoverEntity):
"""A Class for an Insteon cover entity."""
@property
def current_cover_position(self):
"""Return the current cover position."""
if self._insteon_device_group.value is not None:
pos = self._insteon_device_group.value
else:
pos = 0
return int(math.ceil(pos * 100 / 255))
@property
def supported_features(self):
"""Return the supported features for this entity."""
return SUPPORTED_FEATURES
@property
def is_closed(self):
"""Return the boolean response if the node is on."""
return bool(self.current_cover_position)
async def async_open_cover(self, **kwargs):
"""Open cover."""
await self._insteon_device.async_open()
async def async_close_cover(self, **kwargs):
"""Close cover."""
await self._insteon_device.async_close()
async def async_set_cover_position(self, **kwargs):
"""Set the cover position."""
position = int(kwargs[ATTR_POSITION] * 255 / 100)
if position == 0:
await self._insteon_device.async_close()
else:
await self._insteon_device.async_open(
open_level=position, group=self._insteon_device_group.group
)
|
import json
from collections import Counter
from typing import Callable, List
from typing import Counter as CounterType
from pytest import mark
from cerberus import Validator
from cerberus.benchmarks.schemas.overalll_schema_2 import product_schema
from cerberus.benchmarks import DOCUMENTS_PATH
def init_validator():
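    """Return a Validator for the product schema with purge_unknown enabled."""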
return Validator(product_schema, purge_unknown=True)
def load_documents():
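    """Load the benchmark documents from overall_documents_2.json."""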
with (DOCUMENTS_PATH / "overall_documents_2.json").open() as f:
documents = json.load(f)
return documents
def validate_documents(init_validator: Callable, documents: List[dict]) -> None:
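    """Validate every document and print a summary of the most frequent error paths."""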
doc_count = failed_count = 0
error_paths: CounterType[tuple] = Counter()
validator = init_validator()
def count_errors(errors):
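        """Recursively tally leaf error paths, descending into group errors."""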
if errors is None:
return
for error in errors:
if error.is_group_error:
count_errors(error.child_errors)
else:
error_paths[error.schema_path] += 1
for document in documents:
if validator.validated(document) is None:
failed_count += 1
count_errors(validator._errors)
doc_count += 1
print(
f"{failed_count} out of {doc_count} documents failed with "
f"{len(error_paths)} different error leafs."
)
print("Top 3 errors, excluding container errors:")
for path, count in error_paths.most_common(3):
print(f"{count}: {path}")
@mark.benchmark(group="overall-2")
def test_overall_performance_2(benchmark):
benchmark.pedantic(validate_documents, (init_validator, load_documents()), rounds=5)
|
import logging
from homeassistant.const import MATCH_ALL
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from .error import SmartHomeError
from .helpers import AbstractConfig, GoogleEntity, async_get_entities
# Time to wait until the homegraph updates
# https://github.com/actions-on-google/smart-home-nodejs/issues/196#issuecomment-439156639
INITIAL_REPORT_DELAY = 60
_LOGGER = logging.getLogger(__name__)
@callback
def async_enable_report_state(hass: HomeAssistant, google_config: AbstractConfig):
"""Enable state reporting."""
async def async_entity_state_listener(changed_entity, old_state, new_state):
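        """Report a state change for an exposed, supported entity to Google."""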
if not hass.is_running:
return
if not new_state:
return
if not google_config.should_expose(new_state):
return
entity = GoogleEntity(hass, google_config, new_state)
if not entity.is_supported():
return
try:
entity_data = entity.query_serialize()
except SmartHomeError as err:
_LOGGER.debug("Not reporting state for %s: %s", changed_entity, err.code)
return
if old_state:
old_entity = GoogleEntity(hass, google_config, old_state)
# Only report to Google if data that Google cares about has changed
try:
if entity_data == old_entity.query_serialize():
return
except SmartHomeError:
# Happens if old state could not be serialized.
# In that case the data is different and should be
# reported.
pass
_LOGGER.debug("Reporting state for %s: %s", changed_entity, entity_data)
await google_config.async_report_state_all(
{"devices": {"states": {changed_entity: entity_data}}}
)
    async def initial_report(_now):
        """Initially report all states."""
entities = {}
for entity in async_get_entities(hass, google_config):
if not entity.should_expose():
continue
try:
entities[entity.entity_id] = entity.query_serialize()
except SmartHomeError:
continue
if not entities:
return
await google_config.async_report_state_all({"devices": {"states": entities}})
    async_call_later(hass, INITIAL_REPORT_DELAY, initial_report)
return hass.helpers.event.async_track_state_change(
MATCH_ALL, async_entity_state_listener
)
|
import logging
from typing import Any, Dict
from aioazuredevops.client import DevOpsClient
import aiohttp
from homeassistant.components.azure_devops.const import (
CONF_ORG,
CONF_PAT,
CONF_PROJECT,
DATA_AZURE_DEVOPS_CLIENT,
DOMAIN,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the Azure DevOps components."""
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up Azure DevOps from a config entry."""
client = DevOpsClient()
try:
if entry.data[CONF_PAT] is not None:
await client.authorize(entry.data[CONF_PAT], entry.data[CONF_ORG])
if not client.authorized:
_LOGGER.warning(
"Could not authorize with Azure DevOps. You may need to update your token"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "reauth"},
data=entry.data,
)
)
return False
await client.get_project(entry.data[CONF_ORG], entry.data[CONF_PROJECT])
except aiohttp.ClientError as exception:
_LOGGER.warning(exception)
raise ConfigEntryNotReady from exception
instance_key = f"{DOMAIN}_{entry.data[CONF_ORG]}_{entry.data[CONF_PROJECT]}"
hass.data.setdefault(instance_key, {})[DATA_AZURE_DEVOPS_CLIENT] = client
# Setup components
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigType) -> bool:
"""Unload Azure DevOps config entry."""
del hass.data[f"{DOMAIN}_{entry.data[CONF_ORG]}_{entry.data[CONF_PROJECT]}"]
return await hass.config_entries.async_forward_entry_unload(entry, "sensor")
class AzureDevOpsEntity(Entity):
"""Defines a base Azure DevOps entity."""
def __init__(self, organization: str, project: str, name: str, icon: str) -> None:
"""Initialize the Azure DevOps entity."""
self._name = name
self._icon = icon
self._available = True
self.organization = organization
self.project = project
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> str:
"""Return the mdi icon of the entity."""
return self._icon
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
async def async_update(self) -> None:
"""Update Azure DevOps entity."""
if await self._azure_devops_update():
self._available = True
else:
if self._available:
_LOGGER.debug(
"An error occurred while updating Azure DevOps sensor.",
exc_info=True,
)
self._available = False
    async def _azure_devops_update(self) -> bool:
"""Update Azure DevOps entity."""
raise NotImplementedError()
class AzureDevOpsDeviceEntity(AzureDevOpsEntity):
"""Defines a Azure DevOps device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this Azure DevOps instance."""
return {
"identifiers": {
(
DOMAIN,
self.organization,
self.project,
)
},
"manufacturer": self.organization,
"name": self.project,
"entry_type": "service",
}
|
import tensornetwork as tn
import pytest
import numpy as np
from tensornetwork.block_sparse import BlockSparseTensor, Index
from tensornetwork.block_sparse.charge import charge_equal, BaseCharge, U1Charge
from tensornetwork.block_sparse.blocksparse_utils import _find_diagonal_sparse_blocks #pylint: disable=line-too-long
def get_random(shape, num_charges, dtype=np.float64):
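  """Create a random BlockSparseTensor of the given shape with randomly chosen charges on each leg."""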
R = len(shape)
charges = [
BaseCharge(
np.random.randint(-5, 6, (shape[n], num_charges)),
charge_types=[U1Charge] * num_charges) for n in range(R)
]
flows = list(np.full(R, fill_value=False, dtype=np.bool))
indices = [Index(charges[n], flows[n]) for n in range(R)]
return BlockSparseTensor.randn(indices=indices, dtype=dtype)
def get_zeros(shape, num_charges, dtype=np.float64):
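  """Create a zero-initialized BlockSparseTensor of the given shape with randomly chosen charges on each leg."""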
R = len(shape)
charges = [
BaseCharge(
np.random.randint(-5, 6, (shape[n], num_charges)),
charge_types=[U1Charge] * num_charges) for n in range(R)
]
flows = list(np.full(R, fill_value=False, dtype=np.bool))
indices = [Index(charges[n], flows[n]) for n in range(R)]
return BlockSparseTensor.zeros(indices=indices, dtype=dtype)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node(dtype, num_charges):
np.random.seed(111)
a = tn.Node(
get_zeros((5, 7, 4, 5, 6), num_charges, dtype), backend='symmetric')
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right, _ = tn.split_node(a, left_edges, right_edges)
tn.check_correct({left, right})
actual = left @ right
np.testing.assert_allclose(actual.tensor.shape, (5, 7, 4, 5, 6))
np.testing.assert_allclose(a.tensor.shape, (5, 7, 4, 5, 6))
np.testing.assert_allclose(left.tensor.data, 0)
np.testing.assert_allclose(right.tensor.data, 0)
assert np.all([
charge_equal(a.tensor._charges[n], actual.tensor._charges[n])
for n in range(len(a.tensor._charges))
])
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node_mixed_order(dtype, num_charges):
np.random.seed(131)
a = tn.Node(
get_zeros((5, 3, 4, 5, 6), num_charges, dtype), backend='symmetric')
left_edges = []
for i in [0, 2, 4]:
left_edges.append(a[i])
right_edges = []
for i in [1, 3]:
right_edges.append(a[i])
left, right, _ = tn.split_node(a, left_edges, right_edges)
tn.check_correct({left, right})
actual = left @ right
np.testing.assert_allclose(actual.tensor.shape, (5, 4, 6, 3, 5))
np.testing.assert_allclose(a.tensor.shape, (5, 3, 4, 5, 6))
np.testing.assert_allclose(left.tensor.data, 0)
np.testing.assert_allclose(right.tensor.data, 0)
np.testing.assert_allclose(left.tensor.shape[0:3], (5, 4, 6))
np.testing.assert_allclose(right.tensor.shape[1:], (3, 5))
new_order = [0, 2, 4, 1, 3]
assert np.all([
charge_equal(a.tensor.charges[new_order[n]][0],
actual.tensor.charges[n][0])
for n in range(len(a.tensor._charges))
])
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_svd_consistency(dtype, num_charges):
np.random.seed(111)
original_tensor = get_random((20, 20), num_charges, dtype)
node = tn.Node(original_tensor, backend='symmetric')
u, vh, _ = tn.split_node(node, [node[0]], [node[1]])
final_node = tn.contract_between(u, vh)
np.testing.assert_allclose(
final_node.tensor.data, original_tensor.data, rtol=1e-6)
assert np.all([
charge_equal(final_node.tensor._charges[n], original_tensor._charges[n])
for n in range(len(original_tensor._charges))
])
|
import argparse
import logging
import sys
import traceback
from socket import getfqdn
from socket import gethostbyname
from socket import gethostname
from paasta_tools import mesos_maintenance
from paasta_tools import utils
from paasta_tools.marathon_tools import get_expected_instance_count_for_namespace
from paasta_tools.marathon_tools import load_marathon_service_config
from paasta_tools.marathon_tools import marathon_services_running_here
from paasta_tools.smartstack_tools import backend_is_up
from paasta_tools.smartstack_tools import get_backends
from paasta_tools.smartstack_tools import get_replication_for_services
from paasta_tools.smartstack_tools import ip_port_hostname_from_svname
from paasta_tools.smartstack_tools import load_smartstack_info_for_service
log = logging.getLogger(__name__)
def parse_args():
"""Parses the command line arguments passed to this script"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-d",
"--duration",
type=mesos_maintenance.parse_timedelta,
default="1h",
help="Duration of the maintenance window. Any pytimeparse unit is supported.",
)
parser.add_argument(
"-s",
"--start",
type=mesos_maintenance.parse_datetime,
default=str(mesos_maintenance.now()),
help="Time to start the maintenance window. Defaults to now.",
)
parser.add_argument(
"action",
choices=[
"cluster_status",
"down",
"drain",
"is_host_down",
"is_host_drained",
"is_host_draining",
"is_hosts_past_maintenance_end",
"is_hosts_past_maintenance_start",
"is_safe_to_drain",
"is_safe_to_kill",
"schedule",
"status",
"undrain",
"up",
],
help="Action to perform on the specified hosts",
)
parser.add_argument(
"hostname",
nargs="*",
default=[getfqdn()],
help="Hostname(s) of machine(s) to start draining. "
"You can specify <hostname>|<ip> to avoid querying DNS to determine the corresponding IP.",
)
parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbose",
default=0,
help="Print out more output.",
)
return parser.parse_args()
def is_safe_to_kill(hostname):
"""Checks if a host has drained or reached its maintenance window
:param hostname: hostname to check
:returns: True or False
"""
return mesos_maintenance.is_host_drained(
hostname
) or mesos_maintenance.is_host_past_maintenance_start(hostname)
def is_hostname_local(hostname):
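    """Check whether the given hostname refers to the machine this script runs on."""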
return hostname == "localhost" or hostname == getfqdn() or hostname == gethostname()
def is_safe_to_drain(hostname):
"""Checks if a host has healthy tasks running locally that have low
replication in other places
:param hostname: hostname to check
:returns: True or False
"""
if not is_hostname_local(hostname):
print(
"Due to the way is_safe_to_drain is implemented, it can only work on localhost."
)
return False
return not are_local_tasks_in_danger()
def is_healthy_in_haproxy(local_port, backends):
local_ip = gethostbyname(gethostname())
for backend in backends:
ip, port, _ = ip_port_hostname_from_svname(backend["svname"])
if ip == local_ip and port == local_port:
if backend_is_up(backend):
log.debug("Found a healthy local backend: %s" % backend)
return True
else:
log.debug("Found a unhealthy local backend: %s" % backend)
return False
log.debug("Couldn't find any haproxy backend listening on %s" % local_port)
return False
def synapse_replication_is_low(service, instance, system_paasta_config, local_backends):
crit_threshold = 80
cluster = system_paasta_config.get_cluster()
marathon_service_config = load_marathon_service_config(
service=service, instance=instance, cluster=cluster, load_deployments=False
)
reg_svc, reg_namespace, _, __ = utils.decompose_job_id(
marathon_service_config.get_registrations()
)
# We only actually care about the replication of where we're registering
service, namespace = reg_svc, reg_namespace
smartstack_replication_info = load_smartstack_info_for_service(
service=service,
namespace=namespace,
blacklist=[],
system_paasta_config=system_paasta_config,
)
expected_count = get_expected_instance_count_for_namespace(
service=service, namespace=namespace
)
expected_count_per_location = int(expected_count / len(smartstack_replication_info))
synapse_name = utils.compose_job_id(service, namespace)
local_replication = get_replication_for_services(
synapse_host=system_paasta_config.get_default_synapse_host(),
synapse_port=system_paasta_config.get_synapse_port(),
synapse_haproxy_url_format=system_paasta_config.get_synapse_haproxy_url_format(),
services=[synapse_name],
)
num_available = local_replication.get(synapse_name, 0)
under_replicated, ratio = utils.is_under_replicated(
num_available, expected_count_per_location, crit_threshold
)
log.info(
"Service %s.%s has %d out of %d expected instances"
% (service, instance, num_available, expected_count_per_location)
)
return under_replicated
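# Worked example for the check above (illustrative numbers only, assuming
# utils.is_under_replicated compares the availability percentage against
# crit_threshold): with expected_count=30 spread across 3 locations,
# expected_count_per_location is 10; if only 7 local backends are available,
# availability is 70%, which is below the 80% threshold, so the service is
# reported as under-replicated.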
def are_local_tasks_in_danger():
try:
system_paasta_config = utils.load_system_paasta_config()
local_services = marathon_services_running_here()
local_backends = get_backends(
service=None,
synapse_host=system_paasta_config.get_default_synapse_host(),
synapse_port=system_paasta_config.get_synapse_port(),
synapse_haproxy_url_format=system_paasta_config.get_synapse_haproxy_url_format(),
)
for service, instance, port in local_services:
log.info(f"Inspecting {service}.{instance} on {port}")
if is_healthy_in_haproxy(
port, local_backends
) and synapse_replication_is_low(
service, instance, system_paasta_config, local_backends=local_backends
):
log.warning(
"{}.{} on port {} is healthy but the service is in danger!".format(
service, instance, port
)
)
return True
return False
except Exception:
log.warning(traceback.format_exc())
return False
def paasta_maintenance():
"""Manipulate the maintenance state of a PaaSTA host.
:returns: None
"""
args = parse_args()
if args.verbose >= 2:
logging.basicConfig(level=logging.DEBUG)
elif args.verbose == 1:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
action = args.action
hostnames = args.hostname
if action != "status" and not hostnames:
print("You must specify one or more hostnames")
return
start = args.start
duration = args.duration
ret = "Done"
if action == "drain":
mesos_maintenance.drain(hostnames, start, duration)
elif action == "undrain":
mesos_maintenance.undrain(hostnames)
elif action == "down":
mesos_maintenance.down(hostnames)
elif action == "up":
mesos_maintenance.up(hostnames)
elif action == "status":
ret = mesos_maintenance.friendly_status()
elif action == "cluster_status":
ret = mesos_maintenance.status()
elif action == "schedule":
ret = mesos_maintenance.schedule()
elif action == "is_safe_to_drain":
ret = is_safe_to_drain(hostnames[0])
elif action == "is_safe_to_kill":
ret = is_safe_to_kill(hostnames[0])
elif action == "is_host_drained":
ret = mesos_maintenance.is_host_drained(hostnames[0])
elif action == "is_host_down":
ret = mesos_maintenance.is_host_down(hostnames[0])
elif action == "is_host_draining":
ret = mesos_maintenance.is_host_draining(hostnames[0])
elif action == "is_host_past_maintenance_start":
ret = mesos_maintenance.is_host_past_maintenance_start(hostnames[0])
elif action == "is_host_past_maintenance_end":
ret = mesos_maintenance.is_host_past_maintenance_end(hostnames[0])
else:
raise NotImplementedError("Action: '%s' is not implemented." % action)
print(ret)
return ret
if __name__ == "__main__":
if paasta_maintenance():
sys.exit(0)
sys.exit(1)
|
from .naive import Naive
from .dssm import DSSM
from .cdssm import CDSSM
from .dense_baseline import DenseBaseline
from .arci import ArcI
from .arcii import ArcII
from .match_pyramid import MatchPyramid
from .knrm import KNRM
from .conv_knrm import ConvKNRM
from .duet import DUET
from .drmmtks import DRMMTKS
from .drmm import DRMM
from .anmm import ANMM
from .mvlstm import MVLSTM
def list_available() -> list:
from matchzoo.engine.base_model import BaseModel
from matchzoo.utils import list_recursive_concrete_subclasses
return list_recursive_concrete_subclasses(BaseModel)
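# Note (illustrative): list_available() returns the concrete BaseModel
# subclasses themselves (e.g. the classes imported above such as DSSM, ArcI
# or KNRM), not their string names, so callers can instantiate entries
# directly.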
|
import pytest
import pyzerproc
from homeassistant import setup
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_XY_COLOR,
SCAN_INTERVAL,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
)
from homeassistant.components.zerproc.light import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
@pytest.fixture
async def mock_light(hass):
"""Create a mock light entity."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry = MockConfigEntry(domain=DOMAIN)
mock_entry.add_to_hass(hass)
light = pyzerproc.Light("AA:BB:CC:DD:EE:FF", "LEDBlue-CCDDEEFF")
mock_state = pyzerproc.LightState(False, (0, 0, 0))
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[light],
), patch.object(light, "connect"), patch.object(
light, "get_state", return_value=mock_state
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
return light
async def test_init(hass):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry = MockConfigEntry(domain=DOMAIN)
mock_entry.add_to_hass(hass)
mock_light_1 = pyzerproc.Light("AA:BB:CC:DD:EE:FF", "LEDBlue-CCDDEEFF")
mock_light_2 = pyzerproc.Light("11:22:33:44:55:66", "LEDBlue-33445566")
mock_state_1 = pyzerproc.LightState(False, (0, 0, 0))
mock_state_2 = pyzerproc.LightState(True, (0, 80, 255))
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[mock_light_1, mock_light_2],
), patch.object(mock_light_1, "connect"), patch.object(
mock_light_2, "connect"
), patch.object(
mock_light_1, "get_state", return_value=mock_state_1
), patch.object(
mock_light_2, "get_state", return_value=mock_state_2
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
}
state = hass.states.get("light.ledblue_33445566")
assert state.state == STATE_ON
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-33445566",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
ATTR_BRIGHTNESS: 255,
ATTR_HS_COLOR: (221.176, 100.0),
ATTR_RGB_COLOR: (0, 80, 255),
ATTR_XY_COLOR: (0.138, 0.08),
}
with patch.object(hass.loop, "stop"), patch.object(
mock_light_1, "disconnect"
) as mock_disconnect_1, patch.object(
mock_light_2, "disconnect"
) as mock_disconnect_2:
await hass.async_stop()
assert mock_disconnect_1.called
assert mock_disconnect_2.called
async def test_discovery_exception(hass):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry = MockConfigEntry(domain=DOMAIN)
mock_entry.add_to_hass(hass)
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
side_effect=pyzerproc.ZerprocException("TEST"),
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
# The exception should be captured and no entities should be added
assert len(hass.data[DOMAIN]["addresses"]) == 0
async def test_connect_exception(hass):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry = MockConfigEntry(domain=DOMAIN)
mock_entry.add_to_hass(hass)
mock_light = pyzerproc.Light("AA:BB:CC:DD:EE:FF", "LEDBlue-CCDDEEFF")
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[mock_light],
), patch.object(
mock_light, "connect", side_effect=pyzerproc.ZerprocException("TEST")
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
# The exception should be captured and no entities should be added
assert len(hass.data[DOMAIN]["addresses"]) == 0
async def test_light_turn_on(hass, mock_light):
"""Test ZerprocLight turn_on."""
utcnow = dt_util.utcnow()
with patch.object(mock_light, "turn_on") as mock_turn_on:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_on.assert_called()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_BRIGHTNESS: 25},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(25, 25, 25)
# Make sure no discovery calls are made while we emulate time passing
with patch("homeassistant.components.zerproc.light.pyzerproc.discover"):
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (175, 150, 220)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_BRIGHTNESS: 25},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(19, 17, 25)
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_HS_COLOR: (50, 50)},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(220, 201, 110)
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (75, 75, 75)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_HS_COLOR: (50, 50)},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(75, 68, 37)
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{
ATTR_ENTITY_ID: "light.ledblue_ccddeeff",
ATTR_BRIGHTNESS: 200,
ATTR_HS_COLOR: (75, 75),
},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(162, 200, 50)
async def test_light_turn_off(hass, mock_light):
"""Test ZerprocLight turn_on."""
with patch.object(mock_light, "turn_off") as mock_turn_off:
await hass.services.async_call(
"light",
"turn_off",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called()
async def test_light_update(hass, mock_light):
"""Test ZerprocLight update."""
utcnow = dt_util.utcnow()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
}
# Make sure no discovery calls are made while we emulate time passing
with patch("homeassistant.components.zerproc.light.pyzerproc.discover"):
        # Test an exception during the state update
with patch.object(
mock_light, "get_state", side_effect=pyzerproc.ZerprocException("TEST")
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_UNAVAILABLE
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
}
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(False, (200, 128, 100)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
}
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (175, 150, 220)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_ON
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
ATTR_BRIGHTNESS: 220,
ATTR_HS_COLOR: (261.429, 31.818),
ATTR_RGB_COLOR: (202, 173, 255),
ATTR_XY_COLOR: (0.291, 0.232),
}
|
from __future__ import print_function
import re
import sys
import inspect
import operator
import itertools
import collections
__version__ = '4.4.2'
if sys.version_info >= (3,):
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
FullArgSpec = collections.namedtuple(
'FullArgSpec', 'args varargs varkw defaults '
'kwonlyargs kwonlydefaults annotations')
def getfullargspec(f):
"A quick and dirty replacement for getfullargspec for Python 2.X"
return FullArgSpec._make(inspect.getargspec(f) + ([], None, {}))
def get_init(cls):
return cls.__init__.__func__
try:
iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
# let's assume there are no coroutine functions in old Python
def iscoroutinefunction(f):
return False
try:
from inspect import isgeneratorfunction
except ImportError:
# assume no generator function in old Python versions
def isgeneratorfunction(caller):
return False
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
# make pylint happy
args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = ()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
        # check the existence of required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non-function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults or None
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.search(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline for old Pythons
src += '\n'
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<decorator-gen-%d>' % next(self._compile_count)
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except Exception:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip the trailing right parenthesis
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
caller = evaldict.get('_call_') # when called from `decorate`
if caller and iscoroutinefunction(caller):
body = ('async def %(name)s(%(signature)s):\n' + ibody).replace(
'return', 'return await')
else:
body = 'def %(name)s(%(signature)s):\n' + ibody
return self.make(body, evaldict, addsource, **attrs)
def decorate(func, caller, extras=()):
"""
decorate(func, caller) decorates a function using a caller.
If the caller is a generator function, the resulting function
will be a generator function.
"""
evaldict = dict(_call_=caller, _func_=func)
es = ''
for i, extra in enumerate(extras):
ex = '_e%d_' % i
evaldict[ex] = extra
es += ex + ', '
if '3.5' <= sys.version < '3.6':
# with Python 3.5 isgeneratorfunction returns True for all coroutines
# however we know that it is NOT possible to have a generator
# coroutine in python 3.5: PEP525 was not there yet
generatorcaller = isgeneratorfunction(
caller) and not iscoroutinefunction(caller)
else:
generatorcaller = isgeneratorfunction(caller)
if generatorcaller:
fun = FunctionMaker.create(
func, "for res in _call_(_func_, %s%%(shortsignature)s):\n"
" yield res" % es, evaldict, __wrapped__=func)
else:
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
defaultargs, defaults = '', ()
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
nargs = caller.__code__.co_argcount
ndefs = len(caller.__defaults__ or ())
defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
if defaultargs:
defaultargs += ','
defaults = caller.__defaults__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(func, %s)' % (name, defaultargs),
'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
if defaults:
dec.__defaults__ = (None,) + defaults
return dec
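# Minimal usage sketch (kept as a comment so nothing runs at import time;
# the names `trace` and `add` are made up for the example):
#
#     def trace(f, *args, **kw):
#         print('calling %s with args %s, %s' % (f.__name__, args, kw))
#         return f(*args, **kw)
#
#     trace = decorator(trace)
#
#     @trace
#     def add(x, y):
#         return x + y
#
# The decorated `add` keeps its original name, docstring and signature,
# because FunctionMaker rebuilds the wrapper from the wrapped function's
# argspec instead of using a generic *args/**kw closure.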
# ####################### contextmanager ####################### #
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager as _GeneratorContextManager
class ContextManager(_GeneratorContextManager):
def __call__(self, func):
"""Context manager decorator"""
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g(*a, **k))
ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
pass
elif n_args == 4: # (self, gen, args, kwds) Python 3.5
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g, a, k)
ContextManager.__init__ = __init__
_contextmanager = decorator(ContextManager)
def contextmanager(func):
# Enable Pylint config: contextmanager-decorators=decorator.contextmanager
return _contextmanager(func)
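# Usage sketch (comment only; the generator below is invented for the example):
#
#     @contextmanager
#     def tag(name):
#         print('<%s>' % name)
#         yield
#         print('</%s>' % name)
#
#     with tag('p'):
#         print('hello')
#
# Because ContextManager defines __call__ via FunctionMaker, the resulting
# manager can also be applied as a decorator while preserving the decorated
# function's signature.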
# ############################ dispatch_on ############################ #
def append(a, vancestors):
"""
Append ``a`` to the list of the virtual ancestors, unless it is already
included.
"""
add = True
for j, va in enumerate(vancestors):
if issubclass(va, a):
add = False
break
if issubclass(a, va):
vancestors[j] = a
add = False
if add:
vancestors.append(a)
# inspired by simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
"""
Factory of decorators turning a function into a generic function
dispatching on the given arguments.
"""
assert dispatch_args, 'No dispatch args passed'
dispatch_str = '(%s,)' % ', '.join(dispatch_args)
def check(arguments, wrong=operator.ne, msg=''):
"""Make sure one passes the expected number of arguments"""
if wrong(len(arguments), len(dispatch_args)):
raise TypeError('Expected %d arguments, got %d%s' %
(len(dispatch_args), len(arguments), msg))
def gen_func_dec(func):
"""Decorator turning a function into a generic function"""
# first check the dispatch arguments
argset = set(getfullargspec(func).args)
if not set(dispatch_args) <= argset:
raise NameError('Unknown dispatch arguments %s' % dispatch_str)
typemap = {}
def vancestors(*types):
"""
Get a list of sets of virtual ancestors for the given types
"""
check(types)
ras = [[] for _ in range(len(dispatch_args))]
for types_ in typemap:
for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.mro():
append(type_, ra)
return [set(ra) for ra in ras]
def ancestors(*types):
"""
Get a list of virtual MROs, one for each type
"""
check(types)
lists = []
for t, vas in zip(types, vancestors(*types)):
n_vas = len(vas)
if n_vas > 1:
raise RuntimeError(
'Ambiguous dispatch for %s: %s' % (t, vas))
elif n_vas == 1:
va, = vas
mro = type('t', (t, va), {}).mro()[1:]
else:
mro = t.mro()
lists.append(mro[:-1]) # discard t and object
return lists
def register(*types):
"""
Decorator to register an implementation for the given types
"""
check(types)
def dec(f):
check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
typemap[types] = f
return f
return dec
def dispatch_info(*types):
"""
            A utility to introspect the dispatch algorithm
"""
check(types)
lst = []
for anc in itertools.product(*ancestors(*types)):
lst.append(tuple(a.__name__ for a in anc))
return lst
def _dispatch(dispatch_args, *args, **kw):
types = tuple(type(arg) for arg in dispatch_args)
try: # fast path
f = typemap[types]
except KeyError:
pass
else:
return f(*args, **kw)
combinations = itertools.product(*ancestors(*types))
next(combinations) # the first one has been already tried
for types_ in combinations:
f = typemap.get(types_)
if f is not None:
return f(*args, **kw)
# else call the default implementation
return func(*args, **kw)
return FunctionMaker.create(
func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
dict(_f_=_dispatch), register=register, default=func,
typemap=typemap, vancestors=vancestors, ancestors=ancestors,
dispatch_info=dispatch_info, __wrapped__=func)
gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
return gen_func_dec
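# Usage sketch (comment only; `serialize` and the registered types are made
# up for the example):
#
#     @dispatch_on('obj')
#     def serialize(obj):
#         raise NotImplementedError('cannot serialize %r' % type(obj))
#
#     @serialize.register(str)
#     def serialize_str(obj):
#         return obj
#
#     @serialize.register(list)
#     def serialize_list(obj):
#         return [serialize(x) for x in obj]
#
# The generated generic function also exposes .dispatch_info, .vancestors and
# .ancestors, as attached by FunctionMaker.create above.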
|
from __future__ import with_statement
import logging
import os.path
import unittest
from functools import partial
import numpy as np
from gensim import corpora, models, utils, matutils
from gensim.parsing.preprocessing import preprocess_documents, preprocess_string, DEFAULT_FILTERS
bg_corpus = None
corpus = None
human_sim_vector = None
class TestLeeTest(unittest.TestCase):
def setUp(self):
"""setup lee test corpora"""
global bg_corpus, corpus, human_sim_vector, bg_corpus2, corpus2
pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
bg_corpus_file = 'lee_background.cor'
corpus_file = 'lee.cor'
sim_file = 'similarities0-1.txt'
# read in the corpora
latin1 = partial(utils.to_unicode, encoding='latin1')
with utils.open(os.path.join(pre_path, bg_corpus_file), 'rb') as f:
bg_corpus = preprocess_documents(latin1(line) for line in f)
with utils.open(os.path.join(pre_path, corpus_file), 'rb') as f:
corpus = preprocess_documents(latin1(line) for line in f)
with utils.open(os.path.join(pre_path, bg_corpus_file), 'rb') as f:
bg_corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
with utils.open(os.path.join(pre_path, corpus_file), 'rb') as f:
corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
# read the human similarity data
sim_matrix = np.loadtxt(os.path.join(pre_path, sim_file))
sim_m_size = np.shape(sim_matrix)[0]
human_sim_vector = sim_matrix[np.triu_indices(sim_m_size, 1)]
def test_corpus(self):
"""availability and integrity of corpus"""
documents_in_bg_corpus = 300
documents_in_corpus = 50
len_sim_vector = 1225
self.assertEqual(len(bg_corpus), documents_in_bg_corpus)
self.assertEqual(len(corpus), documents_in_corpus)
self.assertEqual(len(human_sim_vector), len_sim_vector)
def test_lee(self):
"""correlation with human data > 0.6
(this is the value which was achieved in the original paper)
"""
global bg_corpus, corpus
# create a dictionary and corpus (bag of words)
dictionary = corpora.Dictionary(bg_corpus)
bg_corpus = [dictionary.doc2bow(text) for text in bg_corpus]
corpus = [dictionary.doc2bow(text) for text in corpus]
# transform the bag of words with log_entropy normalization
log_ent = models.LogEntropyModel(bg_corpus)
bg_corpus_ent = log_ent[bg_corpus]
# initialize an LSI transformation from background corpus
lsi = models.LsiModel(bg_corpus_ent, id2word=dictionary, num_topics=200)
# transform small corpus to lsi bow->log_ent->fold-in-lsi
corpus_lsi = lsi[log_ent[corpus]]
# compute pairwise similarity matrix and extract upper triangular
res = np.zeros((len(corpus), len(corpus)))
for i, par1 in enumerate(corpus_lsi):
for j, par2 in enumerate(corpus_lsi):
res[i, j] = matutils.cossim(par1, par2)
flat = res[np.triu_indices(len(corpus), 1)]
cor = np.corrcoef(flat, human_sim_vector)[0, 1]
logging.info("LSI correlation coefficient is %s", cor)
self.assertTrue(cor > 0.6)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
from typing import Callable
from pyisy.constants import ISY_VALUE_UNKNOWN
from homeassistant.components.lock import DOMAIN as LOCK, LockEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from .const import _LOGGER, DOMAIN as ISY994_DOMAIN, ISY994_NODES, ISY994_PROGRAMS
from .entity import ISYNodeEntity, ISYProgramEntity
from .helpers import migrate_old_unique_ids
VALUE_TO_STATE = {0: False, 100: True}
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 lock platform."""
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
devices = []
for node in hass_isy_data[ISY994_NODES][LOCK]:
devices.append(ISYLockEntity(node))
for name, status, actions in hass_isy_data[ISY994_PROGRAMS][LOCK]:
devices.append(ISYLockProgramEntity(name, status, actions))
await migrate_old_unique_ids(hass, LOCK, devices)
async_add_entities(devices)
class ISYLockEntity(ISYNodeEntity, LockEntity):
"""Representation of an ISY994 lock device."""
@property
def is_locked(self) -> bool:
"""Get whether the lock is in locked state."""
if self._node.status == ISY_VALUE_UNKNOWN:
return None
return VALUE_TO_STATE.get(self._node.status)
def lock(self, **kwargs) -> None:
"""Send the lock command to the ISY994 device."""
if not self._node.secure_lock():
_LOGGER.error("Unable to lock device")
def unlock(self, **kwargs) -> None:
"""Send the unlock command to the ISY994 device."""
if not self._node.secure_unlock():
_LOGGER.error("Unable to lock device")
class ISYLockProgramEntity(ISYProgramEntity, LockEntity):
"""Representation of a ISY lock program."""
@property
def is_locked(self) -> bool:
"""Return true if the device is locked."""
return bool(self._node.status)
def lock(self, **kwargs) -> None:
"""Lock the device."""
if not self._actions.run_then():
_LOGGER.error("Unable to lock device")
def unlock(self, **kwargs) -> None:
"""Unlock the device."""
if not self._actions.run_else():
_LOGGER.error("Unable to unlock device")
|
import numpy as np
from scattertext import CorpusDF
from scattertext.TermDocMatrixFromPandas import TermDocMatrixFromPandas, build_sparse_matrices
class CorpusFromPandas(TermDocMatrixFromPandas):
    '''Creates a Corpus from a pandas data frame. A Corpus is a Term Document Matrix
    that preserves the original texts.
Parameters
----------
data_frame : pd.DataFrame
The data frame that contains columns for the category of interest
and the document text.
text_col : str
The name of the column which contains the document text.
category_col : str
The name of the column which contains the category of interest.
clean_function : function, optional
A function that strips invalid characters out of the document text string, returning
a new string.
nlp : function, optional
verbose : boolean, optional
        If True, prints a progress message every 100 documents.
See Also
--------
TermDocMatrixFromPandas
'''
def _apply_pipeline_and_get_build_instance(self,
X_factory,
mX_factory,
category_idx_store,
df,
parse_pipeline,
term_idx_store,
metadata_idx_store,
y):
'''
Parameters
----------
X_factory
mX_factory
category_idx_store
df
parse_pipeline
term_idx_store
metadata_idx_store
y
Returns
-------
CorpusDF
'''
df.apply(parse_pipeline.parse, axis=1)
y = np.array(y)
X, mX = build_sparse_matrices(y, X_factory, mX_factory)
return CorpusDF(df,
X,
mX,
y,
self._text_col,
term_idx_store,
category_idx_store,
metadata_idx_store)
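# Illustrative construction (comment only; the column names, example texts and
# the spaCy model name are assumptions for the sketch):
#
#     import pandas as pd
#     import spacy
#     df = pd.DataFrame({'party': ['democrat', 'republican'],
#                        'text': ['a speech ...', 'another speech ...']})
#     corpus = CorpusFromPandas(df,
#                               category_col='party',
#                               text_col='text',
#                               nlp=spacy.load('en_core_web_sm')).build()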
|
import unittest
from mock import Mock
from integration_tests.files import require_empty_dir
from trashcli.put import TopTrashDirWriteRules
class TestMethod1VolumeTrashDirectory(unittest.TestCase):
def setUp(self):
require_empty_dir('sandbox')
self.fs = Mock()
self.fs.isdir.return_value = True
self.fs.islink.return_value = False
self.fs.has_sticky_bit.return_value = True
self.checker = TopTrashDirWriteRules
self.out = Mock()
def test_check_when_no_sticky_bit(self):
self.fs.has_sticky_bit.return_value = False
self.valid_to_be_written()
self.out.not_valid_parent_should_be_sticky.assert_called_with()
def test_check_when_no_dir(self):
self.fs.isdir.return_value = False
self.valid_to_be_written()
self.out.not_valid_should_be_a_dir.assert_called_with()
def test_check_when_is_symlink(self):
self.fs.islink.return_value = True
self.valid_to_be_written()
self.out.not_valid_parent_should_not_be_a_symlink.assert_called_with()
def test_check_pass(self):
self.valid_to_be_written()
        self.out.is_valid.assert_called_with()
def valid_to_be_written(self):
self.checker('sandbox/trash-dir/123', self.out, self.fs)
|
import pytest
import numpy as np
import tensornetwork as tn
import quantum as qu
def test_constructor(backend):
psi_tensor = np.random.rand(2, 2)
psi_node = tn.Node(psi_tensor, backend=backend)
op = qu.quantum_constructor([psi_node[0]], [psi_node[1]])
assert not op.is_scalar()
assert not op.is_vector()
assert not op.is_adjoint_vector()
assert len(op.out_edges) == 1
assert len(op.in_edges) == 1
assert op.out_edges[0] is psi_node[0]
assert op.in_edges[0] is psi_node[1]
op = qu.quantum_constructor([psi_node[0], psi_node[1]], [])
assert not op.is_scalar()
assert op.is_vector()
assert not op.is_adjoint_vector()
assert len(op.out_edges) == 2
assert len(op.in_edges) == 0
assert op.out_edges[0] is psi_node[0]
assert op.out_edges[1] is psi_node[1]
op = qu.quantum_constructor([], [psi_node[0], psi_node[1]])
assert not op.is_scalar()
assert not op.is_vector()
assert op.is_adjoint_vector()
assert len(op.out_edges) == 0
assert len(op.in_edges) == 2
assert op.in_edges[0] is psi_node[0]
assert op.in_edges[1] is psi_node[1]
with pytest.raises(ValueError):
op = qu.quantum_constructor([], [], [psi_node])
_ = psi_node[0] ^ psi_node[1]
op = qu.quantum_constructor([], [], [psi_node])
assert op.is_scalar()
assert not op.is_vector()
assert not op.is_adjoint_vector()
assert len(op.out_edges) == 0
assert len(op.in_edges) == 0
def test_checks(backend):
node1 = tn.Node(np.random.rand(2, 2), backend=backend)
node2 = tn.Node(np.random.rand(2, 2), backend=backend)
_ = node1[1] ^ node2[0]
# extra dangling edges must be explicitly ignored
with pytest.raises(ValueError):
_ = qu.QuVector([node1[0]])
# correctly ignore the extra edge
_ = qu.QuVector([node1[0]], ignore_edges=[node2[1]])
# in/out edges must be dangling
with pytest.raises(ValueError):
_ = qu.QuVector([node1[0], node1[1], node2[1]])
def test_from_tensor(backend):
psi_tensor = np.random.rand(2, 2)
op = qu.QuOperator.from_tensor(psi_tensor, [0], [1], backend=backend)
assert not op.is_scalar()
assert not op.is_vector()
assert not op.is_adjoint_vector()
np.testing.assert_almost_equal(op.eval(), psi_tensor)
op = qu.QuVector.from_tensor(psi_tensor, [0, 1], backend=backend)
assert not op.is_scalar()
assert op.is_vector()
assert not op.is_adjoint_vector()
np.testing.assert_almost_equal(op.eval(), psi_tensor)
op = qu.QuAdjointVector.from_tensor(psi_tensor, [0, 1], backend=backend)
assert not op.is_scalar()
assert not op.is_vector()
assert op.is_adjoint_vector()
np.testing.assert_almost_equal(op.eval(), psi_tensor)
op = qu.QuScalar.from_tensor(1.0, backend=backend)
assert op.is_scalar()
assert not op.is_vector()
assert not op.is_adjoint_vector()
assert op.eval() == 1.0
def test_identity(backend):
E = qu.identity((2, 3, 4), backend=backend)
for n in E.nodes:
assert isinstance(n, tn.CopyNode)
twentyfour = E.trace()
for n in twentyfour.nodes:
assert isinstance(n, tn.CopyNode)
assert twentyfour.eval() == 24
tensor = np.random.rand(2, 2)
psi = qu.QuVector.from_tensor(tensor, backend=backend)
E = qu.identity((2, 2), backend=backend)
np.testing.assert_allclose((E @ psi).eval(), psi.eval())
np.testing.assert_allclose((psi.adjoint() @ E @ psi).eval(),
psi.norm().eval())
op = qu.QuOperator.from_tensor(tensor, [0], [1], backend=backend)
op_I = op.tensor_product(E)
op_times_4 = op_I.partial_trace([1, 2])
np.testing.assert_allclose(op_times_4.eval(), 4 * op.eval())
def test_tensor_product(backend):
psi = qu.QuVector.from_tensor(np.random.rand(2, 2), backend=backend)
psi_psi = psi.tensor_product(psi)
assert len(psi_psi.subsystem_edges) == 4
np.testing.assert_almost_equal(psi_psi.norm().eval(), psi.norm().eval()**2)
def test_matmul(backend):
mat = np.random.rand(2, 2)
op = qu.QuOperator.from_tensor(mat, [0], [1], backend=backend)
res = (op @ op).eval()
np.testing.assert_allclose(res, mat @ mat)
def test_mul(backend):
mat = np.eye(2)
scal = np.float64(0.5)
op = qu.QuOperator.from_tensor(mat, [0], [1], backend=backend)
scal_op = qu.QuScalar.from_tensor(scal, backend=backend)
res = (op * scal_op).eval()
np.testing.assert_allclose(res, mat * 0.5)
res = (scal_op * op).eval()
np.testing.assert_allclose(res, mat * 0.5)
res = (scal_op * scal_op).eval()
np.testing.assert_almost_equal(res, 0.25)
res = (op * np.float64(0.5)).eval()
np.testing.assert_allclose(res, mat * 0.5)
res = (np.float64(0.5) * op).eval()
np.testing.assert_allclose(res, mat * 0.5)
with pytest.raises(ValueError):
_ = (op * op)
with pytest.raises(ValueError):
_ = (op * mat)
def test_expectations(backend):
if backend == 'pytorch':
psi_tensor = np.random.rand(2, 2, 2)
op_tensor = np.random.rand(2, 2)
else:
psi_tensor = np.random.rand(2, 2, 2) + 1.j * np.random.rand(2, 2, 2)
op_tensor = np.random.rand(2, 2) + 1.j * np.random.rand(2, 2)
psi = qu.QuVector.from_tensor(psi_tensor, backend=backend)
op = qu.QuOperator.from_tensor(op_tensor, [0], [1], backend=backend)
op_3 = op.tensor_product(
qu.identity((2, 2), backend=backend, dtype=psi_tensor.dtype))
res1 = (psi.adjoint() @ op_3 @ psi).eval()
rho_1 = psi.reduced_density([1, 2]) # trace out sites 2 and 3
res2 = (op @ rho_1).trace().eval()
np.testing.assert_almost_equal(res1, res2)
def test_projector(backend):
psi_tensor = np.random.rand(2, 2)
psi_tensor /= np.linalg.norm(psi_tensor)
psi = qu.QuVector.from_tensor(psi_tensor, backend=backend)
P = psi.projector()
np.testing.assert_allclose((P @ psi).eval(), psi_tensor)
np.testing.assert_allclose((P @ P).eval(), P.eval())
|
import time
import unittest
from retrying import RetryError
from retrying import Retrying
from retrying import retry
class TestStopConditions(unittest.TestCase):
def test_never_stop(self):
r = Retrying()
self.assertFalse(r.stop(3, 6546))
def test_stop_after_attempt(self):
r = Retrying(stop_max_attempt_number=3)
self.assertFalse(r.stop(2, 6546))
self.assertTrue(r.stop(3, 6546))
self.assertTrue(r.stop(4, 6546))
def test_stop_after_delay(self):
r = Retrying(stop_max_delay=1000)
self.assertFalse(r.stop(2, 999))
self.assertTrue(r.stop(2, 1000))
self.assertTrue(r.stop(2, 1001))
def test_legacy_explicit_stop_type(self):
Retrying(stop="stop_after_attempt")
def test_stop_func(self):
r = Retrying(stop_func=lambda attempt, delay: attempt == delay)
self.assertFalse(r.stop(1, 3))
self.assertFalse(r.stop(100, 99))
self.assertTrue(r.stop(101, 101))
class TestWaitConditions(unittest.TestCase):
def test_no_sleep(self):
r = Retrying()
self.assertEqual(0, r.wait(18, 9879))
def test_fixed_sleep(self):
r = Retrying(wait_fixed=1000)
self.assertEqual(1000, r.wait(12, 6546))
def test_incrementing_sleep(self):
r = Retrying(wait_incrementing_start=500, wait_incrementing_increment=100)
self.assertEqual(500, r.wait(1, 6546))
self.assertEqual(600, r.wait(2, 6546))
self.assertEqual(700, r.wait(3, 6546))
def test_random_sleep(self):
r = Retrying(wait_random_min=1000, wait_random_max=2000)
times = set()
times.add(r.wait(1, 6546))
times.add(r.wait(1, 6546))
times.add(r.wait(1, 6546))
times.add(r.wait(1, 6546))
# this is kind of non-deterministic...
self.assertTrue(len(times) > 1)
for t in times:
self.assertTrue(t >= 1000)
self.assertTrue(t <= 2000)
def test_random_sleep_without_min(self):
r = Retrying(wait_random_max=2000)
times = set()
times.add(r.wait(1, 6546))
times.add(r.wait(1, 6546))
times.add(r.wait(1, 6546))
times.add(r.wait(1, 6546))
# this is kind of non-deterministic...
self.assertTrue(len(times) > 1)
for t in times:
self.assertTrue(t >= 0)
self.assertTrue(t <= 2000)
def test_exponential(self):
r = Retrying(wait_exponential_max=100000)
self.assertEqual(r.wait(1, 0), 2)
self.assertEqual(r.wait(2, 0), 4)
self.assertEqual(r.wait(3, 0), 8)
self.assertEqual(r.wait(4, 0), 16)
self.assertEqual(r.wait(5, 0), 32)
self.assertEqual(r.wait(6, 0), 64)
def test_exponential_with_max_wait(self):
r = Retrying(wait_exponential_max=40)
self.assertEqual(r.wait(1, 0), 2)
self.assertEqual(r.wait(2, 0), 4)
self.assertEqual(r.wait(3, 0), 8)
self.assertEqual(r.wait(4, 0), 16)
self.assertEqual(r.wait(5, 0), 32)
self.assertEqual(r.wait(6, 0), 40)
self.assertEqual(r.wait(7, 0), 40)
self.assertEqual(r.wait(50, 0), 40)
def test_exponential_with_max_wait_and_multiplier(self):
r = Retrying(wait_exponential_max=50000, wait_exponential_multiplier=1000)
self.assertEqual(r.wait(1, 0), 2000)
self.assertEqual(r.wait(2, 0), 4000)
self.assertEqual(r.wait(3, 0), 8000)
self.assertEqual(r.wait(4, 0), 16000)
self.assertEqual(r.wait(5, 0), 32000)
self.assertEqual(r.wait(6, 0), 50000)
self.assertEqual(r.wait(7, 0), 50000)
self.assertEqual(r.wait(50, 0), 50000)
def test_legacy_explicit_wait_type(self):
Retrying(wait="exponential_sleep")
def test_wait_func(self):
r = Retrying(wait_func=lambda attempt, delay: attempt * delay)
self.assertEqual(r.wait(1, 5), 5)
self.assertEqual(r.wait(2, 11), 22)
self.assertEqual(r.wait(10, 100), 1000)
class NoneReturnUntilAfterCount:
"""
This class holds counter state for invoking a method several times in a row.
"""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""
Return None until after count threshold has been crossed, then return True.
"""
if self.counter < self.count:
self.counter += 1
return None
return True
class NoIOErrorAfterCount:
"""
This class holds counter state for invoking a method several times in a row.
"""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""
Raise an IOError until after count threshold has been crossed, then return True.
"""
if self.counter < self.count:
self.counter += 1
raise IOError("Hi there, I'm an IOError")
return True
class NoNameErrorAfterCount:
"""
This class holds counter state for invoking a method several times in a row.
"""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""
Raise a NameError until after count threshold has been crossed, then return True.
"""
if self.counter < self.count:
self.counter += 1
raise NameError("Hi there, I'm a NameError")
return True
class CustomError(Exception):
"""
    This is a custom exception class. Note that for Python 2.x we don't
    strictly need to extend BaseException; however, Python 3.x will complain.
    While this test suite won't run correctly under Python 3.x without
    extending from the Python exception hierarchy, the actual module code is
    backwards compatible with Python 2.x and will allow for cases where
    exception classes don't extend from the hierarchy.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class NoCustomErrorAfterCount:
"""
This class holds counter state for invoking a method several times in a row.
"""
def __init__(self, count):
self.counter = 0
self.count = count
def go(self):
"""
Raise a CustomError until after count threshold has been crossed, then return True.
"""
if self.counter < self.count:
self.counter += 1
derived_message = "This is a Custom exception class"
raise CustomError(derived_message)
return True
def retry_if_result_none(result):
return result is None
def retry_if_exception_of_type(retryable_types):
def retry_if_exception_these_types(exception):
print("Detected Exception of type: {0}".format(str(type(exception))))
return isinstance(exception, retryable_types)
return retry_if_exception_these_types
def current_time_ms():
return int(round(time.time() * 1000))
@retry(wait_fixed=50, retry_on_result=retry_if_result_none)
def _retryable_test_with_wait(thing):
return thing.go()
@retry(stop_max_attempt_number=3, retry_on_result=retry_if_result_none)
def _retryable_test_with_stop(thing):
return thing.go()
@retry(retry_on_exception=(IOError,))
def _retryable_test_with_exception_type_io(thing):
return thing.go()
@retry(retry_on_exception=retry_if_exception_of_type(IOError), wrap_exception=True)
def _retryable_test_with_exception_type_io_wrap(thing):
return thing.go()
@retry(
stop_max_attempt_number=3,
retry_on_exception=(IOError,))
def _retryable_test_with_exception_type_io_attempt_limit(thing):
return thing.go()
@retry(
stop_max_attempt_number=3,
retry_on_exception=(IOError,),
wrap_exception=True)
def _retryable_test_with_exception_type_io_attempt_limit_wrap(thing):
return thing.go()
@retry
def _retryable_default(thing):
return thing.go()
@retry()
def _retryable_default_f(thing):
return thing.go()
@retry(retry_on_exception=retry_if_exception_of_type(CustomError))
def _retryable_test_with_exception_type_custom(thing):
return thing.go()
@retry(retry_on_exception=retry_if_exception_of_type(CustomError), wrap_exception=True)
def _retryable_test_with_exception_type_custom_wrap(thing):
return thing.go()
@retry(
stop_max_attempt_number=3,
retry_on_exception=retry_if_exception_of_type(CustomError))
def _retryable_test_with_exception_type_custom_attempt_limit(thing):
return thing.go()
@retry(
stop_max_attempt_number=3,
retry_on_exception=retry_if_exception_of_type(CustomError),
wrap_exception=True)
def _retryable_test_with_exception_type_custom_attempt_limit_wrap(thing):
return thing.go()
class TestDecoratorWrapper(unittest.TestCase):
def test_with_wait(self):
start = current_time_ms()
result = _retryable_test_with_wait(NoneReturnUntilAfterCount(5))
t = current_time_ms() - start
self.assertTrue(t >= 250)
self.assertTrue(result)
def test_with_stop_on_return_value(self):
try:
_retryable_test_with_stop(NoneReturnUntilAfterCount(5))
self.fail("Expected RetryError after 3 attempts")
except RetryError as re:
self.assertFalse(re.last_attempt.has_exception)
self.assertEqual(3, re.last_attempt.attempt_number)
self.assertTrue(re.last_attempt.value is None)
print(re)
def test_with_stop_on_exception(self):
try:
_retryable_test_with_stop(NoIOErrorAfterCount(5))
self.fail("Expected IOError")
except IOError as re:
self.assertTrue(isinstance(re, IOError))
print(re)
def test_retry_if_exception_of_type(self):
self.assertTrue(_retryable_test_with_exception_type_io(NoIOErrorAfterCount(5)))
try:
_retryable_test_with_exception_type_io(NoNameErrorAfterCount(5))
self.fail("Expected NameError")
except NameError as n:
self.assertTrue(isinstance(n, NameError))
print(n)
try:
_retryable_test_with_exception_type_io_attempt_limit_wrap(NoIOErrorAfterCount(5))
self.fail("Expected RetryError")
except RetryError as re:
self.assertEqual(3, re.last_attempt.attempt_number)
self.assertTrue(re.last_attempt.has_exception)
self.assertTrue(re.last_attempt.value[0] is not None)
self.assertTrue(isinstance(re.last_attempt.value[1], IOError))
self.assertTrue(re.last_attempt.value[2] is not None)
print(re)
self.assertTrue(_retryable_test_with_exception_type_custom(NoCustomErrorAfterCount(5)))
try:
_retryable_test_with_exception_type_custom(NoNameErrorAfterCount(5))
self.fail("Expected NameError")
except NameError as n:
self.assertTrue(isinstance(n, NameError))
print(n)
try:
_retryable_test_with_exception_type_custom_attempt_limit_wrap(NoCustomErrorAfterCount(5))
self.fail("Expected RetryError")
except RetryError as re:
self.assertEqual(3, re.last_attempt.attempt_number)
self.assertTrue(re.last_attempt.has_exception)
self.assertTrue(re.last_attempt.value[0] is not None)
self.assertTrue(isinstance(re.last_attempt.value[1], CustomError))
self.assertTrue(re.last_attempt.value[2] is not None)
print(re)
def test_wrapped_exception(self):
# base exception cases
self.assertTrue(_retryable_test_with_exception_type_io_wrap(NoIOErrorAfterCount(5)))
try:
_retryable_test_with_exception_type_io_wrap(NoNameErrorAfterCount(5))
self.fail("Expected RetryError")
except RetryError as re:
self.assertTrue(isinstance(re.last_attempt.value[1], NameError))
print(re)
try:
_retryable_test_with_exception_type_io_attempt_limit_wrap(NoIOErrorAfterCount(5))
self.fail("Expected RetryError")
except RetryError as re:
self.assertEqual(3, re.last_attempt.attempt_number)
self.assertTrue(re.last_attempt.has_exception)
self.assertTrue(re.last_attempt.value[0] is not None)
self.assertTrue(isinstance(re.last_attempt.value[1], IOError))
self.assertTrue(re.last_attempt.value[2] is not None)
print(re)
# custom error cases
self.assertTrue(_retryable_test_with_exception_type_custom_wrap(NoCustomErrorAfterCount(5)))
try:
_retryable_test_with_exception_type_custom_wrap(NoNameErrorAfterCount(5))
self.fail("Expected RetryError")
except RetryError as re:
self.assertTrue(re.last_attempt.value[0] is not None)
self.assertTrue(isinstance(re.last_attempt.value[1], NameError))
self.assertTrue(re.last_attempt.value[2] is not None)
print(re)
try:
_retryable_test_with_exception_type_custom_attempt_limit_wrap(NoCustomErrorAfterCount(5))
self.fail("Expected RetryError")
except RetryError as re:
self.assertEqual(3, re.last_attempt.attempt_number)
self.assertTrue(re.last_attempt.has_exception)
self.assertTrue(re.last_attempt.value[0] is not None)
self.assertTrue(isinstance(re.last_attempt.value[1], CustomError))
self.assertTrue(re.last_attempt.value[2] is not None)
self.assertTrue("This is a Custom exception class" in str(re.last_attempt.value[1]))
print(re)
def test_defaults(self):
self.assertTrue(_retryable_default(NoNameErrorAfterCount(5)))
self.assertTrue(_retryable_default_f(NoNameErrorAfterCount(5)))
self.assertTrue(_retryable_default(NoCustomErrorAfterCount(5)))
self.assertTrue(_retryable_default_f(NoCustomErrorAfterCount(5)))
class TestBeforeAfterAttempts(unittest.TestCase):
_attempt_number = 0
def test_before_attempts(self):
TestBeforeAfterAttempts._attempt_number = 0
def _before(attempt_number):
TestBeforeAfterAttempts._attempt_number = attempt_number
@retry(wait_fixed = 1000, stop_max_attempt_number = 1, before_attempts = _before)
def _test_before():
pass
_test_before()
        self.assertEqual(TestBeforeAfterAttempts._attempt_number, 1)
def test_after_attempts(self):
TestBeforeAfterAttempts._attempt_number = 0
def _after(attempt_number):
TestBeforeAfterAttempts._attempt_number = attempt_number
@retry(wait_fixed = 100, stop_max_attempt_number = 3, after_attempts = _after)
def _test_after():
if TestBeforeAfterAttempts._attempt_number < 2:
raise Exception("testing after_attempts handler")
else:
pass
_test_after()
        self.assertEqual(TestBeforeAfterAttempts._attempt_number, 2)
if __name__ == '__main__':
unittest.main()
|
from __future__ import division # always use floats
from __future__ import with_statement
import logging
import os
import unittest
from gensim import utils, corpora, models, similarities
from gensim.test.utils import datapath, get_tmpfile
logger = logging.getLogger(__name__)
class CorpusMiislita(corpora.TextCorpus):
stoplist = set('for a of the and to in on'.split())
def get_texts(self):
"""
Parse documents from the .cor file provided in the constructor. Lowercase
each document and ignore some stopwords.
.cor format: one document per line, words separated by whitespace.
"""
for doc in self.getstream():
yield [word for word in utils.to_unicode(doc).lower().split()
if word not in CorpusMiislita.stoplist]
def __len__(self):
"""Define this so we can use `len(corpus)`"""
if 'length' not in self.__dict__:
logger.info("caching corpus size (calculating number of documents)")
self.length = sum(1 for _ in self.get_texts())
return self.length
class TestMiislita(unittest.TestCase):
def test_textcorpus(self):
"""Make sure TextCorpus can be serialized to disk. """
# construct corpus from file
miislita = CorpusMiislita(datapath('head500.noblanks.cor.bz2'))
# make sure serializing works
ftmp = get_tmpfile('test_textcorpus.mm')
corpora.MmCorpus.save_corpus(ftmp, miislita)
self.assertTrue(os.path.exists(ftmp))
# make sure deserializing gives the same result
miislita2 = corpora.MmCorpus(ftmp)
self.assertEqual(list(miislita), list(miislita2))
def test_save_load_ability(self):
"""
Make sure we can save and load (un/pickle) TextCorpus objects (as long
as the underlying input isn't a file-like object; we cannot pickle those).
"""
# construct corpus from file
corpusname = datapath('miIslita.cor')
miislita = CorpusMiislita(corpusname)
# pickle to disk
tmpf = get_tmpfile('tc_test.cpickle')
miislita.save(tmpf)
miislita2 = CorpusMiislita.load(tmpf)
self.assertEqual(len(miislita), len(miislita2))
self.assertEqual(miislita.dictionary.token2id, miislita2.dictionary.token2id)
def test_miislita_high_level(self):
# construct corpus from file
miislita = CorpusMiislita(datapath('miIslita.cor'))
# initialize tfidf transformation and similarity index
tfidf = models.TfidfModel(miislita, miislita.dictionary, normalize=False)
index = similarities.SparseMatrixSimilarity(tfidf[miislita], num_features=len(miislita.dictionary))
# compare to query
query = 'latent semantic indexing'
vec_bow = miislita.dictionary.doc2bow(query.lower().split())
vec_tfidf = tfidf[vec_bow]
# perform a similarity query against the corpus
sims_tfidf = index[vec_tfidf]
# for the expected results see the article
expected = [0.0, 0.2560, 0.7022, 0.1524, 0.3334]
for i, value in enumerate(expected):
self.assertAlmostEqual(sims_tfidf[i], value, 2)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
import unittest
import numpy as np
from chainercv.utils import testing
from chainercv.visualizations import vis_image
try:
import matplotlib # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(
{'img': np.random.randint(0, 255, size=(3, 32, 32)).astype(np.float32)},
{'img': None}
)
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisImage(unittest.TestCase):
def test_vis_image(self):
ax = vis_image(self.img)
self.assertTrue(isinstance(ax, matplotlib.axes.Axes))
testing.run_module(__name__, __file__)
|
import io
from os import path
import pytest
from nikola.plugins.compile.markdown import CompileMarkdown
from .helper import FakeSite
@pytest.mark.parametrize(
"input_str, expected_output",
[
pytest.param("", "", id="empty"),
pytest.param(
"[podcast]https://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3[/podcast]",
'<p><audio controls=""><source src="https://archive.org/download/Rebeldes_Stereotipos/rs20120609_1.mp3" type="audio/mpeg"></source></audio></p>',
id="mdx podcast",
),
pytest.param(
"~~striked out text~~",
"<p><del>striked out text</del></p>",
id="strikethrough",
),
pytest.param(
"""\
#!python
from this
""",
"""\
<table class="codehilitetable"><tr><td class="linenos">\
<div class="linenodiv"><pre>1</pre></div>\
</td><td class="code"><pre class="code literal-block"><span></span>\
<code><span class="kn">from</span> <span class="nn">this</span>
</code></pre>
</td></tr></table>
""",
id="hilite",
),
],
)
def test_compiling_markdown(
compiler, input_path, output_path, input_str, expected_output
):
output = markdown_compile(compiler, input_path, output_path, input_str)
assert output.strip() == expected_output.strip()
@pytest.fixture(scope="module")
def site():
return FakeSite()
@pytest.fixture(scope="module")
def compiler(site):
compiler = CompileMarkdown()
compiler.set_site(site)
return compiler
@pytest.fixture
def input_path(tmpdir):
return path.join(str(tmpdir), "input.markdown")
@pytest.fixture
def output_path(tmpdir):
return path.join(str(tmpdir), "output.html")
def markdown_compile(compiler, input_path, output_path, text):
with io.open(input_path, "w+", encoding="utf8") as input_file:
input_file.write(text)
compiler.compile(input_path, output_path, lang="en")
    with io.open(output_path, "r", encoding="utf8") as output_file:
        return output_file.read()
|
from __future__ import division
from collections import defaultdict
import itertools
import numpy as np
import six
from chainercv.utils.bbox.bbox_iou import bbox_iou
def eval_detection_voc(
pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
gt_difficults=None,
iou_thresh=0.5, use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
This function evaluates predicted bounding boxes obtained from a dataset
which has :math:`N` images by using average precision for each class.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
pred_bboxes (iterable of numpy.ndarray): See the table below.
pred_labels (iterable of numpy.ndarray): See the table below.
pred_scores (iterable of numpy.ndarray): See the table below.
gt_bboxes (iterable of numpy.ndarray): See the table below.
gt_labels (iterable of numpy.ndarray): See the table below.
gt_difficults (iterable of numpy.ndarray): See the table below.
By default, this is :obj:`None`. In that case, this function
considers all bounding boxes to be not difficult.
iou_thresh (float): A prediction is correct if its Intersection over
Union with the ground truth is above this value.
use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
for calculating average precision. The default value is
:obj:`False`.
.. csv-table::
:header: name, shape, dtype, format
:obj:`pred_bboxes`, ":math:`[(R, 4)]`", :obj:`float32`, \
":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
:obj:`pred_labels`, ":math:`[(R,)]`", :obj:`int32`, \
":math:`[0, \#fg\_class - 1]`"
:obj:`pred_scores`, ":math:`[(R,)]`", :obj:`float32`, \
--
:obj:`gt_bboxes`, ":math:`[(R, 4)]`", :obj:`float32`, \
":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
:obj:`gt_labels`, ":math:`[(R,)]`", :obj:`int32`, \
":math:`[0, \#fg\_class - 1]`"
:obj:`gt_difficults`, ":math:`[(R,)]`", :obj:`bool`, --
Returns:
dict:
The keys, value-types and the description of the values are listed
below.
* **ap** (*numpy.ndarray*): An array of average precisions. \
The :math:`l`-th value corresponds to the average precision \
for class :math:`l`. If class :math:`l` does not exist in \
either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
value is set to :obj:`numpy.nan`.
* **map** (*float*): The average of Average Precisions over classes.
"""
prec, rec = calc_detection_voc_prec_rec(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
iou_thresh=iou_thresh)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
return {'ap': ap, 'map': np.nanmean(ap)}
def calc_detection_voc_prec_rec(
pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
gt_difficults=None,
iou_thresh=0.5):
"""Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
pred_bboxes (iterable of numpy.ndarray): See the table in
:func:`chainercv.evaluations.eval_detection_voc`.
pred_labels (iterable of numpy.ndarray): See the table in
:func:`chainercv.evaluations.eval_detection_voc`.
pred_scores (iterable of numpy.ndarray): See the table in
:func:`chainercv.evaluations.eval_detection_voc`.
gt_bboxes (iterable of numpy.ndarray): See the table in
:func:`chainercv.evaluations.eval_detection_voc`.
gt_labels (iterable of numpy.ndarray): See the table in
:func:`chainercv.evaluations.eval_detection_voc`.
gt_difficults (iterable of numpy.ndarray): See the table in
:func:`chainercv.evaluations.eval_detection_voc`.
By default, this is :obj:`None`. In that case, this function
considers all bounding boxes to be not difficult.
iou_thresh (float): A prediction is correct if its Intersection over
            Union with the ground truth is above this value.
Returns:
tuple of two lists:
This function returns two lists: :obj:`prec` and :obj:`rec`.
* :obj:`prec`: A list of arrays. :obj:`prec[l]` is precision \
for class :math:`l`. If class :math:`l` does not exist in \
either :obj:`pred_labels` or :obj:`gt_labels`, :obj:`prec[l]` is \
set to :obj:`None`.
* :obj:`rec`: A list of arrays. :obj:`rec[l]` is recall \
            for class :math:`l`. If there is no non-difficult \
            ground truth of class :math:`l` in \
            :obj:`gt_labels`, :obj:`rec[l]` is \
            set to :obj:`None`.
"""
pred_bboxes = iter(pred_bboxes)
pred_labels = iter(pred_labels)
pred_scores = iter(pred_scores)
gt_bboxes = iter(gt_bboxes)
gt_labels = iter(gt_labels)
if gt_difficults is None:
gt_difficults = itertools.repeat(None)
else:
gt_difficults = iter(gt_difficults)
n_pos = defaultdict(int)
score = defaultdict(list)
match = defaultdict(list)
for pred_bbox, pred_label, pred_score, gt_bbox, gt_label, gt_difficult in \
six.moves.zip(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults):
if gt_difficult is None:
gt_difficult = np.zeros(gt_bbox.shape[0], dtype=bool)
for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
pred_mask_l = pred_label == l
pred_bbox_l = pred_bbox[pred_mask_l]
pred_score_l = pred_score[pred_mask_l]
# sort by score
order = pred_score_l.argsort()[::-1]
pred_bbox_l = pred_bbox_l[order]
pred_score_l = pred_score_l[order]
gt_mask_l = gt_label == l
gt_bbox_l = gt_bbox[gt_mask_l]
gt_difficult_l = gt_difficult[gt_mask_l]
n_pos[l] += np.logical_not(gt_difficult_l).sum()
score[l].extend(pred_score_l)
if len(pred_bbox_l) == 0:
continue
if len(gt_bbox_l) == 0:
match[l].extend((0,) * pred_bbox_l.shape[0])
continue
# VOC evaluation follows integer typed bounding boxes.
pred_bbox_l = pred_bbox_l.copy()
pred_bbox_l[:, 2:] += 1
gt_bbox_l = gt_bbox_l.copy()
gt_bbox_l[:, 2:] += 1
iou = bbox_iou(pred_bbox_l, gt_bbox_l)
gt_index = iou.argmax(axis=1)
# set -1 if there is no matching ground truth
gt_index[iou.max(axis=1) < iou_thresh] = -1
del iou
selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
for gt_idx in gt_index:
if gt_idx >= 0:
if gt_difficult_l[gt_idx]:
match[l].append(-1)
else:
if not selec[gt_idx]:
match[l].append(1)
else:
match[l].append(0)
selec[gt_idx] = True
else:
match[l].append(0)
for iter_ in (
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults):
if next(iter_, None) is not None:
raise ValueError('Length of input iterables need to be same.')
n_fg_class = max(n_pos.keys()) + 1
prec = [None] * n_fg_class
rec = [None] * n_fg_class
for l in n_pos.keys():
score_l = np.array(score[l])
match_l = np.array(match[l], dtype=np.int8)
order = score_l.argsort()[::-1]
match_l = match_l[order]
tp = np.cumsum(match_l == 1)
fp = np.cumsum(match_l == 0)
# If an element of fp + tp is 0,
# the corresponding element of prec[l] is nan.
prec[l] = tp / (fp + tp)
# If n_pos[l] is 0, rec[l] is None.
if n_pos[l] > 0:
rec[l] = tp / n_pos[l]
return prec, rec
def calc_detection_voc_ap(prec, rec, use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
This function calculates average precisions
from given precisions and recalls.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
prec (list of numpy.array): A list of arrays.
:obj:`prec[l]` indicates precision for class :math:`l`.
If :obj:`prec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
rec (list of numpy.array): A list of arrays.
:obj:`rec[l]` indicates recall for class :math:`l`.
If :obj:`rec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
for calculating average precision. The default value is
:obj:`False`.
Returns:
~numpy.ndarray:
This function returns an array of average precisions.
The :math:`l`-th value corresponds to the average precision
for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is
:obj:`None`, the corresponding value is set to :obj:`numpy.nan`.
"""
n_fg_class = len(prec)
ap = np.empty(n_fg_class)
for l in six.moves.range(n_fg_class):
if prec[l] is None or rec[l] is None:
ap[l] = np.nan
continue
if use_07_metric:
# 11 point metric
ap[l] = 0
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec[l] >= t) == 0:
p = 0
else:
p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])
ap[l] += p / 11
else:
# correct AP calculation
# first append sentinel values at the end
mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))
mrec = np.concatenate(([0], rec[l], [1]))
mpre = np.maximum.accumulate(mpre[::-1])[::-1]
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
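# A minimal usage sketch of calc_detection_voc_ap on made-up precision/recall
# arrays for a single class, comparing the area-based metric with the PASCAL
# VOC 2007 11-point metric. The numbers are illustrative only.
if __name__ == '__main__':
    toy_prec = [np.array([1.0, 0.5, 2.0 / 3.0])]
    toy_rec = [np.array([0.5, 0.5, 1.0])]
    print('area-based AP:', calc_detection_voc_ap(toy_prec, toy_rec))
    print('11-point AP:', calc_detection_voc_ap(
        toy_prec, toy_rec, use_07_metric=True))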
|
import re
import sys
import time
import logging
import traceback
MODULE_NAME = __name__.split('.')[0]
logging_types = dict(debug=logging.DEBUG, info=logging.INFO,
warning=logging.WARNING, error=logging.ERROR,
critical=logging.CRITICAL)
class _Formatter(logging.Formatter):
    """Formatter that optionally prepends the calling function's name."""
def __init__(self):
super().__init__() # '%(levelname)s %(name)s: %(message)s')
self.prepend_caller = False
def format(self, record):
base = '[{} {} {}] '.format(record.levelname[0],
time.strftime('%H:%M:%S'),
record.name)
if isinstance(record.msg, Exception):
            # Get exception info and skip the first frames
type_, value, tb = sys.exc_info()
for _ in range(getattr(value, 'skip_tb', 0)):
tb = tb.tb_next
# Enable post mortem debugging
sys.last_type = type_
sys.last_value = value
sys.last_traceback = tb
# Compose message
cname = type_.__name__
out = ''.join(traceback.format_list(traceback.extract_tb(tb)))
            del tb  # we don't want to hold too many references to the traceback
return base + cname + ': ' + str(value) + '\n' + out.rstrip()
else:
out = base + str(record.msg % record.args)
if self.prepend_caller:
part1, part2 = out.split(':', 1)
out = part1 + ' ' + record.funcName + '():' + part2
return out
class _Handler(logging.StreamHandler):
    """ Stream handler that prints INFO and lower to stdout,
    and WARNING and higher to stderr.
    """
def emit(self, record):
if record.levelno >= logging.WARNING:
self.stream = sys.stderr
else:
self.stream = sys.stdout
super().emit(record)
class _MatchFilter:
""" To filter records on regexp matches.
"""
def __init__(self):
self.match = None
def filter(self, record):
match = self.match
if not match:
return True
elif isinstance(match, str):
return (match in record.name or
match in record.getMessage() or
match in record.funcName)
else:
return (re.search(match, record.name) or
re.search(match, record.getMessage()) or
re.search(match, record.funcName))
class _CaptureFilter:
""" To collect records in the capture_log context.
"""
def __init__(self):
self.records = []
def filter(self, record):
self.records.append(_formatter.format(record))
return False
def set_log_level(level, match=None):
"""Set the logging level and match filter
Parameters:
level (str, int): The verbosity of messages to print.
If a str, it can be either DEBUG, INFO, WARNING, ERROR, or
CRITICAL. Note that these are for convenience and are equivalent
to passing in logging.DEBUG, etc.
match (str, regexp, None): String to match. Only those messages
            that contain ``match`` as a substring (and have the
            appropriate ``level``) will be displayed. Match can also be
a compiled regexp.
Notes
-----
If level is DEBUG, the method emitting the log message will be
prepended to each log message. Note that if ``level`` is DEBUG or
if the ``match`` option is used, a small overhead is added to each
logged message.
"""
if isinstance(level, str):
level = level.lower()
if level not in logging_types:
raise ValueError('Invalid argument "%s"' % level)
level = logging_types[level]
elif not isinstance(level, int):
raise TypeError('log level must be an int or string')
logger.setLevel(level)
_filter.match = match
_formatter.prepend_caller = level <= logging.DEBUG
class capture_log:
""" Context manager to capture log messages. Useful for testing.
Usage:
.. code-block:: python
with capture_log(level, match) as log:
...
        # log is a list of strings (as they would have appeared in the console)
"""
def __init__(self, level, match=None):
self._args = level, match
def __enter__(self):
self._old_args = logger.level, _filter.match
set_log_level(*self._args)
self._filter = _CaptureFilter()
_handler.addFilter(self._filter)
return self._filter.records
def __exit__(self, type, value, traceback):
_handler.removeFilter(self._filter)
set_log_level(*self._old_args)
# Create logger
logger = logging.getLogger(MODULE_NAME)
logger.propagate = False
logger.setLevel(logging.INFO)
# Remove previous handlers; these can be leftovers when flexx is re-imported,
# as can happen during tests
h = None
for h in list(logger.handlers):
if h.__class__.__module__ == __name__:
logger.removeHandler(h)
del h
_handler = _Handler()
_filter = _MatchFilter()
_formatter = _Formatter()
logger.addHandler(_handler)
_handler.addFilter(_filter)
_handler.setFormatter(_formatter)
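# A minimal usage sketch: capture messages emitted through this module's
# logger with capture_log and print them afterwards; the message text is
# illustrative only.
if __name__ == '__main__':
    with capture_log('info') as log:
        logger.info('demo message')
    for line in log:
        print(line)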
|
from beewi_smartclim import BeewiSmartClimPoller # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MAC,
CONF_NAME,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
# Default values
DEFAULT_NAME = "BeeWi SmartClim"
# Sensor config
SENSOR_TYPES = [
[DEVICE_CLASS_TEMPERATURE, "Temperature", TEMP_CELSIUS],
[DEVICE_CLASS_HUMIDITY, "Humidity", PERCENTAGE],
[DEVICE_CLASS_BATTERY, "Battery", PERCENTAGE],
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the beewi_smartclim platform."""
mac = config[CONF_MAC]
prefix = config[CONF_NAME]
poller = BeewiSmartClimPoller(mac)
sensors = []
for sensor_type in SENSOR_TYPES:
device = sensor_type[0]
name = sensor_type[1]
unit = sensor_type[2]
# `prefix` is the name configured by the user for the sensor, we're appending
# the device type at the end of the name (garden -> garden temperature)
if prefix:
name = f"{prefix} {name}"
sensors.append(BeewiSmartclimSensor(poller, name, mac, device, unit))
add_entities(sensors)
class BeewiSmartclimSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, poller, name, mac, device, unit):
"""Initialize the sensor."""
self._poller = poller
self._name = name
self._mac = mac
self._device = device
self._unit = unit
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
    def state(self):
        """Return the state of the sensor."""
return self._state
@property
def device_class(self):
"""Device class of this entity."""
return self._device
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return f"{self._mac}_{self._device}"
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
def update(self):
"""Fetch new state data from the poller."""
self._poller.update_sensor()
self._state = None
if self._device == DEVICE_CLASS_TEMPERATURE:
self._state = self._poller.get_temperature()
if self._device == DEVICE_CLASS_HUMIDITY:
self._state = self._poller.get_humidity()
if self._device == DEVICE_CLASS_BATTERY:
self._state = self._poller.get_battery()
|
import mock
from kubernetes.client import V1Deployment
from kubernetes.client import V1StatefulSet
from pytest import fixture
from pytest import raises
from paasta_tools.cleanup_kubernetes_jobs import cleanup_unused_apps
from paasta_tools.cleanup_kubernetes_jobs import DontKillEverythingError
from paasta_tools.cleanup_kubernetes_jobs import main
from paasta_tools.kubernetes.application.controller_wrappers import DeploymentWrapper
@fixture
def fake_deployment():
fake_deployment = V1Deployment(
metadata=mock.Mock(
namespace="paasta",
labels={
"yelp.com/paasta_service": "service",
"yelp.com/paasta_instance": "instance-1",
"yelp.com/paasta_git_sha": "1234",
"yelp.com/paasta_config_sha": "1234",
"paasta.yelp.com/service": "service",
"paasta.yelp.com/instance": "instance-1",
"paasta.yelp.com/git_sha": "1234",
"paasta.yelp.com/config_sha": "1234",
},
),
spec=mock.Mock(replicas=0),
)
type(fake_deployment.metadata).name = mock.PropertyMock(
return_value="service-instance-1"
)
return fake_deployment
@fixture
def fake_stateful_set():
fake_stateful_set = V1StatefulSet(
metadata=mock.Mock(
namespace="paasta",
labels={
"yelp.com/paasta_service": "service",
"yelp.com/paasta_instance": "instance-2",
"yelp.com/paasta_git_sha": "1234",
"yelp.com/paasta_config_sha": "1234",
"paasta.yelp.com/service": "service",
"paasta.yelp.com/instance": "instance-2",
"paasta.yelp.com/git_sha": "1234",
"paasta.yelp.com/config_sha": "1234",
},
),
spec=mock.Mock(replicas=0),
)
type(fake_stateful_set.metadata).name = (
mock.PropertyMock(return_value="service-instance-2"),
)
return fake_stateful_set
@fixture
def invalid_app():
invalid_app = V1Deployment(
metadata=mock.Mock(namespace="paasta", labels={}), spec=mock.Mock(replicas=0)
)
type(invalid_app.metadata).name = (mock.PropertyMock(return_value="invalid_app"),)
return invalid_app
def test_main(fake_deployment, fake_stateful_set, invalid_app):
soa_dir = "paasta_maaaachine"
with mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.cleanup_unused_apps", autospec=True
) as cleanup_patch:
main(("--soa-dir", soa_dir))
cleanup_patch.assert_called_once_with(soa_dir, kill_threshold=0.5, force=False)
def test_list_apps(fake_deployment, fake_stateful_set, invalid_app):
mock_kube_client = mock.MagicMock()
with mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.KubeClient",
return_value=mock_kube_client,
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
return_value={},
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
) as mock_alert_state_change:
mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
cleanup_unused_apps("soa_dir", kill_threshold=1, force=False)
assert mock_kube_client.deployments.list_namespaced_deployment.call_count == 1
assert mock_kube_client.deployments.list_namespaced_stateful_set.call_count == 1
def test_cleanup_unused_apps(fake_deployment, fake_stateful_set, invalid_app):
mock_kube_client = mock.MagicMock()
with mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.KubeClient",
return_value=mock_kube_client,
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.list_namespaced_applications",
return_value=[DeploymentWrapper(fake_deployment)],
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
return_value={},
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
) as mock_alert_state_change:
mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
cleanup_unused_apps("soa_dir", kill_threshold=1, force=False)
assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 1
def test_cleanup_unused_apps_does_not_delete(
fake_deployment, fake_stateful_set, invalid_app
):
mock_kube_client = mock.MagicMock()
with mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.KubeClient",
return_value=mock_kube_client,
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.list_namespaced_applications",
return_value=[DeploymentWrapper(fake_deployment)],
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
return_value={("service", "instance-1"), ("service", "instance-2")},
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
) as mock_alert_state_change:
mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
cleanup_unused_apps("soa_dir", kill_threshold=1, force=False)
assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 0
def test_cleanup_unused_apps_dont_kill_everything(
fake_deployment, fake_stateful_set, invalid_app
):
mock_kube_client = mock.MagicMock()
with mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.KubeClient",
return_value=mock_kube_client,
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.list_namespaced_applications",
return_value=[DeploymentWrapper(fake_deployment)],
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
return_value={},
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
) as mock_alert_state_change:
mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
with raises(DontKillEverythingError):
cleanup_unused_apps("soa_dir", kill_threshold=0, force=False)
assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 0
def test_cleanup_unused_apps_force(fake_deployment, fake_stateful_set, invalid_app):
mock_kube_client = mock.MagicMock()
with mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.KubeClient",
return_value=mock_kube_client,
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.list_namespaced_applications",
return_value=[DeploymentWrapper(fake_deployment)],
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
return_value={},
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
) as mock_alert_state_change:
mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
cleanup_unused_apps("soa_dir", kill_threshold=0, force=True)
assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 1
def test_cleanup_unused_apps_ignore_invalid_apps(
fake_deployment, fake_stateful_set, invalid_app
):
mock_kube_client = mock.MagicMock()
with mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.KubeClient",
return_value=mock_kube_client,
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.get_services_for_cluster",
return_value={},
autospec=True,
), mock.patch(
"paasta_tools.cleanup_kubernetes_jobs.alert_state_change", autospec=True
) as mock_alert_state_change:
mock_alert_state_change.__enter__ = mock.Mock(return_value=(mock.Mock(), None))
mock_alert_state_change.__exit__ = mock.Mock(return_value=None)
mock_kube_client.deployments.list_namespaced_deployment.return_value = mock.MagicMock(
items=[invalid_app]
)
cleanup_unused_apps("soa_dir", kill_threshold=0, force=True)
assert mock_kube_client.deployments.delete_namespaced_deployment.call_count == 0
|
import re
from io import StringIO
from html.entities import name2codepoint
from html.parser import HTMLParser
from urllib.error import URLError
import pygogo as gogo
from riko.utils import fetch
from meza.fntools import Objectify, remove_keys, listize
from meza.process import merge
from meza.compat import decode
from ijson import items
logger = gogo.Gogo(__name__, verbose=False, monolog=True).logger
try:
from lxml import etree, html
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
logger.debug('xml parser: ElementTree')
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import ElementTree
else:
logger.debug('xml parser: cElementTree')
from xml.etree.cElementTree import ElementTree
import html5lib as html
html5parser = None
else:
logger.debug('xml parser: lxml')
from lxml.html import html5parser
try:
import speedparser3 as speedparser
except ImportError:
import feedparser
logger.debug('rss parser: feedparser')
speedparser = None
else:
logger.debug('rss parser: speedparser')
rssparser = speedparser or feedparser
NAMESPACES = {
'owl': 'http://www.w3.org/2002/07/owl#',
'xhtml': 'http://www.w3.org/1999/xhtml'}
ESCAPE = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&apos;'}
SKIP_SWITCH = {
'contains': lambda text, value: text.lower() in value.lower(),
'intersection': lambda text, value: set(text).intersection(value),
're.search': lambda text, value: re.search(text, value),
}
class LinkParser(HTMLParser):
def reset(self):
HTMLParser.reset(self)
self.data = StringIO()
def handle_data(self, data):
self.data.write('%s\n' % decode(data))
def get_text(html, convert_charrefs=False):
try:
parser = LinkParser(convert_charrefs=convert_charrefs)
except TypeError:
parser = LinkParser()
try:
parser.feed(html)
except TypeError:
parser.feed(decode(html))
return parser.data.getvalue()
def parse_rss(url=None, **kwargs):
try:
f = fetch(decode(url), **kwargs)
except (ValueError, URLError):
parsed = rssparser.parse(url)
else:
content = f.read() if speedparser else f
try:
parsed = rssparser.parse(content)
finally:
f.close()
return parsed
def xpath(tree, path='/', pos=0, namespace=None):
try:
elements = tree.xpath(path)
except AttributeError:
stripped = path.lstrip('/')
tags = stripped.split('/') if stripped else []
try:
# TODO: consider replacing with twisted.words.xish.xpath
elements = tree.getElementsByTagName(tags[pos]) if tags else [tree]
except AttributeError:
element_name = str(tree).split(' ')[1]
if not namespace and {'{', '}'}.issubset(element_name):
start, end = element_name.find('{') + 1, element_name.find('}')
ns = element_name[start:end]
ns_iter = (name for name in NAMESPACES if name in ns)
namespace = next(ns_iter, namespace)
prefix = ('/%s:' % namespace) if namespace else '/'
match = './%s%s' % (prefix, prefix.join(tags[1:]))
elements = tree.findall(match, NAMESPACES)
except IndexError:
elements = [tree]
else:
for element in elements:
return xpath(element, path, pos + 1)
return iter(elements)
def xml2etree(f, xml=True, html5=False):
if xml:
element_tree = etree.parse(f)
elif html5 and html5parser:
element_tree = html5parser.parse(f)
elif html5parser:
element_tree = html.parse(f)
else:
# html5lib's parser returns an Element, so we must convert it into an
# ElementTree
element_tree = ElementTree(html.parse(f))
return element_tree
def _make_content(i, value=None, tag='content', append=True, strip=False):
content = i.get(tag)
try:
value = value.strip() if value and strip else value
except AttributeError:
pass
if content and value and append:
content = listize(content)
content.append(value)
elif content and value:
content = ''.join([content, value])
elif value:
content = value
return {tag: content} if content else {}
def etree2dict(element):
"""Convert an element tree into a dict imitating how Yahoo Pipes does it.
"""
i = dict(element.items())
i.update(_make_content(i, element.text, strip=True))
for child in element:
tag = child.tag
value = etree2dict(child)
i.update(_make_content(i, value, tag))
if element.text and not set(i).difference(['content']):
        # element is a leaf node and doesn't have attributes
i = i.get('content')
return i
def any2dict(f, ext='xml', html5=False, path=None):
path = path or ''
if ext in {'xml', 'html'}:
xml = ext == 'xml'
root = xml2etree(f, xml, html5).getroot()
replaced = '/'.join(path.split('.'))
tree = next(xpath(root, replaced)) if replaced else root
content = etree2dict(tree)
elif ext == 'json':
content = next(items(f, path))
else:
raise TypeError("Invalid file type: '%s'" % ext)
return content
def get_value(item, conf=None, force=False, default=None, **kwargs):
item = item or {}
try:
value = item.get(conf['subkey'], **kwargs)
except KeyError:
if conf and not (hasattr(conf, 'delete') or force):
raise TypeError('conf must be of type DotDict')
elif force:
value = conf
elif conf:
value = conf.get(**kwargs)
else:
value = default
except (TypeError, AttributeError):
# conf is already set to a value so use it or the default
value = default if conf is None else conf
except (ValueError):
# error converting subkey value with OPS['func'] so use the default
value = default
return value
def parse_conf(item, **kwargs):
kw = Objectify(kwargs, defaults={}, conf={})
# TODO: fix so .items() returns a DotDict instance
# parsed = {k: get_value(item, v) for k, v in kw.conf.items()}
sentinel = {'subkey', 'value', 'terminal'}
not_dict = not hasattr(kw.conf, 'keys')
if not_dict or (len(kw.conf) == 1 and sentinel.intersection(kw.conf)):
objectified = get_value(item, **kwargs)
else:
no_conf = remove_keys(kwargs, 'conf')
parsed = {k: get_value(item, kw.conf[k], **no_conf) for k in kw.conf}
result = merge([kw.defaults, parsed])
objectified = Objectify(result) if kw.objectify else result
return objectified
def get_skip(item, skip_if=None, **kwargs):
""" Determine whether or not to skip an item
Args:
item (dict): The entry to process
skip_if (func or Iter[dict]): The skipping criteria
Returns:
bool: whether or not to skip
Examples:
>>> item = {'content': 'Some content'}
>>> get_skip(item, lambda x: x['content'] == 'Some content')
True
>>> get_skip(item)
False
>>> get_skip(item, {'field': 'content'})
False
>>> bool(get_skip(item, {'field': 'content', 'include': True}))
True
>>> get_skip(item, {'field': 'content', 'text': 'some'})
True
>>> get_skip(item, {'field': 'content', 'text': 'some', 'include': True})
False
>>> get_skip(item, {'field': 'content', 'text': 'other'})
False
>>> get_skip(item, {'field': 'content', 'text': 'other', 'include': True})
True
"""
item = item or {}
skips = listize(skip_if)
for _skip in skips:
if callable(_skip):
skip = _skip(item)
elif _skip:
value = item.get(_skip['field'], '')
text = _skip.get('text')
op = _skip.get('op', 'contains')
match = not SKIP_SWITCH[op](text, value) if text else value
skip = match if _skip.get('include') else not match
else:
skip = False
if skip:
break
return skip
def get_field(item, field=None, **kwargs):
return item.get(field, **kwargs) if field else item
def text2entity(text):
"""Convert HTML/XML special chars to entity references
"""
return ESCAPE.get(text, text)
def entity2text(entitydef):
"""Convert an HTML entity reference into unicode.
http://stackoverflow.com/a/58125/408556
"""
if entitydef.startswith('&#x'):
cp = int(entitydef[3:-1], 16)
elif entitydef.startswith('&#'):
cp = int(entitydef[2:-1])
elif entitydef.startswith('&'):
cp = name2codepoint[entitydef[1:-1]]
else:
logger.debug(entitydef)
cp = None
return chr(cp) if cp else entitydef
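# A minimal sketch of how xml2etree() and etree2dict() work together: parse a
# tiny, made-up XML snippet and convert it into a dict. This works with either
# the lxml or the stdlib ElementTree backend selected above.
if __name__ == '__main__':
    from io import BytesIO
    demo_xml = b'<item id="1"><title>hello</title><tag>a</tag><tag>b</tag></item>'
    demo_root = xml2etree(BytesIO(demo_xml)).getroot()
    # repeated child tags are collected into a list by _make_content()
    print(etree2dict(demo_root))  # {'id': '1', 'title': 'hello', 'tag': ['a', 'b']}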
|
import os
from typing import Optional
from django.urls import reverse
from django.utils.functional import cached_property
from weblate.accounts.avatar import get_user_display
from weblate.logger import LOGGER
class URLMixin:
    """Mixin for models providing a standard shortcut API for a few standard URLs."""
_reverse_url_name: Optional[str] = None
def get_reverse_url_kwargs(self):
"""Return kwargs for URL reversing."""
raise NotImplementedError()
def reverse_url(self, name=None):
"""Generic reverser for URL."""
if name is None:
urlname = self._reverse_url_name
else:
urlname = f"{name}_{self._reverse_url_name}"
return reverse(urlname, kwargs=self.get_reverse_url_kwargs())
def get_absolute_url(self):
return self.reverse_url()
def get_commit_url(self):
return self.reverse_url("commit")
def get_update_url(self):
return self.reverse_url("update")
def get_push_url(self):
return self.reverse_url("push")
def get_reset_url(self):
return self.reverse_url("reset")
def get_cleanup_url(self):
return self.reverse_url("cleanup")
def get_lock_url(self):
return self.reverse_url("lock")
def get_unlock_url(self):
return self.reverse_url("unlock")
def get_remove_url(self):
return self.reverse_url("remove")
class LoggerMixin:
"""Mixin for models with logging."""
@cached_property
def full_slug(self):
return self.slug
def log_hook(self, level, msg, *args):
return
def log_debug(self, msg, *args):
self.log_hook("DEBUG", msg, *args)
return LOGGER.debug(": ".join((self.full_slug, msg)), *args)
def log_info(self, msg, *args):
self.log_hook("INFO", msg, *args)
return LOGGER.info(": ".join((self.full_slug, msg)), *args)
def log_warning(self, msg, *args):
self.log_hook("WARNING", msg, *args)
return LOGGER.warning(": ".join((self.full_slug, msg)), *args)
def log_error(self, msg, *args):
self.log_hook("ERROR", msg, *args)
return LOGGER.error(": ".join((self.full_slug, msg)), *args)
class PathMixin(LoggerMixin):
"""Mixin for models with path manipulations."""
def _get_path(self):
"""Actual calculation of path."""
raise NotImplementedError()
@cached_property
def full_path(self):
return self._get_path()
def invalidate_path_cache(self):
if "full_path" in self.__dict__:
del self.__dict__["full_path"]
    def check_rename(self, old, validate=False):
        """Detect slug changes and possibly rename the underlying directory."""
# No moving for links
if getattr(self, "is_repo_link", False) or getattr(old, "is_repo_link", False):
return
old_path = old.full_path
# Invalidate path cache (otherwise we would still get old path)
self.invalidate_path_cache()
new_path = self.full_path
if old_path != new_path:
if validate:
# Patch using old path for validation
# the actual rename happens only on save
self.__dict__["full_path"] = old_path
return
self.log_info("path changed from %s to %s", old_path, new_path)
if os.path.exists(old_path) and not os.path.exists(new_path):
self.log_info('renaming "%s" to "%s"', old_path, new_path)
os.rename(old_path, new_path)
def create_path(self):
"""Create filesystem directory for storing data."""
path = self.full_path
if not os.path.exists(path):
os.makedirs(path)
class UserDisplayMixin:
def get_user_display(self, icon: bool = True):
return get_user_display(self.user, icon, link=True)
def get_user_text_display(self):
return get_user_display(self.user, icon=False, link=True)
class CacheKeyMixin:
@cached_property
def cache_key(self):
return f"{self.__class__.__name__}-{self.pk}"
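class DemoLoggedObject(LoggerMixin):
    """Hypothetical, minimal LoggerMixin consumer for illustration only.
    Any object exposing a ``slug`` (or overriding ``full_slug``) gets its log
    messages prefixed with that slug, e.g.
    ``DemoLoggedObject().log_info("created %d strings", 5)`` logs
    ``demo/component: created 5 strings`` through weblate's LOGGER.
    """
    slug = "demo/component"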
|
from pyps4_2ndscreen.credential import get_ddp_message
from homeassistant.components import ps4
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_GAME,
)
from homeassistant.components.ps4.const import (
ATTR_MEDIA_IMAGE_URL,
CONFIG_ENTRY_VERSION as VERSION,
DEFAULT_REGION,
DOMAIN,
GAMES_FILE,
PS4_DATA,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ATTR_LOCKED,
CONF_HOST,
CONF_NAME,
CONF_REGION,
CONF_TOKEN,
STATE_IDLE,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry, mock_device_registry, mock_registry
MOCK_CREDS = "123412341234abcd12341234abcd12341234abcd12341234abcd12341234abcd"
MOCK_NAME = "ha_ps4_name"
MOCK_REGION = DEFAULT_REGION
MOCK_GAMES_FILE = GAMES_FILE
MOCK_HOST = "192.168.0.2"
MOCK_HOST_NAME = "Fake PS4"
MOCK_HOST_ID = "A0000A0AA000"
MOCK_HOST_VERSION = "09879011"
MOCK_HOST_TYPE = "PS4"
MOCK_STATUS_REST = "Server Standby"
MOCK_STATUS_ON = "Ok"
MOCK_STANDBY_CODE = 620
MOCK_ON_CODE = 200
MOCK_TCP_PORT = 997
MOCK_DDP_PORT = 987
MOCK_DDP_VERSION = "00020020"
MOCK_RANDOM_PORT = "1234"
MOCK_TITLE_ID = "CUSA00000"
MOCK_TITLE_NAME = "Random Game"
MOCK_TITLE_TYPE = MEDIA_TYPE_GAME
MOCK_TITLE_ART_URL = "https://somecoverurl"
MOCK_GAMES_DATA = {
ATTR_LOCKED: False,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
ATTR_MEDIA_IMAGE_URL: MOCK_TITLE_ART_URL,
ATTR_MEDIA_TITLE: MOCK_TITLE_NAME,
}
MOCK_GAMES_DATA_LOCKED = {
ATTR_LOCKED: True,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
ATTR_MEDIA_IMAGE_URL: MOCK_TITLE_ART_URL,
ATTR_MEDIA_TITLE: MOCK_TITLE_NAME,
}
MOCK_STATUS_PLAYING = {
"host-type": MOCK_HOST_TYPE,
"host-ip": MOCK_HOST,
"host-request-port": MOCK_TCP_PORT,
"host-id": MOCK_HOST_ID,
"host-name": MOCK_HOST_NAME,
"running-app-titleid": MOCK_TITLE_ID,
"running-app-name": MOCK_TITLE_NAME,
"status": MOCK_STATUS_ON,
"status_code": MOCK_ON_CODE,
"device-discovery-protocol-version": MOCK_DDP_VERSION,
"system-version": MOCK_HOST_VERSION,
}
MOCK_STATUS_IDLE = {
"host-type": MOCK_HOST_TYPE,
"host-ip": MOCK_HOST,
"host-request-port": MOCK_TCP_PORT,
"host-id": MOCK_HOST_ID,
"host-name": MOCK_HOST_NAME,
"status": MOCK_STATUS_ON,
"status_code": MOCK_ON_CODE,
"device-discovery-protocol-version": MOCK_DDP_VERSION,
"system-version": MOCK_HOST_VERSION,
}
MOCK_STATUS_STANDBY = {
"host-type": MOCK_HOST_TYPE,
"host-ip": MOCK_HOST,
"host-request-port": MOCK_TCP_PORT,
"host-id": MOCK_HOST_ID,
"host-name": MOCK_HOST_NAME,
"status": MOCK_STATUS_REST,
"status_code": MOCK_STANDBY_CODE,
"device-discovery-protocol-version": MOCK_DDP_VERSION,
"system-version": MOCK_HOST_VERSION,
}
MOCK_DEVICE = {CONF_HOST: MOCK_HOST, CONF_NAME: MOCK_NAME, CONF_REGION: MOCK_REGION}
MOCK_ENTRY_ID = "SomeID"
MOCK_DEVICE_MODEL = "PlayStation 4"
MOCK_DATA = {CONF_TOKEN: MOCK_CREDS, "devices": [MOCK_DEVICE]}
MOCK_CONFIG = MockConfigEntry(domain=DOMAIN, data=MOCK_DATA, entry_id=MOCK_ENTRY_ID)
MOCK_LOAD = "homeassistant.components.ps4.media_player.load_games"
async def setup_mock_component(hass, entry=None):
"""Set up Mock Media Player."""
if entry is None:
mock_entry = MockConfigEntry(
domain=ps4.DOMAIN, data=MOCK_DATA, version=VERSION, entry_id=MOCK_ENTRY_ID
)
else:
mock_entry = entry
mock_entry.add_to_hass(hass)
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
mock_entities = hass.states.async_entity_ids()
mock_entity_id = mock_entities[0]
return mock_entity_id
async def mock_ddp_response(hass, mock_status_data):
"""Mock raw UDP response from device."""
mock_protocol = hass.data[PS4_DATA].protocol
mock_code = mock_status_data.get("status_code")
mock_status = mock_status_data.get("status")
mock_status_header = f"{mock_code} {mock_status}"
mock_response = get_ddp_message(mock_status_header, mock_status_data).encode()
mock_protocol.datagram_received(mock_response, (MOCK_HOST, MOCK_RANDOM_PORT))
await hass.async_block_till_done()
async def test_media_player_is_setup_correctly_with_entry(hass):
    """Test that the entity is set up correctly from a config entry."""
mock_entity_id = await setup_mock_component(hass)
mock_state = hass.states.get(mock_entity_id).state
# Assert status updated callback is added to protocol.
assert len(hass.data[PS4_DATA].protocol.callbacks) == 1
# Test that entity is added to hass.
assert hass.data[PS4_DATA].protocol is not None
assert mock_entity_id == f"media_player.{MOCK_NAME}"
assert mock_state == STATE_UNKNOWN
async def test_state_standby_is_set(hass):
"""Test that state is set to standby."""
mock_entity_id = await setup_mock_component(hass)
await mock_ddp_response(hass, MOCK_STATUS_STANDBY)
assert hass.states.get(mock_entity_id).state == STATE_STANDBY
async def test_state_playing_is_set(hass):
"""Test that state is set to playing."""
mock_entity_id = await setup_mock_component(hass)
mock_func = "{}{}".format(
"homeassistant.components.ps4.media_player.",
"pyps4.Ps4Async.async_get_ps_store_data",
)
with patch(mock_func, return_value=None):
await mock_ddp_response(hass, MOCK_STATUS_PLAYING)
assert hass.states.get(mock_entity_id).state == STATE_PLAYING
async def test_state_idle_is_set(hass):
"""Test that state is set to idle."""
mock_entity_id = await setup_mock_component(hass)
await mock_ddp_response(hass, MOCK_STATUS_IDLE)
assert hass.states.get(mock_entity_id).state == STATE_IDLE
async def test_state_none_is_set(hass):
"""Test that state is set to None."""
mock_entity_id = await setup_mock_component(hass)
assert hass.states.get(mock_entity_id).state == STATE_UNKNOWN
async def test_media_attributes_are_fetched(hass):
"""Test that media attributes are fetched."""
mock_entity_id = await setup_mock_component(hass)
mock_func = "{}{}".format(
"homeassistant.components.ps4.media_player.",
"pyps4.Ps4Async.async_get_ps_store_data",
)
# Mock result from fetching data.
mock_result = MagicMock()
mock_result.name = MOCK_TITLE_NAME
mock_result.cover_art = MOCK_TITLE_ART_URL
mock_result.game_type = "game"
with patch(mock_func, return_value=mock_result) as mock_fetch:
await mock_ddp_response(hass, MOCK_STATUS_PLAYING)
mock_state = hass.states.get(mock_entity_id)
mock_attrs = dict(mock_state.attributes)
assert len(mock_fetch.mock_calls) == 1
assert mock_state.state == STATE_PLAYING
assert len(mock_attrs.get(ATTR_INPUT_SOURCE_LIST)) == 1
assert mock_attrs.get(ATTR_INPUT_SOURCE_LIST)[0] == MOCK_TITLE_NAME
assert mock_attrs.get(ATTR_MEDIA_CONTENT_ID) == MOCK_TITLE_ID
assert mock_attrs.get(ATTR_MEDIA_TITLE) == MOCK_TITLE_NAME
assert mock_attrs.get(ATTR_MEDIA_CONTENT_TYPE) == MOCK_TITLE_TYPE
async def test_media_attributes_are_loaded(hass, patch_load_json):
"""Test that media attributes are loaded."""
mock_entity_id = await setup_mock_component(hass)
patch_load_json.return_value = {MOCK_TITLE_ID: MOCK_GAMES_DATA_LOCKED}
with patch(
"homeassistant.components.ps4.media_player."
"pyps4.Ps4Async.async_get_ps_store_data",
return_value=None,
) as mock_fetch:
await mock_ddp_response(hass, MOCK_STATUS_PLAYING)
mock_state = hass.states.get(mock_entity_id)
mock_attrs = dict(mock_state.attributes)
# Ensure that data is not fetched.
assert not mock_fetch.mock_calls
assert mock_state.state == STATE_PLAYING
assert len(mock_attrs.get(ATTR_INPUT_SOURCE_LIST)) == 1
assert mock_attrs.get(ATTR_INPUT_SOURCE_LIST)[0] == MOCK_TITLE_NAME
assert mock_attrs.get(ATTR_MEDIA_CONTENT_ID) == MOCK_TITLE_ID
assert mock_attrs.get(ATTR_MEDIA_TITLE) == MOCK_TITLE_NAME
assert mock_attrs.get(ATTR_MEDIA_CONTENT_TYPE) == MOCK_TITLE_TYPE
async def test_device_info_is_set_from_status_correctly(hass):
"""Test that device info is set correctly from status update."""
mock_d_registry = mock_device_registry(hass)
with patch("pyps4_2ndscreen.ps4.get_status", return_value=MOCK_STATUS_STANDBY):
mock_entity_id = await setup_mock_component(hass)
await hass.async_block_till_done()
# Reformat mock status-sw_version for assertion.
mock_version = MOCK_STATUS_STANDBY["system-version"]
mock_version = mock_version[1:4]
mock_version = "{}.{}".format(mock_version[0], mock_version[1:])
mock_state = hass.states.get(mock_entity_id).state
mock_d_entries = mock_d_registry.devices
mock_entry = mock_d_registry.async_get_device(
identifiers={(DOMAIN, MOCK_HOST_ID)}, connections={()}
)
assert mock_state == STATE_STANDBY
assert len(mock_d_entries) == 1
assert mock_entry.name == MOCK_HOST_NAME
assert mock_entry.model == MOCK_DEVICE_MODEL
assert mock_entry.sw_version == mock_version
assert mock_entry.identifiers == {(DOMAIN, MOCK_HOST_ID)}
async def test_device_info_is_assumed(hass):
"""Test that device info is assumed if device is unavailable."""
# Create a device registry entry with device info.
mock_d_registry = mock_device_registry(hass)
mock_d_registry.async_get_or_create(
config_entry_id=MOCK_ENTRY_ID,
name=MOCK_HOST_NAME,
model=MOCK_DEVICE_MODEL,
identifiers={(DOMAIN, MOCK_HOST_ID)},
sw_version=MOCK_HOST_VERSION,
)
mock_d_entries = mock_d_registry.devices
assert len(mock_d_entries) == 1
    # Create an entity_registry entry which uses identifiers from the device.
mock_unique_id = ps4.format_unique_id(MOCK_CREDS, MOCK_HOST_ID)
mock_e_registry = mock_registry(hass)
mock_e_registry.async_get_or_create(
"media_player", DOMAIN, mock_unique_id, config_entry=MOCK_CONFIG
)
mock_entity_id = mock_e_registry.async_get_entity_id(
"media_player", DOMAIN, mock_unique_id
)
mock_entity_id = await setup_mock_component(hass)
mock_state = hass.states.get(mock_entity_id).state
# Ensure that state is not set.
assert mock_state == STATE_UNKNOWN
    # Ensure that the entity_id is the same as the existing one.
mock_entities = hass.states.async_entity_ids()
assert len(mock_entities) == 1
assert mock_entities[0] == mock_entity_id
async def test_device_info_assumed_works(hass):
"""Reverse test that device info assumption works."""
mock_d_registry = mock_device_registry(hass)
mock_entity_id = await setup_mock_component(hass)
mock_state = hass.states.get(mock_entity_id).state
mock_d_entries = mock_d_registry.devices
# Ensure that state is not set.
assert mock_state == STATE_UNKNOWN
# With no state/status and no existing entries, registry should be empty.
assert not mock_d_entries
async def test_turn_on(hass):
"""Test that turn on service calls function."""
mock_entity_id = await setup_mock_component(hass)
mock_func = "{}{}".format(
"homeassistant.components.ps4.media_player.", "pyps4.Ps4Async.wakeup"
)
with patch(mock_func) as mock_call:
await hass.services.async_call(
"media_player", "turn_on", {ATTR_ENTITY_ID: mock_entity_id}
)
await hass.async_block_till_done()
assert len(mock_call.mock_calls) == 1
async def test_turn_off(hass):
"""Test that turn off service calls function."""
mock_entity_id = await setup_mock_component(hass)
mock_func = "{}{}".format(
"homeassistant.components.ps4.media_player.", "pyps4.Ps4Async.standby"
)
with patch(mock_func) as mock_call:
await hass.services.async_call(
"media_player", "turn_off", {ATTR_ENTITY_ID: mock_entity_id}
)
await hass.async_block_till_done()
assert len(mock_call.mock_calls) == 1
async def test_toggle(hass):
"""Test that toggle service calls function."""
mock_entity_id = await setup_mock_component(hass)
mock_func = "{}{}".format(
"homeassistant.components.ps4.media_player.", "pyps4.Ps4Async.toggle"
)
with patch(mock_func) as mock_call:
await hass.services.async_call(
"media_player", "toggle", {ATTR_ENTITY_ID: mock_entity_id}
)
await hass.async_block_till_done()
assert len(mock_call.mock_calls) == 1
async def test_media_pause(hass):
"""Test that media pause service calls function."""
mock_entity_id = await setup_mock_component(hass)
mock_func = "{}{}".format(
"homeassistant.components.ps4.media_player.", "pyps4.Ps4Async.remote_control"
)
with patch(mock_func) as mock_call:
await hass.services.async_call(
"media_player", "media_pause", {ATTR_ENTITY_ID: mock_entity_id}
)
await hass.async_block_till_done()
assert len(mock_call.mock_calls) == 1
async def test_media_stop(hass):
"""Test that media stop service calls function."""
mock_entity_id = await setup_mock_component(hass)
mock_func = "{}{}".format(
"homeassistant.components.ps4.media_player.", "pyps4.Ps4Async.remote_control"
)
with patch(mock_func) as mock_call:
await hass.services.async_call(
"media_player", "media_stop", {ATTR_ENTITY_ID: mock_entity_id}
)
await hass.async_block_till_done()
assert len(mock_call.mock_calls) == 1
async def test_select_source(hass, patch_load_json):
"""Test that select source service calls function with title."""
patch_load_json.return_value = {MOCK_TITLE_ID: MOCK_GAMES_DATA}
with patch("pyps4_2ndscreen.ps4.get_status", return_value=MOCK_STATUS_IDLE):
mock_entity_id = await setup_mock_component(hass)
with patch("pyps4_2ndscreen.ps4.Ps4Async.start_title") as mock_call, patch(
"homeassistant.components.ps4.media_player.PS4Device.async_update"
):
# Test with title name.
await hass.services.async_call(
"media_player",
"select_source",
{ATTR_ENTITY_ID: mock_entity_id, ATTR_INPUT_SOURCE: MOCK_TITLE_NAME},
blocking=True,
)
assert len(mock_call.mock_calls) == 1
async def test_select_source_caps(hass, patch_load_json):
"""Test that select source service calls function with upper case title."""
patch_load_json.return_value = {MOCK_TITLE_ID: MOCK_GAMES_DATA}
with patch("pyps4_2ndscreen.ps4.get_status", return_value=MOCK_STATUS_IDLE):
mock_entity_id = await setup_mock_component(hass)
with patch("pyps4_2ndscreen.ps4.Ps4Async.start_title") as mock_call, patch(
"homeassistant.components.ps4.media_player.PS4Device.async_update"
):
# Test with title name in caps.
await hass.services.async_call(
"media_player",
"select_source",
{
ATTR_ENTITY_ID: mock_entity_id,
ATTR_INPUT_SOURCE: MOCK_TITLE_NAME.upper(),
},
blocking=True,
)
assert len(mock_call.mock_calls) == 1
async def test_select_source_id(hass, patch_load_json):
"""Test that select source service calls function with Title ID."""
patch_load_json.return_value = {MOCK_TITLE_ID: MOCK_GAMES_DATA}
with patch("pyps4_2ndscreen.ps4.get_status", return_value=MOCK_STATUS_IDLE):
mock_entity_id = await setup_mock_component(hass)
with patch("pyps4_2ndscreen.ps4.Ps4Async.start_title") as mock_call, patch(
"homeassistant.components.ps4.media_player.PS4Device.async_update"
):
# Test with title ID.
await hass.services.async_call(
"media_player",
"select_source",
{ATTR_ENTITY_ID: mock_entity_id, ATTR_INPUT_SOURCE: MOCK_TITLE_ID},
blocking=True,
)
assert len(mock_call.mock_calls) == 1
async def test_ps4_send_command(hass):
"""Test that ps4 send command service calls function."""
mock_entity_id = await setup_mock_component(hass)
with patch("pyps4_2ndscreen.ps4.Ps4Async.remote_control") as mock_call:
await hass.services.async_call(
DOMAIN,
"send_command",
{ATTR_ENTITY_ID: mock_entity_id, ATTR_COMMAND: "ps"},
blocking=True,
)
assert len(mock_call.mock_calls) == 1
async def test_entry_is_unloaded(hass):
"""Test that entry is unloaded."""
mock_entry = MockConfigEntry(
domain=ps4.DOMAIN, data=MOCK_DATA, version=VERSION, entry_id=MOCK_ENTRY_ID
)
mock_entity_id = await setup_mock_component(hass, mock_entry)
mock_unload = await ps4.async_unload_entry(hass, mock_entry)
assert mock_unload is True
assert not hass.data[PS4_DATA].devices
# Test that callback listener for entity is removed from protocol.
assert not hass.data[PS4_DATA].protocol.callbacks
assert hass.states.get(mock_entity_id) is None
|
import logging
import requests.exceptions
import upcloud_api
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME
from homeassistant.core import callback
# pylint: disable=unused-import # for DOMAIN https://github.com/PyCQA/pylint/issues/3202
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN
_LOGGER = logging.getLogger(__name__)
class UpCloudConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""UpCloud config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
username: str
password: str
async def async_step_user(self, user_input=None):
"""Handle user initiated flow."""
if user_input is None:
return self._async_show_form(step_id="user")
await self.async_set_unique_id(user_input[CONF_USERNAME])
manager = upcloud_api.CloudManager(
user_input[CONF_USERNAME], user_input[CONF_PASSWORD]
)
errors = {}
try:
await self.hass.async_add_executor_job(manager.authenticate)
except upcloud_api.UpCloudAPIError:
errors["base"] = "invalid_auth"
_LOGGER.debug("invalid_auth", exc_info=True)
except requests.exceptions.RequestException:
errors["base"] = "cannot_connect"
_LOGGER.debug("cannot_connect", exc_info=True)
if errors:
return self._async_show_form(
step_id="user", user_input=user_input, errors=errors
)
return self.async_create_entry(title=user_input[CONF_USERNAME], data=user_input)
async def async_step_import(self, user_input=None):
"""Handle import initiated flow."""
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
return await self.async_step_user(user_input=user_input)
@callback
def _async_show_form(self, step_id, user_input=None, errors=None):
"""Show our form."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id=step_id,
data_schema=vol.Schema(
{
vol.Required(
CONF_USERNAME, default=user_input.get(CONF_USERNAME, "")
): str,
vol.Required(
CONF_PASSWORD, default=user_input.get(CONF_PASSWORD, "")
): str,
}
),
errors=errors or {},
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow."""
return UpCloudOptionsFlow(config_entry)
class UpCloudOptionsFlow(config_entries.OptionsFlow):
"""UpCloud options flow."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(CONF_SCAN_INTERVAL)
or DEFAULT_SCAN_INTERVAL.seconds,
): vol.All(vol.Coerce(int), vol.Range(min=30)),
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
|
from homeassistant.components.lock import LockEntity
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
from homeassistant.core import callback
from homeassistant.helpers.event import async_call_later
from . import XiaomiDevice
from .const import DOMAIN, GATEWAYS_KEY
FINGER_KEY = "fing_verified"
PASSWORD_KEY = "psw_verified"
CARD_KEY = "card_verified"
VERIFIED_WRONG_KEY = "verified_wrong"
ATTR_VERIFIED_WRONG_TIMES = "verified_wrong_times"
UNLOCK_MAINTAIN_TIME = 5
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Perform the setup for Xiaomi devices."""
entities = []
gateway = hass.data[DOMAIN][GATEWAYS_KEY][config_entry.entry_id]
for device in gateway.devices["lock"]:
model = device["model"]
if model == "lock.aq1":
entities.append(XiaomiAqaraLock(device, "Lock", gateway, config_entry))
async_add_entities(entities)
class XiaomiAqaraLock(LockEntity, XiaomiDevice):
"""Representation of a XiaomiAqaraLock."""
def __init__(self, device, name, xiaomi_hub, config_entry):
"""Initialize the XiaomiAqaraLock."""
self._changed_by = 0
self._verified_wrong_times = 0
super().__init__(device, name, xiaomi_hub, config_entry)
@property
def is_locked(self) -> bool:
"""Return true if lock is locked."""
if self._state is not None:
return self._state == STATE_LOCKED
@property
def changed_by(self) -> int:
"""Last change triggered by."""
return self._changed_by
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
attributes = {ATTR_VERIFIED_WRONG_TIMES: self._verified_wrong_times}
return attributes
@callback
def clear_unlock_state(self, _):
"""Clear unlock state automatically."""
self._state = STATE_LOCKED
self.async_write_ha_state()
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(VERIFIED_WRONG_KEY)
if value is not None:
self._verified_wrong_times = int(value)
return True
for key in (FINGER_KEY, PASSWORD_KEY, CARD_KEY):
value = data.get(key)
if value is not None:
self._changed_by = int(value)
self._verified_wrong_times = 0
self._state = STATE_UNLOCKED
async_call_later(
self.hass, UNLOCK_MAINTAIN_TIME, self.clear_unlock_state
)
return True
return False
|
import unittest
import uiautomator
import os
from mock import patch
class TestMisc(unittest.TestCase):
def test_proxy(self):
self.assertTrue('no_proxy' in os.environ)
self.assertTrue('localhost' in os.environ.get('no_proxy', ''))
def test_load(self):
try:
from imp import reload
except:
pass
reload(uiautomator)
self.assertIsNotNone(uiautomator.device)
self.assertIsNotNone(uiautomator.rect)
self.assertIsNotNone(uiautomator.point)
def test_rect(self):
import random
for i in range(10):
top, left = random.randint(0, 100), random.randint(0, 100)
bottom, right = random.randint(101, 1024), random.randint(101, 720)
self.assertEqual(uiautomator.rect(top, left, bottom, right), {"top": top, "left": left, "bottom": bottom, "right": right})
def test_point(self):
import random
for i in range(10):
x, y = random.randint(0, 1024), random.randint(0, 720)
self.assertEqual(uiautomator.point(x, y), {"x": x, "y": y})
def test_next_port(self):
with patch('socket.socket') as socket:
socket.return_value.connect_ex.side_effect = [0, 0, 1]
uiautomator._init_local_port = 9007
self.assertEqual(uiautomator.next_local_port(), 9010)
with patch('socket.socket') as socket:
socket.return_value.connect_ex.return_value = 1
uiautomator._init_local_port = 32764
self.assertEqual(uiautomator.next_local_port(), 9008)
|
from flexx import event
class Basic(event.Component):
@event.reaction('!foo')
def on_foo(self, *events):
print('foo reaction called with %i events' % len(events))
@event.reaction('!bar')
def on_bar(self, *events):
print('bar reaction called with %i events' % len(events))
b = Basic()
# Emit dummy events
b.emit('foo', {})
b.emit('foo', {})
b.emit('bar', {})
b.emit('spam', {}) # we can emit this, but nobody's listening
# Handle events
event.loop.iter()
|
import fnmatch
import os, sys
from trashcli.trash import TrashDir, parse_path, ParseError
from trashcli.trash import TrashDirs
from trashcli.trash import TopTrashDirRules
from trashcli.empty import CleanableTrashcan
from trashcli.fs import FileSystemReader
from trashcli.fs import FileRemover
class RmCmd:
def __init__(self,
environ,
getuid,
list_volumes,
stderr,
file_reader):
self.environ = environ
self.getuid = getuid
self.list_volumes = list_volumes
self.stderr = stderr
self.file_reader = file_reader
def run(self, argv):
args = argv[1:]
self.exit_code = 0
if not args:
self.print_err('Usage:\n'
' trash-rm PATTERN\n'
'\n'
'Please specify PATTERN')
self.exit_code = 8
return
trashcan = CleanableTrashcan(FileRemover())
cmd = Filter(trashcan.delete_trashinfo_and_backup_copy)
cmd.use_pattern(args[0])
listing = ListTrashinfos(cmd.delete_if_matches,
self.file_reader,
self.unable_to_parse_path)
trashdirs = TrashDirs(self.environ,
self.getuid,
self.list_volumes,
TopTrashDirRules(self.file_reader))
trashdirs.on_trash_dir_found = listing.list_from_volume_trashdir
trashdirs.list_trashdirs()
def unable_to_parse_path(self, trashinfo):
self.report_error('{}: unable to parse \'Path\''.format(trashinfo))
def report_error(self, error_msg):
self.print_err('trash-rm: {}'.format(error_msg))
def print_err(self, msg):
self.stderr.write(msg + '\n')
def main():
from trashcli.list_mount_points import os_mount_points
cmd = RmCmd(environ = os.environ
, getuid = os.getuid
, list_volumes = os_mount_points
, stderr = sys.stderr
, file_reader = FileSystemReader())
cmd.run(sys.argv)
return cmd.exit_code
class Filter:
def __init__(self, delete):
self.delete = delete
def use_pattern(self, pattern):
self.pattern = pattern
def delete_if_matches(self, original_location, info_file):
if self.pattern[0] == '/':
if self.pattern == original_location:
self.delete(info_file)
else:
basename = os.path.basename(original_location)
if fnmatch.fnmatchcase(basename, self.pattern):
self.delete(info_file)
class ListTrashinfos:
def __init__(self, out, file_reader, unable_to_parse_path):
self.out = out
self.file_reader = file_reader
self.unable_to_parse_path = unable_to_parse_path
def list_from_volume_trashdir(self, trashdir_path, volume):
self.volume = volume
trashdir = TrashDir(self.file_reader)
trashdir.open(trashdir_path, volume)
trashdir.each_trashinfo(self._report_original_location)
def _report_original_location(self, trashinfo_path):
trashinfo = self.file_reader.contents_of(trashinfo_path)
try:
path = parse_path(trashinfo)
except ParseError:
self.unable_to_parse_path(trashinfo_path)
else:
complete_path = os.path.join(self.volume, path)
self.out(complete_path, trashinfo_path)
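# Illustrative sketch (not part of trash-rm itself): how Filter decides whether a
# trashed file matches. An absolute pattern must equal the original location
# exactly, while a relative pattern is fnmatch'ed against the basename; the
# callback below just records the trashinfo paths that would be deleted.
def _demo_filter_matching():
    deleted = []
    demo = Filter(deleted.append)
    demo.use_pattern('*.pyc')
    demo.delete_if_matches('/home/user/cache.pyc', 'cache.pyc.trashinfo')
    demo.delete_if_matches('/home/user/readme.md', 'readme.md.trashinfo')
    return deleted  # -> ['cache.pyc.trashinfo']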
|
import asyncio
from datetime import timedelta
import logging
import async_timeout
from pywemo.ouimeaux_device.api.service import ActionException
from homeassistant import util
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from .const import DOMAIN as WEMO_DOMAIN
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
_LOGGER = logging.getLogger(__name__)
SUPPORT_WEMO = (
SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR | SUPPORT_TRANSITION
)
# The WEMO_ constants below come from pywemo itself
WEMO_ON = 1
WEMO_OFF = 0
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up WeMo lights."""
async def _discovered_wemo(device):
"""Handle a discovered Wemo device."""
if device.model_name == "Dimmer":
async_add_entities([WemoDimmer(device)])
else:
await hass.async_add_executor_job(
setup_bridge, hass, device, async_add_entities
)
async_dispatcher_connect(hass, f"{WEMO_DOMAIN}.light", _discovered_wemo)
await asyncio.gather(
*[
_discovered_wemo(device)
for device in hass.data[WEMO_DOMAIN]["pending"].pop("light")
]
)
def setup_bridge(hass, bridge, async_add_entities):
"""Set up a WeMo link."""
lights = {}
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_lights():
"""Update the WeMo led objects with latest info from the bridge."""
bridge.bridge_update()
new_lights = []
for light_id, device in bridge.Lights.items():
if light_id not in lights:
lights[light_id] = WemoLight(device, update_lights)
new_lights.append(lights[light_id])
if new_lights:
hass.add_job(async_add_entities, new_lights)
update_lights()
class WemoLight(LightEntity):
"""Representation of a WeMo light."""
def __init__(self, device, update_lights):
"""Initialize the WeMo light."""
self.wemo = device
self._state = None
self._update_lights = update_lights
self._available = True
self._update_lock = None
self._brightness = None
self._hs_color = None
self._color_temp = None
self._is_on = None
self._name = self.wemo.name
self._unique_id = self.wemo.uniqueID
self._model_name = type(self.wemo).__name__
async def async_added_to_hass(self):
"""Wemo light added to Home Assistant."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
@property
def unique_id(self):
"""Return the ID of this light."""
return self._unique_id
@property
def name(self):
"""Return the name of the light."""
return self._name
@property
def device_info(self):
"""Return the device info."""
return {
"name": self._name,
"identifiers": {(WEMO_DOMAIN, self._unique_id)},
"model": self._model_name,
"manufacturer": "Belkin",
}
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def hs_color(self):
"""Return the hs color values of this light."""
return self._hs_color
@property
def color_temp(self):
"""Return the color temperature of this light in mireds."""
return self._color_temp
@property
def is_on(self):
"""Return true if device is on."""
return self._is_on
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_WEMO
@property
def available(self):
"""Return if light is available."""
return self._available
def turn_on(self, **kwargs):
"""Turn the light on."""
xy_color = None
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
color_temp = kwargs.get(ATTR_COLOR_TEMP)
hs_color = kwargs.get(ATTR_HS_COLOR)
transition_time = int(kwargs.get(ATTR_TRANSITION, 0))
if hs_color is not None:
xy_color = color_util.color_hs_to_xy(*hs_color)
turn_on_kwargs = {
"level": brightness,
"transition": transition_time,
"force_update": False,
}
try:
if xy_color is not None:
self.wemo.set_color(xy_color, transition=transition_time)
if color_temp is not None:
self.wemo.set_temperature(mireds=color_temp, transition=transition_time)
if self.wemo.turn_on(**turn_on_kwargs):
self._state["onoff"] = WEMO_ON
except ActionException as err:
_LOGGER.warning("Error while turning on device %s (%s)", self.name, err)
self._available = False
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the light off."""
transition_time = int(kwargs.get(ATTR_TRANSITION, 0))
try:
if self.wemo.turn_off(transition=transition_time):
self._state["onoff"] = WEMO_OFF
except ActionException as err:
_LOGGER.warning("Error while turning off device %s (%s)", self.name, err)
self._available = False
self.schedule_update_ha_state()
def _update(self, force_update=True):
"""Synchronize state with bridge."""
try:
self._update_lights(no_throttle=force_update)
self._state = self.wemo.state
except (AttributeError, ActionException) as err:
_LOGGER.warning("Could not update status for %s (%s)", self.name, err)
self._available = False
self.wemo.reconnect_with_device()
else:
self._is_on = self._state.get("onoff") != WEMO_OFF
self._brightness = self._state.get("level", 255)
self._color_temp = self._state.get("temperature_mireds")
self._available = True
xy_color = self._state.get("color_xy")
if xy_color:
self._hs_color = color_util.color_xy_to_hs(*xy_color)
else:
self._hs_color = None
async def async_update(self):
"""Synchronize state with bridge."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning("Lost connection to %s", self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update, force_update)
class WemoDimmer(LightEntity):
"""Representation of a WeMo dimmer."""
def __init__(self, device):
"""Initialize the WeMo dimmer."""
self.wemo = device
self._state = None
self._available = True
self._update_lock = None
self._brightness = None
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serialnumber = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo device."""
_LOGGER.debug("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Wemo dimmer added to Home Assistant."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = self.hass.data[WEMO_DOMAIN]["registry"]
await self.hass.async_add_executor_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
async def async_update(self):
"""Update WeMo state.
Wemo has an aggressive retry logic that sometimes can take over a
minute to return. If we don't get a state after 5 seconds, assume the
Wemo dimmer is unreachable. If update goes through, it will be made
available again.
"""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning("Lost connection to %s", self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update, force_update)
@property
def unique_id(self):
"""Return the ID of this WeMo dimmer."""
return self._serialnumber
@property
def name(self):
"""Return the name of the dimmer if any."""
return self._name
@property
def device_info(self):
"""Return the device info."""
return {
"name": self._name,
"identifiers": {(WEMO_DOMAIN, self._serialnumber)},
"model": self._model_name,
"manufacturer": "Belkin",
}
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def brightness(self):
"""Return the brightness of this light between 1 and 100."""
return self._brightness
@property
def is_on(self):
"""Return true if dimmer is on. Standby is on."""
return self._state
def _update(self, force_update=True):
"""Update the device state."""
try:
self._state = self.wemo.get_state(force_update)
wemobrightness = int(self.wemo.get_brightness(force_update))
self._brightness = int((wemobrightness * 255) / 100)
if not self._available:
_LOGGER.info("Reconnected to %s", self.name)
self._available = True
except (AttributeError, ActionException) as err:
_LOGGER.warning("Could not update status for %s (%s)", self.name, err)
self._available = False
self.wemo.reconnect_with_device()
def turn_on(self, **kwargs):
"""Turn the dimmer on."""
# Wemo dimmer switches use a range of [0, 100] to control
# brightness. Level 255 might mean to set it to previous value
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
brightness = int((brightness / 255) * 100)
else:
brightness = 255
try:
if self.wemo.on():
self._state = WEMO_ON
self.wemo.set_brightness(brightness)
except ActionException as err:
_LOGGER.warning("Error while turning on device %s (%s)", self.name, err)
self._available = False
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the dimmer off."""
try:
if self.wemo.off():
self._state = WEMO_OFF
except ActionException as err:
_LOGGER.warning("Error while turning on device %s (%s)", self.name, err)
self._available = False
self.schedule_update_ha_state()
@property
def available(self):
"""Return if dimmer is available."""
return self._available
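# Illustrative sketch (not part of this platform): the brightness scaling used by
# WemoDimmer above, converting between Home Assistant's 0-255 range and the WeMo
# dimmer's 0-100 range. These helpers exist only to make the mapping explicit.
def _ha_to_wemo_brightness(ha_brightness):
    """Map a 0-255 Home Assistant brightness onto the dimmer's 0-100 scale."""
    return int((ha_brightness / 255) * 100)
def _wemo_to_ha_brightness(wemo_brightness):
    """Map the dimmer's 0-100 brightness back onto Home Assistant's 0-255 scale."""
    return int((wemo_brightness * 255) / 100)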
|
from flask_script import Manager
from lemur.api_keys import service as api_key_service
from lemur.auth.service import create_token
from datetime import datetime
manager = Manager(usage="Handles all api key related tasks.")
@manager.option(
"-u", "--user-id", dest="uid", help="The User ID this access key belongs too."
)
@manager.option("-n", "--name", dest="name", help="The name of this API Key.")
@manager.option(
"-t", "--ttl", dest="ttl", help="The TTL of this API Key. -1 for forever."
)
def create(uid, name, ttl):
"""
Create a new api key for a user.
:return:
"""
print("[+] Creating a new api key.")
key = api_key_service.create(
user_id=uid,
name=name,
ttl=ttl,
issued_at=int(datetime.utcnow().timestamp()),
revoked=False,
)
print("[+] Successfully created a new api key. Generating a JWT...")
jwt = create_token(uid, key.id, key.ttl)
print("[+] Your JWT is: {jwt}".format(jwt=jwt))
@manager.option("-a", "--api-key-id", dest="aid", help="The API Key ID to revoke.")
def revoke(aid):
"""
Revokes an api key for a user.
:return:
"""
print("[-] Revoking the API Key api key.")
api_key_service.revoke(aid=aid)
print("[+] Successfully revoked the api key")
|
import json
from types import SimpleNamespace
from unittest.mock import ANY, call
import attr
import pytest
from helpers import utils
qute_lastpass = utils.import_userscript('qute-lastpass')
default_lpass_match = [
{
"id": "12345",
"name": "www.example.com",
"username": "[email protected]",
"password": "foobar",
"url": "https://www.example.com",
}
]
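# FakeOutput stands in for the CompletedProcess returned by subprocess.run: the
# str.encode converters turn the test strings into the bytes stdout/stderr that
# the userscript decodes.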
@attr.s
class FakeOutput:
stdout = attr.ib(default='', converter=str.encode)
stderr = attr.ib(default='', converter=str.encode)
@pytest.fixture
def subprocess_mock(mocker):
return mocker.patch('subprocess.run')
@pytest.fixture
def qutecommand_mock(mocker):
return mocker.patch.object(qute_lastpass, 'qute_command')
@pytest.fixture
def stderr_mock(mocker):
return mocker.patch.object(qute_lastpass, 'stderr')
# Default arguments passed to qute-lastpass
@pytest.fixture
def arguments_mock():
arguments = SimpleNamespace()
arguments.url = ''
arguments.dmenu_invocation = 'rofi -dmenu'
arguments.insert_mode = True
arguments.io_encoding = 'UTF-8'
arguments.merge_candidates = False
arguments.password_only = False
arguments.username_only = False
return arguments
class TestQuteLastPassComponents:
"""Test qute-lastpass components."""
def test_fake_key_raw(self, qutecommand_mock):
"""Test if fake_key_raw properly escapes characters."""
qute_lastpass.fake_key_raw('[email protected] ')
qutecommand_mock.assert_called_once_with(
'fake-key \\j\\o\\h\\n\\.\\d\\o\\e\\@\\e\\x\\a\\m\\p\\l\\e\\.\\c\\o\\m" "'
)
def test_dmenu(self, subprocess_mock):
"""Test if dmenu command receives properly formatted lpass entries."""
entries = [
"1234 | example.com | https://www.example.com | [email protected]",
"2345 | example2.com | https://www.example2.com | [email protected]",
]
subprocess_mock.return_value = FakeOutput(stdout=entries[1])
selected = qute_lastpass.dmenu(entries, 'rofi -dmenu', 'UTF-8')
subprocess_mock.assert_called_once_with(
['rofi', '-dmenu'],
input='\n'.join(entries).encode(),
stdout=ANY)
assert selected == entries[1]
def test_pass_subprocess_args(self, subprocess_mock):
"""Test if pass_ calls subprocess with correct arguments."""
subprocess_mock.return_value = FakeOutput(stdout='[{}]')
qute_lastpass.pass_('example.com', 'utf-8')
subprocess_mock.assert_called_once_with(
['lpass', 'show', '-x', '-j', '-G', '\\bexample\\.com'],
stdout=ANY, stderr=ANY)
def test_pass_returns_candidates(self, subprocess_mock):
"""Test if pass_ returns expected lpass site entry."""
subprocess_mock.return_value = FakeOutput(
stdout=json.dumps(default_lpass_match))
response = qute_lastpass.pass_('www.example.com', 'utf-8')
assert response[1] == ''
candidates = response[0]
assert len(candidates) == 1
assert candidates[0] == default_lpass_match[0]
def test_pass_no_accounts(self, subprocess_mock):
"""Test if pass_ handles no accounts as an empty lpass result."""
error_message = 'Error: Could not find specified account(s).'
subprocess_mock.return_value = FakeOutput(stderr=error_message)
response = qute_lastpass.pass_('www.example.com', 'utf-8')
assert response[0] == []
assert response[1] == ''
def test_pass_returns_error(self, subprocess_mock):
"""Test if pass_ returns error from lpass."""
# pylint: disable=line-too-long
error_message = 'Error: Could not find decryption key. Perhaps you need to login with `lpass login`.'
subprocess_mock.return_value = FakeOutput(stderr=error_message)
response = qute_lastpass.pass_('www.example.com', 'utf-8')
assert response[0] == []
assert response[1] == error_message
class TestQuteLastPassMain:
"""Test qute-lastpass main."""
def test_main_happy_path(self, subprocess_mock, arguments_mock,
qutecommand_mock):
"""Test sending username/password to qutebrowser on *single* match."""
subprocess_mock.return_value = FakeOutput(
stdout=json.dumps(default_lpass_match))
arguments_mock.url = default_lpass_match[0]['url']
exit_code = qute_lastpass.main(arguments_mock)
assert exit_code == qute_lastpass.ExitCodes.SUCCESS
qutecommand_mock.assert_has_calls([
call('fake-key \\f\\a\\k\\e\\@\\f\\a\\k\\e\\.\\c\\o\\m'),
call('fake-key <Tab>'),
call('fake-key \\f\\o\\o\\b\\a\\r'),
call('enter-mode insert')
])
def test_main_no_candidates(self, subprocess_mock, arguments_mock,
stderr_mock,
qutecommand_mock):
"""Test correct exit code and message returned on no entries."""
error_message = 'Error: Could not find specified account(s).'
subprocess_mock.return_value = FakeOutput(stderr=error_message)
arguments_mock.url = default_lpass_match[0]['url']
exit_code = qute_lastpass.main(arguments_mock)
assert exit_code == qute_lastpass.ExitCodes.NO_PASS_CANDIDATES
stderr_mock.assert_called_with(
"No pass candidates for URL 'https://www.example.com' found!")
qutecommand_mock.assert_not_called()
def test_main_lpass_failure(self, subprocess_mock, arguments_mock,
stderr_mock,
qutecommand_mock):
"""Test correct exit code and message on lpass failure."""
# pylint: disable=line-too-long
error_message = 'Error: Could not find decryption key. Perhaps you need to login with `lpass login`.'
subprocess_mock.return_value = FakeOutput(stderr=error_message)
arguments_mock.url = default_lpass_match[0]['url']
exit_code = qute_lastpass.main(arguments_mock)
assert exit_code == qute_lastpass.ExitCodes.FAILURE
# pylint: disable=line-too-long
stderr_mock.assert_called_with(
"LastPass CLI returned for www.example.com - Error: Could not find decryption key. Perhaps you need to login with `lpass login`.")
qutecommand_mock.assert_not_called()
def test_main_username_only_flag(self, subprocess_mock, arguments_mock,
qutecommand_mock):
"""Test if --username-only flag sends username only."""
subprocess_mock.return_value = FakeOutput(
stdout=json.dumps(default_lpass_match))
arguments_mock.url = default_lpass_match[0]['url']
arguments_mock.username_only = True
qute_lastpass.main(arguments_mock)
qutecommand_mock.assert_has_calls([
call('fake-key \\f\\a\\k\\e\\@\\f\\a\\k\\e\\.\\c\\o\\m'),
call('enter-mode insert')
])
def test_main_password_only_flag(self, subprocess_mock, arguments_mock,
qutecommand_mock):
"""Test if --password-only flag sends password only."""
subprocess_mock.return_value = FakeOutput(
stdout=json.dumps(default_lpass_match))
arguments_mock.url = default_lpass_match[0]['url']
arguments_mock.password_only = True
qute_lastpass.main(arguments_mock)
qutecommand_mock.assert_has_calls([
call('fake-key \\f\\o\\o\\b\\a\\r'),
call('enter-mode insert')
])
def test_main_multiple_candidates(self, subprocess_mock, arguments_mock,
qutecommand_mock):
"""Test dmenu-invocation when lpass returns multiple candidates."""
multiple_matches = default_lpass_match.copy()
multiple_matches.append(
{
"id": "23456",
"name": "Sites/www.example.com",
"username": "[email protected]",
"password": "barfoo",
"url": "https://www.example.com",
}
)
lpass_response = FakeOutput(stdout=json.dumps(multiple_matches))
dmenu_response = FakeOutput(
stdout='23456 | Sites/www.example.com | https://www.example.com | [email protected]')
subprocess_mock.side_effect = [lpass_response, dmenu_response]
arguments_mock.url = multiple_matches[0]['url']
exit_code = qute_lastpass.main(arguments_mock)
assert exit_code == qute_lastpass.ExitCodes.SUCCESS
subprocess_mock.assert_has_calls([
call(['lpass', 'show', '-x', '-j', '-G', '\\bwww\\.example\\.com'],
stdout=ANY, stderr=ANY),
call(['rofi', '-dmenu'],
input=b'12345 | www.example.com | https://www.example.com | [email protected]\n23456 | Sites/www.example.com | https://www.example.com | [email protected]',
stdout=ANY)
])
qutecommand_mock.assert_has_calls([
call(
'fake-key \\j\\o\\h\\n\\.\\d\\o\\e\\@\\f\\a\\k\\e\\.\\c\\o\\m'),
call('fake-key <Tab>'),
call('fake-key \\b\\a\\r\\f\\o\\o'),
call('enter-mode insert')
])
def test_main_merge_candidates(self, subprocess_mock, arguments_mock,
qutecommand_mock):
"""Test merge of multiple responses from lpass."""
fqdn_matches = default_lpass_match.copy()
fqdn_matches.append(
{
"id": "23456",
"name": "Sites/www.example.com",
"username": "[email protected]",
"password": "barfoo",
"url": "https://www.example.com",
}
)
domain_matches = [
{
"id": "345",
"name": "example.com",
"username": "[email protected]",
"password": "barfoo1",
"url": "https://example.com",
},
{
"id": "456",
"name": "Sites/example.com",
"username": "[email protected]",
"password": "foofoo2",
"url": "http://example.com",
}
]
fqdn_response = FakeOutput(stdout=json.dumps(fqdn_matches))
domain_response = FakeOutput(stdout=json.dumps(domain_matches))
no_response = FakeOutput(
stderr='Error: Could not find specified account(s).')
dmenu_response = FakeOutput(
stdout='23456 | Sites/www.example.com | https://www.example.com | [email protected]')
        # lpass returns matches for the first two searches (www.example.com and
        # example.com) but not for the remaining fallback searches; the merged
        # candidates are then offered via dmenu
subprocess_mock.side_effect = [fqdn_response, domain_response,
no_response, no_response,
dmenu_response]
arguments_mock.url = fqdn_matches[0]['url']
arguments_mock.merge_candidates = True
exit_code = qute_lastpass.main(arguments_mock)
assert exit_code == qute_lastpass.ExitCodes.SUCCESS
subprocess_mock.assert_has_calls([
call(['lpass', 'show', '-x', '-j', '-G', '\\bwww\\.example\\.com'],
stdout=ANY, stderr=ANY),
call(['lpass', 'show', '-x', '-j', '-G', '\\bexample\\.com'],
stdout=ANY, stderr=ANY),
call(['lpass', 'show', '-x', '-j', '-G', '\\bwwwexample'],
stdout=ANY, stderr=ANY),
call(['lpass', 'show', '-x', '-j', '-G', '\\bexample'],
stdout=ANY, stderr=ANY),
call(['rofi', '-dmenu'],
input=b'12345 | www.example.com | https://www.example.com | [email protected]\n23456 | Sites/www.example.com | https://www.example.com | [email protected]\n345 | example.com | https://example.com | [email protected]\n456 | Sites/example.com | http://example.com | [email protected]',
stdout=ANY)
])
qutecommand_mock.assert_has_calls([
call(
'fake-key \\j\\o\\h\\n\\.\\d\\o\\e\\@\\f\\a\\k\\e\\.\\c\\o\\m'),
call('fake-key <Tab>'),
call('fake-key \\b\\a\\r\\f\\o\\o'),
call('enter-mode insert')
])
|
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
Page = apps.get_model('cms', 'Page')
for page in Page.objects.filter(application_urls='ProductsListApp'):
page.application_urls = 'CatalogListApp'
page.save()
for page in Page.objects.filter(application_urls='ProductSearchApp'):
page.application_urls = 'CatalogSearchApp'
page.save()
def backwards(apps, schema_editor):
Page = apps.get_model('cms', 'Page')
for page in Page.objects.filter(application_urls='CatalogListApp'):
page.application_urls = 'ProductsListApp'
page.save()
for page in Page.objects.filter(application_urls='CatalogSearchApp'):
page.application_urls = 'ProductSearchApp'
page.save()
class Migration(migrations.Migration):
dependencies = [
('shop', '0005_unify_address'),
]
operations = [
migrations.RunPython(forwards, reverse_code=backwards)
]
|
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from matchzoo.contrib.layers import DecayingDropoutLayer
class EncodingLayer(Layer):
"""
Apply a self-attention layer and a semantic composite fuse gate
to compute the encoding result of one tensor.
:param initial_keep_rate: the initial_keep_rate parameter of
DecayingDropoutLayer.
:param decay_interval: the decay_interval parameter of
DecayingDropoutLayer.
:param decay_rate: the decay_rate parameter of DecayingDropoutLayer.
:param kwargs: standard layer keyword arguments.
Example:
>>> import matchzoo as mz
>>> layer = mz.contrib.layers.EncodingLayer(1.0, 10000, 0.977)
>>> num_batch, left_len, num_dim = 5, 32, 10
>>> layer.build([num_batch, left_len, num_dim])
"""
def __init__(self,
initial_keep_rate: float,
decay_interval: int,
decay_rate: float,
**kwargs):
""":class: 'EncodingLayer' constructor."""
super(EncodingLayer, self).__init__(**kwargs)
self._initial_keep_rate = initial_keep_rate
self._decay_interval = decay_interval
self._decay_rate = decay_rate
self._w_itr_att = None
self._w1 = None
self._w2 = None
self._w3 = None
self._b1 = None
self._b2 = None
self._b3 = None
def build(self, input_shape):
"""
Build the layer.
:param input_shape: the shape of the input tensor,
for EncodingLayer we need one input tensor.
"""
d = input_shape[-1]
self._w_itr_att = self.add_weight(
name='w_itr_att', shape=(3 * d,), initializer='glorot_uniform')
self._w1 = self.add_weight(
name='w1', shape=(2 * d, d,), initializer='glorot_uniform')
self._w2 = self.add_weight(
name='w2', shape=(2 * d, d,), initializer='glorot_uniform')
self._w3 = self.add_weight(
name='w3', shape=(2 * d, d,), initializer='glorot_uniform')
self._b1 = self.add_weight(
name='b1', shape=(d,), initializer='zeros')
self._b2 = self.add_weight(
name='b2', shape=(d,), initializer='zeros')
self._b3 = self.add_weight(
name='b3', shape=(d,), initializer='zeros')
super(EncodingLayer, self).build(input_shape)
def call(self, inputs, **kwargs):
"""
The computation logic of EncodingLayer.
:param inputs: an input tensor.
"""
# Scalar dimensions referenced here:
# b = batch size
# p = inputs.shape()[1]
# d = inputs.shape()[2]
# The input shape is [b, p, d]
# shape = [b, 1, p, d]
x = tf.expand_dims(inputs, 1) * 0
# shape = [b, 1, d, p]
x = tf.transpose(x, (0, 1, 3, 2))
# shape = [b, p, d, p]
mid = x + tf.expand_dims(inputs, -1)
# shape = [b, p, d, p]
up = tf.transpose(mid, (0, 3, 2, 1))
# shape = [b, p, 3d, p]
inputs_concat = tf.concat([up, mid, up * mid], axis=2)
# Self-attention layer.
# shape = [b, p, p]
A = K.dot(self._w_itr_att, inputs_concat)
# shape = [b, p, p]
SA = tf.nn.softmax(A, axis=2)
# shape = [b, p, d]
itr_attn = K.batch_dot(SA, inputs)
# Semantic composite fuse gate.
# shape = [b, p, 2d]
inputs_attn_concat = tf.concat([inputs, itr_attn], axis=2)
concat_dropout = DecayingDropoutLayer(
initial_keep_rate=self._initial_keep_rate,
decay_interval=self._decay_interval,
decay_rate=self._decay_rate
)(inputs_attn_concat)
# shape = [b, p, d]
z = tf.tanh(K.dot(concat_dropout, self._w1) + self._b1)
# shape = [b, p, d]
r = tf.sigmoid(K.dot(concat_dropout, self._w2) + self._b2)
# shape = [b, p, d]
f = tf.sigmoid(K.dot(concat_dropout, self._w3) + self._b3)
# shape = [b, p, d]
encoding = r * inputs + f * z
return encoding
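# Illustrative sketch (not part of the original module): wiring EncodingLayer into
# a small Keras functional-API model so the self-attention and fuse gate run over
# a [batch, length, dim] embedding tensor. Shapes follow the docstring example.
def _demo_encoding_layer():
    from keras import layers, models
    embedded = layers.Input(shape=(32, 10))  # [batch, p, d]
    encoded = EncodingLayer(1.0, 10000, 0.977)(embedded)
    return models.Model(inputs=embedded, outputs=encoded)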
|
import datetime
import logging
import math
import re
import time
from pathlib import Path
from typing import List, Optional
import discord
import lavalink
from discord.embeds import EmptyEmbed
from redbot.core import commands
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import box, escape
from ...audio_dataclasses import LocalPath, Query
from ...audio_logging import IS_DEBUG
from ..abc import MixinMeta
from ..cog_utils import CompositeMetaClass
log = logging.getLogger("red.cogs.Audio.cog.Utilities.formatting")
_ = Translator("Audio", Path(__file__))
RE_SQUARE = re.compile(r"[\[\]]")
class FormattingUtilities(MixinMeta, metaclass=CompositeMetaClass):
async def _genre_search_button_action(
self, ctx: commands.Context, options: List, emoji: str, page: int, playlist: bool = False
) -> str:
try:
if emoji == "\N{DIGIT ONE}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = options[0 + (page * 5)]
elif emoji == "\N{DIGIT TWO}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = options[1 + (page * 5)]
elif emoji == "\N{DIGIT THREE}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = options[2 + (page * 5)]
elif emoji == "\N{DIGIT FOUR}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = options[3 + (page * 5)]
elif emoji == "\N{DIGIT FIVE}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = options[4 + (page * 5)]
else:
search_choice = options[0 + (page * 5)]
except IndexError:
search_choice = options[-1]
if not playlist:
return list(search_choice.items())[0]
else:
return search_choice.get("uri")
async def _build_genre_search_page(
self,
ctx: commands.Context,
tracks: List,
page_num: int,
title: str,
playlist: bool = False,
) -> discord.Embed:
search_num_pages = math.ceil(len(tracks) / 5)
search_idx_start = (page_num - 1) * 5
search_idx_end = search_idx_start + 5
search_list = ""
async for i, entry in AsyncIter(tracks[search_idx_start:search_idx_end]).enumerate(
start=search_idx_start
):
search_track_num = i + 1
if search_track_num > 5:
search_track_num = search_track_num % 5
if search_track_num == 0:
search_track_num = 5
if playlist:
name = "**[{}]({})** - {} {}".format(
entry.get("name"), entry.get("url"), str(entry.get("tracks")), _("tracks")
)
else:
name = f"{list(entry.keys())[0]}"
search_list += f"`{search_track_num}.` {name}\n"
embed = discord.Embed(
colour=await ctx.embed_colour(), title=title, description=search_list
)
embed.set_footer(
text=_("Page {page_num}/{total_pages}").format(
page_num=page_num, total_pages=search_num_pages
)
)
return embed
async def _search_button_action(
self, ctx: commands.Context, tracks: List, emoji: str, page: int
):
if not self._player_check(ctx):
if self.lavalink_connection_aborted:
msg = _("Connection to Lavalink has failed.")
description = EmptyEmbed
if await self.bot.is_owner(ctx.author):
description = _("Please check your console or logs for details.")
return await self.send_embed_msg(ctx, title=msg, description=description)
try:
await lavalink.connect(ctx.author.voice.channel)
player = lavalink.get_player(ctx.guild.id)
player.store("connect", datetime.datetime.utcnow())
await self.self_deafen(player)
except AttributeError:
return await self.send_embed_msg(ctx, title=_("Connect to a voice channel first."))
except IndexError:
return await self.send_embed_msg(
ctx, title=_("Connection to Lavalink has not yet been established.")
)
player = lavalink.get_player(ctx.guild.id)
guild_data = await self.config.guild(ctx.guild).all()
if len(player.queue) >= 10000:
return await self.send_embed_msg(
ctx, title=_("Unable To Play Tracks"), description=_("Queue size limit reached.")
)
if not await self.maybe_charge_requester(ctx, guild_data["jukebox_price"]):
return
try:
if emoji == "\N{DIGIT ONE}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = tracks[0 + (page * 5)]
elif emoji == "\N{DIGIT TWO}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = tracks[1 + (page * 5)]
elif emoji == "\N{DIGIT THREE}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = tracks[2 + (page * 5)]
elif emoji == "\N{DIGIT FOUR}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = tracks[3 + (page * 5)]
elif emoji == "\N{DIGIT FIVE}\N{COMBINING ENCLOSING KEYCAP}":
search_choice = tracks[4 + (page * 5)]
else:
search_choice = tracks[0 + (page * 5)]
except IndexError:
search_choice = tracks[-1]
if not hasattr(search_choice, "is_local") and getattr(search_choice, "uri", None):
description = await self.get_track_description(
search_choice, self.local_folder_current_path
)
else:
search_choice = Query.process_input(search_choice, self.local_folder_current_path)
if search_choice.is_local:
if (
search_choice.local_track_path.exists()
and search_choice.local_track_path.is_dir()
):
return await ctx.invoke(self.command_search, query=search_choice)
elif (
search_choice.local_track_path.exists()
and search_choice.local_track_path.is_file()
):
search_choice.invoked_from = "localtrack"
return await ctx.invoke(self.command_play, query=search_choice)
songembed = discord.Embed(title=_("Track Enqueued"), description=description)
queue_dur = await self.queue_duration(ctx)
queue_total_duration = self.format_time(queue_dur)
before_queue_length = len(player.queue)
query = Query.process_input(search_choice, self.local_folder_current_path)
if not await self.is_query_allowed(
self.config,
ctx,
f"{search_choice.title} {search_choice.author} {search_choice.uri} {str(query)}",
query_obj=query,
):
if IS_DEBUG:
log.debug(f"Query is not allowed in {ctx.guild} ({ctx.guild.id})")
self.update_player_lock(ctx, False)
return await self.send_embed_msg(
ctx, title=_("This track is not allowed in this server.")
)
elif guild_data["maxlength"] > 0:
if self.is_track_length_allowed(search_choice, guild_data["maxlength"]):
search_choice.extras.update(
{
"enqueue_time": int(time.time()),
"vc": player.channel.id,
"requester": ctx.author.id,
}
)
player.add(ctx.author, search_choice)
player.maybe_shuffle()
self.bot.dispatch(
"red_audio_track_enqueue", player.channel.guild, search_choice, ctx.author
)
else:
return await self.send_embed_msg(ctx, title=_("Track exceeds maximum length."))
else:
search_choice.extras.update(
{
"enqueue_time": int(time.time()),
"vc": player.channel.id,
"requester": ctx.author.id,
}
)
player.add(ctx.author, search_choice)
player.maybe_shuffle()
self.bot.dispatch(
"red_audio_track_enqueue", player.channel.guild, search_choice, ctx.author
)
if not guild_data["shuffle"] and queue_dur > 0:
songembed.set_footer(
text=_("{time} until track playback: #{position} in queue").format(
time=queue_total_duration, position=before_queue_length + 1
)
)
if not player.current:
await player.play()
return await self.send_embed_msg(ctx, embed=songembed)
async def _format_search_options(self, search_choice):
query = Query.process_input(search_choice, self.local_folder_current_path)
description = await self.get_track_description(
search_choice, self.local_folder_current_path
)
return description, query
async def _build_search_page(
self, ctx: commands.Context, tracks: List, page_num: int
) -> discord.Embed:
search_num_pages = math.ceil(len(tracks) / 5)
search_idx_start = (page_num - 1) * 5
search_idx_end = search_idx_start + 5
search_list = ""
command = ctx.invoked_with
folder = False
async for i, track in AsyncIter(tracks[search_idx_start:search_idx_end]).enumerate(
start=search_idx_start
):
search_track_num = i + 1
if search_track_num > 5:
search_track_num = search_track_num % 5
if search_track_num == 0:
search_track_num = 5
try:
query = Query.process_input(track.uri, self.local_folder_current_path)
if query.is_local:
search_list += "`{0}.` **{1}**\n[{2}]\n".format(
search_track_num,
track.title,
LocalPath(track.uri, self.local_folder_current_path).to_string_user(),
)
else:
search_list += "`{0}.` **[{1}]({2})**\n".format(
search_track_num, track.title, track.uri
)
except AttributeError:
track = Query.process_input(track, self.local_folder_current_path)
if track.is_local and command != "search":
search_list += "`{}.` **{}**\n".format(
search_track_num, track.to_string_user()
)
if track.is_album:
folder = True
else:
search_list += "`{}.` **{}**\n".format(
search_track_num, track.to_string_user()
)
if hasattr(tracks[0], "uri") and hasattr(tracks[0], "track_identifier"):
title = _("Tracks Found:")
footer = _("search results")
elif folder:
title = _("Folders Found:")
footer = _("local folders")
else:
title = _("Files Found:")
footer = _("local tracks")
embed = discord.Embed(
colour=await ctx.embed_colour(), title=title, description=search_list
)
embed.set_footer(
text=(_("Page {page_num}/{total_pages}") + " | {num_results} {footer}").format(
page_num=page_num,
total_pages=search_num_pages,
num_results=len(tracks),
footer=footer,
)
)
return embed
async def get_track_description(
self, track, local_folder_current_path, shorten=False
) -> Optional[str]:
"""Get the user facing formatted track name."""
string = None
if track and getattr(track, "uri", None):
query = Query.process_input(track.uri, local_folder_current_path)
if query.is_local or "localtracks/" in track.uri:
if (
hasattr(track, "title")
and track.title != "Unknown title"
and hasattr(track, "author")
and track.author != "Unknown artist"
):
if shorten:
string = f"{track.author} - {track.title}"
if len(string) > 40:
string = f"{(string[:40]).rstrip(' ')}..."
string = f'**{escape(f"{string}", formatting=True)}**'
else:
string = (
f'**{escape(f"{track.author} - {track.title}", formatting=True)}**'
+ escape(f"\n{query.to_string_user()} ", formatting=True)
)
elif hasattr(track, "title") and track.title != "Unknown title":
if shorten:
string = f"{track.title}"
if len(string) > 40:
string = f"{(string[:40]).rstrip(' ')}..."
string = f'**{escape(f"{string}", formatting=True)}**'
else:
string = f'**{escape(f"{track.title}", formatting=True)}**' + escape(
f"\n{query.to_string_user()} ", formatting=True
)
else:
string = query.to_string_user()
if shorten and len(string) > 40:
string = f"{(string[:40]).rstrip(' ')}..."
string = f'**{escape(f"{string}", formatting=True)}**'
else:
if track.is_stream:
icy = await self.icyparser(track.uri)
if icy:
title = icy
else:
title = f"{track.title} - {track.author}"
elif track.author.lower() not in track.title.lower():
title = f"{track.title} - {track.author}"
else:
title = track.title
string = f"{title}"
if shorten and len(string) > 40:
string = f"{(string[:40]).rstrip(' ')}..."
string = re.sub(RE_SQUARE, "", string)
string = f"**[{escape(string, formatting=True)}]({track.uri}) **"
elif hasattr(track, "to_string_user") and track.is_local:
string = track.to_string_user() + " "
if shorten and len(string) > 40:
string = f"{(string[:40]).rstrip(' ')}..."
string = f'**{escape(f"{string}", formatting=True)}**'
return string
async def get_track_description_unformatted(
self, track, local_folder_current_path
) -> Optional[str]:
"""Get the user facing unformatted track name."""
if track and hasattr(track, "uri"):
query = Query.process_input(track.uri, local_folder_current_path)
if query.is_local or "localtracks/" in track.uri:
if (
hasattr(track, "title")
and track.title != "Unknown title"
and hasattr(track, "author")
and track.author != "Unknown artist"
):
return f"{track.author} - {track.title}"
elif hasattr(track, "title") and track.title != "Unknown title":
return f"{track.title}"
else:
return query.to_string_user()
else:
if track.is_stream:
icy = await self.icyparser(track.uri)
if icy:
title = icy
else:
title = f"{track.title} - {track.author}"
elif track.author.lower() not in track.title.lower():
title = f"{track.title} - {track.author}"
else:
title = track.title
return f"{title}"
elif hasattr(track, "to_string_user") and track.is_local:
return track.to_string_user() + " "
return None
def format_playlist_picker_data(self, pid, pname, ptracks, pauthor, scope) -> str:
"""Format the values into a prettified codeblock."""
author = self.bot.get_user(pauthor) or pauthor or _("Unknown")
line = _(
" - Name: <{pname}>\n"
" - Scope: < {scope} >\n"
" - ID: < {pid} >\n"
" - Tracks: < {ptracks} >\n"
" - Author: < {author} >\n\n"
).format(
pname=pname, scope=self.humanize_scope(scope), pid=pid, ptracks=ptracks, author=author
)
return box(line, lang="md")
async def draw_time(self, ctx) -> str:
player = lavalink.get_player(ctx.guild.id)
paused = player.paused
pos = player.position or 1
dur = getattr(player.current, "length", player.position or 1)
sections = 12
loc_time = round((pos / dur if dur != 0 else pos) * sections)
bar = "\N{BOX DRAWINGS HEAVY HORIZONTAL}"
seek = "\N{RADIO BUTTON}"
if paused:
msg = "\N{DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}"
else:
msg = "\N{BLACK RIGHT-POINTING TRIANGLE}\N{VARIATION SELECTOR-16}"
for i in range(sections):
if i == loc_time:
msg += seek
else:
msg += bar
return msg
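# Illustrative sketch (not part of the cog): the progress-bar arithmetic used by
# draw_time above, extracted as a standalone helper taking a playback position and
# track duration (both in milliseconds).
def _demo_draw_bar(position, duration, sections=12):
    loc_time = round((position / duration if duration != 0 else position) * sections)
    bar = "\N{BOX DRAWINGS HEAVY HORIZONTAL}"
    seek = "\N{RADIO BUTTON}"
    return "".join(seek if i == loc_time else bar for i in range(sections))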
|
import warnings
from django.utils.encoding import force_bytes
from django.utils.encoding import force_str
from django.utils.html import linebreaks
from zinnia.settings import MARKDOWN_EXTENSIONS
from zinnia.settings import MARKUP_LANGUAGE
from zinnia.settings import RESTRUCTUREDTEXT_SETTINGS
def textile(value):
"""
Textile processing.
"""
try:
import textile
except ImportError:
warnings.warn("The Python textile library isn't installed.",
RuntimeWarning)
return value
return textile.textile(force_str(value))
def markdown(value, extensions=MARKDOWN_EXTENSIONS):
"""
    Markdown processing, optionally using various extensions
    that python-markdown supports.
`extensions` is an iterable of either markdown.Extension instances
or extension paths.
"""
try:
import markdown
except ImportError:
warnings.warn("The Python markdown library isn't installed.",
RuntimeWarning)
return value
return markdown.markdown(force_str(value), extensions=extensions)
def restructuredtext(value, settings=RESTRUCTUREDTEXT_SETTINGS):
"""
    RestructuredText processing with optional custom settings.
"""
try:
from docutils.core import publish_parts
except ImportError:
warnings.warn("The Python docutils library isn't installed.",
RuntimeWarning)
return value
parts = publish_parts(source=force_bytes(value),
writer_name='html4css1',
settings_overrides=settings)
return force_str(parts['fragment'])
def html_format(value):
"""
Returns the value formatted in HTML,
depends on MARKUP_LANGUAGE setting.
"""
if not value:
return ''
elif MARKUP_LANGUAGE == 'markdown':
return markdown(value)
elif MARKUP_LANGUAGE == 'textile':
return textile(value)
elif MARKUP_LANGUAGE == 'restructuredtext':
return restructuredtext(value)
elif '</p>' not in value:
return linebreaks(value)
return value
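# Illustrative sketch (not part of the original module): html_format dispatches on
# the MARKUP_LANGUAGE setting; a direct call to markdown() shows the underlying
# rendering (the value is returned unchanged, with a warning, if python-markdown
# is not installed).
def _demo_markdown_rendering():
    return markdown("Hello *world*")  # -> '<p>Hello <em>world</em></p>'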
|
from babelfish import LanguageReverseConverter, language_converters
class TVsubtitlesConverter(LanguageReverseConverter):
def __init__(self):
self.alpha2_converter = language_converters['alpha2']
self.from_tvsubtitles = {'br': ('por', 'BR'), 'ua': ('ukr',), 'gr': ('ell',), 'cn': ('zho',), 'jp': ('jpn',),
'cz': ('ces',)}
self.to_tvsubtitles = {v: k for k, v in self.from_tvsubtitles.items()}
self.codes = self.alpha2_converter.codes | set(self.from_tvsubtitles.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3, country) in self.to_tvsubtitles:
return self.to_tvsubtitles[(alpha3, country)]
if (alpha3,) in self.to_tvsubtitles:
return self.to_tvsubtitles[(alpha3,)]
return self.alpha2_converter.convert(alpha3, country, script)
def reverse(self, tvsubtitles):
if tvsubtitles in self.from_tvsubtitles:
return self.from_tvsubtitles[tvsubtitles]
return self.alpha2_converter.reverse(tvsubtitles)
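# Illustrative sketch (not part of the original module): the converter maps the
# site-specific codes to (alpha3, country) tuples and defers to the plain alpha2
# converter for every other language.
def _demo_tvsubtitles_codes():
    converter = TVsubtitlesConverter()
    assert converter.convert('por', 'BR') == 'br'  # site-specific code
    assert converter.reverse('cz') == ('ces',)     # site code back to alpha3
    return converter.convert('eng')                # delegated to alpha2 -> 'en'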
|
import pytest
from tests.async_mock import Mock, patch
@pytest.fixture(autouse=True)
def mock_cases():
"""Mock coronavirus cases."""
with patch(
"coronavirus.get_cases",
return_value=[
Mock(country="Netherlands", confirmed=10, recovered=8, deaths=1, current=1),
Mock(country="Germany", confirmed=1, recovered=0, deaths=0, current=0),
Mock(
country="Sweden",
confirmed=None,
recovered=None,
deaths=None,
current=None,
),
],
) as mock_get_cases:
yield mock_get_cases
|
import argparse
import datetime
import logging
import smtplib
import sys
from collections import defaultdict
from email.message import EmailMessage
from socket import getfqdn
import pysensu_yelp
import requests
from paasta_tools import mesos_tools
from paasta_tools.monitoring_tools import send_event
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import list_services
logger = logging.getLogger(__name__)
email_from_address = f"paasta@{getfqdn()}"
JUPYTER_PREFIX = "jupyterhub_"
def parse_args():
parser = argparse.ArgumentParser(
description="Reports long-running Spark frameworks."
)
parser.add_argument(
"--min-hours",
type=float,
help="Report frameworks that have been registered for more than this duration",
default=0,
)
parser.add_argument(
"--no-notify",
action="store_true",
help="Skip notifying the teams that own each framework",
)
parser.add_argument(
"--email-domain", default=None, help="Email domain for notifying users"
)
return parser.parse_args()
def get_time_running(framework):
registered_time = datetime.datetime.fromtimestamp(framework["registered_time"])
return datetime.datetime.now() - registered_time
def get_spark_properties(framework):
webui_url = framework.get("webui_url")
if not webui_url:
return None
env_endpoint = f"{webui_url}/api/v1/applications/{framework.id}/environment"
try:
response = requests.get(env_endpoint, timeout=5)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
logger.warning(f"Unable to connect to {env_endpoint}: {e!r}")
return None
if response.status_code != 200:
logger.warning(f"Bad response from {env_endpoint}: {response.status_code}")
return None
try:
return response.json()["sparkProperties"]
except (ValueError, KeyError):
logger.warning(
f"Unable to get sparkProperties for {framework.id}: got response {response.text}"
)
return None
def guess_service(properties):
if not properties:
return None
for key, value in properties:
if key == "spark.executorEnv.PAASTA_SERVICE":
service = value
break
else:
return None
if service.startswith(JUPYTER_PREFIX):
return service[len(JUPYTER_PREFIX) :]
else:
return service
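# Illustrative sketch (not part of the original module): guess_service pulls the
# PaaSTA service name out of the Spark properties (a list of [key, value] pairs)
# and strips the "jupyterhub_" prefix for notebook-launched jobs.
def _demo_guess_service():
    properties = [["spark.executorEnv.PAASTA_SERVICE", "jupyterhub_my_service"]]
    return guess_service(properties)  # -> "my_service"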
def get_matching_framework_info(min_hours):
frameworks = mesos_tools.get_all_frameworks(active_only=True)
matching_info = []
min_timedelta = datetime.timedelta(hours=min_hours)
for framework in frameworks:
if not framework.active:
continue
if framework.get("principal") != "spark":
continue
time_running = get_time_running(framework)
if time_running >= min_timedelta:
info = {
"id": framework.id,
"name": framework.name,
"webui_url": framework.get("webui_url"),
"service": guess_service(get_spark_properties(framework)),
"user": framework.user,
"time_running": str(time_running),
}
matching_info.append(info)
return matching_info
def format_framework(info):
result = [f'{info["name"]} (running for {info["time_running"]})']
result.append(f' user: {info["user"]}')
result.append(f' job UI: {info["webui_url"]}')
return "\n".join(result)
def format_message_for_service(service, frameworks):
output = f"Found the following long-running Spark frameworks associated with service {service}.\n"
output += (
f"Please check why they are still running and terminate if appropriate.\n\n"
)
output += "\n".join(format_framework(f) for f in frameworks)
return output
def get_messages_by_service(frameworks):
frameworks_by_service = defaultdict(list)
for framework in frameworks:
service = framework["service"]
frameworks_by_service[service].append(framework)
return {
service: format_message_for_service(service, frameworks)
for service, frameworks in frameworks_by_service.items()
}
def update_check_status(service, output, status):
overrides = {
"page": False,
"alert_after": 0,
"tip": "Ask the user to check the job UI and terminate the job if appropriate.",
"runbook": "http://y/spark-debug",
"ticket": True,
}
send_event(
service=service,
check_name=f"long_running_spark_jobs.{service}",
overrides=overrides,
status=status,
output=output,
soa_dir=DEFAULT_SOA_DIR,
)
def email_user(framework_info, email_domain):
guessed_user = None
if framework_info["user"] != "root":
guessed_user = framework_info["user"]
elif framework_info["name"].startswith(JUPYTER_PREFIX):
try:
# the job format is now `<AppName>_<UserName>_<UIPort>_<StartTime>`
guessed_user = framework_info["name"].split("_")[-3]
except IndexError:
pass
if guessed_user:
print(
f'Guessed {framework_info["name"]} belongs to {guessed_user}, sending email'
)
else:
print(f"Could not guess user from {framework_info}, skipping user email")
return
msg = EmailMessage()
msg["From"] = email_from_address
msg["To"] = f"{guessed_user}@{email_domain}"
msg["Subject"] = f'Long-running Spark framework {framework_info["name"]}'
content = "Please check why it is still running and terminate if appropriate.\n"
content += format_framework(framework_info)
msg.set_content(content)
with smtplib.SMTP("localhost") as s:
s.send_message(msg)
def report_spark_jobs(min_hours, no_notify, email_domain=None):
frameworks = get_matching_framework_info(min_hours=min_hours)
messages_by_service = get_messages_by_service(frameworks)
valid_services = set(list_services())
messages_for_unknown_services = []
for service, message in messages_by_service.items():
if service in valid_services:
print(f"{message}\n")
else:
messages_for_unknown_services.append(message)
if messages_for_unknown_services:
print("\nINVALID SERVICES")
print("----------------")
print(
"The following frameworks are associated with services that are not configured in PaaSTA.\n"
)
print("\n\n".join(messages_for_unknown_services))
if not no_notify:
for service in valid_services:
if service in messages_by_service:
                update_check_status(
                    service, messages_by_service[service], pysensu_yelp.Status.WARNING
                )
else:
update_check_status(
service, "No long running spark jobs", pysensu_yelp.Status.OK
)
if email_domain:
for framework in frameworks:
email_user(framework, email_domain)
return 0 if len(frameworks) == 0 else 1
def main():
args = parse_args()
logging.basicConfig()
return report_spark_jobs(args.min_hours, args.no_notify, args.email_domain)
if __name__ == "__main__":
sys.exit(main())
|
from __future__ import print_function
import argparse
import sys
INVISIBLE = range(0x20) + [0x81, 0x8d, 0x8f, 0x90, 0x9d]
def main(args):
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("file", action="store", nargs="+", help="one or more files to be printed")
ns = p.parse_args(args)
status = 0
for filename in ns.file:
try:
with open(filename, "rb") as f:
i = 0
chunk = f.read(16)
while chunk:
                    # Decoding as Windows-1252 to get a visual representation for
                    # most bytes that would otherwise be non-printable.
strchunk = "".join("_" if ord(c) in INVISIBLE else c.decode("windows-1252") for c in chunk)
hexchunk = " ".join("{:0>2X}".format(ord(c)) for c in chunk)
print("0x{:>08X} | {:<48} | {:<16}".format(i, hexchunk, strchunk))
i += 16
chunk = f.read(16)
except Exception as err:
print("printhex: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
status = 1
sys.exit(status)
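# Example output for the hypothetical input b"Hello, world!\n": each 16-byte chunk
# becomes an offset, the padded hex bytes, and a printable rendering, e.g.
# 0x00000000 | 48 65 6C 6C 6F 2C 20 77 6F 72 6C 64 21 0A        | Hello, world!_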
if __name__ == "__main__":
main(sys.argv[1:])
|
from collections import namedtuple
import functools
import logging
import os
import sys
from proboscis import TestProgram
from gmusicapi.clients import Musicmanager, Mobileclient
from gmusicapi import session
from gmusicapi.test import local_tests, server_tests # noqa
from gmusicapi.test.utils import NoticeLogging
EnvArg = namedtuple('EnvArg', 'envarg is_required kwarg description')
# these names needed to be compressed to fit everything into the travisci key size.
# there's also:
# * GM_A: when set (to anything) states that we are testing on a subscription account.
# * GM_AA_D_ID: a registered device id for use with mc streaming
# wc_envargs = (
# EnvArg('GM_U', 'email', 'WC user. If not present, user will be prompted.'),
# EnvArg('GM_P', 'password', 'WC password. If not present, user will be prompted.'),
# )
mc_envargs = (
EnvArg('GM_AA_D_ID', True, 'device_id', 'a registered device id for use with MC streaming'),
EnvArg('GM_R', False, 'oauth_credentials', 'an MC refresh token (defaults to MC.login default)'),
)
mm_envargs = (
EnvArg('GM_O', False, 'oauth_credentials', 'an MM refresh token (defaults to MM.login default)'),
EnvArg('GM_I', False, 'uploader_id', 'an MM uploader id (defaults to MM.login default)'),
    EnvArg('GM_N', False, 'uploader_name', 'an MM uploader name (defaults to MM.login default)'),
)
# Webclient auth retrieval removed while testing disabled.
#
# def prompt_for_wc_auth():
# """Return a valid (user, pass) tuple by continually
# prompting the user."""
#
# print("These tests will never delete or modify your music."
# "\n\n"
# "If the tests fail, you *might* end up with a test"
# " song/playlist in your library, though."
# "\n")
#
# wclient = Webclient()
# valid_wc_auth = False
#
# while not valid_wc_auth:
# print()
# email = input("Email: ")
# passwd = getpass()
#
# valid_wc_auth = wclient.login(email, passwd)
#
# return email, passwd
def _get_kwargs(envargs):
kwargs = {}
for arg in envargs:
if arg.is_required and arg.envarg not in os.environ:
raise ValueError("%s was not exported and must be %s" % (arg.envarg, arg.description))
val = os.environ.get(arg.envarg)
if arg.kwarg == 'oauth_credentials' and val is not None:
oauth_info = session.Musicmanager.oauth if arg.envarg == 'GM_O' else session.Mobileclient.oauth
kwargs['oauth_credentials'] = session.credentials_from_refresh_token(val, oauth_info)
else:
kwargs[arg.kwarg] = val
return kwargs
def retrieve_auth():
"""Searches the env for auth.
On success, return (mc_kwargs, mm_kwargs). On failure, raise ValueError."""
mc_kwargs = _get_kwargs(mc_envargs)
mm_kwargs = _get_kwargs(mm_envargs)
return (mc_kwargs, mm_kwargs)
def freeze_method_kwargs(klass, method_name, **kwargs):
method = getattr(klass, method_name)
partialfunc = functools.partialmethod
setattr(klass, method_name, partialfunc(method, **kwargs))
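# Illustrative sketch (not part of the original module): freezing kwargs onto a
# method rebinds it as a functools.partialmethod, so later calls pick up the
# frozen values automatically. FakeClient is a stand-in for the real clients.
def _demo_freeze_method_kwargs():
    class FakeClient:
        def login(self, username=None, device_id=None):
            return (username, device_id)
    freeze_method_kwargs(FakeClient, 'login', device_id='ABC123')
    return FakeClient().login(username='someone')  # -> ('someone', 'ABC123')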
def freeze_login_details(mc_kwargs, mm_kwargs):
"""Set the given kwargs to be the default for client login methods."""
freeze_method_kwargs(Musicmanager, 'login', **mm_kwargs)
freeze_method_kwargs(Mobileclient, 'oauth_login', **mc_kwargs)
def main():
"""Search env for auth envargs and run tests."""
if '--group=local' not in sys.argv:
# hack: assume we're just running the proboscis local group
freeze_login_details(*retrieve_auth())
# warnings typically signal a change in protocol,
# so fail the build if anything >= warning is sent.
noticer = NoticeLogging()
noticer.setLevel(logging.WARNING)
root_logger = logging.getLogger('gmusicapi')
root_logger.addHandler(noticer)
# proboscis does not have an exit=False equivalent,
# so SystemExit must be caught instead (we need
# to check the log noticer)
try:
TestProgram(module=sys.modules[__name__]).run_and_exit()
except SystemExit as e:
print()
if noticer.seen_message:
print('(failing build due to log warnings)')
sys.exit(1)
if e.code is not None:
sys.exit(e.code)
if __name__ == '__main__':
main()
|
import copy
import datetime
import json
import logging
import os
import re
from typing import Dict, Text
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import edw_service
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import s3
from perfkitbenchmarker.providers.aws import util
AWS_ATHENA_CMD_PREFIX = ['aws', 'athena']
AWS_ATHENA_CMD_POSTFIX = ['--output', 'json']
# TODO(user): Derive the full table set from the TPC suite.
TPC_H_TABLES = [
'customer', 'lineitem', 'nation', 'orders', 'part', 'partsupp', 'region',
'supplier'
]
TPC_DS_TABLES = [
'call_center', 'catalog_page', 'catalog_returns', 'catalog_sales',
'customer', 'customer_address', 'customer_demographics', 'date_dim',
'dbgen_version', 'household_demographics', 'income_band', 'inventory',
'item', 'promotion', 'reason', 'ship_mode', 'store', 'store_returns',
'store_sales', 'time_dim', 'warehouse', 'web_page', 'web_returns',
'web_sales', 'web_site'
]
FLAGS = flags.FLAGS
class AthenaQueryError(RuntimeError):
pass
def GetAthenaClientInterface(database: str, output_bucket: str,
region: str) -> edw_service.EdwClientInterface:
"""Builds and Returns the requested Athena client Interface.
Args:
database: Name of the Athena database to execute queries against.
output_bucket: String name of the S3 bucket to store query output.
region: String aws region in which the database exists and client operations
are performed.
Returns:
A concrete Client Interface object (subclass of EdwClientInterface)
Raises:
RuntimeError: if an unsupported athena_client_interface is requested
"""
if FLAGS.athena_client_interface == 'CLI':
return CliClientInterface(database, output_bucket, region)
if FLAGS.athena_client_interface == 'JAVA':
return JavaClientInterface(database, output_bucket, region)
  raise RuntimeError('Unknown Athena Client Interface requested: ' +
                     FLAGS.athena_client_interface)
class GenericClientInterface(edw_service.EdwClientInterface):
"""Generic Client Interface class for Athena.
Attributes:
database: String name of the Athena database to execute queries against.
output_bucket: String name of the S3 bucket to store query output.
region: String aws region in which the database exists and client operations
are performed.
"""
def __init__(self, database: str, output_bucket: str, region: str):
super(GenericClientInterface, self).__init__()
self.database = database
self.output_bucket = 's3://%s' % output_bucket
self.region = region
def GetMetadata(self) -> Dict[str, str]:
"""Gets the Metadata attributes for the Client Interface."""
return {
'client': FLAGS.athena_client_interface,
'client_region': self.region
}
class JavaClientInterface(GenericClientInterface):
"""Java Client Interface class for Athena.
"""
def Prepare(self, package_name: str) -> None:
"""Prepares the client vm to execute query.
    Installs the Java Execution Environment and an uber jar containing
    a) the Athena Java client libraries,
    b) an application to execute a query, gather execution details, and
       collect CloudWatch metrics, and
    c) their dependencies.
Args:
package_name: String name of the package defining the preprovisioned data
(certificates, etc.) to extract and use during client vm preparation.
"""
self.client_vm.Install('openjdk')
# Push the executable jar to the working directory on client vm
self.client_vm.InstallPreprovisionedPackageData(
package_name, ['athena-java-client-1.0.jar'], '')
  def ExecuteQuery(self, query_name: Text) -> Tuple[float, Dict[str, str]]:
"""Executes a query and returns performance details.
Args:
query_name: String name of the query to execute
Returns:
A tuple of (execution_time, run_metadata)
execution_time: A Float variable set to the query's completion time in
secs. -1.0 is used as a sentinel value implying the query failed. For a
successful query the value is expected to be positive.
run_metadata: A dictionary of query execution attributes eg. script name
"""
query_command = (
'java -cp athena-java-client-1.0.jar '
'com.google.cloud.performance.edw.Single --region {} --database {} '
'--output_location {} --query_file {} --query_timeout_secs {}'
.format(self.region, self.database, self.output_bucket, query_name,
FLAGS.athena_query_timeout))
if not FLAGS.athena_metrics_collection:
# execute the query in default primary workgroup
query_command = '{} --workgroup primary'.format(query_command)
query_command = '{} --collect_metrics {} --delete_workgroup {}'.format(
query_command, FLAGS.athena_metrics_collection,
FLAGS.athena_workgroup_delete)
    stdout, _ = self.client_vm.RemoteCommand(query_command)
    result = json.loads(stdout)  # parse the client's JSON output once
    details = copy.copy(self.GetMetadata())  # Copy the base metadata
    details.update(result['details'])
    details['query_start'] = result['query_start']
    details['query_end'] = result['query_end']
    performance = result['query_wall_time_in_secs']
    return performance, details
class CliClientInterface(GenericClientInterface):
"""Command Line Client Interface class for Athena.
Uses the native Athena client available with the awscli
https://docs.aws.amazon.com/cli/latest/reference/athena/index.html.
"""
def Prepare(self, package_name: str) -> None:
"""Prepares the client vm to execute query.
    Installs the AWS CLI and credentials, and pushes the scripts used to drive
    query execution.
Args:
package_name: String name of the package defining the preprovisioned data
(certificates, etc.) to extract and use during client vm preparation.
"""
self.client_vm.Install('pip')
self.client_vm.RemoteCommand('sudo pip install absl-py')
for pkg in ('aws_credentials', 'awscli'):
self.client_vm.Install(pkg)
# Push the framework to execute a sql query and gather performance details.
service_specific_dir = os.path.join('edw', Athena.SERVICE_TYPE)
self.client_vm.PushFile(
data.ResourcePath(
os.path.join(service_specific_dir, 'script_runner.sh')))
runner_permission_update_cmd = 'chmod 755 {}'.format('script_runner.sh')
self.client_vm.RemoteCommand(runner_permission_update_cmd)
self.client_vm.PushFile(
data.ResourcePath(os.path.join('edw', 'script_driver.py')))
self.client_vm.PushFile(
data.ResourcePath(
os.path.join(service_specific_dir,
'provider_specific_script_driver.py')))
  def ExecuteQuery(self, query_name: Text) -> Tuple[float, Dict[str, str]]:
"""Executes a query and returns performance details.
Args:
query_name: String name of the query to execute
Returns:
A tuple of (execution_time, run_metadata)
execution_time: A Float variable set to the query's completion time in
secs. -1.0 is used as a sentinel value implying the query failed. For a
successful query the value is expected to be positive.
run_metadata: A dictionary of query execution attributes eg. script name
"""
stdout, _ = self.client_vm.RemoteCommand(
'python script_driver.py --script={} --database={} --query_timeout={} '
'--athena_query_output_bucket={} --athena_region={}'.format(
query_name, self.database, FLAGS.athena_query_timeout,
self.output_bucket, self.region))
script_performance = json.loads(str(stdout))
execution_time = script_performance[query_name]['execution_time']
run_metadata = {'script': query_name}
if 'error_details' in script_performance[query_name]:
run_metadata['error_details'] = script_performance[query_name][
'error_details']
run_metadata.update(self.GetMetadata())
return execution_time, run_metadata
def ReadScript(script_uri):
"""Method to read a sql script based on its local path.
Arguments:
script_uri: Local URI of file containing SQL query.
Returns:
Query String contents of the URI location.
Raises:
IOError: If the script cannot be read.
"""
with open(script_uri) as fp:
return fp.read()
def PrepareQueryString(query_string_template, substitutions):
"""Method to read a template Athena script and substitute placeholders.
Args:
query_string_template: Template version of the Athena query.
substitutions: A dictionary of string placeholder keys and corresponding
string values.
Returns:
Materialized Athena query as a string.
"""
  # Apply each substitution to the accumulated result (not the raw template)
  # so that every placeholder is replaced, not just the last one.
  query_string = query_string_template
  for key, value in substitutions.items():
    query_string = query_string.replace(key, value)
  return query_string
def RunScriptCommand(script_command):
"""Method to execute an AWS Athena cli command.
Args:
script_command: Fully compiled AWS Athena cli command.
Returns:
String stdout result of executing the query.
Script Command execution duration in seconds (rounded).
Raises:
AthenaQueryError: If the return code does not indicate success.
"""
start_time = datetime.datetime.now()
stdout, _, retcode = vm_util.IssueCommand(
script_command, raise_on_failure=False)
if retcode:
raise AthenaQueryError
end_time = datetime.datetime.now()
return stdout, int((end_time - start_time).total_seconds())
class Athena(edw_service.EdwService):
"""Object representing a Athena data warehouse."""
CLOUD = aws.CLOUD
SERVICE_TYPE = 'athena'
def __init__(self, edw_service_spec):
super(Athena, self).__init__(edw_service_spec)
self.region = util.GetRegionFromZone(FLAGS.zones[0])
self.output_bucket = '-'.join(
[FLAGS.athena_output_location_prefix, self.region, FLAGS.run_uri])
self.client_interface = GetAthenaClientInterface(self.cluster_identifier,
self.output_bucket,
self.region)
self.s3_service = s3.S3Service()
self.s3_service.PrepareService(self.region)
self.s3_service.MakeBucket(self.output_bucket)
if FLAGS.provision_athena:
self.data_bucket = 'pkb' + self.cluster_identifier.replace('_', '')
self.tables = (
TPC_H_TABLES if FLAGS.edw_tpc_dsb_type == 'tpc_h' else TPC_DS_TABLES)
self.athena_db_create_time = 0
self.athena_table_create_time = 0
def BuildAthenaCommand(self, query_string, database=None):
"""Method to compile a AWS Athena cli command.
Arguments:
query_string: A string with the query that needs to be executed on Athena.
database: The Athena database against which the query should be executed.
Returns:
Fully compiled AWS Athena cli command.
"""
cmd = []
cmd.extend(AWS_ATHENA_CMD_PREFIX)
cmd.extend([
'--region', self.region,
'start-query-execution',
'--query-string', query_string
])
if database:
cmd.extend(['--query-execution-context', ('Database=%s' % database)])
cmd.extend([
'--result-configuration',
('OutputLocation=s3://%s' % self.output_bucket)
])
cmd.extend(AWS_ATHENA_CMD_POSTFIX)
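    # Illustrative final command (example values; actual values come from the
    # pieces assembled above):
    #   aws athena --region <region> start-query-execution --query-string <sql>
    #     --query-execution-context Database=<database>
    #     --result-configuration OutputLocation=s3://<output_bucket> --output json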
return cmd
def _Create(self):
"""Create a Athena data warehouse."""
def _EmptyDatabase():
"""Remove tables, if they exist, so they can be refreshed.
If the database and/or tables don't already exist, the drop commands
will simply fail, which won't raise errors.
"""
drop_script_path = data.ResourcePath('edw/athena/%s/ddl/drop.sql' %
FLAGS.edw_tpc_dsb_type)
drop_script_contents = ReadScript(drop_script_path)
# Drop all tables so the database can be dropped.
for table in self.tables:
# Remove the folder backing each parquet table so they can be refreshed.
vm_util.IssueCommand([
'aws', 's3', 'rm',
's3://%s/%s_parquet' % (self.data_bucket, table), '--recursive'
], raise_on_failure=False)
# The parquet tables don't have the type suffix so that the queries can
# run as written without having to change the table names.
for suffix in ['_csv', '']:
script_contents = PrepareQueryString(drop_script_contents,
{'{table}': table + suffix})
script_command = self.BuildAthenaCommand(
script_contents, database=self.cluster_identifier)
RunScriptCommand(script_command)
drop_database_query_string = PrepareQueryString(
'drop database database_name',
{'database_name': self.cluster_identifier})
script_command = self.BuildAthenaCommand(drop_database_query_string)
RunScriptCommand(script_command)
def _CreateDatabase():
create_database_query_string = PrepareQueryString(
'create database database_name',
{'database_name': self.cluster_identifier})
script_command = self.BuildAthenaCommand(create_database_query_string)
return RunScriptCommand(script_command)
def _CreateTable(table_create_sql_template):
template_script_path = data.ResourcePath(table_create_sql_template)
template_script_contents = ReadScript(template_script_path)
script_contents = PrepareQueryString(template_script_contents,
{'{bucket}': self.data_bucket})
script_command = self.BuildAthenaCommand(
script_contents, database=self.cluster_identifier)
return RunScriptCommand(script_command)
def _CreateAllTables():
"""Create all TPC benchmarking tables."""
cumulative_table_create_time = 0
for table in self.tables:
for suffix in ['_csv', '_parquet']:
script = 'edw/athena/%s/ddl/%s.sql' % (FLAGS.edw_tpc_dsb_type,
table + suffix)
_, table_create_time = _CreateTable(script)
cumulative_table_create_time += table_create_time
return cumulative_table_create_time
_EmptyDatabase()
_, self.athena_db_create_time = _CreateDatabase()
self.athena_table_create_time = _CreateAllTables()
def _Exists(self):
"""Method to validate the existence of a Athena data warehouse.
Returns:
Boolean value indicating the existence of a Athena data warehouse.
"""
raise NotImplementedError
def _Delete(self):
"""Delete a Athena data warehouse."""
if not FLAGS.teardown_athena:
logging.info('The current resource is requested to be long living.')
return
raise NotImplementedError
def Cleanup(self):
# Direct cleanup is used instead of _DeleteDependencies because the Athena
# warehouse resource isn't created/deleted each time.
self.s3_service.DeleteBucket(self.output_bucket)
def GetDataDetails(self) -> Dict[str, str]:
"""Returns a dictionary with underlying data details.
cluster_identifier = <dataset_id>
Data details are extracted from the dataset_id that follows the format:
<dataset>_<format>_<compression>_<partitioning>
eg.
    tpch100_parquet_uncompressed_unpartitioned
Returns:
A dictionary set to underlying data's details (format, etc.)
"""
data_details = {}
# If the information isn't in the cluster identifier, skip collecting it.
if '_' not in self.cluster_identifier:
return data_details
parsed_id = re.split(r'_', self.cluster_identifier)
data_details['format'] = parsed_id[1]
data_details['compression'] = parsed_id[2]
data_details['partitioning'] = parsed_id[3]
return data_details
def GetMetadata(self):
"""Return a dictionary of the metadata for the Athena data warehouse."""
basic_data = super(Athena, self).GetMetadata()
basic_data.update({'database': self.cluster_identifier})
basic_data.update(self.GetDataDetails())
basic_data.update(self.client_interface.GetMetadata())
return basic_data
|
import sys
import mne
def run():
"""Run command."""
parser = mne.commands.utils.get_optparser(__file__, usage='mne sys_info')
options, args = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
mne.sys_info()
mne.utils.run_command_if_main()
|
import logging
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv, entity_registry as er
from homeassistant.helpers.dispatcher import dispatcher_send
from .config_flow import (
CONF_SHOW_ALL_SOURCES,
CONF_ZONE2,
CONF_ZONE3,
DEFAULT_SHOW_SOURCES,
DEFAULT_TIMEOUT,
DEFAULT_ZONE2,
DEFAULT_ZONE3,
DOMAIN,
)
from .receiver import ConnectDenonAVR
CONF_RECEIVER = "receiver"
UNDO_UPDATE_LISTENER = "undo_update_listener"
SERVICE_GET_COMMAND = "get_command"
ATTR_COMMAND = "command"
_LOGGER = logging.getLogger(__name__)
CALL_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids})
GET_COMMAND_SCHEMA = CALL_SCHEMA.extend({vol.Required(ATTR_COMMAND): cv.string})
SERVICE_TO_METHOD = {
SERVICE_GET_COMMAND: {"method": "get_command", "schema": GET_COMMAND_SCHEMA}
}
def setup(hass: core.HomeAssistant, config: dict):
"""Set up the denonavr platform."""
def service_handler(service):
method = SERVICE_TO_METHOD.get(service.service)
data = service.data.copy()
data["method"] = method["method"]
dispatcher_send(hass, DOMAIN, data)
for service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[service]["schema"]
hass.services.register(DOMAIN, service, service_handler, schema=schema)
return True
async def async_setup_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up the denonavr components from a config entry."""
hass.data.setdefault(DOMAIN, {})
# Connect to receiver
connect_denonavr = ConnectDenonAVR(
hass,
entry.data[CONF_HOST],
DEFAULT_TIMEOUT,
entry.options.get(CONF_SHOW_ALL_SOURCES, DEFAULT_SHOW_SOURCES),
entry.options.get(CONF_ZONE2, DEFAULT_ZONE2),
entry.options.get(CONF_ZONE3, DEFAULT_ZONE3),
)
if not await connect_denonavr.async_connect_receiver():
raise ConfigEntryNotReady
receiver = connect_denonavr.receiver
undo_listener = entry.add_update_listener(update_listener)
hass.data[DOMAIN][entry.entry_id] = {
CONF_RECEIVER: receiver,
UNDO_UPDATE_LISTENER: undo_listener,
}
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "media_player")
)
return True
async def async_unload_entry(
hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry
):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_forward_entry_unload(
config_entry, "media_player"
)
hass.data[DOMAIN][config_entry.entry_id][UNDO_UPDATE_LISTENER]()
# Remove zone2 and zone3 entities if needed
entity_registry = await er.async_get_registry(hass)
entries = er.async_entries_for_config_entry(entity_registry, config_entry.entry_id)
zone2_id = f"{config_entry.unique_id}-Zone2"
zone3_id = f"{config_entry.unique_id}-Zone3"
for entry in entries:
if entry.unique_id == zone2_id and not config_entry.options.get(CONF_ZONE2):
entity_registry.async_remove(entry.entity_id)
_LOGGER.debug("Removing zone2 from DenonAvr")
if entry.unique_id == zone3_id and not config_entry.options.get(CONF_ZONE3):
entity_registry.async_remove(entry.entity_id)
_LOGGER.debug("Removing zone3 from DenonAvr")
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
async def update_listener(
hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry
):
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
|
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_MONITORED_CONDITIONS, CONF_SCAN_INTERVAL
from homeassistant.core import callback
from . import server_id_valid
from .const import (
CONF_MANUAL,
CONF_SERVER_ID,
CONF_SERVER_NAME,
DEFAULT_NAME,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SERVER,
)
from .const import DOMAIN # pylint: disable=unused-import
class SpeedTestFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Speedtest.net config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return SpeedTestOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
if user_input is None:
return self.async_show_form(step_id="user")
return self.async_create_entry(title=DEFAULT_NAME, data=user_input)
async def async_step_import(self, import_config):
"""Import from config."""
if (
CONF_SERVER_ID in import_config
and not await self.hass.async_add_executor_job(
server_id_valid, import_config[CONF_SERVER_ID]
)
):
return self.async_abort(reason="wrong_server_id")
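        # The YAML import supplies the scan interval as a timedelta (hence the
        # .seconds access below); store it as whole minutes on the entry.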
import_config[CONF_SCAN_INTERVAL] = int(
import_config[CONF_SCAN_INTERVAL].seconds / 60
)
import_config.pop(CONF_MONITORED_CONDITIONS)
return await self.async_step_user(user_input=import_config)
class SpeedTestOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle SpeedTest options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
self._servers = {}
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
if user_input is not None:
server_name = user_input[CONF_SERVER_NAME]
if server_name != "*Auto Detect":
server_id = self._servers[server_name]["id"]
user_input[CONF_SERVER_ID] = server_id
else:
user_input[CONF_SERVER_ID] = None
return self.async_create_entry(title="", data=user_input)
self._servers = self.hass.data[DOMAIN].servers
server = []
if self.config_entry.options.get(
CONF_SERVER_ID
) and not self.config_entry.options.get(CONF_SERVER_NAME):
server = [
key
for (key, value) in self._servers.items()
if value.get("id") == self.config_entry.options[CONF_SERVER_ID]
]
server_name = server[0] if server else DEFAULT_SERVER
options = {
vol.Optional(
CONF_SERVER_NAME,
default=self.config_entry.options.get(CONF_SERVER_NAME, server_name),
): vol.In(self._servers.keys()),
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): int,
vol.Optional(
CONF_MANUAL, default=self.config_entry.options.get(CONF_MANUAL, False)
): bool,
}
return self.async_show_form(
step_id="init", data_schema=vol.Schema(options), errors=errors
)
|
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
from requests import Session
from . import ParserBeautifulSoup, Provider
from ..cache import SHOW_EXPIRATION_TIME, region
from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded
from ..matches import guess_matches
from ..subtitle import Subtitle, fix_line_ending
from ..utils import sanitize
from ..video import Episode
logger = logging.getLogger(__name__)
language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter')
# Series cell matching regex
show_cells_re = re.compile(b'<td class="version">.*?</td>', re.DOTALL)
#: Series header parsing regex
series_year_re = re.compile(r'^(?P<series>[ \w\'.:(),*&!?-]+?)(?: \((?P<year>\d{4})\))?$')
class Addic7edSubtitle(Subtitle):
"""Addic7ed Subtitle."""
provider_name = 'addic7ed'
def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version,
download_link):
super(Addic7edSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link)
self.series = series
self.season = season
self.episode = episode
self.title = title
self.year = year
self.version = version
self.download_link = download_link
@property
def id(self):
return self.download_link
@property
def info(self):
return '{series}{yopen}{year}{yclose} s{season:02d}e{episode:02d}{topen}{title}{tclose}{version}'.format(
series=self.series, season=self.season, episode=self.episode, title=self.title, year=self.year or '',
version=self.version, yopen=' (' if self.year else '', yclose=')' if self.year else '',
topen=' - ' if self.title else '', tclose=' - ' if self.version else ''
)
def get_matches(self, video):
# series name
matches = guess_matches(video, {
'title': self.series,
'season': self.season,
'episode': self.episode,
'episode_title': self.title,
'year': self.year,
'release_group': self.version,
})
# resolution
if video.resolution and self.version and video.resolution in self.version.lower():
matches.add('resolution')
# other properties
if self.version:
matches |= guess_matches(video, guessit(self.version, {'type': 'episode'}), partial=True)
return matches
class Addic7edProvider(Provider):
"""Addic7ed Provider."""
languages = {Language('por', 'BR')} | {Language(l) for l in [
'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 'glg',
'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus',
'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho'
]}
video_types = (Episode,)
server_url = 'http://www.addic7ed.com/'
subtitle_class = Addic7edSubtitle
def __init__(self, username=None, password=None):
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = self.user_agent
# login
if self.username and self.password:
logger.info('Logging in')
data = {'username': self.username, 'password': self.password, 'Submit': 'Log in'}
r = self.session.post(self.server_url + 'dologin.php', data, allow_redirects=False, timeout=10)
if r.status_code != 302:
raise AuthenticationError(self.username)
logger.debug('Logged in')
self.logged_in = True
def terminate(self):
# logout
if self.logged_in:
logger.info('Logging out')
r = self.session.get(self.server_url + 'logout.php', timeout=10)
r.raise_for_status()
logger.debug('Logged out')
self.logged_in = False
self.session.close()
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def _get_show_ids(self):
"""Get the ``dict`` of show ids per series by querying the `shows.php` page.
:return: show id per series, lower case and without quotes.
:rtype: dict
"""
# get the show page
logger.info('Getting show ids')
r = self.session.get(self.server_url + 'shows.php', timeout=10)
r.raise_for_status()
# LXML parser seems to fail when parsing Addic7ed.com HTML markup.
# Last known version to work properly is 3.6.4 (next version, 3.7.0, fails)
# Assuming the site's markup is bad, and stripping it down to only contain what's needed.
show_cells = re.findall(show_cells_re, r.content)
if show_cells:
soup = ParserBeautifulSoup(b''.join(show_cells), ['lxml', 'html.parser'])
else:
# If RegEx fails, fall back to original r.content and use 'html.parser'
soup = ParserBeautifulSoup(r.content, ['html.parser'])
# populate the show ids
show_ids = {}
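        # each matched href has the form "/show/<id>", so the id starts at index 6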
for show in soup.select('td.version > h3 > a[href^="/show/"]'):
show_ids[sanitize(show.text)] = int(show['href'][6:])
logger.debug('Found %d show ids', len(show_ids))
return show_ids
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def _search_show_id(self, series, year=None):
"""Search the show id from the `series` and `year`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:return: the show id, if found.
:rtype: int
"""
# addic7ed doesn't support search with quotes
series = series.replace('\'', ' ')
# build the params
series_year = '%s %d' % (series, year) if year is not None else series
params = {'search': series_year, 'Submit': 'Search'}
# make the search
logger.info('Searching show ids with %r', params)
r = self.session.get(self.server_url + 'srch.php', params=params, timeout=10)
r.raise_for_status()
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# get the suggestion
suggestion = soup.select('span.titulo > a[href^="/show/"]')
if not suggestion:
logger.warning('Show id not found: no suggestion')
return None
if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year):
logger.warning('Show id not found: suggestion does not match')
return None
show_id = int(suggestion[0]['href'][6:])
logger.debug('Found show id %d', show_id)
return show_id
def get_show_id(self, series, year=None, country_code=None):
"""Get the best matching show id for `series`, `year` and `country_code`.
First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:param country_code: country code of the series, if any.
:type country_code: str
:return: the show id, if found.
:rtype: int
"""
series_sanitized = sanitize(series).lower()
show_ids = self._get_show_ids()
show_id = None
# attempt with country
if not show_id and country_code:
logger.debug('Getting show id with country')
show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))
# attempt with year
if not show_id and year:
logger.debug('Getting show id with year')
show_id = show_ids.get('%s %d' % (series_sanitized, year))
# attempt clean
if not show_id:
logger.debug('Getting show id')
show_id = show_ids.get(series_sanitized)
# search as last resort
if not show_id:
logger.warning('Series %s not found in show ids', series)
show_id = self._search_show_id(series)
return show_id
def query(self, show_id, series, season, year=None, country=None):
# get the page of the season of the show
logger.info('Getting the page of show id %d, season %d', show_id, season)
r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=10)
r.raise_for_status()
if not r.content:
# Provider returns a status of 304 Not Modified with an empty content
# raise_for_status won't raise exception for that status code
logger.debug('No data returned from provider')
return []
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
        # parse the series name and year from the page header; the last 10
        # characters (the page's trailing label) are stripped before matching
        match = series_year_re.match(soup.select('#header font')[0].text.strip()[:-10])
        series = match.group('series')
        year = int(match.group('year')) if match.group('year') else None
        # loop over subtitle rows
        subtitles = []
for row in soup.select('tr.epeven'):
cells = row('td')
# ignore incomplete subtitles
status = cells[5].text
if status != 'Completed':
logger.debug('Ignoring subtitle with status %s', status)
continue
# read the item
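            # hrefs are site-absolute ("/..."); drop the leading slash because
            # server_url already ends with one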
language = Language.fromaddic7ed(cells[3].text)
hearing_impaired = bool(cells[6].text)
page_link = self.server_url + cells[2].a['href'][1:]
season = int(cells[0].text)
episode = int(cells[1].text)
title = cells[2].text
version = cells[4].text
download_link = cells[9].a['href'][1:]
subtitle = self.subtitle_class(language, hearing_impaired, page_link, series, season, episode, title, year,
version, download_link)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
# lookup show_id
titles = [video.series] + video.alternative_series
show_id = None
for title in titles:
show_id = self.get_show_id(title, video.year)
if show_id is not None:
break
# query for subtitles with the show_id
if show_id is not None:
subtitles = [s for s in self.query(show_id, title, video.season, video.year)
if s.language in languages and s.episode == video.episode]
if subtitles:
return subtitles
else:
logger.error('No show id found for %r (%r)', video.series, {'year': video.year})
return []
def download_subtitle(self, subtitle):
# download the subtitle
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link},
timeout=10)
r.raise_for_status()
if not r.content:
# Provider returns a status of 304 Not Modified with an empty content
# raise_for_status won't raise exception for that status code
logger.debug('Unable to download subtitle. No data returned from provider')
return
# detect download limit exceeded
if r.headers['Content-Type'] == 'text/html':
raise DownloadLimitExceeded
subtitle.content = fix_line_ending(r.content)
|
import asyncio
from collections import defaultdict
import logging
import async_timeout
from rflink.protocol import create_rflink_connection
from serial import SerialException
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_COMMAND,
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
STATE_ON,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.restore_state import RestoreEntity
_LOGGER = logging.getLogger(__name__)
ATTR_EVENT = "event"
ATTR_STATE = "state"
CONF_ALIASES = "aliases"
CONF_GROUP_ALIASES = "group_aliases"
CONF_GROUP = "group"
CONF_NOGROUP_ALIASES = "nogroup_aliases"
CONF_DEVICE_DEFAULTS = "device_defaults"
CONF_DEVICE_ID = "device_id"
CONF_DEVICES = "devices"
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_FIRE_EVENT = "fire_event"
CONF_IGNORE_DEVICES = "ignore_devices"
CONF_RECONNECT_INTERVAL = "reconnect_interval"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"
CONF_WAIT_FOR_ACK = "wait_for_ack"
DATA_DEVICE_REGISTER = "rflink_device_register"
DATA_ENTITY_LOOKUP = "rflink_entity_lookup"
DATA_ENTITY_GROUP_LOOKUP = "rflink_entity_group_only_lookup"
DEFAULT_RECONNECT_INTERVAL = 10
DEFAULT_SIGNAL_REPETITIONS = 1
CONNECTION_TIMEOUT = 10
EVENT_BUTTON_PRESSED = "button_pressed"
EVENT_KEY_COMMAND = "command"
EVENT_KEY_ID = "id"
EVENT_KEY_SENSOR = "sensor"
EVENT_KEY_UNIT = "unit"
RFLINK_GROUP_COMMANDS = ["allon", "alloff"]
DOMAIN = "rflink"
SERVICE_SEND_COMMAND = "send_command"
SIGNAL_AVAILABILITY = "rflink_device_available"
SIGNAL_HANDLE_EVENT = "rflink_handle_event_{}"
TMP_ENTITY = "tmp.{}"
DEVICE_DEFAULTS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(
CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS
): vol.Coerce(int),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PORT): vol.Any(cv.port, cv.string),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_WAIT_FOR_ACK, default=True): cv.boolean,
vol.Optional(
CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL
): int,
vol.Optional(CONF_IGNORE_DEVICES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
SEND_COMMAND_SCHEMA = vol.Schema(
{vol.Required(CONF_DEVICE_ID): cv.string, vol.Required(CONF_COMMAND): cv.string}
)
def identify_event_type(event):
"""Look at event to determine type of device.
Async friendly.
"""
if EVENT_KEY_COMMAND in event:
return EVENT_KEY_COMMAND
if EVENT_KEY_SENSOR in event:
return EVENT_KEY_SENSOR
return "unknown"
async def async_setup(hass, config):
"""Set up the Rflink component."""
# Allow entities to register themselves by device_id to be looked up when
# new rflink events arrive to be handled
hass.data[DATA_ENTITY_LOOKUP] = {
EVENT_KEY_COMMAND: defaultdict(list),
EVENT_KEY_SENSOR: defaultdict(list),
}
hass.data[DATA_ENTITY_GROUP_LOOKUP] = {EVENT_KEY_COMMAND: defaultdict(list)}
# Allow platform to specify function to register new unknown devices
hass.data[DATA_DEVICE_REGISTER] = {}
async def async_send_command(call):
"""Send Rflink command."""
_LOGGER.debug("Rflink command for %s", str(call.data))
if not (
await RflinkCommand.send_command(
call.data.get(CONF_DEVICE_ID), call.data.get(CONF_COMMAND)
)
):
_LOGGER.error("Failed Rflink command for %s", str(call.data))
hass.services.async_register(
DOMAIN, SERVICE_SEND_COMMAND, async_send_command, schema=SEND_COMMAND_SCHEMA
)
@callback
def event_callback(event):
"""Handle incoming Rflink events.
Rflink events arrive as dictionaries of varying content
depending on their type. Identify the events and distribute
accordingly.
"""
event_type = identify_event_type(event)
_LOGGER.debug("event of type %s: %s", event_type, event)
# Don't propagate non entity events (eg: version string, ack response)
if event_type not in hass.data[DATA_ENTITY_LOOKUP]:
_LOGGER.debug("unhandled event of type: %s", event_type)
return
# Lookup entities who registered this device id as device id or alias
event_id = event.get(EVENT_KEY_ID)
is_group_event = (
event_type == EVENT_KEY_COMMAND
and event[EVENT_KEY_COMMAND] in RFLINK_GROUP_COMMANDS
)
if is_group_event:
entity_ids = hass.data[DATA_ENTITY_GROUP_LOOKUP][event_type].get(
event_id, []
)
else:
entity_ids = hass.data[DATA_ENTITY_LOOKUP][event_type][event_id]
_LOGGER.debug("entity_ids: %s", entity_ids)
if entity_ids:
# Propagate event to every entity matching the device id
for entity in entity_ids:
_LOGGER.debug("passing event to %s", entity)
async_dispatcher_send(hass, SIGNAL_HANDLE_EVENT.format(entity), event)
elif not is_group_event:
# If device is not yet known, register with platform (if loaded)
if event_type in hass.data[DATA_DEVICE_REGISTER]:
_LOGGER.debug("device_id not known, adding new device")
# Add bogus event_id first to avoid race if we get another
# event before the device is created
# Any additional events received before the device has been
# created will thus be ignored.
hass.data[DATA_ENTITY_LOOKUP][event_type][event_id].append(
TMP_ENTITY.format(event_id)
)
hass.async_create_task(
hass.data[DATA_DEVICE_REGISTER][event_type](event)
)
else:
_LOGGER.debug("device_id not known and automatic add disabled")
# When connecting to tcp host instead of serial port (optional)
host = config[DOMAIN].get(CONF_HOST)
# TCP port when host configured, otherwise serial port
port = config[DOMAIN][CONF_PORT]
@callback
def reconnect(exc=None):
"""Schedule reconnect after connection has been unexpectedly lost."""
# Reset protocol binding before starting reconnect
RflinkCommand.set_rflink_protocol(None)
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
# If HA is not stopping, initiate new connection
if hass.state != CoreState.stopping:
_LOGGER.warning("disconnected from Rflink, reconnecting")
hass.async_create_task(connect())
async def connect():
"""Set up connection and hook it into HA for reconnect/shutdown."""
_LOGGER.info("Initiating Rflink connection")
# Rflink create_rflink_connection decides based on the value of host
# (string or None) if serial or tcp mode should be used
# Initiate serial/tcp connection to Rflink gateway
connection = create_rflink_connection(
port=port,
host=host,
event_callback=event_callback,
disconnect_callback=reconnect,
loop=hass.loop,
ignore=config[DOMAIN][CONF_IGNORE_DEVICES],
)
try:
with async_timeout.timeout(CONNECTION_TIMEOUT):
transport, protocol = await connection
except (
SerialException,
ConnectionRefusedError,
TimeoutError,
OSError,
asyncio.TimeoutError,
) as exc:
reconnect_interval = config[DOMAIN][CONF_RECONNECT_INTERVAL]
_LOGGER.exception(
"Error connecting to Rflink, reconnecting in %s", reconnect_interval
)
# Connection to Rflink device is lost, make entities unavailable
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, False)
hass.loop.call_later(reconnect_interval, reconnect, exc)
return
# There is a valid connection to a Rflink device now so
# mark entities as available
async_dispatcher_send(hass, SIGNAL_AVAILABILITY, True)
# Bind protocol to command class to allow entities to send commands
RflinkCommand.set_rflink_protocol(protocol, config[DOMAIN][CONF_WAIT_FOR_ACK])
# handle shutdown of Rflink asyncio transport
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, lambda x: transport.close()
)
_LOGGER.info("Connected to Rflink")
hass.async_create_task(connect())
return True
class RflinkDevice(Entity):
"""Representation of a Rflink device.
Contains the common logic for Rflink entities.
"""
platform = None
_state = None
_available = True
def __init__(
self,
device_id,
initial_event=None,
name=None,
aliases=None,
group=True,
group_aliases=None,
nogroup_aliases=None,
fire_event=False,
signal_repetitions=DEFAULT_SIGNAL_REPETITIONS,
):
"""Initialize the device."""
# Rflink specific attributes for every component type
self._initial_event = initial_event
self._device_id = device_id
if name:
self._name = name
else:
self._name = device_id
self._aliases = aliases
self._group = group
self._group_aliases = group_aliases
self._nogroup_aliases = nogroup_aliases
self._should_fire_event = fire_event
self._signal_repetitions = signal_repetitions
@callback
def handle_event_callback(self, event):
"""Handle incoming event for device type."""
# Call platform specific event handler
self._handle_event(event)
# Propagate changes through ha
self.async_write_ha_state()
# Put command onto bus for user to subscribe to
if self._should_fire_event and identify_event_type(event) == EVENT_KEY_COMMAND:
self.hass.bus.async_fire(
EVENT_BUTTON_PRESSED,
{ATTR_ENTITY_ID: self.entity_id, ATTR_STATE: event[EVENT_KEY_COMMAND]},
)
_LOGGER.debug(
"Fired bus event for %s: %s", self.entity_id, event[EVENT_KEY_COMMAND]
)
def _handle_event(self, event):
"""Platform specific event handler."""
raise NotImplementedError()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return a name for the device."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
if self.assumed_state:
return False
return self._state
@property
def assumed_state(self):
"""Assume device state until first device event sets state."""
return self._state is None
@property
def available(self):
"""Return True if entity is available."""
return self._available
@callback
def _availability_callback(self, availability):
"""Update availability state."""
self._available = availability
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register update callback."""
await super().async_added_to_hass()
# Remove temporary bogus entity_id if added
tmp_entity = TMP_ENTITY.format(self._device_id)
if (
tmp_entity
in self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id]
):
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][
self._device_id
].remove(tmp_entity)
# Register id and aliases
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][self._device_id].append(
self.entity_id
)
if self._group:
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][
self._device_id
].append(self.entity_id)
# aliases respond to both normal and group commands (allon/alloff)
if self._aliases:
for _id in self._aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
# group_aliases only respond to group commands (allon/alloff)
if self._group_aliases:
for _id in self._group_aliases:
self.hass.data[DATA_ENTITY_GROUP_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
# nogroup_aliases only respond to normal commands
if self._nogroup_aliases:
for _id in self._nogroup_aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND][_id].append(
self.entity_id
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_AVAILABILITY, self._availability_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_HANDLE_EVENT.format(self.entity_id),
self.handle_event_callback,
)
)
# Process the initial event now that the entity is created
if self._initial_event:
self.handle_event_callback(self._initial_event)
class RflinkCommand(RflinkDevice):
"""Singleton class to make Rflink command interface available to entities.
This class is to be inherited by every Entity class that is actionable
(switches/lights). It exposes the Rflink command interface for these
entities.
    The Rflink interface is managed at the class level and set during setup (and
reset on reconnect).
"""
# Keep repetition tasks to cancel if state is changed before repetitions
# are sent
_repetition_task = None
_protocol = None
@classmethod
def set_rflink_protocol(cls, protocol, wait_ack=None):
"""Set the Rflink asyncio protocol as a class variable."""
cls._protocol = protocol
if wait_ack is not None:
cls._wait_ack = wait_ack
@classmethod
def is_connected(cls):
"""Return connection status."""
return bool(cls._protocol)
@classmethod
async def send_command(cls, device_id, action):
"""Send device command to Rflink and wait for acknowledgement."""
return await cls._protocol.send_command_ack(device_id, action)
async def _async_handle_command(self, command, *args):
"""Do bookkeeping for command, send it to rflink and update state."""
self.cancel_queued_send_commands()
if command == "turn_on":
cmd = "on"
self._state = True
elif command == "turn_off":
cmd = "off"
self._state = False
elif command == "dim":
# convert brightness to rflink dim level
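            # (HA brightness is 0-255; dividing by 17 and truncating maps it to 0-15)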
cmd = str(int(args[0] / 17))
self._state = True
elif command == "toggle":
cmd = "on"
# if the state is unknown or false, it gets set as true
# if the state is true, it gets set as false
self._state = self._state in [None, False]
# Cover options for RFlink
elif command == "close_cover":
cmd = "DOWN"
self._state = False
elif command == "open_cover":
cmd = "UP"
self._state = True
elif command == "stop_cover":
cmd = "STOP"
self._state = True
# Send initial command and queue repetitions.
        # This allows the entity state to be updated quickly without having to
        # wait for all repetitions to be sent
await self._async_send_command(cmd, self._signal_repetitions)
# Update state of entity
self.async_write_ha_state()
def cancel_queued_send_commands(self):
"""Cancel queued signal repetition commands.
For example when user changed state while repetitions are still
queued for broadcast. Or when an incoming Rflink command (remote
switch) changes the state.
"""
# cancel any outstanding tasks from the previous state change
if self._repetition_task:
self._repetition_task.cancel()
async def _async_send_command(self, cmd, repetitions):
"""Send a command for device to Rflink gateway."""
_LOGGER.debug("Sending command: %s to Rflink device: %s", cmd, self._device_id)
if not self.is_connected():
raise HomeAssistantError("Cannot send command, not connected!")
if self._wait_ack:
# Puts command on outgoing buffer then waits for Rflink to confirm
            # the command has been sent out in the ether.
await self._protocol.send_command_ack(self._device_id, cmd)
else:
# Puts command on outgoing buffer and returns straight away.
# Rflink protocol/transport handles asynchronous writing of buffer
# to serial/tcp device. Does not wait for command send
# confirmation.
self._protocol.send_command(self._device_id, cmd)
if repetitions > 1:
self._repetition_task = self.hass.async_create_task(
self._async_send_command(cmd, repetitions - 1)
)
class SwitchableRflinkDevice(RflinkCommand, RestoreEntity):
"""Rflink entity which can switch on/off (eg: light, switch)."""
async def async_added_to_hass(self):
"""Restore RFLink device state (ON/OFF)."""
await super().async_added_to_hass()
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_ON
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event["command"]
if command in ["on", "allon"]:
self._state = True
elif command in ["off", "alloff"]:
self._state = False
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._async_handle_command("turn_on")
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._async_handle_command("turn_off")
|
from mock import sentinel, call, Mock
from arctic.auth import get_auth
from arctic.hooks import register_get_auth_hook, register_log_exception_hook, \
register_resolve_mongodb_hook, get_mongodb_uri, log_exception
def test_log_exception_hook():
logger = Mock()
register_log_exception_hook(logger)
log_exception(sentinel.fn, sentinel.e, sentinel.r)
assert logger.call_args_list == [call(sentinel.fn, sentinel.e, sentinel.r)]
def test_get_mongodb_uri_hook():
resolver = Mock()
resolver.return_value = sentinel.result
register_resolve_mongodb_hook(resolver)
assert get_mongodb_uri(sentinel.host) == sentinel.result
assert resolver.call_args_list == [call(sentinel.host)]
def test_get_auth_hook():
auth_resolver = Mock()
register_get_auth_hook(auth_resolver)
get_auth(sentinel.host, sentinel.app_name, sentinel.database_name)
assert auth_resolver.call_args_list == [call(sentinel.host, sentinel.app_name, sentinel.database_name)]
|
import logging
import voluptuous as vol
from homeassistant.components import light
from homeassistant.components.mqtt import ATTR_DISCOVERY_HASH
from homeassistant.components.mqtt.discovery import (
MQTT_DISCOVERY_NEW,
clear_discovery_hash,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .. import DOMAIN, PLATFORMS
from .schema import CONF_SCHEMA, MQTT_LIGHT_SCHEMA_SCHEMA
from .schema_basic import PLATFORM_SCHEMA_BASIC, async_setup_entity_basic
from .schema_json import PLATFORM_SCHEMA_JSON, async_setup_entity_json
from .schema_template import PLATFORM_SCHEMA_TEMPLATE, async_setup_entity_template
_LOGGER = logging.getLogger(__name__)
def validate_mqtt_light(value):
"""Validate MQTT light schema."""
schemas = {
"basic": PLATFORM_SCHEMA_BASIC,
"json": PLATFORM_SCHEMA_JSON,
"template": PLATFORM_SCHEMA_TEMPLATE,
}
return schemas[value[CONF_SCHEMA]](value)
PLATFORM_SCHEMA = vol.All(
MQTT_LIGHT_SCHEMA_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA), validate_mqtt_light
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT light through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT light dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT light."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(
hass, config, async_add_entities, config_entry, discovery_data
)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(light.DOMAIN, "mqtt"), async_discover
)
async def _async_setup_entity(
hass, config, async_add_entities, config_entry=None, discovery_data=None
):
"""Set up a MQTT Light."""
setup_entity = {
"basic": async_setup_entity_basic,
"json": async_setup_entity_json,
"template": async_setup_entity_template,
}
await setup_entity[config[CONF_SCHEMA]](
hass, config, async_add_entities, config_entry, discovery_data
)
|
from crispy_forms.helper import FormHelper
from django import forms
from django.utils.translation import gettext_lazy as _
from weblate.accounts.forms import EmailField
from weblate.wladmin.models import BackupService
class ActivateForm(forms.Form):
secret = forms.CharField(
label=_("Activation token"),
required=True,
max_length=400,
help_text=_(
"Please enter the activation token obtained when making the subscription."
),
)
class SSHAddForm(forms.Form):
host = forms.CharField(label=_("Hostname"), required=True, max_length=400)
port = forms.IntegerField(
label=_("Port"), required=False, min_value=1, max_value=65535
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_tag = False
self.helper.form_class = "form-inline"
self.helper.field_template = "bootstrap3/layout/inline_field.html"
class TestMailForm(forms.Form):
email = EmailField(
required=True,
label=_("E-mail"),
help_text=_("The test e-mail will be sent to this address."),
)
class BackupForm(forms.ModelForm):
class Meta:
model = BackupService
fields = ("repository",)
class UserSearchForm(forms.Form):
email = forms.CharField(label=_("User e-mail"))
class FontField(forms.CharField):
def __init__(self, **kwargs):
super().__init__(
help_text=_("Please provide font family suitable for CSS."), **kwargs
)
class ColorField(forms.CharField):
def __init__(self, **kwargs):
super().__init__(widget=forms.TextInput(attrs={"type": "color"}), **kwargs)
class AppearanceForm(forms.Form):
page_font = FontField(label=_("Page font"), required=False)
brand_font = FontField(label=_("Header font"), required=False)
header_color = ColorField(
label=("Navigation color"), required=False, initial="#2a3744"
)
navi_color = ColorField(
label=("Navigation color"), required=False, initial="#1fa385"
)
focus_color = ColorField(label=_("Focus color"), required=False, initial="#2eccaa")
hover_color = ColorField(label=_("Hover color"), required=False, initial="#144d3f")
hide_footer = forms.BooleanField(label=_("Hide page footer"), required=False)
enforce_hamburger = forms.BooleanField(
label=_("Always show hamburger menu"),
required=False,
help_text=_(
"Persistent navigational drop-down menu in the top right corner, "
"even if there is room for a full menu."
),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_tag = False
|
from contextvars import ContextVar
from ipaddress import ip_network
import logging
import os
import ssl
from traceback import extract_stack
from typing import Dict, Optional, cast
from aiohttp import web
from aiohttp.web_exceptions import HTTPMovedPermanently
import voluptuous as vol
from homeassistant.const import (
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
SERVER_PORT,
)
from homeassistant.core import Event, HomeAssistant
from homeassistant.helpers import storage
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from homeassistant.setup import ATTR_COMPONENT, EVENT_COMPONENT_LOADED
import homeassistant.util as hass_util
from homeassistant.util import ssl as ssl_util
from .auth import setup_auth
from .ban import setup_bans
from .const import KEY_AUTHENTICATED, KEY_HASS, KEY_HASS_USER # noqa: F401
from .cors import setup_cors
from .forwarded import async_setup_forwarded
from .request_context import setup_request_context
from .static import CACHE_HEADERS, CachingStaticResource
from .view import HomeAssistantView # noqa: F401
from .web_runner import HomeAssistantTCPSite
# mypy: allow-untyped-defs, no-check-untyped-defs
DOMAIN = "http"
CONF_SERVER_HOST = "server_host"
CONF_SERVER_PORT = "server_port"
CONF_BASE_URL = "base_url"
CONF_SSL_CERTIFICATE = "ssl_certificate"
CONF_SSL_PEER_CERTIFICATE = "ssl_peer_certificate"
CONF_SSL_KEY = "ssl_key"
CONF_CORS_ORIGINS = "cors_allowed_origins"
CONF_USE_X_FORWARDED_FOR = "use_x_forwarded_for"
CONF_TRUSTED_PROXIES = "trusted_proxies"
CONF_LOGIN_ATTEMPTS_THRESHOLD = "login_attempts_threshold"
CONF_IP_BAN_ENABLED = "ip_ban_enabled"
CONF_SSL_PROFILE = "ssl_profile"
SSL_MODERN = "modern"
SSL_INTERMEDIATE = "intermediate"
_LOGGER = logging.getLogger(__name__)
DEFAULT_DEVELOPMENT = "0"
# To be able to load custom cards.
DEFAULT_CORS = "https://cast.home-assistant.io"
NO_LOGIN_ATTEMPT_THRESHOLD = -1
MAX_CLIENT_SIZE: int = 1024 ** 2 * 16
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
HTTP_SCHEMA = vol.All(
cv.deprecated(CONF_BASE_URL),
vol.Schema(
{
vol.Optional(CONF_SERVER_HOST): vol.All(
cv.ensure_list, vol.Length(min=1), [cv.string]
),
vol.Optional(CONF_SERVER_PORT, default=SERVER_PORT): cv.port,
vol.Optional(CONF_BASE_URL): cv.string,
vol.Optional(CONF_SSL_CERTIFICATE): cv.isfile,
vol.Optional(CONF_SSL_PEER_CERTIFICATE): cv.isfile,
vol.Optional(CONF_SSL_KEY): cv.isfile,
vol.Optional(CONF_CORS_ORIGINS, default=[DEFAULT_CORS]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Inclusive(CONF_USE_X_FORWARDED_FOR, "proxy"): cv.boolean,
vol.Inclusive(CONF_TRUSTED_PROXIES, "proxy"): vol.All(
cv.ensure_list, [ip_network]
),
vol.Optional(
CONF_LOGIN_ATTEMPTS_THRESHOLD, default=NO_LOGIN_ATTEMPT_THRESHOLD
): vol.Any(cv.positive_int, NO_LOGIN_ATTEMPT_THRESHOLD),
vol.Optional(CONF_IP_BAN_ENABLED, default=True): cv.boolean,
vol.Optional(CONF_SSL_PROFILE, default=SSL_MODERN): vol.In(
[SSL_INTERMEDIATE, SSL_MODERN]
),
}
),
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: HTTP_SCHEMA}, extra=vol.ALLOW_EXTRA)
@bind_hass
async def async_get_last_config(hass: HomeAssistant) -> Optional[dict]:
"""Return the last known working config."""
store = storage.Store(hass, STORAGE_VERSION, STORAGE_KEY)
return cast(Optional[dict], await store.async_load())
class ApiConfig:
"""Configuration settings for API server."""
def __init__(
self,
local_ip: str,
host: str,
port: Optional[int] = SERVER_PORT,
use_ssl: bool = False,
) -> None:
"""Initialize a new API config object."""
self.local_ip = local_ip
self.host = host
self.port = port
self.use_ssl = use_ssl
host = host.rstrip("/")
if host.startswith(("http://", "https://")):
self.deprecated_base_url = host
elif use_ssl:
self.deprecated_base_url = f"https://{host}"
else:
self.deprecated_base_url = f"http://{host}"
if port is not None:
self.deprecated_base_url += f":{port}"
@property
def base_url(self) -> str:
"""Proxy property to find caller of this deprecated property."""
found_frame = None
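        # Walk the stack from the caller outwards and keep the first frame that
        # comes from an integration (custom or built-in), skipping webhook.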
for frame in reversed(extract_stack()[:-1]):
for path in ("custom_components/", "homeassistant/components/"):
try:
index = frame.filename.index(path)
# Skip webhook from the stack
if frame.filename[index:].startswith(
"homeassistant/components/webhook/"
):
continue
found_frame = frame
break
except ValueError:
continue
if found_frame is not None:
break
# Did not source from an integration? Hard error.
if found_frame is None:
raise RuntimeError(
"Detected use of deprecated `base_url` property in the Home Assistant core. Please report this issue."
)
# If a frame was found, it originated from an integration
if found_frame:
start = index + len(path)
end = found_frame.filename.index("/", start)
integration = found_frame.filename[start:end]
if path == "custom_components/":
extra = " to the custom component author"
else:
extra = ""
_LOGGER.warning(
"Detected use of deprecated `base_url` property, use `homeassistant.helpers.network.get_url` method instead. Please report issue%s for %s using this method at %s, line %s: %s",
extra,
integration,
found_frame.filename[index:],
found_frame.lineno,
found_frame.line.strip(),
)
return self.deprecated_base_url
async def async_setup(hass, config):
"""Set up the HTTP API and debug interface."""
conf = config.get(DOMAIN)
if conf is None:
conf = HTTP_SCHEMA({})
server_host = conf.get(CONF_SERVER_HOST)
server_port = conf[CONF_SERVER_PORT]
ssl_certificate = conf.get(CONF_SSL_CERTIFICATE)
ssl_peer_certificate = conf.get(CONF_SSL_PEER_CERTIFICATE)
ssl_key = conf.get(CONF_SSL_KEY)
cors_origins = conf[CONF_CORS_ORIGINS]
use_x_forwarded_for = conf.get(CONF_USE_X_FORWARDED_FOR, False)
trusted_proxies = conf.get(CONF_TRUSTED_PROXIES, [])
is_ban_enabled = conf[CONF_IP_BAN_ENABLED]
login_threshold = conf[CONF_LOGIN_ATTEMPTS_THRESHOLD]
ssl_profile = conf[CONF_SSL_PROFILE]
server = HomeAssistantHTTP(
hass,
server_host=server_host,
server_port=server_port,
ssl_certificate=ssl_certificate,
ssl_peer_certificate=ssl_peer_certificate,
ssl_key=ssl_key,
cors_origins=cors_origins,
use_x_forwarded_for=use_x_forwarded_for,
trusted_proxies=trusted_proxies,
login_threshold=login_threshold,
is_ban_enabled=is_ban_enabled,
ssl_profile=ssl_profile,
)
startup_listeners = []
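    # The HTTP server is started once the frontend has loaded (or at
    # EVENT_HOMEASSISTANT_START as a fallback); start_server first calls the
    # collected listeners, which unsubscribes both triggers.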
async def stop_server(event: Event) -> None:
"""Stop the server."""
await server.stop()
async def start_server(event: Event) -> None:
"""Start the server."""
for listener in startup_listeners:
listener()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_server)
await start_http_server_and_save_config(hass, dict(conf), server)
async def async_wait_frontend_load(event: Event) -> None:
"""Wait for the frontend to load."""
if event.data[ATTR_COMPONENT] != "frontend":
return
await start_server(event)
startup_listeners.append(
hass.bus.async_listen(EVENT_COMPONENT_LOADED, async_wait_frontend_load)
)
startup_listeners.append(
hass.bus.async_listen(EVENT_HOMEASSISTANT_START, start_server)
)
hass.http = server
host = conf.get(CONF_BASE_URL)
local_ip = await hass.async_add_executor_job(hass_util.get_local_ip)
if host:
port = None
elif server_host is not None:
        # Assume the first server host name provided is the API host
host = server_host[0]
port = server_port
else:
host = local_ip
port = server_port
hass.config.api = ApiConfig(local_ip, host, port, ssl_certificate is not None)
return True
class HomeAssistantHTTP:
"""HTTP server for Home Assistant."""
def __init__(
self,
hass,
ssl_certificate,
ssl_peer_certificate,
ssl_key,
server_host,
server_port,
cors_origins,
use_x_forwarded_for,
trusted_proxies,
login_threshold,
is_ban_enabled,
ssl_profile,
):
"""Initialize the HTTP Home Assistant server."""
app = self.app = web.Application(
middlewares=[], client_max_size=MAX_CLIENT_SIZE
)
app[KEY_HASS] = hass
# Order matters, forwarded middleware needs to go first.
# Only register middleware if `use_x_forwarded_for` is enabled
# and trusted proxies are provided
if use_x_forwarded_for and trusted_proxies:
async_setup_forwarded(app, trusted_proxies)
setup_request_context(app, current_request)
if is_ban_enabled:
setup_bans(hass, app, login_threshold)
setup_auth(hass, app)
setup_cors(app, cors_origins)
self.hass = hass
self.ssl_certificate = ssl_certificate
self.ssl_peer_certificate = ssl_peer_certificate
self.ssl_key = ssl_key
self.server_host = server_host
self.server_port = server_port
self.trusted_proxies = trusted_proxies
self.is_ban_enabled = is_ban_enabled
self.ssl_profile = ssl_profile
self._handler = None
self.runner = None
self.site = None
def register_view(self, view):
"""Register a view with the WSGI server.
The view argument must be a class that inherits from HomeAssistantView.
It is optional to instantiate it before registering; this method will
handle it either way.
"""
if isinstance(view, type):
# Instantiate the view, if needed
view = view()
if not hasattr(view, "url"):
class_name = view.__class__.__name__
raise AttributeError(f'{class_name} missing required attribute "url"')
if not hasattr(view, "name"):
class_name = view.__class__.__name__
raise AttributeError(f'{class_name} missing required attribute "name"')
view.register(self.app, self.app.router)
def register_redirect(self, url, redirect_to, *, redirect_exc=HTTPMovedPermanently):
"""Register a redirect with the server.
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax.
"""
async def redirect(request):
"""Redirect to location."""
raise redirect_exc(redirect_to)
self.app.router.add_route("GET", url, redirect)
def register_static_path(self, url_path, path, cache_headers=True):
"""Register a folder or file to serve as a static path."""
if os.path.isdir(path):
if cache_headers:
resource = CachingStaticResource
else:
resource = web.StaticResource
self.app.router.register_resource(resource(url_path, path))
return
if cache_headers:
async def serve_file(request):
"""Serve file from disk."""
return web.FileResponse(path, headers=CACHE_HEADERS)
else:
async def serve_file(request):
"""Serve file from disk."""
return web.FileResponse(path)
self.app.router.add_route("GET", url_path, serve_file)
async def start(self):
"""Start the aiohttp server."""
if self.ssl_certificate:
try:
if self.ssl_profile == SSL_INTERMEDIATE:
context = ssl_util.server_context_intermediate()
else:
context = ssl_util.server_context_modern()
await self.hass.async_add_executor_job(
context.load_cert_chain, self.ssl_certificate, self.ssl_key
)
except OSError as error:
_LOGGER.error(
"Could not read SSL certificate from %s: %s",
self.ssl_certificate,
error,
)
return
if self.ssl_peer_certificate:
context.verify_mode = ssl.CERT_REQUIRED
await self.hass.async_add_executor_job(
context.load_verify_locations, self.ssl_peer_certificate
)
else:
context = None
        # aiohttp freezes the app once it has started, so no routes can be
        # added afterwards. Home Assistant, however, can discover components
        # after boot, and registering their views would then raise a
        # RuntimeError. Work around this by preventing the router from being
        # frozen.
# pylint: disable=protected-access
self.app._router.freeze = lambda: None
self.runner = web.AppRunner(self.app)
await self.runner.setup()
self.site = HomeAssistantTCPSite(
self.runner, self.server_host, self.server_port, ssl_context=context
)
try:
await self.site.start()
except OSError as error:
_LOGGER.error(
"Failed to create HTTP server at port %d: %s", self.server_port, error
)
async def stop(self):
"""Stop the aiohttp server."""
await self.site.stop()
await self.runner.cleanup()
async def start_http_server_and_save_config(
hass: HomeAssistant, conf: Dict, server: HomeAssistantHTTP
) -> None:
"""Startup the http server and save the config."""
await server.start() # type: ignore
    # If setup was successful, store the HTTP settings for use in safe mode.
store = storage.Store(hass, STORAGE_VERSION, STORAGE_KEY)
if CONF_TRUSTED_PROXIES in conf:
conf[CONF_TRUSTED_PROXIES] = [
str(ip.network_address) for ip in conf[CONF_TRUSTED_PROXIES]
]
await store.async_save(conf)
current_request: ContextVar[Optional[web.Request]] = ContextVar(
"current_request", default=None
)
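# Usage sketch (illustrative only, not part of this module): after async_setup()
# has stored the server on hass.http, integrations register endpoints through the
# methods above. ExampleView below is a hypothetical HomeAssistantView subclass.
# >>> class ExampleView(HomeAssistantView):
# ...     url = "/api/example"
# ...     name = "api:example"
# ...     async def get(self, request):
# ...         return self.json({"status": "ok"})
# >>> hass.http.register_view(ExampleView)
# >>> hass.http.register_redirect("/old-path", "/api/example")
# >>> hass.http.register_static_path("/local", hass.config.path("www"))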
|
from rest_framework import serializers
from shop.serializers.bases import ProductSerializer
class ProductSummarySerializer(ProductSerializer):
"""
    Default serializer to create a summary from our Product model. This summary is then used to
    render various list views, such as the catalog, the cart, and the list of ordered items.
    If the Product model is polymorphic, this serializer shall expose the smallest common
    denominator of all product information.
"""
media = serializers.SerializerMethodField(
help_text="Returns a rendered HTML snippet containing a sample image among other elements",
)
caption = serializers.SerializerMethodField(
help_text="Returns the content from caption field if available",
)
class Meta(ProductSerializer.Meta):
fields = ['id', 'product_name', 'product_url', 'product_model', 'price', 'media', 'caption']
def get_media(self, product):
return self.render_html(product, 'media')
def get_caption(self, product):
return getattr(product, 'caption', None)
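# Usage sketch (assumption, not part of the original module): serializing one
# product for a list view; the product instance and the request placed in the
# serializer context are placeholders from the surrounding django-SHOP project.
# >>> serializer = ProductSummarySerializer(product, context={'request': request})
# >>> serializer.data  # includes 'media' and 'caption' among the Meta fields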
|
import filelock
import os
from chainer.dataset import download
from chainercv import utils
root = 'pfnet/chainercv/coco'
img_urls = {
'2014': {
'train': 'http://msvocds.blob.core.windows.net/coco2014/train2014.zip',
'val': 'http://msvocds.blob.core.windows.net/coco2014/val2014.zip'
},
'2017': {
'train': 'http://images.cocodataset.org/zips/train2017.zip',
'val': 'http://images.cocodataset.org/zips/val2017.zip'
}
}
instances_anno_urls = {
'2014': {
'train': 'http://msvocds.blob.core.windows.net/annotations-1-0-3/'
'instances_train-val2014.zip',
'val': 'http://msvocds.blob.core.windows.net/annotations-1-0-3/'
'instances_train-val2014.zip',
'valminusminival': 'https://dl.dropboxusercontent.com/s/'
's3tw5zcg7395368/instances_valminusminival2014.json.zip',
'minival': 'https://dl.dropboxusercontent.com/s/o43o90bna78omob/'
'instances_minival2014.json.zip'
},
'2017': {
'train': 'http://images.cocodataset.org/annotations/'
'annotations_trainval2017.zip',
'val': 'http://images.cocodataset.org/annotations/'
'annotations_trainval2017.zip'
}
}
panoptic_anno_url = 'http://images.cocodataset.org/annotations/' +\
'panoptic_annotations_trainval2017.zip'
def get_coco(split, img_split, year, mode):
# To support ChainerMN, the target directory should be locked.
with filelock.FileLock(os.path.join(download.get_dataset_directory(
'pfnet/chainercv/.lock'), 'coco.lock')):
data_dir = download.get_dataset_directory(root)
annos_root = os.path.join(data_dir, 'annotations')
img_root = os.path.join(data_dir, 'images')
created_img_root = os.path.join(
img_root, '{}{}'.format(img_split, year))
img_url = img_urls[year][img_split]
if mode == 'instances':
anno_url = instances_anno_urls[year][split]
anno_path = os.path.join(
annos_root, 'instances_{}{}.json'.format(split, year))
elif mode == 'panoptic':
anno_url = panoptic_anno_url
anno_path = os.path.join(
annos_root, 'panoptic_{}{}.json'.format(split, year))
if not os.path.exists(created_img_root):
download_file_path = utils.cached_download(img_url)
ext = os.path.splitext(img_url)[1]
utils.extractall(download_file_path, img_root, ext)
if not os.path.exists(anno_path):
download_file_path = utils.cached_download(anno_url)
ext = os.path.splitext(anno_url)[1]
if split in ['train', 'val']:
utils.extractall(download_file_path, data_dir, ext)
elif split in ['valminusminival', 'minival']:
utils.extractall(download_file_path, annos_root, ext)
if mode == 'panoptic':
pixelmap_path = os.path.join(
annos_root, 'panoptic_{}{}'.format(split, year))
if not os.path.exists(pixelmap_path):
utils.extractall(pixelmap_path + '.zip', annos_root, '.zip')
return data_dir
# How you can get the labels
# >>> from pycocotools.coco import COCO
# >>> coco = COCO('instances_train2014.json')
# >>> cat_dict = coco.loadCats(coco.getCatIds())
# >>> coco_bbox_label_names = [c['name'] for c in cat_dict]
coco_bbox_label_names = (
'person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'backpack',
'umbrella',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'dining table',
'toilet',
'tv',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush')
# annos = json.load(open('panoptic_val2017'))
# label_names = [cat['name'] for cat in annos['categories']]
coco_semantic_segmentation_label_names = (
'person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'backpack',
'umbrella',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'dining table',
'toilet',
'tv',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush',
'banner',
'blanket',
'bridge',
'cardboard',
'counter',
'curtain',
'door-stuff',
'floor-wood',
'flower',
'fruit',
'gravel',
'house',
'light',
'mirror-stuff',
'net',
'pillow',
'platform',
'playingfield',
'railroad',
'river',
'road',
'roof',
'sand',
'sea',
'shelf',
'snow',
'stairs',
'tent',
'towel',
'wall-brick',
'wall-stone',
'wall-tile',
'wall-wood',
'water-other',
'window-blind',
'window-other',
'tree-merged',
'fence-merged',
'ceiling-merged',
'sky-other-merged',
'cabinet-merged',
'table-merged',
'floor-other-merged',
'pavement-merged',
'mountain-merged',
'grass-merged',
'dirt-merged',
'paper-merged',
'food-other-merged',
'building-other-merged',
'rock-merged',
'wall-other-merged',
'rug-merged')
# https://raw.githubusercontent.com/cocodataset/panopticapi/master/
# panoptic_coco_categories.json
coco_semantic_segmentation_label_colors = (
(220, 20, 60),
(119, 11, 32),
(0, 0, 142),
(0, 0, 230),
(106, 0, 228),
(0, 60, 100),
(0, 80, 100),
(0, 0, 70),
(0, 0, 192),
(250, 170, 30),
(100, 170, 30),
(220, 220, 0),
(175, 116, 175),
(250, 0, 30),
(165, 42, 42),
(255, 77, 255),
(0, 226, 252),
(182, 182, 255),
(0, 82, 0),
(120, 166, 157),
(110, 76, 0),
(174, 57, 255),
(199, 100, 0),
(72, 0, 118),
(255, 179, 240),
(0, 125, 92),
(209, 0, 151),
(188, 208, 182),
(0, 220, 176),
(255, 99, 164),
(92, 0, 73),
(133, 129, 255),
(78, 180, 255),
(0, 228, 0),
(174, 255, 243),
(45, 89, 255),
(134, 134, 103),
(145, 148, 174),
(255, 208, 186),
(197, 226, 255),
(171, 134, 1),
(109, 63, 54),
(207, 138, 255),
(151, 0, 95),
(9, 80, 61),
(84, 105, 51),
(74, 65, 105),
(166, 196, 102),
(208, 195, 210),
(255, 109, 65),
(0, 143, 149),
(179, 0, 194),
(209, 99, 106),
(5, 121, 0),
(227, 255, 205),
(147, 186, 208),
(153, 69, 1),
(3, 95, 161),
(163, 255, 0),
(119, 0, 170),
(0, 182, 199),
(0, 165, 120),
(183, 130, 88),
(95, 32, 0),
(130, 114, 135),
(110, 129, 133),
(166, 74, 118),
(219, 142, 185),
(79, 210, 114),
(178, 90, 62),
(65, 70, 15),
(127, 167, 115),
(59, 105, 106),
(142, 108, 45),
(196, 172, 0),
(95, 54, 80),
(128, 76, 255),
(201, 57, 1),
(246, 0, 122),
(191, 162, 208),
(255, 255, 128),
(147, 211, 203),
(150, 100, 100),
(168, 171, 172),
(146, 112, 198),
(210, 170, 100),
(92, 136, 89),
(218, 88, 184),
(241, 129, 0),
(217, 17, 255),
(124, 74, 181),
(70, 70, 70),
(255, 228, 255),
(154, 208, 0),
(193, 0, 92),
(76, 91, 113),
(255, 180, 195),
(106, 154, 176),
(230, 150, 140),
(60, 143, 255),
(128, 64, 128),
(92, 82, 55),
(254, 212, 124),
(73, 77, 174),
(255, 160, 98),
(255, 255, 255),
(104, 84, 109),
(169, 164, 131),
(225, 199, 255),
(137, 54, 74),
(135, 158, 223),
(7, 246, 231),
(107, 255, 200),
(58, 41, 149),
(183, 121, 142),
(255, 73, 97),
(107, 142, 35),
(190, 153, 153),
(146, 139, 141),
(70, 130, 180),
(134, 199, 156),
(209, 226, 140),
(96, 36, 108),
(96, 96, 96),
(64, 170, 64),
(152, 251, 152),
(208, 229, 228),
(206, 186, 171),
(152, 161, 64),
(116, 112, 0),
(0, 114, 143),
(102, 102, 156),
(250, 141, 255))
coco_instance_segmentation_label_names = coco_bbox_label_names
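# Usage sketch (illustrative): downloading the 2017 training images together with
# the instance annotations; the call blocks while fetching and extracting, and
# returns the dataset directory managed by chainer's download module.
# >>> data_dir = get_coco('train', 'train', '2017', 'instances')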
|
import logging
import re
from absl import flags
from perfkitbenchmarker import container_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_virtual_machine
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
class EksCluster(container_service.KubernetesCluster):
"""Class representing an Elastic Kubernetes Service cluster."""
CLOUD = aws.CLOUD
def __init__(self, spec):
super(EksCluster, self).__init__(spec)
# EKS requires a region and optionally a list of zones.
# Interpret the zone as a comma separated list of zones or a region.
self.zones = sorted(FLAGS.eks_zones) or (self.zone and self.zone.split(','))
if not self.zones:
raise errors.Config.MissingOption(
'container_cluster.vm_spec.AWS.zone is required.')
elif len(self.zones) > 1:
self.region = util.GetRegionFromZone(self.zones[0])
self.zone = ','.join(self.zones)
elif util.IsRegion(self.zones[0]):
self.region = self.zone = self.zones[0]
self.zones = []
logging.info("Interpreting zone '%s' as a region", self.zone)
else:
raise errors.Config.InvalidValue(
'container_cluster.vm_spec.AWS.zone must either be a comma separated '
'list of zones or a region.')
self.cluster_version = FLAGS.container_cluster_version
# TODO(user) support setting boot disk type if EKS does.
self.boot_disk_type = self.vm_config.DEFAULT_ROOT_DISK_TYPE
def GetResourceMetadata(self):
"""Returns a dict containing metadata about the cluster.
Returns:
dict mapping string property key to value.
"""
result = super(EksCluster, self).GetResourceMetadata()
result['container_cluster_version'] = self.cluster_version
result['boot_disk_type'] = self.boot_disk_type
result['boot_disk_size'] = self.vm_config.boot_disk_size
return result
def _CreateDependencies(self):
"""Set up the ssh key."""
aws_virtual_machine.AwsKeyFileManager.ImportKeyfile(self.region)
def _DeleteDependencies(self):
"""Delete the ssh key."""
aws_virtual_machine.AwsKeyFileManager.DeleteKeyfile(self.region)
def _Create(self):
"""Creates the control plane and worker nodes."""
tags = util.MakeDefaultTags()
eksctl_flags = {
'kubeconfig': FLAGS.kubeconfig,
'managed': True,
'name': self.name,
'nodegroup-name': 'eks',
'nodes': self.num_nodes,
'nodes-min': self.min_nodes,
'nodes-max': self.max_nodes,
'node-type': self.vm_config.machine_type,
'node-volume-size': self.vm_config.boot_disk_size,
'region': self.region,
'tags': ','.join('{}={}'.format(k, v) for k, v in tags.items()),
'ssh-public-key':
aws_virtual_machine.AwsKeyFileManager.GetKeyNameForRun(),
'version': self.cluster_version,
# NAT mode uses an EIP.
'vpc-nat-mode': 'Disable',
'zones': ','.join(self.zones),
}
cmd = [FLAGS.eksctl, 'create', 'cluster'] + sorted(
'--{}={}'.format(k, v) for k, v in eksctl_flags.items() if v)
vm_util.IssueCommand(cmd, timeout=1800)
def _Delete(self):
"""Deletes the control plane and worker nodes."""
cmd = [FLAGS.eksctl, 'delete', 'cluster',
'--name', self.name,
'--region', self.region]
vm_util.IssueCommand(cmd, timeout=1800)
def _IsReady(self):
"""Returns True if the workers are ready, else False."""
get_cmd = [
FLAGS.kubectl, '--kubeconfig', FLAGS.kubeconfig,
'get', 'nodes',
]
stdout, _, _ = vm_util.IssueCommand(get_cmd)
ready_nodes = len(re.findall('Ready', stdout))
return ready_nodes >= self.min_nodes
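# For illustration (every value below is an assumption, not from a real config):
# with name='pkb-eks', two nodes and region 'us-east-1', _Create() issues a
# command roughly like
#   eksctl create cluster --kubeconfig=<path> --managed=True --name=pkb-eks \
#     --nodegroup-name=eks --nodes=2 --nodes-min=1 --nodes-max=2 \
#     --node-type=m5.large --region=us-east-1 --vpc-nat-mode=Disable \
#     --zones=us-east-1a,us-east-1b ...
# Flags whose value is falsy are dropped by the `if v` filter above, and the
# remaining flags are emitted in sorted order.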
|
import logging
from mycroftapi import MycroftAPI
from homeassistant.components.notify import BaseNotificationService
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the Mycroft notification service."""
return MycroftNotificationService(hass.data["mycroft"])
class MycroftNotificationService(BaseNotificationService):
"""The Mycroft Notification Service."""
def __init__(self, mycroft_ip):
"""Initialize the service."""
self.mycroft_ip = mycroft_ip
def send_message(self, message="", **kwargs):
"""Send a message mycroft to speak on instance."""
text = message
mycroft = MycroftAPI(self.mycroft_ip)
if mycroft is not None:
mycroft.speak_text(text)
else:
            _LOGGER.error("Could not reach this instance of Mycroft")
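# Usage sketch (illustrative; the IP address is a placeholder): the service can
# be exercised directly once the mycroft integration has stored the host IP.
# >>> service = MycroftNotificationService("192.168.1.17")
# >>> service.send_message("Hello from Home Assistant")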
|
import diamond.collector
import re
import time
RE_LSPACES = re.compile(r"^[\s\t]*")
RE_TSPACES = re.compile(r"[\s\t]*$")
class IcingaStatsCollector(diamond.collector.Collector):
"""
Collect Icinga Stats
"""
def collect(self):
"""
Collect and publish metrics
"""
stats = self.parse_stats_file(self.config["status_path"])
if len(stats) == 0:
return {}
elif "info" not in stats.keys():
return {}
elif "programstatus" not in stats.keys():
return {}
metrics = self.get_icinga_stats(stats["programstatus"])
if "hoststatus" in stats.keys():
metrics = dict(
metrics.items() + self.get_host_stats(
stats["hoststatus"]).items())
if "servicestatus" in stats.keys():
metrics = dict(
metrics.items() + self.get_svc_stats(
stats["servicestatus"]).items())
for metric in metrics.keys():
self.log.debug("Publishing '%s %s'.", metric, metrics[metric])
self.publish(metric, metrics[metric])
def get_default_config_help(self):
"""
Return help text
"""
config_help = super(IcingaStatsCollector,
self).get_default_config_help()
config_help.update({
"status_path": "Path to Icinga status.dat file"
})
return config_help
def get_default_config(self):
"""
Returns default settings for collector
"""
config = super(IcingaStatsCollector, self).get_default_config()
config.update({
"path": "icinga_stats",
"status_path": "/var/lib/icinga/status.dat",
})
return config
def get_icinga_stats(self, app_stats):
""" Extract metrics from 'programstatus' """
stats = {}
stats = dict(stats.items() + self._get_active_stats(app_stats).items())
stats = dict(stats.items() + self._get_cached_stats(app_stats).items())
stats = dict(
stats.items() + self._get_command_execution(app_stats).items())
stats = dict(
stats.items() + self._get_externalcmd_stats(app_stats).items())
stats["uptime"] = self._get_uptime(app_stats)
return stats
def parse_stats_file(self, file_name):
""" Read and parse given file_name, return config as a dictionary """
stats = {}
try:
with open(file_name, "r") as fhandle:
fbuffer = []
save_buffer = False
for line in fhandle:
line = line.rstrip("\n")
line = self._trim(line)
if line == "" or line.startswith("#"):
continue
elif line.endswith("{"):
save_buffer = True
fbuffer.append(line)
continue
elif line.endswith("}"):
tmp_dict = self._parse_config_buffer(fbuffer)
fbuffer = None
fbuffer = list()
if len(tmp_dict) < 1:
continue
if tmp_dict["_type"] == "info":
stats["info"] = tmp_dict
elif tmp_dict["_type"] == "programstatus":
stats["programstatus"] = tmp_dict
else:
entity_type = tmp_dict["_type"]
if entity_type not in stats.keys():
stats[entity_type] = []
stats[entity_type].append(tmp_dict)
continue
elif save_buffer is True:
fbuffer.append(line)
except Exception as exception:
self.log.info("Caught exception: %s", exception)
return stats
def get_host_stats(self, hosts):
""" Get statistics for Hosts, resp. Host entities """
stats = {
"hosts.total": 0,
"hosts.ok": 0,
"hosts.down": 0,
"hosts.unreachable": 0,
"hosts.flapping": 0,
"hosts.in_downtime": 0,
"hosts.checked": 0,
"hosts.scheduled": 0,
"hosts.active_checks": 0,
"hosts.passive_checks": 0,
}
for host in list(hosts):
if type(host) is not dict:
continue
sane = self._sanitize_entity(host)
stats["hosts.total"] += 1
stats["hosts.flapping"] += self._trans_binary(sane["flapping"])
stats[
"hosts.in_downtime"] += self._trans_dtime(sane["in_downtime"])
stats["hosts.checked"] += self._trans_binary(sane["checked"])
stats["hosts.scheduled"] += self._trans_binary(sane["scheduled"])
stats["hosts.active_checks"] += sane["active_checks"]
stats["hosts.passive_checks"] += sane["passive_checks"]
state_key = self._trans_host_state(sane["state"])
stats["hosts.%s" % (state_key)] += 1
return stats
def get_svc_stats(self, svcs):
""" Get statistics for Services, resp. Service entities """
stats = {
"services.total": 0,
"services.ok": 0,
"services.warning": 0,
"services.critical": 0,
"services.unknown": 0,
"services.flapping": 0,
"services.in_downtime": 0,
"services.checked": 0,
"services.scheduled": 0,
"services.active_checks": 0,
"services.passive_checks": 0,
}
for svc in svcs:
if type(svc) is not dict:
continue
sane = self._sanitize_entity(svc)
stats["services.total"] += 1
stats["services.flapping"] += self._trans_binary(sane["flapping"])
stats["services.in_downtime"] += self._trans_dtime(
sane["in_downtime"])
stats["services.checked"] += self._trans_binary(sane["checked"])
stats[
"services.scheduled"] += self._trans_binary(sane["scheduled"])
stats["services.active_checks"] += sane["active_checks"]
stats["services.passive_checks"] += sane["passive_checks"]
state_key = self._trans_svc_state(sane["state"])
stats["services.%s" % (state_key)] += 1
return stats
def _convert_tripplet(self, tripplet):
""" Turn '10,178,528' into tuple of integers """
splitted = tripplet.split(",")
if len(splitted) != 3:
self.log.debug("Got %i chunks, expected 3.", len(splitted))
return (0, 0, 0)
try:
x01 = int(splitted[0])
x05 = int(splitted[1])
x15 = int(splitted[2])
except Exception as exception:
self.log.warning("Caught exception: %s", exception)
x01 = 0
x05 = 0
x15 = 0
return (x01, x05, x15)
def _get_active_stats(self, app_stats):
"""
Process:
* active_scheduled_host_check_stats
* active_scheduled_service_check_stats
* active_ondemand_host_check_stats
* active_ondemand_service_check_stats
"""
stats = {}
app_keys = [
"active_scheduled_host_check_stats",
"active_scheduled_service_check_stats",
"active_ondemand_host_check_stats",
"active_ondemand_service_check_stats",
]
for app_key in app_keys:
if app_key not in app_stats.keys():
continue
splitted = app_key.split("_")
metric = "%ss.%s_%s" % (splitted[2], splitted[0], splitted[1])
(x01, x05, x15) = self._convert_tripplet(app_stats[app_key])
stats["%s.01" % (metric)] = x01
stats["%s.05" % (metric)] = x05
stats["%s.15" % (metric)] = x15
return stats
def _get_cached_stats(self, app_stats):
"""
Process:
* cached_host_check_stats
* cached_service_check_stats
"""
stats = {}
app_keys = [
"cached_host_check_stats",
"cached_service_check_stats",
]
for app_key in app_keys:
if app_key not in app_stats.keys():
continue
(x01, x05, x15) = self._convert_tripplet(app_stats[app_key])
scratch = app_key.split("_")[1]
stats["%ss.cached.01" % (scratch)] = x01
stats["%ss.cached.05" % (scratch)] = x05
stats["%ss.cached.15" % (scratch)] = x15
return stats
def _get_command_execution(self, app_stats):
"""
Process:
* serial_host_check_stats
* parallel_host_check_stats
"""
stats = {}
app_keys = [
"serial_host_check_stats",
"parallel_host_check_stats",
]
for app_key in app_keys:
if app_key not in app_stats.keys():
continue
scratch = app_key.split("_")[0]
(x01, x05, x15) = self._convert_tripplet(app_stats[app_key])
stats["hosts.executed_%s.01" % scratch] = x01
stats["hosts.executed_%s.05" % scratch] = x05
stats["hosts.executed_%s.15" % scratch] = x15
return stats
def _get_externalcmd_stats(self, app_stats):
"""
Process:
* high_external_command_buffer_slots
* total_external_command_buffer_slots
* used_external_command_buffer_slots
* external_command_stats=
"""
khigh = "high_external_command_buffer_slots"
ktotal = "total_external_command_buffer_slots"
kused = "used_external_command_buffer_slots"
kstats = "external_command_stats"
aliases = {
khigh: "external_command.buffer_high",
ktotal: "external_command.buffer_total",
kused: "external_command.buffer_used",
"x01": "external_command.01",
"x05": "external_command.05",
"x15": "external_command.15",
}
stats = {}
if khigh in app_stats.keys() and str(app_stats[khigh]).isdigit():
key = aliases[khigh]
stats[key] = int(app_stats[khigh])
        if ktotal in app_stats.keys() and str(app_stats[ktotal]).isdigit():
            key = aliases[ktotal]
            stats[key] = int(app_stats[ktotal])
        if kused in app_stats.keys() and str(app_stats[kused]).isdigit():
            key = aliases[kused]
            stats[key] = int(app_stats[kused])
        if kstats in app_stats.keys():
            (x01, x05, x15) = self._convert_tripplet(app_stats[kstats])
            stats[aliases["x01"]] = x01
            stats[aliases["x05"]] = x05
            stats[aliases["x15"]] = x15
return stats
def _get_uptime(self, app_stats):
""" Return Icinga's uptime """
if "program_start" not in app_stats.keys():
return 0
if not app_stats["program_start"].isdigit():
return 0
uptime = int(time.time()) - int(app_stats["program_start"])
if uptime < 0:
return 0
return uptime
def _parse_config_buffer(self, fbuffer):
""" Parse buffered chunk of config into dict """
if len(fbuffer) < 1 or not fbuffer[0].endswith("{"):
# Invalid input
return {}
entity = {}
entity_type = fbuffer.pop(0)
entity_type = entity_type.rstrip("{")
entity["_type"] = self._trim(entity_type)
for chunk in fbuffer:
splitted = chunk.split("=")
if len(splitted) < 2:
# If there is no '=', then it's an invalid line
continue
key = self._trim(splitted[0])
value = self._trim("=".join(splitted[1:]))
entity[key] = value
return entity
def _sanitize_entity(self, entity):
"""
Make given entity 'sane' for further use.
"""
aliases = {
"current_state": "state",
"is_flapping": "flapping",
"scheduled_downtime_depth": "in_downtime",
"has_been_checked": "checked",
"should_be_scheduled": "scheduled",
"active_checks_enabled": "active_checks",
"passive_checks_enabled": "passive_checks",
}
sane = {}
for akey in aliases.keys():
sane[aliases[akey]] = None
aliases_keys = aliases.keys()
for key in entity.keys():
if key not in aliases_keys:
continue
alias = aliases[key]
try:
sane[alias] = int(entity[key])
except Exception:
sane[alias] = None
if sane["active_checks"] not in [0, 1]:
sane["active_checks"] = 0
elif sane["active_checks"] == 1:
sane["passive_checks"] = 0
if sane["passive_checks"] not in [0, 1]:
sane["passive_checks"] = 0
return sane
def _trans_binary(self, value):
""" Given value is expected to be a binary - 0/1 """
try:
conv = int(value)
except ValueError:
return 0
if conv not in [0, 1]:
return 0
return conv
def _trans_dtime(self, value):
""" Translate scheduled downtime """
try:
conv = int(value)
except ValueError:
return 0
if conv < 1:
return 0
return conv
def _trans_host_state(self, state):
""" Translate/validate Host state """
if state == 0:
return "ok"
elif state == 1:
return "down"
else:
return "unreachable"
def _trans_svc_state(self, state):
""" Translate/validate Service state """
if state == 0:
return "ok"
elif state == 1:
return "warning"
elif state == 2:
return "critical"
else:
return "unknown"
def _trim(self, somestr):
""" Trim left-right given string """
tmp = RE_LSPACES.sub("", somestr)
tmp = RE_TSPACES.sub("", tmp)
return str(tmp)
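# For reference, a sketch (assumption) of the status.dat layout this collector
# parses: the file is a sequence of blocks, each converted into a dict by
# _parse_config_buffer() with "_type" holding the block name.
#
#   programstatus {
#       program_start=1409512800
#       active_scheduled_host_check_stats=10,178,528
#       used_external_command_buffer_slots=0
#       }
#
#   hoststatus {
#       current_state=0
#       is_flapping=0
#       has_been_checked=1
#       }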
|
from datetime import timedelta
import ipaddress
import logging
from pysmarty import Smarty
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import track_time_interval
DOMAIN = "smarty"
DATA_SMARTY = "smarty"
SMARTY_NAME = "Smarty"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_NAME, default=SMARTY_NAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
RPM = "rpm"
SIGNAL_UPDATE_SMARTY = "smarty_update"
def setup(hass, config):
"""Set up the smarty environment."""
conf = config[DOMAIN]
host = conf[CONF_HOST]
name = conf[CONF_NAME]
_LOGGER.debug("Name: %s, host: %s", name, host)
smarty = Smarty(host=host)
hass.data[DOMAIN] = {"api": smarty, "name": name}
# Initial update
smarty.update()
# Load platforms
discovery.load_platform(hass, "fan", DOMAIN, {}, config)
discovery.load_platform(hass, "sensor", DOMAIN, {}, config)
discovery.load_platform(hass, "binary_sensor", DOMAIN, {}, config)
def poll_device_update(event_time):
"""Update Smarty device."""
_LOGGER.debug("Updating Smarty device...")
if smarty.update():
_LOGGER.debug("Update success...")
dispatcher_send(hass, SIGNAL_UPDATE_SMARTY)
else:
_LOGGER.debug("Update failed...")
track_time_interval(hass, poll_device_update, timedelta(seconds=30))
return True
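# Example configuration.yaml entry matching CONFIG_SCHEMA above (the host value
# is a placeholder and must be an IP address):
#
#   smarty:
#     host: 192.168.1.50
#     name: Smarty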
|
import copy
import numbers
import pint
import six.moves.copyreg
class _UnitRegistry(pint.UnitRegistry):
"""A customized pint.UnitRegistry used by PerfKit Benchmarker.
Supports 'K' prefix for 'kilo' (in addition to pint's default 'k').
Supports '%' as a unit, whereas pint tokenizes it as an operator.
"""
def __init__(self):
super(_UnitRegistry, self).__init__()
self.define('K- = 1000')
self.define('% = [percent] = percent')
def parse_expression(self, input_string, *args, **kwargs):
result = super(_UnitRegistry, self).parse_expression(input_string, *args,
**kwargs)
if (isinstance(result, numbers.Number) and
input_string.strip().endswith('%')):
return self.Quantity(result, self.Unit('percent'))
return result
# Pint recommends one global UnitRegistry for the entire program, so
# we create it here.
_UNIT_REGISTRY = _UnitRegistry()
# The Pint documentation suggests serializing Quantities as tuples. We
# supply serializers to make sure that Quantities are unpickled with
# our UnitRegistry, where we have added the K- unit.
def _PickleQuantity(q):
return _UnPickleQuantity, (q.to_tuple(),)
def _UnPickleQuantity(inp):
return _UNIT_REGISTRY.Quantity.from_tuple(inp)
six.moves.copyreg.pickle(_UNIT_REGISTRY.Quantity, _PickleQuantity)
# The following monkey-patch has been submitted to upstream Pint as
# pull request 357.
# TODO: once that PR is merged, get rid of this workaround.
def _unit_deepcopy(self, memo):
ret = self.__class__(copy.deepcopy(self._units))
return ret
_UNIT_REGISTRY.Unit.__deepcopy__ = _unit_deepcopy
# Fix for https://github.com/hgrecco/pint/issues/372
_UNIT_REGISTRY.Unit.__ne__ = lambda self, other: not self.__eq__(other)
# Forward access to pint's classes and functions.
DimensionalityError = pint.DimensionalityError
ParseExpression = _UNIT_REGISTRY.parse_expression
Quantity = _UNIT_REGISTRY.Quantity
Unit = _UNIT_REGISTRY.Unit
byte = Unit('byte')
bit = Unit('bit')
second = Unit('second')
percent = Unit('percent')
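# Usage sketch (illustrative; exact behaviour and reprs depend on the installed
# pint version):
# >>> ParseExpression('10Kbit')     # the extra 'K' prefix defined above is accepted
# >>> ParseExpression('10.5%')      # trailing '%' becomes the custom percent unit
# >>> ParseExpression('8 bit').to(byte)   # convert via the forwarded Unit objects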
|
from collections import OrderedDict
import hmac
from typing import Any, Dict, Optional, cast
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
USER_SCHEMA = vol.Schema(
{
vol.Required("username"): str,
vol.Required("password"): str,
vol.Optional("name"): str,
}
)
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
{vol.Required("users"): [USER_SCHEMA]}, extra=vol.PREVENT_EXTRA
)
class InvalidAuthError(HomeAssistantError):
"""Raised when submitting invalid authentication."""
@AUTH_PROVIDERS.register("insecure_example")
class ExampleAuthProvider(AuthProvider):
"""Example auth provider based on hardcoded usernames and passwords."""
async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
"""Return a flow to login."""
return ExampleLoginFlow(self)
@callback
def async_validate_login(self, username: str, password: str) -> None:
"""Validate a username and password."""
user = None
# Compare all users to avoid timing attacks.
for usr in self.config["users"]:
if hmac.compare_digest(
username.encode("utf-8"), usr["username"].encode("utf-8")
):
user = usr
if user is None:
# Do one more compare to make timing the same as if user was found.
hmac.compare_digest(password.encode("utf-8"), password.encode("utf-8"))
raise InvalidAuthError
if not hmac.compare_digest(
user["password"].encode("utf-8"), password.encode("utf-8")
):
raise InvalidAuthError
async def async_get_or_create_credentials(
self, flow_result: Dict[str, str]
) -> Credentials:
"""Get credentials based on the flow result."""
username = flow_result["username"]
for credential in await self.async_credentials():
if credential.data["username"] == username:
return credential
# Create new credentials.
return self.async_create_credentials({"username": username})
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""Return extra user metadata for credentials.
Will be used to populate info when creating a new user.
"""
username = credentials.data["username"]
name = None
for user in self.config["users"]:
if user["username"] == username:
name = user.get("name")
break
return UserMeta(name=name, is_active=True)
class ExampleLoginFlow(LoginFlow):
"""Handler for the login flow."""
async def async_step_init(
self, user_input: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""Handle the step of the form."""
errors = {}
if user_input is not None:
try:
cast(ExampleAuthProvider, self._auth_provider).async_validate_login(
user_input["username"], user_input["password"]
)
except InvalidAuthError:
errors["base"] = "invalid_auth"
if not errors:
user_input.pop("password")
return await self.async_finish(user_input)
schema: Dict[str, type] = OrderedDict()
schema["username"] = str
schema["password"] = str
return self.async_show_form(
step_id="init", data_schema=vol.Schema(schema), errors=errors
)
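# Example configuration sketch matching CONFIG_SCHEMA above (placeholders only);
# in Home Assistant this lives under `homeassistant: auth_providers:`.
#
#   - type: insecure_example
#     users:
#       - username: demo
#         password: not-a-real-password
#         name: Demo User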
|
from datetime import timedelta
import logging
import socket
import ssl
import librouteros
from librouteros.login import plain as login_plain, token as login_token
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_VERIFY_SSL
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from .const import (
ARP,
ATTR_DEVICE_TRACKER,
ATTR_FIRMWARE,
ATTR_MODEL,
ATTR_SERIAL_NUMBER,
CAPSMAN,
CONF_ARP_PING,
CONF_DETECTION_TIME,
CONF_FORCE_DHCP,
DEFAULT_DETECTION_TIME,
DHCP,
IDENTITY,
INFO,
IS_CAPSMAN,
IS_WIRELESS,
MIKROTIK_SERVICES,
NAME,
WIRELESS,
)
from .errors import CannotConnect, LoginError
_LOGGER = logging.getLogger(__name__)
class Device:
"""Represents a network device."""
def __init__(self, mac, params):
"""Initialize the network device."""
self._mac = mac
self._params = params
self._last_seen = None
self._attrs = {}
self._wireless_params = None
@property
def name(self):
"""Return device name."""
return self._params.get("host-name", self.mac)
@property
def mac(self):
"""Return device mac."""
return self._mac
@property
def last_seen(self):
"""Return device last seen."""
return self._last_seen
@property
def attrs(self):
"""Return device attributes."""
attr_data = self._wireless_params if self._wireless_params else self._params
for attr in ATTR_DEVICE_TRACKER:
if attr in attr_data:
self._attrs[slugify(attr)] = attr_data[attr]
self._attrs["ip_address"] = self._params.get("active-address")
return self._attrs
def update(self, wireless_params=None, params=None, active=False):
"""Update Device params."""
if wireless_params:
self._wireless_params = wireless_params
if params:
self._params = params
if active:
self._last_seen = dt_util.utcnow()
class MikrotikData:
"""Handle all communication with the Mikrotik API."""
def __init__(self, hass, config_entry, api):
"""Initialize the Mikrotik Client."""
self.hass = hass
self.config_entry = config_entry
self.api = api
self._host = self.config_entry.data[CONF_HOST]
self.all_devices = {}
self.devices = {}
self.available = True
self.support_capsman = False
self.support_wireless = False
self.hostname = None
self.model = None
self.firmware = None
self.serial_number = None
@staticmethod
def load_mac(devices=None):
"""Load dictionary using MAC address as key."""
if not devices:
return None
mac_devices = {}
for device in devices:
if "mac-address" in device:
mac = device["mac-address"]
mac_devices[mac] = device
return mac_devices
@property
def arp_enabled(self):
"""Return arp_ping option setting."""
return self.config_entry.options[CONF_ARP_PING]
@property
def force_dhcp(self):
"""Return force_dhcp option setting."""
return self.config_entry.options[CONF_FORCE_DHCP]
def get_info(self, param):
"""Return device model name."""
cmd = IDENTITY if param == NAME else INFO
data = self.command(MIKROTIK_SERVICES[cmd])
return (
data[0].get(param) # pylint: disable=unsubscriptable-object
if data
else None
)
def get_hub_details(self):
"""Get Hub info."""
self.hostname = self.get_info(NAME)
self.model = self.get_info(ATTR_MODEL)
self.firmware = self.get_info(ATTR_FIRMWARE)
self.serial_number = self.get_info(ATTR_SERIAL_NUMBER)
self.support_capsman = bool(self.command(MIKROTIK_SERVICES[IS_CAPSMAN]))
self.support_wireless = bool(self.command(MIKROTIK_SERVICES[IS_WIRELESS]))
def connect_to_hub(self):
"""Connect to hub."""
try:
self.api = get_api(self.hass, self.config_entry.data)
self.available = True
return True
except (LoginError, CannotConnect):
self.available = False
return False
def get_list_from_interface(self, interface):
"""Get devices from interface."""
result = self.command(MIKROTIK_SERVICES[interface])
return self.load_mac(result) if result else {}
def restore_device(self, mac):
"""Restore a missing device after restart."""
self.devices[mac] = Device(mac, self.all_devices[mac])
def update_devices(self):
"""Get list of devices with latest status."""
arp_devices = {}
device_list = {}
wireless_devices = {}
try:
self.all_devices = self.get_list_from_interface(DHCP)
if self.support_capsman:
_LOGGER.debug("Hub is a CAPSman manager")
device_list = wireless_devices = self.get_list_from_interface(CAPSMAN)
elif self.support_wireless:
_LOGGER.debug("Hub supports wireless Interface")
device_list = wireless_devices = self.get_list_from_interface(WIRELESS)
if not device_list or self.force_dhcp:
device_list = self.all_devices
_LOGGER.debug("Falling back to DHCP for scanning devices")
if self.arp_enabled:
_LOGGER.debug("Using arp-ping to check devices")
arp_devices = self.get_list_from_interface(ARP)
# get new hub firmware version if updated
self.firmware = self.get_info(ATTR_FIRMWARE)
except (CannotConnect, socket.timeout, OSError):
self.available = False
return
if not device_list:
return
for mac, params in device_list.items():
if mac not in self.devices:
self.devices[mac] = Device(mac, self.all_devices.get(mac, {}))
else:
self.devices[mac].update(params=self.all_devices.get(mac, {}))
if mac in wireless_devices:
# if wireless is supported then wireless_params are params
self.devices[mac].update(
wireless_params=wireless_devices[mac], active=True
)
continue
# for wired devices or when forcing dhcp check for active-address
if not params.get("active-address"):
self.devices[mac].update(active=False)
continue
# ping check the rest of active devices if arp ping is enabled
active = True
if self.arp_enabled and mac in arp_devices:
active = self.do_arp_ping(
params.get("active-address"), arp_devices[mac].get("interface")
)
self.devices[mac].update(active=active)
def do_arp_ping(self, ip_address, interface):
"""Attempt to arp ping MAC address via interface."""
_LOGGER.debug("pinging - %s", ip_address)
params = {
"arp-ping": "yes",
"interval": "100ms",
"count": 3,
"interface": interface,
"address": ip_address,
}
cmd = "/ping"
data = self.command(cmd, params)
if data is not None:
status = 0
for result in data: # pylint: disable=not-an-iterable
if "status" in result:
status += 1
if status == len(data):
_LOGGER.debug(
"Mikrotik %s - %s arp_ping timed out", ip_address, interface
)
return False
return True
def command(self, cmd, params=None):
"""Retrieve data from Mikrotik API."""
try:
_LOGGER.info("Running command %s", cmd)
if params:
response = list(self.api(cmd=cmd, **params))
else:
response = list(self.api(cmd=cmd))
except (
librouteros.exceptions.ConnectionClosed,
OSError,
socket.timeout,
) as api_error:
_LOGGER.error("Mikrotik %s connection error %s", self._host, api_error)
raise CannotConnect from api_error
except librouteros.exceptions.ProtocolError as api_error:
_LOGGER.warning(
"Mikrotik %s failed to retrieve data. cmd=[%s] Error: %s",
self._host,
cmd,
api_error,
)
return None
return response if response else None
def update(self):
"""Update device_tracker from Mikrotik API."""
if not self.available or not self.api:
if not self.connect_to_hub():
return
_LOGGER.debug("updating network devices for host: %s", self._host)
self.update_devices()
class MikrotikHub:
"""Mikrotik Hub Object."""
def __init__(self, hass, config_entry):
"""Initialize the Mikrotik Client."""
self.hass = hass
self.config_entry = config_entry
self._mk_data = None
self.progress = None
@property
def host(self):
"""Return the host of this hub."""
return self.config_entry.data[CONF_HOST]
@property
def hostname(self):
"""Return the hostname of the hub."""
return self._mk_data.hostname
@property
def model(self):
"""Return the model of the hub."""
return self._mk_data.model
@property
def firmware(self):
"""Return the firmware of the hub."""
return self._mk_data.firmware
@property
def serial_num(self):
"""Return the serial number of the hub."""
return self._mk_data.serial_number
@property
def available(self):
"""Return if the hub is connected."""
return self._mk_data.available
@property
def option_detection_time(self):
"""Config entry option defining number of seconds from last seen to away."""
return timedelta(seconds=self.config_entry.options[CONF_DETECTION_TIME])
@property
def signal_update(self):
"""Event specific per Mikrotik entry to signal updates."""
return f"mikrotik-update-{self.host}"
@property
def api(self):
"""Represent Mikrotik data object."""
return self._mk_data
async def async_add_options(self):
"""Populate default options for Mikrotik."""
if not self.config_entry.options:
data = dict(self.config_entry.data)
options = {
CONF_ARP_PING: data.pop(CONF_ARP_PING, False),
CONF_FORCE_DHCP: data.pop(CONF_FORCE_DHCP, False),
CONF_DETECTION_TIME: data.pop(
CONF_DETECTION_TIME, DEFAULT_DETECTION_TIME
),
}
self.hass.config_entries.async_update_entry(
self.config_entry, data=data, options=options
)
async def request_update(self):
"""Request an update."""
if self.progress is not None:
await self.progress
return
self.progress = self.hass.async_create_task(self.async_update())
await self.progress
self.progress = None
async def async_update(self):
"""Update Mikrotik devices information."""
await self.hass.async_add_executor_job(self._mk_data.update)
async_dispatcher_send(self.hass, self.signal_update)
async def async_setup(self):
"""Set up the Mikrotik hub."""
try:
api = await self.hass.async_add_executor_job(
get_api, self.hass, self.config_entry.data
)
except CannotConnect as api_error:
raise ConfigEntryNotReady from api_error
except LoginError:
return False
self._mk_data = MikrotikData(self.hass, self.config_entry, api)
await self.async_add_options()
await self.hass.async_add_executor_job(self._mk_data.get_hub_details)
await self.hass.async_add_executor_job(self._mk_data.update)
self.hass.async_create_task(
self.hass.config_entries.async_forward_entry_setup(
self.config_entry, "device_tracker"
)
)
return True
def get_api(hass, entry):
"""Connect to Mikrotik hub."""
_LOGGER.debug("Connecting to Mikrotik hub [%s]", entry[CONF_HOST])
_login_method = (login_plain, login_token)
kwargs = {"login_methods": _login_method, "port": entry["port"], "encoding": "utf8"}
if entry[CONF_VERIFY_SSL]:
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
_ssl_wrapper = ssl_context.wrap_socket
kwargs["ssl_wrapper"] = _ssl_wrapper
try:
api = librouteros.connect(
entry[CONF_HOST],
entry[CONF_USERNAME],
entry[CONF_PASSWORD],
**kwargs,
)
_LOGGER.debug("Connected to %s successfully", entry[CONF_HOST])
return api
except (
librouteros.exceptions.LibRouterosError,
OSError,
socket.timeout,
) as api_error:
_LOGGER.error("Mikrotik %s error: %s", entry[CONF_HOST], api_error)
if "invalid user name or password" in str(api_error):
raise LoginError from api_error
raise CannotConnect from api_error
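# Sketch of the config entry data get_api() expects (keys come from the constants
# imported above; every value here is a placeholder):
# >>> entry_data = {
# ...     CONF_HOST: "192.168.88.1",
# ...     CONF_USERNAME: "admin",
# ...     CONF_PASSWORD: "secret",
# ...     CONF_VERIFY_SSL: False,
# ...     "port": 8728,
# ... }
# >>> api = get_api(hass, entry_data)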
|