from itertools import count
from unittest.mock import Mock
from case import ContextMock
from kombu.transport import base
from kombu.utils import json
def PromiseMock(*args, **kwargs):
m = Mock(*args, **kwargs)
def on_throw(exc=None, *args, **kwargs):
if exc:
raise exc
raise
m.throw.side_effect = on_throw
m.set_error_state.side_effect = on_throw
m.throw1.side_effect = on_throw
return m
class MockPool:
def __init__(self, value=None):
self.value = value or ContextMock()
def acquire(self, **kwargs):
return self.value
class Message(base.Message):
def __init__(self, *args, **kwargs):
self.throw_decode_error = kwargs.get('throw_decode_error', False)
super().__init__(*args, **kwargs)
def decode(self):
if self.throw_decode_error:
raise ValueError("can't decode message")
return super().decode()
class Channel(base.StdChannel):
open = True
throw_decode_error = False
_ids = count(1)
def __init__(self, connection):
self.connection = connection
self.called = []
self.deliveries = count(1)
self.to_deliver = []
self.events = {'basic_return': set()}
self.channel_id = next(self._ids)
def _called(self, name):
self.called.append(name)
def __contains__(self, key):
return key in self.called
def exchange_declare(self, *args, **kwargs):
self._called('exchange_declare')
def prepare_message(self, body, priority=0, content_type=None,
content_encoding=None, headers=None, properties={}):
self._called('prepare_message')
return {'body': body,
'headers': headers,
'properties': properties,
'priority': priority,
'content_type': content_type,
'content_encoding': content_encoding}
def basic_publish(self, message, exchange='', routing_key='',
mandatory=False, immediate=False, **kwargs):
self._called('basic_publish')
return message, exchange, routing_key
def exchange_delete(self, *args, **kwargs):
self._called('exchange_delete')
def queue_declare(self, *args, **kwargs):
self._called('queue_declare')
def queue_bind(self, *args, **kwargs):
self._called('queue_bind')
def queue_unbind(self, *args, **kwargs):
self._called('queue_unbind')
def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs):
self._called('queue_delete')
def basic_get(self, *args, **kwargs):
self._called('basic_get')
try:
return self.to_deliver.pop()
except IndexError:
pass
def queue_purge(self, *args, **kwargs):
self._called('queue_purge')
def basic_consume(self, *args, **kwargs):
self._called('basic_consume')
def basic_cancel(self, *args, **kwargs):
self._called('basic_cancel')
def basic_ack(self, *args, **kwargs):
self._called('basic_ack')
def basic_recover(self, requeue=False):
self._called('basic_recover')
def exchange_bind(self, *args, **kwargs):
self._called('exchange_bind')
def exchange_unbind(self, *args, **kwargs):
self._called('exchange_unbind')
def close(self):
self._called('close')
def message_to_python(self, message, *args, **kwargs):
self._called('message_to_python')
return Message(body=json.dumps(message),
channel=self,
delivery_tag=next(self.deliveries),
throw_decode_error=self.throw_decode_error,
content_type='application/json',
content_encoding='utf-8')
def flow(self, active):
self._called('flow')
def basic_reject(self, delivery_tag, requeue=False):
if requeue:
return self._called('basic_reject:requeue')
return self._called('basic_reject')
def basic_qos(self, prefetch_size=0, prefetch_count=0,
apply_global=False):
self._called('basic_qos')
class Connection:
connected = True
def __init__(self, client):
self.client = client
def channel(self):
return Channel(self)
class Transport(base.Transport):
def establish_connection(self):
return Connection(self.client)
def create_channel(self, connection):
return connection.channel()
def drain_events(self, connection, **kwargs):
return 'event'
def close_connection(self, connection):
connection.connected = False
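# Illustrative sketch (not part of the original mocks): exercising the mock
# transport end to end without a broker. The client argument is an arbitrary
# placeholder; nothing here touches the network.
def _example_mock_transport_usage():
    transport = Transport(client=None)
    connection = transport.establish_connection()
    channel = transport.create_channel(connection)
    # prepare_message/basic_publish record their names in channel.called,
    # which is what Channel.__contains__ checks.
    message = channel.prepare_message('hello world')
    channel.basic_publish(message, exchange='ex', routing_key='rk')
    assert 'prepare_message' in channel and 'basic_publish' in channel
    transport.close_connection(connection)
    assert connection.connected is False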
|
from lxml import etree
try:
from urlparse import urljoin
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.parse import urljoin
from urllib.request import urlopen
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
XINCLUDE_ITER_TAG = XINCLUDE + "*"
# Default limit on the inclusion depth, to reduce the risk of malicious
# content explosion; callers can override it via the max_depth argument.
DEFAULT_MAX_INCLUSION_DEPTH = 6
##
# Fatal include error.
class FatalIncludeError(etree.LxmlSyntaxError):
pass
class LimitedRecursiveIncludeError(FatalIncludeError):
pass
##
# ET compatible default loader.
# This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding.
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
file = open(href, 'rb')
if parse == "xml":
data = etree.parse(file).getroot()
else:
data = file.read()
if not encoding:
encoding = 'utf-8'
data = data.decode(encoding)
file.close()
return data
##
# Default loader used by lxml.etree - handles custom resolvers properly
#
def _lxml_default_loader(href, parse, encoding=None, parser=None):
if parse == "xml":
data = etree.parse(href, parser).getroot()
else:
if "://" in href:
f = urlopen(href)
else:
f = open(href, 'rb')
data = f.read()
f.close()
if not encoding:
encoding = 'utf-8'
data = data.decode(encoding)
return data
##
# Wrapper for ET compatibility - drops the parser
def _wrap_et_loader(loader):
def load(href, parse, encoding=None, parser=None):
return loader(href, parse, encoding)
return load
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @param base_url The base URL of the original file, to resolve
# relative include file references.
# @param max_depth The maximum number of recursive inclusions.
# Limited to reduce the risk of malicious content explosion.
# Pass None to disable the limitation.
# @throws LimitedRecursiveIncludeError If the {@link max_depth} was exceeded.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
# @returns the node or its replacement if it was an XInclude node
def include(elem, loader=None, base_url=None,
max_depth=DEFAULT_MAX_INCLUSION_DEPTH):
if max_depth is None:
max_depth = -1
elif max_depth < 0:
raise ValueError("expected non-negative depth or None for 'max_depth', got %r" % max_depth)
if base_url is None:
if hasattr(elem, 'getroot'):
tree = elem
elem = elem.getroot()
else:
tree = elem.getroottree()
if hasattr(tree, 'docinfo'):
base_url = tree.docinfo.URL
elif hasattr(elem, 'getroot'):
elem = elem.getroot()
_include(elem, loader, base_url, max_depth)
def _include(elem, loader=None, base_url=None,
max_depth=DEFAULT_MAX_INCLUSION_DEPTH, _parent_hrefs=None):
if loader is not None:
load_include = _wrap_et_loader(loader)
else:
load_include = _lxml_default_loader
if _parent_hrefs is None:
_parent_hrefs = set()
parser = elem.getroottree().parser
include_elements = list(
elem.iter(XINCLUDE_ITER_TAG))
for e in include_elements:
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = urljoin(base_url, e.get("href"))
parse = e.get("parse", "xml")
parent = e.getparent()
if parse == "xml":
if href in _parent_hrefs:
raise FatalIncludeError(
"recursive include of %r detected" % href
)
if max_depth == 0:
raise LimitedRecursiveIncludeError(
"maximum xinclude depth reached when including file %s" % href)
node = load_include(href, parse, parser=parser)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = _include(node, loader, href, max_depth - 1, {href} | _parent_hrefs)
if e.tail:
node.tail = (node.tail or "") + e.tail
if parent is None:
return node # replaced the root node!
parent.replace(e, node)
elif parse == "text":
text = load_include(href, parse, encoding=e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
predecessor = e.getprevious()
if predecessor is not None:
predecessor.tail = (predecessor.tail or "") + text
elif parent is None:
return text # replaced the root node!
else:
parent.text = (parent.text or "") + text + (e.tail or "")
parent.remove(e)
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
parent = e.getparent()
if parent is not None and parent.tag != XINCLUDE_INCLUDE:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
raise FatalIncludeError(
"Invalid element found in XInclude namespace (%r)" % e.tag
)
return elem
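# Illustrative usage sketch: expanding XInclude directives with a custom loader
# that refuses remote references. The file names below are placeholders, not
# part of this module.
def _example_include_usage():
    def local_only_loader(href, parse, encoding=None):
        if "://" in href:
            raise IOError("remote includes are disabled: %r" % href)
        return default_loader(href, parse, encoding)

    tree = etree.parse("document.xml")   # placeholder input document
    include(tree, loader=local_only_loader, max_depth=3)
    tree.write("document.expanded.xml")  # placeholder output path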
|
import difflib
import filecmp
import fnmatch
import os
import os.path
import re
import sys
import xml.etree.ElementTree
from coverage import env
from tests.coveragetest import TESTS_DIR
def gold_path(path):
"""Get a path to a gold file for comparison."""
return os.path.join(TESTS_DIR, "gold", path)
# "rU" was deprecated in 3.4
READ_MODE = "rU" if env.PYVERSION < (3, 4) else "r"
def versioned_directory(d):
"""Find a subdirectory of d specific to the Python version.
For example, on Python 3.6.4 rc 1, it returns the first of these
directories that exists::
d/3.6.4.candidate.1
d/3.6.4.candidate
d/3.6.4
d/3.6
d/3
d
Returns: a string, the path to an existing directory.
"""
ver_parts = list(map(str, sys.version_info))
for nparts in range(len(ver_parts), -1, -1):
version = ".".join(ver_parts[:nparts])
subdir = os.path.join(d, version)
if os.path.exists(subdir):
return subdir
raise Exception("Directory missing: {}".format(d)) # pragma: only failure
def compare(
expected_dir, actual_dir, file_pattern=None,
actual_extra=False, scrubs=None,
):
"""Compare files matching `file_pattern` in `expected_dir` and `actual_dir`.
A version-specific subdirectory of `expected_dir` will be used if
it exists.
`actual_extra` true means `actual_dir` can have extra files in it
without triggering an assertion.
`scrubs` is a list of pairs: regexes to find and replace to scrub the
files of unimportant differences.
An assertion will be raised if the directories fail one of their
matches.
"""
expected_dir = versioned_directory(expected_dir)
dc = filecmp.dircmp(expected_dir, actual_dir)
diff_files = fnmatch_list(dc.diff_files, file_pattern)
expected_only = fnmatch_list(dc.left_only, file_pattern)
actual_only = fnmatch_list(dc.right_only, file_pattern)
# filecmp only compares in binary mode, but we want text mode. So
# look through the list of different files, and compare them
# ourselves.
text_diff = []
for f in diff_files:
expected_file = os.path.join(expected_dir, f)
with open(expected_file, READ_MODE) as fobj:
expected = fobj.read()
if expected_file.endswith(".xml"):
expected = canonicalize_xml(expected)
actual_file = os.path.join(actual_dir, f)
with open(actual_file, READ_MODE) as fobj:
actual = fobj.read()
if actual_file.endswith(".xml"):
actual = canonicalize_xml(actual)
if scrubs:
expected = scrub(expected, scrubs)
actual = scrub(actual, scrubs)
if expected != actual: # pragma: only failure
text_diff.append('%s != %s' % (expected_file, actual_file))
expected = expected.splitlines()
actual = actual.splitlines()
print(":::: diff {!r} and {!r}".format(expected_file, actual_file))
print("\n".join(difflib.Differ().compare(expected, actual)))
print(":::: end diff {!r} and {!r}".format(expected_file, actual_file))
assert not text_diff, "Files differ: %s" % '\n'.join(text_diff)
assert not expected_only, "Files in %s only: %s" % (expected_dir, expected_only)
if not actual_extra:
assert not actual_only, "Files in %s only: %s" % (actual_dir, actual_only)
def canonicalize_xml(xtext):
"""Canonicalize some XML text."""
root = xml.etree.ElementTree.fromstring(xtext)
for node in root.iter():
node.attrib = dict(sorted(node.items()))
xtext = xml.etree.ElementTree.tostring(root)
return xtext.decode('utf8')
def contains(filename, *strlist):
"""Check that the file contains all of a list of strings.
An assert will be raised if one of the arguments in `strlist` is
missing in `filename`.
"""
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
assert s in text, "Missing content in %s: %r" % (filename, s)
def contains_any(filename, *strlist):
"""Check that the file contains at least one of a list of strings.
An assert will be raised if none of the arguments in `strlist` is in
`filename`.
"""
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
if s in text:
return
assert False, ( # pragma: only failure
"Missing content in %s: %r [1 of %d]" % (filename, strlist[0], len(strlist),)
)
def doesnt_contain(filename, *strlist):
"""Check that the file contains none of a list of strings.
An assert will be raised if any of the strings in `strlist` appears in
`filename`.
"""
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
assert s not in text, "Forbidden content in %s: %r" % (filename, s)
# Helpers
def fnmatch_list(files, file_pattern):
"""Filter the list of `files` to only those that match `file_pattern`.
If `file_pattern` is None, then return the entire list of files.
Returns a list of the filtered files.
"""
if file_pattern:
files = [f for f in files if fnmatch.fnmatch(f, file_pattern)]
return files
def scrub(strdata, scrubs):
"""Scrub uninteresting data from the payload in `strdata`.
`scrubs` is a list of (find, replace) pairs of regexes that are used on
`strdata`. A string is returned.
"""
for rgx_find, rgx_replace in scrubs:
strdata = re.sub(rgx_find, rgx_replace, strdata)
return strdata
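# Illustrative sketch of how these helpers are typically combined in a gold
# test. The directories, file pattern and scrub regexes are placeholders.
def _example_gold_comparison():
    actual_dir = "out/example_html"
    compare(
        gold_path("html/example"), actual_dir,
        file_pattern="*.html",
        scrubs=[(r"created at \d{4}-\d{2}-\d{2}", "created at DATE")],
    )
    contains(os.path.join(actual_dir, "index.html"), "coverage report")
    doesnt_contain(os.path.join(actual_dir, "index.html"), "Traceback")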
|
import datetime
from homeassistant.components import geo_location
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.components.ign_sismologia.geo_location import (
ATTR_EXTERNAL_ID,
ATTR_IMAGE_URL,
ATTR_MAGNITUDE,
ATTR_PUBLICATION_DATE,
ATTR_REGION,
ATTR_TITLE,
SCAN_INTERVAL,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
EVENT_HOMEASSISTANT_START,
LENGTH_KILOMETERS,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import MagicMock, call, patch
from tests.common import assert_setup_component, async_fire_time_changed
CONFIG = {geo_location.DOMAIN: [{"platform": "ign_sismologia", CONF_RADIUS: 200}]}
CONFIG_WITH_CUSTOM_LOCATION = {
geo_location.DOMAIN: [
{
"platform": "ign_sismologia",
CONF_RADIUS: 200,
CONF_LATITUDE: 40.4,
CONF_LONGITUDE: -3.7,
}
]
}
def _generate_mock_feed_entry(
external_id,
title,
distance_to_home,
coordinates,
region=None,
attribution=None,
published=None,
magnitude=None,
image_url=None,
):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
feed_entry.region = region
feed_entry.attribution = attribution
feed_entry.published = published
feed_entry.magnitude = magnitude
feed_entry.image_url = image_url
return feed_entry
async def test_setup(hass):
"""Test the general setup of the platform."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234",
"Title 1",
15.5,
(38.0, -3.0),
region="Region 1",
attribution="Attribution 1",
published=datetime.datetime(2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc),
magnitude=5.7,
image_url="http://image.url/map.jpg",
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (38.1, -3.1), magnitude=4.6
)
mock_entry_3 = _generate_mock_feed_entry(
"3456", "Title 3", 25.5, (38.2, -3.2), region="Region 3"
)
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (38.3, -3.3))
# Patching 'utcnow' to gain more control over the timed update.
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"georss_ign_sismologia_client.IgnSismologiaFeed"
) as mock_feed:
mock_feed.return_value.update.return_value = (
"OK",
[mock_entry_1, mock_entry_2, mock_entry_3],
)
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
await hass.async_block_till_done()
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
state = hass.states.get("geo_location.m_5_7_region_1")
assert state is not None
assert state.name == "M 5.7 - Region 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234",
ATTR_LATITUDE: 38.0,
ATTR_LONGITUDE: -3.0,
ATTR_FRIENDLY_NAME: "M 5.7 - Region 1",
ATTR_TITLE: "Title 1",
ATTR_REGION: "Region 1",
ATTR_ATTRIBUTION: "Attribution 1",
ATTR_PUBLICATION_DATE: datetime.datetime(
2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
),
ATTR_IMAGE_URL: "http://image.url/map.jpg",
ATTR_MAGNITUDE: 5.7,
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "ign_sismologia",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 15.5
state = hass.states.get("geo_location.m_4_6")
assert state is not None
assert state.name == "M 4.6"
assert state.attributes == {
ATTR_EXTERNAL_ID: "2345",
ATTR_LATITUDE: 38.1,
ATTR_LONGITUDE: -3.1,
ATTR_FRIENDLY_NAME: "M 4.6",
ATTR_TITLE: "Title 2",
ATTR_MAGNITUDE: 4.6,
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "ign_sismologia",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 20.5
state = hass.states.get("geo_location.region_3")
assert state is not None
assert state.name == "Region 3"
assert state.attributes == {
ATTR_EXTERNAL_ID: "3456",
ATTR_LATITUDE: 38.2,
ATTR_LONGITUDE: -3.2,
ATTR_FRIENDLY_NAME: "Region 3",
ATTR_TITLE: "Title 3",
ATTR_REGION: "Region 3",
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "ign_sismologia",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 25.5
# Simulate an update - one existing, one new entry,
# one outdated entry
mock_feed.return_value.update.return_value = (
"OK",
[mock_entry_1, mock_entry_4, mock_entry_3],
)
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed.return_value.update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
            # Simulate a failed update - no data, removes all entities
mock_feed.return_value.update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 0
async def test_setup_with_custom_location(hass):
"""Test the setup with a custom location."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 20.5, (38.1, -3.1))
with patch("georss_ign_sismologia_client.IgnSismologiaFeed") as mock_feed:
mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(
hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION
)
await hass.async_block_till_done()
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert mock_feed.call_args == call(
(40.4, -3.7), filter_minimum_magnitude=0.0, filter_radius=200.0
)
|
import os
import re
import pytest
import sh
import shutil
from molecule import util
from ..conftest import (
change_dir_to,
needs_inspec,
needs_rubocop,
)
@pytest.fixture
def scenario_to_test(request):
return request.param
@pytest.fixture
def scenario_name(request):
try:
return request.param
except AttributeError:
return None
@pytest.fixture
def driver_name(request):
return request.param
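# Minimal, self-contained sketch (not one of the real scenarios) of how the
# indirect parametrization below works: each value in the parametrize list is
# handed to the matching fixture via request.param instead of being passed to
# the test argument directly, which is why the fixtures above simply return
# request.param.
#
#     @pytest.fixture
#     def color(request):
#         return request.param
#
#     @pytest.mark.parametrize('color', ['red'], indirect=['color'])
#     def test_color(color):
#         assert color == 'red'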
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('side_effect', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_side_effect(scenario_to_test, with_scenario, scenario_name):
options = {
'driver_name': 'docker',
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('cleanup', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_cleanup(scenario_to_test, with_scenario, scenario_name):
options = {
'driver_name': 'docker',
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@needs_inspec
@needs_rubocop
def test_command_init_role_inspec(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
options = {
'role_name': 'test-init',
'verifier_name': 'inspec',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
sh.molecule('test')
def test_command_init_scenario_inspec(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
options = {
'role_name': 'test-init',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'test-scenario')
options = {
'scenario_name': 'test-scenario',
'role_name': 'test-init',
'verifier_name': 'inspec',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
def test_command_init_role_goss(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
options = {
'role_name': 'test-init',
'verifier_name': 'goss',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
sh.molecule('test')
def test_command_init_scenario_goss(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-init')
options = {
'role_name': 'test-init',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'test-scenario')
options = {
'scenario_name': 'test-scenario',
'role_name': 'test-init',
'verifier_name': 'goss',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
def test_command_init_scenario_with_invalid_role_raises(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-role')
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
options = {
'scenario_name': 'default',
'role_name': 'invalid-role-name',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ("ERROR: The role 'invalid-role-name' not found. "
'Please choose the proper role name.')
assert msg in str(e.value.stderr)
def test_command_init_scenario_as_default_without_default_scenario(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-role')
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'default')
shutil.rmtree(scenario_directory)
options = {
'scenario_name': 'default',
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd)
assert os.path.isdir(scenario_directory)
# NOTE(retr0h): Molecule does not allow the creation of a role without
# a default scenario. This tests roles not created by a newer Molecule.
def test_command_init_scenario_without_default_scenario_raises(temp_dir):
role_directory = os.path.join(temp_dir.strpath, 'test-role')
options = {
'role_name': 'test-role',
}
cmd = sh.molecule.bake('init', 'role', **options)
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
molecule_directory = pytest.helpers.molecule_directory()
scenario_directory = os.path.join(molecule_directory, 'default')
shutil.rmtree(scenario_directory)
options = {
'scenario_name': 'invalid-role-name',
'role_name': 'test-role',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('init', 'scenario', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ('The default scenario not found. Please create a scenario '
"named 'default' first.")
assert msg in str(e.value.stderr)
def test_command_init_role_with_template(temp_dir):
role_name = 'test-init'
role_directory = os.path.join(temp_dir.strpath, role_name)
options = {
'url': 'https://github.com/ansible/molecule-cookiecutter.git',
'no_input': True,
'role_name': role_name,
}
cmd = sh.molecule.bake('init', 'template', **options)
pytest.helpers.run_command(cmd)
pytest.helpers.metadata_lint_update(role_directory)
with change_dir_to(role_directory):
sh.molecule('test')
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('overrride_driver', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_test_overrides_driver(scenario_to_test, with_scenario,
driver_name, scenario_name):
options = {
'driver_name': driver_name,
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('driver/docker', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_test_builds_local_molecule_image(
scenario_to_test, with_scenario, scenario_name, driver_name):
try:
cmd = sh.docker.bake('rmi', 'molecule_local/centos:latest', '--force')
pytest.helpers.run_command(cmd)
except sh.ErrorReturnCode:
pass
pytest.helpers.test(driver_name, scenario_name)
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('test_destroy_strategy', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_test_destroy_strategy_always(scenario_to_test, with_scenario,
scenario_name, driver_name):
options = {
'destroy': 'always',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ("An error occurred during the test sequence action: 'lint'. "
'Cleaning up.')
assert msg in str(e.value.stdout)
assert 'Action: \'cleanup\'' in str(e.value.stdout)
assert 'PLAY [Destroy]' in str(e.value.stdout)
assert 0 != e.value.exit_code
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('test_destroy_strategy', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_test_destroy_strategy_never(scenario_to_test, with_scenario,
scenario_name, driver_name):
options = {
'destroy': 'never',
}
with pytest.raises(sh.ErrorReturnCode) as e:
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd, log=False)
msg = ("An error occurred during the test sequence action: 'lint'. "
'Cleaning up.')
assert msg not in str(e.value.stdout)
assert 0 != e.value.exit_code
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('host_group_vars', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_host_group_vars(scenario_to_test, with_scenario, scenario_name):
options = {
'all': True,
}
cmd = sh.molecule.bake('test', **options)
out = pytest.helpers.run_command(cmd, log=False)
out = util.strip_ansi_escape(out.stdout.decode('utf-8'))
assert re.search(r'\[instance\].*?ok: \[instance\]', out, re.DOTALL)
assert re.search(r'\[example\].*?ok: \[instance\]', out, re.DOTALL)
assert re.search(r'\[example_1\].*?ok: \[instance\]', out, re.DOTALL)
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('idempotence', 'docker', 'raises'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_idempotence_raises(scenario_to_test, with_scenario, scenario_name):
options = {
'scenario_name': scenario_name,
'all': True,
'destroy': 'never',
}
cmd = sh.molecule.bake('test', **options)
with pytest.raises(sh.ErrorReturnCode_2) as e:
pytest.helpers.run_command(cmd)
assert 2 == e.value.exit_code
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('interpolation', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_interpolation(scenario_to_test, with_scenario, scenario_name):
# Modify global environment so cleanup inherits our environment.
options = {
'all': True,
}
env = os.environ
env.update({
'DRIVER_NAME': 'docker',
'INSTANCE_NAME': 'instance',
})
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd, env=env)
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('verifier', 'docker', 'testinfra'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_verify_testinfra(scenario_to_test, with_scenario,
scenario_name):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('verify', **options)
pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('verifier', 'docker', 'goss'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_verify_goss(scenario_to_test, with_scenario, scenario_name):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('verify', **options)
pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('verifier', 'docker', 'inspec'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_command_verify_inspec(scenario_to_test, with_scenario, scenario_name):
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('create', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('converge', **options)
pytest.helpers.run_command(cmd)
options = {
'scenario_name': scenario_name,
}
cmd = sh.molecule.bake('verify', **options)
pytest.helpers.run_command(cmd)
@pytest.mark.parametrize(
'scenario_to_test, driver_name, scenario_name', [
('plugins', 'docker', 'default'),
],
indirect=[
'scenario_to_test',
'driver_name',
'scenario_name',
])
def test_plugins(scenario_to_test, with_scenario, scenario_name):
options = {
'scenario_name': scenario_name,
'all': True,
}
cmd = sh.molecule.bake('test', **options)
pytest.helpers.run_command(cmd)
|
import unittest
import numpy as np
import numpy.testing as np_test
from pgmpy.inference import DBNInference
from pgmpy.models import DynamicBayesianNetwork
from pgmpy.factors.discrete import TabularCPD
# The sample Dynamic Bayesian Network is taken from the following paper:
# Novel recursive inference algorithm for discrete dynamic Bayesian networks
# Huange Wang, Xiaoguang Gao, Chris P. Thompson
class TestDBNInference(unittest.TestCase):
def setUp(self):
dbn_1 = DynamicBayesianNetwork()
dbn_1.add_edges_from(
[(("Z", 0), ("X", 0)), (("Z", 0), ("Y", 0)), (("Z", 0), ("Z", 1))]
)
cpd_start_z_1 = TabularCPD(("Z", 0), 2, [[0.8], [0.2]])
cpd_x_1 = TabularCPD(("X", 0), 2, [[0.9, 0.6], [0.1, 0.4]], [("Z", 0)], [2])
cpd_y_1 = TabularCPD(("Y", 0), 2, [[0.7, 0.2], [0.3, 0.8]], [("Z", 0)], [2])
cpd_trans_z_1 = TabularCPD(
("Z", 1), 2, [[0.9, 0.1], [0.1, 0.9]], [("Z", 0)], [2]
)
dbn_1.add_cpds(cpd_start_z_1, cpd_trans_z_1, cpd_x_1, cpd_y_1)
dbn_1.initialize_initial_state()
self.dbn_inference_1 = DBNInference(dbn_1)
dbn_2 = DynamicBayesianNetwork()
dbn_2.add_edges_from(
[(("Z", 0), ("X", 0)), (("X", 0), ("Y", 0)), (("Z", 0), ("Z", 1))]
)
cpd_start_z_2 = TabularCPD(("Z", 0), 2, [[0.5], [0.5]])
cpd_x_2 = TabularCPD(("X", 0), 2, [[0.6, 0.9], [0.4, 0.1]], [("Z", 0)], [2])
cpd_y_2 = TabularCPD(("Y", 0), 2, [[0.2, 0.3], [0.8, 0.7]], [("X", 0)], [2])
cpd_z_2 = TabularCPD(("Z", 1), 2, [[0.4, 0.7], [0.6, 0.3]], [("Z", 0)], [2])
dbn_2.add_cpds(cpd_x_2, cpd_y_2, cpd_z_2, cpd_start_z_2)
dbn_2.initialize_initial_state()
self.dbn_inference_2 = DBNInference(dbn_2)
def test_forward_inf_single_variable(self):
query_result = self.dbn_inference_1.forward_inference([("X", 0)])
np_test.assert_array_almost_equal(
query_result[("X", 0)].values, np.array([0.84, 0.16])
)
def test_forward_inf_multiple_variable(self):
query_result = self.dbn_inference_1.forward_inference([("X", 0), ("Y", 0)])
np_test.assert_array_almost_equal(
query_result[("X", 0)].values, np.array([0.84, 0.16])
)
np_test.assert_array_almost_equal(
query_result[("Y", 0)].values, np.array([0.6, 0.4])
)
def test_forward_inf_single_variable_with_evidence(self):
query_result = self.dbn_inference_1.forward_inference(
[("Z", 1)], {("Y", 0): 0, ("Y", 1): 0}
)
np_test.assert_array_almost_equal(
query_result[("Z", 1)].values, np.array([0.95080214, 0.04919786])
)
query_result = self.dbn_inference_2.forward_inference(
[("X", 2)], {("Y", 0): 1, ("Y", 1): 0, ("Y", 2): 1}
)
np_test.assert_array_almost_equal(
query_result[("X", 2)].values, np.array([0.76738736, 0.23261264])
)
def test_forward_inf_multiple_variable_with_evidence(self):
query_result = self.dbn_inference_1.forward_inference(
[("Z", 1), ("X", 1)], {("Y", 0): 0, ("Y", 1): 0}
)
np_test.assert_array_almost_equal(
query_result[("Z", 1)].values, np.array([0.95080214, 0.04919786])
)
np_test.assert_array_almost_equal(
query_result[("X", 1)].values, np.array([0.88524064, 0.11475936])
)
def test_backward_inf_single_variable(self):
query_result = self.dbn_inference_2.backward_inference([("Y", 0)])
np_test.assert_array_almost_equal(
query_result[("Y", 0)].values, np.array([0.225, 0.775])
)
def test_backward_inf_multiple_variables(self):
query_result = self.dbn_inference_2.backward_inference([("X", 0), ("Y", 0)])
np_test.assert_array_almost_equal(
query_result[("X", 0)].values, np.array([0.75, 0.25])
)
np_test.assert_array_almost_equal(
query_result[("Y", 0)].values, np.array([0.225, 0.775])
)
def test_backward_inf_single_variable_with_evidence(self):
query_result = self.dbn_inference_2.backward_inference(
[("X", 0)], {("Y", 0): 0, ("Y", 1): 1, ("Y", 2): 1}
)
np_test.assert_array_almost_equal(
query_result[("X", 0)].values, np.array([0.66594382, 0.33405618])
)
query_result = self.dbn_inference_1.backward_inference(
[("Z", 1)], {("Y", 0): 0, ("Y", 1): 0, ("Y", 2): 0}
)
np_test.assert_array_almost_equal(
query_result[("Z", 1)].values, np.array([0.98048698, 0.01951302])
)
def test_backward_inf_multiple_variables_with_evidence(self):
query_result = self.dbn_inference_2.backward_inference(
[("X", 0), ("X", 1)], {("Y", 0): 0, ("Y", 1): 1, ("Y", 2): 1}
)
np_test.assert_array_almost_equal(
query_result[("X", 0)].values, np.array([0.66594382, 0.33405618])
)
np_test.assert_array_almost_equal(
query_result[("X", 1)].values, np.array([0.7621772, 0.2378228])
)
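# Worked check (illustrative, not part of the original test class) of the first
# forward-inference assertion above: P(X0) = sum_z P(X0 | Z0=z) P(Z0=z).
def _example_forward_marginal():
    p_z0 = np.array([0.8, 0.2])
    # Rows index X0, columns index Z0, matching cpd_x_1 in setUp.
    p_x_given_z = np.array([[0.9, 0.6], [0.1, 0.4]])
    p_x0 = p_x_given_z @ p_z0  # [0.9*0.8 + 0.6*0.2, 0.1*0.8 + 0.4*0.2]
    np_test.assert_array_almost_equal(p_x0, np.array([0.84, 0.16]))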
|
import copy
import json
from hatasmota.const import (
CONF_MAC,
CONF_OFFLINE,
CONF_ONLINE,
CONF_PREFIX,
PREFIX_CMND,
PREFIX_TELE,
)
from hatasmota.utils import (
config_get_state_offline,
config_get_state_online,
get_topic_tele_state,
get_topic_tele_will,
)
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import STATE_UNAVAILABLE
from tests.async_mock import ANY
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
"ip": "192.168.15.10",
"dn": "Tasmota",
"fn": ["Test", "Beer", "Milk", "Four", None],
"hn": "tasmota_49A3BC-0956",
"lk": 1, # RGB + white channels linked to a single light
"mac": "00000049A3BC",
"md": "Sonoff Basic",
"ofln": "Offline",
"onln": "Online",
"state": ["OFF", "ON", "TOGGLE", "HOLD"],
"sw": "8.4.0.2",
"t": "tasmota_49A3BC",
"ft": "%topic%/%prefix%/",
"tp": ["cmnd", "stat", "tele"],
"rl": [0, 0, 0, 0, 0, 0, 0, 0],
"swc": [-1, -1, -1, -1, -1, -1, -1, -1],
"btn": [0, 0, 0, 0],
"so": {
"11": 0, # Swap button single and double press functionality
"13": 0, # Allow immediate action on single button press
"17": 1, # Show Color string as hex or comma-separated
"20": 0, # Update of Dimmer/Color/CT without turning power on
"30": 0, # Enforce Home Assistant auto-discovery as light
"68": 0, # Multi-channel PWM instead of a single light
"73": 0, # Enable Buttons decoupling and send multi-press and hold MQTT messages
"80": 0, # Blinds and shutters support
"82": 0, # Reduce the CT range from 153..500 to 200.380
},
"ty": 0, # Tuya MCU
"lt_st": 0,
"ver": 1,
}
async def help_test_availability_when_connection_lost(
hass,
mqtt_client_mock,
mqtt_mock,
domain,
config,
sensor_config=None,
entity_id="test",
):
"""Test availability after MQTT disconnection.
This is a test helper for the TasmotaAvailability mixin.
"""
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config",
json.dumps(config),
)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
# Device online
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
# Disconnected from MQTT server -> state changed to unavailable
mqtt_mock.connected = False
await hass.async_add_executor_job(mqtt_client_mock.on_disconnect, None, None, 0)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
# Reconnected to MQTT server -> state still unavailable
mqtt_mock.connected = True
await hass.async_add_executor_job(mqtt_client_mock.on_connect, None, None, None, 0)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
# Receive LWT again
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async def help_test_availability(
hass,
mqtt_mock,
domain,
config,
sensor_config=None,
entity_id="test",
):
"""Test availability.
This is a test helper for the TasmotaAvailability mixin.
"""
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config",
json.dumps(config),
)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_offline(config),
)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
async def help_test_availability_discovery_update(
hass,
mqtt_mock,
domain,
config,
sensor_config=None,
entity_id="test",
):
"""Test update of discovered TasmotaAvailability.
This is a test helper for the TasmotaAvailability mixin.
"""
# customize availability topic
config1 = copy.deepcopy(config)
config1[CONF_PREFIX][PREFIX_TELE] = "tele1"
config1[CONF_OFFLINE] = "offline1"
config1[CONF_ONLINE] = "online1"
config2 = copy.deepcopy(config)
config2[CONF_PREFIX][PREFIX_TELE] = "tele2"
config2[CONF_OFFLINE] = "offline2"
config2[CONF_ONLINE] = "online2"
data1 = json.dumps(config1)
data2 = json.dumps(config2)
availability_topic1 = get_topic_tele_will(config1)
availability_topic2 = get_topic_tele_will(config2)
assert availability_topic1 != availability_topic2
offline1 = config_get_state_offline(config1)
offline2 = config_get_state_offline(config2)
assert offline1 != offline2
online1 = config_get_state_online(config1)
online2 = config_get_state_online(config2)
assert online1 != online2
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config1[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, availability_topic1, online1)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, availability_topic1, offline1)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
# Change availability settings
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config2[CONF_MAC]}/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic or payload
async_fire_mqtt_message(hass, availability_topic1, online1)
async_fire_mqtt_message(hass, availability_topic1, online2)
async_fire_mqtt_message(hass, availability_topic2, online1)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, availability_topic2, online2)
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async def help_test_availability_poll_state(
hass,
mqtt_client_mock,
mqtt_mock,
domain,
config,
poll_topic,
poll_payload,
sensor_config=None,
):
"""Test polling of state when device is available.
This is a test helper for the TasmotaAvailability mixin.
"""
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config",
json.dumps(config),
)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
mqtt_mock.async_publish.reset_mock()
# Device online, verify poll for state
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(poll_topic, poll_payload, 0, False)
mqtt_mock.async_publish.reset_mock()
# Disconnected from MQTT server
mqtt_mock.connected = False
await hass.async_add_executor_job(mqtt_client_mock.on_disconnect, None, None, 0)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
# Reconnected to MQTT server
mqtt_mock.connected = True
await hass.async_add_executor_job(mqtt_client_mock.on_connect, None, None, None, 0)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
assert not mqtt_mock.async_publish.called
# Device online, verify poll for state
async_fire_mqtt_message(
hass,
get_topic_tele_will(config),
config_get_state_online(config),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
await hass.async_block_till_done()
mqtt_mock.async_publish.assert_called_once_with(poll_topic, poll_payload, 0, False)
async def help_test_discovery_removal(
hass,
mqtt_mock,
caplog,
domain,
config1,
config2,
sensor_config1=None,
sensor_config2=None,
entity_id="test",
name="Test",
):
"""Test removal of discovered entity."""
device_reg = await hass.helpers.device_registry.async_get_registry()
entity_reg = await hass.helpers.entity_registry.async_get_registry()
data1 = json.dumps(config1)
data2 = json.dumps(config2)
assert config1[CONF_MAC] == config2[CONF_MAC]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config1[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
if sensor_config1:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config1[CONF_MAC]}/sensors",
json.dumps(sensor_config1),
)
await hass.async_block_till_done()
# Verify device and entity registry entries are created
device_entry = device_reg.async_get_device(set(), {("mac", config1[CONF_MAC])})
assert device_entry is not None
entity_entry = entity_reg.async_get(f"{domain}.{entity_id}")
assert entity_entry is not None
# Verify state is added
state = hass.states.get(f"{domain}.{entity_id}")
assert state is not None
assert state.name == name
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config2[CONF_MAC]}/config", data2)
await hass.async_block_till_done()
if sensor_config1:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config2[CONF_MAC]}/sensors",
json.dumps(sensor_config2),
)
await hass.async_block_till_done()
# Verify entity registry entries are cleared
device_entry = device_reg.async_get_device(set(), {("mac", config2[CONF_MAC])})
assert device_entry is not None
entity_entry = entity_reg.async_get(f"{domain}.{entity_id}")
assert entity_entry is None
# Verify state is removed
state = hass.states.get(f"{domain}.{entity_id}")
assert state is None
async def help_test_discovery_update_unchanged(
hass,
mqtt_mock,
caplog,
domain,
config,
discovery_update,
sensor_config=None,
entity_id="test",
name="Test",
):
"""Test update of discovered component with and without changes.
This is a test helper for the MqttDiscoveryUpdate mixin.
"""
config1 = copy.deepcopy(config)
config2 = copy.deepcopy(config)
config2[CONF_PREFIX][PREFIX_CMND] = "cmnd2"
config2[CONF_PREFIX][PREFIX_TELE] = "tele2"
data1 = json.dumps(config1)
data2 = json.dumps(config2)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state is not None
assert state.name == name
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data1)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
assert not discovery_update.called
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data2)
await hass.async_block_till_done()
assert discovery_update.called
async def help_test_discovery_device_remove(
hass, mqtt_mock, domain, unique_id, config, sensor_config=None
):
"""Test domain entity is removed when device is removed."""
device_reg = await hass.helpers.device_registry.async_get_registry()
entity_reg = await hass.helpers.entity_registry.async_get_registry()
config = copy.deepcopy(config)
data = json.dumps(config)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
device = device_reg.async_get_device(set(), {("mac", config[CONF_MAC])})
assert device is not None
assert entity_reg.async_get_entity_id(domain, "tasmota", unique_id)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", "")
await hass.async_block_till_done()
device = device_reg.async_get_device(set(), {("mac", config[CONF_MAC])})
assert device is None
assert not entity_reg.async_get_entity_id(domain, "tasmota", unique_id)
async def help_test_entity_id_update_subscriptions(
hass, mqtt_mock, domain, config, topics=None, sensor_config=None, entity_id="test"
):
"""Test MQTT subscriptions are managed when entity_id is updated."""
entity_reg = await hass.helpers.entity_registry.async_get_registry()
config = copy.deepcopy(config)
data = json.dumps(config)
mqtt_mock.async_subscribe.reset_mock()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
if not topics:
topics = [get_topic_tele_state(config), get_topic_tele_will(config)]
assert len(topics) > 0
state = hass.states.get(f"{domain}.{entity_id}")
assert state is not None
assert mqtt_mock.async_subscribe.call_count == len(topics)
for topic in topics:
mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
mqtt_mock.async_subscribe.reset_mock()
entity_reg.async_update_entity(
f"{domain}.{entity_id}", new_entity_id=f"{domain}.milk"
)
await hass.async_block_till_done()
state = hass.states.get(f"{domain}.{entity_id}")
assert state is None
state = hass.states.get(f"{domain}.milk")
assert state is not None
for topic in topics:
mqtt_mock.async_subscribe.assert_any_call(topic, ANY, ANY, ANY)
async def help_test_entity_id_update_discovery_update(
hass, mqtt_mock, domain, config, sensor_config=None, entity_id="test"
):
"""Test MQTT discovery update after entity_id is updated."""
entity_reg = await hass.helpers.entity_registry.async_get_registry()
config = copy.deepcopy(config)
data = json.dumps(config)
topic = get_topic_tele_will(config)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data)
await hass.async_block_till_done()
if sensor_config:
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/sensors",
json.dumps(sensor_config),
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, topic, config_get_state_online(config))
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, topic, config_get_state_offline(config))
state = hass.states.get(f"{domain}.{entity_id}")
assert state.state == STATE_UNAVAILABLE
entity_reg.async_update_entity(
f"{domain}.{entity_id}", new_entity_id=f"{domain}.milk"
)
await hass.async_block_till_done()
assert hass.states.get(f"{domain}.milk")
assert config[CONF_PREFIX][PREFIX_TELE] != "tele2"
config[CONF_PREFIX][PREFIX_TELE] = "tele2"
data = json.dumps(config)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{config[CONF_MAC]}/config", data)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(domain)) == 1
topic = get_topic_tele_will(config)
async_fire_mqtt_message(hass, topic, config_get_state_online(config))
state = hass.states.get(f"{domain}.milk")
assert state.state != STATE_UNAVAILABLE
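# Illustrative sketch (kept as a comment so this helper module stays
# import-only) of how a platform test module might call these helpers. The
# setup_tasmota fixture, the "rl" tweak and the entity_id are assumptions for
# a single-relay switch setup.
#
#     async def test_availability(hass, mqtt_mock, setup_tasmota):
#         config = copy.deepcopy(DEFAULT_CONFIG)
#         config["rl"][0] = 1  # expose the first relay as a switch
#         await help_test_availability(
#             hass, mqtt_mock, "switch", config, entity_id="tasmota"
#         )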
|
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import nvidia_driver
FLAGS = flags.FLAGS
MACHINEFILE = 'HOSTFILE'
BENCHMARK_VERSION = 0.34
BENCHMARK_NAME = 'horovod'
BENCHMARK_CONFIG = """
horovod:
description: Runs Horovod. Specify the number of VMs with --num_vms
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-highmem-96
zone: us-central1-a
image_family: tf-latest-gpu-gvnic-debian-10
image_project: deeplearning-platform-release
boot_disk_size: 300
gpu_type: v100
gpu_count: 8
AWS:
machine_type: p3dn.24xlarge
zone: us-east-1a
image: ami-02e86b825fe559330
boot_disk_size: 300
Azure:
machine_type: Standard_NC24rs_v3
image: microsoft-dsvm:aml-workstation:ubuntu:19.11.13
zone: eastus
boot_disk_size: 300
vm_count: null
"""
# TODO(user): Use NVIDIA's repo after
# https://github.com/NVIDIA/DeepLearningExamples/pull/386 is merged
GITHUB_MODELS_URL = 'https://github.com/changlan/DeepLearningExamples.git'
BERT_BASE_URL = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip'
BERT_LARGE_URL = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip'
flags.DEFINE_enum(
'horovod_model', 'resnet-50',
['resnet-50', 'bert-base', 'bert-large', 'maskrcnn', 'resnext-101'],
'name of the model to run.')
flags.DEFINE_integer('horovod_batch_size', 64, 'Batch size per compute device.')
flags.DEFINE_integer('horovod_num_steps', 10,
'Number of steps (epochs for BERT) to train for. ')
flags.DEFINE_bool('horovod_synthetic', False,
'Whether to train with synthetic data.')
flags.DEFINE_enum('horovod_max_seq_len', '128', ['128', '384'],
'Max sequence length for BERT.')
flags.DEFINE_enum('horovod_precision', 'fp16', ['fp16', 'fp32'], 'Precision.')
flags.DEFINE_bool('horovod_bert_finetune', True,
'Pretrain or finetune a BERT model.')
flags.DEFINE_bool('horovod_timelime', False, 'Enable timeline in Horovod.')
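# Example invocation (illustrative) combining the flags above with the
# standard PerfKitBenchmarker entry point:
#   ./pkb.py --benchmarks=horovod --num_vms=2 --horovod_model=resnet-50 \
#     --horovod_batch_size=256 --horovod_synthetic=true --horovod_num_steps=100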
class HorovodParseOutputError(errors.Benchmarks.RunError):
pass
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
gpus_per_node = nvidia_driver.QueryNumberOfGpus(benchmark_spec.vms[0])
num_vms = len(benchmark_spec.vms)
total_gpus = gpus_per_node * num_vms
benchmark_spec.gpus_per_node = gpus_per_node
benchmark_spec.num_vms = num_vms
benchmark_spec.total_gpus = total_gpus
benchmark_spec.model = FLAGS.horovod_model
benchmark_spec.batch_size = FLAGS.horovod_batch_size
benchmark_spec.num_steps = FLAGS.horovod_num_steps
benchmark_spec.precision = FLAGS.horovod_precision
benchmark_spec.max_seq_len = int(FLAGS.horovod_max_seq_len)
benchmark_spec.bert_finetune = FLAGS.horovod_bert_finetune
benchmark_spec.timeline = FLAGS.horovod_timelime
benchmark_spec.synthetic = FLAGS.horovod_synthetic
benchmark_spec.cuda_visible_devices = FLAGS.nccl_cuda_visible_devices
benchmark_spec.nccl_version = FLAGS.nccl_version
benchmark_spec.nccl_net_plugin = FLAGS.nccl_net_plugin
benchmark_spec.nccl_extra_params = FLAGS.nccl_extra_params
def _CopyAndUpdateRunScripts(model, vm):
"""Copy and update all necessary run scripts on the given vm.
Args:
model: name of the model
vm: vm to place and update run scripts on
"""
vm.RemoteCommand(
'[ -d "DeepLearningExamples" ] || git clone --branch clan-dev %s' %
GITHUB_MODELS_URL)
# MaskRCNN
if model == 'maskrcnn':
vm.RemoteCommand(
'wget -q -N http://models.tensorpack.com/FasterRCNN/ImageNet-R50-AlignPadding.npz'
)
vm.RemoteCommand(
'mkdir -p coco && cd coco && '
'wget -q -N http://images.cocodataset.org/zips/train2017.zip && '
'wget -q -N http://images.cocodataset.org/zips/val2017.zip && '
'wget -q -N http://images.cocodataset.org/annotations/annotations_trainval2017.zip && '
'unzip -q -o train2017.zip && unzip -q -o val2017.zip && '
'unzip -q -o annotations_trainval2017.zip && rm *.zip')
# BERT
bert_base_dir = 'DeepLearningExamples/TensorFlow/LanguageModeling/BERT'
if model == 'bert-base' or model == 'bert-large':
vm.RemoteCommand(
'mkdir -p {bert}/data/download/google_pretrained_weights &&'
'mkdir -p {bert}/data/download/squad/v1.1 && '
'cd {bert}/data/download/squad/v1.1 && '
'wget -q https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json'
.format(bert=bert_base_dir))
get_bert_data_cmd = ('cd {bert}/data/download/google_pretrained_weights/ && '
'wget -q {url} && unzip -o $(basename {url})')
if model == 'bert-base':
vm.RemoteCommand(
get_bert_data_cmd.format(bert=bert_base_dir, url=BERT_BASE_URL))
if model == 'bert-large':
vm.RemoteCommand(
get_bert_data_cmd.format(bert=bert_base_dir, url=BERT_LARGE_URL))
def _PrepareHorovod(vm):
"""Install dependencies on a single vm.
Args:
vm: vm to operate on
"""
logging.info('Installing Horovod on %s', vm)
vm.AuthenticateVm()
vm.Install('google_cloud_sdk')
vm.InstallPackages('wget git unzip')
vm.Install('nccl')
pip = 'pip'
if FLAGS.cloud == 'GCP': # temporary fix for DLVM images
pip = '/opt/conda/bin/pip'
vm.RemoteCommand(f'sudo {pip} install --force-reinstall pyarrow')
vm.Install('openmpi')
elif FLAGS.cloud == 'AWS':
vm.RobustRemoteCommand('. anaconda3/bin/activate tensorflow_p37')
pip = 'anaconda3/envs/tensorflow_p37/bin/pip'
  # e.g. '10.0' -> '100', '11.0' -> '110'
cuda_version = cuda_toolkit.GetCudaToolkitVersion(vm).replace('.', '')
vm.RemoteCommand(
f'sudo {pip} install '
'--extra-index-url https://developer.download.nvidia.com/compute/redist/ '
'git+https://github.com/NVIDIA/dllogger.git '
f'nvidia-dali-cuda{cuda_version}')
vm.RemoteCommand(
f'sudo {pip} install '
'--extra-index-url https://developer.download.nvidia.com/compute/redist/ '
f'nvidia-dali-tf-plugin-cuda{cuda_version}')
vm.RemoteCommand(
f'sudo {pip} install cython scipy \'opencv-python==3.4.2.17\'')
vm.RemoteCommand(
f'sudo {pip} install \'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI\''
)
vm.RemoteCommand(
f'[ -d "tensorpack" ] || git clone https://github.com/tensorpack/tensorpack.git && sudo {pip} install ./tensorpack'
)
def Prepare(benchmark_spec):
"""Install and set up Horovod on the target vms.
Args:
benchmark_spec: The benchmark specification
"""
vms = benchmark_spec.vms
vm_util.RunThreaded(_PrepareHorovod, vms)
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm_util.RunThreaded(
lambda vm: _CopyAndUpdateRunScripts(benchmark_spec.model, vm), vms)
hpc_util.CreateMachineFile(vms, lambda _: benchmark_spec.gpus_per_node,
MACHINEFILE)
def _CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: benchmark spec
Returns:
metadata dict
"""
vm = benchmark_spec.vms[0]
metadata = dict()
metadata.update(cuda_toolkit.GetMetadata(vm))
metadata['benchmark_version'] = BENCHMARK_VERSION
metadata['num_nodes'] = len(benchmark_spec.vms)
metadata['total_gpus'] = int(benchmark_spec.total_gpus)
metadata['model'] = benchmark_spec.model
metadata['batch_size'] = benchmark_spec.batch_size
metadata['num_steps'] = benchmark_spec.num_steps
metadata['synthetic'] = benchmark_spec.synthetic
metadata['precision'] = benchmark_spec.precision
metadata['max_seq_len'] = benchmark_spec.max_seq_len
metadata['nccl_version'] = benchmark_spec.nccl_version
metadata['nccl_net_plugin'] = benchmark_spec.nccl_net_plugin
metadata['cuda_visible_devices'] = benchmark_spec.cuda_visible_devices
metadata['nccl_extra_params'] = benchmark_spec.nccl_extra_params
return metadata
def _ExtractResNetThroughput(output):
"""Extract throughput from Horovod output.
Args:
output: Horovod output
Returns:
A tuple of:
      Average throughput in images per second (float)
Unit of the throughput metric (str)
"""
# Start from last line and iterate backwards.
avg_throughput = 0
for line in output.splitlines()[::-1]:
if 'train_throughput' in line:
split_line = line.split()
avg_throughput = float(split_line[-1])
break
return round(avg_throughput, 1), 'images/second'
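# Illustrative sketch (not part of the original benchmark): the parser above only
# requires that a line mention 'train_throughput' and end with a number, so the
# sample log line below is hypothetical and exists purely to show the return value.
def _ExampleResNetThroughputParse():
  sample_output = 'step 100  train_throughput : 2650.5'
  assert _ExtractResNetThroughput(sample_output) == (2650.5, 'images/second')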
def _ExtractBertThroughput(output):
"""Extract throughput from Horovod output.
Args:
output: Horovod output
Returns:
A tuple of:
Average throughput in sentences per second (float)
Unit of the throughput metric (str)
"""
# Start from last line and iterate backwards.
avg_throughput = 0
for line in output.splitlines()[::-1]:
if 'Throughput Average (sentences/sec) =' in line:
split_line = line.split()
avg_throughput = float(split_line[-1])
break
return round(avg_throughput, 1), 'sentences/second'
def _ExtractMaskRCNNThroughput(output):
"""Extract throughput from Horovod output.
Args:
output: Horovod output
Returns:
A tuple of:
      Average throughput (float)
Unit of the throughput metric (str)
"""
total_xput, unit = [], None
for line in output.splitlines()[::-1]:
if 'Throughput' in line:
split_line = line.split()
xput, unit = float(split_line[-1]), split_line[-2][1:-2]
total_xput.append(xput)
if not total_xput:
raise ValueError('No "Throughput" found in {}'.format(output))
return round(sum(total_xput) / len(total_xput), 1), unit
def _MakeSamplesFromOutput(benchmark_spec, stdout, stderr):
"""Create a sample continaing the measured Horovod throughput.
Args:
benchmark_spec: benchmark spec
stdout: stdout
stderr: stderr
Returns:
list of a Sample containing the Horovod throughput
"""
metadata = _CreateMetadataDict(benchmark_spec)
output = stdout + stderr
extractor = {
'resnet-50': _ExtractResNetThroughput,
'resnext-101': _ExtractResNetThroughput,
'bert-base': _ExtractBertThroughput,
'bert-large': _ExtractBertThroughput,
'maskrcnn': _ExtractMaskRCNNThroughput,
}
throughput, unit = extractor[benchmark_spec.model](output)
samples = []
samples.append(
sample.Sample('Training throughput', throughput, unit, metadata))
return samples
def Run(benchmark_spec):
"""Run Horovod on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: vm.RemoteCommand('rm -rf /tmp/models'), vms)
master_vm = vms[0]
# GCP should work out of the box with the deep learning image but the AWS
# image requires us to use the correct Tensorflow Python environment.
if FLAGS.cloud == 'AWS':
master_vm.RobustRemoteCommand('. anaconda3/bin/activate tensorflow_p37')
python_interpreter = 'anaconda3/envs/tensorflow_p37/bin/python'
else:
python_interpreter = '/opt/conda/bin/python'
nccl_params = [
'TF_CPP_MIN_LOG_LEVEL=0',
'NCCL_SOCKET_IFNAME=^lo,docker0',
'NCCL_DEBUG=INFO',
]
if benchmark_spec.timeline:
nccl_params.extend([
'HOROVOD_TIMELINE={}/timeline.json'.format(vm_util.VM_TMP_DIR),
'HOROVOD_TIMELINE_MARK_CYCLES=1',
])
if benchmark_spec.cuda_visible_devices:
nccl_params.append('CUDA_VISIBLE_DEVICES={}'.format(
benchmark_spec.cuda_visible_devices))
if FLAGS.nccl_extra_params:
for extra_param in FLAGS.nccl_extra_params:
nccl_params.append(extra_param)
run_command = ('{mpi} -np {num_gpus} -hostfile {host_file} '
'-mca plm_rsh_no_tree_spawn 1 '
'--allow-run-as-root '
'-bind-to socket -map-by slot '
'{nccl_params} '
'-mca pml ob1 -mca btl ^openib '
'-mca btl_tcp_if_exclude lo,docker0 '
'{python} ').format(
mpi=FLAGS.nccl_mpi,
num_gpus=benchmark_spec.total_gpus,
host_file=MACHINEFILE,
python=python_interpreter,
nccl_params=' '.join(
['-x {}'.format(param) for param in nccl_params]))
if benchmark_spec.model == 'resnet-50':
run_flags = {
'arch': 'resnet50',
'mode': 'training_benchmark',
'warmup_steps': 101,
'results_dir': '/tmp/models',
'gpu_memory_fraction': 0.95,
'use_static_loss_scaling': None,
'loss_scale': 128,
'lr_init': 0.016,
'lr_warmup_epochs': 8,
'momentum': 0.875,
'weight_decay': 3.0517578125e-05,
'iter_unit': 'batch'
}
run_flags.update({
'precision': benchmark_spec.precision,
'batch_size': benchmark_spec.batch_size,
'num_iter': benchmark_spec.num_steps,
})
# Load ImageNet training data from GCS if benchmark is not in synthetic mode
if not benchmark_spec.synthetic:
run_flags['data_dir'] = 'gs://cloud-ml-nas-public/classification/imagenet'
run_command += 'DeepLearningExamples/TensorFlow/Classification/ConvNets/main.py '
run_command += ' '.join([
'--{}'.format(key) if value is None else '--{}={}'.format(key, value)
for key, value in sorted(run_flags.items())
])
elif benchmark_spec.model == 'resnext-101':
run_flags = {
'arch': 'resnext101-32x4d',
'mode': 'training_benchmark',
'warmup_steps': 101,
'results_dir': '/tmp/models',
'gpu_memory_fraction': 0.95,
'use_static_loss_scaling': None,
'loss_scale': 128,
'lr_init': 0.016,
'lr_warmup_epochs': 8,
'momentum': 0.875,
'weight_decay': 3.0517578125e-05,
'weight_init': 'fan_in',
'iter_unit': 'batch'
}
run_flags.update({
'precision': benchmark_spec.precision,
'batch_size': benchmark_spec.batch_size,
'num_iter': benchmark_spec.num_steps,
})
# Load ImageNet training data from GCS if benchmark is not in synthetic mode
if not benchmark_spec.synthetic:
run_flags['data_dir'] = 'gs://cloud-ml-nas-public/classification/imagenet'
run_command += 'DeepLearningExamples/TensorFlow/Classification/ConvNets/main.py '
run_command += ' '.join([
'--{}'.format(key) if value is None else '--{}={}'.format(key, value)
for key, value in sorted(run_flags.items())
])
elif benchmark_spec.model.startswith('bert'): # bert
if not benchmark_spec.bert_finetune:
raise NotImplementedError('BERT pretraining is not supported.')
bert_dir = 'DeepLearningExamples/TensorFlow/LanguageModeling/BERT/data/download/google_pretrained_weights/{}'.format(
'uncased_L-12_H-768_A-12' if benchmark_spec.model ==
'bert-base' else 'uncased_L-24_H-1024_A-16')
squad_train_file = 'DeepLearningExamples/TensorFlow/LanguageModeling/BERT/data/download/squad/v1.1/train-v1.1.json'
run_flags = {
'vocab_file': '{}/vocab.txt'.format(bert_dir),
'bert_config_file': '{}/bert_config.json'.format(bert_dir),
'init_checkpoint': '{}/bert_model.ckpt'.format(bert_dir),
'do_train': None,
'train_file': squad_train_file,
'learning_rate': 5e-6,
'output_dir': '/tmp/models',
'horovod': None,
'dllog_path': '/tmp/bert_dllog.json',
}
run_flags.update({
'precision': benchmark_spec.precision,
'train_batch_size': benchmark_spec.batch_size,
'num_train_epochs': benchmark_spec.num_steps,
'max_seq_length': benchmark_spec.max_seq_len,
'doc_stride': 64 if benchmark_spec.max_seq_len == 128 else 128,
'amp': benchmark_spec.precision == 'fp16'
})
run_command += 'DeepLearningExamples/TensorFlow/LanguageModeling/BERT/run_squad.py '
run_command += ' '.join([
'--{}'.format(key) if value is None else '--{}={}'.format(key, value)
for key, value in sorted(run_flags.items())
])
else:
run_command += (
'tensorpack/examples/FasterRCNN/train.py --config '
'BACKBONE.WEIGHTS=ImageNet-R50-AlignPadding.npz '
'DATA.BASEDIR=coco '
'TRAINER=horovod '
'TRAIN.EVAL_PERIOD=0 '
# LR_SCHEDULE means equivalent steps when the total batch size is 8.
'TRAIN.LR_SCHEDULE="[{step}, {step}, {step}]" '
'--logdir {log_dir}/maskrcnn ').format(
log_dir=vm_util.VM_TMP_DIR,
step=benchmark_spec.num_steps * benchmark_spec.total_gpus // 8)
stdout, stderr = master_vm.RobustRemoteCommand(run_command, should_log=True)
if benchmark_spec.timeline:
master_vm.PullFile(vm_util.GetTempDir(),
'{}/timeline.json'.format(vm_util.VM_TMP_DIR))
return _MakeSamplesFromOutput(benchmark_spec, stdout, stderr)
def Cleanup(benchmark_spec):
"""Cleanup Horovod on the cluster."""
del benchmark_spec
|
import pytest
from qutebrowser.browser import shared
@pytest.mark.parametrize('dnt, accept_language, custom_headers, expected', [
# DNT
(True, None, {}, {b'DNT': b'1'}),
(False, None, {}, {b'DNT': b'0'}),
(None, None, {}, {}),
# Accept-Language
(False, 'de, en', {}, {b'DNT': b'0', b'Accept-Language': b'de, en'}),
# Custom headers
(False, None, {'X-Qute': 'yes'}, {b'DNT': b'0', b'X-Qute': b'yes'}),
# Mixed
(False, 'de, en', {'X-Qute': 'yes'}, {b'DNT': b'0',
b'Accept-Language': b'de, en',
b'X-Qute': b'yes'}),
])
def test_custom_headers(config_stub, dnt, accept_language, custom_headers,
expected):
headers = config_stub.val.content.headers
headers.do_not_track = dnt
headers.accept_language = accept_language
headers.custom = custom_headers
expected_items = sorted(expected.items())
assert shared.custom_headers(url=None) == expected_items
|
import os.path as op
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne import (read_events, read_cov, read_source_spaces, read_evokeds,
read_dipole, SourceEstimate, pick_events)
from mne.datasets import testing
from mne.filter import create_filter
from mne.io import read_raw_fif
from mne.minimum_norm import read_inverse_operator
from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
plot_snr_estimate, plot_filter, plot_csd)
from mne.viz.misc import _handle_event_colors
from mne.viz.utils import _get_color_list
from mne.utils import requires_nibabel, run_tests_if_main
from mne.time_frequency import CrossSpectralDensity
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
inv_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
dip_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_set1.dip')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_raw():
"""Get raw data."""
return read_raw_fif(raw_fname, preload=True)
def _get_events():
"""Get events."""
return read_events(event_fname)
def test_plot_filter():
"""Test filter plotting."""
l_freq, h_freq, sfreq = 2., 40., 1000.
data = np.zeros(5000)
freq = [0, 2, 40, 50, 500]
gain = [0, 1, 1, 0, 0]
h = create_filter(data, sfreq, l_freq, h_freq, fir_design='firwin2')
plot_filter(h, sfreq)
plt.close('all')
plot_filter(h, sfreq, freq, gain)
plt.close('all')
iir = create_filter(data, sfreq, l_freq, h_freq, method='iir')
plot_filter(iir, sfreq)
plt.close('all')
plot_filter(iir, sfreq, freq, gain)
plt.close('all')
iir_ba = create_filter(data, sfreq, l_freq, h_freq, method='iir',
iir_params=dict(output='ba'))
plot_filter(iir_ba, sfreq, freq, gain)
plt.close('all')
fig = plot_filter(h, sfreq, freq, gain, fscale='linear')
assert len(fig.axes) == 3
plt.close('all')
fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
plot=('time', 'delay'))
assert len(fig.axes) == 2
plt.close('all')
fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
plot=['magnitude', 'delay'])
assert len(fig.axes) == 2
plt.close('all')
fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
plot='magnitude')
assert len(fig.axes) == 1
plt.close('all')
fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
plot=('magnitude'))
assert len(fig.axes) == 1
plt.close('all')
with pytest.raises(ValueError, match='Invalid value for the .plot'):
plot_filter(h, sfreq, freq, gain, plot=('turtles'))
_, axes = plt.subplots(1)
fig = plot_filter(h, sfreq, freq, gain, plot=('magnitude'), axes=axes)
assert len(fig.axes) == 1
_, axes = plt.subplots(2)
fig = plot_filter(h, sfreq, freq, gain, plot=('magnitude', 'delay'),
axes=axes)
assert len(fig.axes) == 2
plt.close('all')
_, axes = plt.subplots(1)
with pytest.raises(ValueError, match='Length of axes'):
plot_filter(h, sfreq, freq, gain,
plot=('magnitude', 'delay'), axes=axes)
def test_plot_cov():
"""Test plotting of covariances."""
raw = _get_raw()
cov = read_cov(cov_fname)
with pytest.warns(RuntimeWarning, match='projection'):
fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
plt.close('all')
@testing.requires_testing_data
@requires_nibabel()
def test_plot_bem():
"""Test plotting of BEM contours."""
with pytest.raises(IOError, match='MRI file .* not found'):
plot_bem(subject='bad-subject', subjects_dir=subjects_dir)
with pytest.raises(ValueError, match="Invalid value for the 'orientation"):
plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='bad-ori')
with pytest.raises(ValueError, match="sorted 1D array"):
plot_bem(subject='sample', subjects_dir=subjects_dir, slices=[0, 500])
fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='sagittal', slices=[25, 50])
assert len(fig.axes) == 2
assert len(fig.axes[0].collections) == 3 # 3 BEM surfaces ...
fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='coronal', brain_surfaces='white')
assert len(fig.axes[0].collections) == 5 # 3 BEM surfaces + 2 hemis
fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
orientation='coronal', slices=[25, 50], src=src_fname)
assert len(fig.axes[0].collections) == 4 # 3 BEM surfaces + 1 src contour
with pytest.raises(ValueError, match='MRI coordinates, got head'):
plot_bem(subject='sample', subjects_dir=subjects_dir,
src=inv_fname)
def test_event_colors():
"""Test color assignment."""
events = pick_events(_get_events(), include=[1, 2])
unique_events = set(events[:, 2])
# make sure defaults work
colors = _handle_event_colors(None, unique_events, dict())
default_colors = _get_color_list()
assert colors[1] == default_colors[0]
# make sure custom color overrides default
colors = _handle_event_colors(color_dict=dict(foo='k', bar='#facade'),
unique_events=unique_events,
event_id=dict(foo=1, bar=2))
assert colors[1] == 'k'
assert colors[2] == '#facade'
def test_plot_events():
"""Test plotting events."""
event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
raw = _get_raw()
events = _get_events()
fig = plot_events(events, raw.info['sfreq'], raw.first_samp)
assert fig.axes[0].get_legend() is not None # legend even with no event_id
plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
# Test plotting events without sfreq
plot_events(events, first_samp=raw.first_samp)
with pytest.warns(RuntimeWarning, match='will be ignored'):
fig = plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels)
assert fig.axes[0].get_legend() is not None
with pytest.warns(RuntimeWarning, match='Color was not assigned'):
plot_events(events, raw.info['sfreq'], raw.first_samp,
color=color)
with pytest.warns(RuntimeWarning, match=r'vent \d+ missing from event_id'):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=event_labels, color=color)
multimatch = r'event \d+ missing from event_id|in the color dict but is'
with pytest.warns(RuntimeWarning, match=multimatch):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id={'aud_l': 1}, color=color)
extra_id = {'missing': 111}
with pytest.raises(ValueError, match='from event_id is not present in'):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=extra_id)
with pytest.raises(RuntimeError, match='No usable event IDs'):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=extra_id, on_missing='ignore')
extra_id = {'aud_l': 1, 'missing': 111}
with pytest.warns(RuntimeWarning, match='from event_id is not present in'):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=extra_id, on_missing='warn')
with pytest.warns(RuntimeWarning, match='event 2 missing'):
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=extra_id, on_missing='ignore')
events = events[events[:, 2] == 1]
assert len(events) > 0
plot_events(events, raw.info['sfreq'], raw.first_samp,
event_id=extra_id, on_missing='ignore')
with pytest.raises(ValueError, match='No events'):
plot_events(np.empty((0, 3)))
plt.close('all')
@testing.requires_testing_data
def test_plot_source_spectrogram():
"""Test plotting of source spectrogram."""
sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
'bem', 'sample-oct-6-src.fif'))
# dense version
vertices = [s['vertno'] for s in sample_src]
n_times = 5
n_verts = sum(len(v) for v in vertices)
stc_data = np.ones((n_verts, n_times))
stc = SourceEstimate(stc_data, vertices, 1, 1)
plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
pytest.raises(ValueError, plot_source_spectrogram, [], [])
pytest.raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmin=0)
pytest.raises(ValueError, plot_source_spectrogram, [stc, stc],
[[1, 2], [3, 4]], tmax=7)
plt.close('all')
@pytest.mark.slowtest
@testing.requires_testing_data
def test_plot_snr():
"""Test plotting SNR estimate."""
inv = read_inverse_operator(inv_fname)
evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
plot_snr_estimate(evoked, inv)
plt.close('all')
@testing.requires_testing_data
def test_plot_dipole_amplitudes():
"""Test plotting dipole amplitudes."""
dipoles = read_dipole(dip_fname)
dipoles.plot_amplitudes(show=False)
plt.close('all')
def test_plot_csd():
"""Test plotting of CSD matrices."""
csd = CrossSpectralDensity([1, 2, 3], ['CH1', 'CH2'],
frequencies=[(10, 20)], n_fft=1,
tmin=0, tmax=1,)
plot_csd(csd, mode='csd') # Plot cross-spectral density
plot_csd(csd, mode='coh') # Plot coherence
plt.close('all')
run_tests_if_main()
|
import diamond.collector
try:
from boto import sqs
except ImportError:
sqs = False
class SqsCollector(diamond.collector.Collector):
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SqsCollector, self).get_default_config()
config.update({
'path': 'sqs',
})
return config
def collect(self):
attribs = ['ApproximateNumberOfMessages',
'ApproximateNumberOfMessagesNotVisible',
'ApproximateNumberOfMessagesDelayed',
'CreatedTimestamp',
'DelaySeconds',
'LastModifiedTimestamp',
'MaximumMessageSize',
'MessageRetentionPeriod',
'ReceiveMessageWaitTimeSeconds',
'VisibilityTimeout']
if not sqs:
self.log.error("boto module not found!")
return
for (region, region_cfg) in self.config['regions'].items():
assert 'queues' in region_cfg
auth_kwargs = _get_auth_kwargs(config=region_cfg)
queues = region_cfg['queues'].split(',')
for queue_name in queues:
conn = sqs.connect_to_region(region, **auth_kwargs)
queue = conn.get_queue(queue_name)
for attrib in attribs:
d = queue.get_attributes(attrib)
self.publish(
'%s.%s.%s' % (region, queue_name, attrib),
d[attrib]
)
def _get_auth_kwargs(config):
"""Generate the kwargs for the AWS keys from a configuration dictionary.
If credentials are not present in the config, then assume that
we're using IAM roles with instance profiles. :mod:`boto` will
automatically take care of using the credentials from the instance
metadata if not provided with kwargs.
:param config: The configuration to use when looking for explicitly
provided AWS credentials.
:type config: dict
:returns: The kwargs for use with :mod:`boto` connect functions.
:rtype: dict
"""
if not ('access_key_id' in config and 'secret_access_key' in config):
return {}
return {
'aws_access_key_id': config['access_key_id'],
'aws_secret_access_key': config['secret_access_key'],
}
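# Minimal usage sketch (added for illustration; the collector itself never calls
# this). The key values are placeholders, not real credentials.
def _example_get_auth_kwargs():
    explicit = _get_auth_kwargs(config={'access_key_id': 'AKIA_EXAMPLE',
                                        'secret_access_key': 'EXAMPLE_SECRET'})
    assert explicit == {'aws_access_key_id': 'AKIA_EXAMPLE',
                        'aws_secret_access_key': 'EXAMPLE_SECRET'}
    # With no keys configured an empty dict is returned, and boto falls back to
    # IAM instance-profile credentials from the instance metadata.
    assert _get_auth_kwargs(config={}) == {}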
|
import logging
import typing
import voluptuous as vol
from homeassistant.const import (
ATTR_EDITABLE,
ATTR_MODE,
CONF_ICON,
CONF_ID,
CONF_MODE,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
SERVICE_RELOAD,
)
from homeassistant.core import callback
from homeassistant.helpers import collection
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.service
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType
_LOGGER = logging.getLogger(__name__)
DOMAIN = "input_text"
CONF_INITIAL = "initial"
CONF_MIN = "min"
CONF_MIN_VALUE = 0
CONF_MAX = "max"
CONF_MAX_VALUE = 100
CONF_PATTERN = "pattern"
CONF_VALUE = "value"
MODE_TEXT = "text"
MODE_PASSWORD = "password"
ATTR_VALUE = CONF_VALUE
ATTR_MIN = "min"
ATTR_MAX = "max"
ATTR_PATTERN = CONF_PATTERN
SERVICE_SET_VALUE = "set_value"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CREATE_FIELDS = {
vol.Required(CONF_NAME): vol.All(str, vol.Length(min=1)),
vol.Optional(CONF_MIN, default=CONF_MIN_VALUE): vol.Coerce(int),
vol.Optional(CONF_MAX, default=CONF_MAX_VALUE): vol.Coerce(int),
vol.Optional(CONF_INITIAL, ""): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_PATTERN): cv.string,
vol.Optional(CONF_MODE, default=MODE_TEXT): vol.In([MODE_TEXT, MODE_PASSWORD]),
}
UPDATE_FIELDS = {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_MIN): vol.Coerce(int),
vol.Optional(CONF_MAX): vol.Coerce(int),
vol.Optional(CONF_INITIAL): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_PATTERN): cv.string,
vol.Optional(CONF_MODE): vol.In([MODE_TEXT, MODE_PASSWORD]),
}
def _cv_input_text(cfg):
"""Configure validation helper for input box (voluptuous)."""
minimum = cfg.get(CONF_MIN)
maximum = cfg.get(CONF_MAX)
if minimum > maximum:
raise vol.Invalid(
f"Max len ({minimum}) is not greater than min len ({maximum})"
)
state = cfg.get(CONF_INITIAL)
if state is not None and (len(state) < minimum or len(state) > maximum):
raise vol.Invalid(
f"Initial value {state} length not in range {minimum}-{maximum}"
)
return cfg
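# Illustrative sketch (not part of Home Assistant itself): shows the validator
# accepting an in-range initial value and rejecting one that exceeds the max length.
def _example_cv_input_text():
    cfg = {CONF_MIN: 0, CONF_MAX: 5, CONF_INITIAL: "abc"}
    assert _cv_input_text(cfg) is cfg
    try:
        _cv_input_text({CONF_MIN: 0, CONF_MAX: 2, CONF_INITIAL: "abc"})
    except vol.Invalid:
        pass  # initial value longer than the max length is rejected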
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
vol.All(
lambda value: value or {},
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_MIN, default=CONF_MIN_VALUE): vol.Coerce(int),
vol.Optional(CONF_MAX, default=CONF_MAX_VALUE): vol.Coerce(int),
vol.Optional(CONF_INITIAL, ""): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_PATTERN): cv.string,
vol.Optional(CONF_MODE, default=MODE_TEXT): vol.In(
[MODE_TEXT, MODE_PASSWORD]
),
},
_cv_input_text,
),
)
},
extra=vol.ALLOW_EXTRA,
)
RELOAD_SERVICE_SCHEMA = vol.Schema({})
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up an input text."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
id_manager = collection.IDManager()
yaml_collection = collection.YamlCollection(
logging.getLogger(f"{__name__}.yaml_collection"), id_manager
)
collection.attach_entity_component_collection(
component, yaml_collection, InputText.from_yaml
)
storage_collection = InputTextStorageCollection(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
id_manager,
)
collection.attach_entity_component_collection(
component, storage_collection, InputText
)
await yaml_collection.async_load(
[{CONF_ID: id_, **(conf or {})} for id_, conf in config.get(DOMAIN, {}).items()]
)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, yaml_collection)
collection.attach_entity_registry_cleaner(hass, DOMAIN, DOMAIN, storage_collection)
async def reload_service_handler(service_call: ServiceCallType) -> None:
"""Reload yaml entities."""
conf = await component.async_prepare_reload(skip_reset=True)
if conf is None:
conf = {DOMAIN: {}}
await yaml_collection.async_load(
[{CONF_ID: id_, **(cfg or {})} for id_, cfg in conf.get(DOMAIN, {}).items()]
)
homeassistant.helpers.service.async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD,
reload_service_handler,
schema=RELOAD_SERVICE_SCHEMA,
)
component.async_register_entity_service(
SERVICE_SET_VALUE, {vol.Required(ATTR_VALUE): cv.string}, "async_set_value"
)
return True
class InputTextStorageCollection(collection.StorageCollection):
"""Input storage based collection."""
CREATE_SCHEMA = vol.Schema(vol.All(CREATE_FIELDS, _cv_input_text))
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
async def _process_create_data(self, data: typing.Dict) -> typing.Dict:
"""Validate the config is valid."""
return self.CREATE_SCHEMA(data)
@callback
def _get_suggested_id(self, info: typing.Dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_NAME]
async def _update_data(self, data: dict, update_data: typing.Dict) -> typing.Dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
return _cv_input_text({**data, **update_data})
class InputText(RestoreEntity):
"""Represent a text box."""
def __init__(self, config: typing.Dict):
"""Initialize a text input."""
self._config = config
self.editable = True
self._current_value = config.get(CONF_INITIAL)
@classmethod
def from_yaml(cls, config: typing.Dict) -> "InputText":
"""Return entity instance initialized from yaml storage."""
input_text = cls(config)
input_text.entity_id = f"{DOMAIN}.{config[CONF_ID]}"
input_text.editable = False
return input_text
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
def name(self):
"""Return the name of the text input entity."""
return self._config.get(CONF_NAME)
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._config.get(CONF_ICON)
@property
def _maximum(self) -> int:
"""Return max len of the text."""
return self._config[CONF_MAX]
@property
def _minimum(self) -> int:
"""Return min len of the text."""
return self._config[CONF_MIN]
@property
def state(self):
"""Return the state of the component."""
return self._current_value
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._config.get(CONF_UNIT_OF_MEASUREMENT)
@property
def unique_id(self) -> typing.Optional[str]:
"""Return unique id for the entity."""
return self._config[CONF_ID]
@property
def state_attributes(self):
"""Return the state attributes."""
return {
ATTR_EDITABLE: self.editable,
ATTR_MIN: self._minimum,
ATTR_MAX: self._maximum,
ATTR_PATTERN: self._config.get(CONF_PATTERN),
ATTR_MODE: self._config[CONF_MODE],
}
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
if self._current_value is not None:
return
state = await self.async_get_last_state()
value = state and state.state
        # Check against None because an empty string is falsy but still a valid value
if value is not None and self._minimum <= len(value) <= self._maximum:
self._current_value = value
async def async_set_value(self, value):
"""Select new value."""
if len(value) < self._minimum or len(value) > self._maximum:
_LOGGER.warning(
"Invalid value: %s (length range %s - %s)",
value,
self._minimum,
self._maximum,
)
return
self._current_value = value
self.async_write_ha_state()
async def async_update_config(self, config: typing.Dict) -> None:
"""Handle when the config is updated."""
self._config = config
self.async_write_ha_state()
|
from __future__ import division
from __future__ import print_function
import functools
import sys
import numpy as np
import six
import copy
from scipy.interpolate import PchipInterpolator as pchip
import seaborn as sns
import itertools
import pandas as pd
from matplotlib.lines import Line2D
np.seterr(divide='ignore', invalid='ignore')
def center(x):
assert type(x) is list, "Input data to center must be list"
x_stacked = np.vstack(x)
return [i - np.mean(x_stacked, 0) for i in x]
def scale(x):
assert type(x) is list, "Input data to scale must be list"
x_stacked = np.vstack(x)
m1 = np.min(x_stacked)
m2 = np.max(x_stacked - m1)
f = lambda x: 2*(np.divide(x - m1, m2)) - 1
return [f(i) for i in x]
def group_by_category(vals):
if any(isinstance(el, list) for el in vals):
vals = list(itertools.chain(*vals))
val_set = list(sorted(set(vals), key=list(vals).index))
return [val_set.index(val) for val in vals]
def vals2colors(vals, cmap='GnBu',res=100):
"""Maps values to colors
Args:
values (list or list of lists) - list of values to map to colors
cmap (str) - color map (default is 'GnBu')
res (int) - resolution of the color map (default: 100)
Returns:
list of rgb tuples
"""
# flatten if list of lists
if any(isinstance(el, list) for el in vals):
vals = list(itertools.chain(*vals))
# get palette from seaborn
palette = np.array(sns.color_palette(cmap, res))
ranks = np.digitize(vals, np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1
return [tuple(i) for i in palette[ranks, :]]
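# Minimal usage sketch (illustrative only, not used elsewhere in this module):
# three values mapped onto the default 'GnBu' palette come back as RGB tuples.
def _example_vals2colors():
    colors = vals2colors([1, 2, 3], res=10)
    assert len(colors) == 3
    assert all(len(c) == 3 for c in colors)  # each entry is an (r, g, b) tuple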
def vals2bins(vals,res=100):
"""Maps values to bins
Args:
values (list or list of lists) - list of values to map to colors
res (int) - resolution of the color map (default: 100)
Returns:
list of numbers representing bins
"""
# flatten if list of lists
if any(isinstance(el, list) for el in vals):
vals = list(itertools.chain(*vals))
return list(np.digitize(vals, np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1)
def interp_array(arr, interp_val=10):
    x = np.arange(0, len(arr), 1)
    xx = np.arange(0, len(arr) - 1, 1 / interp_val)
    q = pchip(x, arr)
    return q(xx)
def interp_array_list(arr_list, interp_val=10):
    smoothed = [np.zeros(arr_list[0].shape) for item in arr_list]
    for idx, arr in enumerate(arr_list):
        smoothed[idx] = interp_array(arr, interp_val)
    return smoothed
def parse_args(x,args):
args_list = []
for i,item in enumerate(x):
tmp = []
for ii, arg in enumerate(args):
if isinstance(arg, (tuple, list)):
if len(arg) == len(x):
tmp.append(arg[i])
else:
print('Error: arguments must be a list of the same length as x')
sys.exit(1)
else:
tmp.append(arg)
args_list.append(tuple(tmp))
return args_list
def parse_kwargs(x, kwargs):
kwargs_list = []
for i,item in enumerate(x):
tmp = {}
for kwarg in kwargs:
if isinstance(kwargs[kwarg], (tuple, list)):
if len(kwargs[kwarg]) == len(x):
tmp[kwarg]=kwargs[kwarg][i]
else:
tmp[kwarg] = None
else:
tmp[kwarg]=kwargs[kwarg]
kwargs_list.append(tmp)
return kwargs_list
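# Minimal usage sketch (illustrative only): list-valued kwargs of the same length
# as x are split per item, while scalar kwargs are broadcast to every item.
def _example_parse_kwargs():
    x = [[1, 2], [3, 4]]
    out = parse_kwargs(x, {'color': ['r', 'b'], 'alpha': 0.5})
    assert out == [{'color': 'r', 'alpha': 0.5}, {'color': 'b', 'alpha': 0.5}]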
def reshape_data(x, hue, labels):
categories = list(sorted(set(hue), key=list(hue).index))
x_stacked = np.vstack(x)
x_reshaped = [[] for _ in categories]
labels_reshaped = [[] for _ in categories]
if labels is None:
labels = [None]*len(hue)
for idx, (point, label) in enumerate(zip(hue, labels)):
x_reshaped[categories.index(point)].append(x_stacked[idx])
labels_reshaped[categories.index(point)].append(labels[idx])
return [np.vstack(i) for i in x_reshaped], labels_reshaped
def patch_lines(x):
"""
Draw lines between groups
"""
for idx in range(len(x)-1):
x[idx] = np.vstack([x[idx], x[idx+1][0,:]])
return x
def is_line(format_str):
if isinstance(format_str, np.bytes_):
format_str = format_str.decode('utf-8')
markers = list(map(lambda x: str(x), Line2D.markers.keys()))
return (format_str is None) or (all([str(symbol) not in format_str for symbol in markers]))
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
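# Minimal usage sketch (illustrative only): results are cached on the wrapped
# function keyed by the stringified arguments, so repeated calls are not recomputed.
def _example_memoize():
    calls = []

    @memoize
    def square(n):
        calls.append(n)
        return n * n

    assert square(4) == 16
    assert square(4) == 16  # second call served from square.cache
    assert calls == [4]     # the underlying function only ran once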
def get_type(data):
"""
Checks what the data type is and returns it as a string label
"""
import six
from ..datageometry import DataGeometry
if isinstance(data, list):
if isinstance(data[0], (six.string_types, six.text_type, six.binary_type)):
return 'list_str'
elif isinstance(data[0], (int, float)):
return 'list_num'
elif isinstance(data[0], np.ndarray):
return 'list_arr'
else:
raise TypeError('Unsupported data type passed. Supported types: '
'Numpy Array, Pandas DataFrame, String, List of strings'
', List of numbers')
elif isinstance(data, np.ndarray):
if isinstance(data[0][0], (six.string_types, six.text_type, six.binary_type)):
return 'arr_str'
else:
return 'arr_num'
elif isinstance(data, pd.DataFrame):
return 'df'
elif isinstance(data, (six.string_types, six.text_type, six.binary_type)):
return 'str'
elif isinstance(data, DataGeometry):
return 'geo'
else:
raise TypeError('Unsupported data type passed. Supported types: '
'Numpy Array, Pandas DataFrame, String, List of strings'
', List of numbers')
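# Minimal usage sketch (illustrative only): a few inputs and the labels they map to.
def _example_get_type():
    assert get_type(['a', 'b']) == 'list_str'
    assert get_type([1.0, 2.0]) == 'list_num'
    assert get_type(np.array([[1.0, 2.0]])) == 'arr_num'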
def convert_text(data):
dtype = get_type(data)
if dtype in ['list_str', 'str']:
data = np.array(data).reshape(-1, 1)
return data
def check_geo(geo):
""" Checks a geo and makes sure the text fields are not binary """
geo = copy.copy(geo)
def fix_item(item):
if isinstance(item, six.binary_type):
return item.decode()
return item
def fix_list(lst):
return [fix_item(i) for i in lst]
if isinstance(geo.reduce, six.binary_type):
geo.reduce = geo.reduce.decode()
for key in geo.kwargs.keys():
if geo.kwargs[key] is not None:
if isinstance(geo.kwargs[key], (list, np.ndarray)):
geo.kwargs[key] = fix_list(geo.kwargs[key])
elif isinstance(geo.kwargs[key], six.binary_type):
geo.kwargs[key] = fix_item(geo.kwargs[key])
return geo
def get_dtype(data):
"""
Checks what the data type is and returns it as a string label
"""
import six
from ..datageometry import DataGeometry
if isinstance(data, list):
return 'list'
elif isinstance(data, np.ndarray):
return 'arr'
elif isinstance(data, pd.DataFrame):
return 'df'
elif isinstance(data, (six.string_types, six.text_type, six.binary_type)):
return 'str'
elif isinstance(data, DataGeometry):
return 'geo'
else:
raise TypeError('Unsupported data type passed. Supported types: '
'Numpy Array, Pandas DataFrame, String, List of strings'
', List of numbers')
|
import asyncio
from ipaddress import IPv4Address
from typing import List, Mapping
from async_upnp_client import UpnpFactory
from async_upnp_client.aiohttp import AiohttpSessionRequester
from async_upnp_client.profiles.igd import IgdDevice
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import HomeAssistantType
import homeassistant.util.dt as dt_util
from .const import (
BYTES_RECEIVED,
BYTES_SENT,
CONF_LOCAL_IP,
DISCOVERY_LOCATION,
DISCOVERY_ST,
DISCOVERY_UDN,
DISCOVERY_USN,
DOMAIN,
DOMAIN_CONFIG,
LOGGER as _LOGGER,
PACKETS_RECEIVED,
PACKETS_SENT,
TIMESTAMP,
)
class Device:
"""Home Assistant representation of an UPnP/IGD."""
def __init__(self, igd_device):
"""Initialize UPnP/IGD device."""
self._igd_device: IgdDevice = igd_device
self._mapped_ports = []
@classmethod
async def async_discover(cls, hass: HomeAssistantType) -> List[Mapping]:
"""Discover UPnP/IGD devices."""
_LOGGER.debug("Discovering UPnP/IGD devices")
local_ip = None
if DOMAIN in hass.data and DOMAIN_CONFIG in hass.data[DOMAIN]:
local_ip = hass.data[DOMAIN][DOMAIN_CONFIG].get(CONF_LOCAL_IP)
if local_ip:
local_ip = IPv4Address(local_ip)
discovery_infos = await IgdDevice.async_search(source_ip=local_ip, timeout=10)
# add extra info and store devices
devices = []
for discovery_info in discovery_infos:
discovery_info[DISCOVERY_UDN] = discovery_info["_udn"]
discovery_info[DISCOVERY_ST] = discovery_info["st"]
discovery_info[DISCOVERY_LOCATION] = discovery_info["location"]
usn = f"{discovery_info[DISCOVERY_UDN]}::{discovery_info[DISCOVERY_ST]}"
discovery_info[DISCOVERY_USN] = usn
_LOGGER.debug("Discovered device: %s", discovery_info)
devices.append(discovery_info)
return devices
@classmethod
async def async_create_device(cls, hass: HomeAssistantType, ssdp_location: str):
"""Create UPnP/IGD device."""
# build async_upnp_client requester
session = async_get_clientsession(hass)
requester = AiohttpSessionRequester(session, True, 10)
# create async_upnp_client device
factory = UpnpFactory(requester, disable_state_variable_validation=True)
upnp_device = await factory.async_create_device(ssdp_location)
igd_device = IgdDevice(upnp_device, None)
return cls(igd_device)
@property
def udn(self) -> str:
"""Get the UDN."""
return self._igd_device.udn
@property
def name(self) -> str:
"""Get the name."""
return self._igd_device.name
@property
def manufacturer(self) -> str:
"""Get the manufacturer."""
return self._igd_device.manufacturer
@property
def model_name(self) -> str:
"""Get the model name."""
return self._igd_device.model_name
@property
def device_type(self) -> str:
"""Get the device type."""
return self._igd_device.device_type
@property
def unique_id(self) -> str:
"""Get the unique id."""
return f"{self.udn}::{self.device_type}"
def __str__(self) -> str:
"""Get string representation."""
return f"IGD Device: {self.name}/{self.udn}"
async def async_get_traffic_data(self) -> Mapping[str, any]:
"""
Get all traffic data in one go.
Traffic data consists of:
- total bytes sent
- total bytes received
- total packets sent
        - total packets received
Data is timestamped.
"""
_LOGGER.debug("Getting traffic statistics from device: %s", self)
values = await asyncio.gather(
self._igd_device.async_get_total_bytes_received(),
self._igd_device.async_get_total_bytes_sent(),
self._igd_device.async_get_total_packets_received(),
self._igd_device.async_get_total_packets_sent(),
)
return {
TIMESTAMP: dt_util.utcnow(),
BYTES_RECEIVED: values[0],
BYTES_SENT: values[1],
PACKETS_RECEIVED: values[2],
PACKETS_SENT: values[3],
}
|
import sys
def test_vcr_import_deprecation(recwarn):
if "vcr" in sys.modules:
# Remove imported module entry if already loaded in another test
del sys.modules["vcr"]
import vcr # noqa: F401
if sys.version_info[0] == 2:
assert len(recwarn) == 1
assert issubclass(recwarn[0].category, DeprecationWarning)
else:
assert len(recwarn) == 0
|
import pytest
from homeassistant.components import light
from homeassistant.components.mochad import light as mochad
from homeassistant.setup import async_setup_component
import tests.async_mock as mock
@pytest.fixture(autouse=True)
def pymochad_mock():
"""Mock pymochad."""
with mock.patch("homeassistant.components.mochad.light.device") as device:
yield device
@pytest.fixture
def light_mock(hass, brightness):
"""Mock light."""
controller_mock = mock.MagicMock()
dev_dict = {"address": "a1", "name": "fake_light", "brightness_levels": brightness}
return mochad.MochadLight(hass, controller_mock, dev_dict)
async def test_setup_adds_proper_devices(hass):
"""Test if setup adds devices."""
good_config = {
"mochad": {},
"light": {
"platform": "mochad",
"devices": [{"name": "Light1", "address": "a1"}],
},
}
assert await async_setup_component(hass, light.DOMAIN, good_config)
@pytest.mark.parametrize(
"brightness,expected", [(32, "on"), (256, "xdim 255"), (64, "xdim 63")]
)
async def test_turn_on_with_no_brightness(light_mock, expected):
"""Test turn_on."""
light_mock.turn_on()
light_mock.light.send_cmd.assert_called_once_with(expected)
@pytest.mark.parametrize(
"brightness,expected",
[
(32, [mock.call("on"), mock.call("dim 25")]),
(256, [mock.call("xdim 45")]),
(64, [mock.call("xdim 11")]),
],
)
async def test_turn_on_with_brightness(light_mock, expected):
"""Test turn_on."""
light_mock.turn_on(brightness=45)
light_mock.light.send_cmd.assert_has_calls(expected)
@pytest.mark.parametrize("brightness", [32])
async def test_turn_off(light_mock):
"""Test turn_off."""
light_mock.turn_off()
light_mock.light.send_cmd.assert_called_once_with("off")
|
import boto3
import credstash
import copy
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
def updateVersions(region="us-east-1", table="credential-store"):
'''
do a full-table scan of the credential-store,
and update the version format of every credential if it is an integer
'''
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(ProjectionExpression="#N, version, #K, contents, hmac",
ExpressionAttributeNames={"#N": "name", "#K": "key"})
items = response["Items"]
for old_item in items:
if isInt(old_item['version']):
new_item = copy.copy(old_item)
new_item['version'] = credstash.paddedInt(new_item['version'])
if new_item['version'] != old_item['version']:
secrets.put_item(Item=new_item)
secrets.delete_item(Key={'name': old_item['name'], 'version': old_item['version']})
else:
print "Skipping item: %s, %s" % (old_item['name'], old_item['version'])
if __name__ == "__main__":
updateVersions()
|
from august.authenticator import ValidationResult
from homeassistant import config_entries, setup
from homeassistant.components.august.const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DOMAIN,
VERIFICATION_CODE_KEY,
)
from homeassistant.components.august.exceptions import (
CannotConnect,
InvalidAuth,
RequireValidation,
)
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "[email protected]"
assert result2["data"] == {
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=InvalidAuth,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_user_unexpected_exception(hass):
"""Test we handle an unexpected exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=ValueError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=CannotConnect,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_needs_validate(hass):
"""Test we present validation when we need to validate."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=RequireValidation,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert len(mock_send_verification_code.mock_calls) == 1
assert result2["type"] == "form"
assert result2["errors"] is None
assert result2["step_id"] == "validation"
    # Try with the WRONG verification code to get the form back again
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=RequireValidation,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
return_value=ValidationResult.INVALID_VERIFICATION_CODE,
) as mock_validate_verification_code, patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code, patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry", return_value=True
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{VERIFICATION_CODE_KEY: "incorrect"},
)
# Make sure we do not resend the code again
# so they have a chance to retry
assert len(mock_send_verification_code.mock_calls) == 0
assert len(mock_validate_verification_code.mock_calls) == 1
assert result3["type"] == "form"
assert result3["errors"] is None
assert result3["step_id"] == "validation"
# Try with the CORRECT verification code and we setup
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
return_value=ValidationResult.VALIDATED,
) as mock_validate_verification_code, patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code, patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry", return_value=True
) as mock_setup_entry:
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{VERIFICATION_CODE_KEY: "correct"},
)
await hass.async_block_till_done()
assert len(mock_send_verification_code.mock_calls) == 0
assert len(mock_validate_verification_code.mock_calls) == 1
assert result4["type"] == "create_entry"
assert result4["title"] == "[email protected]"
assert result4["data"] == {
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_reauth(hass):
"""Test reauthenticate."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
},
unique_id="[email protected]",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=entry.data
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_PASSWORD: "new-test-password",
},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "reauth_successful"
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
|
import os
import numpy as np
from numpy.testing import assert_array_equal
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.io.kit import read_mrk
from mne.utils import (requires_mayavi, run_tests_if_main, traits_test,
modified_env)
mrk_pre_path = os.path.join(kit_data_dir, 'test_mrk_pre.sqd')
mrk_post_path = os.path.join(kit_data_dir, 'test_mrk_post.sqd')
mrk_avg_path = os.path.join(kit_data_dir, 'test_mrk.sqd')
@requires_mayavi
@traits_test
def test_combine_markers_model(tmpdir):
"""Test CombineMarkersModel Traits Model."""
from mne.gui._marker_gui import CombineMarkersModel
tempdir = str(tmpdir)
tgt_fname = os.path.join(tempdir, 'test.txt')
model = CombineMarkersModel()
# set one marker file
assert not model.mrk3.can_save
model.mrk1.file = mrk_pre_path
assert model.mrk3.can_save
assert_array_equal(model.mrk3.points, model.mrk1.points)
# setting second marker file
model.mrk2.file = mrk_pre_path
assert_array_equal(model.mrk3.points, model.mrk1.points)
# set second marker
model.mrk2.clear = True
model.mrk2.file = mrk_post_path
assert np.any(model.mrk3.points)
points_interpolate_mrk1_mrk2 = model.mrk3.points
# change interpolation method
model.mrk3.method = 'Average'
mrk_avg = read_mrk(mrk_avg_path)
assert_array_equal(model.mrk3.points, mrk_avg)
# clear second marker
model.mrk2.clear = True
assert_array_equal(model.mrk1.points, model.mrk3.points)
# I/O
model.mrk2.file = mrk_post_path
model.mrk3.save(tgt_fname)
mrk_io = read_mrk(tgt_fname)
assert_array_equal(mrk_io, model.mrk3.points)
# exclude an individual marker
model.mrk1.use = [1, 2, 3, 4]
assert_array_equal(model.mrk3.points[0], model.mrk2.points[0])
assert_array_equal(model.mrk3.points[1:], mrk_avg[1:])
# reset model
model.clear = True
model.mrk1.file = mrk_pre_path
model.mrk2.file = mrk_post_path
assert_array_equal(model.mrk3.points, points_interpolate_mrk1_mrk2)
@requires_mayavi
@traits_test
def test_combine_markers_panel(check_gui_ci):
"""Test CombineMarkersPanel."""
from mne.gui._marker_gui import CombineMarkersPanel
with modified_env(_MNE_GUI_TESTING_MODE='true'):
CombineMarkersPanel()
run_tests_if_main()
|
import sys
import os.path
from lxml import etree as _etree # due to validator __init__ signature
# some compat stuff, borrowed from lxml.html
try:
unicode
except NameError:
# Python 3
unicode = str
try:
basestring
except NameError:
# Python 3
basestring = str
__all__ = ['extract_xsd', 'extract_rng', 'iso_dsdl_include',
'iso_abstract_expand', 'iso_svrl_for_xslt1',
'svrl_validation_errors', 'schematron_schema_valid',
'stylesheet_params', 'Schematron']
# some namespaces
#FIXME: Maybe lxml should provide a dedicated place for common namespace
#FIXME: definitions?
XML_SCHEMA_NS = "http://www.w3.org/2001/XMLSchema"
RELAXNG_NS = "http://relaxng.org/ns/structure/1.0"
SCHEMATRON_NS = "http://purl.oclc.org/dsdl/schematron"
SVRL_NS = "http://purl.oclc.org/dsdl/svrl"
# some helpers
_schematron_root = '{%s}schema' % SCHEMATRON_NS
_xml_schema_root = '{%s}schema' % XML_SCHEMA_NS
_resources_dir = os.path.join(os.path.dirname(__file__), 'resources')
# the iso-schematron skeleton implementation steps aka xsl transformations
extract_xsd = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir, 'xsl', 'XSD2Schtrn.xsl')))
extract_rng = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir, 'xsl', 'RNG2Schtrn.xsl')))
iso_dsdl_include = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir, 'xsl', 'iso-schematron-xslt1',
'iso_dsdl_include.xsl')))
iso_abstract_expand = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir, 'xsl', 'iso-schematron-xslt1',
'iso_abstract_expand.xsl')))
iso_svrl_for_xslt1 = _etree.XSLT(_etree.parse(
os.path.join(_resources_dir,
'xsl', 'iso-schematron-xslt1', 'iso_svrl_for_xslt1.xsl')))
# svrl result accessors
svrl_validation_errors = _etree.XPath(
'//svrl:failed-assert', namespaces={'svrl': SVRL_NS})
# RelaxNG validator for schematron schemas
schematron_schema_valid = _etree.RelaxNG(
file=os.path.join(_resources_dir, 'rng', 'iso-schematron.rng'))
def stylesheet_params(**kwargs):
"""Convert keyword args to a dictionary of stylesheet parameters.
XSL stylesheet parameters must be XPath expressions, i.e.:
* string expressions, like "'5'"
* simple (number) expressions, like "5"
* valid XPath expressions, like "/a/b/text()"
This function converts native Python keyword arguments to stylesheet
parameters following these rules:
If an arg is a string wrap it with XSLT.strparam().
If an arg is an XPath object use its path string.
If arg is None raise TypeError.
Else convert arg to string.
"""
result = {}
for key, val in kwargs.items():
if isinstance(val, basestring):
val = _etree.XSLT.strparam(val)
elif val is None:
raise TypeError('None not allowed as a stylesheet parameter')
elif not isinstance(val, _etree.XPath):
val = unicode(val)
result[key] = val
return result
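# Minimal usage sketch (illustrative only; not part of the lxml API): string values
# are wrapped with XSLT.strparam(), other values are stringified, and None raises.
def _example_stylesheet_params():
    params = stylesheet_params(phase='#ALL', maxerrors=5)
    assert params['maxerrors'] == '5'  # non-string values become plain strings
    try:
        stylesheet_params(bad=None)
    except TypeError:
        pass  # None is not allowed as a stylesheet parameter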
# helper function for use in Schematron __init__
def _stylesheet_param_dict(paramsDict, kwargsDict):
"""Return a copy of paramsDict, updated with kwargsDict entries, wrapped as
stylesheet arguments.
kwargsDict entries with a value of None are ignored.
"""
# beware of changing mutable default arg
paramsDict = dict(paramsDict)
for k, v in kwargsDict.items():
if v is not None: # None values do not override
paramsDict[k] = v
paramsDict = stylesheet_params(**paramsDict)
return paramsDict
class Schematron(_etree._Validator):
"""An ISO Schematron validator.
Pass a root Element or an ElementTree to turn it into a validator.
Alternatively, pass a filename as keyword argument 'file' to parse from
the file system.
Schematron is a less well known, but very powerful schema language.
The main idea is to use the capabilities of XPath to put restrictions on
the structure and the content of XML documents.
The standard behaviour is to fail on ``failed-assert`` findings only
(``ASSERTS_ONLY``). To change this, you can either pass a report filter
function to the ``error_finder`` parameter (e.g. ``ASSERTS_AND_REPORTS``
or a custom ``XPath`` object), or subclass isoschematron.Schematron for
complete control of the validation process.
Built on the Schematron language 'reference' skeleton pure-xslt
implementation, the validator is created as an XSLT 1.0 stylesheet using
these steps:
0) (Extract from XML Schema or RelaxNG schema)
1) Process inclusions
2) Process abstract patterns
3) Compile the schematron schema to XSLT
The ``include`` and ``expand`` keyword arguments can be used to switch off
steps 1) and 2).
To set parameters for steps 1), 2) and 3) hand parameter dictionaries to the
keyword arguments ``include_params``, ``expand_params`` or
``compile_params``.
For convenience, the compile-step parameter ``phase`` is also exposed as a
keyword argument ``phase``. This takes precedence if the parameter is also
given in the parameter dictionary.
If ``store_schematron`` is set to True, the (included-and-expanded)
schematron document tree is stored and available through the ``schematron``
property.
If ``store_xslt`` is set to True, the validation XSLT document tree will be
stored and can be retrieved through the ``validator_xslt`` property.
With ``store_report`` set to True (default: False), the resulting validation
report document gets stored and can be accessed as the ``validation_report``
property.
Here is a usage example::
>>> from lxml import etree
>>> from lxml.isoschematron import Schematron
>>> schematron = Schematron(etree.XML('''
... <schema xmlns="http://purl.oclc.org/dsdl/schematron" >
... <pattern id="id_only_attribute">
... <title>id is the only permitted attribute name</title>
... <rule context="*">
... <report test="@*[not(name()='id')]">Attribute
... <name path="@*[not(name()='id')]"/> is forbidden<name/>
... </report>
... </rule>
... </pattern>
... </schema>'''),
... error_finder=Schematron.ASSERTS_AND_REPORTS)
>>> xml = etree.XML('''
... <AAA name="aaa">
... <BBB id="bbb"/>
... <CCC color="ccc"/>
... </AAA>
... ''')
>>> schematron.validate(xml)
False
>>> xml = etree.XML('''
... <AAA id="aaa">
... <BBB id="bbb"/>
... <CCC/>
... </AAA>
... ''')
>>> schematron.validate(xml)
True
"""
# libxml2 error categorization for validation errors
_domain = _etree.ErrorDomains.SCHEMATRONV
_level = _etree.ErrorLevels.ERROR
_error_type = _etree.ErrorTypes.SCHEMATRONV_ASSERT
# convenience definitions for common behaviours
ASSERTS_ONLY = svrl_validation_errors # Default
ASSERTS_AND_REPORTS = _etree.XPath(
'//svrl:failed-assert | //svrl:successful-report',
namespaces={'svrl': SVRL_NS})
def _extract(self, element):
"""Extract embedded schematron schema from non-schematron host schema.
This method will only be called by __init__ if the given schema document
is not a schematron schema by itself.
Must return a schematron schema document tree or None.
"""
schematron = None
if element.tag == _xml_schema_root:
schematron = self._extract_xsd(element)
elif element.nsmap[element.prefix] == RELAXNG_NS:
# RelaxNG does not have a single unique root element
schematron = self._extract_rng(element)
return schematron
# customization points
# etree.XSLT objects that provide the extract, include, expand, compile
# steps
_extract_xsd = extract_xsd
_extract_rng = extract_rng
_include = iso_dsdl_include
_expand = iso_abstract_expand
_compile = iso_svrl_for_xslt1
# etree.XPath object that determines input document validity when applied to
# the svrl result report; must return a list of result elements (empty if
# valid)
_validation_errors = ASSERTS_ONLY
def __init__(self, etree=None, file=None, include=True, expand=True,
include_params={}, expand_params={}, compile_params={},
store_schematron=False, store_xslt=False, store_report=False,
phase=None, error_finder=ASSERTS_ONLY):
super(Schematron, self).__init__()
self._store_report = store_report
self._schematron = None
self._validator_xslt = None
self._validation_report = None
if error_finder is not self.ASSERTS_ONLY:
self._validation_errors = error_finder
# parse schema document, may be a schematron schema or an XML Schema or
# a RelaxNG schema with embedded schematron rules
root = None
try:
if etree is not None:
if _etree.iselement(etree):
root = etree
else:
root = etree.getroot()
elif file is not None:
root = _etree.parse(file).getroot()
except Exception:
raise _etree.SchematronParseError(
"No tree or file given: %s" % sys.exc_info()[1])
if root is None:
raise ValueError("Empty tree")
if root.tag == _schematron_root:
schematron = root
else:
schematron = self._extract(root)
if schematron is None:
raise _etree.SchematronParseError(
"Document is not a schematron schema or schematron-extractable")
# perform the iso-schematron skeleton implementation steps to get a
# validating xslt
if include:
schematron = self._include(schematron, **include_params)
if expand:
schematron = self._expand(schematron, **expand_params)
if not schematron_schema_valid(schematron):
raise _etree.SchematronParseError(
"invalid schematron schema: %s" %
schematron_schema_valid.error_log)
if store_schematron:
self._schematron = schematron
# add new compile keyword args here if exposing them
compile_kwargs = {'phase': phase}
compile_params = _stylesheet_param_dict(compile_params, compile_kwargs)
validator_xslt = self._compile(schematron, **compile_params)
if store_xslt:
self._validator_xslt = validator_xslt
self._validator = _etree.XSLT(validator_xslt)
def __call__(self, etree):
"""Validate doc using Schematron.
Returns true if document is valid, false if not.
"""
self._clear_error_log()
result = self._validator(etree)
if self._store_report:
self._validation_report = result
errors = self._validation_errors(result)
if errors:
if _etree.iselement(etree):
fname = etree.getroottree().docinfo.URL or '<file>'
else:
fname = etree.docinfo.URL or '<file>'
for error in errors:
# Does svrl report the line number, anywhere? Don't think so.
self._append_log_message(
domain=self._domain, type=self._error_type,
level=self._level, line=0,
message=_etree.tostring(error, encoding='unicode'),
filename=fname)
return False
return True
@property
def schematron(self):
"""ISO-schematron schema document (None if object has been initialized
with store_schematron=False).
"""
return self._schematron
@property
def validator_xslt(self):
"""ISO-schematron skeleton implementation XSLT validator document (None
if object has been initialized with store_xslt=False).
"""
return self._validator_xslt
@property
def validation_report(self):
"""ISO-schematron validation result report (None if result-storing has
been turned off).
"""
return self._validation_report
|
from __future__ import print_function
import os
import io
import sys
import time
import platform
import plistlib
_stash = globals()['_stash']
try:
collapseuser = _stash.libcore.collapseuser
except AttributeError:
collapseuser = lambda p: p
IN_PYTHONISTA = sys.executable.find('Pythonista') >= 0
# Following functions for getting Pythonista and iOS version information are adapted from
# https://github.com/cclauss/Ten-lines-or-less/blob/master/pythonista_version.py
def pythonista_version(): # 2.0.1 (201000)
try:
path = os.path.abspath(os.path.join(sys.executable, '..', 'Info.plist'))
with io.open(path, "rb") as fin:
plist = plistlib.load(fin)
return '{CFBundleShortVersionString} ({CFBundleVersion})'.format(**plist)
except Exception as e:
return "UNKNOWN ({e})".format(e=repr(e))
def ios_version(): # 9.2 (64-bit iPad5,4)
try:
ios_ver, _, machine_model = platform.mac_ver()
except Exception as e:
return "UNKNOWN ({e})".format(e=repr(e))
else:
bit = platform.architecture()[0].rstrip('bit') + '-bit'
return '{} ({} {})'.format(ios_ver, bit, machine_model)
def print_stash_info():
"""
Print general StaSh information.
"""
STASH_ROOT = os.environ['STASH_ROOT']
print(_stash.text_style('StaSh v%s' % globals()['_stash'].__version__, {'color': 'blue', 'traits': ['bold']}))
print(u'{} {} ({})'.format(_stash.text_bold('Python'), os.environ['STASH_PY_VERSION'], platform.python_implementation()))
print(u'{} {}'.format(_stash.text_bold('UI'), _stash.ui.__module__))
print(u'{}: {}'.format(_stash.text_bold('root'), collapseuser(STASH_ROOT)))
_stat = os.stat(os.path.join(STASH_ROOT, 'core.py'))
last_modified = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(_stat.st_mtime))
print(u'{}: {}'.format(_stash.text_bold('core.py'), last_modified))
print(u'{}: {}'.format(_stash.text_bold('SELFUPDATE_TARGET'), os.environ['SELFUPDATE_TARGET']))
def print_pythonista_info():
"""
Print Pythonista related information.
"""
print(u'{} {}'.format(_stash.text_bold('Pythonista'), pythonista_version()))
print(u'{} {}'.format(_stash.text_bold('iOS'), ios_version()))
def print_paths():
"""
Print path related information.
"""
print(_stash.text_bold('BIN_PATH:'))
for p in os.environ['BIN_PATH'].split(':'):
print(' {}'.format(collapseuser(p)))
print(_stash.text_bold('PYTHONPATH:'))
for p in os.environ['PYTHONPATH'].split(':'):
print(' {}'.format(collapseuser(p)))
def print_machine_info():
"""
Print information about the current machine.
"""
if IN_PYTHONISTA:
print_pythonista_info()
print(u"{} {}".format(_stash.text_bold("Platform"), platform.platform()))
def print_libs():
"""
Print loaded libs.
"""
print(_stash.text_bold("Loaded libraries:"))
for an in dir(_stash):
if an.startswith("lib"):
print(u" {}".format(an))
def main():
print_stash_info()
print_machine_info()
print_paths()
print_libs()
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import logging
import os
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.configs import spec
from perfkitbenchmarker.providers import profitbricks
from perfkitbenchmarker.providers.profitbricks import profitbricks_disk
from perfkitbenchmarker.providers.profitbricks import util
import six
import yaml
PROFITBRICKS_API = profitbricks.PROFITBRICKS_API
FLAGS = flags.FLAGS
TIMEOUT = 1500 # 25 minutes
class CustomMachineTypeSpec(spec.BaseSpec):
"""Properties of a ProfitBricks custom machine type.
Attributes:
cores: int. Number of CPU cores for a custom VM.
ram: int. Amount of RAM in MBs for a custom VM.
"""
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(CustomMachineTypeSpec, cls)._GetOptionDecoderConstructions()
result.update({'cores': (option_decoders.IntDecoder, {'min': 1}),
'ram': (option_decoders.IntDecoder, {'min': 1024})})
return result
class MachineTypeDecoder(option_decoders.TypeVerifier):
"""Decodes the machine_type option of a ProfitBricks VM config."""
def __init__(self, **kwargs):
super(MachineTypeDecoder, self).__init__((six.string_types, dict), **kwargs)
def Decode(self, value, component_full_name, flag_values):
"""Decodes the machine_type option of a ProfitBricks VM config.
Args:
value: Either a string name of a PB machine type or a dict containing
'cores' and 'ram' keys describing a custom VM.
component_full_name: string. Fully qualified name of the configurable
component containing the config option.
flag_values: flags.FlagValues. Runtime flag values to be propagated to
BaseSpec constructors.
Returns:
If value is a string, returns it unmodified. Otherwise, returns the
decoded CustomMachineTypeSpec.
Raises:
errors.Config.InvalidValue upon invalid input value.
"""
super(MachineTypeDecoder, self).Decode(value, component_full_name,
flag_values)
if isinstance(value, six.string_types):
return value
return CustomMachineTypeSpec(self._GetOptionFullName(component_full_name),
flag_values=flag_values, **value)
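# Decoding sketch (illustrative values): a preset machine type name such as
# 'small' is returned unchanged, whereas a dict such as
# {'cores': 2, 'ram': 2048} is decoded into a CustomMachineTypeSpec with
# cores=2 and ram=2048.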
class ProfitBricksVmSpec(virtual_machine.BaseVmSpec):
"""Object containing the information needed to create a
ProfitBricksVirtualMachine.
Attributes:
ram: None or int. RAM value in MB for custom ProfitBricks VM.
cores: None or int. CPU cores value for custom ProfitBricks VM.
"""
CLOUD = providers.PROFITBRICKS
def __init__(self, *args, **kwargs):
super(ProfitBricksVmSpec, self).__init__(*args, **kwargs)
if isinstance(self.machine_type, CustomMachineTypeSpec):
logging.info('Using custom hardware configuration.')
self.cores = self.machine_type.cores
self.ram = self.machine_type.ram
self.machine_type = 'Custom (RAM: {}, Cores: {})'.format(self.ram,
self.cores)
else:
logging.info('Using preset hardware configuration.')
self.ram, self.cores = util.ReturnFlavor(self.machine_type)
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
"""Modifies config options based on runtime flag values.
Args:
config_values: dict mapping config option names to provided values. May
be modified by this function.
flag_values: flags.FlagValues. Runtime flags that may override the
provided config values.
"""
super(ProfitBricksVmSpec, cls)._ApplyFlags(config_values, flag_values)
if flag_values['machine_type'].present:
config_values['machine_type'] = yaml.safe_load(flag_values.machine_type)
if flag_values['profitbricks_location'].present:
config_values['location'] = flag_values.profitbricks_location
if flag_values['profitbricks_boot_volume_type'].present:
config_values['boot_volume_type'] = \
flag_values.profitbricks_boot_volume_type
if flag_values['profitbricks_boot_volume_size'].present:
config_values['boot_volume_size'] = \
flag_values.profitbricks_boot_volume_size
if flag_values['availability_zone'].present:
config_values['availability_zone'] = flag_values.availability_zone
if flag_values['profitbricks_image_alias'].present:
config_values['image_alias'] = flag_values.profitbricks_image_alias
@classmethod
def _GetOptionDecoderConstructions(cls):
"""Gets decoder classes and constructor args for each configurable option.
Returns:
dict. Maps option name string to a (ConfigOptionDecoder class, dict) pair.
The pair specifies a decoder class and its __init__() keyword
arguments to construct in order to decode the named option.
"""
result = super(ProfitBricksVmSpec, cls)._GetOptionDecoderConstructions()
result.update({
'machine_type': (MachineTypeDecoder, {}),
'location': (option_decoders.StringDecoder, {'default': 'us/las'}),
'image_alias': (option_decoders.StringDecoder, {'default': None}),
'boot_volume_type': (option_decoders.StringDecoder, {'default': 'HDD'}),
'boot_volume_size': (option_decoders.IntDecoder, {'default': 10,
'min': 10}),
'availability_zone': (option_decoders.StringDecoder,
{'default': 'AUTO'})})
return result
class ProfitBricksVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a ProfitBricks Virtual Machine."""
CLOUD = providers.PROFITBRICKS
DEFAULT_IMAGE = None
def __init__(self, vm_spec):
"""Initialize a ProfitBricks virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(ProfitBricksVirtualMachine, self).__init__(vm_spec)
# Get user authentication credentials
user_config_path = os.path.expanduser(FLAGS.profitbricks_config)
with open(user_config_path) as f:
user_creds = f.read().rstrip('\n')
# Encode to bytes for base64 and decode back so the token is a str on
# both Python 2 and Python 3 when used in the Authorization header.
self.user_token = base64.b64encode(user_creds.encode()).decode()
self.server_id = None
self.server_status = None
self.dc_id = None
self.dc_status = None
self.lan_id = None
self.lan_status = None
self.max_local_disks = 1
self.local_disk_counter = 0
self.ram = vm_spec.ram
self.cores = vm_spec.cores
self.machine_type = vm_spec.machine_type
self.image = self.image or self.DEFAULT_IMAGE
self.image_alias = vm_spec.image_alias
self.boot_volume_type = vm_spec.boot_volume_type
self.boot_volume_size = vm_spec.boot_volume_size
self.location = vm_spec.location
self.user_name = 'root'
self.availability_zone = vm_spec.availability_zone
self.header = {
'Authorization': 'Basic %s' % self.user_token,
'Content-Type': 'application/vnd.profitbricks.resource+json',
'User-Agent': 'profitbricks-perfkitbenchmarker',
}
def _Create(self):
"""Create a ProfitBricks VM instance."""
# Grab ssh pub key to inject into new VM
with open(self.ssh_public_key) as f:
public_key = f.read().rstrip('\n')
if self.image_alias is None:
# Find an Ubuntu image that matches our location
self.image = util.ReturnImage(self.header, self.location)
# Create server POST body
new_server = {
'properties': {
'name': self.name,
'ram': self.ram,
'cores': self.cores,
'availabilityZone': self.zone
},
'entities': {
'volumes': {
'items': [
{
'properties': {
'size': self.boot_volume_size,
'name': 'boot volume',
'image': self.image,
'imageAlias': self.image_alias,
'type': self.boot_volume_type,
'sshKeys': [public_key],
'availabilityZone': self.availability_zone
}
}
]
},
'nics': {
'items': [
{
'properties': {
'name': 'nic1',
'lan': self.lan_id
}
}
]
}
}
}
# Build Server URL
url = '%s/datacenters/%s/servers' % (PROFITBRICKS_API, self.dc_id)
# Provision Server
r = util.PerformRequest('post', url, self.header, json=new_server)
logging.info('Creating VM: %s' % self.name)
# Parse Required values from response
self.server_status = r.headers['Location']
response = r.json()
self.server_id = response['id']
# The freshly created server will be in a locked and unusable
# state for a while, and it cannot be deleted or modified in
# this state. Wait for the action to finish and check the
# reported result.
if not self._WaitUntilReady(self.server_status):
raise errors.Error('VM creation failed, see log.')
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's public IP address."""
# Build URL
url = '%s/datacenters/%s/servers/%s?depth=5' % (PROFITBRICKS_API,
self.dc_id,
self.server_id)
# Perform Request
r = util.PerformRequest('get', url, self.header)
response = r.json()
nic = response['entities']['nics']['items'][0]
self.ip_address = nic['properties']['ips'][0]
def _Delete(self):
"""Delete a ProfitBricks VM."""
# Build URL
url = '%s/datacenters/%s/servers/%s' % (PROFITBRICKS_API, self.dc_id,
self.server_id)
# Make call
logging.info('Deleting VM: %s' % self.server_id)
r = util.PerformRequest('delete', url, self.header)
# Check to make sure deletion has finished
delete_status = r.headers['Location']
if not self._WaitUntilReady(delete_status):
raise errors.Error('VM deletion failed, see log.')
def _CreateDependencies(self):
"""Create a data center and LAN prior to creating VM."""
# Create data center
self.dc_id, self.dc_status = util.CreateDatacenter(self.header,
self.location)
if not self._WaitUntilReady(self.dc_status):
raise errors.Error('Data center creation failed, see log.')
# Create LAN
self.lan_id, self.lan_status = util.CreateLan(self.header,
self.dc_id)
if not self._WaitUntilReady(self.lan_status):
raise errors.Error('LAN creation failed, see log.')
def _DeleteDependencies(self):
"""Delete a data center and LAN."""
# Build URL
url = '%s/datacenters/%s' % (PROFITBRICKS_API, self.dc_id)
# Make call to delete data center
logging.info('Deleting Datacenter: %s' % self.dc_id)
r = util.PerformRequest('delete', url, self.header)
# Check to make sure deletion has finished
delete_status = r.headers['Location']
if not self._WaitUntilReady(delete_status):
raise errors.Error('Data center deletion failed, see log.')
@vm_util.Retry(timeout=TIMEOUT, log_errors=False)
def _WaitUntilReady(self, status_url):
"""Returns true if the ProfitBricks resource is ready."""
# Poll resource for status update
logging.info('Polling ProfitBricks resource.')
r = util.PerformRequest('get', status_url, self.header)
response = r.json()
status = response['metadata']['status']
# Keep polling resource until a "DONE" state is returned
if status != 'DONE':
raise Exception # Exception triggers vm_util.Retry to go again
return True
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
if disk_spec.disk_type != disk.STANDARD:
raise errors.Error('ProfitBricks does not support disk type %s.' %
disk_spec.disk_type)
if self.scratch_disks:
# We have a "disk" already, don't add more.
raise errors.Error('ProfitBricks does not require '
'a separate disk.')
# Just create a local directory at the specified path, don't mount
# anything.
self.RemoteCommand('sudo mkdir -p {0} && sudo chown -R $USER:$USER {0}'
.format(disk_spec.mount_point))
self.scratch_disks.append(profitbricks_disk.ProfitBricksDisk(
disk_spec))
|
from datetime import timedelta
import logging
from ProgettiHWSW.relay import Relay
import async_timeout
from homeassistant.components.switch import SwitchEntity
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from . import setup_switch
from .const import DEFAULT_POLLING_INTERVAL_SEC, DOMAIN
_LOGGER = logging.getLogger(DOMAIN)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the switches from a config entry."""
board_api = hass.data[DOMAIN][config_entry.entry_id]
relay_count = config_entry.data["relay_count"]
switches = []
async def async_update_data():
"""Fetch data from API endpoint of board."""
async with async_timeout.timeout(5):
return await board_api.get_switches()
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="switch",
update_method=async_update_data,
update_interval=timedelta(seconds=DEFAULT_POLLING_INTERVAL_SEC),
)
await coordinator.async_refresh()
for i in range(1, int(relay_count) + 1):
switches.append(
ProgettihwswSwitch(
coordinator,
f"Relay #{i}",
setup_switch(board_api, i, config_entry.data[f"relay_{str(i)}"]),
)
)
async_add_entities(switches)
class ProgettihwswSwitch(CoordinatorEntity, SwitchEntity):
"""Represent a switch entity."""
def __init__(self, coordinator, name, switch: Relay):
"""Initialize the values."""
super().__init__(coordinator)
self._switch = switch
self._name = name
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
await self._switch.control(True)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
await self._switch.control(False)
await self.coordinator.async_request_refresh()
async def async_toggle(self, **kwargs):
"""Toggle the state of switch."""
await self._switch.toggle()
await self.coordinator.async_request_refresh()
@property
def name(self):
"""Return the switch name."""
return self._name
@property
def is_on(self):
"""Get switch state."""
return self.coordinator.data[self._switch.id]
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy.random as random
import numpy as np
import matplotlib.pyplot as plt
from filterpy.common import Saver
from filterpy.kalman import SquareRootKalmanFilter, KalmanFilter
DO_PLOT = False
def test_noisy_1d():
f = KalmanFilter (dim_x=2, dim_z=1)
f.x = np.array([[2.],
[0.]]) # initial state (location and velocity)
f.F = np.array([[1.,1.],
[0.,1.]]) # state transition matrix
f.H = np.array([[1.,0.]]) # Measurement function
f.P *= 1000. # covariance matrix
f.R *= 5 # state uncertainty
f.Q *= 0.0001 # process uncertainty
fsq = SquareRootKalmanFilter (dim_x=2, dim_z=1)
fsq.x = np.array([[2.],
[0.]]) # initial state (location and velocity)
fsq.F = np.array([[1.,1.],
[0.,1.]]) # state transition matrix
fsq.H = np.array([[1.,0.]]) # Measurement function
fsq.P = np.eye(2) * 1000. # covariance matrix
fsq.R *= 5 # state uncertainty
fsq.Q *= 0.0001 # process uncertainty
# does __repr__ work?
str(fsq)
measurements = []
results = []
zs = []
s = Saver(fsq)
for t in range (100):
# create measurement = t plus white noise
z = t + random.randn()*20
zs.append(z)
# perform kalman filtering
f.update(z)
f.predict()
fsq.update(z)
fsq.predict()
assert abs(f.x[0,0] - fsq.x[0,0]) < 1.e-12
assert abs(f.x[1,0] - fsq.x[1,0]) < 1.e-12
# save data
results.append (f.x[0,0])
measurements.append(z)
s.save()
s.to_array()
for i in range(f.P.shape[0]):
assert abs(f.P[i,i] - fsq.P[i,i]) < 0.01
# now do a batch run with the stored z values so we can test that
# it is working the same as the recursive implementation.
# give slightly different P so result is slightly different
f.x = np.array([[2.,0]]).T
f.P = np.eye(2)*100.
m,c,_,_ = f.batch_filter(zs,update_first=False)
# plot data
if DO_PLOT:
p1, = plt.plot(measurements,'r', alpha=0.5)
p2, = plt.plot (results,'b')
p4, = plt.plot(m[:,0], 'm')
p3, = plt.plot ([0,100],[0,100], 'g') # perfect result
plt.legend([p1,p2, p3, p4],
["noisy measurement", "KF output", "ideal", "batch"], loc=4)
plt.show()
if __name__ == "__main__":
DO_PLOT = True
test_noisy_1d()
|
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import random_flip
class TestRandomFlip(unittest.TestCase):
def test_random_flip(self):
img = np.random.uniform(size=(3, 24, 24))
out, param = random_flip(
img, y_random=True, x_random=True, return_param=True)
y_flip = param['y_flip']
x_flip = param['x_flip']
expected = img
if y_flip:
expected = expected[:, ::-1, :]
if x_flip:
expected = expected[:, :, ::-1]
np.testing.assert_equal(out, expected)
testing.run_module(__name__, __file__)
|
try:
input = raw_input # For Python2 compatibility
except NameError:
pass
import turtle
from lark import Lark
turtle_grammar = """
start: instruction+
instruction: MOVEMENT NUMBER -> movement
| "c" COLOR [COLOR] -> change_color
| "fill" code_block -> fill
| "repeat" NUMBER code_block -> repeat
code_block: "{" instruction+ "}"
MOVEMENT: "f"|"b"|"l"|"r"
COLOR: LETTER+
%import common.LETTER
%import common.INT -> NUMBER
%import common.WS
%ignore WS
"""
parser = Lark(turtle_grammar)
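# A tiny program accepted by the grammar above (illustrative):
#
#   c red
#   repeat 4 { f 100 r 90 }
#
# It changes the pen colour to red and draws a square; "c red yellow" would
# additionally set the fill colour used by a "fill { ... }" block.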
def run_instruction(t):
if t.data == 'change_color':
turtle.color(*t.children) # We just pass the color names as-is
elif t.data == 'movement':
name, number = t.children
{ 'f': turtle.fd,
'b': turtle.bk,
'l': turtle.lt,
'r': turtle.rt, }[name](int(number))
elif t.data == 'repeat':
count, block = t.children
for i in range(int(count)):
run_instruction(block)
elif t.data == 'fill':
turtle.begin_fill()
run_instruction(t.children[0])
turtle.end_fill()
elif t.data == 'code_block':
for cmd in t.children:
run_instruction(cmd)
else:
raise SyntaxError('Unknown instruction: %s' % t.data)
def run_turtle(program):
parse_tree = parser.parse(program)
for inst in parse_tree.children:
run_instruction(inst)
def main():
while True:
code = input('> ')
try:
run_turtle(code)
except Exception as e:
print(e)
def test():
text = """
c red yellow
fill { repeat 36 {
f200 l170
}}
"""
run_turtle(text)
if __name__ == '__main__':
# test()
main()
|
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import providers
class MesosProviderInfo(provider_info.BaseProviderInfo):
SUPPORTED_BENCHMARKS = ['aerospike_ycsb', 'block_storage_workload',
'cassandra_stress', 'cassandra_ycsb', 'cluster_boot',
'copy_throughput', 'fio', 'hbase_ycsb', 'hpcc',
'iperf', 'mongodb_ycsb', 'netperf', 'oldisim', 'ping',
'redis', 'redis_ycsb', 'scimark2', 'silo',
'sysbench_oltp']
UNSUPPORTED_BENCHMARKS = ['bonnieplusplus', 'mysql_service']
CLOUD = providers.MESOS
@classmethod
def IsBenchmarkSupported(cls, benchmark):
if benchmark in cls.SUPPORTED_BENCHMARKS:
return True
elif benchmark in cls.UNSUPPORTED_BENCHMARKS:
return False
else:
return None
|
import asyncio
from datetime import timedelta
import logging
import time
from miio import ChuangmiIr, DeviceException # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.remote import (
ATTR_DELAY_SECS,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
PLATFORM_SCHEMA,
RemoteEntity,
)
from homeassistant.const import (
CONF_COMMAND,
CONF_HOST,
CONF_NAME,
CONF_TIMEOUT,
CONF_TOKEN,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.util.dt import utcnow
from .const import SERVICE_LEARN, SERVICE_SET_REMOTE_LED_OFF, SERVICE_SET_REMOTE_LED_ON
_LOGGER = logging.getLogger(__name__)
DATA_KEY = "remote.xiaomi_miio"
CONF_SLOT = "slot"
CONF_COMMANDS = "commands"
DEFAULT_TIMEOUT = 10
DEFAULT_SLOT = 1
COMMAND_SCHEMA = vol.Schema(
{vol.Required(CONF_COMMAND): vol.All(cv.ensure_list, [cv.string])}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_SLOT, default=DEFAULT_SLOT): vol.All(
int, vol.Range(min=1, max=1000000)
),
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_COMMANDS, default={}): cv.schema_with_slug_keys(
COMMAND_SCHEMA
),
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Xiaomi IR Remote (Chuangmi IR) platform."""
host = config[CONF_HOST]
token = config[CONF_TOKEN]
# Create handler
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
# The Chuang Mi IR Remote Controller wants to be re-discovered every
# 5 minutes. As long as polling is disabled the device should be
# re-discovered (lazy_discover=False) in front of every command.
device = ChuangmiIr(host, token, lazy_discover=False)
# Check that we can communicate with device.
try:
device_info = await hass.async_add_executor_job(device.info)
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
except DeviceException as ex:
_LOGGER.error("Device unavailable or token incorrect: %s", ex)
raise PlatformNotReady from ex
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
friendly_name = config.get(CONF_NAME, f"xiaomi_miio_{host.replace('.', '_')}")
slot = config.get(CONF_SLOT)
timeout = config.get(CONF_TIMEOUT)
xiaomi_miio_remote = XiaomiMiioRemote(
friendly_name, device, unique_id, slot, timeout, config.get(CONF_COMMANDS)
)
hass.data[DATA_KEY][host] = xiaomi_miio_remote
async_add_entities([xiaomi_miio_remote])
async def async_service_led_off_handler(entity, service):
"""Handle set_led_off command."""
await hass.async_add_executor_job(entity.device.set_indicator_led, False)
async def async_service_led_on_handler(entity, service):
"""Handle set_led_on command."""
await hass.async_add_executor_job(entity.device.set_indicator_led, True)
async def async_service_learn_handler(entity, service):
"""Handle a learn command."""
device = entity.device
slot = service.data.get(CONF_SLOT, entity.slot)
await hass.async_add_executor_job(device.learn, slot)
timeout = service.data.get(CONF_TIMEOUT, entity.timeout)
_LOGGER.info("Press the key you want Home Assistant to learn")
start_time = utcnow()
while (utcnow() - start_time) < timedelta(seconds=timeout):
message = await hass.async_add_executor_job(device.read, slot)
_LOGGER.debug("Message received from device: '%s'", message)
if "code" in message and message["code"]:
log_msg = "Received command is: {}".format(message["code"])
_LOGGER.info(log_msg)
hass.components.persistent_notification.async_create(
log_msg, title="Xiaomi Miio Remote"
)
return
if "error" in message and message["error"]["message"] == "learn timeout":
await hass.async_add_executor_job(device.learn, slot)
await asyncio.sleep(1)
_LOGGER.error("Timeout. No infrared command captured")
hass.components.persistent_notification.async_create(
"Timeout. No infrared command captured", title="Xiaomi Miio Remote"
)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_LEARN,
{
vol.Optional(CONF_TIMEOUT, default=10): cv.positive_int,
vol.Optional(CONF_SLOT, default=1): vol.All(
int, vol.Range(min=1, max=1000000)
),
},
async_service_learn_handler,
)
platform.async_register_entity_service(
SERVICE_SET_REMOTE_LED_ON,
{},
async_service_led_on_handler,
)
platform.async_register_entity_service(
SERVICE_SET_REMOTE_LED_OFF,
{},
async_service_led_off_handler,
)
class XiaomiMiioRemote(RemoteEntity):
"""Representation of a Xiaomi Miio Remote device."""
def __init__(self, friendly_name, device, unique_id, slot, timeout, commands):
"""Initialize the remote."""
self._name = friendly_name
self._device = device
self._unique_id = unique_id
self._slot = slot
self._timeout = timeout
self._state = False
self._commands = commands
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the remote."""
return self._name
@property
def device(self):
"""Return the remote object."""
return self._device
@property
def slot(self):
"""Return the slot to save learned command."""
return self._slot
@property
def timeout(self):
"""Return the timeout for learning command."""
return self._timeout
@property
def is_on(self):
"""Return False if device is unreachable, else True."""
try:
self.device.info()
return True
except DeviceException:
return False
@property
def should_poll(self):
"""We should not be polled for device up state."""
return False
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
_LOGGER.error(
"Device does not support turn_on, "
"please use 'remote.send_command' to send commands"
)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
_LOGGER.error(
"Device does not support turn_off, "
"please use 'remote.send_command' to send commands"
)
def _send_command(self, payload):
"""Send a command."""
_LOGGER.debug("Sending payload: '%s'", payload)
try:
self.device.play(payload)
except DeviceException as ex:
_LOGGER.error(
"Transmit of IR command failed, %s, exception: %s", payload, ex
)
def send_command(self, command, **kwargs):
"""Send a command."""
num_repeats = kwargs.get(ATTR_NUM_REPEATS)
delay = kwargs.get(ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
for _ in range(num_repeats):
for payload in command:
if payload in self._commands:
for local_payload in self._commands[payload][CONF_COMMAND]:
self._send_command(local_payload)
else:
self._send_command(payload)
time.sleep(delay)
|
from functools import lru_cache
import logging
from typing import Any, Dict
import voluptuous as vol
from homeassistant.core import Event
from homeassistant.helpers import config_validation as cv
from homeassistant.util.json import (
find_paths_unserializable_data,
format_unserializable_data,
)
from homeassistant.util.yaml.loader import JSON_TYPE
from . import const
_LOGGER = logging.getLogger(__name__)
# mypy: allow-untyped-defs
# Minimal requirements of a message
MINIMAL_MESSAGE_SCHEMA = vol.Schema(
{vol.Required("id"): cv.positive_int, vol.Required("type"): cv.string},
extra=vol.ALLOW_EXTRA,
)
# Base schema to extend by message handlers
BASE_COMMAND_MESSAGE_SCHEMA = vol.Schema({vol.Required("id"): cv.positive_int})
IDEN_TEMPLATE = "__IDEN__"
IDEN_JSON_TEMPLATE = '"__IDEN__"'
def result_message(iden: int, result: Any = None) -> Dict:
"""Return a success result message."""
return {"id": iden, "type": const.TYPE_RESULT, "success": True, "result": result}
def error_message(iden: int, code: str, message: str) -> Dict:
"""Return an error result message."""
return {
"id": iden,
"type": const.TYPE_RESULT,
"success": False,
"error": {"code": code, "message": message},
}
def event_message(iden: JSON_TYPE, event: Any) -> Dict:
"""Return an event message."""
return {"id": iden, "type": "event", "event": event}
def cached_event_message(iden: int, event: Event) -> str:
"""Return an event message.
Serialize to json once per message.
Since we can have many clients connected that are
all getting many of the same events (mostly state changed)
we can avoid serializing the same data for each connection.
"""
return _cached_event_message(event).replace(IDEN_JSON_TEMPLATE, str(iden), 1)
@lru_cache(maxsize=128)
def _cached_event_message(event: Event) -> str:
"""Cache and serialize the event to json.
The IDEN_TEMPLATE placeholder is used as the message id and is replaced
with the actual iden in cached_event_message.
"""
return message_to_json(event_message(IDEN_TEMPLATE, event))
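# Sketch of the caching trick (payload layout is illustrative):
# _cached_event_message(event) produces JSON containing the placeholder id,
#   '{"id": "__IDEN__", "type": "event", "event": {...}}'
# and cached_event_message(5, event) performs a single string replace to get
#   '{"id": 5, "type": "event", "event": {...}}'
# so each event is serialized once, no matter how many subscribers receive it.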
def message_to_json(message: Any) -> str:
"""Serialize a websocket message to json."""
try:
return const.JSON_DUMP(message)
except (ValueError, TypeError):
_LOGGER.error(
"Unable to serialize to JSON. Bad data found at %s",
format_unserializable_data(
find_paths_unserializable_data(message, dump=const.JSON_DUMP)
),
)
return const.JSON_DUMP(
error_message(
message["id"], const.ERR_UNKNOWN_ERROR, "Invalid JSON in response"
)
)
|
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.components.rfxtrx.const import EVENT_RFXTRX_EVENT
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
from tests.async_mock import call
from tests.common import MockConfigEntry
from tests.components.rfxtrx.conftest import create_rfx_test_cfg
async def test_valid_config(hass):
"""Test configuration."""
assert await async_setup_component(
hass,
"rfxtrx",
{
"rfxtrx": {
"device": "/dev/serial/by-id/usb"
+ "-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0",
}
},
)
async def test_valid_config2(hass):
"""Test configuration."""
assert await async_setup_component(
hass,
"rfxtrx",
{
"rfxtrx": {
"device": "/dev/serial/by-id/usb"
+ "-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0",
"debug": True,
}
},
)
async def test_invalid_config(hass):
"""Test configuration."""
assert not await async_setup_component(hass, "rfxtrx", {"rfxtrx": {}})
assert not await async_setup_component(
hass,
"rfxtrx",
{
"rfxtrx": {
"device": "/dev/serial/by-id/usb"
+ "-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0",
"invalid_key": True,
}
},
)
async def test_fire_event(hass, rfxtrx):
"""Test fire event."""
entry_data = create_rfx_test_cfg(
device="/dev/serial/by-id/usb-RFXCOM_RFXtrx433_A1Y0NJGR-if00-port0",
automatic_add=True,
devices={
"0b1100cd0213c7f210010f51": {"fire_event": True},
"0716000100900970": {"fire_event": True},
},
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
assert event.event_type == "rfxtrx_event"
calls.append(event.data)
hass.bus.async_listen(EVENT_RFXTRX_EVENT, record_event)
await rfxtrx.signal("0b1100cd0213c7f210010f51")
await rfxtrx.signal("0716000100900970")
assert calls == [
{
"packet_type": 17,
"sub_type": 0,
"type_string": "AC",
"id_string": "213c7f2:16",
"data": "0b1100cd0213c7f210010f51",
"values": {"Command": "On", "Rssi numeric": 5},
},
{
"packet_type": 22,
"sub_type": 0,
"type_string": "Byron SX",
"id_string": "00:90",
"data": "0716000100900970",
"values": {"Command": "Chime", "Rssi numeric": 7, "Sound": 9},
},
]
async def test_send(hass, rfxtrx):
"""Test configuration."""
entry_data = create_rfx_test_cfg(device="/dev/null", devices={})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.services.async_call(
"rfxtrx", "send", {"event": "0a520802060101ff0f0269"}, blocking=True
)
assert rfxtrx.transport.send.mock_calls == [
call(bytearray(b"\x0a\x52\x08\x02\x06\x01\x01\xff\x0f\x02\x69"))
]
|
import logging
from pyfido.client import PyFidoError
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.fido import sensor as fido
from tests.async_mock import MagicMock, patch
from tests.common import assert_setup_component
CONTRACT = "123456789"
class FidoClientMock:
"""Fake Fido client."""
def __init__(self, username, password, timeout=None, httpsession=None):
"""Fake Fido client init."""
pass
def get_phone_numbers(self):
"""Return Phone numbers."""
return ["1112223344"]
def get_data(self):
"""Return fake fido data."""
return {"balance": 160.12, "1112223344": {"data_remaining": 100.33}}
async def fetch_data(self):
"""Return fake fetching data."""
pass
class FidoClientMockError(FidoClientMock):
"""Fake Fido client error."""
async def fetch_data(self):
"""Return fake fetching data."""
raise PyFidoError("Fake Error")
async def test_fido_sensor(loop, hass):
"""Test the Fido number sensor."""
with patch("homeassistant.components.fido.sensor.FidoClient", new=FidoClientMock):
config = {
"sensor": {
"platform": "fido",
"name": "fido",
"username": "myusername",
"password": "password",
"monitored_variables": ["balance", "data_remaining"],
}
}
with assert_setup_component(1):
await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
state = hass.states.get("sensor.fido_1112223344_balance")
assert state.state == "160.12"
assert state.attributes.get("number") == "1112223344"
state = hass.states.get("sensor.fido_1112223344_data_remaining")
assert state.state == "100.33"
async def test_error(hass, caplog):
"""Test the Fido sensor errors."""
caplog.set_level(logging.ERROR)
config = {}
fake_async_add_entities = MagicMock()
with patch("homeassistant.components.fido.sensor.FidoClient", FidoClientMockError):
await fido.async_setup_platform(hass, config, fake_async_add_entities)
assert fake_async_add_entities.called is False
|
import argparse
import chainer
from chainer import iterators
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.datasets import sbd_instance_segmentation_label_names
from chainercv.datasets import SBDInstanceSegmentationDataset
from chainercv.evaluations import eval_instance_segmentation_coco
from chainercv.evaluations import eval_instance_segmentation_voc
from chainercv.experimental.links import FCISResNet101
from chainercv.links import MaskRCNNFPNResNet101
from chainercv.links import MaskRCNNFPNResNet50
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
models = {
# model: (class, dataset -> pretrained_model, default batchsize)
'fcis_resnet101': (FCISResNet101, {'sbd': 'sbd', 'coco': 'coco'}, 1),
'mask_rcnn_fpn_resnet50': (MaskRCNNFPNResNet50,
{}, 1),
'mask_rcnn_fpn_resnet101': (MaskRCNNFPNResNet101,
{}, 1),
}
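# Example invocation (the script name and GPU id are assumed here, they are
# not defined in this file):
#   python eval_instance_segmentation.py --dataset coco \
#       --model mask_rcnn_fpn_resnet50 --gpu 0
# With --dataset sbd the VOC-style metric (eval_instance_segmentation_voc)
# is reported instead of the COCO metrics.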
def setup(dataset, model_name, pretrained_model, batchsize):
cls, pretrained_models, default_batchsize = models[model_name]
dataset_name = dataset
if pretrained_model is None:
pretrained_model = pretrained_models.get(dataset_name, dataset_name)
if batchsize is None:
batchsize = default_batchsize
if dataset_name == 'sbd':
dataset = SBDInstanceSegmentationDataset(split='val')
label_names = sbd_instance_segmentation_label_names
model = cls(
n_fg_class=len(label_names), pretrained_model=pretrained_model)
model.use_preset('evaluate')
def eval_(out_values, rest_values):
pred_masks, pred_labels, pred_scores = out_values
gt_masks, gt_labels = rest_values
result = eval_instance_segmentation_voc(
pred_masks, pred_labels, pred_scores,
gt_masks, gt_labels, use_07_metric=True)
print('')
print('mAP: {:f}'.format(result['map']))
for l, name in enumerate(sbd_instance_segmentation_label_names):
if result['ap'][l]:
print('{:s}: {:f}'.format(name, result['ap'][l]))
else:
print('{:s}: -'.format(name))
elif dataset_name == 'coco':
dataset = COCOInstanceSegmentationDataset(
split='minival', year='2014',
use_crowded=True, return_crowded=True, return_area=True)
label_names = coco_instance_segmentation_label_names
if model_name == 'fcis_resnet101':
proposal_creator_params = cls.proposal_creator_params
proposal_creator_params['min_size'] = 2
model = cls(
n_fg_class=len(label_names),
anchor_scales=(4, 8, 16, 32),
pretrained_model=pretrained_model,
proposal_creator_params=proposal_creator_params)
model.use_preset('coco_evaluate')
else:
model = cls(
n_fg_class=len(label_names), pretrained_model=pretrained_model)
model.use_preset('evaluate')
def eval_(out_values, rest_values):
pred_masks, pred_labels, pred_scores = out_values
gt_masks, gt_labels, gt_areas, gt_crowdeds = rest_values
result = eval_instance_segmentation_coco(
pred_masks, pred_labels, pred_scores,
gt_masks, gt_labels, gt_areas, gt_crowdeds)
print()
for area in ('all', 'large', 'medium', 'small'):
print('mmAP ({}):'.format(area),
result['map/iou=0.50:0.95/area={}/max_dets=100'.format(
area)])
return dataset, eval_, model, batchsize
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', choices=('sbd', 'coco'))
parser.add_argument('--model', choices=sorted(models.keys()))
parser.add_argument('--pretrained-model')
parser.add_argument('--batchsize', type=int)
parser.add_argument('--gpu', type=int, default=-1)
args = parser.parse_args()
dataset, eval_, model, batchsize = setup(
args.dataset, args.model, args.pretrained_model, args.batchsize)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
iterator = iterators.MultithreadIterator(
dataset, batchsize, repeat=False, shuffle=False)
in_values, out_values, rest_values = apply_to_iterator(
model.predict, iterator, hook=ProgressHook(len(dataset)))
# delete unused iterators explicitly
del in_values
eval_(out_values, rest_values)
if __name__ == '__main__':
main()
|
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME, CONF_PIN
from homeassistant.core import HomeAssistant
from .const import CONF_INITIAL_STATE, CONF_NEGATE_STATE, CONF_PIN_MODE, DOMAIN
from .entity import FirmataPinEntity
from .pin import FirmataBinaryDigitalOutput, FirmataPinUsedException
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Firmata switches."""
new_entities = []
board = hass.data[DOMAIN][config_entry.entry_id]
for switch in board.switches:
pin = switch[CONF_PIN]
pin_mode = switch[CONF_PIN_MODE]
initial = switch[CONF_INITIAL_STATE]
negate = switch[CONF_NEGATE_STATE]
api = FirmataBinaryDigitalOutput(board, pin, pin_mode, initial, negate)
try:
api.setup()
except FirmataPinUsedException:
_LOGGER.error(
"Could not setup switch on pin %s since pin already in use",
switch[CONF_PIN],
)
continue
name = switch[CONF_NAME]
switch_entity = FirmataSwitch(api, config_entry, name, pin)
new_entities.append(switch_entity)
if new_entities:
async_add_entities(new_entities)
class FirmataSwitch(FirmataPinEntity, SwitchEntity):
"""Representation of a switch on a Firmata board."""
async def async_added_to_hass(self) -> None:
"""Set up a switch."""
await self._api.start_pin()
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return self._api.is_on
async def async_turn_on(self, **kwargs) -> None:
"""Turn on switch."""
await self._api.turn_on()
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off switch."""
await self._api.turn_off()
self.async_write_ha_state()
|
from typing import Any, Dict
from homematicip.aio.device import (
AsyncBrandDimmer,
AsyncBrandSwitchMeasuring,
AsyncBrandSwitchNotificationLight,
AsyncDimmer,
AsyncFullFlushDimmer,
AsyncPluggableDimmer,
)
from homematicip.base.enums import RGBColorState
from homematicip.base.functionalChannels import NotificationLightChannel
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_NAME,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .hap import HomematicipHAP
ATTR_TODAY_ENERGY_KWH = "today_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP Cloud lights from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
entities = []
for device in hap.home.devices:
if isinstance(device, AsyncBrandSwitchMeasuring):
entities.append(HomematicipLightMeasuring(hap, device))
elif isinstance(device, AsyncBrandSwitchNotificationLight):
entities.append(HomematicipLight(hap, device))
entities.append(
HomematicipNotificationLight(hap, device, device.topLightChannelIndex)
)
entities.append(
HomematicipNotificationLight(
hap, device, device.bottomLightChannelIndex
)
)
elif isinstance(
device,
(AsyncDimmer, AsyncPluggableDimmer, AsyncBrandDimmer, AsyncFullFlushDimmer),
):
entities.append(HomematicipDimmer(hap, device))
if entities:
async_add_entities(entities)
class HomematicipLight(HomematicipGenericEntity, LightEntity):
"""Representation of the HomematicIP light."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the light entity."""
super().__init__(hap, device)
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.on
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
await self._device.turn_on()
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
await self._device.turn_off()
class HomematicipLightMeasuring(HomematicipLight):
"""Representation of the HomematicIP measuring light."""
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the light."""
state_attr = super().device_state_attributes
current_power_w = self._device.currentPowerConsumption
if current_power_w > 0.05:
state_attr[ATTR_CURRENT_POWER_W] = round(current_power_w, 2)
state_attr[ATTR_TODAY_ENERGY_KWH] = round(self._device.energyCounter, 2)
return state_attr
class HomematicipDimmer(HomematicipGenericEntity, LightEntity):
"""Representation of HomematicIP Cloud dimmer."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the dimmer light entity."""
super().__init__(hap, device)
@property
def is_on(self) -> bool:
"""Return true if dimmer is on."""
return self._device.dimLevel is not None and self._device.dimLevel > 0.0
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return int((self._device.dimLevel or 0.0) * 255)
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
async def async_turn_on(self, **kwargs) -> None:
"""Turn the dimmer on."""
if ATTR_BRIGHTNESS in kwargs:
await self._device.set_dim_level(kwargs[ATTR_BRIGHTNESS] / 255.0)
else:
await self._device.set_dim_level(1)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the dimmer off."""
await self._device.set_dim_level(0)
class HomematicipNotificationLight(HomematicipGenericEntity, LightEntity):
"""Representation of HomematicIP Cloud notification light."""
def __init__(self, hap: HomematicipHAP, device, channel: int) -> None:
"""Initialize the notification light entity."""
if channel == 2:
super().__init__(hap, device, post="Top", channel=channel)
else:
super().__init__(hap, device, post="Bottom", channel=channel)
self._color_switcher = {
RGBColorState.WHITE: [0.0, 0.0],
RGBColorState.RED: [0.0, 100.0],
RGBColorState.YELLOW: [60.0, 100.0],
RGBColorState.GREEN: [120.0, 100.0],
RGBColorState.TURQUOISE: [180.0, 100.0],
RGBColorState.BLUE: [240.0, 100.0],
RGBColorState.PURPLE: [300.0, 100.0],
}
@property
def _func_channel(self) -> NotificationLightChannel:
return self._device.functionalChannels[self._channel]
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return (
self._func_channel.dimLevel is not None
and self._func_channel.dimLevel > 0.0
)
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return int((self._func_channel.dimLevel or 0.0) * 255)
@property
def hs_color(self) -> tuple:
"""Return the hue and saturation color value [float, float]."""
simple_rgb_color = self._func_channel.simpleRGBColorState
return self._color_switcher.get(simple_rgb_color, [0.0, 0.0])
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the notification light sensor."""
state_attr = super().device_state_attributes
if self.is_on:
state_attr[ATTR_COLOR_NAME] = self._func_channel.simpleRGBColorState
return state_attr
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_TRANSITION
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self.__class__.__name__}_{self._post}_{self._device.id}"
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
# Use hs_color from kwargs,
# if not applicable use current hs_color.
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
simple_rgb_color = _convert_color(hs_color)
# Use brightness from kwargs,
# if not applicable use current brightness.
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
# If no kwargs, use default value.
if not kwargs:
brightness = 255
# Minimum brightness is 10, otherwise the led is disabled
brightness = max(10, brightness)
dim_level = brightness / 255.0
transition = kwargs.get(ATTR_TRANSITION, 0.5)
await self._device.set_rgb_dim_level_with_time(
channelIndex=self._channel,
rgb=simple_rgb_color,
dimLevel=dim_level,
onTime=0,
rampTime=transition,
)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
simple_rgb_color = self._func_channel.simpleRGBColorState
transition = kwargs.get(ATTR_TRANSITION, 0.5)
await self._device.set_rgb_dim_level_with_time(
channelIndex=self._channel,
rgb=simple_rgb_color,
dimLevel=0.0,
onTime=0,
rampTime=transition,
)
def _convert_color(color: tuple) -> RGBColorState:
"""
Convert the given color to the reduced RGBColorState color.
RGBColorState contains only 8 colors including white and black,
so a conversion is required.
"""
if color is None:
return RGBColorState.WHITE
hue = int(color[0])
saturation = int(color[1])
if saturation < 5:
return RGBColorState.WHITE
if 30 < hue <= 90:
return RGBColorState.YELLOW
if 90 < hue <= 160:
return RGBColorState.GREEN
if 150 < hue <= 210:
return RGBColorState.TURQUOISE
if 210 < hue <= 270:
return RGBColorState.BLUE
if 270 < hue <= 330:
return RGBColorState.PURPLE
return RGBColorState.RED
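# Illustrative mappings implied by the ranges above: an hs_color of
# (120, 100) maps to GREEN and (240, 100) to BLUE; (0, 3) maps to WHITE
# because its saturation is below 5, while highly saturated hues outside
# the listed ranges (e.g. 0 or 350) fall through to RED.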
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from itertools import repeat
import math
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import redis_server
from perfkitbenchmarker.linux_packages import ycsb
from six.moves import range
flags.DEFINE_integer('redis_ycsb_processes', 1,
'Number of total ycsb processes across all clients.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'redis_ycsb'
BENCHMARK_CONFIG = """
redis_ycsb:
description: >
Run YCSB against a single Redis server.
Specify the number of client VMs with --ycsb_client_vms.
vm_groups:
workers:
vm_spec: *default_single_core
clients:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['ycsb_client_vms'].present:
config['vm_groups']['clients']['vm_count'] = FLAGS.ycsb_client_vms
return config
def PrepareLoadgen(load_vm):
load_vm.Install('ycsb')
def PrepareServer(redis_vm):
redis_vm.Install('redis_server')
redis_server.Configure(redis_vm)
redis_server.Start(redis_vm)
def Prepare(benchmark_spec):
"""Install Redis on one VM and memtier_benchmark on another.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
groups = benchmark_spec.vm_groups
redis_vm = groups['workers'][0]
ycsb_vms = groups['clients']
prepare_fns = ([functools.partial(PrepareServer, redis_vm)] +
[functools.partial(vm.Install, 'ycsb') for vm in ycsb_vms])
vm_util.RunThreaded(lambda f: f(), prepare_fns)
num_ycsb = FLAGS.redis_ycsb_processes
num_server = FLAGS.redis_total_num_processes
# Each redis process uses a different port, so the number of ycsb processes
# should be at least as large as the number of server processes; a
# round-robin scheme assigns a target server process to each ycsb process.
server_metadata = [
{'redis.port': redis_server.REDIS_FIRST_PORT + i % num_server}
for i in range(num_ycsb)]
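# Illustrative assignment (values assumed): with num_ycsb=4 and
# num_server=2 the per-client 'redis.port' values become
# [P, P+1, P, P+1], where P is redis_server.REDIS_FIRST_PORT, i.e. ycsb
# process i targets port P + i % num_server.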
benchmark_spec.executor = ycsb.YCSBExecutor(
'redis', **{
'shardkeyspace': True,
'redis.host': redis_vm.internal_ip,
'perclientparam': server_metadata})
vm_util.SetupSimulatedMaintenance(redis_vm)
def Run(benchmark_spec):
"""Run YCSB against Redis.
This method can run with multiple redis processes (servers) on a
single vm. Since redis is single threaded, there is no need to run on
multiple server vms; when running with multiple redis processes, the key
space is sharded instead.
This method can also run with multiple ycsb processes (clients) on
multiple client vms. Each ycsb process can only run against a single
server process, so the number of ycsb processes should be no smaller than
the number of server processes or the number of client vms.
To avoid having multiple ycsb processes on the same client vm
targeting the same server process, this method hashes ycsb processes to
server processes and to client vms differently.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
groups = benchmark_spec.vm_groups
ycsb_vms = groups['clients']
num_ycsb = FLAGS.redis_ycsb_processes
num_server = FLAGS.redis_total_num_processes
num_client = FLAGS.ycsb_client_vms
metadata = {'ycsb_client_vms': num_client,
'ycsb_processes': num_ycsb,
'redis_total_num_processes': num_server,
'redis_server_version': FLAGS.redis_server_version}
  # Match client vms and ycsb processes sequentially:
  # the 1st to xth ycsb clients are assigned to client vm 1,
  # the x+1th to 2xth ycsb clients are assigned to client vm 2, etc.
  # Duplicate VirtualMachine objects are passed into YCSBExecutor to match the
  # corresponding ycsb clients (see the illustrative sketch after this
  # function).
duplicate = int(math.ceil(num_ycsb / float(num_client)))
client_vms = [
vm for item in ycsb_vms for vm in repeat(item, duplicate)][:num_ycsb]
samples = list(benchmark_spec.executor.Load(client_vms,
load_kwargs={'threads': 4}))
vm_util.StartSimulatedMaintenance()
samples += list(benchmark_spec.executor.Run(client_vms))
for sample in samples:
sample.metadata.update(metadata)
return samples
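# Illustrative sketch (not part of the benchmark itself): the client_vms list
# built in Run repeats each client VM ceil(num_ycsb / num_client) times and
# truncates to num_ycsb entries, so consecutive ycsb processes land on the
# same client VM. The VM names below are hypothetical.
def _ExampleClientAssignment(vms=('vm1', 'vm2'), num_ycsb=5):
  """Return which client VM each ycsb process would run on."""
  duplicate = int(math.ceil(num_ycsb / float(len(vms))))
  return [vm for item in vms for vm in repeat(item, duplicate)][:num_ycsb]
  # e.g. _ExampleClientAssignment() == ['vm1', 'vm1', 'vm1', 'vm2', 'vm2']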
def Cleanup(benchmark_spec):
"""Remove Redis and YCSB.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
redis_server.Cleanup(benchmark_spec.vm_groups['workers'][0])
|
from aiohomekit.model.characteristics import (
CharacteristicPermissions,
CharacteristicsTypes,
)
from aiohomekit.model.services import ServicesTypes
import pytest
from tests.components.homekit_controller.common import setup_test_component
CURRENT_MEDIA_STATE = ("television", "current-media-state")
TARGET_MEDIA_STATE = ("television", "target-media-state")
REMOTE_KEY = ("television", "remote-key")
ACTIVE_IDENTIFIER = ("television", "active-identifier")
def create_tv_service(accessory):
"""
Define tv characteristics.
    The TV service is not currently documented publicly - this is based on observing real TVs that have HomeKit support.
"""
tv_service = accessory.add_service(ServicesTypes.TELEVISION)
tv_service.add_char(CharacteristicsTypes.ACTIVE, value=True)
cur_state = tv_service.add_char(CharacteristicsTypes.CURRENT_MEDIA_STATE)
cur_state.value = 0
remote = tv_service.add_char(CharacteristicsTypes.REMOTE_KEY)
remote.value = None
remote.perms.append(CharacteristicPermissions.paired_write)
# Add a HDMI 1 channel
input_source_1 = accessory.add_service(ServicesTypes.INPUT_SOURCE)
input_source_1.add_char(CharacteristicsTypes.IDENTIFIER, value=1)
input_source_1.add_char(CharacteristicsTypes.CONFIGURED_NAME, value="HDMI 1")
tv_service.add_linked_service(input_source_1)
# Add a HDMI 2 channel
input_source_2 = accessory.add_service(ServicesTypes.INPUT_SOURCE)
input_source_2.add_char(CharacteristicsTypes.IDENTIFIER, value=2)
input_source_2.add_char(CharacteristicsTypes.CONFIGURED_NAME, value="HDMI 2")
tv_service.add_linked_service(input_source_2)
# Support switching channels
active_identifier = tv_service.add_char(CharacteristicsTypes.ACTIVE_IDENTIFIER)
active_identifier.value = 1
active_identifier.perms.append(CharacteristicPermissions.paired_write)
return tv_service
def create_tv_service_with_target_media_state(accessory):
"""Define a TV service that can play/pause/stop without generate remote events."""
service = create_tv_service(accessory)
tms = service.add_char(CharacteristicsTypes.TARGET_MEDIA_STATE)
tms.value = None
tms.perms.append(CharacteristicPermissions.paired_write)
return service
async def test_tv_read_state(hass, utcnow):
"""Test that we can read the state of a HomeKit fan accessory."""
helper = await setup_test_component(hass, create_tv_service)
helper.characteristics[CURRENT_MEDIA_STATE].value = 0
state = await helper.poll_and_get_state()
assert state.state == "playing"
helper.characteristics[CURRENT_MEDIA_STATE].value = 1
state = await helper.poll_and_get_state()
assert state.state == "paused"
helper.characteristics[CURRENT_MEDIA_STATE].value = 2
state = await helper.poll_and_get_state()
assert state.state == "idle"
async def test_tv_read_sources(hass, utcnow):
"""Test that we can read the input source of a HomeKit TV."""
helper = await setup_test_component(hass, create_tv_service)
state = await helper.poll_and_get_state()
assert state.attributes["source"] == "HDMI 1"
assert state.attributes["source_list"] == ["HDMI 1", "HDMI 2"]
async def test_play_remote_key(hass, utcnow):
"""Test that we can play media on a media player."""
helper = await setup_test_component(hass, create_tv_service)
helper.characteristics[CURRENT_MEDIA_STATE].value = 1
await helper.poll_and_get_state()
await hass.services.async_call(
"media_player",
"media_play",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value == 11
# Second time should be a no-op
helper.characteristics[CURRENT_MEDIA_STATE].value = 0
await helper.poll_and_get_state()
helper.characteristics[REMOTE_KEY].value = None
await hass.services.async_call(
"media_player",
"media_play",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value is None
async def test_pause_remote_key(hass, utcnow):
"""Test that we can pause a media player."""
helper = await setup_test_component(hass, create_tv_service)
helper.characteristics[CURRENT_MEDIA_STATE].value = 0
await helper.poll_and_get_state()
await hass.services.async_call(
"media_player",
"media_pause",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value == 11
# Second time should be a no-op
helper.characteristics[CURRENT_MEDIA_STATE].value = 1
await helper.poll_and_get_state()
helper.characteristics[REMOTE_KEY].value = None
await hass.services.async_call(
"media_player",
"media_pause",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value is None
async def test_play(hass, utcnow):
"""Test that we can play media on a media player."""
helper = await setup_test_component(hass, create_tv_service_with_target_media_state)
helper.characteristics[CURRENT_MEDIA_STATE].value = 1
await helper.poll_and_get_state()
await hass.services.async_call(
"media_player",
"media_play",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value is None
assert helper.characteristics[TARGET_MEDIA_STATE].value == 0
# Second time should be a no-op
helper.characteristics[CURRENT_MEDIA_STATE].value = 0
await helper.poll_and_get_state()
helper.characteristics[TARGET_MEDIA_STATE].value = None
await hass.services.async_call(
"media_player",
"media_play",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value is None
assert helper.characteristics[TARGET_MEDIA_STATE].value is None
async def test_pause(hass, utcnow):
"""Test that we can turn pause a media player."""
helper = await setup_test_component(hass, create_tv_service_with_target_media_state)
helper.characteristics[CURRENT_MEDIA_STATE].value = 0
await helper.poll_and_get_state()
await hass.services.async_call(
"media_player",
"media_pause",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value is None
assert helper.characteristics[TARGET_MEDIA_STATE].value == 1
# Second time should be a no-op
helper.characteristics[CURRENT_MEDIA_STATE].value = 1
await helper.poll_and_get_state()
helper.characteristics[REMOTE_KEY].value = None
await hass.services.async_call(
"media_player",
"media_pause",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value is None
async def test_stop(hass, utcnow):
"""Test that we can stop a media player."""
helper = await setup_test_component(hass, create_tv_service_with_target_media_state)
await hass.services.async_call(
"media_player",
"media_stop",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[TARGET_MEDIA_STATE].value == 2
# Second time should be a no-op
helper.characteristics[CURRENT_MEDIA_STATE].value = 2
await helper.poll_and_get_state()
helper.characteristics[TARGET_MEDIA_STATE].value = None
await hass.services.async_call(
"media_player",
"media_stop",
{"entity_id": "media_player.testdevice"},
blocking=True,
)
assert helper.characteristics[REMOTE_KEY].value is None
assert helper.characteristics[TARGET_MEDIA_STATE].value is None
async def test_tv_set_source(hass, utcnow):
"""Test that we can set the input source of a HomeKit TV."""
helper = await setup_test_component(hass, create_tv_service)
await hass.services.async_call(
"media_player",
"select_source",
{"entity_id": "media_player.testdevice", "source": "HDMI 2"},
blocking=True,
)
assert helper.characteristics[ACTIVE_IDENTIFIER].value == 2
state = await helper.poll_and_get_state()
assert state.attributes["source"] == "HDMI 2"
async def test_tv_set_source_fail(hass, utcnow):
"""Test that we can set the input source of a HomeKit TV."""
helper = await setup_test_component(hass, create_tv_service)
with pytest.raises(ValueError):
await hass.services.async_call(
"media_player",
"select_source",
{"entity_id": "media_player.testdevice", "source": "HDMI 999"},
blocking=True,
)
state = await helper.poll_and_get_state()
assert state.attributes["source"] == "HDMI 1"
|
import pytest
from mock import patch, call
try:
from ConfigParser import NoSectionError
except ImportError:
from configparser import NoSectionError
from arctic.hosts import get_arctic_lib
def test_get_arctic_lib_with_known_host():
with patch('arctic.arctic.Arctic') as Arctic:
get_arctic_lib("foo@bar")
assert Arctic.call_args_list == [call('bar')]
def test_get_arctic_lib_with_unknown_host():
with patch('arctic.arctic.Arctic') as Arctic:
with patch('pymongo.MongoClient') as MongoClient:
get_arctic_lib("foo@bar:123")
assert Arctic.call_args_list == [call("bar:123")]
def test_get_arctic_connection_strings():
with patch('arctic.arctic.Arctic') as Arctic:
with patch('pymongo.MongoClient') as MongoClient:
get_arctic_lib("foo@bar")
get_arctic_lib("foo.sheep@bar")
get_arctic_lib("foo.sheep@bar:123")
get_arctic_lib("[email protected]:123")
@pytest.mark.parametrize(
["string"], [('donkey',), ('donkey:ride@blackpool',),
('donkey:ride',)])
def test_get_arctic_malformed_connection_strings(string):
with pytest.raises(ValueError):
get_arctic_lib(string)
|
import os
import pytest
from nikola import __main__
from .helper import cd, patch_config
from .test_demo_build import prepare_demo_site
from .test_empty_build import ( # NOQA
test_avoid_double_slash_in_rss,
test_check_files,
test_check_links,
test_index_in_sitemap,
)
@pytest.mark.parametrize(
"path",
[
pytest.param(["archive.html"], id="overall"),
pytest.param(["2012", "index.html"], id="year"),
pytest.param(["2012", "03", "index.html"], id="month"),
pytest.param(["2012", "03", "30", "index.html"], id="day"),
],
)
def test_full_archive(build, output_dir, path):
"""Check existance of archive pages"""
expected_path = os.path.join(output_dir, *path)
assert os.path.isfile(expected_path)
@pytest.fixture(scope="module")
def build(target_dir):
"""Fill the site with demo content and build it."""
prepare_demo_site(target_dir)
patch_config(
target_dir, ("# CREATE_FULL_ARCHIVES = False", "CREATE_FULL_ARCHIVES = True")
)
with cd(target_dir):
__main__.main(["build"])
|
from homeassistant.components import upnp
from homeassistant.components.upnp.const import (
DISCOVERY_LOCATION,
DISCOVERY_ST,
DISCOVERY_UDN,
)
from homeassistant.components.upnp.device import Device
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.setup import async_setup_component
from .mock_device import MockDevice
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
async def test_async_setup_entry_default(hass):
"""Test async_setup_entry."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
discovery_infos = [
{
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_LOCATION: "http://192.168.1.1/desc.xml",
}
]
entry = MockConfigEntry(
domain=upnp.DOMAIN, data={"udn": mock_device.udn, "st": mock_device.device_type}
)
config = {
# no upnp
}
async_discover = AsyncMock(return_value=[])
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", async_discover):
# initialisation of component, no device discovered
await async_setup_component(hass, "upnp", config)
await hass.async_block_till_done()
# loading of config_entry, device discovered
async_discover.return_value = discovery_infos
assert await upnp.async_setup_entry(hass, entry) is True
# ensure device is stored/used
assert hass.data[upnp.DOMAIN]["devices"][udn] == mock_device
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
|
from __future__ import division
import unittest
import numpy as np
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.utils import bbox_iou
@testing.parameterize(
{'bbox_a': np.array([[0, 0, 8, 8]], dtype=np.float32),
'bbox_b': np.array(
[[3, 5, 10, 12], [9, 10, 11, 12], [0, 0, 8, 8]], dtype=np.float32),
'expected': np.array(
[[(5 * 3) / (8 * 8 + 7 * 7 - 5 * 3), 0., 1.]], dtype=np.float32)
},
{'bbox_a': np.array(
[[3, 5, 10, 12], [9, 10, 11, 12], [0, 0, 8, 8]], dtype=np.float32),
'bbox_b': np.array([[0, 0, 8, 8]], dtype=np.float32),
'expected': np.array(
[[(5 * 3) / (8 * 8 + 7 * 7 - 5 * 3)], [0.], [1.]], dtype=np.float32)
},
{'bbox_a': np.zeros((0, 4), dtype=np.float32),
'bbox_b': np.array([[0, 0, 1, 1]], dtype=np.float32),
'expected': np.zeros((0, 1), dtype=np.float32)
},
)
class TestBboxIou(unittest.TestCase):
def check(self, bbox_a, bbox_b, expected):
iou = bbox_iou(bbox_a, bbox_b)
self.assertIsInstance(iou, type(expected))
np.testing.assert_equal(
cuda.to_cpu(iou),
cuda.to_cpu(expected))
def test_bbox_iou_cpu(self):
self.check(self.bbox_a, self.bbox_b, self.expected)
@attr.gpu
def test_bbox_iou_gpu(self):
self.check(
cuda.to_gpu(self.bbox_a),
cuda.to_gpu(self.bbox_b),
cuda.to_gpu(self.expected))
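# Worked example (for reference only) of the first expected IoU above: boxes
# [0, 0, 8, 8] and [3, 5, 10, 12] overlap in a 5 x 3 region, and their areas
# are 8 * 8 = 64 and 7 * 7 = 49, so
#     IoU = 15 / (64 + 49 - 15) = 15 / 98 ~= 0.153.
# The box [9, 10, 11, 12] does not overlap (IoU 0) and [0, 0, 8, 8] matches
# itself exactly (IoU 1).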
@testing.parameterize(
{'bbox_a': [[0, 0, 8]], 'bbox_b': [[1, 1, 9, 9]]},
{'bbox_a': [[0, 0, 8, 0, 1]], 'bbox_b': [[1, 1, 9, 9]]},
{'bbox_a': [[0, 0, 8, 8]], 'bbox_b': [[1, 1, 9]]},
{'bbox_a': [[0, 0, 8, 8]], 'bbox_b': [[1, 1, 9, 9, 10]]}
)
class TestBboxIouInvalidShape(unittest.TestCase):
def test_bbox_iou_invalid(self):
bbox_a = np.array(self.bbox_a, dtype=np.float32)
bbox_b = np.array(self.bbox_b, dtype=np.float32)
with self.assertRaises(IndexError):
bbox_iou(bbox_a, bbox_b)
testing.run_module(__name__, __file__)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import ntpath
from absl import flags
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from six.moves import range
FLAGS = flags.FLAGS
CONTROL_PORT = 5000
UDP_PORT = 5001
NUTTCP_OUT_FILE = 'nuttcp_results'
CPU_OUT_FILE = 'cpu_results'
flags.DEFINE_integer('nuttcp_max_bandwidth_mb', 10000,
'The maximum bandwidth, in megabytes, to test in a '
'UDP stream.')
flags.DEFINE_integer('nuttcp_min_bandwidth_mb', 100,
'The minimum bandwidth, in megabytes, to test in a '
'UDP stream.')
flags.DEFINE_integer('nuttcp_bandwidth_step_mb', 1000,
'The amount of megabytes to increase bandwidth in each '
'UDP stream test.')
flags.DEFINE_integer('nuttcp_udp_stream_seconds', 60,
'The amount of time to run the UDP stream test.')
flags.DEFINE_integer('nuttcp_udp_packet_size', 1420,
'The size of each UDP packet sent in the UDP stream.')
flags.DEFINE_bool('nuttcp_udp_run_both_directions', False,
'Run the test twice, using each VM as a source.')
flags.DEFINE_integer('nuttcp_udp_iterations', 1,
'The number of consecutive tests to run.')
flags.DEFINE_bool('nuttcp_udp_unlimited_bandwidth', False,
'Run an "unlimited bandwidth" test')
flags.DEFINE_integer('nuttcp_cpu_sample_time', 3,
'Time, in seconds, to take the CPU usage sample.')
NUTTCP_DIR = 'nuttcp-8.1.4.win64'
NUTTCP_ZIP = NUTTCP_DIR + '.zip'
NUTTCP_URL = 'https://nuttcp.net/nuttcp/nuttcp-8.1.4/binaries/' + NUTTCP_ZIP
_COMMAND_TIMEOUT_BUFFER = 120
class NuttcpNotRunningError(Exception):
"""Raised when nuttcp is not running at a time that it is expected to be."""
def Install(vm):
"""Installs the nuttcp package on the VM."""
zip_path = ntpath.join(vm.temp_dir, NUTTCP_ZIP)
@vm_util.Retry()
def DownloadWithRetry():
vm.DownloadFile(NUTTCP_URL, zip_path)
try:
DownloadWithRetry()
except errors.VirtualMachine.RemoteCommandError as e:
# The mirror to download nuttcp from is temporarily unavailable.
raise errors.Benchmarks.KnownIntermittentError(
'Failed to download nuttcp package: %s' % e)
vm.UnzipFile(zip_path, vm.temp_dir)
def CheckPrerequisites():
if FLAGS.nuttcp_udp_stream_seconds <= FLAGS.nuttcp_cpu_sample_time:
raise errors.Config.InvalidValue(
'nuttcp_udp_stream_seconds must be greater than nuttcp_cpu_sample_time')
def GetExecPath():
return 'nuttcp-8.1.4.exe'
def _RunNuttcp(vm, options, exec_path):
"""Run nuttcp, server or client depending on options.
Args:
vm: vm to run nuttcp on
options: string of options to pass to nuttcp
exec_path: string path to the nuttcp executable
"""
command = 'cd {exec_dir}; .\\{exec_path} {options}'.format(
exec_dir=vm.temp_dir,
exec_path=exec_path,
options=options)
  # Time out after the expected stream duration plus a buffer for the server
  # wait and command overhead.
timeout_duration = FLAGS.nuttcp_udp_stream_seconds + _COMMAND_TIMEOUT_BUFFER
vm.RobustRemoteCommand(command, timeout=timeout_duration)
def _GetCpuUsage(vm):
"""Gather CPU usage data.
Args:
vm: the vm to gather cpu usage data on.
Raises:
NuttcpNotRunningError: raised if nuttcp is not running when the CPU usage
data gathering has finished.
"""
command = ('cd {exec_path}; '
"Get-Counter -Counter '\\Processor(*)\\% Processor Time' "
'-SampleInterval {sample_time} | '
'select -ExpandProperty CounterSamples | '
'select InstanceName,CookedValue > {out_file};'
'if ((ps | select-string nuttcp | measure-object).Count -eq 0) '
'{{echo "FAIL"}}').format(
exec_path=vm.temp_dir,
sample_time=FLAGS.nuttcp_cpu_sample_time,
out_file=CPU_OUT_FILE)
  # Returning from the command should never take more than 120 seconds beyond
  # the actual sample time. If it does, the command is hung.
timeout_duration = FLAGS.nuttcp_cpu_sample_time + _COMMAND_TIMEOUT_BUFFER
stdout, _ = vm.RemoteCommand(command, timeout=timeout_duration)
if 'FAIL' in stdout:
raise NuttcpNotRunningError('nuttcp not running after getting CPU usage.')
@vm_util.Retry(max_retries=3)
def RunSingleBandwidth(bandwidth, sending_vm, receiving_vm, dest_ip, exec_path):
"""Create a server-client nuttcp pair.
The server exits after the client completes its request.
Args:
bandwidth: the requested transmission bandwidth
sending_vm: vm sending the UDP packets.
receiving_vm: vm receiving the UDP packets.
dest_ip: the IP of the receiver.
exec_path: path to the nuttcp executable.
Returns:
output from the client nuttcp process.
"""
sender_args = ('-u -p{data_port} -P{control_port} -R{bandwidth} '
'-T{time} -l{packet_size} {dest_ip} > {out_file}').format(
data_port=UDP_PORT,
control_port=CONTROL_PORT,
bandwidth=bandwidth,
time=FLAGS.nuttcp_udp_stream_seconds,
packet_size=FLAGS.nuttcp_udp_packet_size,
dest_ip=dest_ip,
out_file=NUTTCP_OUT_FILE)
receiver_args = '-p{data_port} -P{control_port} -1'.format(
data_port=UDP_PORT,
control_port=CONTROL_PORT)
# Process to run the nuttcp server
server_process = multiprocessing.Process(
name='server',
target=_RunNuttcp,
args=(receiving_vm, receiver_args, exec_path))
server_process.start()
receiving_vm.WaitForProcessRunning('nuttcp', 30)
# Process to run the nuttcp client
client_process = multiprocessing.Process(
name='client',
target=_RunNuttcp,
args=(sending_vm, sender_args, exec_path))
client_process.start()
sending_vm.WaitForProcessRunning('nuttcp', 30)
process_args = [
(_GetCpuUsage, (receiving_vm,), {}),
(_GetCpuUsage, (sending_vm,), {})]
background_tasks.RunParallelProcesses(process_args, 200)
server_process.join()
client_process.join()
@vm_util.Retry(max_retries=3)
def GatherResults(vm, out_file):
"""Gets the contents of out_file from vm.
Args:
vm: the VM to get the results from.
out_file: the name of the file that contains results.
Returns:
The contents of 'out_file' as a string.
"""
cat_command = 'cd {results_dir}; cat {out_file}'
results_command = cat_command.format(results_dir=vm.temp_dir,
out_file=out_file)
results, _ = vm.RemoteCommand(results_command)
return results
def RunNuttcp(sending_vm, receiving_vm, exec_path, dest_ip, network_type,
iteration):
"""Run nuttcp tests.
Args:
sending_vm: vm sending the UDP packets.
receiving_vm: vm receiving the UDP packets.
exec_path: path to the nuttcp executable.
dest_ip: the IP of the receiver.
network_type: string representing the type of the network.
iteration: the run number of the test.
Returns:
list of samples from the results of the nuttcp tests.
"""
samples = []
bandwidths = [
'{b}m'.format(b=b)
for b in range(
FLAGS.nuttcp_min_bandwidth_mb,
FLAGS.nuttcp_max_bandwidth_mb,
FLAGS.nuttcp_bandwidth_step_mb)
]
if FLAGS.nuttcp_udp_unlimited_bandwidth:
bandwidths.append('u')
for bandwidth in bandwidths:
RunSingleBandwidth(bandwidth, sending_vm, receiving_vm, dest_ip, exec_path)
# retrieve the results and parse them
udp_results = GatherResults(sending_vm, NUTTCP_OUT_FILE)
# get the cpu usage for the sender
sender_cpu_results = GatherResults(sending_vm, CPU_OUT_FILE)
# get the cpu usage for the receiver
receiving_cpu_results = GatherResults(receiving_vm, CPU_OUT_FILE)
samples.append(
GetUDPStreamSample(udp_results, sender_cpu_results,
receiving_cpu_results, sending_vm, receiving_vm,
bandwidth, network_type, iteration))
return samples
def _GetCpuResults(cpu_results):
r"""Transforms the string output of the cpu results.
Sample output:
'\r\n
InstanceName CookedValue\r\n
------------ -----------\r\n
0 22.7976893740141\r\n
1 32.6422793196096\r\n
2 18.6525988706054\r\n
3 44.5594145169094\r\n
_total 29.6629938622484\r\n
\r\n
\r\n'
Args:
cpu_results: string of the output of the cpu usage command.
Returns:
Array of (cpu_num, percentage)
"""
results = []
for entry in (line for line in cpu_results.splitlines()[3:] if line):
cpu_num, cpu_usage = entry.split()
results.append((cpu_num, float(cpu_usage)))
return results
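# Illustrative example (values are the hypothetical ones from the docstring
# above, not real measurements): for that sample output, _GetCpuResults
# returns
#   [('0', 22.7976893740141), ('1', 32.6422793196096),
#    ('2', 18.6525988706054), ('3', 44.5594145169094),
#    ('_total', 29.6629938622484)]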
# 1416.3418 MB / 10.00 sec = 1188.1121 Mbps 85 %TX 26 %RX 104429 / 1554763
# drop/pkt 6.72 %loss
def GetUDPStreamSample(command_out, sender_cpu_results, receiving_cpu_results,
sending_vm, receiving_vm, request_bandwidth,
network_type, iteration):
"""Get a sample from the nuttcp string results.
Args:
command_out: the nuttcp output.
    sender_cpu_results: the cpu usage of the sender VM.
    receiving_cpu_results: the cpu usage of the receiver VM.
sending_vm: vm sending the UDP packets.
receiving_vm: vm receiving the UDP packets.
request_bandwidth: the requested bandwidth in the nuttcp sample.
network_type: the type of the network, external or internal.
iteration: the run number of the test.
Returns:
sample from the results of the nuttcp tests.
"""
data_line = command_out.split('\n')[0].split(' ')
data_line = [val for val in data_line if val]
try:
actual_bandwidth = float(data_line[6])
units = data_line[7]
except IndexError as e:
# nuttcp connection timed out during the run.
raise errors.Benchmarks.KnownIntermittentError(
f'Not enough data to parse nuttcp results, command timed out: {e}')
packet_loss = data_line[16]
metadata = {
'receiving_machine_type': receiving_vm.machine_type,
'receiving_zone': receiving_vm.zone,
'sending_machine_type': sending_vm.machine_type,
'sending_zone': sending_vm.zone,
'packet_loss': packet_loss,
'bandwidth_requested': request_bandwidth,
'network_type': network_type,
'packet_size': FLAGS.nuttcp_udp_packet_size,
'sample_time': FLAGS.nuttcp_udp_stream_seconds,
'iteration': iteration,
}
for cpu_usage in _GetCpuResults(sender_cpu_results):
metadata['sender cpu %s' % cpu_usage[0]] = cpu_usage[1]
for cpu_usage in _GetCpuResults(receiving_cpu_results):
metadata['receiver cpu %s' % cpu_usage[0]] = cpu_usage[1]
return sample.Sample('bandwidth', actual_bandwidth, units, metadata)
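# Illustrative example (assuming the whole measurement is on a single line as
# in the sample above GetUDPStreamSample): splitting that line on whitespace
# gives
#   data_line[6] == '1188.1121'  (actual bandwidth)
#   data_line[7] == 'Mbps'       (units)
#   data_line[16] == '6.72'      (packet loss percentage)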
|
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from . import CONF_INVERT_LOGIC, DEFAULT_INVERT_LOGIC
from .. import remote_rpi_gpio
CONF_PORTS = "ports"
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORTS): _SENSORS_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
}
)
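# Illustrative configuration sketch matching the schema above (host, port
# numbers and names are hypothetical, and the key names assume the usual
# values of CONF_HOST, CONF_PORTS and CONF_INVERT_LOGIC):
#
#   switch:
#     - platform: remote_rpi_gpio
#       host: 192.168.1.123
#       ports:
#         17: Fan
#         18: Light
#       invert_logic: false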
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Remote Raspberry PI GPIO devices."""
address = config[CONF_HOST]
invert_logic = config[CONF_INVERT_LOGIC]
ports = config[CONF_PORTS]
devices = []
for port, name in ports.items():
try:
led = remote_rpi_gpio.setup_output(address, port, invert_logic)
except (ValueError, IndexError, KeyError, OSError):
return
new_switch = RemoteRPiGPIOSwitch(name, led)
devices.append(new_switch)
add_entities(devices)
class RemoteRPiGPIOSwitch(SwitchEntity):
"""Representation of a Remote Raspberry Pi GPIO."""
def __init__(self, name, led):
"""Initialize the pin."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = False
self._switch = led
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def assumed_state(self):
"""If unable to access real state of the entity."""
return True
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
remote_rpi_gpio.write_output(self._switch, 1)
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
remote_rpi_gpio.write_output(self._switch, 0)
self._state = False
self.schedule_update_ha_state()
|
import asyncio
import os
import shutil
from homeassistant.components.media_player.const import (
DOMAIN as DOMAIN_MP,
SERVICE_PLAY_MEDIA,
)
import homeassistant.components.tts as tts
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_FORBIDDEN
from homeassistant.setup import setup_component
from tests.common import assert_setup_component, get_test_home_assistant, mock_service
from tests.components.tts.test_init import ( # noqa: F401, pylint: disable=unused-import
mutagen_mock,
)
class TestTTSYandexPlatform:
"""Test the speech component."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self._base_url = "https://tts.voicetech.yandex.net/generate?"
asyncio.run_coroutine_threadsafe(
async_process_ha_core_config(
self.hass, {"internal_url": "http://example.local:8123"}
),
self.hass.loop,
)
def teardown_method(self):
"""Stop everything that was started."""
default_tts = self.hass.config.path(tts.DEFAULT_CACHE_DIR)
if os.path.isdir(default_tts):
shutil.rmtree(default_tts)
self.hass.stop()
def test_setup_component(self):
"""Test setup component."""
config = {tts.DOMAIN: {"platform": "yandextts", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
def test_setup_component_without_api_key(self):
"""Test setup component without api key."""
config = {tts.DOMAIN: {"platform": "yandextts"}}
with assert_setup_component(0, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
def test_service_say(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "en-US",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "neutral",
"speed": 1,
}
aioclient_mock.get(
self._base_url, status=200, content=b"test", params=url_param
)
config = {tts.DOMAIN: {"platform": "yandextts", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{"entity_id": "media_player.something", tts.ATTR_MESSAGE: "HomeAssistant"},
)
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(calls) == 1
def test_service_say_russian_config(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "ru-RU",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "neutral",
"speed": 1,
}
aioclient_mock.get(
self._base_url, status=200, content=b"test", params=url_param
)
config = {
tts.DOMAIN: {
"platform": "yandextts",
"api_key": "1234567xx",
"language": "ru-RU",
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{"entity_id": "media_player.something", tts.ATTR_MESSAGE: "HomeAssistant"},
)
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(calls) == 1
def test_service_say_russian_service(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "ru-RU",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "neutral",
"speed": 1,
}
aioclient_mock.get(
self._base_url, status=200, content=b"test", params=url_param
)
config = {tts.DOMAIN: {"platform": "yandextts", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "HomeAssistant",
tts.ATTR_LANGUAGE: "ru-RU",
},
)
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(calls) == 1
def test_service_say_timeout(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "en-US",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "neutral",
"speed": 1,
}
aioclient_mock.get(
self._base_url, status=200, exc=asyncio.TimeoutError(), params=url_param
)
config = {tts.DOMAIN: {"platform": "yandextts", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{"entity_id": "media_player.something", tts.ATTR_MESSAGE: "HomeAssistant"},
)
self.hass.block_till_done()
assert len(calls) == 0
assert len(aioclient_mock.mock_calls) == 1
def test_service_say_http_error(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "en-US",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "neutral",
"speed": 1,
}
aioclient_mock.get(
self._base_url, status=HTTP_FORBIDDEN, content=b"test", params=url_param
)
config = {tts.DOMAIN: {"platform": "yandextts", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{"entity_id": "media_player.something", tts.ATTR_MESSAGE: "HomeAssistant"},
)
self.hass.block_till_done()
assert len(calls) == 0
def test_service_say_specified_speaker(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "en-US",
"key": "1234567xx",
"speaker": "alyss",
"format": "mp3",
"emotion": "neutral",
"speed": 1,
}
aioclient_mock.get(
self._base_url, status=200, content=b"test", params=url_param
)
config = {
tts.DOMAIN: {
"platform": "yandextts",
"api_key": "1234567xx",
"voice": "alyss",
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{"entity_id": "media_player.something", tts.ATTR_MESSAGE: "HomeAssistant"},
)
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(calls) == 1
def test_service_say_specified_emotion(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "en-US",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "evil",
"speed": 1,
}
aioclient_mock.get(
self._base_url, status=200, content=b"test", params=url_param
)
config = {
tts.DOMAIN: {
"platform": "yandextts",
"api_key": "1234567xx",
"emotion": "evil",
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{"entity_id": "media_player.something", tts.ATTR_MESSAGE: "HomeAssistant"},
)
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(calls) == 1
def test_service_say_specified_low_speed(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "en-US",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "neutral",
"speed": "0.1",
}
aioclient_mock.get(
self._base_url, status=200, content=b"test", params=url_param
)
config = {
tts.DOMAIN: {"platform": "yandextts", "api_key": "1234567xx", "speed": 0.1}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{"entity_id": "media_player.something", tts.ATTR_MESSAGE: "HomeAssistant"},
)
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(calls) == 1
def test_service_say_specified_speed(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "en-US",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "neutral",
"speed": 2,
}
aioclient_mock.get(
self._base_url, status=200, content=b"test", params=url_param
)
config = {
tts.DOMAIN: {"platform": "yandextts", "api_key": "1234567xx", "speed": 2}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{"entity_id": "media_player.something", tts.ATTR_MESSAGE: "HomeAssistant"},
)
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(calls) == 1
def test_service_say_specified_options(self, aioclient_mock):
"""Test service call say with options."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
url_param = {
"text": "HomeAssistant",
"lang": "en-US",
"key": "1234567xx",
"speaker": "zahar",
"format": "mp3",
"emotion": "evil",
"speed": 2,
}
aioclient_mock.get(
self._base_url, status=200, content=b"test", params=url_param
)
config = {tts.DOMAIN: {"platform": "yandextts", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"yandextts_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "HomeAssistant",
"options": {"emotion": "evil", "speed": 2},
},
)
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(calls) == 1
|
import argparse
import urllib.request
import urllib.error
import urllib.parse
import shutil
import json
import os
import sys
sys.path.insert(
0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
from scripts import dictcli
from qutebrowser.config import configdata
def download_nsis_plugins():
"""Download the plugins required by the NSIS script."""
github_url = 'https://raw.githubusercontent.com/Drizin/NsisMultiUser'
git_commit = 'master'
nsh_files = ('Include/NsisMultiUser.nsh', 'Include/NsisMultiUserLang.nsh',
'Include/UAC.nsh', 'Include/StdUtils.nsh',
'Demos/Common/Utils.nsh')
dll_files = ('Plugins/x86-unicode/UAC.dll',
'Plugins/x86-unicode/StdUtils.dll')
include_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', '..', 'misc', 'nsis',
'include')
plugins_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', '..', 'misc', 'nsis',
'plugins', 'x86-unicode')
os.makedirs(include_dir, exist_ok=True)
os.makedirs(plugins_dir, exist_ok=True)
print("=> Downloading NSIS plugins")
for nsh_file in nsh_files:
target_path = os.path.join(include_dir, os.path.basename(nsh_file))
urllib.request.urlretrieve('{}/{}/{}'.format(github_url, git_commit,
nsh_file), target_path)
for dll_file in dll_files:
target_path = os.path.join(plugins_dir, os.path.basename(dll_file))
urllib.request.urlretrieve('{}/{}/{}'.format(github_url, git_commit,
dll_file), target_path)
urllib.request.urlcleanup()
def get_latest_pdfjs_url():
"""Get the URL of the latest pdf.js prebuilt package.
Returns a (version, url)-tuple.
"""
github_api = 'https://api.github.com'
endpoint = 'repos/mozilla/pdf.js/releases/latest'
request_url = '{}/{}'.format(github_api, endpoint)
with urllib.request.urlopen(request_url) as fp:
data = json.loads(fp.read().decode('utf-8'))
download_url = data['assets'][0]['browser_download_url']
version_name = data['name']
return (version_name, download_url)
def update_pdfjs(target_version=None):
"""Download and extract the latest pdf.js version.
If target_version is not None, download the given version instead.
Args:
target_version: None or version string ('x.y.z')
"""
if target_version is None:
version, url = get_latest_pdfjs_url()
else:
# We need target_version as x.y.z, without the 'v' prefix, though the
# user might give it on the command line
if target_version.startswith('v'):
target_version = target_version[1:]
# version should have the prefix to be consistent with the return value
# of get_latest_pdfjs_url()
version = 'v' + target_version
url = ('https://github.com/mozilla/pdf.js/releases/download/'
'v{0}/pdfjs-{0}-dist.zip').format(target_version)
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', '..'))
target_path = os.path.join('qutebrowser', '3rdparty', 'pdfjs')
print("=> Downloading pdf.js {}".format(version))
try:
(archive_path, _headers) = urllib.request.urlretrieve(url)
except urllib.error.HTTPError as error:
print("Could not retrieve pdfjs {}: {}".format(version, error))
return
if os.path.isdir(target_path):
print("Removing old version in {}".format(target_path))
shutil.rmtree(target_path)
os.makedirs(target_path)
print("Extracting new version")
shutil.unpack_archive(archive_path, target_path, 'zip')
urllib.request.urlcleanup()
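# Illustrative example (the version number is hypothetical): calling
# update_pdfjs('v2.4.456') or update_pdfjs('2.4.456') downloads
# https://github.com/mozilla/pdf.js/releases/download/v2.4.456/pdfjs-2.4.456-dist.zip
# and reports the version as 'v2.4.456'.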
def update_dmg_makefile():
"""Update fancy-dmg Makefile.
See https://el-tramo.be/blog/fancy-dmg/
"""
print("Updating fancy-dmg Makefile...")
url = 'https://raw.githubusercontent.com/remko/fancy-dmg/master/Makefile'
target_path = os.path.join('scripts', 'dev', 'Makefile-dmg')
urllib.request.urlretrieve(url, target_path)
urllib.request.urlcleanup()
def update_ace():
"""Update ACE.
See https://ace.c9.io/ and https://github.com/ajaxorg/ace-builds/
"""
print("Updating ACE...")
url = 'https://raw.githubusercontent.com/ajaxorg/ace-builds/master/src/ace.js'
target_path = os.path.join('tests', 'end2end', 'data', 'hints', 'ace',
'ace.js')
urllib.request.urlretrieve(url, target_path)
urllib.request.urlcleanup()
def test_dicts():
"""Test available dictionaries."""
configdata.init()
for lang in dictcli.available_languages():
print('Testing dictionary {}... '.format(lang.code), end='')
lang_url = urllib.parse.urljoin(dictcli.API_URL, lang.remote_filename)
request = urllib.request.Request(lang_url, method='HEAD')
response = urllib.request.urlopen(request)
if response.status == 200:
print('OK')
else:
print('ERROR: {}'.format(response.status))
def run(nsis=False, ace=False, pdfjs=True, fancy_dmg=False, pdfjs_version=None,
dicts=False):
"""Update components based on the given arguments."""
if nsis:
download_nsis_plugins()
if pdfjs:
update_pdfjs(pdfjs_version)
if ace:
update_ace()
if fancy_dmg:
update_dmg_makefile()
if dicts:
test_dicts()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--nsis', '-n', help='Download NSIS plugins.',
required=False, action='store_true')
parser.add_argument(
'--pdfjs', '-p',
help='Specify pdfjs version. If not given, '
'the latest version is used.',
required=False, metavar='VERSION')
parser.add_argument('--fancy-dmg', help="Update fancy-dmg Makefile",
action='store_true')
parser.add_argument(
'--dicts', '-d',
help='Test whether all available dictionaries '
'can be reached at the remote repository.',
required=False, action='store_true')
args = parser.parse_args()
run(nsis=False, ace=True, pdfjs=True, fancy_dmg=args.fancy_dmg,
pdfjs_version=args.pdfjs, dicts=args.dicts)
if __name__ == '__main__':
main()
|
import logging
import requests
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ICON,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
)
from homeassistant.helpers.entity import Entity
from . import (
DOMAIN as VICARE_DOMAIN,
PYVICARE_ERROR,
VICARE_API,
VICARE_HEATING_TYPE,
VICARE_NAME,
HeatingType,
)
_LOGGER = logging.getLogger(__name__)
CONF_GETTER = "getter"
SENSOR_TYPE_TEMPERATURE = "temperature"
SENSOR_OUTSIDE_TEMPERATURE = "outside_temperature"
SENSOR_SUPPLY_TEMPERATURE = "supply_temperature"
SENSOR_RETURN_TEMPERATURE = "return_temperature"
# gas sensors
SENSOR_BOILER_TEMPERATURE = "boiler_temperature"
SENSOR_BURNER_MODULATION = "burner_modulation"
SENSOR_BURNER_STARTS = "burner_starts"
SENSOR_BURNER_HOURS = "burner_hours"
SENSOR_BURNER_POWER = "burner_power"
SENSOR_DHW_GAS_CONSUMPTION_TODAY = "hotwater_gas_consumption_today"
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK = "hotwater_gas_consumption_heating_this_week"
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH = "hotwater_gas_consumption_heating_this_month"
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR = "hotwater_gas_consumption_heating_this_year"
SENSOR_GAS_CONSUMPTION_TODAY = "gas_consumption_heating_today"
SENSOR_GAS_CONSUMPTION_THIS_WEEK = "gas_consumption_heating_this_week"
SENSOR_GAS_CONSUMPTION_THIS_MONTH = "gas_consumption_heating_this_month"
SENSOR_GAS_CONSUMPTION_THIS_YEAR = "gas_consumption_heating_this_year"
# heatpump sensors
SENSOR_COMPRESSOR_STARTS = "compressor_starts"
SENSOR_COMPRESSOR_HOURS = "compressor_hours"
SENSOR_TYPES = {
SENSOR_OUTSIDE_TEMPERATURE: {
CONF_NAME: "Outside Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getOutsideTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
SENSOR_SUPPLY_TEMPERATURE: {
CONF_NAME: "Supply Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getSupplyTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
# gas sensors
SENSOR_BOILER_TEMPERATURE: {
CONF_NAME: "Boiler Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getBoilerTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
SENSOR_BURNER_MODULATION: {
CONF_NAME: "Burner modulation",
CONF_ICON: "mdi:percent",
CONF_UNIT_OF_MEASUREMENT: PERCENTAGE,
CONF_GETTER: lambda api: api.getBurnerModulation(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_TODAY: {
CONF_NAME: "Hot water gas consumption today",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterToday(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK: {
CONF_NAME: "Hot water gas consumption this week",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisWeek(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH: {
CONF_NAME: "Hot water gas consumption this month",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisMonth(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR: {
CONF_NAME: "Hot water gas consumption this year",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisYear(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_TODAY: {
CONF_NAME: "Heating gas consumption today",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingToday(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_WEEK: {
CONF_NAME: "Heating gas consumption this week",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisWeek(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_MONTH: {
CONF_NAME: "Heating gas consumption this month",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisMonth(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_YEAR: {
CONF_NAME: "Heating gas consumption this year",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisYear(),
CONF_DEVICE_CLASS: None,
},
SENSOR_BURNER_STARTS: {
CONF_NAME: "Burner Starts",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: None,
CONF_GETTER: lambda api: api.getBurnerStarts(),
CONF_DEVICE_CLASS: None,
},
SENSOR_BURNER_HOURS: {
CONF_NAME: "Burner Hours",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: None,
CONF_GETTER: lambda api: api.getBurnerHours(),
CONF_DEVICE_CLASS: None,
},
SENSOR_BURNER_POWER: {
CONF_NAME: "Burner Current Power",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: POWER_WATT,
CONF_GETTER: lambda api: api.getCurrentPower(),
CONF_DEVICE_CLASS: DEVICE_CLASS_POWER,
},
# heatpump sensors
SENSOR_COMPRESSOR_STARTS: {
CONF_NAME: "Compressor Starts",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: None,
CONF_GETTER: lambda api: api.getCompressorStarts(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS: {
CONF_NAME: "Compressor Hours",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: None,
CONF_GETTER: lambda api: api.getCompressorHours(),
CONF_DEVICE_CLASS: None,
},
SENSOR_RETURN_TEMPERATURE: {
CONF_NAME: "Return Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getReturnTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
}
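# Illustrative note: each entry above is consumed by ViCareSensor.update(),
# which calls SENSOR_TYPES[sensor_type][CONF_GETTER](api) and stores the
# returned value as the entity state; for example, the outside temperature
# sensor ends up calling api.getOutsideTemperature().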
SENSORS_GENERIC = [SENSOR_OUTSIDE_TEMPERATURE, SENSOR_SUPPLY_TEMPERATURE]
SENSORS_BY_HEATINGTYPE = {
HeatingType.gas: [
SENSOR_BOILER_TEMPERATURE,
SENSOR_BURNER_HOURS,
SENSOR_BURNER_MODULATION,
SENSOR_BURNER_STARTS,
SENSOR_BURNER_POWER,
SENSOR_DHW_GAS_CONSUMPTION_TODAY,
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR,
SENSOR_GAS_CONSUMPTION_TODAY,
SENSOR_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_GAS_CONSUMPTION_THIS_YEAR,
],
HeatingType.heatpump: [
SENSOR_COMPRESSOR_HOURS,
SENSOR_COMPRESSOR_STARTS,
SENSOR_RETURN_TEMPERATURE,
],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the ViCare sensor devices."""
if discovery_info is None:
return
vicare_api = hass.data[VICARE_DOMAIN][VICARE_API]
heating_type = hass.data[VICARE_DOMAIN][VICARE_HEATING_TYPE]
sensors = SENSORS_GENERIC.copy()
if heating_type != HeatingType.generic:
sensors.extend(SENSORS_BY_HEATINGTYPE[heating_type])
add_entities(
[
ViCareSensor(hass.data[VICARE_DOMAIN][VICARE_NAME], vicare_api, sensor)
for sensor in sensors
]
)
class ViCareSensor(Entity):
"""Representation of a ViCare sensor."""
def __init__(self, name, api, sensor_type):
"""Initialize the sensor."""
self._sensor = SENSOR_TYPES[sensor_type]
self._name = f"{name} {self._sensor[CONF_NAME]}"
self._api = api
self._sensor_type = sensor_type
self._state = None
@property
def available(self):
"""Return True if entity is available."""
return self._state is not None and self._state != PYVICARE_ERROR
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.service.id}-{self._sensor_type}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._sensor[CONF_ICON]
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._sensor[CONF_UNIT_OF_MEASUREMENT]
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._sensor[CONF_DEVICE_CLASS]
def update(self):
"""Update state of sensor."""
try:
self._state = self._sensor[CONF_GETTER](self._api)
except requests.exceptions.ConnectionError:
_LOGGER.error("Unable to retrieve data from ViCare server")
except ValueError:
_LOGGER.error("Unable to decode data from ViCare server")
|
import io
import logging
try:
import google.cloud.exceptions
import google.cloud.storage
import google.auth.transport.requests
except ImportError:
MISSING_DEPS = True
import smart_open.bytebuffer
import smart_open.utils
from smart_open import constants
logger = logging.getLogger(__name__)
_BINARY_TYPES = (bytes, bytearray, memoryview)
"""Allowed binary buffer types for writing to the underlying GCS stream"""
_UNKNOWN = '*'
SCHEME = "gs"
"""Supported scheme for GCS"""
_MIN_MIN_PART_SIZE = _REQUIRED_CHUNK_MULTIPLE = 256 * 1024
"""Google requires you to upload in multiples of 256 KB, except for the last part."""
_DEFAULT_MIN_PART_SIZE = 50 * 1024**2
"""Default minimum part size for GCS multipart uploads"""
DEFAULT_BUFFER_SIZE = 256 * 1024
"""Default buffer size for working with GCS"""
_UPLOAD_INCOMPLETE_STATUS_CODES = (308, )
_UPLOAD_COMPLETE_STATUS_CODES = (200, 201)
def _make_range_string(start, stop=None, end=None):
#
# GCS seems to violate RFC-2616 (see utils.make_range_string), so we
# need a separate implementation.
#
# https://cloud.google.com/storage/docs/xml-api/resumable-upload#step_3upload_the_file_blocks
#
if end is None:
end = _UNKNOWN
if stop is None:
return 'bytes %d-/%s' % (start, end)
return 'bytes %d-%d/%s' % (start, stop, end)
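# Illustrative examples of the Content-Range strings produced above (the
# offsets are hypothetical):
#   _make_range_string(0)                 -> 'bytes 0-/*'
#   _make_range_string(0, 262143)         -> 'bytes 0-262143/*'
#   _make_range_string(0, 262143, 262144) -> 'bytes 0-262143/262144'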
class UploadFailedError(Exception):
def __init__(self, message, status_code, text):
"""Raise when a multi-part upload to GCS returns a failed response status code.
Parameters
----------
message: str
The error message to display.
status_code: int
The status code returned from the upload response.
text: str
The text returned from the upload response.
"""
super(UploadFailedError, self).__init__(message)
self.status_code = status_code
self.text = text
def _fail(response, part_num, content_length, total_size, headers):
status_code = response.status_code
response_text = response.text
total_size_gb = total_size / 1024.0 ** 3
msg = (
"upload failed (status code: %(status_code)d, response text: %(response_text)s), "
"part #%(part_num)d, %(total_size)d bytes (total %(total_size_gb).3fGB), headers: %(headers)r"
) % locals()
raise UploadFailedError(msg, response.status_code, response.text)
def parse_uri(uri_as_string):
sr = smart_open.utils.safe_urlsplit(uri_as_string)
assert sr.scheme == SCHEME
bucket_id = sr.netloc
blob_id = sr.path.lstrip('/')
return dict(scheme=SCHEME, bucket_id=bucket_id, blob_id=blob_id)
def open_uri(uri, mode, transport_params):
parsed_uri = parse_uri(uri)
kwargs = smart_open.utils.check_kwargs(open, transport_params)
return open(parsed_uri['bucket_id'], parsed_uri['blob_id'], mode, **kwargs)
def open(
bucket_id,
blob_id,
mode,
buffer_size=DEFAULT_BUFFER_SIZE,
min_part_size=_MIN_MIN_PART_SIZE,
client=None, # type: google.cloud.storage.Client
):
"""Open an GCS blob for reading or writing.
Parameters
----------
bucket_id: str
The name of the bucket this object resides in.
blob_id: str
The name of the blob within the bucket.
mode: str
The mode for opening the object. Must be either "rb" or "wb".
buffer_size: int, optional
The buffer size to use when performing I/O. For reading only.
min_part_size: int, optional
The minimum part size for multipart uploads. For writing only.
client: google.cloud.storage.Client, optional
The GCS client to use when working with google-cloud-storage.
"""
if mode == constants.READ_BINARY:
fileobj = Reader(
bucket_id,
blob_id,
buffer_size=buffer_size,
line_terminator=constants.BINARY_NEWLINE,
client=client,
)
elif mode == constants.WRITE_BINARY:
fileobj = Writer(
bucket_id,
blob_id,
min_part_size=min_part_size,
client=client,
)
else:
raise NotImplementedError('GCS support for mode %r not implemented' % mode)
fileobj.name = blob_id
return fileobj
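# Minimal usage sketch (illustrative only; the bucket and blob names are
# hypothetical and nothing here is executed by this module):
#
#   with open('my-bucket', 'my-file.txt', 'rb') as fin:
#       data = fin.read()
#
#   with open('my-bucket', 'my-file.txt', 'wb') as fout:
#       fout.write(b'hello world')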
class _RawReader(object):
"""Read an GCS object."""
def __init__(self, gcs_blob, size):
# type: (google.cloud.storage.Blob, int) -> None
self._blob = gcs_blob
self._size = size
self._position = 0
def seek(self, position):
"""Seek to the specified position (byte offset) in the GCS key.
:param int position: The byte offset from the beginning of the key.
Returns the position after seeking.
"""
self._position = position
return self._position
def read(self, size=-1):
if self._position >= self._size:
return b''
binary = self._download_blob_chunk(size)
self._position += len(binary)
return binary
def _download_blob_chunk(self, size):
start = position = self._position
if position == self._size:
#
# When reading, we can't seek to the first byte of an empty file.
# Similarly, we can't seek past the last byte. Do nothing here.
#
binary = b''
elif size == -1:
binary = self._blob.download_as_bytes(start=start)
else:
end = position + size
binary = self._blob.download_as_bytes(start=start, end=end)
return binary
class Reader(io.BufferedIOBase):
"""Reads bytes from GCS.
Implements the io.BufferedIOBase interface of the standard library.
:raises google.cloud.exceptions.NotFound: Raised when the blob to read from does not exist.
"""
def __init__(
self,
bucket,
key,
buffer_size=DEFAULT_BUFFER_SIZE,
line_terminator=constants.BINARY_NEWLINE,
client=None, # type: google.cloud.storage.Client
):
if client is None:
client = google.cloud.storage.Client()
self._blob = client.bucket(bucket).get_blob(key) # type: google.cloud.storage.Blob
if self._blob is None:
raise google.cloud.exceptions.NotFound('blob %s not found in %s' % (key, bucket))
self._size = self._blob.size if self._blob.size is not None else 0
self._raw_reader = _RawReader(self._blob, self._size)
self._current_pos = 0
self._current_part_size = buffer_size
self._current_part = smart_open.bytebuffer.ByteBuffer(buffer_size)
self._eof = False
self._line_terminator = line_terminator
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
#
# Override some methods from io.IOBase.
#
def close(self):
"""Flush and close this stream."""
logger.debug("close: called")
self._blob = None
self._current_part = None
self._raw_reader = None
def readable(self):
"""Return True if the stream can be read from."""
return True
def seekable(self):
"""If False, seek(), tell() and truncate() will raise IOError.
We offer only seek support, and no truncate support."""
return True
#
# io.BufferedIOBase methods.
#
def detach(self):
"""Unsupported."""
raise io.UnsupportedOperation
def seek(self, offset, whence=constants.WHENCE_START):
"""Seek to the specified position.
:param int offset: The offset in bytes.
:param int whence: Where the offset is from.
Returns the position after seeking."""
logger.debug('seeking to offset: %r whence: %r', offset, whence)
if whence not in constants.WHENCE_CHOICES:
raise ValueError('invalid whence, expected one of %r' % constants.WHENCE_CHOICES)
if whence == constants.WHENCE_START:
new_position = offset
elif whence == constants.WHENCE_CURRENT:
new_position = self._current_pos + offset
else:
new_position = self._size + offset
new_position = smart_open.utils.clamp(new_position, 0, self._size)
self._current_pos = new_position
self._raw_reader.seek(new_position)
logger.debug('current_pos: %r', self._current_pos)
self._current_part.empty()
self._eof = self._current_pos == self._size
return self._current_pos
def tell(self):
"""Return the current position within the file."""
return self._current_pos
def truncate(self, size=None):
"""Unsupported."""
raise io.UnsupportedOperation
def read(self, size=-1):
"""Read up to size bytes from the object and return them."""
if size == 0:
return b''
elif size < 0:
self._current_pos = self._size
return self._read_from_buffer() + self._raw_reader.read()
#
# Return unused data first
#
if len(self._current_part) >= size:
return self._read_from_buffer(size)
#
# If the stream is finished, return what we have.
#
if self._eof:
return self._read_from_buffer()
#
# Fill our buffer to the required size.
#
self._fill_buffer(size)
return self._read_from_buffer(size)
def read1(self, size=-1):
"""This is the same as read()."""
return self.read(size=size)
def readinto(self, b):
"""Read up to len(b) bytes into b, and return the number of bytes
read."""
data = self.read(len(b))
if not data:
return 0
b[:len(data)] = data
return len(data)
def readline(self, limit=-1):
"""Read up to and including the next newline. Returns the bytes read."""
if limit != -1:
raise NotImplementedError('limits other than -1 not implemented yet')
the_line = io.BytesIO()
while not (self._eof and len(self._current_part) == 0):
#
# In the worst case, we're reading the unread part of self._current_part
# twice here, once in the if condition and once when calling index.
#
# This is sub-optimal, but better than the alternative: wrapping
# .index in a try..except, because that is slower.
#
remaining_buffer = self._current_part.peek()
if self._line_terminator in remaining_buffer:
next_newline = remaining_buffer.index(self._line_terminator)
the_line.write(self._read_from_buffer(next_newline + 1))
break
else:
the_line.write(self._read_from_buffer())
self._fill_buffer()
return the_line.getvalue()
#
# Internal methods.
#
def _read_from_buffer(self, size=-1):
"""Remove at most size bytes from our buffer and return them."""
# logger.debug('reading %r bytes from %r byte-long buffer', size, len(self._current_part))
size = size if size >= 0 else len(self._current_part)
part = self._current_part.read(size)
self._current_pos += len(part)
# logger.debug('part: %r', part)
return part
def _fill_buffer(self, size=-1):
size = size if size >= 0 else self._current_part._chunk_size
while len(self._current_part) < size and not self._eof:
bytes_read = self._current_part.fill(self._raw_reader)
if bytes_read == 0:
logger.debug('reached EOF while filling buffer')
self._eof = True
def __str__(self):
return "(%s, %r, %r)" % (self.__class__.__name__, self._blob.bucket.name, self._blob.name)
def __repr__(self):
return "%s(bucket=%r, blob=%r, buffer_size=%r)" % (
self.__class__.__name__, self._blob.bucket.name, self._blob.name, self._current_part_size,
)
class Writer(io.BufferedIOBase):
"""Writes bytes to GCS.
Implements the io.BufferedIOBase interface of the standard library."""
def __init__(
self,
bucket,
blob,
min_part_size=_DEFAULT_MIN_PART_SIZE,
client=None, # type: google.cloud.storage.Client
):
if client is None:
client = google.cloud.storage.Client()
self._client = client
self._blob = self._client.bucket(bucket).blob(blob) # type: google.cloud.storage.Blob
assert min_part_size % _REQUIRED_CHUNK_MULTIPLE == 0, 'min part size must be a multiple of 256KB'
        assert min_part_size >= _MIN_MIN_PART_SIZE, 'min part size must be at least 256KB'
self._min_part_size = min_part_size
self._total_size = 0
self._total_parts = 0
self._bytes_uploaded = 0
self._current_part = io.BytesIO()
self._session = google.auth.transport.requests.AuthorizedSession(client._credentials)
#
# https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload#start-resumable
#
self._resumable_upload_url = self._blob.create_resumable_upload_session()
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
def flush(self):
pass
#
# Override some methods from io.IOBase.
#
def close(self):
logger.debug("closing")
if not self.closed:
if self._total_size == 0: # empty files
self._upload_empty_part()
else:
self._upload_part(is_last=True)
self._client = None
logger.debug("successfully closed")
@property
def closed(self):
return self._client is None
def writable(self):
"""Return True if the stream supports writing."""
return True
def tell(self):
"""Return the current stream position."""
return self._total_size
#
# io.BufferedIOBase methods.
#
def detach(self):
raise io.UnsupportedOperation("detach() not supported")
def write(self, b):
"""Write the given bytes (binary string) to the GCS file.
There's buffering happening under the covers, so this may not actually
do any HTTP transfer right away."""
if not isinstance(b, _BINARY_TYPES):
raise TypeError("input must be one of %r, got: %r" % (_BINARY_TYPES, type(b)))
self._current_part.write(b)
self._total_size += len(b)
#
# If the size of this part is precisely equal to the minimum part size,
# we don't perform the actual write now, and wait until we see more data.
# We do this because the very last part of the upload must be handled slightly
# differently (see comments in the _upload_part method).
#
if self._current_part.tell() > self._min_part_size:
self._upload_part()
return len(b)
def terminate(self):
"""Cancel the underlying resumable upload."""
#
# https://cloud.google.com/storage/docs/xml-api/resumable-upload#example_cancelling_an_upload
#
self._session.delete(self._resumable_upload_url)
#
# Internal methods.
#
def _upload_part(self, is_last=False):
part_num = self._total_parts + 1
#
# Here we upload the largest amount possible given GCS's restriction
# of parts being multiples of 256kB, except for the last one.
#
# A final upload of 0 bytes does not work, so we need to guard against
# this edge case. This results in occasionally keeping an additional
# 256kB in the buffer after uploading a part, but until this is fixed
# on Google's end there is no other option.
#
# https://stackoverflow.com/questions/60230631/upload-zero-size-final-part-to-google-cloud-storage-resumable-upload
#
content_length = self._current_part.tell()
remainder = content_length % self._min_part_size
if is_last:
end = self._bytes_uploaded + content_length
elif remainder == 0:
content_length -= _REQUIRED_CHUNK_MULTIPLE
end = None
else:
content_length -= remainder
end = None
range_stop = self._bytes_uploaded + content_length - 1
content_range = _make_range_string(self._bytes_uploaded, range_stop, end=end)
headers = {
'Content-Length': str(content_length),
'Content-Range': content_range,
}
logger.info(
"uploading part #%i, %i bytes (total %.3fGB) headers %r",
part_num, content_length, range_stop / 1024.0 ** 3, headers,
)
self._current_part.seek(0)
response = self._session.put(
self._resumable_upload_url,
data=self._current_part.read(content_length),
headers=headers,
)
if is_last:
expected = _UPLOAD_COMPLETE_STATUS_CODES
else:
expected = _UPLOAD_INCOMPLETE_STATUS_CODES
if response.status_code not in expected:
_fail(response, part_num, content_length, self._total_size, headers)
logger.debug("upload of part #%i finished" % part_num)
self._total_parts += 1
self._bytes_uploaded += content_length
#
# For the last part, the below _current_part handling is a NOOP.
#
self._current_part = io.BytesIO(self._current_part.read())
self._current_part.seek(0, io.SEEK_END)
def _upload_empty_part(self):
logger.debug("creating empty file")
headers = {'Content-Length': '0'}
response = self._session.put(self._resumable_upload_url, headers=headers)
if response.status_code not in _UPLOAD_COMPLETE_STATUS_CODES:
_fail(response, self._total_parts + 1, 0, self._total_size, headers)
self._total_parts += 1
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.terminate()
else:
self.close()
def __str__(self):
return "(%s, %r, %r)" % (self.__class__.__name__, self._blob.bucket.name, self._blob.name)
def __repr__(self):
return "%s(bucket=%r, blob=%r, min_part_size=%r)" % (
self.__class__.__name__, self._blob.bucket.name, self._blob.name, self._min_part_size,
)
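#
# Illustrative sketch, not part of the module: how the part-size arithmetic in
# Writer._upload_part above plays out for a non-final part. The sizes and the
# helper name below are assumptions chosen only to make the 256 KiB rule
# visible; the real code uses _REQUIRED_CHUNK_MULTIPLE and the configured
# min_part_size.
#
def _example_part_arithmetic():
    required_chunk_multiple = 256 * 1024   # assumed value of _REQUIRED_CHUNK_MULTIPLE
    min_part_size = 50 * 1024 * 1024       # assumed minimum part size (a 256 KiB multiple)
    buffered = min_part_size + 123456      # bytes currently sitting in the buffer
    #
    # Non-final parts must be multiples of 256 KiB and must never drain the
    # buffer completely, because the final part is not allowed to be empty.
    #
    remainder = buffered % min_part_size
    if remainder == 0:
        to_upload = buffered - required_chunk_multiple
    else:
        to_upload = buffered - remainder
    kept_back = buffered - to_upload
    assert to_upload % required_chunk_multiple == 0
    assert kept_back > 0
    return to_upload, kept_back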
|
import asyncio
import logging
import async_timeout
from pywemo.ouimeaux_device.api.service import ActionException
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DOMAIN as WEMO_DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up WeMo binary sensors."""
async def _discovered_wemo(device):
"""Handle a discovered Wemo device."""
async_add_entities([WemoBinarySensor(device)])
async_dispatcher_connect(hass, f"{WEMO_DOMAIN}.binary_sensor", _discovered_wemo)
await asyncio.gather(
*[
_discovered_wemo(device)
for device in hass.data[WEMO_DOMAIN]["pending"].pop("binary_sensor")
]
)
class WemoBinarySensor(BinarySensorEntity):
"""Representation a WeMo binary sensor."""
def __init__(self, device):
"""Initialize the WeMo sensor."""
self.wemo = device
self._state = None
self._available = True
self._update_lock = None
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serial_number = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo sensor."""
_LOGGER.debug("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Wemo sensor added to Home Assistant."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = self.hass.data[WEMO_DOMAIN]["registry"]
await self.hass.async_add_executor_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
async def async_update(self):
"""Update WeMo state.
Wemo has an aggressive retry logic that sometimes can take over a
minute to return. If we don't get a state after 5 seconds, assume the
Wemo sensor is unreachable. If update goes through, it will be made
available again.
"""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning("Lost connection to %s", self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update, force_update)
def _update(self, force_update=True):
"""Update the sensor state."""
try:
self._state = self.wemo.get_state(force_update)
if not self._available:
_LOGGER.info("Reconnected to %s", self.name)
self._available = True
except (AttributeError, ActionException) as err:
_LOGGER.warning("Could not update status for %s (%s)", self.name, err)
self._available = False
self.wemo.reconnect_with_device()
@property
def unique_id(self):
"""Return the id of this WeMo sensor."""
return self._serial_number
@property
def name(self):
"""Return the name of the service if any."""
return self._name
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def available(self):
"""Return true if sensor is available."""
return self._available
@property
def device_info(self):
"""Return the device info."""
return {
"name": self._name,
"identifiers": {(WEMO_DOMAIN, self._serial_number)},
"model": self._model_name,
"manufacturer": "Belkin",
}
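#
# Hedged, illustrative sketch (not part of the integration): the timeout-plus-
# shield pattern used by WemoBinarySensor.async_update above. The caller stops
# waiting after the deadline, but asyncio.shield keeps the locked update alive,
# so a consistent state is still recorded once the device finally answers.
#
async def _poll_with_deadline(slow_update, deadline=5):
    """Assumed helper: await slow_update(), but give up after `deadline` seconds."""
    try:
        return await asyncio.wait_for(asyncio.shield(slow_update()), deadline)
    except asyncio.TimeoutError:
        # The shielded coroutine is not cancelled; it may still finish later.
        return None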
|
import enum
import logging
from PyViCare.PyViCareDevice import Device
from PyViCare.PyViCareGazBoiler import GazBoiler
from PyViCare.PyViCareHeatPump import HeatPump
import voluptuous as vol
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.storage import STORAGE_DIR
_LOGGER = logging.getLogger(__name__)
VICARE_PLATFORMS = ["climate", "sensor", "binary_sensor", "water_heater"]
DOMAIN = "vicare"
PYVICARE_ERROR = "error"
VICARE_API = "api"
VICARE_NAME = "name"
VICARE_HEATING_TYPE = "heating_type"
CONF_CIRCUIT = "circuit"
CONF_HEATING_TYPE = "heating_type"
DEFAULT_HEATING_TYPE = "generic"
class HeatingType(enum.Enum):
"""Possible options for heating type."""
generic = "generic"
gas = "gas"
heatpump = "heatpump"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=60): vol.All(
cv.time_period, lambda value: value.total_seconds()
),
vol.Optional(CONF_CIRCUIT): int,
vol.Optional(CONF_NAME, default="ViCare"): cv.string,
vol.Optional(CONF_HEATING_TYPE, default=DEFAULT_HEATING_TYPE): cv.enum(
HeatingType
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Create the ViCare component."""
conf = config[DOMAIN]
params = {"token_file": hass.config.path(STORAGE_DIR, "vicare_token.save")}
if conf.get(CONF_CIRCUIT) is not None:
params["circuit"] = conf[CONF_CIRCUIT]
params["cacheDuration"] = conf.get(CONF_SCAN_INTERVAL)
heating_type = conf[CONF_HEATING_TYPE]
try:
if heating_type == HeatingType.gas:
vicare_api = GazBoiler(conf[CONF_USERNAME], conf[CONF_PASSWORD], **params)
elif heating_type == HeatingType.heatpump:
vicare_api = HeatPump(conf[CONF_USERNAME], conf[CONF_PASSWORD], **params)
else:
vicare_api = Device(conf[CONF_USERNAME], conf[CONF_PASSWORD], **params)
except AttributeError:
_LOGGER.error(
"Failed to create PyViCare API client. Please check your credentials"
)
return False
hass.data[DOMAIN] = {}
hass.data[DOMAIN][VICARE_API] = vicare_api
hass.data[DOMAIN][VICARE_NAME] = conf[CONF_NAME]
hass.data[DOMAIN][VICARE_HEATING_TYPE] = heating_type
for platform in VICARE_PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, config)
return True
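#
# Hedged sketch, not shipped code: an example of the mapping CONFIG_SCHEMA above
# accepts. In practice this comes from configuration.yaml; the dict form is shown
# only to make the schema concrete, and the credentials are placeholders.
#
_EXAMPLE_CONFIG = {
    DOMAIN: {
        CONF_USERNAME: "user@example.com",
        CONF_PASSWORD: "secret",
        CONF_NAME: "ViCare",
        CONF_HEATING_TYPE: "gas",  # coerced to HeatingType.gas by cv.enum
        CONF_SCAN_INTERVAL: 60,  # seconds between API polls
    }
}
# CONFIG_SCHEMA(_EXAMPLE_CONFIG) validates the block and applies the defaults
# for any omitted optional keys such as CONF_CIRCUIT.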
|
import os
import os.path as op
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import matplotlib.pyplot as plt
import pytest
from mne import (read_dipole, read_forward_solution,
convert_forward_solution, read_evokeds, read_cov,
SourceEstimate, write_evokeds, fit_dipole,
transform_surface_to, make_sphere_model, pick_types,
pick_info, EvokedArray, read_source_spaces, make_ad_hoc_cov,
make_forward_solution, Dipole, DipoleFixed, Epochs,
make_fixed_length_events, Evoked)
from mne.dipole import get_phantom_dipoles, _BDIP_ERROR_KEYS
from mne.simulation import simulate_evoked
from mne.datasets import testing
from mne.utils import run_tests_if_main, requires_mne, run_subprocess
from mne.proj import make_eeg_average_ref_proj
from mne.io import read_raw_fif, read_raw_ctf
from mne.io.constants import FIFF
from mne.surface import _compute_nearest
from mne.bem import _bem_find_surface, read_bem_solution
from mne.transforms import apply_trans, _get_trans
data_path = testing.data_path(download=False)
meg_path = op.join(data_path, 'MEG', 'sample')
fname_dip_xfit_80 = op.join(meg_path, 'sample_audvis-ave_xfit.dip')
fname_raw = op.join(meg_path, 'sample_audvis_trunc_raw.fif')
fname_dip = op.join(meg_path, 'sample_audvis_trunc_set1.dip')
fname_bdip = op.join(meg_path, 'sample_audvis_trunc_set1.bdip')
fname_dip_xfit = op.join(meg_path, 'sample_audvis_trunc_xfit.dip')
fname_bdip_xfit = op.join(meg_path, 'sample_audvis_trunc_xfit.bdip')
fname_evo = op.join(meg_path, 'sample_audvis_trunc-ave.fif')
fname_evo_full = op.join(meg_path, 'sample_audvis-ave.fif')
fname_cov = op.join(meg_path, 'sample_audvis_trunc-cov.fif')
fname_trans = op.join(meg_path, 'sample_audvis_trunc-trans.fif')
fname_fwd = op.join(meg_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_src = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-oct-2-src.fif')
fname_xfit_dip = op.join(data_path, 'dip', 'fixed_auto.fif')
fname_xfit_dip_txt = op.join(data_path, 'dip', 'fixed_auto.dip')
fname_xfit_seq_txt = op.join(data_path, 'dip', 'sequential.dip')
fname_ctf = op.join(data_path, 'CTF', 'testdata_ctf_short.ds')
subjects_dir = op.join(data_path, 'subjects')
def _compare_dipoles(orig, new):
"""Compare dipole results for equivalence."""
assert_allclose(orig.times, new.times, atol=1e-3, err_msg='times')
assert_allclose(orig.pos, new.pos, err_msg='pos')
assert_allclose(orig.amplitude, new.amplitude, err_msg='amplitude')
assert_allclose(orig.gof, new.gof, err_msg='gof')
assert_allclose(orig.ori, new.ori, rtol=1e-4, atol=1e-4, err_msg='ori')
assert orig.name == new.name
def _check_dipole(dip, n_dipoles):
"""Check dipole sizes."""
assert len(dip) == n_dipoles
assert dip.pos.shape == (n_dipoles, 3)
assert dip.ori.shape == (n_dipoles, 3)
assert dip.gof.shape == (n_dipoles,)
assert dip.amplitude.shape == (n_dipoles,)
@testing.requires_testing_data
def test_io_dipoles(tmpdir):
"""Test IO for .dip files."""
dipole = read_dipole(fname_dip)
assert 'Dipole ' in repr(dipole) # test repr
out_fname = op.join(str(tmpdir), 'temp.dip')
dipole.save(out_fname)
dipole_new = read_dipole(out_fname)
_compare_dipoles(dipole, dipole_new)
@testing.requires_testing_data
def test_dipole_fitting_ctf():
"""Test dipole fitting with CTF data."""
raw_ctf = read_raw_ctf(fname_ctf).set_eeg_reference(projection=True)
events = make_fixed_length_events(raw_ctf, 1)
evoked = Epochs(raw_ctf, events, 1, 0, 0, baseline=None).average()
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.))
# XXX Eventually we should do some better checks about accuracy, but
# for now our CTF phantom fitting tutorials will have to do
# (otherwise we need to add that to the testing dataset, which is
# a bit too big)
fit_dipole(evoked, cov, sphere, rank=dict(meg=len(evoked.data)))
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_mne
def test_dipole_fitting(tmpdir):
"""Test dipole fitting."""
amp = 100e-9
tempdir = str(tmpdir)
rng = np.random.RandomState(0)
fname_dtemp = op.join(tempdir, 'test.dip')
fname_sim = op.join(tempdir, 'test-ave.fif')
fwd = convert_forward_solution(read_forward_solution(fname_fwd),
surf_ori=False, force_fixed=True,
use_cps=True)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
n_per_hemi = 5
vertices = [np.sort(rng.permutation(s['vertno'])[:n_per_hemi])
for s in fwd['src']]
nv = sum(len(v) for v in vertices)
stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
evoked = simulate_evoked(fwd, stc, evoked.info, cov, nave=evoked.nave,
random_state=rng)
# For speed, let's use a subset of channels (strange but works)
picks = np.sort(np.concatenate([
pick_types(evoked.info, meg=True, eeg=False)[::2],
pick_types(evoked.info, meg=False, eeg=True)[::2]]))
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
write_evokeds(fname_sim, evoked)
# Run MNE-C version
run_subprocess([
'mne_dipole_fit', '--meas', fname_sim, '--meg', '--eeg',
'--noise', fname_cov, '--dip', fname_dtemp,
'--mri', fname_fwd, '--reg', '0', '--tmin', '0',
])
dip_c = read_dipole(fname_dtemp)
# Run mne-python version
sphere = make_sphere_model(head_radius=0.1)
with pytest.warns(RuntimeWarning, match='projection'):
dip, residual = fit_dipole(evoked, cov, sphere, fname_fwd,
rank='info') # just to test rank support
assert isinstance(residual, Evoked)
# Sanity check: do our residuals have less power than orig data?
data_rms = np.sqrt(np.sum(evoked.data ** 2, axis=0))
resi_rms = np.sqrt(np.sum(residual.data ** 2, axis=0))
assert (data_rms > resi_rms * 0.95).all(), \
'%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95)
# Compare to original points
transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
assert fwd['src'][0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD
src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
axis=0)
# MNE-C skips the last "time" point :(
out = dip.crop(dip_c.times[0], dip_c.times[-1])
assert (dip is out)
src_rr, src_nn = src_rr[:-1], src_nn[:-1]
# check that we did about as well
corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
for d in (dip_c, dip):
new = d.pos
diffs = new - src_rr
corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
gc_dists += [180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori,
axis=1)))]
amp_errs += [np.sqrt(np.mean((amp - d.amplitude) ** 2))]
gofs += [np.mean(d.gof)]
# XXX possibly some OpenBLAS numerical differences make
# things slightly worse for us
factor = 0.7
assert dists[0] / factor >= dists[1], 'dists: %s' % dists
assert corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs
assert gc_dists[0] / factor >= gc_dists[1] * 0.8, \
'gc-dists (ori): %s' % gc_dists
assert amp_errs[0] / factor >= amp_errs[1],\
'amplitude errors: %s' % amp_errs
# This one is weird because our cov/sim/picking is weird
assert gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs
@testing.requires_testing_data
def test_dipole_fitting_fixed(tmpdir):
"""Test dipole fitting with a fixed position."""
tpeak = 0.073
sphere = make_sphere_model(head_radius=0.1)
evoked = read_evokeds(fname_evo, baseline=(None, 0))[0]
evoked.pick_types(meg=True)
t_idx = np.argmin(np.abs(tpeak - evoked.times))
evoked_crop = evoked.copy().crop(tpeak, tpeak)
assert len(evoked_crop.times) == 1
cov = read_cov(fname_cov)
dip_seq, resid = fit_dipole(evoked_crop, cov, sphere)
assert isinstance(dip_seq, Dipole)
assert isinstance(resid, Evoked)
assert len(dip_seq.times) == 1
pos, ori, gof = dip_seq.pos[0], dip_seq.ori[0], dip_seq.gof[0]
amp = dip_seq.amplitude[0]
# Fix position, allow orientation to change
dip_free, resid_free = fit_dipole(evoked, cov, sphere, pos=pos)
assert isinstance(dip_free, Dipole)
assert isinstance(resid_free, Evoked)
assert_allclose(dip_free.times, evoked.times)
assert_allclose(np.tile(pos[np.newaxis], (len(evoked.times), 1)),
dip_free.pos)
assert_allclose(ori, dip_free.ori[t_idx]) # should find same ori
    assert (np.dot(dip_free.ori, ori).mean() < 0.9)  # but not all the same
assert_allclose(gof, dip_free.gof[t_idx]) # ... same gof
assert_allclose(amp, dip_free.amplitude[t_idx]) # and same amp
assert_allclose(resid.data, resid_free.data[:, [t_idx]])
# Fix position and orientation
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
assert (isinstance(dip_fixed, DipoleFixed))
assert_allclose(dip_fixed.times, evoked.times)
assert_allclose(dip_fixed.info['chs'][0]['loc'][:3], pos)
assert_allclose(dip_fixed.info['chs'][0]['loc'][3:6], ori)
assert_allclose(dip_fixed.data[1, t_idx], gof)
assert_allclose(resid.data, resid_fixed.data[:, [t_idx]])
_check_roundtrip_fixed(dip_fixed, tmpdir)
# bad resetting
evoked.info['bads'] = [evoked.ch_names[3]]
dip_fixed, resid_fixed = fit_dipole(evoked, cov, sphere, pos=pos, ori=ori)
# Degenerate conditions
evoked_nan = evoked.copy().crop(0, 0)
evoked_nan.data[0, 0] = None
pytest.raises(ValueError, fit_dipole, evoked_nan, cov, sphere)
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, ori=[1, 0, 0])
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0, 0, 0],
ori=[2, 0, 0])
pytest.raises(ValueError, fit_dipole, evoked, cov, sphere, pos=[0.1, 0, 0])
# copying
dip_fixed_2 = dip_fixed.copy()
dip_fixed_2.data[:] = 0.
assert not np.isclose(dip_fixed.data, 0., atol=1e-20).any()
# plotting
plt.close('all')
dip_fixed.plot()
plt.close('all')
orig_times = np.array(dip_fixed.times)
shift_times = dip_fixed.shift_time(1.).times
assert_allclose(shift_times, orig_times + 1)
@testing.requires_testing_data
def test_len_index_dipoles():
"""Test len and indexing of Dipole objects."""
dipole = read_dipole(fname_dip)
d0 = dipole[0]
d1 = dipole[:1]
_check_dipole(d0, 1)
_check_dipole(d1, 1)
_compare_dipoles(d0, d1)
mask = dipole.gof > 15
idx = np.where(mask)[0]
d_mask = dipole[mask]
_check_dipole(d_mask, 4)
_compare_dipoles(d_mask, dipole[idx])
@pytest.mark.slowtest # slow-ish on Travis OSX
@testing.requires_testing_data
def test_min_distance_fit_dipole():
"""Test dipole min_dist to inner_skull."""
subject = 'sample'
raw = read_raw_fif(fname_raw, preload=True)
# select eeg data
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
info = pick_info(raw.info, picks)
# Let's use cov = Identity
cov = read_cov(fname_cov)
cov['data'] = np.eye(cov['data'].shape[0])
    # Simulated scalp map
simulated_scalp_map = np.zeros(picks.shape[0])
simulated_scalp_map[27:34] = 1
simulated_scalp_map = simulated_scalp_map[:, None]
evoked = EvokedArray(simulated_scalp_map, info, tmin=0)
min_dist = 5. # distance in mm
bem = read_bem_solution(fname_bem)
dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
min_dist=min_dist)
assert isinstance(residual, Evoked)
dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)
# Constraints are not exact, so bump the minimum slightly
assert (min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))
pytest.raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
-1.)
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir):
"""Compute dipole depth."""
trans = _get_trans(fname_trans)[0]
bem = read_bem_solution(fname_bem)
surf = _bem_find_surface(bem, 'inner_skull')
points = surf['rr']
points = apply_trans(trans['trans'], points)
depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0]
return np.ravel(depth)
@testing.requires_testing_data
def test_accuracy():
"""Test dipole fitting to sub-mm accuracy."""
evoked = read_evokeds(fname_evo)[0].crop(0., 0.,)
evoked.pick_types(meg=True, eeg=False)
evoked.pick_channels([c for c in evoked.ch_names[::4]])
for rad, perc_90 in zip((0.09, None), (0.002, 0.004)):
bem = make_sphere_model('auto', rad, evoked.info,
relative_radii=(0.999, 0.998, 0.997, 0.995))
src = read_source_spaces(fname_src)
fwd = make_forward_solution(evoked.info, None, src, bem)
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=True)
vertices = [src[0]['vertno'], src[1]['vertno']]
n_vertices = sum(len(v) for v in vertices)
amp = 10e-9
data = np.eye(n_vertices + 1)[:n_vertices]
data[-1, -1] = 1.
data *= amp
stc = SourceEstimate(data, vertices, 0., 1e-3, 'sample')
evoked.info.normalize_proj()
sim = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
cov = make_ad_hoc_cov(evoked.info)
dip = fit_dipole(sim, cov, bem, min_dist=0.001)[0]
ds = []
for vi in range(n_vertices):
if vi < len(vertices[0]):
hi = 0
vertno = vi
else:
hi = 1
vertno = vi - len(vertices[0])
vertno = src[hi]['vertno'][vertno]
rr = src[hi]['rr'][vertno]
d = np.sqrt(np.sum((rr - dip.pos[vi]) ** 2))
ds.append(d)
# make sure that our median is sub-mm and the large majority are very
# close (we expect some to be off by a bit e.g. because they are
# radial)
assert ((np.percentile(ds, [50, 90]) < [0.0005, perc_90]).all())
@testing.requires_testing_data
def test_dipole_fixed(tmpdir):
"""Test reading a fixed-position dipole (from Xfit)."""
dip = read_dipole(fname_xfit_dip)
    # check the repr of the DipoleFixed object
assert 'DipoleFixed ' in repr(dip)
_check_roundtrip_fixed(dip, tmpdir)
with pytest.warns(RuntimeWarning, match='extra fields'):
dip_txt = read_dipole(fname_xfit_dip_txt)
assert_allclose(dip.info['chs'][0]['loc'][:3], dip_txt.pos[0])
assert_allclose(dip_txt.amplitude[0], 12.1e-9)
with pytest.warns(RuntimeWarning, match='extra fields'):
dip_txt_seq = read_dipole(fname_xfit_seq_txt)
assert_allclose(dip_txt_seq.gof, [27.3, 46.4, 43.7, 41., 37.3, 32.5])
def _check_roundtrip_fixed(dip, tmpdir):
"""Check roundtrip IO for fixed dipoles."""
tempdir = str(tmpdir)
dip.save(op.join(tempdir, 'test-dip.fif.gz'))
dip_read = read_dipole(op.join(tempdir, 'test-dip.fif.gz'))
    assert_allclose(dip_read.data, dip.data)
assert_allclose(dip_read.times, dip.times, atol=1e-8)
assert dip_read.info['xplotter_layout'] == dip.info['xplotter_layout']
assert dip_read.ch_names == dip.ch_names
for ch_1, ch_2 in zip(dip_read.info['chs'], dip.info['chs']):
assert ch_1['ch_name'] == ch_2['ch_name']
for key in ('loc', 'kind', 'unit_mul', 'range', 'coord_frame', 'unit',
'cal', 'coil_type', 'scanno', 'logno'):
assert_allclose(ch_1[key], ch_2[key], err_msg=key)
def test_get_phantom_dipoles():
"""Test getting phantom dipole locations."""
pytest.raises(ValueError, get_phantom_dipoles, 0)
pytest.raises(ValueError, get_phantom_dipoles, 'foo')
for kind in ('vectorview', 'otaniemi'):
pos, ori = get_phantom_dipoles(kind)
assert pos.shape == (32, 3)
assert ori.shape == (32, 3)
@testing.requires_testing_data
def test_confidence(tmpdir):
"""Test confidence limits."""
evoked = read_evokeds(fname_evo_full, 'Left Auditory', baseline=(None, 0))
evoked.crop(0.08, 0.08).pick_types(meg=True) # MEG-only
cov = make_ad_hoc_cov(evoked.info)
sphere = make_sphere_model((0., 0., 0.04), 0.08)
dip_py = fit_dipole(evoked, cov, sphere)[0]
fname_test = op.join(str(tmpdir), 'temp-dip.txt')
dip_py.save(fname_test)
dip_read = read_dipole(fname_test)
with pytest.warns(RuntimeWarning, match="'noise/ft/cm', 'prob'"):
dip_xfit = read_dipole(fname_dip_xfit_80)
for dip_check in (dip_py, dip_read):
assert_allclose(dip_check.pos, dip_xfit.pos, atol=5e-4) # < 0.5 mm
assert_allclose(dip_check.gof, dip_xfit.gof, atol=5e-1) # < 0.5%
assert_array_equal(dip_check.nfree, dip_xfit.nfree) # exact match
assert_allclose(dip_check.khi2, dip_xfit.khi2, rtol=2e-2) # 2% miss
assert set(dip_check.conf.keys()) == set(dip_xfit.conf.keys())
for key in sorted(dip_check.conf.keys()):
assert_allclose(dip_check.conf[key], dip_xfit.conf[key],
rtol=1.5e-1, err_msg=key)
# bdip created with:
# mne_dipole_fit --meas sample_audvis_trunc-ave.fif --set 1 --meg --tmin 40 --tmax 95 --bmin -200 --bmax 0 --noise sample_audvis_trunc-cov.fif --bem ../../subjects/sample/bem/sample-1280-1280-1280-bem-sol.fif --origin 0\:0\:40 --mri sample_audvis_trunc-trans.fif --bdip sample_audvis_trunc_set1.bdip # noqa: E501
# It gives equivalent results to .dip in non-dipole mode.
# xfit bdip created by taking sample_audvis_trunc-ave.fif, picking MEG
# channels, writing to disk (with MNE), then running xfit on 40-95 ms
# with a 3.3 ms step
@testing.requires_testing_data
@pytest.mark.parametrize('fname_dip_, fname_bdip_', [
(fname_dip, fname_bdip),
(fname_dip_xfit, fname_bdip_xfit),
])
def test_bdip(fname_dip_, fname_bdip_, tmpdir):
"""Test bdip I/O."""
# use text as veridical
with pytest.warns(None): # ignored fields
dip = read_dipole(fname_dip_)
# read binary
orig_size = os.stat(fname_bdip_).st_size
bdip = read_dipole(fname_bdip_)
# test round-trip by writing and reading, too
fname = tmpdir.join('test.bdip')
bdip.save(fname)
bdip_read = read_dipole(fname)
write_size = os.stat(str(fname)).st_size
assert orig_size == write_size
assert len(dip) == len(bdip) == len(bdip_read) == 17
dip_has_conf = fname_dip_ == fname_dip_xfit
for kind, this_bdip in (('orig', bdip), ('read', bdip_read)):
for key, atol in (
('pos', 5e-5),
('ori', 5e-3),
('gof', 0.5e-1),
('times', 5e-5),
('khi2', 1e-2)):
d = getattr(dip, key)
b = getattr(this_bdip, key)
if key == 'khi2' and dip_has_conf:
if d is not None:
assert_allclose(d, b, atol=atol,
err_msg='%s: %s' % (kind, key))
else:
assert b is None
if dip_has_conf:
# conf
conf_keys = _BDIP_ERROR_KEYS + ('vol',)
assert (set(this_bdip.conf.keys()) ==
set(dip.conf.keys()) ==
set(conf_keys))
for key in conf_keys:
d = dip.conf[key]
b = this_bdip.conf[key]
                assert_allclose(d, b, rtol=0.12,  # not so great, text I/O
err_msg='%s: %s' % (kind, key))
# Not stored
assert this_bdip.name is None
assert_allclose(this_bdip.nfree, 0.)
run_tests_if_main()
|
from contextlib import contextmanager
from datetime import timedelta
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.utility_meter.const import (
ATTR_TARIFF,
ATTR_VALUE,
DOMAIN,
SERVICE_CALIBRATE_METER,
SERVICE_SELECT_TARIFF,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_UNIT_OF_MEASUREMENT,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed
@contextmanager
def alter_time(retval):
"""Manage multiple time mocks."""
patch1 = patch("homeassistant.util.dt.utcnow", return_value=retval)
patch2 = patch("homeassistant.util.dt.now", return_value=retval)
with patch1, patch2:
yield
async def test_state(hass):
"""Test utility sensor state."""
config = {
"utility_meter": {
"energy_bill": {
"source": "sensor.energy",
"tariffs": ["onpeak", "midpeak", "offpeak"],
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
entity_id = config[DOMAIN]["energy_bill"]["source"]
hass.states.async_set(
entity_id, 2, {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR}
)
await hass.async_block_till_done()
now = dt_util.utcnow() + timedelta(seconds=10)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.states.async_set(
entity_id,
3,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill_onpeak")
assert state is not None
assert state.state == "1"
state = hass.states.get("sensor.energy_bill_midpeak")
assert state is not None
assert state.state == "0"
state = hass.states.get("sensor.energy_bill_offpeak")
assert state is not None
assert state.state == "0"
await hass.services.async_call(
DOMAIN,
SERVICE_SELECT_TARIFF,
{ATTR_ENTITY_ID: "utility_meter.energy_bill", ATTR_TARIFF: "offpeak"},
blocking=True,
)
await hass.async_block_till_done()
now = dt_util.utcnow() + timedelta(seconds=20)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.states.async_set(
entity_id,
6,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill_onpeak")
assert state is not None
assert state.state == "1"
state = hass.states.get("sensor.energy_bill_midpeak")
assert state is not None
assert state.state == "0"
state = hass.states.get("sensor.energy_bill_offpeak")
assert state is not None
assert state.state == "3"
await hass.services.async_call(
DOMAIN,
SERVICE_CALIBRATE_METER,
{ATTR_ENTITY_ID: "sensor.energy_bill_midpeak", ATTR_VALUE: "100"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill_midpeak")
assert state is not None
assert state.state == "100"
await hass.services.async_call(
DOMAIN,
SERVICE_CALIBRATE_METER,
{ATTR_ENTITY_ID: "sensor.energy_bill_midpeak", ATTR_VALUE: "0.123"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill_midpeak")
assert state is not None
assert state.state == "0.123"
async def test_net_consumption(hass):
"""Test utility sensor state."""
config = {
"utility_meter": {
"energy_bill": {"source": "sensor.energy", "net_consumption": True}
}
}
assert await async_setup_component(hass, DOMAIN, config)
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
entity_id = config[DOMAIN]["energy_bill"]["source"]
hass.states.async_set(
entity_id, 2, {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR}
)
await hass.async_block_till_done()
now = dt_util.utcnow() + timedelta(seconds=10)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.states.async_set(
entity_id,
1,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill")
assert state is not None
assert state.state == "-1"
async def test_non_net_consumption(hass):
"""Test utility sensor state."""
config = {
"utility_meter": {
"energy_bill": {"source": "sensor.energy", "net_consumption": False}
}
}
assert await async_setup_component(hass, DOMAIN, config)
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
entity_id = config[DOMAIN]["energy_bill"]["source"]
hass.states.async_set(
entity_id, 2, {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR}
)
await hass.async_block_till_done()
now = dt_util.utcnow() + timedelta(seconds=10)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.states.async_set(
entity_id,
1,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill")
assert state is not None
assert state.state == "0"
def gen_config(cycle, offset=None):
"""Generate configuration."""
config = {
"utility_meter": {"energy_bill": {"source": "sensor.energy", "cycle": cycle}}
}
if offset:
config["utility_meter"]["energy_bill"]["offset"] = {
"days": offset.days,
"seconds": offset.seconds,
}
return config
async def _test_self_reset(hass, config, start_time, expect_reset=True):
"""Test energy sensor self reset."""
assert await async_setup_component(hass, DOMAIN, config)
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
entity_id = config[DOMAIN]["energy_bill"]["source"]
now = dt_util.parse_datetime(start_time)
with alter_time(now):
async_fire_time_changed(hass, now)
hass.states.async_set(
entity_id, 1, {ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR}
)
await hass.async_block_till_done()
now += timedelta(seconds=30)
with alter_time(now):
async_fire_time_changed(hass, now)
hass.states.async_set(
entity_id,
3,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
now += timedelta(seconds=30)
with alter_time(now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
hass.states.async_set(
entity_id,
6,
{ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR},
force_update=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.energy_bill")
if expect_reset:
assert state.attributes.get("last_period") == "2"
assert state.state == "3"
else:
assert state.attributes.get("last_period") == 0
assert state.state == "5"
async def test_self_reset_hourly(hass, legacy_patchable_time):
"""Test hourly reset of meter."""
await _test_self_reset(
hass, gen_config("hourly"), "2017-12-31T23:59:00.000000+00:00"
)
async def test_self_reset_daily(hass, legacy_patchable_time):
"""Test daily reset of meter."""
await _test_self_reset(
hass, gen_config("daily"), "2017-12-31T23:59:00.000000+00:00"
)
async def test_self_reset_weekly(hass, legacy_patchable_time):
"""Test weekly reset of meter."""
await _test_self_reset(
hass, gen_config("weekly"), "2017-12-31T23:59:00.000000+00:00"
)
async def test_self_reset_monthly(hass, legacy_patchable_time):
"""Test monthly reset of meter."""
await _test_self_reset(
hass, gen_config("monthly"), "2017-12-31T23:59:00.000000+00:00"
)
async def test_self_reset_bimonthly(hass, legacy_patchable_time):
"""Test bimonthly reset of meter occurs on even months."""
await _test_self_reset(
hass, gen_config("bimonthly"), "2017-12-31T23:59:00.000000+00:00"
)
async def test_self_no_reset_bimonthly(hass, legacy_patchable_time):
"""Test bimonthly reset of meter does not occur on odd months."""
await _test_self_reset(
hass,
gen_config("bimonthly"),
"2018-01-01T23:59:00.000000+00:00",
expect_reset=False,
)
async def test_self_reset_quarterly(hass, legacy_patchable_time):
"""Test quarterly reset of meter."""
await _test_self_reset(
hass, gen_config("quarterly"), "2017-03-31T23:59:00.000000+00:00"
)
async def test_self_reset_yearly(hass, legacy_patchable_time):
"""Test yearly reset of meter."""
await _test_self_reset(
hass, gen_config("yearly"), "2017-12-31T23:59:00.000000+00:00"
)
async def test_self_no_reset_yearly(hass, legacy_patchable_time):
"""Test yearly reset of meter does not occur after 1st January."""
await _test_self_reset(
hass,
gen_config("yearly"),
"2018-01-01T23:59:00.000000+00:00",
expect_reset=False,
)
async def test_reset_yearly_offset(hass, legacy_patchable_time):
"""Test yearly reset of meter."""
await _test_self_reset(
hass,
gen_config("yearly", timedelta(days=1, minutes=10)),
"2018-01-02T00:09:00.000000+00:00",
)
async def test_no_reset_yearly_offset(hass, legacy_patchable_time):
"""Test yearly reset of meter."""
await _test_self_reset(
hass,
gen_config("yearly", timedelta(31)),
"2018-01-30T23:59:00.000000+00:00",
expect_reset=False,
)
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATE_ON, STATE_OFF}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Switch states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
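#
# Hedged usage sketch (placeholder entity ids, not part of the module): how a
# scene or snapshot restore would call async_reproduce_states above.
#
async def _example_restore(hass: HomeAssistantType) -> None:
    """Restore two switches to a previously remembered state."""
    await async_reproduce_states(
        hass,
        [State("switch.kitchen", STATE_ON), State("switch.porch", STATE_OFF)],
    )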
|
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_NAME,
CONF_TYPE,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DOMAIN as KONNECTED_DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up binary sensors attached to a Konnected device from a config entry."""
data = hass.data[KONNECTED_DOMAIN]
device_id = config_entry.data["id"]
sensors = [
KonnectedBinarySensor(device_id, pin_num, pin_data)
for pin_num, pin_data in data[CONF_DEVICES][device_id][
CONF_BINARY_SENSORS
].items()
]
async_add_entities(sensors)
class KonnectedBinarySensor(BinarySensorEntity):
"""Representation of a Konnected binary sensor."""
def __init__(self, device_id, zone_num, data):
"""Initialize the Konnected binary sensor."""
self._data = data
self._device_id = device_id
self._zone_num = zone_num
self._state = self._data.get(ATTR_STATE)
self._device_class = self._data.get(CONF_TYPE)
self._unique_id = f"{device_id}-{zone_num}"
self._name = self._data.get(CONF_NAME)
@property
def unique_id(self) -> str:
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(KONNECTED_DOMAIN, self._device_id)},
}
async def async_added_to_hass(self):
"""Store entity_id and register state change callback."""
self._data[ATTR_ENTITY_ID] = self.entity_id
self.async_on_remove(
async_dispatcher_connect(
self.hass, f"konnected.{self.entity_id}.update", self.async_set_state
)
)
@callback
def async_set_state(self, state):
"""Update the sensor's state."""
self._state = state
self.async_write_ha_state()
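#
# Hedged sketch (illustrative helper, not part of the integration): the sending
# half of the dispatcher handshake wired up in async_added_to_hass above. The
# code that processes the Konnected panel's state callbacks would fire the
# per-entity signal, and async_set_state then pushes the value into Home Assistant.
#
from homeassistant.helpers.dispatcher import async_dispatcher_send
def _example_panel_update(hass, entity_id, state):
    """Assumed helper: notify the entity registered above of a new zone state."""
    async_dispatcher_send(hass, f"konnected.{entity_id}.update", state)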
|
import asyncio
import logging
import async_timeout
import voluptuous as vol
from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService
from homeassistant.const import CONF_ACCESS_TOKEN, HTTP_OK
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_RESOURCE = "https://api.flock.com/hooks/sendMessage/"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_ACCESS_TOKEN): cv.string})
async def async_get_service(hass, config, discovery_info=None):
"""Get the Flock notification service."""
access_token = config.get(CONF_ACCESS_TOKEN)
url = f"{_RESOURCE}{access_token}"
session = async_get_clientsession(hass)
return FlockNotificationService(url, session)
class FlockNotificationService(BaseNotificationService):
"""Implement the notification service for Flock."""
def __init__(self, url, session):
"""Initialize the Flock notification service."""
self._url = url
self._session = session
async def async_send_message(self, message, **kwargs):
"""Send the message to the user."""
payload = {"text": message}
_LOGGER.debug("Attempting to call Flock at %s", self._url)
try:
with async_timeout.timeout(10):
response = await self._session.post(self._url, json=payload)
result = await response.json()
if response.status != HTTP_OK or "error" in result:
_LOGGER.error(
"Flock service returned HTTP status %d, response %s",
response.status,
result,
)
except asyncio.TimeoutError:
_LOGGER.error("Timeout accessing Flock at %s", self._url)
|
from unittest import TestCase
from scattertext.AutoTermSelector import AutoTermSelector
from scattertext.test.test_TermDocMat import make_a_test_term_doc_matrix
class TestAutoTermSelector(TestCase):
def test_reduce_terms(self):
tdm = make_a_test_term_doc_matrix()
scores = tdm.get_term_freq_df().sum(axis=1) % 10
new_tdm = AutoTermSelector.reduce_terms(
tdm, scores, num_term_to_keep=10)
self.assertLessEqual(len(new_tdm.get_term_freq_df().index), 10)
self.assertEqual(len(tdm.get_term_freq_df().index), 58)
def test_get_selected_terms(self):
tdm = make_a_test_term_doc_matrix()
scores = tdm.get_term_freq_df().sum(axis=1) % 10
selected_terms = AutoTermSelector.get_selected_terms(tdm, scores, num_term_to_keep=10)
self.assertLessEqual(len(selected_terms), 10)
self.assertEqual(len(tdm.get_term_freq_df().index), 58)
|
from homeassistant.core import State
from tests.common import async_mock_service
async def test_reproducing_states(hass, caplog):
"""Test reproducing Remote states."""
hass.states.async_set("remote.entity_off", "off", {})
hass.states.async_set("remote.entity_on", "on", {})
turn_on_calls = async_mock_service(hass, "remote", "turn_on")
turn_off_calls = async_mock_service(hass, "remote", "turn_off")
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[State("remote.entity_off", "off"), State("remote.entity_on", "on")],
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("remote.entity_off", "not_supported")]
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("remote.entity_on", "off"),
State("remote.entity_off", "on", {}),
# Should not raise
State("remote.non_existing", "on"),
],
)
assert len(turn_on_calls) == 1
assert turn_on_calls[0].domain == "remote"
assert turn_on_calls[0].data == {
"entity_id": "remote.entity_off",
}
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "remote"
assert turn_off_calls[0].data == {"entity_id": "remote.entity_on"}
|
import unittest
from absl import flags
import mock
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import aws_cluster_subnet_group
from perfkitbenchmarker.providers.aws import util
from tests import pkb_common_test_case
TEST_RUN_URI = 'fakeru'
CLUSTER_SUBNET_ID = 'fake_redshift_cluster_subnet_id'
AWS_ZONE_US_EAST_1A = 'us-east-1a'
FLAGS = flags.FLAGS
class RedshiftClusterSubnetGroupTestCase(
pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(RedshiftClusterSubnetGroupTestCase, self).setUp()
FLAGS.zones = [AWS_ZONE_US_EAST_1A]
FLAGS.run_uri = TEST_RUN_URI
def testValidClusterParameterGroupCreation(self):
csg = aws_cluster_subnet_group.RedshiftClusterSubnetGroup(
list(util.AWS_PREFIX))
csg.subnet_id = CLUSTER_SUBNET_ID
self.assertEqual(csg.name, 'pkb-%s' % TEST_RUN_URI)
with mock.patch(
vm_util.__name__ + '.IssueCommand',
return_value=('out_', 'err_', 0)) as mock_issue:
csg._Create()
mock_issue.assert_called_once()
mock_issue.assert_called_with([
'aws', '--output', 'json', 'redshift', 'create-cluster-subnet-group',
'--cluster-subnet-group-name', 'pkb-%s' % TEST_RUN_URI,
'--description', 'Cluster Subnet Group for run uri %s' % TEST_RUN_URI,
'--subnet-ids', CLUSTER_SUBNET_ID])
def testValidClusterParameterGroupDeletion(self):
csg = aws_cluster_subnet_group.RedshiftClusterSubnetGroup(
list(util.AWS_PREFIX))
self.assertEqual(csg.name, 'pkb-%s' % TEST_RUN_URI)
with mock.patch(
vm_util.__name__ + '.IssueCommand',
return_value=('out_', 'err_', 0)) as mock_issue:
csg._Delete()
mock_issue.assert_called_once()
mock_issue.assert_called_with(
['aws', '--output', 'json', 'redshift', 'delete-cluster-subnet-group',
'--cluster-subnet-group-name', 'pkb-%s' % TEST_RUN_URI],
raise_on_failure=False)
if __name__ == '__main__':
unittest.main()
|
import logging
from scsgate.tasks import ToggleStatusTask
import voluptuous as vol
from homeassistant.components.light import PLATFORM_SCHEMA, LightEntity
from homeassistant.const import ATTR_ENTITY_ID, ATTR_STATE, CONF_DEVICES, CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import CONF_SCS_ID, DOMAIN, SCSGATE_SCHEMA
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_DEVICES): cv.schema_with_slug_keys(SCSGATE_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SCSGate switches."""
devices = config.get(CONF_DEVICES)
lights = []
logger = logging.getLogger(__name__)
scsgate = hass.data[DOMAIN]
if devices:
for entity_info in devices.values():
if entity_info[CONF_SCS_ID] in scsgate.devices:
continue
name = entity_info[CONF_NAME]
scs_id = entity_info[CONF_SCS_ID]
logger.info("Adding %s scsgate.light", name)
light = SCSGateLight(
name=name, scs_id=scs_id, logger=logger, scsgate=scsgate
)
lights.append(light)
add_entities(lights)
scsgate.add_devices_to_register(lights)
class SCSGateLight(LightEntity):
"""Representation of a SCSGate light."""
def __init__(self, scs_id, name, logger, scsgate):
"""Initialize the light."""
self._name = name
self._scs_id = scs_id
self._toggled = False
self._logger = logger
self._scsgate = scsgate
@property
def scs_id(self):
"""Return the SCS ID."""
return self._scs_id
@property
def should_poll(self):
"""No polling needed for a SCSGate light."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if light is on."""
return self._toggled
def turn_on(self, **kwargs):
"""Turn the device on."""
self._scsgate.append_task(ToggleStatusTask(target=self._scs_id, toggled=True))
self._toggled = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
self._scsgate.append_task(ToggleStatusTask(target=self._scs_id, toggled=False))
self._toggled = False
self.schedule_update_ha_state()
def process_event(self, message):
"""Handle a SCSGate message related with this light."""
if self._toggled == message.toggled:
self._logger.info(
"Light %s, ignoring message %s because state already active",
self._scs_id,
message,
)
# Nothing changed, ignoring
return
self._toggled = message.toggled
self.schedule_update_ha_state()
command = "off"
if self._toggled:
command = "on"
self.hass.bus.fire(
"button_pressed", {ATTR_ENTITY_ID: self._scs_id, ATTR_STATE: command}
)
|
import json
import asynctest
import mock
from pyramid import testing
from paasta_tools.api.views.resources import parse_filters
from paasta_tools.api.views.resources import resources_utilization
from paasta_tools.metrics import metastatus_lib
def test_parse_filters_empty():
filters = None
parsed = parse_filters(filters)
assert parsed == {}
def test_parse_filters_good():
filters = ["foo:bar,baz", "qux:zol"]
parsed = parse_filters(filters)
assert "foo" in parsed.keys()
assert "qux" in parsed.keys()
assert "bar" in parsed["foo"]
assert "baz" in parsed["foo"]
assert "zol" in parsed["qux"]
@mock.patch(
"paasta_tools.api.views.resources.metastatus_lib.get_resource_utilization_by_grouping",
autospec=True,
)
@mock.patch("paasta_tools.api.views.resources.get_mesos_master", autospec=True)
def test_resources_utilization_nothing_special(
mock_get_mesos_master, mock_get_resource_utilization_by_grouping
):
request = testing.DummyRequest()
request.swagger_data = {"groupings": None, "filter": None}
mock_mesos_state = mock.Mock()
mock_master = mock.Mock(
state=asynctest.CoroutineMock(return_value=mock_mesos_state)
)
mock_get_mesos_master.return_value = mock_master
mock_get_resource_utilization_by_grouping.return_value = {
frozenset([("superregion", "unknown")]): {
"total": metastatus_lib.ResourceInfo(cpus=10.0, mem=512.0, disk=100.0),
"free": metastatus_lib.ResourceInfo(cpus=8.0, mem=312.0, disk=20.0),
}
}
resp = resources_utilization(request)
body = json.loads(resp.body.decode("utf-8"))
assert resp.status_int == 200
assert len(body) == 1
assert set(body[0].keys()) == {"disk", "mem", "groupings", "cpus", "gpus"}
mock_mesos_state = {
"slaves": [
{
"id": "foo1",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "default", "region": "top"},
"reserved_resources": {},
},
{
"id": "bar1",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "default", "region": "bottom"},
"reserved_resources": {},
},
{
"id": "foo2",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "other", "region": "top"},
"reserved_resources": {},
},
{
"id": "bar2",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "other", "region": "bottom"},
"reserved_resources": {},
},
{
"id": "foo3",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "other", "region": "top"},
"reserved_resources": {},
},
{
"id": "bar2",
"resources": {"disk": 100, "cpus": 10, "mem": 50},
"attributes": {"pool": "other", "region": "bottom"},
"reserved_resources": {},
},
],
"frameworks": [
{
"tasks": [
{
"state": "TASK_RUNNING",
"resources": {"cpus": 1, "mem": 10, "disk": 10},
"slave_id": "foo1",
},
{
"state": "TASK_RUNNING",
"resources": {"cpus": 1, "mem": 10, "disk": 10},
"slave_id": "bar1",
},
]
}
],
}
@mock.patch("paasta_tools.api.views.resources.get_mesos_master", autospec=True)
def test_resources_utilization_with_grouping(mock_get_mesos_master):
request = testing.DummyRequest()
request.swagger_data = {"groupings": ["region", "pool"], "filter": None}
mock_master = mock.Mock(
state=asynctest.CoroutineMock(
func=asynctest.CoroutineMock(), # https://github.com/notion/a_sync/pull/40
return_value=mock_mesos_state,
)
)
mock_get_mesos_master.return_value = mock_master
resp = resources_utilization(request)
body = json.loads(resp.body.decode("utf-8"))
assert resp.status_int == 200
# 4 groupings, 2x2 attrs for 5 slaves
assert len(body) == 4
@mock.patch("paasta_tools.api.views.resources.get_mesos_master", autospec=True)
def test_resources_utilization_with_filter(mock_get_mesos_master):
request = testing.DummyRequest()
request.swagger_data = {
"groupings": ["region", "pool"],
"filter": ["region:top", "pool:default,other"],
}
mock_master = mock.Mock(
state=asynctest.CoroutineMock(
func=asynctest.CoroutineMock(), # https://github.com/notion/a_sync/pull/40
return_value=mock_mesos_state,
)
)
mock_get_mesos_master.return_value = mock_master
resp = resources_utilization(request)
body = json.loads(resp.body.decode("utf-8"))
assert resp.status_int == 200
assert len(body) == 2
request.swagger_data = {
"groupings": ["region", "pool"],
"filter": ["region:non-exist", "pool:default,other"],
}
resp = resources_utilization(request)
body = json.loads(resp.body.decode("utf-8"))
assert resp.status_int == 200
assert len(body) == 0
|
import asyncio
import collections
import logging
from typing import Any, Dict, List
import zigpy.exceptions
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers.typing import HomeAssistantType
from .helpers import LogMixin
from .typing import (
ZhaDeviceType,
ZhaGatewayType,
ZhaGroupType,
ZigpyEndpointType,
ZigpyGroupType,
)
_LOGGER = logging.getLogger(__name__)
GroupMember = collections.namedtuple("GroupMember", "ieee endpoint_id")
GroupEntityReference = collections.namedtuple(
"GroupEntityReference", "name original_name entity_id"
)
class ZHAGroupMember(LogMixin):
"""Composite object that represents a device endpoint in a Zigbee group."""
def __init__(
self, zha_group: ZhaGroupType, zha_device: ZhaDeviceType, endpoint_id: int
):
"""Initialize the group member."""
self._zha_group: ZhaGroupType = zha_group
self._zha_device: ZhaDeviceType = zha_device
self._endpoint_id: int = endpoint_id
@property
def group(self) -> ZhaGroupType:
"""Return the group this member belongs to."""
return self._zha_group
@property
def endpoint_id(self) -> int:
"""Return the endpoint id for this group member."""
return self._endpoint_id
@property
def endpoint(self) -> ZigpyEndpointType:
"""Return the endpoint for this group member."""
return self._zha_device.device.endpoints.get(self.endpoint_id)
@property
def device(self) -> ZhaDeviceType:
"""Return the zha device for this group member."""
return self._zha_device
@property
def member_info(self) -> Dict[str, Any]:
"""Get ZHA group info."""
member_info: Dict[str, Any] = {}
member_info["endpoint_id"] = self.endpoint_id
member_info["device"] = self.device.zha_device_info
member_info["entities"] = self.associated_entities
return member_info
@property
def associated_entities(self) -> List[GroupEntityReference]:
"""Return the list of entities that were derived from this endpoint."""
ha_entity_registry = self.device.gateway.ha_entity_registry
zha_device_registry = self.device.gateway.device_registry
return [
GroupEntityReference(
ha_entity_registry.async_get(entity_ref.reference_id).name,
ha_entity_registry.async_get(entity_ref.reference_id).original_name,
entity_ref.reference_id,
)._asdict()
for entity_ref in zha_device_registry.get(self.device.ieee)
if list(entity_ref.cluster_channels.values())[
0
].cluster.endpoint.endpoint_id
== self.endpoint_id
]
async def async_remove_from_group(self) -> None:
"""Remove the device endpoint from the provided zigbee group."""
try:
await self._zha_device.device.endpoints[
self._endpoint_id
].remove_from_group(self._zha_group.group_id)
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"Failed to remove endpoint: %s for device '%s' from group: 0x%04x ex: %s",
self._endpoint_id,
self._zha_device.ieee,
self._zha_group.group_id,
str(ex),
)
def log(self, level: int, msg: str, *args) -> None:
"""Log a message."""
msg = f"[%s](%s): {msg}"
args = (f"0x{self._zha_group.group_id:04x}", self.endpoint_id) + args
_LOGGER.log(level, msg, *args)
class ZHAGroup(LogMixin):
"""ZHA Zigbee group object."""
def __init__(
self,
hass: HomeAssistantType,
zha_gateway: ZhaGatewayType,
zigpy_group: ZigpyGroupType,
):
"""Initialize the group."""
self.hass: HomeAssistantType = hass
self._zigpy_group: ZigpyGroupType = zigpy_group
self._zha_gateway: ZhaGatewayType = zha_gateway
@property
def name(self) -> str:
"""Return group name."""
return self._zigpy_group.name
@property
def group_id(self) -> int:
"""Return group name."""
return self._zigpy_group.group_id
@property
def endpoint(self) -> ZigpyEndpointType:
"""Return the endpoint for this group."""
return self._zigpy_group.endpoint
@property
def members(self) -> List[ZHAGroupMember]:
"""Return the ZHA devices that are members of this group."""
return [
ZHAGroupMember(
self, self._zha_gateway.devices.get(member_ieee), endpoint_id
)
for (member_ieee, endpoint_id) in self._zigpy_group.members.keys()
if member_ieee in self._zha_gateway.devices
]
async def async_add_members(self, members: List[GroupMember]) -> None:
"""Add members to this group."""
if len(members) > 1:
tasks = []
for member in members:
tasks.append(
self._zha_gateway.devices[member.ieee].async_add_endpoint_to_group(
member.endpoint_id, self.group_id
)
)
await asyncio.gather(*tasks)
else:
await self._zha_gateway.devices[
members[0].ieee
].async_add_endpoint_to_group(members[0].endpoint_id, self.group_id)
async def async_remove_members(self, members: List[GroupMember]) -> None:
"""Remove members from this group."""
if len(members) > 1:
tasks = []
for member in members:
tasks.append(
self._zha_gateway.devices[
member.ieee
].async_remove_endpoint_from_group(
member.endpoint_id, self.group_id
)
)
await asyncio.gather(*tasks)
else:
await self._zha_gateway.devices[
members[0].ieee
].async_remove_endpoint_from_group(members[0].endpoint_id, self.group_id)
@property
def member_entity_ids(self) -> List[str]:
"""Return the ZHA entity ids for all entities for the members of this group."""
all_entity_ids: List[str] = []
for member in self.members:
entity_references = member.associated_entities
for entity_reference in entity_references:
all_entity_ids.append(entity_reference["entity_id"])
return all_entity_ids
def get_domain_entity_ids(self, domain) -> List[str]:
"""Return entity ids from the entity domain for this group."""
domain_entity_ids: List[str] = []
for member in self.members:
entities = async_entries_for_device(
self._zha_gateway.ha_entity_registry, member.device.device_id
)
domain_entity_ids.extend(
[entity.entity_id for entity in entities if entity.domain == domain]
)
return domain_entity_ids
@property
def group_info(self) -> Dict[str, Any]:
"""Get ZHA group info."""
group_info: Dict[str, Any] = {}
group_info["group_id"] = self.group_id
group_info["name"] = self.name
group_info["members"] = [member.member_info for member in self.members]
return group_info
def log(self, level: int, msg: str, *args):
"""Log a message."""
msg = f"[%s](%s): {msg}"
args = (self.name, self.group_id) + args
_LOGGER.log(level, msg, *args)
|
from datetime import datetime, timedelta
import logging
import googlemaps
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
TIME_MINUTES,
)
from homeassistant.helpers import location
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by Google"
CONF_DESTINATION = "destination"
CONF_OPTIONS = "options"
CONF_ORIGIN = "origin"
CONF_TRAVEL_MODE = "travel_mode"
DEFAULT_NAME = "Google Travel Time"
SCAN_INTERVAL = timedelta(minutes=5)
ALL_LANGUAGES = [
"ar",
"bg",
"bn",
"ca",
"cs",
"da",
"de",
"el",
"en",
"es",
"eu",
"fa",
"fi",
"fr",
"gl",
"gu",
"hi",
"hr",
"hu",
"id",
"it",
"iw",
"ja",
"kn",
"ko",
"lt",
"lv",
"ml",
"mr",
"nl",
"no",
"pl",
"pt",
"pt-BR",
"pt-PT",
"ro",
"ru",
"sk",
"sl",
"sr",
"sv",
"ta",
"te",
"th",
"tl",
"tr",
"uk",
"vi",
"zh-CN",
"zh-TW",
]
AVOID = ["tolls", "highways", "ferries", "indoor"]
TRANSIT_PREFS = ["less_walking", "fewer_transfers"]
TRANSPORT_TYPE = ["bus", "subway", "train", "tram", "rail"]
TRAVEL_MODE = ["driving", "walking", "bicycling", "transit"]
TRAVEL_MODEL = ["best_guess", "pessimistic", "optimistic"]
UNITS = ["metric", "imperial"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TRAVEL_MODE): vol.In(TRAVEL_MODE),
vol.Optional(CONF_OPTIONS, default={CONF_MODE: "driving"}): vol.All(
dict,
vol.Schema(
{
vol.Optional(CONF_MODE, default="driving"): vol.In(TRAVEL_MODE),
vol.Optional("language"): vol.In(ALL_LANGUAGES),
vol.Optional("avoid"): vol.In(AVOID),
vol.Optional("units"): vol.In(UNITS),
vol.Exclusive("arrival_time", "time"): cv.string,
vol.Exclusive("departure_time", "time"): cv.string,
vol.Optional("traffic_model"): vol.In(TRAVEL_MODEL),
vol.Optional("transit_mode"): vol.In(TRANSPORT_TYPE),
vol.Optional("transit_routing_preference"): vol.In(TRANSIT_PREFS),
}
),
),
}
)
TRACKABLE_DOMAINS = ["device_tracker", "sensor", "zone", "person"]
DATA_KEY = "google_travel_time"
def convert_time_to_utc(timestr):
"""Take a string like 08:00:00 and convert it to a unix timestamp."""
combined = datetime.combine(
dt_util.start_of_local_day(), dt_util.parse_time(timestr)
)
if combined < datetime.now():
combined = combined + timedelta(days=1)
return dt_util.as_timestamp(combined)
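# Illustrative sketch (not part of the original integration): how the helper
# above turns a configured wall-clock string into a timestamp accepted by the
# Distance Matrix API. The "08:00:00" value is a made-up example.
def _example_convert_departure_time():
    departure = convert_time_to_utc("08:00:00")
    # A Unix timestamp for 08:00 local time today, or tomorrow if already past.
    assert isinstance(departure, float)
    return departure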
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up the Google travel time platform."""
def run_setup(event):
"""
Delay the setup until Home Assistant is fully initialized.
        This allows any entities it depends on to have been created already.
"""
hass.data.setdefault(DATA_KEY, [])
options = config.get(CONF_OPTIONS)
if options.get("units") is None:
options["units"] = hass.config.units.name
travel_mode = config.get(CONF_TRAVEL_MODE)
mode = options.get(CONF_MODE)
if travel_mode is not None:
wstr = (
"Google Travel Time: travel_mode is deprecated, please "
"add mode to the options dictionary instead!"
)
_LOGGER.warning(wstr)
if mode is None:
options[CONF_MODE] = travel_mode
titled_mode = options.get(CONF_MODE).title()
formatted_name = f"{DEFAULT_NAME} - {titled_mode}"
name = config.get(CONF_NAME, formatted_name)
api_key = config.get(CONF_API_KEY)
origin = config.get(CONF_ORIGIN)
destination = config.get(CONF_DESTINATION)
sensor = GoogleTravelTimeSensor(
hass, name, api_key, origin, destination, options
)
hass.data[DATA_KEY].append(sensor)
if sensor.valid_api_connection:
add_entities_callback([sensor])
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class GoogleTravelTimeSensor(Entity):
"""Representation of a Google travel time sensor."""
def __init__(self, hass, name, api_key, origin, destination, options):
"""Initialize the sensor."""
self._hass = hass
self._name = name
self._options = options
self._unit_of_measurement = TIME_MINUTES
self._matrix = None
self.valid_api_connection = True
# Check if location is a trackable entity
if origin.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split(".", 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
self._client = googlemaps.Client(api_key, timeout=10)
try:
self.update()
except googlemaps.exceptions.ApiError as exp:
_LOGGER.error(exp)
self.valid_api_connection = False
return
@property
def state(self):
"""Return the state of the sensor."""
if self._matrix is None:
return None
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
return round(_data["duration_in_traffic"]["value"] / 60)
if "duration" in _data:
return round(_data["duration"]["value"] / 60)
return None
@property
def name(self):
"""Get the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._matrix is None:
return None
res = self._matrix.copy()
res.update(self._options)
del res["rows"]
_data = self._matrix["rows"][0]["elements"][0]
if "duration_in_traffic" in _data:
res["duration_in_traffic"] = _data["duration_in_traffic"]["text"]
if "duration" in _data:
res["duration"] = _data["duration"]["text"]
if "distance" in _data:
res["distance"] = _data["distance"]["text"]
res["origin"] = self._origin
res["destination"] = self._destination
res[ATTR_ATTRIBUTION] = ATTRIBUTION
return res
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from Google."""
options_copy = self._options.copy()
dtime = options_copy.get("departure_time")
atime = options_copy.get("arrival_time")
if dtime is not None and ":" in dtime:
options_copy["departure_time"] = convert_time_to_utc(dtime)
elif dtime is not None:
options_copy["departure_time"] = dtime
elif atime is None:
options_copy["departure_time"] = "now"
if atime is not None and ":" in atime:
options_copy["arrival_time"] = convert_time_to_utc(atime)
elif atime is not None:
options_copy["arrival_time"] = atime
# Convert device_trackers to google friendly location
if hasattr(self, "_origin_entity_id"):
self._origin = self._get_location_from_entity(self._origin_entity_id)
if hasattr(self, "_destination_entity_id"):
self._destination = self._get_location_from_entity(
self._destination_entity_id
)
self._destination = self._resolve_zone(self._destination)
self._origin = self._resolve_zone(self._origin)
if self._destination is not None and self._origin is not None:
self._matrix = self._client.distance_matrix(
self._origin, self._destination, **options_copy
)
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity state or attributes."""
entity = self._hass.states.get(entity_id)
if entity is None:
_LOGGER.error("Unable to find entity %s", entity_id)
self.valid_api_connection = False
return None
# Check if the entity has location attributes
if location.has_location(entity):
return self._get_location_from_attributes(entity)
# Check if device is in a zone
zone_entity = self._hass.states.get("zone.%s" % entity.state)
if location.has_location(zone_entity):
_LOGGER.debug(
"%s is in %s, getting zone location", entity_id, zone_entity.entity_id
)
return self._get_location_from_attributes(zone_entity)
# If zone was not found in state then use the state as the location
if entity_id.startswith("sensor."):
return entity.state
# When everything fails just return nothing
return None
@staticmethod
def _get_location_from_attributes(entity):
"""Get the lat/long string from an entities attributes."""
attr = entity.attributes
return f"{attr.get(ATTR_LATITUDE)},{attr.get(ATTR_LONGITUDE)}"
def _resolve_zone(self, friendly_name):
entities = self._hass.states.all()
for entity in entities:
if entity.domain == "zone" and entity.name == friendly_name:
return self._get_location_from_attributes(entity)
return friendly_name
|
def monkeypatch_method(cls):
'''Guido's monkeypatch decorator.'''
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def monkeypatch_class(name, bases, namespace):
'''Guido's monkeypatch metaclass.'''
assert len(bases) == 1, "Exactly one base class required"
base = bases[0]
    for name, value in namespace.items():
if name != "__metaclass__" and name != "__doc__":
setattr(base, name, value)
return base
class Config(object):
def __init__(self, config):
self._config = config
def __repr__(self):
return repr(self._config)
def __getattr__(self, key):
if key not in self._config:
return None
return self._config[key]
def __getitem__(self, key):
return getattr(self, key)
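# Illustrative sketch (not part of the original module): the Config wrapper
# above exposes dict keys as attributes and returns None for missing keys.
def _example_config_access():
    cfg = Config({"host": "localhost", "port": 8080})
    assert cfg.host == "localhost"
    assert cfg["port"] == 8080
    assert cfg.timeout is None  # missing keys fall back to None
    return cfg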
|
import re
from qutebrowser.utils import log, utils
class ShellLexer:
"""A lexical analyzer class for simple shell-like syntaxes.
Based on Python's shlex, but cleaned up, removed some features, and added
some features useful for qutebrowser.
Attributes:
FIXME
"""
def __init__(self, s):
self.string = s
self.whitespace = ' \t\r'
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.keep = False
self.quoted = False
self.escapedstate = ' '
self.token = ''
self.state = ' '
def reset(self):
"""Reset the state machine state to the defaults."""
self.quoted = False
self.escapedstate = ' '
self.token = ''
self.state = ' '
def __iter__(self): # noqa: C901 pragma: no mccabe
"""Read a raw token from the input stream."""
self.reset()
for nextchar in self.string:
if self.state == ' ':
if self.keep:
self.token += nextchar
if nextchar in self.whitespace:
if self.token or self.quoted:
yield self.token
self.reset()
elif nextchar in self.escape:
self.escapedstate = 'a'
self.state = nextchar
elif nextchar in self.quotes:
self.state = nextchar
else:
self.token = nextchar
self.state = 'a'
elif self.state in self.quotes:
self.quoted = True
if nextchar == self.state:
if self.keep:
self.token += nextchar
self.state = 'a'
elif (nextchar in self.escape and
self.state in self.escapedquotes):
if self.keep:
self.token += nextchar
self.escapedstate = self.state
self.state = nextchar
else:
self.token += nextchar
elif self.state in self.escape:
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if (self.escapedstate in self.quotes and
nextchar != self.state and
nextchar != self.escapedstate and not self.keep):
self.token += self.state
self.token += nextchar
self.state = self.escapedstate
elif self.state == 'a':
if nextchar in self.whitespace:
self.state = ' '
assert self.token or self.quoted
yield self.token
self.reset()
if self.keep:
yield nextchar
elif nextchar in self.quotes:
if self.keep:
self.token += nextchar
self.state = nextchar
elif nextchar in self.escape:
if self.keep:
self.token += nextchar
self.escapedstate = 'a'
self.state = nextchar
else:
self.token += nextchar
else:
raise utils.Unreachable(
"Invalid state {!r}!".format(self.state))
if self.state in self.escape and not self.keep:
self.token += self.state
if self.token or self.quoted:
yield self.token
def split(s, keep=False):
"""Split a string via ShellLexer.
Args:
keep: Whether to keep special chars in the split output.
"""
lexer = ShellLexer(s)
lexer.keep = keep
tokens = list(lexer)
if not tokens:
return []
out = []
spaces = ""
log.shlexer.vdebug( # type: ignore[attr-defined]
"{!r} -> {!r}".format(s, tokens))
for t in tokens:
if t.isspace():
spaces += t
else:
out.append(spaces + t)
spaces = ""
if spaces:
out.append(spaces)
return out
def _combine_ws(parts, whitespace):
"""Combine whitespace in a list with the element following it.
Args:
parts: A list of strings.
whitespace: A string containing what's considered whitespace.
Return:
The modified list.
"""
out = []
ws = ''
for part in parts:
if not part:
continue
elif part in whitespace:
ws += part
else:
out.append(ws + part)
ws = ''
if ws:
out.append(ws)
return out
def simple_split(s, keep=False, maxsplit=None):
"""Split a string on whitespace, optionally keeping the whitespace.
Args:
s: The string to split.
keep: Whether to keep whitespace.
maxsplit: The maximum count of splits.
Return:
A list of split strings.
"""
whitespace = '\n\t '
if maxsplit == 0:
# re.split with maxsplit=0 splits everything, while str.split splits
# nothing (which is the behavior we want).
if keep:
return [s]
else:
return [s.strip(whitespace)]
elif maxsplit is None:
maxsplit = 0
if keep:
pattern = '([' + whitespace + '])'
parts = re.split(pattern, s, maxsplit)
return _combine_ws(parts, whitespace)
else:
pattern = '[' + whitespace + ']'
parts = re.split(pattern, s, maxsplit)
parts[-1] = parts[-1].rstrip()
return [p for p in parts if p]
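# Illustrative sketch (not part of the original module): expected behaviour of
# the two split helpers above, assuming qutebrowser's logging is initialized.
def _example_split_helpers():
    # Quoted arguments stay together and quotes are stripped when keep=False.
    assert split('open "some url"') == ['open', 'some url']
    # simple_split only splits on whitespace and honours maxsplit.
    assert simple_split('set option value', maxsplit=1) == ['set', 'option value']
    # With keep=True, whitespace is attached to the token that follows it.
    assert simple_split('set  option', keep=True) == ['set', '  option']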
|
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
about = {}
with open("tmuxp/__about__.py") as fp:
exec(fp.read(), about)
with open('requirements/base.txt') as f:
install_reqs = [line for line in f.read().split('\n') if line]
with open('requirements/test.txt') as f:
tests_reqs = [line for line in f.read().split('\n') if line]
if sys.version_info[0] > 2:
readme = open('README.rst', encoding='utf-8').read()
else:
readme = open('README.rst').read()
history = open('CHANGES').read().replace('.. :changelog:', '')
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name=about['__title__'],
version=about['__version__'],
url=about['__github__'],
download_url=about['__pypi__'],
license=about['__license__'],
author=about['__author__'],
author_email=about['__email__'],
description=about['__description__'],
long_description=readme,
packages=['tmuxp'],
include_package_data=True,
install_requires=install_reqs,
tests_require=tests_reqs,
cmdclass={'test': PyTest},
zip_safe=False,
keywords=about['__title__'],
entry_points=dict(console_scripts=['tmuxp=tmuxp:cli.cli']),
classifiers=[
'Development Status :: 5 - Production/Stable',
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: PyPy',
"Topic :: Utilities",
"Topic :: System :: Shells",
],
)
|
__docformat__ = "restructuredtext en"
__metaclass__ = type
import os.path as osp
import os
import sys
import tempfile
import codecs
import errno
def escape(value):
"""Make <value> usable in a dot file."""
lines = [line.replace('"', '\\"') for line in value.split('\n')]
data = '\\l'.join(lines)
return '\\n' + data
def target_info_from_filename(filename):
"""Transforms /some/path/foo.png into ('/some/path', 'foo.png', 'png')."""
basename = osp.basename(filename)
storedir = osp.dirname(osp.abspath(filename))
target = filename.split('.')[-1]
return storedir, basename, target
class DotBackend:
"""Dot File backend."""
def __init__(self, graphname, rankdir=None, size=None, ratio=None,
charset='utf-8', renderer='dot', additionnal_param={}):
self.graphname = graphname
self.renderer = renderer
self.lines = []
self._source = None
self.emit("digraph %s {" % normalize_node_id(graphname))
if rankdir:
self.emit('rankdir=%s' % rankdir)
if ratio:
self.emit('ratio=%s' % ratio)
if size:
self.emit('size="%s"' % size)
if charset:
assert charset.lower() in ('utf-8', 'iso-8859-1', 'latin1'), \
'unsupported charset %s' % charset
self.emit('charset="%s"' % charset)
for param in sorted(additionnal_param.items()):
self.emit('='.join(param))
def get_source(self):
"""returns self._source"""
if self._source is None:
self.emit("}\n")
self._source = '\n'.join(self.lines)
del self.lines
return self._source
source = property(get_source)
def generate(self, outputfile=None, dotfile=None, mapfile=None):
"""Generates a graph file.
:param outputfile: filename and path [defaults to graphname.png]
:param dotfile: filename and path [defaults to graphname.dot]
:rtype: str
:return: a path to the generated file
"""
import subprocess # introduced in py 2.4
name = self.graphname
if not dotfile:
# if 'outputfile' is a dot file use it as 'dotfile'
if outputfile and outputfile.endswith(".dot"):
dotfile = outputfile
else:
dotfile = '%s.dot' % name
if outputfile is not None:
storedir, basename, target = target_info_from_filename(outputfile)
if target != "dot":
pdot, dot_sourcepath = tempfile.mkstemp(".dot", name)
os.close(pdot)
else:
dot_sourcepath = osp.join(storedir, dotfile)
else:
target = 'png'
pdot, dot_sourcepath = tempfile.mkstemp(".dot", name)
ppng, outputfile = tempfile.mkstemp(".png", name)
os.close(pdot)
os.close(ppng)
pdot = codecs.open(dot_sourcepath, 'w', encoding='utf8')
pdot.write(self.source)
pdot.close()
if target != 'dot':
if sys.platform == 'win32':
use_shell = True
else:
use_shell = False
try:
if mapfile:
subprocess.call([self.renderer, '-Tcmapx', '-o', mapfile, '-T', target, dot_sourcepath, '-o', outputfile],
shell=use_shell)
else:
subprocess.call([self.renderer, '-T', target,
dot_sourcepath, '-o', outputfile],
shell=use_shell)
except OSError as e:
if e.errno == errno.ENOENT:
e.strerror = 'File not found: {0}'.format(self.renderer)
raise
os.unlink(dot_sourcepath)
return outputfile
def emit(self, line):
"""Adds <line> to final output."""
self.lines.append(line)
def emit_edge(self, name1, name2, **props):
"""emit an edge from <name1> to <name2>.
edge properties: see http://www.graphviz.org/doc/info/attrs.html
"""
attrs = ['%s="%s"' % (prop, value) for prop, value in props.items()]
n_from, n_to = normalize_node_id(name1), normalize_node_id(name2)
self.emit('%s -> %s [%s];' % (n_from, n_to, ', '.join(sorted(attrs))) )
def emit_node(self, name, **props):
"""emit a node with given properties.
node properties: see http://www.graphviz.org/doc/info/attrs.html
"""
attrs = ['%s="%s"' % (prop, value) for prop, value in props.items()]
self.emit('%s [%s];' % (normalize_node_id(name), ', '.join(sorted(attrs))))
def normalize_node_id(nid):
"""Returns a suitable DOT node id for `nid`."""
return '"%s"' % nid
class GraphGenerator:
def __init__(self, backend):
# the backend is responsible to output the graph in a particular format
self.backend = backend
    # XXX doesn't like space in outputfile / mapfile
def generate(self, visitor, propshdlr, outputfile=None, mapfile=None):
# the visitor
# the property handler is used to get node and edge properties
# according to the graph and to the backend
self.propshdlr = propshdlr
for nodeid, node in visitor.nodes():
props = propshdlr.node_properties(node)
self.backend.emit_node(nodeid, **props)
for subjnode, objnode, edge in visitor.edges():
props = propshdlr.edge_properties(edge, subjnode, objnode)
self.backend.emit_edge(subjnode, objnode, **props)
return self.backend.generate(outputfile=outputfile, mapfile=mapfile)
class UnorderableGraph(Exception):
pass
def ordered_nodes(graph):
"""takes a dependency graph dict as arguments and return an ordered tuple of
nodes starting with nodes without dependencies and up to the outermost node.
If there is some cycle in the graph, :exc:`UnorderableGraph` will be raised.
Also the given graph dict will be emptied.
"""
# check graph consistency
cycles = get_cycles(graph)
if cycles:
cycles = '\n'.join([' -> '.join(cycle) for cycle in cycles])
raise UnorderableGraph('cycles in graph: %s' % cycles)
vertices = set(graph)
to_vertices = set()
for edges in graph.values():
to_vertices |= set(edges)
missing_vertices = to_vertices - vertices
if missing_vertices:
raise UnorderableGraph('missing vertices: %s' % ', '.join(missing_vertices))
# order vertices
order = []
order_set = set()
old_len = None
while graph:
if old_len == len(graph):
raise UnorderableGraph('unknown problem with %s' % graph)
old_len = len(graph)
deps_ok = []
for node, node_deps in graph.items():
for dep in node_deps:
if dep not in order_set:
break
else:
deps_ok.append(node)
order.append(deps_ok)
order_set |= set(deps_ok)
for node in deps_ok:
del graph[node]
result = []
for grp in reversed(order):
result.extend(sorted(grp))
return tuple(result)
def get_cycles(graph_dict, vertices=None):
    '''given a dictionary representing an ordered graph (i.e. keys are vertices
    and values are lists of destination vertices representing edges), return a
    list of detected cycles
'''
if not graph_dict:
return ()
result = []
if vertices is None:
vertices = graph_dict.keys()
for vertice in vertices:
_get_cycles(graph_dict, [], set(), result, vertice)
return result
def _get_cycles(graph_dict, path, visited, result, vertice):
"""recursive function doing the real work for get_cycles"""
if vertice in path:
cycle = [vertice]
for node in path[::-1]:
if node == vertice:
break
cycle.insert(0, node)
# make a canonical representation
start_from = min(cycle)
index = cycle.index(start_from)
cycle = cycle[index:] + cycle[0:index]
# append it to result if not already in
        if cycle not in result:
result.append(cycle)
return
path.append(vertice)
try:
for node in graph_dict[vertice]:
# don't check already visited nodes again
if node not in visited:
_get_cycles(graph_dict, path, visited, result, node)
visited.add(node)
except KeyError:
pass
path.pop()
def has_path(graph_dict, fromnode, tonode, path=None):
"""generic function taking a simple graph definition as a dictionary, with
    each node as key associated to a list of nodes directly reachable from it.
Return None if no path exists to go from `fromnode` to `tonode`, else the
first path found (as a list including the destination node at last)
"""
if path is None:
path = []
elif fromnode in path:
return None
path.append(fromnode)
for destnode in graph_dict[fromnode]:
if destnode == tonode or has_path(graph_dict, destnode, tonode, path):
return path[1:] + [tonode]
path.pop()
return None
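# Illustrative sketch (not part of the original module): a tiny graph
# exercising the helpers above. Vertices are plain integers.
def _example_graph_helpers():
    cyclic = {1: [2], 2: [3], 3: [1]}
    assert get_cycles(cyclic) == [[1, 2, 3]]
    dag = {1: [2], 2: [3], 3: []}
    assert get_cycles(dag) == []
    # has_path returns the intermediate vertices plus the destination.
    assert has_path(dag, 1, 3) == [2, 3]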
|
from email import message_from_string
import importlib
import pkg_resources
import sys
assert sys.version_info > (3, 5), 'Python 3 required to build docs'
def try_import(mod_name):
"""Attempt importing module and suppress failure of doing this."""
try:
return importlib.import_module(mod_name)
except ImportError:
pass
def get_supported_pythons(classifiers):
"""Return min and max supported Python version from meta as tuples."""
PY_VER_CLASSIFIER = 'Programming Language :: Python :: '
vers = filter(lambda c: c.startswith(PY_VER_CLASSIFIER), classifiers)
vers = map(lambda c: c[len(PY_VER_CLASSIFIER):], vers)
vers = filter(lambda c: c[0].isdigit() and '.' in c, vers)
vers = map(lambda c: tuple(c.split('.')), vers)
vers = sorted(vers)
del vers[1:-1]
return vers
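# Illustrative sketch (not part of the original configuration): how the helper
# above reduces trove classifiers to the oldest and newest supported versions.
def _example_supported_pythons():
    classifiers = [
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
    ]
    assert get_supported_pythons(classifiers) == [('3', '5'), ('3', '8')]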
custom_sphinx_theme = try_import('alabaster')
prj_dist = pkg_resources.get_distribution('cherrypy')
prj_pkg_info = prj_dist.get_metadata(prj_dist.PKG_INFO)
prj_meta = message_from_string(prj_pkg_info)
prj_author = prj_meta['Author']
prj_license = prj_meta['License']
prj_description = prj_meta['Description']
prj_py_ver_range = get_supported_pythons(prj_meta.get_all('Classifier'))
prj_py_min_supported, prj_py_max_supported = map(
lambda v: '.'.join(v), prj_py_ver_range
)
project = prj_dist.project_name
github_url = 'https://github.com'
github_repo_org = project.lower()
github_repo_name = project.lower()
github_repo_slug = f'{github_repo_org}/{github_repo_name}'
github_repo_url = f'{github_url}/{github_repo_slug}'
cr_github_repo_url = f'{github_url}/{github_repo_org}/cheroot'
github_sponsors_url = f'{github_url}/sponsors'
rst_epilog = f"""
.. |project| replace:: {project}
.. |min_py_supported| replace:: {prj_py_min_supported}
.. |max_py_supported| replace:: {prj_py_max_supported}
"""
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'rst.linker',
'jaraco.packaging.sphinx',
]
extlinks = {
'issue': (f'{github_repo_url}/issues/%s', '#'),
'pr': (f'{github_repo_url}/pull/%s', 'PR #'),
'commit': (f'{github_repo_url}/commit/%s', ''),
'cr-issue': (f'{cr_github_repo_url}/issues/%s', 'Cheroot #'),
'cr-pr': (f'{cr_github_repo_url}/pull/%s', 'Cheroot PR #'),
'gh': (f'{github_url}/%s', 'GitHub: '),
'user': (f'{github_sponsors_url}/%s', '@'),
}
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'cheroot': ('https://cheroot.cherrypy.org/en/latest/', None),
'pytest-docs': ('https://docs.pytest.org/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = getattr(custom_sphinx_theme, '__name__', 'default')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# "relbarbgcolor": "#880000",
# "relbartextcolor": "white",
# "relbarlinkcolor": "#FFEEEE",
# "sidebarbgcolor": "#880000",
# "sidebartextcolor": "white",
# "sidebarlinkcolor": "#FFEEEE",
# "headbgcolor": "#FFF8FB",
# "headtextcolor": "black",
# "headlinkcolor": "#660000",
# "footerbgcolor": "#880000",
# "footertextcolor": "white",
# "codebgcolor": "#FFEEEE",
# }
html_theme_options = {
'logo': 'images/cherrypy_logo_big.png',
'github_user': project.lower(),
'github_repo': project.lower(),
'github_button': True,
'github_banner': True,
'github_type': 'star',
'github_count': True,
'travis_button': True,
'codecov_button': True,
# 'analytics_id': ...,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_style = 'cpdocmain.css'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': [
'about.html', 'searchbox.html', 'navigation.html', 'python_2_eol.html',
],
'**': [
'about.html', 'searchbox.html', 'navigation.html', 'python_2_eol.html',
],
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'CherryPydoc'
# -- Options for LaTeX output --------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
(
'index',
'CherryPy.tex',
'CherryPy Documentation',
'CherryPy Team',
'manual',
),
]
def mock_pywin32():
"""Mock pywin32 module.
    This allows Linux hosts, including ReadTheDocs,
    and other environments that don't have pywin32, to generate the docs
    properly, including the PDF version.
See:
http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
"""
if try_import('win32api'):
return
from unittest import mock
MOCK_MODULES = [
'win32api', 'win32con', 'win32event', 'win32service',
'win32serviceutil',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.MagicMock()
mock_pywin32()
link_files = {
'../CHANGES.rst': dict(
using=dict(
GH='https://github.com',
),
replace=[
dict(
pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
),
],
),
}
# Ref: https://github.com/python-attrs/attrs/pull/571/files\
# #diff-85987f48f1258d9ee486e3191495582dR82
default_role = 'any'
|
import asyncio
import logging
import aiohttp
import async_timeout
import pysensibo
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
ATTR_TEMPERATURE,
CONF_API_KEY,
CONF_ID,
STATE_ON,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.temperature import convert as convert_temperature
from .const import DOMAIN as SENSIBO_DOMAIN
_LOGGER = logging.getLogger(__name__)
ALL = ["all"]
TIMEOUT = 10
SERVICE_ASSUME_STATE = "assume_state"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ID, default=ALL): vol.All(cv.ensure_list, [cv.string]),
}
)
ASSUME_STATE_SCHEMA = vol.Schema(
{vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_STATE): cv.string}
)
_FETCH_FIELDS = ",".join(
[
"room{name}",
"measurements",
"remoteCapabilities",
"acState",
"connectionStatus{isAlive}",
"temperatureUnit",
]
)
_INITIAL_FETCH_FIELDS = f"id,{_FETCH_FIELDS}"
FIELD_TO_FLAG = {
"fanLevel": SUPPORT_FAN_MODE,
"swing": SUPPORT_SWING_MODE,
"targetTemperature": SUPPORT_TARGET_TEMPERATURE,
}
SENSIBO_TO_HA = {
"cool": HVAC_MODE_COOL,
"heat": HVAC_MODE_HEAT,
"fan": HVAC_MODE_FAN_ONLY,
"auto": HVAC_MODE_HEAT_COOL,
"dry": HVAC_MODE_DRY,
}
HA_TO_SENSIBO = {value: key for key, value in SENSIBO_TO_HA.items()}
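# Illustrative sketch (not part of the original integration): how the mapping
# tables above are combined. The acState payload below is a made-up example.
def _example_mode_and_feature_mapping():
    ac_state = {"on": True, "mode": "cool", "fanLevel": "high", "targetTemperature": 21}
    hvac_mode = SENSIBO_TO_HA[ac_state["mode"]]
    assert hvac_mode == HVAC_MODE_COOL
    assert HA_TO_SENSIBO[hvac_mode] == "cool"
    supported = 0
    for key in ac_state:
        if key in FIELD_TO_FLAG:
            supported |= FIELD_TO_FLAG[key]
    assert supported == SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE
    return supported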
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up Sensibo devices."""
client = pysensibo.SensiboClient(
config[CONF_API_KEY], session=async_get_clientsession(hass), timeout=TIMEOUT
)
devices = []
try:
for dev in await client.async_get_devices(_INITIAL_FETCH_FIELDS):
if config[CONF_ID] == ALL or dev["id"] in config[CONF_ID]:
devices.append(
SensiboClimate(client, dev, hass.config.units.temperature_unit)
)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
pysensibo.SensiboError,
) as err:
_LOGGER.exception("Failed to connect to Sensibo servers")
raise PlatformNotReady from err
if not devices:
return
async_add_entities(devices)
async def async_assume_state(service):
"""Set state according to external service call.."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_climate = [
device for device in devices if device.entity_id in entity_ids
]
else:
target_climate = devices
update_tasks = []
for climate in target_climate:
await climate.async_assume_state(service.data.get(ATTR_STATE))
update_tasks.append(climate.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
hass.services.async_register(
SENSIBO_DOMAIN,
SERVICE_ASSUME_STATE,
async_assume_state,
schema=ASSUME_STATE_SCHEMA,
)
class SensiboClimate(ClimateEntity):
"""Representation of a Sensibo device."""
def __init__(self, client, data, units):
"""Build SensiboClimate.
        client: SensiboClient (uses the shared aiohttp session).
data: initially-fetched data.
"""
self._client = client
self._id = data["id"]
self._external_state = None
self._units = units
self._available = False
self._do_update(data)
@property
def supported_features(self):
"""Return the list of supported features."""
return self._supported_features
def _do_update(self, data):
self._name = data["room"]["name"]
self._measurements = data["measurements"]
self._ac_states = data["acState"]
self._available = data["connectionStatus"]["isAlive"]
capabilities = data["remoteCapabilities"]
self._operations = [SENSIBO_TO_HA[mode] for mode in capabilities["modes"]]
self._operations.append(HVAC_MODE_OFF)
self._current_capabilities = capabilities["modes"][self._ac_states["mode"]]
temperature_unit_key = data.get("temperatureUnit") or self._ac_states.get(
"temperatureUnit"
)
if temperature_unit_key:
self._temperature_unit = (
TEMP_CELSIUS if temperature_unit_key == "C" else TEMP_FAHRENHEIT
)
self._temperatures_list = (
self._current_capabilities["temperatures"]
.get(temperature_unit_key, {})
.get("values", [])
)
else:
self._temperature_unit = self._units
self._temperatures_list = []
self._supported_features = 0
for key in self._ac_states:
if key in FIELD_TO_FLAG:
self._supported_features |= FIELD_TO_FLAG[key]
@property
def state(self):
"""Return the current state."""
return self._external_state or super().state
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"battery": self.current_battery}
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return self._temperature_unit
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._ac_states.get("targetTemperature")
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
if self.temperature_unit == self.hass.config.units.temperature_unit:
# We are working in same units as the a/c unit. Use whole degrees
# like the API supports.
return 1
# Unit conversion is going on. No point to stick to specific steps.
return None
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
if not self._ac_states["on"]:
return HVAC_MODE_OFF
return SENSIBO_TO_HA.get(self._ac_states["mode"])
@property
def current_humidity(self):
"""Return the current humidity."""
return self._measurements["humidity"]
@property
def current_battery(self):
"""Return the current battery voltage."""
return self._measurements.get("batteryVoltage")
@property
def current_temperature(self):
"""Return the current temperature."""
# This field is not affected by temperatureUnit.
# It is always in C
return convert_temperature(
self._measurements["temperature"], TEMP_CELSIUS, self.temperature_unit
)
@property
def hvac_modes(self):
"""List of available operation modes."""
return self._operations
@property
def fan_mode(self):
"""Return the fan setting."""
return self._ac_states.get("fanLevel")
@property
def fan_modes(self):
"""List of available fan modes."""
return self._current_capabilities.get("fanLevels")
@property
def swing_mode(self):
"""Return the fan setting."""
return self._ac_states.get("swing")
@property
def swing_modes(self):
"""List of available swing modes."""
return self._current_capabilities.get("swing")
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def min_temp(self):
"""Return the minimum temperature."""
return (
self._temperatures_list[0] if self._temperatures_list else super().min_temp
)
@property
def max_temp(self):
"""Return the maximum temperature."""
return (
self._temperatures_list[-1] if self._temperatures_list else super().max_temp
)
@property
def unique_id(self):
"""Return unique ID based on Sensibo ID."""
return self._id
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
temperature = int(temperature)
if temperature not in self._temperatures_list:
# Requested temperature is not supported.
if temperature == self.target_temperature:
return
index = self._temperatures_list.index(self.target_temperature)
if (
temperature > self.target_temperature
and index < len(self._temperatures_list) - 1
):
temperature = self._temperatures_list[index + 1]
elif temperature < self.target_temperature and index > 0:
temperature = self._temperatures_list[index - 1]
else:
return
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, "targetTemperature", temperature, self._ac_states
)
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, "fanLevel", fan_mode, self._ac_states
)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
if hvac_mode == HVAC_MODE_OFF:
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, "on", False, self._ac_states
)
return
# Turn on if not currently on.
if not self._ac_states["on"]:
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, "on", True, self._ac_states
)
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, "mode", HA_TO_SENSIBO[hvac_mode], self._ac_states
)
async def async_set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, "swing", swing_mode, self._ac_states
)
async def async_turn_on(self):
"""Turn Sensibo unit on."""
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, "on", True, self._ac_states
)
async def async_turn_off(self):
"""Turn Sensibo unit on."""
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, "on", False, self._ac_states
)
async def async_assume_state(self, state):
"""Set external state."""
change_needed = (state != HVAC_MODE_OFF and not self._ac_states["on"]) or (
state == HVAC_MODE_OFF and self._ac_states["on"]
)
if change_needed:
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id,
"on",
state != HVAC_MODE_OFF, # value
self._ac_states,
True, # assumed_state
)
if state in [STATE_ON, HVAC_MODE_OFF]:
self._external_state = None
else:
self._external_state = state
async def async_update(self):
"""Retrieve latest state."""
try:
with async_timeout.timeout(TIMEOUT):
data = await self._client.async_get_device(self._id, _FETCH_FIELDS)
self._do_update(data)
except (aiohttp.client_exceptions.ClientError, pysensibo.SensiboError):
_LOGGER.warning("Failed to connect to Sensibo servers")
self._available = False
|
import numpy as np
import unittest
import chainer
from chainer.backends.cuda import to_cpu
from chainer.function import Function
from chainer import testing
from chainer.testing import attr
from chainercv.links import PickableSequentialChain
from chainercv.utils.testing import ConstantStubLink
class DummyFunc(Function):
def forward(self, inputs):
return inputs[0] * 2,
class PickableSequentialChainTestBase(object):
def setUpBase(self):
self.l1 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
self.f1 = DummyFunc()
self.f2 = DummyFunc()
self.l2 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
self.link = PickableSequentialChain()
with self.link.init_scope():
self.link.l1 = self.l1
self.link.f1 = self.f1
self.link.f2 = self.f2
self.link.l2 = self.l2
if self.pick:
self.link.pick = self.pick
self.x = np.random.uniform(size=(1, 3, 24, 24))
if not hasattr(self, 'assertRaisesRegex'):
self.assertRaisesRegex = self.assertRaisesRegexp
def test_pick(self):
self.assertEqual(self.link.pick, self.pick)
def test_pick_setter(self):
invalid_name = 'nonexistent'
self.assertNotIn(invalid_name, self.link.layer_names)
expected_message_pattern = str.format(
'^Invalid layer name .{:s}.$', invalid_name)
with self.assertRaisesRegex(ValueError, expected_message_pattern):
self.link.pick = invalid_name
invalid_names = 'nonexistent', 'nonexistent2'
for n in invalid_names:
self.assertNotIn(n, self.link.layer_names)
expected_message_pattern = str.format(
'^Invalid layer name .{:s}.$', invalid_names[0])
with self.assertRaisesRegex(ValueError, expected_message_pattern):
self.link.pick = invalid_names
def test_layer_names(self):
self.assertEqual(self.link.layer_names, ['l1', 'f1', 'f2', 'l2'])
def check_call(self, x, expects):
outs = self.link(x)
if isinstance(self.pick, tuple):
pick = self.pick
else:
if self.pick is None:
pick = ('l2',)
else:
pick = (self.pick,)
outs = (outs,)
self.assertEqual(len(outs), len(pick))
for out, layer_name in zip(outs, pick):
self.assertIsInstance(out, chainer.Variable)
self.assertIsInstance(out.array, self.link.xp.ndarray)
out = to_cpu(out.array)
np.testing.assert_equal(out, to_cpu(expects[layer_name].array))
def check_basic(self):
x = self.link.xp.asarray(self.x)
expects = {}
expects['l1'] = self.l1(x)
expects['f1'] = self.f1(expects['l1'])
expects['f2'] = self.f2(expects['f1'])
expects['l2'] = self.l2(expects['f2'])
self.check_call(x, expects)
def test_basic_cpu(self):
self.check_basic()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self.check_basic()
def check_deletion(self):
x = self.link.xp.asarray(self.x)
if self.pick == 'l1' or \
(isinstance(self.pick, tuple)
and 'l1' in self.pick):
with self.assertRaises(AttributeError):
del self.link.l1
return
else:
del self.link.l1
expects = {}
expects['f1'] = self.f1(x)
expects['f2'] = self.f2(expects['f1'])
expects['l2'] = self.l2(expects['f2'])
self.check_call(x, expects)
def test_deletion_cpu(self):
self.check_deletion()
@attr.gpu
def test_deletion_gpu(self):
self.link.to_gpu()
self.check_deletion()
@testing.parameterize(
{'pick': None},
{'pick': 'f2'},
{'pick': ('f2',)},
{'pick': ('l2', 'l1', 'f2')},
{'pick': ('l2', 'l2')},
)
class TestPickableSequentialChain(
unittest.TestCase, PickableSequentialChainTestBase):
def setUp(self):
self.setUpBase()
@testing.parameterize(
*testing.product({
'mode': ['init', 'share', 'copy'],
'pick': [None, 'f1', ('f1', 'f2'), ('l2', 'l2'), ('l2', 'l1', 'f2')]
})
)
class TestCopiedPickableSequentialChain(
unittest.TestCase, PickableSequentialChainTestBase):
def setUp(self):
self.setUpBase()
self.f100 = DummyFunc()
self.l100 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
self.link, self.original_link = \
self.link.copy(mode=self.mode), self.link
def check_unchanged(self, link, x):
class Checker(object):
def __init__(self, tester, link, x):
self.tester = tester
self.link = link
self.x = x
def __enter__(self):
self.expected = self.link(self.x)
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
return None
self.actual = self.link(self.x)
if isinstance(self.expected, tuple):
self.tester.assertEqual(
len(self.expected), len(self.actual))
for e, a in zip(self.expected, self.actual):
self.tester.assertEqual(type(e.array), type(a.array))
np.testing.assert_equal(
to_cpu(e.array), to_cpu(a.array))
else:
self.tester.assertEqual(type(self.expected.array),
type(self.actual.array))
np.testing.assert_equal(
to_cpu(self.expected.array),
to_cpu(self.actual.array))
return Checker(self, link, x)
def test_original_unaffected_by_setting_pick(self):
with self.check_unchanged(self.original_link, self.x):
self.link.pick = 'f2'
def test_original_unaffected_by_function_addition(self):
with self.check_unchanged(self.original_link, self.x):
with self.link.init_scope():
self.link.f100 = self.f100
def test_original_unaffected_by_link_addition(self):
with self.check_unchanged(self.original_link, self.x):
with self.link.init_scope():
self.link.l100 = self.l100
def test_original_unaffected_by_function_deletion(self):
with self.check_unchanged(self.original_link, self.x):
with self.link.init_scope():
self.link.pick = None
del self.link.f1
def test_original_unaffected_by_link_deletion(self):
with self.check_unchanged(self.original_link, self.x):
with self.link.init_scope():
self.link.pick = None
del self.link.l1
@testing.parameterize(
{'pick': 'l1', 'layer_names': ['l1']},
{'pick': 'f1', 'layer_names': ['l1', 'f1']},
{'pick': ['f1', 'f2'], 'layer_names': ['l1', 'f1', 'f2']},
{'pick': None, 'layer_names': ['l1', 'f1', 'f2', 'l2']}
)
class TestPickableSequentialChainRemoveUnused(unittest.TestCase):
def setUp(self):
self.l1 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
self.f1 = DummyFunc()
self.f2 = DummyFunc()
self.l2 = ConstantStubLink(np.random.uniform(size=(1, 3, 24, 24)))
self.link = PickableSequentialChain()
with self.link.init_scope():
self.link.l1 = self.l1
self.link.f1 = self.f1
self.link.f2 = self.f2
self.link.l2 = self.l2
self.link.pick = self.pick
def check_remove_unused(self):
self.link.remove_unused()
self.assertEqual(self.link.layer_names, self.layer_names)
for name in ['l1', 'f1', 'f2', 'l2']:
if name in self.layer_names:
self.assertTrue(hasattr(self.link, name))
else:
self.assertFalse(hasattr(self.link, name))
def test_remove_unused_cpu(self):
self.check_remove_unused()
@attr.gpu
def test_remove_unused_gpu(self):
self.link.to_gpu()
self.check_remove_unused()
testing.run_module(__name__, __file__)
|
from datetime import timedelta
import logging
from eagle200_reader import EagleReader
from requests.exceptions import ConnectionError as ConnectError, HTTPError, Timeout
from uEagle import Eagle as LegacyReader
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_IP_ADDRESS,
DEVICE_CLASS_POWER,
ENERGY_KILO_WATT_HOUR,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
CONF_CLOUD_ID = "cloud_id"
CONF_INSTALL_CODE = "install_code"
POWER_KILO_WATT = "kW"
_LOGGER = logging.getLogger(__name__)
MIN_SCAN_INTERVAL = timedelta(seconds=30)
SENSORS = {
"instantanous_demand": ("Eagle-200 Meter Power Demand", POWER_KILO_WATT),
"summation_delivered": (
"Eagle-200 Total Meter Energy Delivered",
ENERGY_KILO_WATT_HOUR,
),
"summation_received": (
"Eagle-200 Total Meter Energy Received",
ENERGY_KILO_WATT_HOUR,
),
"summation_total": (
"Eagle-200 Net Meter Energy (Delivered minus Received)",
ENERGY_KILO_WATT_HOUR,
),
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_CLOUD_ID): cv.string,
vol.Required(CONF_INSTALL_CODE): cv.string,
}
)
def hwtest(cloud_id, install_code, ip_address):
"""Try API call 'get_network_info' to see if target device is Legacy or Eagle-200."""
reader = LeagleReader(cloud_id, install_code, ip_address)
response = reader.get_network_info()
# Branch to test if target is Legacy Model
if "NetworkInfo" in response:
if response["NetworkInfo"].get("ModelId", None) == "Z109-EAGLE":
return reader
# Branch to test if target is Eagle-200 Model
if "Response" in response:
if response["Response"].get("Command", None) == "get_network_info":
return EagleReader(ip_address, cloud_id, install_code)
# Catch-all if hardware ID tests fail
raise ValueError("Couldn't determine device model.")
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the Eagle-200 sensor."""
ip_address = config[CONF_IP_ADDRESS]
cloud_id = config[CONF_CLOUD_ID]
install_code = config[CONF_INSTALL_CODE]
try:
eagle_reader = hwtest(cloud_id, install_code, ip_address)
except (ConnectError, HTTPError, Timeout, ValueError) as error:
_LOGGER.error("Failed to connect during setup: %s", error)
return
eagle_data = EagleData(eagle_reader)
eagle_data.update()
monitored_conditions = list(SENSORS)
sensors = []
for condition in monitored_conditions:
sensors.append(
EagleSensor(
eagle_data, condition, SENSORS[condition][0], SENSORS[condition][1]
)
)
add_entities(sensors)
class EagleSensor(Entity):
"""Implementation of the Rainforest Eagle-200 sensor."""
def __init__(self, eagle_data, sensor_type, name, unit):
"""Initialize the sensor."""
self.eagle_data = eagle_data
self._type = sensor_type
self._name = name
self._unit_of_measurement = unit
self._state = None
@property
def device_class(self):
"""Return the power device class for the instantanous_demand sensor."""
if self._type == "instantanous_demand":
return DEVICE_CLASS_POWER
return None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
def update(self):
"""Get the energy information from the Rainforest Eagle."""
self.eagle_data.update()
self._state = self.eagle_data.get_state(self._type)
class EagleData:
"""Get the latest data from the Eagle-200 device."""
def __init__(self, eagle_reader):
"""Initialize the data object."""
self._eagle_reader = eagle_reader
self.data = {}
@Throttle(MIN_SCAN_INTERVAL)
def update(self):
"""Get the latest data from the Eagle-200 device."""
try:
self.data = self._eagle_reader.update()
_LOGGER.debug("API data: %s", self.data)
except (ConnectError, HTTPError, Timeout, ValueError) as error:
_LOGGER.error("Unable to connect during update: %s", error)
self.data = {}
def get_state(self, sensor_type):
"""Get the sensor value from the dictionary."""
state = self.data.get(sensor_type)
_LOGGER.debug("Updating: %s - %s", sensor_type, state)
return state
class LeagleReader(LegacyReader):
"""Wraps uEagle to make it behave like eagle_reader, offering update()."""
def update(self):
"""Fetch and return the four sensor values in a dict."""
out = {}
resp = self.get_instantaneous_demand()["InstantaneousDemand"]
out["instantanous_demand"] = resp["Demand"]
resp = self.get_current_summation()["CurrentSummation"]
out["summation_delivered"] = resp["SummationDelivered"]
out["summation_received"] = resp["SummationReceived"]
out["summation_total"] = out["summation_delivered"] - out["summation_received"]
return out
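# Illustrative sketch (not part of the original integration): the shape of the
# reader payload the sensors above consume. The numbers are made-up values.
def _example_reader_payload():
    data = {
        "instantanous_demand": 1.152,
        "summation_delivered": 10411.02,
        "summation_received": 2.01,
    }
    data["summation_total"] = data["summation_delivered"] - data["summation_received"]
    # Each key in SENSORS becomes one EagleSensor entity reading this dict.
    assert set(data) == set(SENSORS)
    return data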
|
import unittest
from kalliope.core.NeuronModule import MissingParameterException
from kalliope.neurons.sleep.sleep import Sleep
class TestSleep(unittest.TestCase):
def setUp(self):
self.seconds = 10
self.random="random"
def testParameters(self):
def run_test(parameters_to_test):
with self.assertRaises(MissingParameterException):
Sleep(**parameters_to_test)
# empty
parameters = dict()
run_test(parameters)
# missing seconds
parameters = {
"random": self.random
}
run_test(parameters)
if __name__ == '__main__':
unittest.main()
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from vmstat import VMStatCollector
###############################################################################
class TestVMStatCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('VMStatCollector', {
'interval': 10
})
self.collector = VMStatCollector(config, None)
def test_import(self):
self.assertTrue(VMStatCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_proc_vmstat(self, publish_mock, open_mock):
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/vmstat')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
VMStatCollector.PROC = self.getFixturePath('proc_vmstat_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
VMStatCollector.PROC = self.getFixturePath('proc_vmstat_2')
self.collector.collect()
metrics = {
'pgfault': 71.1,
'pgmajfault': 0.0,
'pgpgin': 0.0,
'pgpgout': 9.2,
'pswpin': 0.0,
'pswpout': 0.0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
import platform
import os
import diamond.collector
# Detect the architecture of the system
# and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over
# counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
counter = (2 ** 64) - 1
else:
counter = (2 ** 32) - 1
class SlabInfoCollector(diamond.collector.Collector):
PROC = '/proc/slabinfo'
def get_default_config_help(self):
config_help = super(SlabInfoCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SlabInfoCollector, self).get_default_config()
config.update({
'path': 'slabinfo'
})
return config
def collect(self):
"""
        Collect slab allocation statistics
"""
if not os.access(self.PROC, os.R_OK):
return False
# Open PROC file
file = open(self.PROC, 'r')
# Get data
for line in file:
if line.startswith('slabinfo'):
continue
if line.startswith('#'):
keys = line.split()[1:]
continue
data = line.split()
for key in ['<active_objs>', '<num_objs>', '<objsize>',
'<objperslab>', '<pagesperslab>']:
i = keys.index(key)
metric_name = data[0] + '.' + key.replace(
'<', '').replace('>', '')
metric_value = int(data[i])
self.publish(metric_name, metric_value)
for key in ['<limit>', '<batchcount>', '<sharedfactor>']:
i = keys.index(key)
metric_name = data[0] + '.tunables.' + key.replace(
'<', '').replace('>', '')
metric_value = int(data[i])
self.publish(metric_name, metric_value)
for key in ['<active_slabs>', '<num_slabs>', '<sharedavail>']:
i = keys.index(key)
metric_name = data[0] + '.slabdata.' + key.replace(
'<', '').replace('>', '')
metric_value = int(data[i])
self.publish(metric_name, metric_value)
# Close file
file.close()
|
from abc import ABC, abstractmethod
from datetime import timedelta
import logging
from pynuki import NukiBridge
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant.components.lock import PLATFORM_SCHEMA, SUPPORT_OPEN, LockEntity
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_PORT, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.service import extract_entity_ids
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 20
ATTR_BATTERY_CRITICAL = "battery_critical"
ATTR_NUKI_ID = "nuki_id"
ATTR_UNLATCH = "unlatch"
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=5)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=30)
NUKI_DATA = "nuki"
SERVICE_LOCK_N_GO = "lock_n_go"
ERROR_STATES = (0, 254, 255)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_TOKEN): cv.string,
}
)
LOCK_N_GO_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_UNLATCH, default=False): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Nuki lock platform."""
bridge = NukiBridge(
config[CONF_HOST],
config[CONF_TOKEN],
config[CONF_PORT],
True,
DEFAULT_TIMEOUT,
)
devices = [NukiLockEntity(lock) for lock in bridge.locks]
def service_handler(service):
"""Service handler for nuki services."""
entity_ids = extract_entity_ids(hass, service)
unlatch = service.data[ATTR_UNLATCH]
for lock in devices:
if lock.entity_id not in entity_ids:
continue
lock.lock_n_go(unlatch=unlatch)
hass.services.register(
DOMAIN,
SERVICE_LOCK_N_GO,
service_handler,
schema=LOCK_N_GO_SERVICE_SCHEMA,
)
devices.extend([NukiOpenerEntity(opener) for opener in bridge.openers])
add_entities(devices)
class NukiDeviceEntity(LockEntity, ABC):
"""Representation of a Nuki device."""
def __init__(self, nuki_device):
"""Initialize the lock."""
self._nuki_device = nuki_device
self._available = nuki_device.state not in ERROR_STATES
@property
def name(self):
"""Return the name of the lock."""
return self._nuki_device.name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._nuki_device.nuki_id
@property
@abstractmethod
def is_locked(self):
"""Return true if lock is locked."""
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
data = {
ATTR_BATTERY_CRITICAL: self._nuki_device.battery_critical,
ATTR_NUKI_ID: self._nuki_device.nuki_id,
}
return data
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
def update(self):
"""Update the nuki lock properties."""
for level in (False, True):
try:
self._nuki_device.update(aggressive=level)
except RequestException:
_LOGGER.warning("Network issues detect with %s", self.name)
self._available = False
continue
# If in error state, we force an update and repoll data
self._available = self._nuki_device.state not in ERROR_STATES
if self._available:
break
@abstractmethod
def lock(self, **kwargs):
"""Lock the device."""
@abstractmethod
def unlock(self, **kwargs):
"""Unlock the device."""
@abstractmethod
def open(self, **kwargs):
"""Open the door latch."""
class NukiLockEntity(NukiDeviceEntity):
"""Representation of a Nuki lock."""
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._nuki_device.is_locked
def lock(self, **kwargs):
"""Lock the device."""
self._nuki_device.lock()
def unlock(self, **kwargs):
"""Unlock the device."""
self._nuki_device.unlock()
def open(self, **kwargs):
"""Open the door latch."""
self._nuki_device.unlatch()
def lock_n_go(self, unlatch=False, **kwargs):
"""Lock and go.
This will first unlock the door, then wait for 20 seconds (or another
amount of time depending on the lock settings) and relock.
"""
self._nuki_device.lock_n_go(unlatch, kwargs)
class NukiOpenerEntity(NukiDeviceEntity):
"""Representation of a Nuki opener."""
@property
def is_locked(self):
"""Return true if ring-to-open is enabled."""
return not self._nuki_device.is_rto_activated
def lock(self, **kwargs):
"""Disable ring-to-open."""
self._nuki_device.deactivate_rto()
def unlock(self, **kwargs):
"""Enable ring-to-open."""
self._nuki_device.activate_rto()
def open(self, **kwargs):
"""Buzz open the door."""
self._nuki_device.electric_strike_actuation()
|
import pytest
from unittest.mock import Mock
from kombu import Exchange
from kombu.utils.imports import symbol_by_name
class test_symbol_by_name:
def test_instance_returns_instance(self):
instance = object()
assert symbol_by_name(instance) is instance
def test_returns_default(self):
default = object()
assert symbol_by_name(
'xyz.ryx.qedoa.weq:foz', default=default) is default
def test_no_default(self):
with pytest.raises(ImportError):
symbol_by_name('xyz.ryx.qedoa.weq:foz')
def test_imp_reraises_ValueError(self):
imp = Mock()
imp.side_effect = ValueError()
with pytest.raises(ValueError):
symbol_by_name('kombu.Connection', imp=imp)
def test_package(self):
assert symbol_by_name('.entity:Exchange', package='kombu') is Exchange
assert symbol_by_name(':Consumer', package='kombu')
|
from __future__ import division
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import RPN
from chainercv.links.model.fpn import rpn_loss
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
class TestRPN(unittest.TestCase):
def setUp(self):
self.link = RPN(scales=(1 / 2, 1 / 4, 1 / 8))
def _check_call(self):
hs = [
chainer.Variable(_random_array(self.link.xp, (2, 64, 32, 32))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 16, 16))),
chainer.Variable(_random_array(self.link.xp, (2, 64, 8, 8))),
]
locs, confs = self.link(hs)
self.assertEqual(len(locs), 3)
self.assertEqual(len(confs), 3)
for l in range(3):
self.assertIsInstance(locs[l], chainer.Variable)
self.assertIsInstance(locs[l].array, self.link.xp.ndarray)
self.assertEqual(locs[l].shape, (2, (32 * 32 >> 2 * l) * 3, 4))
self.assertIsInstance(confs[l], chainer.Variable)
self.assertIsInstance(confs[l].array, self.link.xp.ndarray)
self.assertEqual(confs[l].shape, (2, (32 * 32 >> 2 * l) * 3))
def test_call_cpu(self):
self._check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
def _check_anchors(self):
anchors = self.link.anchors(((32, 32), (16, 16), (8, 8)))
self.assertEqual(len(anchors), 3)
for l in range(3):
self.assertIsInstance(anchors[l], self.link.xp.ndarray)
self.assertEqual(anchors[l].shape, ((32 * 32 >> 2 * l) * 3, 4))
def test_anchors_cpu(self):
self._check_anchors()
@attr.gpu
def test_anchors_gpu(self):
self.link.to_gpu()
self._check_anchors()
def _check_decode(self):
locs = [
chainer.Variable(_random_array(
self.link.xp, (2, 32 * 32 * 3, 4))),
chainer.Variable(_random_array(
self.link.xp, (2, 16 * 16 * 3, 4))),
chainer.Variable(_random_array(
self.link.xp, (2, 8 * 8 * 3, 4))),
]
confs = [
chainer.Variable(_random_array(
self.link.xp, (2, 32 * 32 * 3))),
chainer.Variable(_random_array(
self.link.xp, (2, 16 * 16 * 3))),
chainer.Variable(_random_array(
self.link.xp, (2, 8 * 8 * 3))),
]
anchors = self.link.anchors(((32, 32), (16, 16), (8, 8)))
rois, roi_indices = self.link.decode(
locs, confs, anchors, (2, 3, 64, 64))
self.assertIsInstance(rois, self.link.xp.ndarray)
self.assertIsInstance(roi_indices, self.link.xp.ndarray)
self.assertEqual(rois.shape[0], roi_indices.shape[0])
self.assertEqual(rois.shape[1:], (4,))
self.assertEqual(roi_indices.shape[1:], ())
def test_decode_cpu(self):
self._check_decode()
@attr.gpu
def test_decode_gpu(self):
self.link.to_gpu()
self._check_decode()
class TestRPNLoss(unittest.TestCase):
def _check_rpn_loss(self, xp):
locs = [
chainer.Variable(_random_array(
xp, (2, 32 * 32 * 3, 4))),
chainer.Variable(_random_array(
xp, (2, 16 * 16 * 3, 4))),
chainer.Variable(_random_array(
xp, (2, 8 * 8 * 3, 4))),
]
confs = [
chainer.Variable(_random_array(
xp, (2, 32 * 32 * 3))),
chainer.Variable(_random_array(
xp, (2, 16 * 16 * 3))),
chainer.Variable(_random_array(
xp, (2, 8 * 8 * 3))),
]
anchors = RPN(scales=(1 / 2, 1 / 4, 1 / 8)) \
.anchors(((32, 32), (16, 16), (8, 8)))
bboxes = [
xp.array(((2, 4, 6, 7), (1, 12, 3, 30)), dtype=np.float32),
xp.array(((10, 2, 12, 12),), dtype=np.float32),
]
loc_loss, conf_loss = rpn_loss(
locs, confs, anchors, ((480, 640), (320, 320)), bboxes)
self.assertIsInstance(loc_loss, chainer.Variable)
self.assertIsInstance(loc_loss.array, xp.ndarray)
self.assertEqual(loc_loss.shape, ())
self.assertIsInstance(conf_loss, chainer.Variable)
self.assertIsInstance(conf_loss.array, xp.ndarray)
self.assertEqual(conf_loss.shape, ())
def test_rpn_loss_cpu(self):
self._check_rpn_loss(np)
@attr.gpu
def test_rpn_loss_gpu(self):
import cupy
self._check_rpn_loss(cupy)
testing.run_module(__name__, __file__)
|
from unittest.mock import Mock
import pytest
from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
@pytest.fixture
def mock_bridge_setup():
"""Mock bridge setup."""
with patch.object(hue, "HueBridge") as mock_bridge:
mock_bridge.return_value.async_setup = AsyncMock(return_value=True)
mock_bridge.return_value.api.config = Mock(bridgeid="mock-id")
yield mock_bridge.return_value
async def test_setup_with_no_config(hass):
"""Test that we do not discover anything or try to set up a bridge."""
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
# No flows started
assert len(hass.config_entries.flow.async_progress()) == 0
# No configs stored
assert hass.data[hue.DOMAIN] == {}
async def test_unload_entry(hass, mock_bridge_setup):
"""Test being able to unload an entry."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert len(mock_bridge_setup.mock_calls) == 1
mock_bridge_setup.async_reset = AsyncMock(return_value=True)
assert await hue.async_unload_entry(hass, entry)
assert len(mock_bridge_setup.async_reset.mock_calls) == 1
assert hass.data[hue.DOMAIN] == {}
async def test_setting_unique_id(hass, mock_bridge_setup):
"""Test we set unique ID if not set yet."""
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert entry.unique_id == "mock-id"
async def test_fixing_unique_id_no_other(hass, mock_bridge_setup):
"""Test we set unique ID if not set yet."""
entry = MockConfigEntry(
domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id"
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
assert entry.unique_id == "mock-id"
async def test_fixing_unique_id_other_ignored(hass, mock_bridge_setup):
"""Test we set unique ID if not set yet."""
MockConfigEntry(
domain=hue.DOMAIN,
data={"host": "0.0.0.0"},
unique_id="mock-id",
source=config_entries.SOURCE_IGNORE,
).add_to_hass(hass)
entry = MockConfigEntry(
domain=hue.DOMAIN,
data={"host": "0.0.0.0"},
unique_id="invalid-id",
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
await hass.async_block_till_done()
assert entry.unique_id == "mock-id"
assert hass.config_entries.async_entries() == [entry]
async def test_fixing_unique_id_other_correct(hass, mock_bridge_setup):
"""Test we remove config entry if another one has correct ID."""
correct_entry = MockConfigEntry(
domain=hue.DOMAIN,
data={"host": "0.0.0.0"},
unique_id="mock-id",
)
correct_entry.add_to_hass(hass)
entry = MockConfigEntry(
domain=hue.DOMAIN,
data={"host": "0.0.0.0"},
unique_id="invalid-id",
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, hue.DOMAIN, {}) is True
await hass.async_block_till_done()
assert hass.config_entries.async_entries() == [correct_entry]
async def test_security_vuln_check(hass):
"""Test that we report security vulnerabilities."""
assert await async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
config = Mock(bridgeid="", mac="", modelid="BSB002", swversion="1935144020")
config.name = "Hue"
with patch.object(
hue,
"HueBridge",
Mock(
return_value=Mock(
async_setup=AsyncMock(return_value=True), api=Mock(config=config)
)
),
):
assert await async_setup_component(hass, "hue", {})
await hass.async_block_till_done()
state = hass.states.get("persistent_notification.hue_hub_firmware")
assert state is not None
assert "CVE-2020-6007" in state.attributes["message"]
|
import pytest
from qutebrowser.mainwindow.statusbar import progress
from qutebrowser.utils import usertypes, utils
@pytest.fixture
def progress_widget(qtbot, config_stub):
"""Create a Progress widget and checks its initial state."""
widget = progress.Progress()
widget.enabled = True
qtbot.add_widget(widget)
assert not widget.isVisible()
assert not widget.isTextVisible()
return widget
def test_load_started(progress_widget):
"""Ensure the Progress widget reacts properly when the page starts loading.
Args:
progress_widget: Progress widget that will be tested.
"""
progress_widget.on_load_started()
assert progress_widget.value() == 0
assert progress_widget.isVisible()
@pytest.mark.parametrize('progress, load_status, expected_visible', [
(15, usertypes.LoadStatus.loading, True),
(100, usertypes.LoadStatus.success, False),
(100, usertypes.LoadStatus.error, False),
(100, usertypes.LoadStatus.warn, False),
(100, usertypes.LoadStatus.none, False),
])
def test_tab_changed(fake_web_tab, progress_widget, progress, load_status,
expected_visible):
"""Test that progress widget value and visibility state match expectations.
Args:
progress_widget: Progress widget that will be tested.
"""
tab = fake_web_tab(progress=progress, load_status=load_status)
progress_widget.on_tab_changed(tab)
actual = progress_widget.value(), progress_widget.isVisible()
expected = tab.progress(), expected_visible
assert actual == expected
def test_progress_affecting_statusbar_height(config_stub, fake_statusbar,
progress_widget):
"""Make sure the statusbar stays the same height when progress is shown.
https://github.com/qutebrowser/qutebrowser/issues/886
https://github.com/qutebrowser/qutebrowser/pull/890
"""
if not utils.is_mac:
# There is a difference depending on the font. This seems to avoid
# this, but on macOS, we get a warning about the font not being found.
config_stub.val.fonts.statusbar = '8pt Monospace'
fake_statusbar.container.expose()
expected_height = fake_statusbar.fontMetrics().height()
assert fake_statusbar.height() == expected_height
fake_statusbar.hbox.addWidget(progress_widget)
progress_widget.show()
assert fake_statusbar.height() == expected_height
def test_progress_big_statusbar(qtbot, fake_statusbar, progress_widget):
"""Make sure the progress bar is small with a big statusbar.
https://github.com/qutebrowser/qutebrowser/commit/46d1760798b730852e2207e2cdc05a9308e44f80
"""
fake_statusbar.hbox.addWidget(progress_widget)
progress_widget.show()
expected_height = progress_widget.height()
fake_statusbar.hbox.addStrut(50)
assert progress_widget.height() == expected_height
|
import pypck
from homeassistant.components.cover import CoverEntity
from homeassistant.const import CONF_ADDRESS
from . import LcnDevice
from .const import CONF_CONNECTIONS, CONF_MOTOR, CONF_REVERSE_TIME, DATA_LCN
from .helpers import get_connection
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Setups the LCN cover platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
if config[CONF_MOTOR] == "OUTPUTS":
devices.append(LcnOutputsCover(config, address_connection))
else: # RELAYS
devices.append(LcnRelayCover(config, address_connection))
async_add_entities(devices)
class LcnOutputsCover(LcnDevice, CoverEntity):
"""Representation of a LCN cover connected to output ports."""
def __init__(self, config, address_connection):
"""Initialize the LCN cover."""
super().__init__(config, address_connection)
self.output_ids = [
pypck.lcn_defs.OutputPort["OUTPUTUP"].value,
pypck.lcn_defs.OutputPort["OUTPUTDOWN"].value,
]
if CONF_REVERSE_TIME in config:
self.reverse_time = pypck.lcn_defs.MotorReverseTime[
config[CONF_REVERSE_TIME]
]
else:
self.reverse_time = None
self._is_closed = False
self._is_closing = False
self._is_opening = False
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(
pypck.lcn_defs.OutputPort["OUTPUTUP"]
)
await self.address_connection.activate_status_request_handler(
pypck.lcn_defs.OutputPort["OUTPUTDOWN"]
)
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._is_closed
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._is_opening
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._is_closing
@property
def assumed_state(self):
"""Return True if unable to access real state of the entity."""
return True
async def async_close_cover(self, **kwargs):
"""Close the cover."""
self._is_opening = False
self._is_closing = True
state = pypck.lcn_defs.MotorStateModifier.DOWN
self.address_connection.control_motors_outputs(state, self.reverse_time)
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
self._is_closed = False
self._is_opening = True
self._is_closing = False
state = pypck.lcn_defs.MotorStateModifier.UP
self.address_connection.control_motors_outputs(state, self.reverse_time)
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
self._is_closing = False
self._is_opening = False
state = pypck.lcn_defs.MotorStateModifier.STOP
self.address_connection.control_motors_outputs(state)
self.async_write_ha_state()
def input_received(self, input_obj):
"""Set cover states when LCN input object (command) is received."""
if (
not isinstance(input_obj, pypck.inputs.ModStatusOutput)
or input_obj.get_output_id() not in self.output_ids
):
return
if input_obj.get_percent() > 0: # motor is on
if input_obj.get_output_id() == self.output_ids[0]:
self._is_opening = True
self._is_closing = False
else: # self.output_ids[1]
self._is_opening = False
self._is_closing = True
self._is_closed = self._is_closing
else: # motor is off
# cover is assumed to be closed if we were in closing state before
self._is_closed = self._is_closing
self._is_closing = False
self._is_opening = False
self.async_write_ha_state()
class LcnRelayCover(LcnDevice, CoverEntity):
"""Representation of a LCN cover connected to relays."""
def __init__(self, config, address_connection):
"""Initialize the LCN cover."""
super().__init__(config, address_connection)
self.motor = pypck.lcn_defs.MotorPort[config[CONF_MOTOR]]
self.motor_port_onoff = self.motor.value * 2
self.motor_port_updown = self.motor_port_onoff + 1
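        # Each motor uses two consecutive relays: an on/off relay followed by an
        # up/down relay, e.g. motor port 1 -> relay indices 2 and 3.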
self._is_closed = False
self._is_closing = False
self._is_opening = False
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(self.motor)
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._is_closed
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._is_opening
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._is_closing
@property
def assumed_state(self):
"""Return True if unable to access real state of the entity."""
return True
async def async_close_cover(self, **kwargs):
"""Close the cover."""
self._is_opening = False
self._is_closing = True
states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.DOWN
self.address_connection.control_motors_relays(states)
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
self._is_closed = False
self._is_opening = True
self._is_closing = False
states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.UP
self.address_connection.control_motors_relays(states)
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
self._is_closing = False
self._is_opening = False
states = [pypck.lcn_defs.MotorStateModifier.NOCHANGE] * 4
states[self.motor.value] = pypck.lcn_defs.MotorStateModifier.STOP
self.address_connection.control_motors_relays(states)
self.async_write_ha_state()
def input_received(self, input_obj):
"""Set cover states when LCN input object (command) is received."""
if not isinstance(input_obj, pypck.inputs.ModStatusRelays):
return
states = input_obj.states # list of boolean values (relay on/off)
if states[self.motor_port_onoff]: # motor is on
self._is_opening = not states[self.motor_port_updown] # set direction
self._is_closing = states[self.motor_port_updown] # set direction
else: # motor is off
self._is_opening = False
self._is_closing = False
self._is_closed = states[self.motor_port_updown]
self.async_write_ha_state()
|
import re
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import publisher
from perfkitbenchmarker import sample
flags.DEFINE_integer(
'pgbench_scale_factor', 1, 'scale factor used to fill the database',
lower_bound=1)
flags.DEFINE_integer(
'pgbench_seconds_per_test', 10, 'number of seconds to run each test phase',
lower_bound=1)
flags.DEFINE_integer(
'pgbench_seconds_to_pause_before_steps', 30,
'number of seconds to pause before each client load step')
flag_util.DEFINE_integerlist(
'pgbench_client_counts',
flag_util.IntegerList([1]),
'array of client counts passed to pgbench', module_name=__name__)
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'pgbench'
BENCHMARK_CONFIG = """
pgbench:
description: pgbench benchmark for managed PostgreSQL databases
relational_db:
engine: postgres
db_spec:
GCP:
machine_type:
cpus: 16
memory: 64GiB
zone: us-central1-c
AWS:
machine_type: db.m4.4xlarge
zone: us-west-1c
Azure:
machine_type:
tier: Standard
compute_units: 800
zone: eastus
db_disk_spec:
GCP:
disk_size: 1000
disk_type: pd-ssd
AWS:
disk_size: 6144
disk_type: gp2
Azure:
        # Valid storage sizes range from a minimum of 128000 MB, in increments of 128000 MB, up to a maximum of 1024000 MB.
disk_size: 128
vm_groups:
clients:
os_type: ubuntu1604
vm_spec:
GCP:
machine_type: n1-standard-16
zone: us-central1-c
AWS:
machine_type: m4.4xlarge
zone: us-west-1c
Azure:
machine_type: Standard_A4m_v2
zone: eastus
disk_spec: *default_500_gb
servers:
os_type: ubuntu1604
vm_spec:
GCP:
machine_type: n1-standard-16
zone: us-central1-c
AWS:
machine_type: m4.4xlarge
zone: us-west-1c
Azure:
machine_type: Standard_A4m_v2
zone: eastus
disk_spec: *default_500_gb
"""
TEST_DB_NAME = 'perftest'
DEFAULT_DB_NAME = 'postgres'
# TODO(ferneyhough): determine MAX_JOBS from VM NumCpusForBenchmark()
MAX_JOBS = 16
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
return config
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Benchmark config to verify
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
del benchmark_config
def UpdateBenchmarkSpecWithPrepareStageFlags(benchmark_spec):
"""Updates benchmark_spec with flags that are used in the prepare stage."""
benchmark_spec.scale_factor = FLAGS.pgbench_scale_factor
def UpdateBenchmarkSpecWithRunStageFlags(benchmark_spec):
"""Updates benchmark_spec with flags that are used in the run stage."""
benchmark_spec.seconds_per_test = FLAGS.pgbench_seconds_per_test
benchmark_spec.seconds_to_pause = FLAGS.pgbench_seconds_to_pause_before_steps
benchmark_spec.client_counts = FLAGS.pgbench_client_counts
def Prepare(benchmark_spec):
"""Prepares the client and server VM for the pgbench test.
This function installs pgbench on the client and creates and populates
the test database on the server.
If DEFAULT_DB_NAME exists, it will be dropped and recreated,
else it will be created. pgbench will populate the database
with sample data using FLAGS.pgbench_scale_factor.
Args:
benchmark_spec: benchmark_spec object which contains the database server
and client_vm
"""
vm = benchmark_spec.vms[0]
vm.Install('pgbench')
UpdateBenchmarkSpecWithPrepareStageFlags(benchmark_spec)
db = benchmark_spec.relational_db
connection_string = db.MakePsqlConnectionString(DEFAULT_DB_NAME)
CreateDatabase(benchmark_spec, DEFAULT_DB_NAME, TEST_DB_NAME)
connection_string = db.MakePsqlConnectionString(TEST_DB_NAME)
vm.RobustRemoteCommand('pgbench {0} -i -s {1}'.format(
connection_string, benchmark_spec.scale_factor))
stdout = _IssueDatabaseCommand(
benchmark_spec,
TEST_DB_NAME,
'SELECT pg_size_pretty(pg_database_size(\'{0}\'))'.format(TEST_DB_NAME))
db.postgres_db_size_MB = ParseSizeFromTable(stdout)
def ParseSizeFromTable(stdout):
"""Parse stdout of a table representing size of the database.
Example stdoutput is:
pg_size_pretty
----------------
22 MB
(1 row)
Args:
stdout: stdout from psql query obtaining the table size.
Returns:
size in MB that was parsed from the table
Raises:
Exception: if unknown how to parse the output.
"""
size_line = stdout.splitlines()[2]
match = re.match(r' *(\d+) *(\w*)$', size_line)
size = float(match.group(1))
units = match.group(2)
if units == 'MB':
return size
elif units == 'GB':
return size * 1000
else:
    raise Exception('Unknown units in database size output: {0} {1}.'.format(size, units))
def DoesDatabaseExist(client_vm, connection_string, database_name):
"""Returns whether or not the specifid database exists on the server.
Args:
client_vm: client vm which will run the query
connection_string: database server connection string understood by psql
    database_name: name of database to check for existence
Returns:
True if database_name exists, else False
"""
command = 'psql {0} -lqt | cut -d \| -f 1 | grep -qw {1}'.format(
connection_string, database_name)
_, _, return_value = client_vm.RemoteCommandWithReturnCode(
command, ignore_failure=True)
return return_value == 0
def _IssueDatabaseCommand(benchmark_spec, database_name, command):
client_vm = benchmark_spec.vms[0]
db = benchmark_spec.relational_db
connection_string = db.MakePsqlConnectionString(database_name)
command = 'psql {0} -c "{1};"'.format(
connection_string,
command)
stdout, _ = client_vm.RemoteCommand(command, should_log=True)
return stdout
def CreateDatabase(benchmark_spec, default_database_name, new_database_name):
"""Creates a new database on the database server.
If new_database_name already exists on the server, it will be dropped and
recreated.
Args:
benchmark_spec: benchmark_spec object which contains the database server
and client_vm
default_database_name: name of the default database guaranteed to exist on
the server
new_database_name: name of the new database to create, or drop and recreate
"""
client_vm = benchmark_spec.vms[0]
db = benchmark_spec.relational_db
connection_string = db.MakePsqlConnectionString(default_database_name)
if DoesDatabaseExist(client_vm, connection_string, new_database_name):
_IssueDatabaseCommand(
benchmark_spec,
default_database_name,
'DROP DATABASE {0}'.format(new_database_name))
_IssueDatabaseCommand(
benchmark_spec,
default_database_name,
'CREATE DATABASE {0}'.format(new_database_name))
def MakeSamplesFromOutput(pgbench_stderr, num_clients, num_jobs,
additional_metadata):
"""Creates sample objects from the given pgbench output and metadata.
Two samples will be returned, one containing a latency list and
the other a tps (transactions per second) list. Each will contain
N floating point samples, where N = FLAGS.pgbench_seconds_per_test.
Args:
pgbench_stderr: stderr from the pgbench run command
num_clients: number of pgbench clients used
num_jobs: number of pgbench jobs (threads) used
additional_metadata: additional metadata to add to each sample
Returns:
A list containing a latency sample and a tps sample. Each sample
consists of a list of floats, sorted by time that were collected
by running pgbench with the given client and job counts.
"""
lines = pgbench_stderr.splitlines()[2:]
tps_numbers = [float(line.split(' ')[3]) for line in lines]
latency_numbers = [float(line.split(' ')[6]) for line in lines]
metadata = additional_metadata.copy()
metadata.update({'clients': num_clients, 'jobs': num_jobs})
tps_metadata = metadata.copy()
tps_metadata.update({'tps': tps_numbers})
latency_metadata = metadata.copy()
latency_metadata.update({'latency': latency_numbers})
tps_sample = sample.Sample('tps_array', -1, 'tps', tps_metadata)
latency_sample = sample.Sample('latency_array', -1, 'ms', latency_metadata)
return [tps_sample, latency_sample]
def Run(benchmark_spec):
"""Runs the pgbench benchark on the client vm, against the db server.
Args:
benchmark_spec: benchmark_spec object which contains the database server
and client_vm
Returns:
a list of sample objects
"""
UpdateBenchmarkSpecWithRunStageFlags(benchmark_spec)
db = benchmark_spec.relational_db
connection_string = db.MakePsqlConnectionString(TEST_DB_NAME)
common_metadata = {
'scale_factor': benchmark_spec.scale_factor,
'postgres_db_size_MB': db.postgres_db_size_MB,
'seconds_per_test': benchmark_spec.seconds_per_test,
'seconds_to_pause_before_steps': benchmark_spec.seconds_to_pause,
}
for client in benchmark_spec.client_counts:
time.sleep(benchmark_spec.seconds_to_pause)
    jobs = min(client, MAX_JOBS)
command = ('pgbench {0} --client={1} --jobs={2} --time={3} --progress=1 '
'--report-latencies'.format(
connection_string,
client,
jobs,
benchmark_spec.seconds_per_test))
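    # For example, with 4 clients this builds roughly (connection string elided):
    #   pgbench <connection string> --client=4 --jobs=4 --time=10 --progress=1 --report-latencies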
_, stderr = benchmark_spec.vms[0].RobustRemoteCommand(
command, should_log=True)
samples = MakeSamplesFromOutput(
stderr, client, jobs, common_metadata)
publisher.PublishRunStageSamples(benchmark_spec, samples)
return []
def Cleanup(benchmark_spec):
"""Uninstalls pgbench from the client vm."""
vm = benchmark_spec.vms[0]
vm.Uninstall('pgbench')
|
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import ten_crop
class TestTenCrop(unittest.TestCase):
def test_ten_crop(self):
img = np.random.uniform(size=(3, 48, 32))
out = ten_crop(img, (48, 32))
self.assertEqual(out.shape, (10, 3, 48, 32))
for crop in out[:5]:
np.testing.assert_equal(crop, img)
for crop in out[5:]:
np.testing.assert_equal(crop[:, :, ::-1], img)
out = ten_crop(img, (24, 12))
self.assertEqual(out.shape, (10, 3, 24, 12))
testing.run_module(__name__, __file__)
|
from pylatex.base_classes import Environment
def test_alltt():
class AllTT(Environment):
escape = False
content_separator = "\n"
alltt = AllTT()
alltt.append("This is alltt content\nIn two lines")
s = alltt.dumps()
assert s.startswith('\\begin{alltt}\nThis is'), \
"Unexpected start of environment"
assert s.endswith('two lines\n\\end{alltt}'), \
"Unexpected end of environment"
|
import asyncio
import logging
import konnected
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_ACCESS_TOKEN,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PIN,
CONF_PORT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_TYPE,
CONF_ZONE,
)
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.network import get_url
from .const import (
CONF_ACTIVATION,
CONF_API_HOST,
CONF_BLINK,
CONF_DEFAULT_OPTIONS,
CONF_DHT_SENSORS,
CONF_DISCOVERY,
CONF_DS18B20_SENSORS,
CONF_INVERSE,
CONF_MOMENTARY,
CONF_PAUSE,
CONF_POLL_INTERVAL,
CONF_REPEAT,
DOMAIN,
ENDPOINT_ROOT,
STATE_LOW,
ZONE_TO_PIN,
)
from .errors import CannotConnect
_LOGGER = logging.getLogger(__name__)
KONN_MODEL = "Konnected"
KONN_MODEL_PRO = "Konnected Pro"
# Indicate how each unit is controlled (pin or zone)
KONN_API_VERSIONS = {
KONN_MODEL: CONF_PIN,
KONN_MODEL_PRO: CONF_ZONE,
}
class AlarmPanel:
"""A representation of a Konnected alarm panel."""
def __init__(self, hass, config_entry):
"""Initialize the Konnected device."""
self.hass = hass
self.config_entry = config_entry
self.config = config_entry.data
self.options = config_entry.options or config_entry.data.get(
CONF_DEFAULT_OPTIONS, {}
)
self.host = self.config.get(CONF_HOST)
self.port = self.config.get(CONF_PORT)
self.client = None
self.status = None
self.api_version = KONN_API_VERSIONS[KONN_MODEL]
self.connected = False
self.connect_attempts = 0
self.cancel_connect_retry = None
@property
def device_id(self):
"""Device id is the chipId (pro) or MAC address as string with punctuation removed."""
return self.config.get(CONF_ID)
@property
def stored_configuration(self):
"""Return the configuration stored in `hass.data` for this device."""
return self.hass.data[DOMAIN][CONF_DEVICES].get(self.device_id)
@property
def available(self):
"""Return whether the device is available."""
return self.connected
def format_zone(self, zone, other_items=None):
"""Get zone or pin based dict based on the client type."""
payload = {
self.api_version: zone
if self.api_version == CONF_ZONE
else ZONE_TO_PIN[zone]
}
payload.update(other_items or {})
return payload
async def async_connect(self, now=None):
"""Connect to and setup a Konnected device."""
if self.connected:
return
if self.cancel_connect_retry:
# cancel any pending connect attempt and try now
self.cancel_connect_retry()
try:
self.client = konnected.Client(
host=self.host,
port=str(self.port),
websession=aiohttp_client.async_get_clientsession(self.hass),
)
self.status = await self.client.get_status()
self.api_version = KONN_API_VERSIONS.get(
self.status.get("model", KONN_MODEL), KONN_API_VERSIONS[KONN_MODEL]
)
_LOGGER.info(
"Connected to new %s device", self.status.get("model", "Konnected")
)
_LOGGER.debug(self.status)
await self.async_update_initial_states()
# brief delay to allow processing of recent status req
await asyncio.sleep(0.1)
await self.async_sync_device_config()
except self.client.ClientError as err:
_LOGGER.warning("Exception trying to connect to panel: %s", err)
# retry in a bit, never more than ~3 min
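            # (doubling backoff: 10 s, 20 s, 40 s, 80 s, then capped at 160 s per attempt)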
self.connect_attempts += 1
self.cancel_connect_retry = self.hass.helpers.event.async_call_later(
2 ** min(self.connect_attempts, 5) * 5, self.async_connect
)
return
self.connect_attempts = 0
self.connected = True
_LOGGER.info(
"Set up Konnected device %s. Open http://%s:%s in a "
"web browser to view device status",
self.device_id,
self.host,
self.port,
)
device_registry = await dr.async_get_registry(self.hass)
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, self.status.get("mac"))},
identifiers={(DOMAIN, self.device_id)},
manufacturer="Konnected.io",
name=self.config_entry.title,
model=self.config_entry.title,
sw_version=self.status.get("swVersion"),
)
async def update_switch(self, zone, state, momentary=None, times=None, pause=None):
"""Update the state of a switchable output."""
try:
if self.client:
if self.api_version == CONF_ZONE:
return await self.client.put_zone(
zone,
state,
momentary,
times,
pause,
)
# device endpoint uses pin number instead of zone
return await self.client.put_device(
ZONE_TO_PIN[zone],
state,
momentary,
times,
pause,
)
except self.client.ClientError as err:
_LOGGER.warning("Exception trying to update panel: %s", err)
raise CannotConnect
async def async_save_data(self):
"""Save the device configuration to `hass.data`."""
binary_sensors = {}
for entity in self.options.get(CONF_BINARY_SENSORS) or []:
zone = entity[CONF_ZONE]
binary_sensors[zone] = {
CONF_TYPE: entity[CONF_TYPE],
CONF_NAME: entity.get(
CONF_NAME, f"Konnected {self.device_id[6:]} Zone {zone}"
),
CONF_INVERSE: entity.get(CONF_INVERSE),
ATTR_STATE: None,
}
_LOGGER.debug(
"Set up binary_sensor %s (initial state: %s)",
binary_sensors[zone].get("name"),
binary_sensors[zone].get(ATTR_STATE),
)
actuators = []
for entity in self.options.get(CONF_SWITCHES) or []:
zone = entity[CONF_ZONE]
act = {
CONF_ZONE: zone,
CONF_NAME: entity.get(
CONF_NAME,
f"Konnected {self.device_id[6:]} Actuator {zone}",
),
ATTR_STATE: None,
CONF_ACTIVATION: entity[CONF_ACTIVATION],
CONF_MOMENTARY: entity.get(CONF_MOMENTARY),
CONF_PAUSE: entity.get(CONF_PAUSE),
CONF_REPEAT: entity.get(CONF_REPEAT),
}
actuators.append(act)
_LOGGER.debug("Set up switch %s", act)
sensors = []
for entity in self.options.get(CONF_SENSORS) or []:
zone = entity[CONF_ZONE]
sensor = {
CONF_ZONE: zone,
CONF_NAME: entity.get(
CONF_NAME, f"Konnected {self.device_id[6:]} Sensor {zone}"
),
CONF_TYPE: entity[CONF_TYPE],
CONF_POLL_INTERVAL: entity.get(CONF_POLL_INTERVAL),
}
sensors.append(sensor)
_LOGGER.debug(
"Set up %s sensor %s (initial state: %s)",
sensor.get(CONF_TYPE),
sensor.get(CONF_NAME),
sensor.get(ATTR_STATE),
)
device_data = {
CONF_BINARY_SENSORS: binary_sensors,
CONF_SENSORS: sensors,
CONF_SWITCHES: actuators,
CONF_BLINK: self.options.get(CONF_BLINK),
CONF_DISCOVERY: self.options.get(CONF_DISCOVERY),
CONF_HOST: self.host,
CONF_PORT: self.port,
"panel": self,
}
if CONF_DEVICES not in self.hass.data[DOMAIN]:
self.hass.data[DOMAIN][CONF_DEVICES] = {}
_LOGGER.debug(
"Storing data in hass.data[%s][%s][%s]: %s",
DOMAIN,
CONF_DEVICES,
self.device_id,
device_data,
)
self.hass.data[DOMAIN][CONF_DEVICES][self.device_id] = device_data
@callback
def async_binary_sensor_configuration(self):
"""Return the configuration map for syncing binary sensors."""
return [
self.format_zone(p) for p in self.stored_configuration[CONF_BINARY_SENSORS]
]
@callback
def async_actuator_configuration(self):
"""Return the configuration map for syncing actuators."""
return [
self.format_zone(
data[CONF_ZONE],
{"trigger": (0 if data.get(CONF_ACTIVATION) in [0, STATE_LOW] else 1)},
)
for data in self.stored_configuration[CONF_SWITCHES]
]
@callback
def async_dht_sensor_configuration(self):
"""Return the configuration map for syncing DHT sensors."""
return [
self.format_zone(
sensor[CONF_ZONE], {CONF_POLL_INTERVAL: sensor[CONF_POLL_INTERVAL]}
)
for sensor in self.stored_configuration[CONF_SENSORS]
if sensor[CONF_TYPE] == "dht"
]
@callback
def async_ds18b20_sensor_configuration(self):
"""Return the configuration map for syncing DS18B20 sensors."""
return [
self.format_zone(sensor[CONF_ZONE])
for sensor in self.stored_configuration[CONF_SENSORS]
if sensor[CONF_TYPE] == "ds18b20"
]
async def async_update_initial_states(self):
"""Update the initial state of each sensor from status poll."""
for sensor_data in self.status.get("sensors"):
sensor_config = self.stored_configuration[CONF_BINARY_SENSORS].get(
sensor_data.get(CONF_ZONE, sensor_data.get(CONF_PIN)), {}
)
entity_id = sensor_config.get(ATTR_ENTITY_ID)
state = bool(sensor_data.get(ATTR_STATE))
if sensor_config.get(CONF_INVERSE):
state = not state
async_dispatcher_send(self.hass, f"konnected.{entity_id}.update", state)
@callback
def async_desired_settings_payload(self):
"""Return a dict representing the desired device configuration."""
# keeping self.hass.data check for backwards compatibility
# newly configured integrations store this in the config entry
desired_api_host = self.options.get(CONF_API_HOST) or (
self.hass.data[DOMAIN].get(CONF_API_HOST) or get_url(self.hass)
)
desired_api_endpoint = desired_api_host + ENDPOINT_ROOT
return {
"sensors": self.async_binary_sensor_configuration(),
"actuators": self.async_actuator_configuration(),
"dht_sensors": self.async_dht_sensor_configuration(),
"ds18b20_sensors": self.async_ds18b20_sensor_configuration(),
"auth_token": self.config.get(CONF_ACCESS_TOKEN),
"endpoint": desired_api_endpoint,
"blink": self.options.get(CONF_BLINK, True),
"discovery": self.options.get(CONF_DISCOVERY, True),
}
@callback
def async_current_settings_payload(self):
"""Return a dict of configuration currently stored on the device."""
settings = self.status["settings"]
if not settings:
settings = {}
return {
"sensors": [
{self.api_version: s[self.api_version]}
for s in self.status.get("sensors")
],
"actuators": self.status.get("actuators"),
"dht_sensors": self.status.get(CONF_DHT_SENSORS),
"ds18b20_sensors": self.status.get(CONF_DS18B20_SENSORS),
"auth_token": settings.get("token"),
"endpoint": settings.get("endpoint"),
"blink": settings.get(CONF_BLINK),
"discovery": settings.get(CONF_DISCOVERY),
}
async def async_sync_device_config(self):
"""Sync the new zone configuration to the Konnected device if needed."""
_LOGGER.debug(
"Device %s settings payload: %s",
self.device_id,
self.async_desired_settings_payload(),
)
if (
self.async_desired_settings_payload()
!= self.async_current_settings_payload()
):
_LOGGER.info("pushing settings to device %s", self.device_id)
await self.client.put_settings(**self.async_desired_settings_payload())
async def get_status(hass, host, port):
"""Get the status of a Konnected Panel."""
client = konnected.Client(
host, str(port), aiohttp_client.async_get_clientsession(hass)
)
try:
return await client.get_status()
except client.ClientError as err:
_LOGGER.error("Exception trying to get panel status: %s", err)
raise CannotConnect from err
|
import gi
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
import struct
import subprocess
KEY_MAPPING = {
# Row 0
'ESC': (0, 1), 'F1': (0, 3), 'F2': (0, 4), 'F3': (0, 5), 'F4': (0, 6), 'F5': (0, 7), 'F6': (0, 8), 'F7': (0, 9), 'F8': (0, 10), 'F9': (0, 11), 'F10': (0, 12), 'F11': (0, 13), 'F12': (0, 14), 'PRTSCR': (0, 15), 'SCRLK': (0, 16), 'PAUSE': (0, 17), 'LOGO': (0, 20), 'JP1': (0, 21),
# Row 1
'M1': (1, 0), 'BACKTICK': (1, 1), '1': (1, 2), '2': (1, 3), '3': (1, 4), '4': (1, 5), '5': (1, 6), '6': (1, 7), '7': (1, 8), '8': (1, 9), '9': (1, 10), '0': (1, 11), 'DASH': (1, 12), 'EQUALS': (1, 13), 'BACKSPACE': (1, 14), 'INS': (1, 15), 'HOME': (1, 16), 'PAGEUP': (1, 17), 'NUMLK': (1, 18), 'NPFORWARDSLASH': (1, 19), 'NPASTERISK': (1, 20), 'NPDASH': (1, 21),
# Row 2
'M2': (2, 0), 'TAB': (2, 1), 'Q': (2, 2), 'W': (2, 3), 'E': (2, 4), 'R': (2, 5), 'T': (2, 6), 'Y': (2, 7), 'U': (2, 8), 'I': (2, 9), 'O': (2, 10), 'P': (2, 11), 'LEFTSQUAREBRACKET': (2, 12), 'RIGHTSQUAREBRACKET': (2, 13), 'DELETE': (2, 15), 'END': (2, 16), 'PAGEDOWN': (2, 17), 'NP7': (2, 18), 'NP8': (2, 19), 'NP9': (2, 20), 'NPPLUS': (2, 21),
# Row 3
'M3': (3, 0), 'CAPSLK': (3, 1), 'A': (3, 2), 'S': (3, 3), 'D': (3, 4), 'F': (3, 5), 'G': (3, 6), 'H': (3, 7), 'J': (3, 8), 'K': (3, 9), 'L': (3, 10), 'SEMICOLON': (3, 11), 'APOSTROPHE': (3, 12), 'POUNDSIGN': (3, 13), 'RETURN': (3, 14), 'NP4': (3, 18), 'NP5': (3, 19), 'NP6': (3, 20),
# Row 4
'M4': (4, 0), 'LEFTSHIFT': (4, 1), 'BACKSLASH': (4, 2), 'Z': (4, 3), 'X': (4, 4), 'C': (4, 5), 'V': (4, 6), 'B': (4, 7), 'N': (4, 8), 'M': (4, 9), 'COMMA': (4, 10), 'PERIOD': (4, 11), 'FORWARDSLASH': (4, 12), 'JP2': (4, 13), 'RIGHTSHIFT': (4, 14), 'UPARROW': (4, 16), 'NP1': (4, 18), 'NP2': (4, 19), 'NP3': (4, 20), 'ENTER': (4, 21),
# Row 5
'M5': (5, 0), 'LEFTCTRL': (5, 1), 'SUPER': (5, 2), 'LEFTALT': (5, 3), 'SPACE': (5, 7), 'RIGHTALT': (5, 11), 'FN': (5, 12), 'CTXMENU': (5, 13), 'RIGHTCTRL': (5, 14), 'LEFTARROW': (5, 15), 'DOWNARROW': (5, 16), 'RIGHTARROW': (5, 17), 'NP0': (5, 19), 'NPPERIOD': (5, 20),
# Additional mappings
'MACROMODE': (0, 11), 'GAMEMODE': (0, 12), 'MUTE': (0, 3), 'VOL_DOWN': (0, 4), 'VOL_UP': (0, 5), 'MEDIA_BACK': (0, 7), 'MEDIA_PLAY': (0, 8), 'MEDIA_FORWARD': (0, 9), 'BRIGHTNESSDOWN': (0, 13), 'BRIGHTNESSUP': (0, 14)
}
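# Illustrative reading of the table above: KEY_MAPPING['A'] == (3, 2) means the 'A' key
# sits at row 3, column 2 of the keyboard LED matrix; EVENT_MAPPING further below maps
# evdev keycodes (e.g. 30) to these key names.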
# 0 1 2 3 4 5 6 7
# ------------------------------------
# 0 | 1 2 3 4 5 UP
# 1 | 6 7 8 9 10 LEFT RIGHT
# 2 | 11 12 13 14 15 TMB DOWN MS
#
#
#
TARTARUS_KEY_MAPPING = {
'1': (0, 0),
'2': (0, 1),
'3': (0, 2),
'4': (0, 3),
'5': (0, 4),
'UP': (0, 6),
'6': (1, 0),
'7': (1, 1),
'8': (1, 2),
'9': (1, 3),
'10': (1, 4),
'LEFT': (1, 5),
'RIGHT': (1, 7),
'11': (2, 0),
'12': (2, 1),
'13': (2, 2),
'14': (2, 3),
'15': (2, 4),
'THUMB': (2, 5),
'DOWN': (2, 6),
'MODE_SWITCH': (2, 7)
}
# 0 1 2 3 4 5 6 7
# ------------------------------------
# 0 | 1 2 3 4 5 UP
# 1 | 6 7 8 9 10 LEFT RIGHT
# 2 | 11 12 13 14 15 TMB DOWN MS
# 3 | 16 17 18 19 20
#
ORBWEAVER_KEY_MAPPING = {
'1': (0, 0),
'2': (0, 1),
'3': (0, 2),
'4': (0, 3),
'5': (0, 4),
'UP': (0, 6),
'6': (1, 0),
'7': (1, 1),
'8': (1, 2),
'9': (1, 3),
'10': (1, 4),
'LEFT': (1, 5),
'RIGHT': (1, 7),
'11': (2, 0),
'12': (2, 1),
'13': (2, 2),
'14': (2, 3),
'15': (2, 4),
'THUMB': (2, 5),
'DOWN': (2, 6),
'MODE_SWITCH': (2, 7),
'16': (3, 0),
'17': (3, 1),
'18': (3, 2),
'19': (3, 3),
'20': (3, 4)
}
# Naga hex buttons are normal 1-7 keys
# 0 1 2 3 4
# --------------
# 0 | 1 2
# 1 | 4 3
# 2 | 5 7
# 3 | 6
NAGA_HEX_V2_KEY_MAPPING = {
'1': (0, 1), '2': (0, 3),
'4': (1, 0), '3': (1, 4),
'5': (2, 1), '7': (2, 3),
'6': (3, 2)
}
#
EVENT_MAPPING = {
1: 'ESC', 2: '1', 3: '2', 4: '3', 5: '4', 6: '5', 7: '6', 8: '7', 9: '8',
10: '9', 11: '0', 12: 'DASH', 13: 'EQUALS', 14: 'BACKSPACE', 15: 'TAB', 16: 'Q', 17: 'W', 18: 'E', 19: 'R',
20: 'T', 21: 'Y', 22: 'U', 23: 'I', 24: 'O', 25: 'P', 26: 'LEFTSQUAREBRACKET', 27: 'RIGHTSQUAREBRACKET', 28: 'RETURN', 29: 'LEFTCTRL',
30: 'A', 31: 'S', 32: 'D', 33: 'F', 34: 'G', 35: 'H', 36: 'J', 37: 'K', 38: 'L', 39: 'SEMICOLON',
40: 'APOSTROPHE', 41: 'BACKTICK', 42: 'LEFTSHIFT', 43: 'POUNDSIGN', 44: 'Z', 45: 'X', 46: 'C', 47: 'V', 48: 'B', 49: 'N',
50: 'M', 51: 'COMMA', 52: 'PERIOD', 53: 'FORWARDSLASH', 54: 'RIGHTSHIFT', 55: 'NPASTERISK', 56: 'LEFTALT', 57: 'SPACE',
58: 'CAPSLK', 59: 'F1',
60: 'F2', 61: 'F3', 62: 'F4', 63: 'F5', 64: 'F6', 65: 'F7', 66: 'F8', 67: 'F9', 68: 'F10', 69: 'NUMLK',
70: 'SCRLK', 71: 'NP7', 72: 'NP8', 73: 'NP9', 74: 'NPDASH', 75: 'NP4', 76: 'NP5', 77: 'NP6', 78: 'NPPLUS', 79: 'NP1',
80: 'NP2', 81: 'NP3', 82: 'NP0', 83: 'NPPERIOD', 86: 'BACKSLASH', 87: 'F11', 88: 'F12',
96: 'ENTER', 97: 'RIGHTCTRL', 98: 'NPFORWARDSLASH', 99: 'PRTSCR',
100: 'RIGHTALT', 102: 'HOME', 103: 'UPARROW', 104: 'PAGEUP', 105: 'LEFTARROW', 106: 'RIGHTARROW', 107: 'END', 108: 'DOWNARROW', 109: 'PAGEDOWN', 110: 'INS',
111: 'DELETE', 113: 'MUTE', 114: 'VOL_DOWN', 115: 'VOL_UP', 119: 'PAUSE',
125: 'SUPER', 127: 'CTXMENU',
163: 'MEDIA_FORWARD', 164: 'MEDIA_PLAY', 165: 'MEDIA_BACK',
183: 'M1', 184: 'M2', 185: 'M3', 186: 'M4', 187: 'M5', 188: 'MACROMODE', 189: 'GAMEMODE', 190: 'BRIGHTNESSDOWN', 194: 'BRIGHTNESSUP'
}
TARTARUS_EVENT_MAPPING = {
15: '1',
16: '2',
17: '3',
18: '4',
19: '5',
58: '6',
30: '7',
31: '8',
32: '9',
33: '10',
42: '11',
44: '12',
45: '13',
46: '14',
47: '15',
56: 'MODE_SWITCH',
57: 'THUMB',
103: 'UP',
105: 'LEFT',
106: 'RIGHT',
108: 'DOWN',
}
ORBWEAVER_EVENT_MAPPING = {
41: '1',
2: '2',
3: '3',
4: '4',
5: '5',
15: '6',
16: '7',
17: '8',
18: '9',
19: '10',
58: '11',
30: '12',
31: '13',
32: '14',
33: '15',
42: '16',
44: '17',
45: '18',
46: '19',
47: '20',
56: 'MODE_SWITCH',
57: 'THUMB',
103: 'UP',
105: 'LEFT',
106: 'RIGHT',
108: 'DOWN',
}
NAGA_HEX_V2_EVENT_MAPPING = {
2: '1', 3: '2', 4: '3', 5: '4', 6: '5', 7: '6', 8: '7'
}
XTE_MAPPING = {
'ESC': 'Escape',
'DASH': 'minus',
'EQUALS': 'equal',
'BACKSPACE': 'BackSpace',
'TAB': 'Tab',
'LEFTSQUAREBRACKET': 'bracketleft',
'RIGHTSQUAREBRACKET': 'bracketright',
'RETURN': 'Return',
'LEFTCTRL': 'Control_L',
'SEMICOLON': 'semicolon',
'APOSTROPHE': 'apostrophe',
'BACKTICK': 'grave',
'LEFTSHIFT': 'Shift_L',
'POUNDSIGN': 'numbersign',
'COMMA': 'comma',
'PERIOD': 'period',
'FORWARDSLASH': 'slash',
'RIGHTSHIFT': 'Shift_R',
'NPASTERISK': 'KP_Multiply',
'LEFTALT': 'Alt_L',
'SPACE': 'space',
'CAPSLK': 'Caps_Lock',
'NUMLK': 'Num_Lock',
'SCRLK': 'Scroll_Lock',
'PAUSE': 'Pause',
'NP7': '7',
'NP8': '8',
'NP9': '9',
'NPDASH': 'KP_Subtract',
'NP4': '4',
'NP5': '5',
'NP6': '6',
'NPPLUS': 'KP_Add',
'NP1': '1',
'NP2': '2',
'NP3': '3',
'NP0': '0',
'PRTSCR': 'Print',
'NPPERIOD': 'KP_Decimal',
'BACKSLASH': 'backslash',
'ENTER': 'Enter',
'RIGHTCTRL': 'Control_r',
'NPFORWARDSLASH': 'KP_Divide',
'RIGHTALT': 'Alt_R',
'HOME': 'Home',
'UPARROW': 'Up',
'PAGEUP': 'Page_Up',
'LEFTARROW': 'Left',
'RIGHTARROW': 'Right',
'END': 'End',
'DOWNARROW': 'Down',
'PAGEDOWN': 'Page_Down',
'INS': 'Insert',
'DELETE': 'Delete',
'SUPER': 'Super_L',
'CTXMENU': 'Menu',
'M1': 'XF86Tools',
'M2': 'XF86Launch5',
'M3': 'XF86Launch6',
'M4': 'XF86Launch7',
'M5': 'XF86Launch8',
'FN': None,
'GAMEMODE': None,
'MACROMODE': None,
}
class KeyDoesNotExistError(Exception):
"""
Simple custom error
"""
pass
class NoBackupError(Exception):
pass
class RGB(object):
@staticmethod
def clamp(value):
"""
Clamp a value to 0-255
:param value: Value to be clamped
:type value: integer or float
:return: Integer in the range of 0-255
:rtype: int
"""
result = int(value)
if value > 255:
result = 255
elif value < 0:
result = 0
return result
def __init__(self, red=0, green=0, blue=0):
self._red = red
self._green = green
self._blue = blue
@property
def red(self):
"""
Getter for red element
:return: Red element
:rtype: int
"""
return self._red
@red.setter
def red(self, value):
"""
Setter for red value
:param value: Red value
:type value: int or float
"""
self._red = RGB.clamp(value)
@property
def green(self):
"""
Getter for green element
:return: Green element
:rtype: int
"""
return self._green
@green.setter
def green(self, value):
"""
Setter for green value
:param value: Green value
:type value: int or float
"""
self._green = RGB.clamp(value)
@property
def blue(self):
"""
Getter for blue element
:return: Blue element
:rtype: int
"""
return self._blue
@blue.setter
def blue(self, value):
"""
Setter for blue value
:param value: Blue value
:type value: int or float
"""
self._blue = RGB.clamp(value)
def set(self, colour_tuple):
"""
Sets all the colours at once
:param colour_tuple: Tuple of R,G,B elements
:type colour_tuple: tuple
"""
# Shortcut to clamp all parameters and assign to the 3 variables
self._red, self._green, self._blue = list(map(RGB.clamp, colour_tuple))
def get(self):
"""
Gets all the colours as a tuple
:return: RGB tuple
:rtype: tuple
"""
return self._red, self._green, self._blue
def __bytes__(self):
"""
Convert to bytes
:return: Byte string
        :rtype: bytes
"""
return bytes((self._red, self._green, self._blue))
def __repr__(self):
"""
String representation
:return: String
:rtype: str
"""
return "RGB Object (#{0:02X}{1:02X}{2:02X})".format(self._red, self._green, self._blue)
class KeyboardColour(object):
"""
Keyboard class which represents the colour state of the keyboard.
"""
@staticmethod
def gdk_colour_to_rgb(gdk_color):
"""
Converts GDK colour to (R,G,B) tuple
:param gdk_color: GDK colour
:type gdk_color: Gdk.Color or tuple
:return: Tuple of 3 ints
:rtype: tuple
"""
if isinstance(gdk_color, (list, tuple)):
return gdk_color
assert type(gdk_color) is Gdk.Color, "Is not of type Gdk.Color"
red = int(gdk_color.red_float * 255)
green = int(gdk_color.green_float * 255)
blue = int(gdk_color.blue_float * 255)
return red, green, blue
def __init__(self, rows, columns):
self.rows = rows
self.columns = columns
# Two-dimensional array to hold color data
self.colors = []
        # Backup of the colour state (populated by backup_configuration)
self.backup = None
# Initialize array with empty values
self.reset_rows()
def backup_configuration(self):
"""
Backs up the current configuration
"""
self.backup = KeyboardColour(self.rows, self.columns)
self.backup.get_from_total_binary(self.get_total_binary())
def restore_configuration(self):
"""
Restores the previous configuration
"""
if self.backup is None:
raise NoBackupError()
self.colors = self.backup.colors
self.backup = None
def get_rows_raw(self):
"""
Gets the raw representation of the rows
:return: Rows
:rtype: list
"""
return self.colors
def reset_rows(self):
"""
Reset the rows of the keyboard
"""
self.colors.clear()
for row in range(0, self.rows):
            # Create one RGB value per column
self.colors.append([RGB() for _ in range(0, self.columns)])
def set_key_colour(self, row, col, colour):
"""
Set the colour of a key
:param row: Row ID
:type row: int
:param col: Column ID
:type col: int
:param colour: Colour to set
:type colour: Gdk.Color or tuple
:raises KeyDoesNotExistError: If given key does not exist
"""
self.colors[row][col].set(KeyboardColour.gdk_colour_to_rgb(colour))
def get_key_colour(self, key):
"""
Get the colour of a key
:param key: Key to set the colour of
:type key: str
:raises KeyDoesNotExistError: If given key does not exist
"""
if key not in KEY_MAPPING:
raise KeyDoesNotExistError("The key \"{0}\" does not exist".format(key))
row_id, col_id = KEY_MAPPING[key]
return self.colors[row_id][col_id].get()
def reset_key(self, row, col):
"""
Reset the colour of a key
:param row: Row ID
:type row: int
:param col: Column ID
:type col: int
:raises KeyDoesNotExistError: If given key does not exist
"""
self.colors[row][col].set((0, 0, 0))
def get_row_binary(self, row_id):
"""
Gets the binary payload for a given row
:param row_id: Row ID
:type row_id: int
        :return: Byte payload: row ID byte, 0x00, a column-count byte, then one RGB triple per column
        :rtype: bytes
"""
assert isinstance(row_id, int), "Row ID is not an int"
payload = bytes([row_id, 0x00, len(self.colors[row_id]) - 1])
for rgb in self.colors[row_id]:
payload += bytes(rgb)
return payload
def get_total_binary(self):
"""
Gets the binary payload for the whole keyboard
        :return: Concatenation of the get_row_binary payloads for every row
        :rtype: bytes
"""
payload = b''
for row in range(0, self.rows):
payload += self.get_row_binary(row)
return payload
    def get_from_total_binary(self, binary_blob):
        """
        Load in a binary blob which is the output from get_total_binary
        :param binary_blob: Binary blob
        :type binary_blob: bytes
        """
        self.reset_rows()
        row_length = self.columns * 3
        for row_id in range(0, self.rows):
            # Skip the 3 header bytes (row ID, 0x00, column count) written by get_row_binary
            binary_blob = binary_blob[3:]
            for col_id, binary_rgb in enumerate([binary_blob[i:i + 3] for i in range(0, row_length, 3)]):
                rgb = struct.unpack('=BBB', binary_rgb)
                self.colors[row_id][col_id].set(rgb)
            binary_blob = binary_blob[row_length:]  # Skip past the current row's colour bytes
def get_keyboard_layout():
"""
Function to get the keyboard layout
I can see this becoming an ugly mess as it'll need to support multiple keyboard layouts which have different names
on different systems
:return: Keyboard layout
:rtype: str
"""
cmd = ['setxkbmap', '-query']
output = subprocess.check_output(cmd)
result = 'gb'
if output:
output = output.decode('utf-8').splitlines()
layout = 'gb'
variant = ''
for line in output:
if line.startswith('layout'):
layout = line.split(':', 1)[1].strip()
if layout.find(',') > -1:
layout = layout.split(',')[0]
elif line.startswith('variant'):
variant = line.split(':', 1)[1].strip().split(',')[0]
if 'latin9' in variant: # Removes some rubbish from ubuntu
variant = 'latin9'
if variant == '':
result = layout
else:
result = layout + '-' + variant
# If the user has an international layout variant ignore that part
result = result.replace('-altgr-intl', '')
return result
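# Illustrative sketch (not part of the original module): how the parsing above
# behaves on a typical `setxkbmap -query` output. The sample output below is an
# assumption used purely for demonstration.
def _demo_parse_setxkbmap():
    sample = (
        "rules:      evdev\n"
        "model:      pc105\n"
        "layout:     us,gb\n"
        "variant:    intl,\n"
    )
    layout, variant = 'gb', ''
    for line in sample.splitlines():
        if line.startswith('layout'):
            layout = line.split(':', 1)[1].strip().split(',')[0]
        elif line.startswith('variant'):
            variant = line.split(':', 1)[1].strip().split(',')[0]
    return layout if variant == '' else layout + '-' + variant  # -> 'us-intl'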
|
import logging
from bs4 import BeautifulSoup
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.rest.data import RestData
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_HEADERS,
CONF_NAME,
CONF_PASSWORD,
CONF_RESOURCE,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_ATTR = "attribute"
CONF_SELECT = "select"
CONF_INDEX = "index"
DEFAULT_NAME = "Web scrape"
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.string,
vol.Required(CONF_SELECT): cv.string,
vol.Optional(CONF_ATTR): cv.string,
vol.Optional(CONF_INDEX, default=0): cv.positive_int,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Web scrape sensor."""
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
method = "GET"
payload = None
headers = config.get(CONF_HEADERS)
verify_ssl = config.get(CONF_VERIFY_SSL)
select = config.get(CONF_SELECT)
attr = config.get(CONF_ATTR)
index = config.get(CONF_INDEX)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = HTTPDigestAuth(username, password)
else:
auth = HTTPBasicAuth(username, password)
else:
auth = None
rest = RestData(method, resource, auth, headers, payload, verify_ssl)
await rest.async_update()
if rest.data is None:
raise PlatformNotReady
async_add_entities(
[ScrapeSensor(rest, name, select, attr, index, value_template, unit)], True
)
class ScrapeSensor(Entity):
"""Representation of a web scrape sensor."""
def __init__(self, rest, name, select, attr, index, value_template, unit):
"""Initialize a web scrape sensor."""
self.rest = rest
self._name = name
self._state = None
self._select = select
self._attr = attr
self._index = index
self._value_template = value_template
self._unit_of_measurement = unit
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the device."""
return self._state
def _extract_value(self):
"""Parse the html extraction in the executor."""
raw_data = BeautifulSoup(self.rest.data, "html.parser")
_LOGGER.debug(raw_data)
if self._attr is not None:
value = raw_data.select(self._select)[self._index][self._attr]
else:
tag = raw_data.select(self._select)[self._index]
if tag.name in ("style", "script", "template"):
value = tag.string
else:
value = tag.text
_LOGGER.debug(value)
return value
async def async_update(self):
"""Get the latest data from the source and updates the state."""
await self.rest.async_update()
if self.rest.data is None:
_LOGGER.error("Unable to retrieve data for %s", self.name)
return
try:
value = await self.hass.async_add_executor_job(self._extract_value)
except IndexError:
_LOGGER.error("Unable to extract data from HTML for %s", self.name)
return
if self._value_template is not None:
self._state = self._value_template.async_render_with_possible_json_value(
value, None
)
else:
self._state = value
async def async_will_remove_from_hass(self):
"""Shutdown the session."""
await self.rest.async_remove()
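# Minimal sketch (not part of the platform): the same select/index/attr logic
# used by ScrapeSensor._extract_value, applied to a hardcoded document. The
# HTML snippet and CSS selector here are made up for illustration.
def _demo_extract(select=".temp", index=0, attr=None):
    html = '<p class="temp" data-unit="C">21.5</p>'
    soup = BeautifulSoup(html, "html.parser")
    tag = soup.select(select)[index]
    if attr is not None:
        return tag[attr]  # e.g. _demo_extract(attr="data-unit") -> "C"
    return tag.text       # default -> "21.5"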
|
import diamond.collector
import os
import re
import subprocess
try:
import json
except ImportError:
import simplejson as json
# Setup a set of VARs
# Set this for use in curl request
header = '"Content-Type: application/json"'
operational_type = [
'app',
'web',
'jvm',
'thread_pool',
]
web_stats = [
'errorCount',
'requestCount',
'bytesReceived',
'bytesSent',
'processingTime',
]
memory_types = [
'init',
'used',
'committed',
'max',
]
buffer_pool_types = [
'count',
'memory-used',
]
thread_types = [
'thread-count',
'daemon-thread-count'
]
memory_topics = [
'heap-memory-usage',
'non-heap-memory-usage',
]
gc_types = [
'collection-count',
'collection-time',
]
class JbossApiCollector(diamond.collector.Collector):
def process_config(self):
super(JbossApiCollector, self).process_config()
if self.config['hosts'].__class__.__name__ != 'list':
self.config['hosts'] = [self.config['hosts']]
# get the params for each host
if 'host' in self.config:
hoststr = "%s:%s@%s:%s:%s" % (
self.config['user'],
self.config['password'],
self.config['host'],
self.config['port'],
self.config['proto'],
)
self.config['hosts'].append(hoststr)
if type(self.config['connector_options']) is not list:
self.config['connector_options'] = [
self.config['connector_options']]
def get_default_config_help(self):
# Need to update this when done to help explain details when running
# diamond-setup.
config_help = super(JbossApiCollector, self).get_default_config_help()
config_help.update({
'curl_bin': 'Path to system curl executable',
'hosts': 'List of hosts to collect from. Format is yourusername:yourpassword@host:port:proto', # NOQA
'app_stats': 'Collect application pool stats',
'jvm_memory_pool_stats': 'Collect JVM memory-pool stats',
'jvm_buffer_pool_stats': 'Collect JVM buffer-pool stats',
'jvm_memory_stats': 'Collect JVM basic memory stats',
'jvm_gc_stats': 'Collect JVM garbage-collector stats',
'jvm_thread_stats': 'Collect JVM thread stats',
'thread_pool_stats': 'Collect JBoss thread pool stats',
'connector_stats': 'Collect HTTP and AJP Connector stats',
'connector_options': 'Types of connectors to collect'
})
return config_help
def get_default_config(self):
# Initialize default config
config = super(JbossApiCollector, self).get_default_config()
config.update({
'path': 'jboss',
'curl_bin': '/usr/bin/curl',
'connect_timeout': '4',
'ssl_options': '--sslv3 -k',
'curl_options': '-s --digest -L ',
'interface_regex': '^(.+?)\.', # matches up to first "."
'hosts': [],
'app_stats': 'True',
'connector_options': ['http', 'ajp'],
'jvm_memory_pool_stats': 'True',
'jvm_buffer_pool_stats': 'True',
'jvm_memory_stats': 'True',
'jvm_gc_stats': 'True',
'jvm_thread_stats': 'True',
'connector_stats': 'True',
'thread_pool_stats': 'True'
})
# Return default config
return config
def get_stats(self, current_host, current_port, current_proto, current_user,
current_pword):
if not os.access(self.config['curl_bin'], os.X_OK):
self.log.error("%s is not executable or does not exist.",
self.config['curl_bin'])
# Check if there is a RegEx to perform on the interface names
if self.config['interface_regex'] != '':
interface = self.string_regex(self.config['interface_regex'],
current_host)
else:
# Clean up any possible extra "."'s in the interface, keeps
# graphite from creating directories
interface = self.string_fix(current_host)
for op_type in operational_type:
output = self.get_data(op_type, current_host, current_port,
current_proto, current_user, current_pword)
if op_type == 'app' and self.config['app_stats'] == 'True':
if output:
# Grab the pool stats for each Instance
for instance in output['result']['data-source']:
datasource = output['result']['data-source'][instance]
for metric in datasource['statistics']['pool']:
metricName = '%s.%s.%s.statistics.pool.%s' % (
interface, op_type, instance, metric)
metricValue = datasource[
'statistics']['pool'][metric]
self.publish(metricName, float(metricValue))
if (op_type == 'thread_pool' and
self.config['thread_pool_stats'] == 'True' and output):
# Grab the stats from each thread pool type
for pool_type in output['result']:
if output['result'][pool_type]:
pool_types = output['result'][pool_type]
for pool in pool_types:
for metric in pool_types[pool]:
metricName = '%s.%s.%s.%s.statistics.%s' % (
interface, op_type, pool_type,
pool, metric)
metricValue = pool_types[pool][metric]
if self.is_number(metricValue):
self.publish(metricName, float(metricValue))
if op_type == 'web' and self.config['connector_stats'] == 'True':
if output:
# Grab http and ajp info (make these options)
for c_type in self.config['connector_options']:
for metric in web_stats:
metricName = ('%s.%s.connector.%s.%s' %
(interface,
op_type,
c_type,
metric))
connector = output['result']['connector']
metricValue = connector[c_type][metric]
self.publish(metricName, float(metricValue))
if op_type == 'jvm':
if output:
if self.config['jvm_memory_pool_stats'] == 'True':
# Grab JVM memory pool stats
mempool = output['result']['type']['memory-pool']
for pool_name in mempool['name']:
for metric in memory_types:
metricName = ('%s.%s.%s.%s.%s.%s' %
(interface,
op_type,
'memory-pool',
pool_name,
'usage',
metric))
metricValue = mempool['name'][pool_name][
'usage'][metric]
self.publish(metricName, float(metricValue))
# Grab JVM buffer-pool stats
if self.config['jvm_buffer_pool_stats'] == 'True':
bufferpool = output['result']['type']['buffer-pool']
for pool in bufferpool['name']:
for metric in buffer_pool_types:
metricName = ('%s.%s.%s.%s.%s' %
(interface,
op_type,
'buffer-pool',
pool,
metric))
metricValue = bufferpool['name'][pool][metric]
self.publish(metricName, float(metricValue))
# Grab basic memory stats
if self.config['jvm_memory_stats'] == 'True':
for mem_type in memory_topics:
for metric in memory_types:
metricName = ('%s.%s.%s.%s.%s' %
(interface,
op_type,
'memory',
mem_type,
metric))
memory = output['result']['type']['memory']
metricValue = memory[mem_type][metric]
self.publish(metricName, float(metricValue))
# Grab Garbage collection stats
if self.config['jvm_gc_stats'] == 'True':
garbage = output['result']['type']['garbage-collector']
for gc_name in garbage['name']:
for metric in gc_types:
metricName = ('%s.%s.%s.%s.%s' %
(interface,
op_type,
'garbage-collector',
gc_name,
metric))
metricValue = garbage['name'][gc_name][metric]
self.publish(metricName, float(metricValue))
# Grab threading stats
if self.config['jvm_thread_stats'] == 'True':
for metric in thread_types:
metricName = ('%s.%s.%s.%s' %
(interface,
op_type,
'threading',
metric))
threading = output['result']['type']['threading']
metricValue = threading[metric]
self.publish(metricName, float(metricValue))
return True
def get_data(self, op_type, current_host, current_port, current_proto,
current_user, current_pword):
output = {}
if op_type == 'app':
data = ('{"operation":"read-resource", ' +
'"include-runtime":"true", ' +
'"recursive":"true", ' +
'"address":["subsystem","datasources"]}')
if op_type == 'thread_pool':
data = ('{"operation":"read-resource", "include-runtime":"true", ' +
'"recursive":"true" , "address":["subsystem","threads"]}')
if op_type == 'web':
data = ('{"operation":"read-resource", ' +
'"include-runtime":"true", ' +
'"recursive":"true", ' +
'"address":["subsystem","web"]}')
if op_type == 'jvm':
data = ('{"operation":"read-resource", ' +
'"include-runtime":"true", ' +
'"recursive":"true", ' +
'"address":["core-service","platform-mbean"]}')
the_cmd = (("%s --connect-timeout %s %s %s %s://%s:%s/management " +
"--header %s -d '%s' -u %s:%s") % (
self.config['curl_bin'], self.config['connect_timeout'],
self.config['ssl_options'], self.config['curl_options'],
current_proto, current_host, current_port, header, data,
current_user, current_pword))
try:
attributes = subprocess.Popen(the_cmd, shell=True,
stdout=subprocess.PIPE
).communicate()[0]
output = json.loads(attributes)
except Exception as e:
self.log.error("JbossApiCollector: There was an exception %s", e)
output = ''
return output
def is_number(self, value):
return (isinstance(value, (int, long, float)) and
not isinstance(value, bool))
def string_fix(self, s):
return re.sub(r"[^a-zA-Z0-9_]", "_", s)
def string_regex(self, pattern, s):
tmp_result = re.match(pattern, s)
return tmp_result.group(1)
def collect(self):
for host in self.config['hosts']:
matches = re.search(
'^([^:]*):([^@]*)@([^:]*):([^:]*):?(.*)', host)
if not matches:
continue
current_host = matches.group(3)
current_port = int(matches.group(4))
current_proto = matches.group(5)
current_user = matches.group(1)
current_pword = matches.group(2)
# Call get_stats for each instance of jboss
self.get_stats(current_host, current_port, current_proto,
current_user, current_pword)
return True
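# Illustration only (not part of the collector): how the hosts string format
# "user:password@host:port:proto" documented above is split by the regex used
# in collect(). The credentials and hostname below are placeholders.
def _demo_parse_host(host='admin:secret@jboss01.example.com:9990:http'):
    matches = re.search('^([^:]*):([^@]*)@([^:]*):([^:]*):?(.*)', host)
    user, password, hostname, port, proto = matches.groups()
    return hostname, int(port), proto, user
    # -> ('jboss01.example.com', 9990, 'http', 'admin')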
|
import os
import importlib
import errno
import pkg_resources
import socket
from logging import Formatter, StreamHandler
from logging.handlers import RotatingFileHandler
from flask import Flask
from flask_replicated import FlaskReplicated
import logmatic
from lemur.certificates.hooks import activate_debug_dump
from lemur.common.health import mod as health
from lemur.extensions import db, migrate, principal, smtp_mail, metrics, sentry, cors
DEFAULT_BLUEPRINTS = (health,)
API_VERSION = 1
def create_app(app_name=None, blueprints=None, config=None):
"""
Lemur application factory
:param config:
:param app_name:
:param blueprints:
:return:
"""
if not blueprints:
blueprints = DEFAULT_BLUEPRINTS
else:
blueprints = blueprints + DEFAULT_BLUEPRINTS
if not app_name:
app_name = __name__
app = Flask(app_name)
configure_app(app, config)
configure_blueprints(app, blueprints)
configure_extensions(app)
configure_logging(app)
configure_database(app)
install_plugins(app)
@app.teardown_appcontext
def teardown(exception=None):
if db.session:
db.session.remove()
return app
def from_file(file_path, silent=False):
"""
    Updates the values in the config from a Python file. This function
    behaves as if the file was imported as a module.
:param file_path:
:param silent:
"""
module_spec = importlib.util.spec_from_file_location("config", file_path)
d = importlib.util.module_from_spec(module_spec)
try:
with open(file_path) as config_file:
exec( # nosec: config file safe
compile(config_file.read(), file_path, "exec"), d.__dict__
)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = "Unable to load configuration file (%s)" % e.strerror
raise
return d
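# Hedged example (not part of Lemur): demonstrates what from_file() returns for
# a plain Python settings file. The settings and the temporary path are made up.
def _demo_from_file():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as fh:
        fh.write("DEBUG = False\nSQLALCHEMY_DATABASE_URI = 'sqlite://'\n")
        path = fh.name
    settings = from_file(path)
    return settings.DEBUG, settings.SQLALCHEMY_DATABASE_URI  # (False, 'sqlite://')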
def configure_app(app, config=None):
"""
Different ways of configuration
:param app:
:param config:
:return:
"""
# respect the config first
if config and config != "None":
app.config["CONFIG_PATH"] = config
app.config.from_object(from_file(config))
else:
try:
app.config.from_envvar("LEMUR_CONF")
except RuntimeError:
# look in default paths
if os.path.isfile(os.path.expanduser("~/.lemur/lemur.conf.py")):
app.config.from_object(
from_file(os.path.expanduser("~/.lemur/lemur.conf.py"))
)
else:
app.config.from_object(
from_file(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"default.conf.py",
)
)
)
# we don't use this
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
def configure_extensions(app):
"""
Attaches and configures any needed flask extensions
to our app.
:param app:
"""
db.init_app(app)
migrate.init_app(app, db)
principal.init_app(app)
smtp_mail.init_app(app)
metrics.init_app(app)
sentry.init_app(app)
if app.config["CORS"]:
app.config["CORS_HEADERS"] = "Content-Type"
cors.init_app(
app,
resources=r"/api/*",
headers="Content-Type",
origin="*",
supports_credentials=True,
)
def configure_blueprints(app, blueprints):
"""
We prefix our APIs with their given version so that we can support
multiple concurrent API versions.
:param app:
:param blueprints:
"""
for blueprint in blueprints:
app.register_blueprint(blueprint, url_prefix="/api/{0}".format(API_VERSION))
def configure_database(app):
if app.config.get("SQLALCHEMY_ENABLE_FLASK_REPLICATED"):
FlaskReplicated(app)
def configure_logging(app):
"""
Sets up application wide logging.
:param app:
"""
handler = RotatingFileHandler(
app.config.get("LOG_FILE", "lemur.log"), maxBytes=10000000, backupCount=100
)
handler.setFormatter(
Formatter(
"%(asctime)s %(levelname)s: %(message)s " "[in %(pathname)s:%(lineno)d]"
)
)
if app.config.get("LOG_JSON", False):
handler.setFormatter(
logmatic.JsonFormatter(extra={"hostname": socket.gethostname()})
)
handler.setLevel(app.config.get("LOG_LEVEL", "DEBUG"))
app.logger.setLevel(app.config.get("LOG_LEVEL", "DEBUG"))
app.logger.addHandler(handler)
stream_handler = StreamHandler()
stream_handler.setLevel(app.config.get("LOG_LEVEL", "DEBUG"))
app.logger.addHandler(stream_handler)
if app.config.get("DEBUG_DUMP", False):
activate_debug_dump()
def install_plugins(app):
"""
Installs new issuers that are not currently bundled with Lemur.
:param app:
:return:
"""
from lemur.plugins import plugins
from lemur.plugins.base import register
# entry_points={
# 'lemur.plugins': [
# 'verisign = lemur_verisign.plugin:VerisignPlugin'
# ],
# },
for ep in pkg_resources.iter_entry_points("lemur.plugins"):
try:
plugin = ep.load()
except Exception:
import traceback
app.logger.error(
"Failed to load plugin %r:\n%s\n" % (ep.name, traceback.format_exc())
)
else:
register(plugin)
# ensure that we have some way to notify
with app.app_context():
slug = app.config.get("LEMUR_DEFAULT_NOTIFICATION_PLUGIN", "email-notification")
try:
plugins.get(slug)
except KeyError:
raise Exception(
"Unable to location notification plugin: {slug}. Ensure that "
"LEMUR_DEFAULT_NOTIFICATION_PLUGIN is set to a valid and installed notification plugin.".format(
slug=slug
)
)
|
import codecs
import collections
import logging
import os
import os.path as P
import pathlib
import urllib.parse
import warnings
import sys
#
# This module defines a function called smart_open so we cannot use
# smart_open.submodule to reference the submodules.
#
import smart_open.local_file as so_file
from smart_open import compression
from smart_open import doctools
from smart_open import transport
#
# For backwards compatibility and keeping old unit tests happy.
#
from smart_open.compression import register_compressor # noqa: F401
from smart_open.utils import check_kwargs as _check_kwargs # noqa: F401
from smart_open.utils import inspect_kwargs as _inspect_kwargs # noqa: F401
logger = logging.getLogger(__name__)
SYSTEM_ENCODING = sys.getdefaultencoding()
def _sniff_scheme(uri_as_string):
"""Returns the scheme of the URL only, as a string."""
#
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
# no protocol given => assume a local file
#
if os.name == 'nt' and '://' not in uri_as_string:
uri_as_string = 'file://' + uri_as_string
return urllib.parse.urlsplit(uri_as_string).scheme
def parse_uri(uri_as_string):
"""
Parse the given URI from a string.
Parameters
----------
uri_as_string: str
The URI to parse.
Returns
-------
collections.namedtuple
The parsed URI.
Notes
-----
smart_open/doctools.py magic goes here
"""
scheme = _sniff_scheme(uri_as_string)
submodule = transport.get_transport(scheme)
as_dict = submodule.parse_uri(uri_as_string)
#
# The conversion to a namedtuple is just to keep the old tests happy while
# I'm still refactoring.
#
Uri = collections.namedtuple('Uri', sorted(as_dict.keys()))
return Uri(**as_dict)
#
# To keep old unit tests happy while I'm refactoring.
#
_parse_uri = parse_uri
_builtin_open = open
def open(
uri,
mode='r',
buffering=-1,
encoding=None,
errors=None,
newline=None,
closefd=True,
opener=None,
ignore_ext=False,
transport_params=None,
):
r"""Open the URI object, returning a file-like object.
The URI is usually a string in a variety of formats.
For a full list of examples, see the :func:`parse_uri` function.
The URI may also be one of:
- an instance of the pathlib.Path class
- a stream (anything that implements io.IOBase-like functionality)
Parameters
----------
uri: str or object
The object to open.
mode: str, optional
        Mimics built-in open parameter of the same name.
buffering: int, optional
        Mimics built-in open parameter of the same name.
encoding: str, optional
        Mimics built-in open parameter of the same name.
errors: str, optional
        Mimics built-in open parameter of the same name.
newline: str, optional
        Mimics built-in open parameter of the same name.
closefd: boolean, optional
        Mimics built-in open parameter of the same name. Ignored.
opener: object, optional
        Mimics built-in open parameter of the same name. Ignored.
ignore_ext: boolean, optional
Disable transparent compression/decompression based on the file extension.
transport_params: dict, optional
Additional parameters for the transport layer (see notes below).
Returns
-------
A file-like object.
Notes
-----
smart_open has several implementations for its transport layer (e.g. S3, HTTP).
Each transport layer has a different set of keyword arguments for overriding
default behavior. If you specify a keyword argument that is *not* supported
by the transport layer being used, smart_open will ignore that argument and
log a warning message.
smart_open/doctools.py magic goes here
See Also
--------
- `Standard library reference <https://docs.python.org/3.7/library/functions.html#open>`__
- `smart_open README.rst
<https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst>`__
"""
logger.debug('%r', locals())
if not isinstance(mode, str):
raise TypeError('mode should be a string')
if transport_params is None:
transport_params = {}
fobj = _shortcut_open(
uri,
mode,
ignore_ext=ignore_ext,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
if fobj is not None:
return fobj
#
# This is a work-around for the problem described in Issue #144.
# If the user has explicitly specified an encoding, then assume they want
# us to open the destination in text mode, instead of the default binary.
#
# If we change the default mode to be text, and match the normal behavior
# of Py2 and 3, then the above assumption will be unnecessary.
#
if encoding is not None and 'b' in mode:
mode = mode.replace('b', '')
if isinstance(uri, pathlib.Path):
uri = str(uri)
explicit_encoding = encoding
encoding = explicit_encoding if explicit_encoding else SYSTEM_ENCODING
#
# This is how we get from the filename to the end result. Decompression is
# optional, but it always accepts bytes and returns bytes.
#
# Decoding is also optional, accepts bytes and returns text. The diagram
# below is for reading, for writing, the flow is from right to left, but
# the code is identical.
#
# open as binary decompress? decode?
# filename ---------------> bytes -------------> bytes ---------> text
# binary decompressed decode
#
try:
binary_mode = _get_binary_mode(mode)
except ValueError as ve:
raise NotImplementedError(ve.args[0])
binary = _open_binary_stream(uri, binary_mode, transport_params)
if ignore_ext:
decompressed = binary
else:
decompressed = compression.compression_wrapper(binary, binary_mode)
if 'b' not in mode or explicit_encoding is not None:
decoded = _encoding_wrapper(decompressed, mode, encoding=encoding, errors=errors)
else:
decoded = decompressed
return decoded
def _get_binary_mode(mode_str):
#
# https://docs.python.org/3/library/functions.html#open
#
# The order of characters in the mode parameter appears to be unspecified.
# The implementation follows the examples, just to be safe.
#
mode = list(mode_str)
binmode = []
if 't' in mode and 'b' in mode:
raise ValueError("can't have text and binary mode at once")
counts = [mode.count(x) for x in 'rwa']
if sum(counts) > 1:
raise ValueError("must have exactly one of create/read/write/append mode")
def transfer(char):
binmode.append(mode.pop(mode.index(char)))
if 'a' in mode:
transfer('a')
elif 'w' in mode:
transfer('w')
elif 'r' in mode:
transfer('r')
else:
raise ValueError(
"Must have exactly one of create/read/write/append "
"mode and at most one plus"
)
if 'b' in mode:
transfer('b')
elif 't' in mode:
mode.pop(mode.index('t'))
binmode.append('b')
else:
binmode.append('b')
if '+' in mode:
transfer('+')
#
# There shouldn't be anything left in the mode list at this stage.
# If there is, then either we've missed something and the implementation
# of this function is broken, or the original input mode is invalid.
#
if mode:
raise ValueError('invalid mode: %r' % mode_str)
return ''.join(binmode)
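# Illustrative sketch (not part of smart_open): the binary-mode strings that
# _get_binary_mode() above produces for a few common text/binary mode strings.
def _demo_binary_modes():
    return {m: _get_binary_mode(m) for m in ('r', 'rt', 'rb', 'w+', 'ab')}
    # -> {'r': 'rb', 'rt': 'rb', 'rb': 'rb', 'w+': 'wb+', 'ab': 'ab'}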
def _shortcut_open(
uri,
mode,
ignore_ext=False,
buffering=-1,
encoding=None,
errors=None,
newline=None,
):
"""Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:returns: The opened file
:rtype: file
"""
if not isinstance(uri, str):
return None
scheme = _sniff_scheme(uri)
if scheme not in (transport.NO_SCHEME, so_file.SCHEME):
return None
local_path = so_file.extract_local_path(uri)
_, extension = P.splitext(local_path)
if extension in compression.get_supported_extensions() and not ignore_ext:
return None
open_kwargs = {}
if encoding is not None:
open_kwargs['encoding'] = encoding
mode = mode.replace('b', '')
if newline is not None:
open_kwargs['newline'] = newline
#
# binary mode of the builtin/stdlib open function doesn't take an errors argument
#
if errors and 'b' not in mode:
open_kwargs['errors'] = errors
return _builtin_open(local_path, mode, buffering=buffering, **open_kwargs)
def _open_binary_stream(uri, mode, transport_params):
"""Open an arbitrary URI in the specified binary mode.
Not all modes are supported for all protocols.
:arg uri: The URI to open. May be a string, or something else.
:arg str mode: The mode to open with. Must be rb, wb or ab.
    :arg transport_params: Keyword arguments for the transport layer.
:returns: A named file object
:rtype: file-like object with a .name attribute
"""
if mode not in ('rb', 'rb+', 'wb', 'wb+', 'ab', 'ab+'):
#
# This should really be a ValueError, but for the sake of compatibility
# with older versions, which raise NotImplementedError, we do the same.
#
raise NotImplementedError('unsupported mode: %r' % mode)
if hasattr(uri, 'read'):
# simply pass-through if already a file-like
# we need to return something as the file name, but we don't know what
# so we probe for uri.name (e.g., this works with open() or tempfile.NamedTemporaryFile)
# if the value ends with COMPRESSED_EXT, we will note it in compression_wrapper()
# if there is no such an attribute, we return "unknown" - this
# effectively disables any compression
if not hasattr(uri, 'name'):
uri.name = getattr(uri, 'name', 'unknown')
return uri
if not isinstance(uri, str):
raise TypeError("don't know how to handle uri %s" % repr(uri))
scheme = _sniff_scheme(uri)
submodule = transport.get_transport(scheme)
fobj = submodule.open_uri(uri, mode, transport_params)
if not hasattr(fobj, 'name'):
fobj.name = uri
return fobj
def _encoding_wrapper(fileobj, mode, encoding=None, errors=None):
"""Decode bytes into text, if necessary.
If mode specifies binary access, does nothing, unless the encoding is
specified. A non-null encoding implies text mode.
:arg fileobj: must quack like a filehandle object.
:arg str mode: is the mode which was originally requested by the user.
:arg str encoding: The text encoding to use. If mode is binary, overrides mode.
:arg str errors: The method to use when handling encoding/decoding errors.
:returns: a file object
"""
logger.debug('encoding_wrapper: %r', locals())
#
# If the mode is binary, but the user specified an encoding, assume they
    # want text. If we don't make this assumption and instead ignore the
    # encoding and return bytes, smart_open's behavior will diverge from the
    # built-in open:
#
# open(filename, encoding='utf-8') returns a text stream in Py3
# smart_open(filename, encoding='utf-8') would return a byte stream
# without our assumption, because the default mode is rb.
#
if 'b' in mode and encoding is None:
return fileobj
if encoding is None:
encoding = SYSTEM_ENCODING
kw = {'errors': errors} if errors else {}
if mode[0] == 'r' or mode.endswith('+'):
fileobj = codecs.getreader(encoding)(fileobj, **kw)
if mode[0] in ('w', 'a') or mode.endswith('+'):
fileobj = codecs.getwriter(encoding)(fileobj, **kw)
return fileobj
class patch_pathlib(object):
"""Replace `Path.open` with `smart_open.open`"""
def __init__(self):
self.old_impl = _patch_pathlib(open)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
_patch_pathlib(self.old_impl)
def _patch_pathlib(func):
"""Replace `Path.open` with `func`"""
old_impl = pathlib.Path.open
pathlib.Path.open = func
return old_impl
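# Hedged usage sketch (not part of the library): while the patch_pathlib
# context manager is active, Path.open is routed through the module-level
# open() defined above, and the original implementation is restored on exit.
# The file name used here is made up.
def _demo_patch_pathlib(path='example.txt'):
    pathlib.Path(path).write_text('hello')
    with patch_pathlib():
        # Path.open now calls smart_open's open().
        text = pathlib.Path(path).open('r').read()
    return text  # -> 'hello'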
def smart_open(
uri,
mode='rb',
buffering=-1,
encoding=None,
errors=None,
newline=None,
closefd=True,
opener=None,
ignore_extension=False,
**kwargs
):
#
# This is a thin wrapper of smart_open.open. It's here for backward
# compatibility. It works exactly like smart_open.open when the passed
# parameters are identical. Otherwise, it raises a DeprecationWarning.
#
# For completeness, the main differences of the old smart_open function:
#
# 1. Default mode was read binary (mode='rb')
# 2. ignore_ext parameter was called ignore_extension
# 3. Transport parameters were passed directly as kwargs
#
url = 'https://github.com/RaRe-Technologies/smart_open/blob/develop/MIGRATING_FROM_OLDER_VERSIONS.rst'
if kwargs:
raise DeprecationWarning(
'The following keyword parameters are not supported: %r. '
'See %s for more information.' % (sorted(kwargs), url)
)
message = 'This function is deprecated. See %s for more information' % url
warnings.warn(message, category=DeprecationWarning)
ignore_ext = ignore_extension
del kwargs, url, message, ignore_extension
return open(**locals())
#
# Prevent failures with doctools from messing up the entire library. We don't
# expect such failures, but contributed modules (e.g. new transport mechanisms)
# may not be as polished.
#
try:
doctools.tweak_open_docstring(open)
doctools.tweak_parse_uri_docstring(parse_uri)
except Exception as ex:
logger.error(
'Encountered a non-fatal error while building docstrings (see below). '
'help(smart_open) will provide incomplete information as a result. '
'For full help text, see '
'<https://github.com/RaRe-Technologies/smart_open/blob/master/help.txt>.'
)
logger.exception(ex)
|
import logging
import re
from libsoundtouch import soundtouch_device
from libsoundtouch.utils import Source
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_START,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
DOMAIN,
SERVICE_ADD_ZONE_SLAVE,
SERVICE_CREATE_ZONE,
SERVICE_PLAY_EVERYWHERE,
SERVICE_REMOVE_ZONE_SLAVE,
)
_LOGGER = logging.getLogger(__name__)
MAP_STATUS = {
"PLAY_STATE": STATE_PLAYING,
"BUFFERING_STATE": STATE_PLAYING,
"PAUSE_STATE": STATE_PAUSED,
"STOP_STATE": STATE_OFF,
}
DATA_SOUNDTOUCH = "soundtouch"
ATTR_SOUNDTOUCH_GROUP = "soundtouch_group"
ATTR_SOUNDTOUCH_ZONE = "soundtouch_zone"
SOUNDTOUCH_PLAY_EVERYWHERE = vol.Schema({vol.Required("master"): cv.entity_id})
SOUNDTOUCH_CREATE_ZONE_SCHEMA = vol.Schema(
{vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)
SOUNDTOUCH_ADD_ZONE_SCHEMA = vol.Schema(
{vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)
SOUNDTOUCH_REMOVE_ZONE_SCHEMA = vol.Schema(
{vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)
DEFAULT_NAME = "Bose Soundtouch"
DEFAULT_PORT = 8090
SUPPORT_SOUNDTOUCH = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_TURN_OFF
| SUPPORT_VOLUME_SET
| SUPPORT_TURN_ON
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_SELECT_SOURCE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Bose Soundtouch platform."""
if DATA_SOUNDTOUCH not in hass.data:
hass.data[DATA_SOUNDTOUCH] = []
if discovery_info:
host = discovery_info["host"]
port = int(discovery_info["port"])
# if device already exists by config
if host in [device.config["host"] for device in hass.data[DATA_SOUNDTOUCH]]:
return
remote_config = {"id": "ha.component.soundtouch", "host": host, "port": port}
bose_soundtouch_entity = SoundTouchDevice(None, remote_config)
hass.data[DATA_SOUNDTOUCH].append(bose_soundtouch_entity)
add_entities([bose_soundtouch_entity], True)
else:
name = config.get(CONF_NAME)
remote_config = {
"id": "ha.component.soundtouch",
"port": config.get(CONF_PORT),
"host": config.get(CONF_HOST),
}
bose_soundtouch_entity = SoundTouchDevice(name, remote_config)
hass.data[DATA_SOUNDTOUCH].append(bose_soundtouch_entity)
add_entities([bose_soundtouch_entity], True)
def service_handle(service):
"""Handle the applying of a service."""
master_device_id = service.data.get("master")
slaves_ids = service.data.get("slaves")
slaves = []
if slaves_ids:
slaves = [
device
for device in hass.data[DATA_SOUNDTOUCH]
if device.entity_id in slaves_ids
]
master = next(
[
device
for device in hass.data[DATA_SOUNDTOUCH]
if device.entity_id == master_device_id
].__iter__(),
None,
)
if master is None:
_LOGGER.warning(
"Unable to find master with entity_id: %s", str(master_device_id)
)
return
if service.service == SERVICE_PLAY_EVERYWHERE:
slaves = [
d for d in hass.data[DATA_SOUNDTOUCH] if d.entity_id != master_device_id
]
master.create_zone(slaves)
elif service.service == SERVICE_CREATE_ZONE:
master.create_zone(slaves)
elif service.service == SERVICE_REMOVE_ZONE_SLAVE:
master.remove_zone_slave(slaves)
elif service.service == SERVICE_ADD_ZONE_SLAVE:
master.add_zone_slave(slaves)
hass.services.register(
DOMAIN,
SERVICE_PLAY_EVERYWHERE,
service_handle,
schema=SOUNDTOUCH_PLAY_EVERYWHERE,
)
hass.services.register(
DOMAIN,
SERVICE_CREATE_ZONE,
service_handle,
schema=SOUNDTOUCH_CREATE_ZONE_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_REMOVE_ZONE_SLAVE,
service_handle,
schema=SOUNDTOUCH_REMOVE_ZONE_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_ADD_ZONE_SLAVE,
service_handle,
schema=SOUNDTOUCH_ADD_ZONE_SCHEMA,
)
class SoundTouchDevice(MediaPlayerEntity):
"""Representation of a SoundTouch Bose device."""
def __init__(self, name, config):
"""Create Soundtouch Entity."""
self._device = soundtouch_device(config["host"], config["port"])
if name is None:
self._name = self._device.config.name
else:
self._name = name
self._status = None
self._volume = None
self._config = config
self._zone = None
@property
def config(self):
"""Return specific soundtouch configuration."""
return self._config
@property
def device(self):
"""Return Soundtouch device."""
return self._device
def update(self):
"""Retrieve the latest data."""
self._status = self._device.status()
self._volume = self._device.volume()
self._zone = self.get_zone_info()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume.actual / 100
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._status.source == "STANDBY":
return STATE_OFF
return MAP_STATUS.get(self._status.play_status, STATE_UNAVAILABLE)
@property
def source(self):
"""Name of the current input source."""
return self._status.source
@property
def source_list(self):
"""List of available input sources."""
return [
Source.AUX.value,
Source.BLUETOOTH.value,
]
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._volume.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SOUNDTOUCH
def turn_off(self):
"""Turn off media player."""
self._device.power_off()
def turn_on(self):
"""Turn on media player."""
self._device.power_on()
def volume_up(self):
"""Volume up the media player."""
self._device.volume_up()
def volume_down(self):
"""Volume down media player."""
self._device.volume_down()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._device.set_volume(int(volume * 100))
def mute_volume(self, mute):
"""Send mute command."""
self._device.mute()
def media_play_pause(self):
"""Simulate play pause media player."""
self._device.play_pause()
def media_play(self):
"""Send play command."""
self._device.play()
def media_pause(self):
"""Send media pause command to media player."""
self._device.pause()
def media_next_track(self):
"""Send next track command."""
self._device.next_track()
def media_previous_track(self):
"""Send the previous track command."""
self._device.previous_track()
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._status.image
@property
def media_title(self):
"""Title of current playing media."""
if self._status.station_name is not None:
return self._status.station_name
if self._status.artist is not None:
return f"{self._status.artist} - {self._status.track}"
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._status.duration
@property
def media_artist(self):
"""Artist of current playing media."""
return self._status.artist
@property
def media_track(self):
"""Artist of current playing media."""
return self._status.track
@property
def media_album_name(self):
"""Album name of current playing media."""
return self._status.album
async def async_added_to_hass(self):
"""Populate zone info which requires entity_id."""
@callback
def async_update_on_start(event):
"""Schedule an update when all platform entities have been added."""
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_update_on_start
)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
_LOGGER.debug("Starting media with media_id: %s", media_id)
if re.match(r"http?://", str(media_id)):
# URL
_LOGGER.debug("Playing URL %s", str(media_id))
self._device.play_url(str(media_id))
else:
# Preset
presets = self._device.presets()
preset = next(
[
preset for preset in presets if preset.preset_id == str(media_id)
].__iter__(),
None,
)
if preset is not None:
_LOGGER.debug("Playing preset: %s", preset.name)
self._device.select_preset(preset)
else:
_LOGGER.warning("Unable to find preset with id %s", media_id)
def select_source(self, source):
"""Select input source."""
if source == Source.AUX.value:
_LOGGER.debug("Selecting source AUX")
self._device.select_source_aux()
elif source == Source.BLUETOOTH.value:
_LOGGER.debug("Selecting source Bluetooth")
self._device.select_source_bluetooth()
else:
_LOGGER.warning("Source %s is not supported", source)
def create_zone(self, slaves):
"""
Create a zone (multi-room) and play on selected devices.
:param slaves: slaves on which to play
"""
if not slaves:
_LOGGER.warning("Unable to create zone without slaves")
else:
_LOGGER.info("Creating zone with master %s", self._device.config.name)
self._device.create_zone([slave.device for slave in slaves])
def remove_zone_slave(self, slaves):
"""
        Remove slave(s) from an existing zone (multi-room).
        Zone must already exist and the slaves list cannot be empty.
        Note: if the last slave is removed, the zone will be deleted and you
        will have to create a new one; you will no longer be able to add slaves.
:param slaves: slaves to remove from the zone
"""
if not slaves:
_LOGGER.warning("Unable to find slaves to remove")
else:
_LOGGER.info(
"Removing slaves from zone with master %s", self._device.config.name
)
# SoundTouch API seems to have a bug and won't remove slaves if there are
# more than one in the payload. Therefore we have to loop over all slaves
# and remove them individually
for slave in slaves:
# make sure to not try to remove the master (aka current device)
if slave.entity_id != self.entity_id:
self._device.remove_zone_slave([slave.device])
def add_zone_slave(self, slaves):
"""
        Add slave(s) to an existing zone (multi-room).
        Zone must already exist and the slaves list cannot be empty.
        :param slaves: slaves to add
"""
if not slaves:
_LOGGER.warning("Unable to find slaves to add")
else:
_LOGGER.info(
"Adding slaves to zone with master %s", self._device.config.name
)
self._device.add_zone_slave([slave.device for slave in slaves])
@property
def device_state_attributes(self):
"""Return entity specific state attributes."""
attributes = {}
if self._zone and "master" in self._zone:
attributes[ATTR_SOUNDTOUCH_ZONE] = self._zone
# Compatibility with how other components expose their groups (like SONOS).
# First entry is the master, others are slaves
group_members = [self._zone["master"]] + self._zone["slaves"]
attributes[ATTR_SOUNDTOUCH_GROUP] = group_members
return attributes
def get_zone_info(self):
"""Return the current zone info."""
zone_status = self._device.zone_status()
if not zone_status:
return None
# Due to a bug in the SoundTouch API itself client devices do NOT return their
# siblings as part of the "slaves" list. Only the master has the full list of
# slaves for some reason. To compensate for this shortcoming we have to fetch
# the zone info from the master when the current device is a slave until this is
# fixed in the SoundTouch API or libsoundtouch, or of course until somebody has a
# better idea on how to fix this.
# In addition to this shortcoming, libsoundtouch seems to report the "is_master"
        # property wrong on some slaves, so the only reliable way to detect if the current
        # device is the master is by comparing the master_id of the zone with the device_id
if zone_status.master_id == self._device.config.device_id:
return self._build_zone_info(self.entity_id, zone_status.slaves)
        # The master device has to be searched by its ID and not IP since libsoundtouch / BOSE API
# do not return the IP of the master for some slave objects/responses
master_instance = self._get_instance_by_id(zone_status.master_id)
if master_instance is not None:
master_zone_status = master_instance.device.zone_status()
return self._build_zone_info(
master_instance.entity_id, master_zone_status.slaves
)
# We should never end up here since this means we haven't found a master device to get the
# correct zone info from. In this case, assume current device is master
return self._build_zone_info(self.entity_id, zone_status.slaves)
def _get_instance_by_ip(self, ip_address):
"""Search and return a SoundTouchDevice instance by it's IP address."""
for instance in self.hass.data[DATA_SOUNDTOUCH]:
if instance and instance.config["host"] == ip_address:
return instance
return None
def _get_instance_by_id(self, instance_id):
"""Search and return a SoundTouchDevice instance by it's ID (aka MAC address)."""
for instance in self.hass.data[DATA_SOUNDTOUCH]:
if instance and instance.device.config.device_id == instance_id:
return instance
return None
def _build_zone_info(self, master, zone_slaves):
"""Build the exposed zone attributes."""
slaves = []
for slave in zone_slaves:
slave_instance = self._get_instance_by_ip(slave.device_ip)
if slave_instance:
slaves.append(slave_instance.entity_id)
attributes = {
"master": master,
"is_master": master == self.entity_id,
"slaves": slaves,
}
return attributes
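# Illustrative sketch (not part of the integration): the shape of the grouped
# state attributes exposed by device_state_attributes, built from a zone dict
# like the one returned by _build_zone_info() above. The entity ids are
# hypothetical; the first member of the group list is always the master.
def _demo_group_attribute():
    zone = {
        "master": "media_player.living_room",
        "is_master": True,
        "slaves": ["media_player.kitchen"],
    }
    attributes = {ATTR_SOUNDTOUCH_ZONE: zone}
    attributes[ATTR_SOUNDTOUCH_GROUP] = [zone["master"]] + zone["slaves"]
    return attributes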
|
from pathlib import Path
import tabulate
import inspect
import pandas as pd
import matchzoo
def _generate():
full = _make_title()
for model_class in matchzoo.models.list_available():
full += _make_model_class_subtitle(model_class)
full += _make_doc_section_subsubtitle()
full += _make_model_doc(model_class)
model = model_class()
full += _make_params_section_subsubtitle()
full += _make_model_params_table(model)
_write_to_files(full)
def _make_title():
title = 'MatchZoo Model Reference'
line = '*' * len(title)
return line + '\n' + title + '\n' + line + '\n\n'
def _make_model_class_subtitle(model_class):
subtitle = model_class.__name__
line = '#' * len(subtitle)
return subtitle + '\n' + line + '\n\n'
def _make_doc_section_subsubtitle():
subsubtitle = 'Model Documentation'
line = '*' * len(subsubtitle)
return subsubtitle + '\n' + line + '\n\n'
def _make_params_section_subsubtitle():
subsubtitle = 'Model Hyper Parameters'
line = '*' * len(subsubtitle)
return subsubtitle + '\n' + line + '\n\n'
def _make_model_doc(model_class):
return inspect.getdoc(model_class) + '\n\n'
def _make_model_params_table(model):
params = model.get_default_params()
df = params.to_frame()
df = df.rename({
'Value': 'Default Value',
'Hyper-Space': 'Default Hyper-Space'
}, axis='columns')
return tabulate.tabulate(df, tablefmt='rst', headers='keys') + '\n\n'
def _write_to_files(full):
readme_file_path = Path(__file__).parent.joinpath('README.rst')
doc_file_path = Path(__file__).parent.parent.parent. \
joinpath('docs').joinpath('source').joinpath('model_reference.rst')
for file_path in readme_file_path, doc_file_path:
with open(file_path, 'w', encoding='utf-8') as out_file:
out_file.write(full)
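# Small hedged example (not part of the generator): what tabulate produces with
# tablefmt='rst' for a toy parameter table. The parameter names and values are
# made up, not taken from any real MatchZoo model.
def _demo_rst_table():
    df = pd.DataFrame(
        {'Default Value': [32, 0.1], 'Default Hyper-Space': ['choice', 'uniform']},
        index=['hidden_size', 'dropout_rate'],
    )
    return tabulate.tabulate(df, tablefmt='rst', headers='keys')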
if __name__ == '__main__':
_generate()
|
import hashlib
import itertools
import logging
import os
import posixpath
import re
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import os_types
from perfkitbenchmarker import sample
from perfkitbenchmarker import stages
from perfkitbenchmarker.linux_packages import build_tools
BASE_MODE = 'base'
PEAK_MODE = 'peak'
ALL_MODE = 'all'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'runspec_tar', None,
'Used by the PKB speccpu benchmarks. Name of the .tgz file to use. '
'Defaults to None. ')
flags.DEFINE_string(
'runspec_config', None,
'Used by the PKB speccpu benchmarks. Name of the cfg file to use as the '
'SPEC CPU config file provided to the runspec binary via its --config '
'flag. If the benchmark is run using an .iso file, then the '
'cfg file must be placed in the local PKB data directory and will be '
'copied to the remote machine prior to executing runspec/runcpu. Defaults '
'to None. '
'See README.md for instructions if running with a repackaged .tgz file.')
flags.DEFINE_string(
'runspec_build_tool_version', None,
    'Version of gcc/g++/gfortran. This should match runspec_config. Note: if '
    'neither runspec_config nor runspec_build_tool_version is set, the test '
    'installs gcc/g++/gfortran-4.7, since that matches the default config '
    'version. If runspec_config is set but runspec_build_tool_version is not, '
    'the default version of the build tools will be installed. Also, this flag '
    'only works with Debian.')
flags.DEFINE_integer(
'runspec_iterations', 3,
'Used by the PKB speccpu benchmarks. The number of benchmark iterations '
'to execute, provided to the runspec binary via its --iterations flag.')
flags.DEFINE_string(
'runspec_define', '',
'Used by the PKB speccpu benchmarks. Optional comma-separated list of '
'SYMBOL[=VALUE] preprocessor macros provided to the runspec binary via '
'repeated --define flags. Example: numa,smt,sse=SSE4.2')
flags.DEFINE_boolean(
'runspec_enable_32bit', False,
'Used by the PKB speccpu benchmarks. If set, multilib packages will be '
'installed on the remote machine to enable use of 32-bit SPEC CPU '
'binaries. This may be useful when running on memory-constrained instance '
'types (i.e. less than 2 GiB memory/core), where 64-bit execution may be '
'problematic.')
flags.DEFINE_boolean(
'runspec_keep_partial_results', False,
'Used by the PKB speccpu benchmarks. If set, the benchmark will report '
'an aggregate score even if some of the SPEC CPU component tests '
'failed with status "NR". Available results will be saved, and PKB samples '
'will be marked with a metadata value of partial=true. If unset, partial '
'failures are treated as errors.')
flags.DEFINE_boolean(
'runspec_estimate_spec', False,
'Used by the PKB speccpu benchmarks. If set, the benchmark will report '
'an estimated aggregate score even if SPEC CPU did not compute one. '
'This usually occurs when --runspec_iterations is less than 3. '
    '--runspec_keep_partial_results is also required to be set. Samples will be '
'created as estimated_SPECint(R)_rate_base and '
'estimated_SPECfp(R)_rate_base. Available results will be saved, '
'and PKB samples will be marked with a metadata value of partial=true. If '
'unset, SPECint(R)_rate_base20** and SPECfp(R)_rate_base20** are listed '
'in the metadata under missing_results.')
flags.DEFINE_enum(
'spec_runmode', BASE_MODE,
[BASE_MODE, PEAK_MODE, ALL_MODE],
'Run mode to use. Defaults to base. ')
VM_STATE_ATTR = 'speccpu_vm_state'
def _CheckTarFile(vm, runspec_config, examine_members, speccpu_vm_state):
"""Performs preliminary checks on the format of tar file downloaded on vm.
Args:
vm: virtual machine
runspec_config: String. User-specified name of the config file that is
expected to be in the tar file.
examine_members: Boolean. If True, this function will examine the tar file's
members to verify that certain required members are present.
speccpu_vm_state: SpecInstallConfigurations. Install configurations.
Raises:
errors.Benchmarks.PrepareException: If the tar file does not contain a
required member.
errors.Config.InvalidValue: If the tar file is found, and runspec_config is
not a valid file name.
"""
if posixpath.basename(runspec_config) != runspec_config:
raise errors.Config.InvalidValue(
'Invalid runspec_config value: {0}{1}When running speccpu with a '
'tar file, runspec_config cannot specify a file in a sub-directory. '
'See README.md for information about running speccpu with a tar '
'file.'.format(runspec_config, os.linesep))
if not examine_members:
return
# Copy the cfg to the VM.
local_cfg_file_path = data.ResourcePath(speccpu_vm_state.runspec_config)
vm.PushFile(local_cfg_file_path, speccpu_vm_state.cfg_file_path)
scratch_dir = vm.GetScratchDir()
cfg_member = '{0}/config/{1}'.format(speccpu_vm_state.base_spec_dir,
runspec_config)
required_members = itertools.chain(speccpu_vm_state.required_members,
[cfg_member])
missing_members = []
for member in required_members:
stdout, _ = vm.RemoteCommand(
'cd {scratch_dir} && (test -f {member} || test -d {member}) ; echo $?'
.format(scratch_dir=scratch_dir, member=member))
if stdout.strip() != '0':
missing_members.append(member)
if missing_members:
raise errors.Benchmarks.PrepareException(
'The following files were not found within tar file:{linesep}{members}'
'{linesep}This is an indication that the tar file is formatted '
'incorrectly. See README.md for information about the expected format '
'of the tar file.'.format(
linesep=os.linesep,
members=os.linesep.join(sorted(missing_members))))
def _CheckIsoAndCfgFile(runspec_config, spec_iso, clang_flag):
"""Searches for the iso file and cfg file.
Args:
runspec_config: String. Name of the config file to provide to runspec.
spec_iso: String. Location of spec iso file.
clang_flag: String. Location of the clang flag file.
Raises:
data.ResourcePath: If one of the required files could not be found.
"""
# Search for the iso.
try:
data.ResourcePath(spec_iso)
except data.ResourceNotFound:
logging.error(
'%(iso)s not found. To run the speccpu benchmark, %(iso)s must be '
'in the perfkitbenchmarker/data directory (or one of the specified '
'data directories if the --data_search_paths flag is used). Visit '
'https://www.spec.org/ to learn more about purchasing %(iso)s.',
{'iso': spec_iso})
raise
# Search for the cfg.
try:
data.ResourcePath(runspec_config)
except data.ResourceNotFound:
logging.error(
'%s not found. To run the speccpu benchmark, the config file '
'specified by the --runspec_config flag must be in the '
'perfkitbenchmarker/data directory (or one of the specified data '
'directories if the --data_search_paths flag is used). Visit '
'https://www.spec.org/cpu2006/docs/runspec.html#about_config to learn '
'more about config files.', runspec_config)
raise
if not clang_flag: # 2017 ISO does not contain clang.xml
return
# Search for the flag.
try:
data.ResourcePath(clang_flag)
except data.ResourceNotFound:
logging.error(
'%s not found. To run the speccpu benchmark, the clang.xml file '
'must be in the perfkitbenchmarker/data directory (or one of the '
'specified data directories if the --data_search_paths flag is '
'used). Visit https://www.spec.org/cpu2017/docs/flag-description.html '
'to learn more about flag files.', clang_flag)
raise
def _GenerateMd5sum(file_name):
"""Generates md5sum from file_name."""
# https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
hash_md5 = hashlib.md5()
file_name_path = data.ResourcePath(file_name)
with open(file_name_path, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
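# Hedged sketch (not part of the package): the same chunked md5 hashing used by
# _GenerateMd5sum above, applied to an in-memory buffer instead of a data file.
def _DemoMd5OfBytes(payload=b'spec cpu tarball contents'):
  import io
  hash_md5 = hashlib.md5()
  buf = io.BytesIO(payload)
  for chunk in iter(lambda: buf.read(4096), b''):
    hash_md5.update(chunk)
  return hash_md5.hexdigest()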
class SpecInstallConfigurations(object):
"""Configs for SPEC CPU run that must be preserved between PKB stages.
Specifies directories to look for install files and tracks install locations.
An instance of this class is attached to the VM as an attribute and is
therefore preserved as part of the pickled BenchmarkSpec between PKB stages.
Each attribute represents a possible file or directory that may be created on
the remote machine as part of running the benchmark.
Attributes:
package_name: String. Either speccpu2006 or speccpu2017.
cfg_file_path: Optional string. Path of the cfg file on the remote machine.
base_mount_dir: Optional string. Base directory where iso file is mounted.
mount_dir: Optional string. Path where the iso file is mounted on the
remote machine.
base_spec_dir: Optional string. Base directory where spec files are located.
spec_dir: Optional string. Path of a created directory on the remote machine
where the SPEC files are stored.
base_iso_file_path: Optional string. Basename of iso file.
iso_file_path: Optional string. Path of the iso file on the remote machine.
base_tar_file_path: Optional string. Base directory of tar file.
tar_file_path: Optional string. Path of the tar file on the remote machine.
required_members: List. File components that must exist for spec to run.
log_format: String. Logging format of this spec run.
runspec_config: String. Name of the config file to run with.
base_clang_flag_file_path: Optional String. Basename of clang flag file.
clang_flag_file_path: Optional String. Path of clang flag file on the
remote machine.
"""
def __init__(self):
self.package_name = None
self.cfg_file_path = None
self.base_mount_dir = None
self.mount_dir = None
self.base_spec_dir = None
self.spec_dir = None
self.base_iso_file_path = None
self.iso_file_path = None
self.base_tar_file_path = None
self.tar_file_path = None
self.required_members = None
self.log_format = None
self.runspec_config = None
self.base_clang_flag_file_path = None
self.clang_flag_file_path = None
def UpdateConfig(self, scratch_dir):
"""Updates the configuration after other attributes have been set.
Args:
scratch_dir: The scratch directory on the VM that SPEC is installed on.
"""
self.spec_dir = posixpath.join(scratch_dir, self.base_spec_dir)
self.cfg_file_path = posixpath.join(
self.spec_dir, 'config', os.path.basename(self.runspec_config))
if self.base_iso_file_path:
self.iso_file_path = posixpath.join(scratch_dir, self.base_iso_file_path)
if self.base_mount_dir:
self.mount_dir = posixpath.join(scratch_dir, self.base_mount_dir)
if self.base_clang_flag_file_path:
self.clang_flag_file_path = posixpath.join(
self.spec_dir, 'config', 'flags',
os.path.basename(self.base_clang_flag_file_path))
def InstallSPECCPU(vm, speccpu_vm_state):
"""Installs SPEC CPU2006 or 2017 on the target vm.
Args:
vm: Vm on which speccpu is installed.
speccpu_vm_state: SpecInstallConfigurations. Install configuration for spec.
"""
scratch_dir = vm.GetScratchDir()
vm.RemoteCommand('chmod 777 {0}'.format(scratch_dir))
try:
    # Since this overrides the 'build_tools' installation, install it before
    # the 'build_tools' package.
_PrepareWithPreprovisionedTarFile(vm, speccpu_vm_state)
_CheckTarFile(vm, speccpu_vm_state.runspec_config,
stages.PROVISION in FLAGS.run_stage,
speccpu_vm_state)
except errors.Setup.BadPreprovisionedDataError:
_CheckIsoAndCfgFile(speccpu_vm_state.runspec_config,
speccpu_vm_state.base_iso_file_path,
speccpu_vm_state.base_clang_flag_file_path)
_PrepareWithIsoFile(vm, speccpu_vm_state)
vm.Install('speccpu')
def Install(vm):
"""Installs SPECCPU dependencies."""
vm.Install('wget')
vm.Install('fortran')
vm.Install('build_tools')
  # Reinstall a specific gcc/g++/gfortran version only when
  # --runspec_build_tool_version is set; otherwise the compilers provided by
  # the 'build_tools' package are used.
if FLAGS.runspec_build_tool_version:
build_tool_version = FLAGS.runspec_build_tool_version or '4.7'
if not (vm.OS_TYPE == os_types.DEBIAN9 and build_tool_version == '6'):
# debian9 already comes with version 6
build_tools.Reinstall(vm, version=build_tool_version)
if FLAGS.runspec_enable_32bit:
vm.Install('multilib')
vm.Install('numactl')
def _PrepareWithPreprovisionedTarFile(vm, speccpu_vm_state):
"""Prepares the VM to run using tar file in preprovisioned cloud.
Args:
vm: BaseVirtualMachine. Vm on which the tar file is installed.
speccpu_vm_state: SpecInstallConfigurations. Install configuration for spec.
"""
scratch_dir = vm.GetScratchDir()
vm.InstallPreprovisionedPackageData(speccpu_vm_state.package_name,
[speccpu_vm_state.base_tar_file_path],
scratch_dir)
vm.RemoteCommand('cd {dir} && tar xvfz {tar}'.format(
dir=scratch_dir, tar=speccpu_vm_state.base_tar_file_path))
def _PrepareWithIsoFile(vm, speccpu_vm_state):
"""Prepares the VM to run using the iso file.
Copies the iso to the VM, mounts it, and extracts the contents. Copies the
config file to the VM. Runs the SPEC install.sh script on the VM.
Args:
vm: BaseVirtualMachine. Recipient of the iso file.
speccpu_vm_state: SpecInstallConfigurations. Modified by this function to
reflect any changes to the VM that may need to be cleaned up.
"""
scratch_dir = vm.GetScratchDir()
# Make cpu2006 or cpu2017 directory on the VM.
vm.RemoteCommand('mkdir {0}'.format(speccpu_vm_state.spec_dir))
# Copy the iso to the VM.
local_iso_file_path = data.ResourcePath(speccpu_vm_state.base_iso_file_path)
vm.PushFile(local_iso_file_path, scratch_dir)
# Extract files from the iso to the cpu2006 or cpu2017 directory.
vm.RemoteCommand('mkdir {0}'.format(speccpu_vm_state.mount_dir))
vm.RemoteCommand('sudo mount -t iso9660 -o loop {0} {1}'.format(
speccpu_vm_state.iso_file_path, speccpu_vm_state.mount_dir))
vm.RemoteCommand('cp -r {0}/* {1}'.format(speccpu_vm_state.mount_dir,
speccpu_vm_state.spec_dir))
# cpu2017 iso does not come with config directory nor clang.xml
if speccpu_vm_state.clang_flag_file_path:
vm.RemoteCommand('mkdir -p {0}'.format(
os.path.dirname(speccpu_vm_state.clang_flag_file_path)))
vm.PushFile(data.ResourcePath(speccpu_vm_state.base_clang_flag_file_path),
speccpu_vm_state.clang_flag_file_path)
vm.RemoteCommand('chmod -R 777 {0}'.format(speccpu_vm_state.spec_dir))
# Copy the cfg to the VM.
local_cfg_file_path = data.ResourcePath(speccpu_vm_state.runspec_config)
vm.PushFile(local_cfg_file_path, speccpu_vm_state.cfg_file_path)
# Run SPEC CPU2006 or 2017 installation.
install_script_path = posixpath.join(speccpu_vm_state.spec_dir, 'install.sh')
vm.RobustRemoteCommand('yes | {0}'.format(install_script_path))
def _ExtractScore(stdout, vm, keep_partial_results, runspec_metric):
"""Extracts the SPEC(int|fp) score from stdout.
Args:
stdout: String. stdout from running RemoteCommand.
vm: The vm instance where SPEC CPU was run.
keep_partial_results: Boolean. True if partial results should
be extracted in the event that not all benchmarks were successfully
run. See the "runspec_keep_partial_results" flag for more info.
runspec_metric: String. Indicates whether this is spec speed or rate run.
Sample input for SPECint (Refer to unit test for more examples):
...
...Base Peak
============================================= ==========================
400.perlbench 9770 417 23.4 * 9770 417 23.4 *
401.bzip2 9650 565 17.1 * 9650 565 17.1 *
403.gcc 8050 364 22.1 *
429.mcf 9120 364 25.1 *
445.gobmk 10490 499 21.0 *
456.hmmer 9330 491 19.0 *
458.sjeng 12100 588 20.6 *
462.libquantum 20720 468 44.2 *
464.h264ref 22130 700 31.6 *
471.omnetpp 6250 349 17.9 *
473.astar 7020 482 14.6 *
483.xalancbmk 6900 248 27.8 *
Est. SPECint(R)_base2006 22.7
Est. SPECint(R)_peak2006 20
Sample input for SPECfp:
...
...Base Peak
============================================= ============================
410.bwaves 13590 717 19.0 * 13550 710 19.0 *
416.gamess 19580 923 21.2 *
433.milc 9180 480 19.1 *
434.zeusmp 9100 600 15.2 *
435.gromacs 7140 605 11.8 *
436.cactusADM 11950 1289 9.27 *
437.leslie3d 9400 859 10.9 *
444.namd 8020 504 15.9 *
447.dealII 11440 409 28.0 *
450.soplex 8340 272 30.6 *
453.povray 5320 231 23.0 *
454.calculix 8250 993 8.31 *
459.GemsFDTD 10610 775 13.7 *
465.tonto 9840 565 17.4 *
470.lbm 13740 365 37.7 *
481.wrf 11170 788 14.2 *
482.sphinx3 19490 668 29.2 *
Est. SPECfp(R)_base2006 17.5
Est. SPECfp(R)_peak2006 20
Returns:
A list of sample.Sample objects.
"""
results = []
speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
re_begin_section = re.compile('^={1,}')
re_end_section = re.compile(speccpu_vm_state.log_format)
result_section = []
in_result_section = False
at_peak_results_line, peak_name, peak_score = False, None, None
# Extract the summary section
for line in stdout.splitlines():
if in_result_section:
result_section.append(line)
    # search for the beginning of the result section
match = re.search(re_begin_section, line)
if match:
assert not in_result_section
in_result_section = True
continue
# search for end of result section
match = re.search(re_end_section, line)
if at_peak_results_line:
_, peak_name, peak_score = line.split()
at_peak_results_line = False
if match:
assert in_result_section
spec_name = str(match.group(1))
if runspec_metric == 'speed':
spec_name += ':speed'
try:
spec_score = float(match.group(2))
except ValueError:
# Partial results may get reported as '--' instead of a number.
spec_score = None
if FLAGS.spec_runmode != BASE_MODE:
at_peak_results_line = True
in_result_section = False
# remove the final SPEC(int|fp) score, which has only 2 columns.
result_section.pop()
metadata = {
'runspec_config': speccpu_vm_state.runspec_config,
'runspec_config_md5sum': _GenerateMd5sum(speccpu_vm_state.runspec_config),
'runspec_iterations': str(FLAGS.runspec_iterations),
'runspec_enable_32bit': str(FLAGS.runspec_enable_32bit),
'runspec_define': FLAGS.runspec_define,
'runspec_metric': runspec_metric,
'spec_runmode': FLAGS.spec_runmode,
'spec17_copies': FLAGS.spec17_copies,
'spec17_threads': FLAGS.spec17_threads,
'spec17_fdo': FLAGS.spec17_fdo,
'spec17_subset': FLAGS.spec17_subset,
'gcc_version': build_tools.GetVersion(vm, 'gcc')
}
missing_results = []
scores = []
for benchmark in result_section:
# Skip over failed runs, but count them since they make the overall
# result invalid.
not_reported = benchmark.count('NR')
if not_reported > 1 or (
not_reported == 1 and FLAGS.spec_runmode != PEAK_MODE):
logging.warning('SPEC CPU missing result: %s', benchmark)
missing_results.append(str(benchmark.split()[0]))
continue
base_score_str, peak_score_str = None, None
if FLAGS.spec_runmode == BASE_MODE:
# name, copies/threads, time, score, misc
name, _, _, base_score_str, _ = benchmark.split()
elif FLAGS.spec_runmode == PEAK_MODE:
# name, base_not_reported(NR), copies/threads, time, score, misc
name, _, _, _, peak_score_str, _ = benchmark.split()
else:
# name, copies/threads, base time, base score, base misc,
# copies/threads, peak time, peak score, peak misc
name, _, _, base_score_str, _, _, _, peak_score_str, _ = benchmark.split()
if runspec_metric == 'speed':
name += ':speed'
if base_score_str:
base_score_float = float(base_score_str)
scores.append(base_score_float)
results.append(sample.Sample(str(name), base_score_float, '', metadata))
if peak_score_str:
peak_score_float = float(peak_score_str)
results.append(
sample.Sample(str(name) + ':peak', peak_score_float, '', metadata))
if spec_score is None and FLAGS.spec_runmode != PEAK_MODE:
missing_results.append(spec_name)
if missing_results:
if keep_partial_results:
metadata['partial'] = 'true'
metadata['missing_results'] = ','.join(missing_results)
else:
raise errors.Benchmarks.RunError(
'speccpu: results missing, see log: ' + ','.join(missing_results))
if spec_score:
results.append(sample.Sample(spec_name, spec_score, '', metadata))
elif FLAGS.runspec_estimate_spec:
estimated_spec_score = _GeometricMean(scores)
results.append(sample.Sample('estimated_' + spec_name,
estimated_spec_score, '', metadata))
if peak_score:
results.append(sample.Sample(peak_name, float(peak_score), '', metadata))
return results
def _GeometricMean(arr):
"""Calculates the geometric mean of the array."""
product = 1
for val in arr:
product *= val
return product ** (1.0 / len(arr))
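# Illustrative example (added for clarity, not part of the original module):
# _GeometricMean([2.0, 8.0]) == (2.0 * 8.0) ** 0.5 == 4.0.  When
# --runspec_estimate_spec is set, this is applied to the per-benchmark base
# scores collected above to approximate the overall SPEC score.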
def ParseOutput(vm, log_files, is_partial_results, runspec_metric,
results_directory=None):
"""Retrieves the SPEC CPU output from the VM and parses it.
Args:
vm: Vm. The vm instance where SPEC CPU was run.
log_files: String. Path of the directory on the remote machine where the
SPEC files, including binaries and logs, are located.
is_partial_results: Boolean. True if the output is partial result.
runspec_metric: String. Indicates whether this is spec speed or rate run.
results_directory: Optional String. Indicates where the spec directory is.
Defaults to the results folder inside the speccpu directory.
Returns:
A list of samples to be published (in the same format as Run() returns).
"""
speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
results = []
for log in log_files:
results_dir = results_directory or '%s/result' % speccpu_vm_state.spec_dir
stdout, _ = vm.RemoteCommand(
'cat %s/%s' % (results_dir, log), should_log=True)
results.extend(_ExtractScore(
stdout, vm, FLAGS.runspec_keep_partial_results or is_partial_results,
runspec_metric))
return results
def Run(vm, cmd, benchmark_subset, version_specific_parameters=None):
"""Runs SPEC CPU on the target vm.
Args:
vm: Vm. The vm on which speccpu will run.
cmd: command to issue.
benchmark_subset: List. Subset of the benchmark to run.
version_specific_parameters: List. List of parameters for specific versions.
Returns:
A Tuple of (stdout, stderr) the run output.
"""
speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
runspec_flags = [
('config', posixpath.basename(speccpu_vm_state.cfg_file_path)),
('tune', FLAGS.spec_runmode), ('size', 'ref'),
('iterations', FLAGS.runspec_iterations)]
if FLAGS.runspec_define:
for runspec_define in FLAGS.runspec_define.split(','):
runspec_flags.append(('define', runspec_define))
fl = ' '.join('--{0}={1}'.format(k, v) for k, v in runspec_flags)
if version_specific_parameters:
fl += ' '.join(version_specific_parameters)
runspec_cmd = '{cmd} --noreportable {flags} {subset}'.format(
cmd=cmd, flags=fl, subset=benchmark_subset)
cmd = ' && '.join((
'cd {0}'.format(speccpu_vm_state.spec_dir), 'rm -rf result', '. ./shrc',
'. ./shrc', runspec_cmd))
return vm.RobustRemoteCommand(cmd)
def Uninstall(vm):
"""Cleans up SPECCPU from the target vm.
Args:
vm: The vm on which SPECCPU is uninstalled.
"""
speccpu_vm_state = getattr(vm, VM_STATE_ATTR, None)
if speccpu_vm_state:
if speccpu_vm_state.mount_dir:
try:
vm.RemoteCommand('sudo umount {0}'.format(speccpu_vm_state.mount_dir))
except errors.VirtualMachine.RemoteCommandError:
# Even if umount failed, continue to clean up.
logging.exception('umount failed.')
targets = ' '.join(p for p in speccpu_vm_state.__dict__.values() if p)
vm.RemoteCommand('rm -rf {0}'.format(targets))
|
from flask import Flask, request, redirect, Response, jsonify
from functools import wraps
from flasgger import Swagger
def requires_basic_auth(f):
"""Decorator to require HTTP Basic Auth for your endpoint."""
def check_auth(username, password):
return username == "guest" and password == "secret"
def authenticate():
return Response(
"Authentication required.", 401,
{"WWW-Authenticate": "Basic realm='Login Required'"},
)
@wraps(f)
def decorated(*args, **kwargs):
# NOTE: This example will require Basic Auth only when you run the
# app directly. For unit tests, we can't block it from getting the
        # Swagger specs, so we just allow it to go through without auth.
# The following two lines of code wouldn't be needed in a normal
# production environment.
if __name__ != "__main__":
return f(*args, **kwargs)
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
app = Flask(__name__)
app.config["SWAGGER"] = {
"title": "Swagger Basic Auth App",
"uiversion": 2,
}
swag = Swagger(app,
decorators=[ requires_basic_auth ],
template={
"swagger": "2.0",
"info": {
"title": "Swagger Basic Auth App",
"version": "1.0",
},
"consumes": [
"application/json",
],
"produces": [
"application/json",
],
},
)
@app.route("/echo/<name>", methods=["GET", "POST"])
def echo(name):
"""
Echo back the name and any posted parameters.
---
tags:
- echo
parameters:
- in: path
name: name
type: string
required: true
- in: body
name: body
description: JSON parameters.
schema:
properties:
first_name:
type: string
description: First name.
example: Alice
last_name:
type: string
description: Last name.
example: Smith
dob:
type: string
format: date
description: Date of birth.
example: 1990-01-01
comment:
type: string
description: Something arbitrary.
example: Hello world
responses:
200:
description: OK.
"""
data = {
"url_name": name,
"json": request.json,
}
return jsonify(data)
@app.route("/")
def index():
return redirect("/apidocs")
if __name__ == "__main__":
app.run(debug=True)
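# Illustrative usage (not part of the original example; assumes Flask's
# default port 5000): the Swagger UI (which "/" redirects to) is protected by
# the hard-coded credentials above, while the /echo endpoint is not, e.g.:
#   curl -u guest:secret http://localhost:5000/apidocs/
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"first_name": "Alice"}' http://localhost:5000/echo/alice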
|
import rumps
import time
rumps.debug_mode(True) # turn on command line logging information for development - default is off
@rumps.clicked("About")
def about(sender):
sender.title = 'NOM' if sender.title == 'About' else 'About' # can adjust titles of menu items dynamically
rumps.alert("This is a cool app!")
@rumps.clicked("Arbitrary", "Depth", "It's pretty easy") # very simple to access nested menu items
def does_something(sender):
my_data = {'poop': 88}
rumps.notification(title='Hi', subtitle='There.', message='Friend!', sound=does_something.sound, data=my_data)
does_something.sound = True
@rumps.clicked("Preferences")
def not_actually_prefs(sender):
if not sender.icon:
sender.icon = 'level_4.png'
sender.state = not sender.state
does_something.sound = not does_something.sound
@rumps.timer(4) # create a new thread that calls the decorated function every 4 seconds
def write_unix_time(sender):
with app.open('times', 'a') as f: # this opens files in your app's Application Support folder
f.write('The unix time now: {}\n'.format(time.time()))
@rumps.clicked("Arbitrary")
def change_statusbar_title(sender):
app.title = 'Hello World' if app.title != 'Hello World' else 'World, Hello'
@rumps.notifications
def notifications(notification): # function that reacts to incoming notification dicts
print(notification)
def onebitcallback(sender): # functions don't have to be decorated to serve as callbacks for buttons
print(4848484) # this function is specified as a callback when creating a MenuItem below
if __name__ == "__main__":
app = rumps.App("My Toolbar App", title='World, Hello')
app.menu = [
rumps.MenuItem('About', icon='pony.jpg', dimensions=(18, 18)), # can specify an icon to be placed near text
'Preferences',
None, # None functions as a separator in your menu
{'Arbitrary':
{"Depth": ["Menus", "It's pretty easy"],
"And doesn't": ["Even look like Objective C", rumps.MenuItem("One bit", callback=onebitcallback)]}},
None
]
app.run()
|
import json
import os.path
# Load the HSTS list from disk
__dirname = os.path.abspath(os.path.dirname(__file__))
__filename = os.path.join(__dirname, '..', '..', 'conf', 'hsts-preload.json')
with open(__filename, 'r') as f:
hsts = json.load(f)
def is_hpkp_preloaded(hostname):
# Just see if the hostname is in the HSTS list and pinned
if hsts.get(hostname, {}).get('pinned'):
return hsts[hostname]
    # Either the hostname is in the list *or* one of its parent domains is
    # pinned with includeSubDomainsForPinning
host = hostname.split('.')
levels = len(host)
# If hostname is foo.bar.baz.mozilla.org, check bar.baz.mozilla.org, baz.mozilla.org, mozilla.org, and .org
for i in range(1, levels):
domain = '.'.join(host[i:levels])
if hsts.get(domain, {}).get('pinned') is True and hsts.get(domain, {}).get('includeSubDomainsForPinning'):
return hsts[domain]
return False
def is_hsts_preloaded(hostname):
    # Just see if the hostname is in the HSTS list with the right mode -- no need to check includeSubDomains
if hsts.get(hostname, {}).get('mode') == 'force-https':
return hsts[hostname]
    # Either the hostname is in the list *or* a parent domain is and includeSubDomains is true
host = hostname.split('.')
levels = len(host)
# If hostname is foo.bar.baz.mozilla.org, check bar.baz.mozilla.org, baz.mozilla.org, mozilla.org, and .org
for i in range(1, levels):
domain = '.'.join(host[i:levels])
if hsts.get(domain, {}).get('mode') == 'force-https' and hsts.get(domain, {}).get('includeSubDomains'):
return hsts[domain]
return False
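# Illustrative walk-through (added for clarity, not part of the original
# module): for hostname = 'foo.bar.mozilla.org' the loops above check, in
# order, 'bar.mozilla.org', 'mozilla.org' and 'org', returning the first
# entry that is pinned / in force-https mode with the matching
# includeSubDomains flag set.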
# Return the new result if it's worse than the existing result, otherwise just the current result
def only_if_worse(new_result: str, old_result: str, order) -> str:
if not old_result:
return new_result
elif order.index(new_result) > order.index(old_result):
return new_result
else:
return old_result
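# Illustrative usage (the result strings here are made up, not from this
# module): with order = ['good', 'mediocre', 'bad'], later entries are
# considered worse, so:
#   only_if_worse('bad', 'mediocre', order)   -> 'bad'
#   only_if_worse('good', 'mediocre', order)  -> 'mediocre'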
# Let this file be run directly so you can see the JSON for the Google HSTS thingie
if __name__ == '__main__':
print(hsts)
|
import os
from pi1wire import Pi1Wire
from pyownet import protocol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_TYPE
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import HomeAssistantType
from .const import CONF_MOUNT_DIR, CONF_TYPE_OWSERVER, CONF_TYPE_SYSBUS
class OneWireHub:
"""Hub to communicate with SysBus or OWServer."""
def __init__(self, hass: HomeAssistantType):
"""Initialize."""
self.hass = hass
self.type: str = None
self.pi1proxy: Pi1Wire = None
self.owproxy: protocol._Proxy = None
self.devices = None
async def connect(self, host: str, port: int) -> None:
"""Connect to the owserver host."""
try:
self.owproxy = await self.hass.async_add_executor_job(
protocol.proxy, host, port
)
except protocol.ConnError as exc:
raise CannotConnect from exc
async def check_mount_dir(self, mount_dir: str) -> None:
"""Test that the mount_dir is a valid path."""
if not await self.hass.async_add_executor_job(os.path.isdir, mount_dir):
raise InvalidPath
self.pi1proxy = Pi1Wire(mount_dir)
async def initialize(self, config_entry: ConfigEntry) -> None:
"""Initialize a config entry."""
self.type = config_entry.data[CONF_TYPE]
if self.type == CONF_TYPE_SYSBUS:
await self.check_mount_dir(config_entry.data[CONF_MOUNT_DIR])
elif self.type == CONF_TYPE_OWSERVER:
host = config_entry.data[CONF_HOST]
port = config_entry.data[CONF_PORT]
await self.connect(host, port)
await self.discover_devices()
async def discover_devices(self):
"""Discover all devices."""
if self.devices is None:
if self.type == CONF_TYPE_SYSBUS:
self.devices = await self.hass.async_add_executor_job(
self.pi1proxy.find_all_sensors
)
if self.type == CONF_TYPE_OWSERVER:
self.devices = await self.hass.async_add_executor_job(
self._discover_devices_owserver
)
return self.devices
def _discover_devices_owserver(self):
"""Discover all owserver devices."""
devices = []
for device_path in self.owproxy.dir():
devices.append(
{
"path": device_path,
"family": self.owproxy.read(f"{device_path}family").decode(),
"type": self.owproxy.read(f"{device_path}type").decode(),
}
)
return devices
class CannotConnect(HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidPath(HomeAssistantError):
"""Error to indicate the path is invalid."""
|
import numpy as np
import os
import os.path as op
from .constants import FIFF
from .meas_info import _get_valid_units
def _check_orig_units(orig_units):
"""Check original units from a raw file.
Units that are close to a valid_unit but not equal can be remapped to fit
into the valid_units. All other units that are not valid will be replaced
with "n/a".
Parameters
----------
orig_units : dict
Dictionary mapping channel names to their units as specified in
the header file. Example: {'FC1': 'nV'}
Returns
-------
orig_units_remapped : dict
Dictionary mapping channel names to their VALID units as specified in
the header file. Invalid units are now labeled "n/a".
Example: {'FC1': 'nV', 'Hfp3erz': 'n/a'}
"""
if orig_units is None:
return
valid_units = _get_valid_units()
valid_units_lowered = [unit.lower() for unit in valid_units]
orig_units_remapped = dict(orig_units)
for ch_name, unit in orig_units.items():
# Be lenient: we ignore case for now.
if unit.lower() in valid_units_lowered:
continue
# Common "invalid units" can be remapped to their valid equivalent
remap_dict = dict()
remap_dict['uv'] = 'µV'
remap_dict['μv'] = 'µV' # greek letter mu vs micro sign. use micro
if unit.lower() in remap_dict:
orig_units_remapped[ch_name] = remap_dict[unit.lower()]
continue
# Some units cannot be saved, they are invalid: assign "n/a"
orig_units_remapped[ch_name] = 'n/a'
return orig_units_remapped
def _find_channels(ch_names, ch_type='EOG'):
"""Find EOG channel."""
substrings = (ch_type,)
substrings = [s.upper() for s in substrings]
if ch_type == 'EOG':
substrings = ('EOG', 'EYE')
eog_idx = [idx for idx, ch in enumerate(ch_names) if
any(substring in ch.upper() for substring in substrings)]
return eog_idx
def _mult_cal_one(data_view, one, idx, cals, mult):
"""Take a chunk of raw data, multiply by mult or cals, and store."""
one = np.asarray(one, dtype=data_view.dtype)
assert data_view.shape[1] == one.shape[1], \
(data_view.shape[1], one.shape[1])
if mult is not None:
        assert mult.ndim == one.ndim == 2
data_view[:] = mult @ one[idx]
else:
assert cals is not None
if isinstance(idx, slice):
data_view[:] = one[idx]
else:
# faster than doing one = one[idx]
np.take(one, idx, axis=0, out=data_view)
data_view *= cals
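# Illustrative shape note (added for clarity, not part of the original
# module): with `one` of shape (n_channels, n_times), `idx` selecting n_picks
# channels and `cals` broadcastable to (n_picks, 1), the branch without
# `mult` amounts to data_view[:] = one[idx] * cals; with `mult`, the
# projection/calibration matrix is applied instead via mult @ one[idx].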
def _blk_read_lims(start, stop, buf_len):
"""Deal with indexing in the middle of a data block.
Parameters
----------
start : int
Starting index.
stop : int
Ending index (exclusive).
buf_len : int
Buffer size in samples.
Returns
-------
block_start_idx : int
The first block to start reading from.
r_lims : list
The read limits.
d_lims : list
The write limits.
Notes
-----
Consider this example::
>>> start, stop, buf_len = 2, 27, 10
+---------+---------+---------
File structure: | buf0 | buf1 | buf2 |
+---------+---------+---------
File time: 0 10 20 30
+---------+---------+---------
Requested time: 2 27
| |
blockstart blockstop
| |
start stop
We need 27 - 2 = 25 samples (per channel) to store our data, and
we need to read from 3 buffers (30 samples) to get all of our data.
On all reads but the first, the data we read starts at
the first sample of the buffer. On all reads but the last,
the data we read ends on the last sample of the buffer.
We call ``this_data`` the variable that stores the current buffer's data,
and ``data`` the variable that stores the total output.
On the first read, we need to do this::
>>> data[0:buf_len-2] = this_data[2:buf_len] # doctest: +SKIP
On the second read, we need to do::
>>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len] # doctest: +SKIP
On the final read, we need to do::
>>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3] # doctest: +SKIP
This function encapsulates this logic to allow a loop over blocks, where
data is stored using the following limits::
>>> data[d_lims[ii, 0]:d_lims[ii, 1]] = this_data[r_lims[ii, 0]:r_lims[ii, 1]] # doctest: +SKIP
""" # noqa: E501
# this is used to deal with indexing in the middle of a sampling period
assert all(isinstance(x, int) for x in (start, stop, buf_len))
block_start_idx = (start // buf_len)
block_start = block_start_idx * buf_len
last_used_samp = stop - 1
block_stop = last_used_samp - last_used_samp % buf_len + buf_len
read_size = block_stop - block_start
n_blk = read_size // buf_len + (read_size % buf_len != 0)
start_offset = start - block_start
end_offset = block_stop - stop
d_lims = np.empty((n_blk, 2), int)
r_lims = np.empty((n_blk, 2), int)
for bi in range(n_blk):
# Triage start (sidx) and end (eidx) indices for
# data (d) and read (r)
if bi == 0:
d_sidx = 0
r_sidx = start_offset
else:
d_sidx = bi * buf_len - start_offset
r_sidx = 0
if bi == n_blk - 1:
d_eidx = stop - start
r_eidx = buf_len - end_offset
else:
d_eidx = (bi + 1) * buf_len - start_offset
r_eidx = buf_len
d_lims[bi] = [d_sidx, d_eidx]
r_lims[bi] = [r_sidx, r_eidx]
return block_start_idx, r_lims, d_lims
def _file_size(fname):
"""Get the file size in bytes."""
with open(fname, 'rb') as f:
f.seek(0, os.SEEK_END)
return f.tell()
def _read_segments_file(raw, data, idx, fi, start, stop, cals, mult,
dtype, n_channels=None, offset=0, trigger_ch=None):
"""Read a chunk of raw data."""
if n_channels is None:
n_channels = raw._raw_extras[fi]['orig_nchan']
n_bytes = np.dtype(dtype).itemsize
    # data_offset is in bytes; data_left counts data samples
    # (channels x time points), not bytes.
data_offset = n_channels * start * n_bytes + offset
data_left = (stop - start) * n_channels
# Read up to 100 MB of data at a time, block_size is in data samples
block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels
block_size = min(data_left, block_size)
with open(raw._filenames[fi], 'rb', buffering=0) as fid:
fid.seek(data_offset)
# extract data in chunks
for sample_start in np.arange(0, data_left, block_size) // n_channels:
count = min(block_size, data_left - sample_start * n_channels)
block = np.fromfile(fid, dtype, count)
if block.size != count:
raise RuntimeError('Incorrect number of samples (%s != %s), '
'please report this error to MNE-Python '
'developers' % (block.size, count))
block = block.reshape(n_channels, -1, order='F')
n_samples = block.shape[1] # = count // n_channels
sample_stop = sample_start + n_samples
if trigger_ch is not None:
stim_ch = trigger_ch[start:stop][sample_start:sample_stop]
block = np.vstack((block, stim_ch))
data_view = data[:, sample_start:sample_stop]
_mult_cal_one(data_view, block, idx, cals, mult)
def read_str(fid, count=1):
"""Read string from a binary file in a python version compatible way."""
dtype = np.dtype('>S%i' % count)
string = fid.read(dtype.itemsize)
data = np.frombuffer(string, dtype=dtype)[0]
bytestr = b''.join([data[0:data.index(b'\x00') if
b'\x00' in data else count]])
return str(bytestr.decode('ascii')) # Return native str type for Py2/3
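# Illustrative example (added for clarity, not part of the original module):
# if the next 7 bytes in `fid` are b'abc\x00xyz', read_str(fid, count=7)
# stops at the first NUL byte and returns 'abc'.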
def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc):
"""Initialize info['chs'] for eeg channels."""
chs = list()
for idx, ch_name in enumerate(ch_names):
if ch_name in eog or idx in eog:
coil_type = FIFF.FIFFV_COIL_NONE
kind = FIFF.FIFFV_EOG_CH
elif ch_name in ecg or idx in ecg:
coil_type = FIFF.FIFFV_COIL_NONE
kind = FIFF.FIFFV_ECG_CH
elif ch_name in emg or idx in emg:
coil_type = FIFF.FIFFV_COIL_NONE
kind = FIFF.FIFFV_EMG_CH
elif ch_name in misc or idx in misc:
coil_type = FIFF.FIFFV_COIL_NONE
kind = FIFF.FIFFV_MISC_CH
else:
coil_type = ch_coil
kind = ch_kind
chan_info = {'cal': cals[idx], 'logno': idx + 1, 'scanno': idx + 1,
'range': 1.0, 'unit_mul': FIFF.FIFF_UNITM_NONE,
'ch_name': ch_name, 'unit': FIFF.FIFF_UNIT_V,
'coord_frame': FIFF.FIFFV_COORD_HEAD,
'coil_type': coil_type, 'kind': kind, 'loc': np.zeros(12)}
chs.append(chan_info)
return chs
def _synthesize_stim_channel(events, n_samples):
"""Synthesize a stim channel from events read from an event file.
Parameters
----------
events : array, shape (n_events, 3)
Each row representing an event.
n_samples : int
The number of samples.
Returns
-------
stim_channel : array, shape (n_samples,)
An array containing the whole recording's event marking.
"""
# select events overlapping buffer
events = events.copy()
events[events[:, 1] < 1, 1] = 1
# create output buffer
stim_channel = np.zeros(n_samples, int)
for onset, duration, trigger in events:
stim_channel[onset:onset + duration] = trigger
return stim_channel
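# Illustrative example (added for clarity, not part of the original module):
# events = np.array([[2, 3, 5]]) with n_samples = 8 yields
# stim_channel = [0, 0, 5, 5, 5, 0, 0, 0] (trigger 5 starting at sample 2 for
# 3 samples); durations below 1 are first clipped up to 1.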
def _construct_bids_filename(base, ext, part_idx):
"""Construct a BIDS compatible filename for split files."""
# insert index in filename
dirname = op.dirname(base)
base = op.basename(base)
deconstructed_base = base.split('_')
if len(deconstructed_base) < 2:
raise ValueError('Filename base must end with an underscore followed '
f'by the modality (e.g., _eeg or _meg), got {base}')
modality = deconstructed_base[-1]
base = '_'.join(deconstructed_base[:-1])
use_fname = '{}_split-{:02}_{}{}'.format(base, part_idx, modality, ext)
if dirname:
use_fname = op.join(dirname, use_fname)
return use_fname
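# Illustrative example (file names are made up, not from this module):
# _construct_bids_filename('/data/sub-01_task-rest_eeg', '.fif', 1) returns
# '/data/sub-01_task-rest_split-01_eeg.fif'.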
|
class Parent(object):
def meth(self):
print("METH")
class Child(Parent):
pass
def trace(frame, event, args):
# Thanks to Aleksi Torhamo for code and idea.
co = frame.f_code
fname = co.co_name
if not co.co_varnames:
return
locs = frame.f_locals
first_arg = co.co_varnames[0]
if co.co_argcount:
self = locs[first_arg]
elif co.co_flags & 0x04: # *args syntax
self = locs[first_arg][0]
else:
return
func = getattr(self, fname).__func__
if hasattr(func, '__qualname__'):
qname = func.__qualname__
else:
for cls in self.__class__.__mro__:
f = cls.__dict__.get(fname, None)
if f is None:
continue
if f is func:
qname = cls.__name__ + "." + fname
break
print("{}: {}.{} {}".format(event, self, fname, qname))
return trace
import sys
sys.settrace(trace)
Child().meth()
|
import ctypes
import logging
from multiprocessing import Value, Process
from yandextank.common.util import Cleanup, Finish, Status
from yandextank.core.tankworker import TankWorker
logger = logging.getLogger()
class ApiWorker(Process, TankWorker):
SECTION = 'core'
FINISH_FILENAME = 'finish_status.yaml'
def __init__(self, manager, config_paths, cli_options=None, cfg_patches=None, cli_args=None, no_local=False,
log_handlers=None, wait_lock=False, files=None, ammo_file=None):
Process.__init__(self)
TankWorker.__init__(self, configs=config_paths, cli_options=cli_options, cfg_patches=cfg_patches,
cli_args=cli_args, no_local=no_local, log_handlers=log_handlers,
wait_lock=wait_lock, files=files, ammo_file=ammo_file, api_start=True, manager=manager)
self._status = Value(ctypes.c_char_p, Status.TEST_INITIATED)
self._test_id = Value(ctypes.c_char_p, self.core.test_id.encode('utf8'))
self._retcode = Value(ctypes.c_int, 0)
self._msg = Value(ctypes.c_char_p, b'')
@property
def test_id(self):
return self._test_id.value.decode('utf8')
@property
def status(self):
self._status.acquire()
res = self._status.value
self._status.release()
return res
@status.setter
def status(self, val):
self._status.acquire()
self._status.value = val
self._status.release()
@property
def retcode(self):
return self._retcode.value
@retcode.setter
def retcode(self, val):
self._retcode.value = val
@property
def msg(self):
self._msg.acquire()
res = self._msg.value.decode('utf8')
self._msg.release()
return res
@msg.setter
def msg(self, val):
value = val.encode('utf8')
self._msg.acquire()
self._msg.value = value
self._msg.release()
def run(self):
with Cleanup(self) as add_cleanup:
lock = self.get_lock()
add_cleanup('release lock', lock.release)
self.status = Status.TEST_PREPARING
logger.info('Created a folder for the test. %s' % self.folder)
self.core.plugins_configure()
add_cleanup('plugins cleanup', self.core.plugins_cleanup)
self.core.plugins_prepare_test()
with Finish(self):
self.status = Status.TEST_RUNNING
self.core.plugins_start_test()
self.retcode = self.core.wait_for_finish()
self.status = Status.TEST_POST_PROCESS
self.retcode = self.core.plugins_post_process(self.retcode)
class SingleLevelFilter(logging.Filter):
"""Exclude or approve one msg type at a time. """
def __init__(self, passlevel, reject):
logging.Filter.__init__(self)
self.passlevel = passlevel
self.reject = reject
def filter(self, record):
if self.reject:
return record.levelno != self.passlevel
else:
return record.levelno == self.passlevel
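# Illustrative usage (not part of the original module): attach the filter to
# a handler so that it emits only records of one level, or everything except
# that level:
#   handler = logging.StreamHandler()
#   handler.addFilter(SingleLevelFilter(logging.INFO, reject=False))  # INFO only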
|
import asyncio
from datetime import timedelta
import itertools
import logging
import aiohttp
from defusedxml import ElementTree
from netdisco import ssdp, util
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.loader import async_get_ssdp
DOMAIN = "ssdp"
SCAN_INTERVAL = timedelta(seconds=60)
# Attributes for accessing info from SSDP response
ATTR_SSDP_LOCATION = "ssdp_location"
ATTR_SSDP_ST = "ssdp_st"
ATTR_SSDP_USN = "ssdp_usn"
ATTR_SSDP_EXT = "ssdp_ext"
ATTR_SSDP_SERVER = "ssdp_server"
# Attributes for accessing info from retrieved UPnP device description
ATTR_UPNP_DEVICE_TYPE = "deviceType"
ATTR_UPNP_FRIENDLY_NAME = "friendlyName"
ATTR_UPNP_MANUFACTURER = "manufacturer"
ATTR_UPNP_MANUFACTURER_URL = "manufacturerURL"
ATTR_UPNP_MODEL_DESCRIPTION = "modelDescription"
ATTR_UPNP_MODEL_NAME = "modelName"
ATTR_UPNP_MODEL_NUMBER = "modelNumber"
ATTR_UPNP_MODEL_URL = "modelURL"
ATTR_UPNP_SERIAL = "serialNumber"
ATTR_UPNP_UDN = "UDN"
ATTR_UPNP_UPC = "UPC"
ATTR_UPNP_PRESENTATION_URL = "presentationURL"
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up the SSDP integration."""
async def initialize(_):
scanner = Scanner(hass, await async_get_ssdp(hass))
await scanner.async_scan(None)
async_track_time_interval(hass, scanner.async_scan, SCAN_INTERVAL)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, initialize)
return True
def _run_ssdp_scans():
_LOGGER.debug("Scanning")
# Run 3 times as packets can get lost
return itertools.chain.from_iterable([ssdp.scan() for _ in range(3)])
class Scanner:
"""Class to manage SSDP scanning."""
def __init__(self, hass, integration_matchers):
"""Initialize class."""
self.hass = hass
self.seen = set()
self._integration_matchers = integration_matchers
self._description_cache = {}
async def async_scan(self, _):
"""Scan for new entries."""
entries = await self.hass.async_add_executor_job(_run_ssdp_scans)
await self._process_entries(entries)
        # We clear the cache after each run. We track discovered entries,
        # so we will never need a description twice.
self._description_cache.clear()
async def _process_entries(self, entries):
"""Process SSDP entries."""
tasks = []
for entry in entries:
key = (entry.st, entry.location)
if key in self.seen:
continue
self.seen.add(key)
tasks.append(self._process_entry(entry))
if not tasks:
return
to_load = [
result for result in await asyncio.gather(*tasks) if result is not None
]
if not to_load:
return
tasks = []
for entry, info, domains in to_load:
for domain in domains:
_LOGGER.debug("Discovered %s at %s", domain, entry.location)
tasks.append(
self.hass.config_entries.flow.async_init(
domain, context={"source": DOMAIN}, data=info
)
)
await asyncio.wait(tasks)
async def _process_entry(self, entry):
"""Process a single entry."""
info = {"st": entry.st}
for key in "usn", "ext", "server":
if key in entry.values:
info[key] = entry.values[key]
if entry.location:
            # Multiple entries usually share the same location. Make sure
            # we fetch it only once.
info_req = self._description_cache.get(entry.location)
if info_req is None:
info_req = self._description_cache[
entry.location
] = self.hass.async_create_task(self._fetch_description(entry.location))
info.update(await info_req)
domains = set()
for domain, matchers in self._integration_matchers.items():
for matcher in matchers:
if all(info.get(k) == v for (k, v) in matcher.items()):
domains.add(domain)
if domains:
return (entry, info_from_entry(entry, info), domains)
return None
async def _fetch_description(self, xml_location):
"""Fetch an XML description."""
session = self.hass.helpers.aiohttp_client.async_get_clientsession()
try:
resp = await session.get(xml_location, timeout=5)
xml = await resp.text()
# Samsung Smart TV sometimes returns an empty document the
# first time. Retry once.
if not xml:
resp = await session.get(xml_location, timeout=5)
xml = await resp.text()
except (aiohttp.ClientError, asyncio.TimeoutError) as err:
_LOGGER.debug("Error fetching %s: %s", xml_location, err)
return {}
try:
tree = ElementTree.fromstring(xml)
except ElementTree.ParseError as err:
_LOGGER.debug("Error parsing %s: %s", xml_location, err)
return {}
return util.etree_to_dict(tree).get("root", {}).get("device", {})
def info_from_entry(entry, device_info):
"""Get info from an entry."""
info = {
ATTR_SSDP_LOCATION: entry.location,
ATTR_SSDP_ST: entry.st,
}
if device_info:
info.update(device_info)
info.pop("st", None)
if "usn" in info:
info[ATTR_SSDP_USN] = info.pop("usn")
if "ext" in info:
info[ATTR_SSDP_EXT] = info.pop("ext")
if "server" in info:
info[ATTR_SSDP_SERVER] = info.pop("server")
return info
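# Illustrative note (matcher values here are made up, not from this module):
# each integration matcher is a dict of expected keys, e.g.
# {"st": "urn:schemas-upnp-org:device:MediaRenderer:1", "manufacturer": "Acme"}.
# _process_entry only flags a domain when every key/value pair in the matcher
# equals the corresponding value in the combined SSDP + description info.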
|
import collections
from itertools import cycle, product
import random
import re
import sys
import coverage
from coverage.parser import PythonParser
class PythonSpinner(object):
"""Spin Python source from a simple AST."""
def __init__(self):
self.lines = []
self.lines.append("async def func():")
self.indent = 4
@property
def lineno(self):
return len(self.lines) + 1
@classmethod
def generate_python(cls, ast):
spinner = cls()
spinner.gen_python_internal(ast)
return "\n".join(spinner.lines)
def add_line(self, line):
g = "g{}".format(self.lineno)
self.lines.append(' ' * self.indent + line.format(g=g, lineno=self.lineno))
def add_block(self, node):
self.indent += 4
self.gen_python_internal(node)
self.indent -= 4
def maybe_block(self, node, nodei, keyword):
if len(node) > nodei and node[nodei] is not None:
self.add_line(keyword + ":")
self.add_block(node[nodei])
def gen_python_internal(self, ast):
for node in ast:
if isinstance(node, list):
op = node[0]
if op == "if":
self.add_line("if {g}:")
self.add_block(node[1])
self.maybe_block(node, 2, "else")
elif op == "for":
self.add_line("for x in {g}:")
self.add_block(node[1])
self.maybe_block(node, 2, "else")
elif op == "while":
self.add_line("while {g}:")
self.add_block(node[1])
self.maybe_block(node, 2, "else")
elif op == "try":
self.add_line("try:")
self.add_block(node[1])
# 'except' clauses are different, because there can be any
# number.
if len(node) > 2 and node[2] is not None:
for except_node in node[2]:
self.add_line("except Exception{}:".format(self.lineno))
self.add_block(except_node)
self.maybe_block(node, 3, "else")
self.maybe_block(node, 4, "finally")
elif op == "with":
self.add_line("with {g} as x:")
self.add_block(node[1])
else:
raise Exception("Bad list node: {!r}".format(node))
else:
op = node
if op == "assign":
self.add_line("x = {lineno}")
elif op in ["break", "continue"]:
self.add_line(op)
elif op == "return":
self.add_line("return")
elif op == "yield":
self.add_line("yield {lineno}")
else:
raise Exception("Bad atom node: {!r}".format(node))
def weighted_choice(rand, choices):
"""Choose from a list of [(choice, weight), ...] options, randomly."""
total = sum(w for c, w in choices)
r = rand.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w >= r:
return c
upto += w
assert False, "Shouldn't get here"
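# Illustrative example (not part of the original script):
# weighted_choice(random.Random(), [("a", 1), ("b", 3)]) returns "b" about
# three times as often as "a".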
class RandomAstMaker(object):
def __init__(self, seed=None):
self.r = random.Random()
if seed is not None:
self.r.seed(seed)
self.depth = 0
self.bc_allowed = set()
def roll(self, prob=0.5):
return self.r.random() <= prob
def choose(self, choices):
"""Roll the dice to choose an option."""
return weighted_choice(self.r, choices)
STMT_CHOICES = [
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 20), ("return", 1), ("yield", 0)],
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
[("if", 10), ("for", 10), ("try", 10), ("while", 3), ("with", 10), ("assign", 40), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
# Last element has to have no compound statements, to limit depth.
[("assign", 10), ("return", 1), ("yield", 0), ("break", 10), ("continue", 10)],
]
def make_body(self, parent):
body = []
choices = self.STMT_CHOICES[self.depth]
self.depth += 1
nstmts = self.choose([(1, 10), (2, 25), (3, 10), (4, 10), (5, 5)])
for _ in range(nstmts):
stmt = self.choose(choices)
if stmt == "if":
body.append(["if", self.make_body("if")])
if self.roll():
body[-1].append(self.make_body("ifelse"))
elif stmt == "for":
old_allowed = self.bc_allowed
self.bc_allowed = self.bc_allowed | set(["break", "continue"])
body.append(["for", self.make_body("for")])
self.bc_allowed = old_allowed
if self.roll():
body[-1].append(self.make_body("forelse"))
elif stmt == "while":
old_allowed = self.bc_allowed
self.bc_allowed = self.bc_allowed | set(["break", "continue"])
body.append(["while", self.make_body("while")])
self.bc_allowed = old_allowed
if self.roll():
body[-1].append(self.make_body("whileelse"))
elif stmt == "try":
else_clause = self.make_body("try") if self.roll() else None
old_allowed = self.bc_allowed
self.bc_allowed = self.bc_allowed - set(["continue"])
finally_clause = self.make_body("finally") if self.roll() else None
self.bc_allowed = old_allowed
if else_clause:
with_exceptions = True
elif not else_clause and not finally_clause:
with_exceptions = True
else:
with_exceptions = self.roll()
if with_exceptions:
num_exceptions = self.choose([(1, 50), (2, 50)])
exceptions = [self.make_body("except") for _ in range(num_exceptions)]
else:
exceptions = None
body.append(
["try", self.make_body("tryelse"), exceptions, else_clause, finally_clause]
)
elif stmt == "with":
body.append(["with", self.make_body("with")])
elif stmt == "return":
body.append(stmt)
break
elif stmt == "yield":
body.append("yield")
elif stmt in ["break", "continue"]:
if stmt in self.bc_allowed:
                    # A break or continue as the very first statement of a
                    # loop body is not interesting, so insert an assignment
                    # before it in that case.
if not body and (parent in ["for", "while"]):
body.append("assign")
body.append(stmt)
break
else:
stmt = "assign"
if stmt == "assign":
# Don't put two assignments in a row, there's no point.
if not body or body[-1] != "assign":
body.append("assign")
self.depth -= 1
return body
def async_alternatives(source):
parts = re.split(r"(for |with )", source)
nchoices = len(parts) // 2
#print("{} choices".format(nchoices))
def constant(s):
return [s]
def maybe_async(s):
return [s, "async "+s]
choices = [f(x) for f, x in zip(cycle([constant, maybe_async]), parts)]
for result in product(*choices):
source = "".join(result)
yield source
def compare_alternatives(source):
all_all_arcs = collections.defaultdict(list)
for i, alternate_source in enumerate(async_alternatives(source)):
parser = PythonParser(alternate_source)
arcs = parser.arcs()
all_all_arcs[tuple(arcs)].append((i, alternate_source))
return len(all_all_arcs)
def show_a_bunch():
longest = ""
for i in range(100):
maker = RandomAstMaker(i)
source = PythonSpinner.generate_python(maker.make_body("def"))
try:
print("-"*80, "\n", source, sep="")
compile(source, "<string>", "exec")
except Exception as ex:
print("Oops: {}\n{}".format(ex, source))
if len(source) > len(longest):
longest = source
def show_alternatives():
for i in range(1000):
maker = RandomAstMaker(i)
source = PythonSpinner.generate_python(maker.make_body("def"))
nlines = len(source.splitlines())
if nlines < 15:
nalt = compare_alternatives(source)
if nalt > 1:
print("--- {:3} lines, {:2} alternatives ---------".format(nlines, nalt))
print(source)
def show_one():
maker = RandomAstMaker()
source = PythonSpinner.generate_python(maker.make_body("def"))
print(source)
if __name__ == "__main__":
show_one()
#show_alternatives()
|
import logging
import apprise
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_FILE = "config"
CONF_URL = "url"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_URL): vol.All(cv.ensure_list, [str]),
vol.Optional(CONF_FILE): cv.string,
}
)
def get_service(hass, config, discovery_info=None):
"""Get the Apprise notification service."""
# Create our Apprise Instance (reference our asset)
a_obj = apprise.Apprise()
if config.get(CONF_FILE):
# Sourced from a Configuration File
a_config = apprise.AppriseConfig()
if not a_config.add(config[CONF_FILE]):
_LOGGER.error("Invalid Apprise config url provided")
return None
if not a_obj.add(a_config):
_LOGGER.error("Invalid Apprise config url provided")
return None
if config.get(CONF_URL):
# Ordered list of URLs
if not a_obj.add(config[CONF_URL]):
_LOGGER.error("Invalid Apprise URL(s) supplied")
return None
return AppriseNotificationService(a_obj)
class AppriseNotificationService(BaseNotificationService):
"""Implement the notification service for Apprise."""
def __init__(self, a_obj):
"""Initialize the service."""
self.apprise = a_obj
def send_message(self, message="", **kwargs):
"""Send a message to a specified target.
        If no target/tags are specified, all services are notified as-is.
        However, if any tags are specified, they are applied to the
        notification, causing filtering (if configured that way).
"""
targets = kwargs.get(ATTR_TARGET)
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
self.apprise.notify(body=message, title=title, tag=targets)
|
revision = "3adfdd6598df"
down_revision = "556ceb3e3c3e"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy_utils import ArrowType
from lemur.utils import Vault
def upgrade():
# create provider table
print("Creating dns_providers table")
op.create_table(
"dns_providers",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=256), nullable=True),
sa.Column("description", sa.String(length=1024), nullable=True),
sa.Column("provider_type", sa.String(length=256), nullable=True),
sa.Column("credentials", Vault(), nullable=True),
sa.Column("api_endpoint", sa.String(length=256), nullable=True),
sa.Column(
"date_created", ArrowType(), server_default=sa.text("now()"), nullable=False
),
sa.Column("status", sa.String(length=128), nullable=True),
sa.Column("options", JSON),
sa.Column("domains", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
print("Adding dns_provider_id column to certificates")
op.add_column(
"certificates", sa.Column("dns_provider_id", sa.Integer(), nullable=True)
)
print("Adding dns_provider_id column to pending_certs")
op.add_column(
"pending_certs", sa.Column("dns_provider_id", sa.Integer(), nullable=True)
)
print("Adding options column to pending_certs")
op.add_column("pending_certs", sa.Column("options", JSON))
print("Creating pending_dns_authorizations table")
op.create_table(
"pending_dns_authorizations",
sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True),
sa.Column("account_number", sa.String(length=128), nullable=True),
sa.Column("domains", JSON, nullable=True),
sa.Column("dns_provider_type", sa.String(length=128), nullable=True),
sa.Column("options", JSON, nullable=True),
)
print("Creating certificates_dns_providers_fk foreign key")
op.create_foreign_key(
"certificates_dns_providers_fk",
"certificates",
"dns_providers",
["dns_provider_id"],
["id"],
ondelete="cascade",
)
print("Altering column types in the api_keys table")
op.alter_column("api_keys", "issued_at", existing_type=sa.BIGINT(), nullable=True)
op.alter_column("api_keys", "revoked", existing_type=sa.BOOLEAN(), nullable=True)
op.alter_column("api_keys", "ttl", existing_type=sa.BIGINT(), nullable=True)
op.alter_column("api_keys", "user_id", existing_type=sa.INTEGER(), nullable=True)
print("Creating dns_providers_id foreign key on pending_certs table")
op.create_foreign_key(
None,
"pending_certs",
"dns_providers",
["dns_provider_id"],
["id"],
ondelete="CASCADE",
)
def downgrade():
print("Removing dns_providers_id foreign key on pending_certs table")
op.drop_constraint(None, "pending_certs", type_="foreignkey")
print("Reverting column types in the api_keys table")
op.alter_column("api_keys", "user_id", existing_type=sa.INTEGER(), nullable=False)
op.alter_column("api_keys", "ttl", existing_type=sa.BIGINT(), nullable=False)
op.alter_column("api_keys", "revoked", existing_type=sa.BOOLEAN(), nullable=False)
op.alter_column("api_keys", "issued_at", existing_type=sa.BIGINT(), nullable=False)
print("Reverting certificates_dns_providers_fk foreign key")
op.drop_constraint(
"certificates_dns_providers_fk", "certificates", type_="foreignkey"
)
print("Dropping pending_dns_authorizations table")
op.drop_table("pending_dns_authorizations")
print("Undoing modifications to pending_certs table")
op.drop_column("pending_certs", "options")
op.drop_column("pending_certs", "dns_provider_id")
print("Undoing modifications to certificates table")
op.drop_column("certificates", "dns_provider_id")
print("Deleting dns_providers table")
op.drop_table("dns_providers")
|