from datetime import datetime, timedelta
import logging
import pytz
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_AFTER,
CONF_BEFORE,
CONF_NAME,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, event
from homeassistant.helpers.sun import get_astral_event_date, get_astral_event_next
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_AFTER = "after"
ATTR_BEFORE = "before"
ATTR_NEXT_UPDATE = "next_update"
CONF_AFTER_OFFSET = "after_offset"
CONF_BEFORE_OFFSET = "before_offset"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_AFTER): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_BEFORE): vol.Any(cv.time, vol.All(vol.Lower, cv.sun_event)),
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_AFTER_OFFSET, default=timedelta(0)): cv.time_period,
vol.Optional(CONF_BEFORE_OFFSET, default=timedelta(0)): cv.time_period,
}
)
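# Illustrative only (not part of the original module): a minimal YAML entry
# the schema above would accept in configuration.yaml; the platform key ("tod")
# and the values are assumptions for this sketch.
#
#   binary_sensor:
#     - platform: tod
#       name: Night
#       after: sunset
#       after_offset: "-01:00"
#       before: sunrise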
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ToD sensors."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant configuration")
return
after = config[CONF_AFTER]
after_offset = config[CONF_AFTER_OFFSET]
before = config[CONF_BEFORE]
before_offset = config[CONF_BEFORE_OFFSET]
name = config[CONF_NAME]
sensor = TodSensor(name, after, after_offset, before, before_offset)
async_add_entities([sensor])
def is_sun_event(sun_event):
"""Return true if event is sun event not time."""
return sun_event in (SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET)
class TodSensor(BinarySensorEntity):
"""Time of the Day Sensor."""
def __init__(self, name, after, after_offset, before, before_offset):
"""Init the ToD Sensor..."""
self._name = name
self._time_before = self._time_after = self._next_update = None
self._after_offset = after_offset
self._before_offset = before_offset
self._before = before
self._after = after
@property
def should_poll(self):
"""Sensor does not need to be polled."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def after(self):
"""Return the timestamp for the beginning of the period."""
return self._time_after
@property
def before(self):
"""Return the timestamp for the end of the period."""
return self._time_before
@property
def is_on(self):
"""Return True is sensor is on."""
if self.after < self.before:
return self.after <= self.current_datetime < self.before
return False
@property
def current_datetime(self):
"""Return local current datetime according to hass configuration."""
return dt_util.utcnow()
@property
def next_update(self):
"""Return the next update point in the UTC time."""
return self._next_update
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_AFTER: self.after.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_BEFORE: self.before.astimezone(self.hass.config.time_zone).isoformat(),
ATTR_NEXT_UPDATE: self.next_update.astimezone(
self.hass.config.time_zone
).isoformat(),
}
def _naive_time_to_utc_datetime(self, naive_time):
"""Convert naive time from config to utc_datetime with current day."""
# get the current local date from utc time
current_local_date = self.current_datetime.astimezone(
self.hass.config.time_zone
).date()
        # calculate utc datetime corresponding to local time
utc_datetime = self.hass.config.time_zone.localize(
datetime.combine(current_local_date, naive_time)
).astimezone(tz=pytz.UTC)
return utc_datetime
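    # Worked example (illustrative, assuming the instance time zone is
    # Europe/Warsaw, UTC+2 in summer): a configured naive time of 06:00 on
    # 2020-07-01 is localized to 2020-07-01T06:00:00+02:00 and returned as
    # 2020-07-01T04:00:00+00:00.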
    def _calculate_initial_boundary_time(self):
"""Calculate internal absolute time boundaries."""
nowutc = self.current_datetime
# If after value is a sun event instead of absolute time
if is_sun_event(self._after):
            # Calculate today's event UTC time or,
            # if not available, take the next one
after_event_date = get_astral_event_date(
self.hass, self._after, nowutc
) or get_astral_event_next(self.hass, self._after, nowutc)
else:
# Convert local time provided to UTC today
# datetime.combine(date, time, tzinfo) is not supported
# in python 3.5. The self._after is provided
# with hass configured TZ not system wide
after_event_date = self._naive_time_to_utc_datetime(self._after)
self._time_after = after_event_date
# If before value is a sun event instead of absolute time
if is_sun_event(self._before):
            # Calculate today's event UTC time or, if not available,
            # take the next one
before_event_date = get_astral_event_date(
self.hass, self._before, nowutc
) or get_astral_event_next(self.hass, self._before, nowutc)
# Before is earlier than after
if before_event_date < after_event_date:
# Take next day for before
before_event_date = get_astral_event_next(
self.hass, self._before, after_event_date
)
else:
# Convert local time provided to UTC today, see above
before_event_date = self._naive_time_to_utc_datetime(self._before)
# It is safe to add timedelta days=1 to UTC as there is no DST
if before_event_date < after_event_date + self._after_offset:
before_event_date += timedelta(days=1)
self._time_before = before_event_date
# We are calculating the _time_after value assuming that it will happen today
# But that is not always true, e.g. after 23:00, before 12:00 and now is 10:00
# If _time_before and _time_after are ahead of current_datetime:
# _time_before is set to 12:00 next day
# _time_after is set to 23:00 today
# current_datetime is set to 10:00 today
if (
self._time_after > self.current_datetime
and self._time_before > self.current_datetime + timedelta(days=1)
):
# remove one day from _time_before and _time_after
self._time_after -= timedelta(days=1)
self._time_before -= timedelta(days=1)
# Add offset to utc boundaries according to the configuration
self._time_after += self._after_offset
self._time_before += self._before_offset
def _turn_to_next_day(self):
"""Turn to to the next day."""
if is_sun_event(self._after):
self._time_after = get_astral_event_next(
self.hass, self._after, self._time_after - self._after_offset
)
self._time_after += self._after_offset
else:
# Offset is already there
self._time_after += timedelta(days=1)
if is_sun_event(self._before):
self._time_before = get_astral_event_next(
self.hass, self._before, self._time_before - self._before_offset
)
self._time_before += self._before_offset
else:
# Offset is already there
self._time_before += timedelta(days=1)
async def async_added_to_hass(self):
"""Call when entity about to be added to Home Assistant."""
        self._calculate_initial_boundary_time()
self._calculate_next_update()
self._point_in_time_listener(dt_util.now())
def _calculate_next_update(self):
"""Datetime when the next update to the state."""
now = self.current_datetime
if now < self.after:
self._next_update = self.after
return
if now < self.before:
self._next_update = self.before
return
self._turn_to_next_day()
self._next_update = self.after
@callback
def _point_in_time_listener(self, now):
"""Run when the state of the sensor should be updated."""
self._calculate_next_update()
self.async_write_ha_state()
event.async_track_point_in_utc_time(
self.hass, self._point_in_time_listener, self.next_update
)
|
import asyncio
import logging
import pathlib
import secrets
import shutil
import typing
from PIL import Image, ImageOps, UnidentifiedImageError
from aiohttp import hdrs, web
from aiohttp.web_request import FileField
import voluptuous as vol
from homeassistant.components.http.static import CACHE_HEADERS
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import collection
from homeassistant.helpers.storage import Store
import homeassistant.util.dt as dt_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
VALID_SIZES = {256, 512}
MAX_SIZE = 1024 * 1024 * 10
CREATE_FIELDS = {
vol.Required("file"): FileField,
}
UPDATE_FIELDS = {
vol.Optional("name"): vol.All(str, vol.Length(min=1)),
}
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Image integration."""
image_dir = pathlib.Path(hass.config.path(DOMAIN))
hass.data[DOMAIN] = storage_collection = ImageStorageCollection(hass, image_dir)
await storage_collection.async_load()
collection.StorageCollectionWebsocket(
storage_collection,
DOMAIN,
DOMAIN,
CREATE_FIELDS,
UPDATE_FIELDS,
).async_setup(hass, create_create=False)
hass.http.register_view(ImageUploadView)
hass.http.register_view(ImageServeView(image_dir, storage_collection))
return True
class ImageStorageCollection(collection.StorageCollection):
"""Image collection stored in storage."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(UPDATE_FIELDS)
def __init__(self, hass: HomeAssistant, image_dir: pathlib.Path) -> None:
"""Initialize media storage collection."""
super().__init__(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
logging.getLogger(f"{__name__}.storage_collection"),
)
self.async_add_listener(self._change_listener)
self.image_dir = image_dir
async def _process_create_data(self, data: typing.Dict) -> typing.Dict:
"""Validate the config is valid."""
data = self.CREATE_SCHEMA(dict(data))
uploaded_file: FileField = data["file"]
if not uploaded_file.content_type.startswith("image/"):
raise vol.Invalid("Only images are allowed")
data[CONF_ID] = secrets.token_hex(16)
data["filesize"] = await self.hass.async_add_executor_job(self._move_data, data)
data["content_type"] = uploaded_file.content_type
data["name"] = uploaded_file.filename
data["uploaded_at"] = dt_util.utcnow().isoformat()
return data
def _move_data(self, data):
"""Move data."""
uploaded_file: FileField = data.pop("file")
# Verify we can read the image
try:
image = Image.open(uploaded_file.file)
except UnidentifiedImageError as err:
raise vol.Invalid("Unable to identify image file") from err
# Reset content
uploaded_file.file.seek(0)
media_folder: pathlib.Path = self.image_dir / data[CONF_ID]
media_folder.mkdir(parents=True)
media_file = media_folder / "original"
# Raises if path is no longer relative to the media dir
media_file.relative_to(media_folder)
_LOGGER.debug("Storing file %s", media_file)
with media_file.open("wb") as target:
shutil.copyfileobj(uploaded_file.file, target)
image.close()
return media_file.stat().st_size
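    # Layout note (inferred from the code above, paths illustrative): each
    # upload is stored as <config>/<DOMAIN>/<32-char-hex-id>/original, and
    # ImageServeView later writes resized copies such as .../512x512 next to it.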
@callback
def _get_suggested_id(self, info: typing.Dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_ID]
async def _update_data(self, data: dict, update_data: typing.Dict) -> typing.Dict:
"""Return a new updated data object."""
return {**data, **self.UPDATE_SCHEMA(update_data)}
async def _change_listener(self, change_type, item_id, data):
"""Handle change."""
if change_type != collection.CHANGE_REMOVED:
return
await self.hass.async_add_executor_job(shutil.rmtree, self.image_dir / item_id)
class ImageUploadView(HomeAssistantView):
"""View to upload images."""
url = "/api/image/upload"
name = "api:image:upload"
async def post(self, request):
"""Handle upload."""
# Increase max payload
request._client_max_size = MAX_SIZE # pylint: disable=protected-access
data = await request.post()
item = await request.app["hass"].data[DOMAIN].async_create_item(data)
return self.json(item)
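    # Illustrative only: an upload against this view is an authenticated
    # multipart/form-data POST to /api/image/upload with a single form field
    # named "file"; the JSON response echoes the stored item, including the
    # generated id used for serving.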
class ImageServeView(HomeAssistantView):
"""View to download images."""
url = "/api/image/serve/{image_id}/{filename}"
name = "api:image:serve"
requires_auth = False
def __init__(
self, image_folder: pathlib.Path, image_collection: ImageStorageCollection
):
"""Initialize image serve view."""
self.transform_lock = asyncio.Lock()
self.image_folder = image_folder
self.image_collection = image_collection
async def get(self, request: web.Request, image_id: str, filename: str):
"""Serve image."""
image_size = filename.split("-", 1)[0]
try:
parts = image_size.split("x", 1)
width = int(parts[0])
height = int(parts[1])
except (ValueError, IndexError) as err:
raise web.HTTPBadRequest from err
if not width or width != height or width not in VALID_SIZES:
raise web.HTTPBadRequest
image_info = self.image_collection.data.get(image_id)
if image_info is None:
raise web.HTTPNotFound()
hass = request.app["hass"]
target_file = self.image_folder / image_id / f"{width}x{height}"
if not target_file.is_file():
async with self.transform_lock:
# Another check in case another request already finished it while waiting
if not target_file.is_file():
await hass.async_add_executor_job(
_generate_thumbnail,
self.image_folder / image_id / "original",
image_info["content_type"],
target_file,
(width, height),
)
return web.FileResponse(
target_file,
headers={**CACHE_HEADERS, hdrs.CONTENT_TYPE: image_info["content_type"]},
)
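    # Illustrative only: a request this view accepts looks like
    #   GET /api/image/serve/<image_id>/512x512-original.jpg
    # where the leading "<width>x<height>" prefix must be square and one of
    # VALID_SIZES (256 or 512); anything else raises HTTPBadRequest.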
def _generate_thumbnail(original_path, content_type, target_path, target_size):
"""Generate a size."""
image = ImageOps.exif_transpose(Image.open(original_path))
image.thumbnail(target_size)
image.save(target_path, format=content_type.split("/", 1)[1])
|
import numpy as np
from numpy.testing import assert_almost_equal
from mne.inverse_sparse.mxne_debiasing import compute_bias
def test_compute_debiasing():
"""Test source amplitude debiasing."""
rng = np.random.RandomState(42)
G = rng.randn(10, 4)
X = rng.randn(4, 20)
debias_true = np.arange(1, 5, dtype=np.float64)
M = np.dot(G, X * debias_true[:, np.newaxis])
debias = compute_bias(M, G, X, max_iter=10000, n_orient=1, tol=1e-7)
assert_almost_equal(debias, debias_true, decimal=5)
debias = compute_bias(M, G, X, max_iter=10000, n_orient=2, tol=1e-5)
assert_almost_equal(debias, [1.8, 1.8, 3.72, 3.72], decimal=2)
|
import pytest
from molecule.model import schema_v2
@pytest.fixture
def _model_platforms_docker_section_data():
return """
---
platforms:
- name: instance
registry:
credentials:
password: $BAR
""".strip()
@pytest.fixture
def _env():
return {}
@pytest.fixture
def _keep_string():
return 'MOLECULE_'
@pytest.mark.parametrize('_stream', [(_model_platforms_docker_section_data)])
def test_platforms_docker(_stream, _env, _keep_string):
assert {} == schema_v2.pre_validate(_stream(), _env, _keep_string)
@pytest.fixture
def _model_platforms_docker_errors_section_data():
return """
---
platforms:
- name: instance
registry:
credentials:
password: foo
""".strip()
@pytest.mark.parametrize('_stream',
[_model_platforms_docker_errors_section_data])
def test_platforms_docker_has_errors(_stream, _env, _keep_string):
x = {
'platforms': [{
0: [{
'registry': [{
'credentials': [{
'password': [
('value does not match regex '
"'^[{$]+[a-z0-9A-Z]+[}]*$'"),
]
}]
}]
}]
}]
}
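    # The regex in the expected error above only admits environment-variable
    # style values such as "$BAR" or "${BAR}", so the literal "foo" password
    # in the fixture is what pre_validate is expected to reject here.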
assert x == schema_v2.pre_validate(_stream(), _env, _keep_string)
@pytest.fixture
def _model_molecule_env_errors_section_data():
return """
---
dependency:
name: $MOLECULE_DEPENDENCY_NAME
driver:
name: $MOLECULE_DRIVER_NAME
lint:
name: $MOLECULE_LINT_NAME
platforms:
- name: instance
image: centos:latest
networks:
- name: foo
- name: bar
provisioner:
name: $MOLECULE_PROVISIONER_NAME
lint:
name: $MOLECULE_PROVISIONER_LINT_NAME
scenario:
name: $MOLECULE_SCENARIO_NAME
verifier:
name: $MOLECULE_VERIFIER_NAME
lint:
name: $MOLECULE_VERIFIER_LINT_NAME
""".strip()
@pytest.mark.parametrize('_stream',
[(_model_molecule_env_errors_section_data)])
def test_has_errors_when_molecule_env_var_referenced_in_unallowed_sections(
_stream, _env, _keep_string):
x = {
'scenario': [{
'name':
['cannot reference $MOLECULE special variables in this section']
}],
'lint': [{
'name': [
'cannot reference $MOLECULE special variables in this section',
'unallowed value $MOLECULE_LINT_NAME'
]
}],
'driver': [{
'name': [
'cannot reference $MOLECULE special variables in this section',
'unallowed value $MOLECULE_DRIVER_NAME'
]
}],
'dependency': [{
'name': [
'cannot reference $MOLECULE special variables in this section',
'unallowed value $MOLECULE_DEPENDENCY_NAME'
]
}],
'verifier': [{
'lint': [{
'name':
[('cannot reference $MOLECULE special variables in this '
'section'), 'unallowed value $MOLECULE_VERIFIER_LINT_NAME']
}],
'name': [
'cannot reference $MOLECULE special variables in this section',
'unallowed value $MOLECULE_VERIFIER_NAME'
]
}],
'provisioner': [{
'lint': [{
'name':
[('cannot reference $MOLECULE special variables in this '
'section'),
'unallowed value $MOLECULE_PROVISIONER_LINT_NAME']
}],
'name': [
'cannot reference $MOLECULE special variables in this section',
'unallowed value $MOLECULE_PROVISIONER_NAME'
]
}]
}
assert x == schema_v2.pre_validate(_stream(), _env, _keep_string)
|
import os
import unittest
import mock
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import nccl_benchmark
from perfkitbenchmarker.sample import Sample
class NcclBenchmarkTest(unittest.TestCase, test_util.SamplesTestMixin):
def setUp(self):
super(NcclBenchmarkTest, self).setUp()
path = os.path.join(os.path.dirname(__file__), '..', 'data',
'nccl_output.txt')
with open(path) as fp:
self.contents = fp.read()
@mock.patch('time.time', mock.MagicMock(return_value=1550279509.59))
def testNcclResults(self):
samples, bandwidth = nccl_benchmark.MakeSamplesFromOutput({}, self.contents)
metadata = {
'nThread': '1',
'nGpus': '1',
'minBytes': '8',
'maxBytes': '8589934592',
'step': '2(factor)',
'warmup_iters': '5',
'iters': '100',
'validation': '1',
'Rank 0': 'Pid 40529 on ip-172-31-20-44 device 0 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 1': 'Pid 40530 on ip-172-31-20-44 device 1 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 2': 'Pid 40531 on ip-172-31-20-44 device 2 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 3': 'Pid 40532 on ip-172-31-20-44 device 3 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 4': 'Pid 40533 on ip-172-31-20-44 device 4 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 5': 'Pid 40534 on ip-172-31-20-44 device 5 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 6': 'Pid 40535 on ip-172-31-20-44 device 6 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 7': 'Pid 40536 on ip-172-31-20-44 device 7 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 8': 'Pid 25655 on ip-172-31-26-32 device 0 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 9': 'Pid 25656 on ip-172-31-26-32 device 1 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 10': 'Pid 25657 on ip-172-31-26-32 device 2 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 11': 'Pid 25658 on ip-172-31-26-32 device 3 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 12': 'Pid 25659 on ip-172-31-26-32 device 4 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 13': 'Pid 25660 on ip-172-31-26-32 device 5 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 14': 'Pid 25661 on ip-172-31-26-32 device 6 [0x00] Tesla '
'V100-SXM2-32GB',
'Rank 15': 'Pid 25664 on ip-172-31-26-32 device 7 [0x00] Tesla '
'V100-SXM2-32GB',
'size': '8',
'count': '2',
'nccl_type': 'float',
'redop': 'sum',
'out_of_place_time': '58.10',
'out_of_place_algbw': '0.00',
'out_of_place_busbw': '0.00',
'out_of_place_error': '4e-07',
'in_place_time': '57.87',
'in_place_algbw': '0.00',
'in_place_busbw': '0.00',
'in_place_error': '4e-07'}
golden = Sample(metric='In place algorithm bandwidth', value=0.0,
unit='GB/s', metadata=metadata)
self.assertIn(golden, samples)
self.assertAlmostEqual(bandwidth, 8.43)
if __name__ == '__main__':
unittest.main()
|
import asyncio
from unittest.mock import Mock, patch
import aiohttp
import pytest
from homeassistant.components import ssdp
from tests.common import mock_coro
async def test_scan_match_st(hass):
"""Test matching based on ST."""
scanner = ssdp.Scanner(hass, {"mock-domain": [{"st": "mock-st"}]})
with patch(
"netdisco.ssdp.scan",
return_value=[
Mock(
st="mock-st",
location=None,
values={"usn": "mock-usn", "server": "mock-server", "ext": ""},
)
],
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {"source": "ssdp"}
assert mock_init.mock_calls[0][2]["data"] == {
ssdp.ATTR_SSDP_ST: "mock-st",
ssdp.ATTR_SSDP_LOCATION: None,
ssdp.ATTR_SSDP_USN: "mock-usn",
ssdp.ATTR_SSDP_SERVER: "mock-server",
ssdp.ATTR_SSDP_EXT: "",
}
@pytest.mark.parametrize(
"key", (ssdp.ATTR_UPNP_MANUFACTURER, ssdp.ATTR_UPNP_DEVICE_TYPE)
)
async def test_scan_match_upnp_devicedesc(hass, aioclient_mock, key):
"""Test matching based on UPnP device description data."""
aioclient_mock.get(
"http://1.1.1.1",
text=f"""
<root>
<device>
<{key}>Paulus</{key}>
</device>
</root>
""",
)
scanner = ssdp.Scanner(hass, {"mock-domain": [{key: "Paulus"}]})
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {"source": "ssdp"}
async def test_scan_not_all_present(hass, aioclient_mock):
"""Test match fails if some specified attributes are not present."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
</device>
</root>
""",
)
scanner = ssdp.Scanner(
hass,
{
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Paulus",
}
]
},
)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert not mock_init.mock_calls
async def test_scan_not_all_match(hass, aioclient_mock):
"""Test match fails if some specified attribute values differ."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
<manufacturer>Paulus</manufacturer>
</device>
</root>
""",
)
scanner = ssdp.Scanner(
hass,
{
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Not-Paulus",
}
]
},
)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert not mock_init.mock_calls
@pytest.mark.parametrize("exc", [asyncio.TimeoutError, aiohttp.ClientError])
async def test_scan_description_fetch_fail(hass, aioclient_mock, exc):
"""Test failing to fetch description."""
aioclient_mock.get("http://1.1.1.1", exc=exc)
scanner = ssdp.Scanner(hass, {})
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
):
await scanner.async_scan(None)
async def test_scan_description_parse_fail(hass, aioclient_mock):
"""Test invalid XML."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>INVALIDXML
""",
)
scanner = ssdp.Scanner(hass, {})
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
):
await scanner.async_scan(None)
|
import json
from pathlib import Path
import pytest
from redbot.pytest.downloader import *
from redbot.cogs.downloader.installable import Installable, InstallableType
from redbot.core import VersionInfo
def test_process_info_file(installable):
for k, v in INFO_JSON.items():
if k == "type":
assert installable.type == InstallableType.COG
elif k in ("min_bot_version", "max_bot_version"):
assert getattr(installable, k) == VersionInfo.from_str(v)
else:
assert getattr(installable, k) == v
def test_process_lib_info_file(library_installable):
for k, v in LIBRARY_INFO_JSON.items():
if k == "type":
assert library_installable.type == InstallableType.SHARED_LIBRARY
elif k in ("min_bot_version", "max_bot_version"):
assert getattr(library_installable, k) == VersionInfo.from_str(v)
elif k == "hidden":
# libraries are always hidden, even if False
assert library_installable.hidden is True
else:
assert getattr(library_installable, k) == v
# noinspection PyProtectedMember
def test_location_is_dir(installable):
assert installable._location.exists()
assert installable._location.is_dir()
# noinspection PyProtectedMember
def test_info_file_is_file(installable):
assert installable._info_file.exists()
assert installable._info_file.is_file()
def test_name(installable):
assert installable.name == "test_cog"
def test_repo_name(installable):
assert installable.repo_name == "test_repo"
def test_serialization(installed_cog):
data = installed_cog.to_json()
cog_name = data["module_name"]
assert cog_name == "test_installed_cog"
|
from unittest.mock import patch, Mock
import arrow
from cryptography import x509
from lemur.plugins.lemur_entrust import plugin
from freezegun import freeze_time
def config_mock(*args):
values = {
"ENTRUST_API_CERT": "-----BEGIN CERTIFICATE-----abc-----END CERTIFICATE-----",
"ENTRUST_API_KEY": False,
"ENTRUST_API_USER": "test",
"ENTRUST_API_PASS": "password",
"ENTRUST_URL": "http",
"ENTRUST_ROOT": None,
"ENTRUST_NAME": "test",
"ENTRUST_EMAIL": "[email protected]",
"ENTRUST_PHONE": "0123456",
"ENTRUST_PRODUCT_ENTRUST": "ADVANTAGE_SSL"
}
return values[args[0]]
@patch("lemur.plugins.lemur_digicert.plugin.current_app")
def test_determine_end_date(mock_current_app):
with freeze_time(time_to_freeze=arrow.get(2016, 11, 3).datetime):
assert arrow.get(2017, 12, 3).format('YYYY-MM-DD') == plugin.determine_end_date(0) # 1 year + 1 month
assert arrow.get(2017, 3, 5).format('YYYY-MM-DD') == plugin.determine_end_date(arrow.get(2017, 3, 5))
assert arrow.get(2017, 12, 3).format('YYYY-MM-DD') == plugin.determine_end_date(arrow.get(2020, 5, 7))
@patch("lemur.plugins.lemur_entrust.plugin.current_app")
def test_process_options(mock_current_app, authority):
mock_current_app.config.get = Mock(side_effect=config_mock)
plugin.determine_end_date = Mock(return_value=arrow.get(2017, 11, 5).format('YYYY-MM-DD'))
authority.name = "Entrust"
names = [u"one.example.com", u"two.example.com", u"three.example.com"]
options = {
"common_name": "example.com",
"owner": "[email protected]",
"description": "test certificate",
"extensions": {"sub_alt_names": {"names": [x509.DNSName(x) for x in names]}},
"organization": "Example, Inc.",
"organizational_unit": "Example Org",
"validity_end": arrow.utcnow().shift(years=1, months=+1),
"authority": authority,
}
expected = {
"signingAlg": "SHA-2",
"eku": "SERVER_AND_CLIENT_AUTH",
"certType": "ADVANTAGE_SSL",
"certExpiryDate": arrow.get(2017, 11, 5).format('YYYY-MM-DD'),
"tracking": {
"requesterName": mock_current_app.config.get("ENTRUST_NAME"),
"requesterEmail": mock_current_app.config.get("ENTRUST_EMAIL"),
"requesterPhone": mock_current_app.config.get("ENTRUST_PHONE")
},
"org": "Example, Inc.",
"clientId": 1
}
client_id = 1
assert expected == plugin.process_options(options, client_id)
|
import asyncio
import json
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPBadRequest
import async_timeout
import voluptuous as vol
from homeassistant.auth.permissions.const import POLICY_READ
from homeassistant.bootstrap import DATA_LOGGING
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
EVENT_TIME_CHANGED,
HTTP_BAD_REQUEST,
HTTP_CREATED,
HTTP_NOT_FOUND,
HTTP_OK,
MATCH_ALL,
URL_API,
URL_API_COMPONENTS,
URL_API_CONFIG,
URL_API_DISCOVERY_INFO,
URL_API_ERROR_LOG,
URL_API_EVENTS,
URL_API_SERVICES,
URL_API_STATES,
URL_API_STREAM,
URL_API_TEMPLATE,
__version__,
)
import homeassistant.core as ha
from homeassistant.exceptions import ServiceNotFound, TemplateError, Unauthorized
from homeassistant.helpers import template
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.network import NoURLAvailableError, get_url
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.helpers.state import AsyncTrackStates
from homeassistant.helpers.system_info import async_get_system_info
_LOGGER = logging.getLogger(__name__)
ATTR_BASE_URL = "base_url"
ATTR_EXTERNAL_URL = "external_url"
ATTR_INTERNAL_URL = "internal_url"
ATTR_LOCATION_NAME = "location_name"
ATTR_INSTALLATION_TYPE = "installation_type"
ATTR_REQUIRES_API_PASSWORD = "requires_api_password"
ATTR_UUID = "uuid"
ATTR_VERSION = "version"
DOMAIN = "api"
STREAM_PING_PAYLOAD = "ping"
STREAM_PING_INTERVAL = 50 # seconds
def setup(hass, config):
"""Register the API with the HTTP interface."""
hass.http.register_view(APIStatusView)
hass.http.register_view(APIEventStream)
hass.http.register_view(APIConfigView)
hass.http.register_view(APIDiscoveryView)
hass.http.register_view(APIStatesView)
hass.http.register_view(APIEntityStateView)
hass.http.register_view(APIEventListenersView)
hass.http.register_view(APIEventView)
hass.http.register_view(APIServicesView)
hass.http.register_view(APIDomainServicesView)
hass.http.register_view(APIComponentsView)
hass.http.register_view(APITemplateView)
if DATA_LOGGING in hass.data:
hass.http.register_view(APIErrorLog)
return True
class APIStatusView(HomeAssistantView):
"""View to handle Status requests."""
url = URL_API
name = "api:status"
@ha.callback
def get(self, request):
"""Retrieve if API is running."""
return self.json_message("API running.")
class APIEventStream(HomeAssistantView):
"""View to handle EventStream requests."""
url = URL_API_STREAM
name = "api:stream"
async def get(self, request):
"""Provide a streaming interface for the event bus."""
if not request["hass_user"].is_admin:
raise Unauthorized()
hass = request.app["hass"]
stop_obj = object()
to_write = asyncio.Queue()
restrict = request.query.get("restrict")
if restrict:
restrict = restrict.split(",") + [EVENT_HOMEASSISTANT_STOP]
async def forward_events(event):
"""Forward events to the open request."""
if event.event_type == EVENT_TIME_CHANGED:
return
if restrict and event.event_type not in restrict:
return
_LOGGER.debug("STREAM %s FORWARDING %s", id(stop_obj), event)
if event.event_type == EVENT_HOMEASSISTANT_STOP:
data = stop_obj
else:
data = json.dumps(event, cls=JSONEncoder)
await to_write.put(data)
response = web.StreamResponse()
response.content_type = "text/event-stream"
await response.prepare(request)
unsub_stream = hass.bus.async_listen(MATCH_ALL, forward_events)
try:
_LOGGER.debug("STREAM %s ATTACHED", id(stop_obj))
# Fire off one message so browsers fire open event right away
await to_write.put(STREAM_PING_PAYLOAD)
while True:
try:
with async_timeout.timeout(STREAM_PING_INTERVAL):
payload = await to_write.get()
if payload is stop_obj:
break
msg = f"data: {payload}\n\n"
_LOGGER.debug("STREAM %s WRITING %s", id(stop_obj), msg.strip())
await response.write(msg.encode("UTF-8"))
except asyncio.TimeoutError:
await to_write.put(STREAM_PING_PAYLOAD)
except asyncio.CancelledError:
_LOGGER.debug("STREAM %s ABORT", id(stop_obj))
finally:
_LOGGER.debug("STREAM %s RESPONSE CLOSED", id(stop_obj))
unsub_stream()
return response
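    # Illustrative only: an admin client consuming this endpoint (registered
    # at URL_API_STREAM) receives a text/event-stream of "data: <event json>"
    # lines, plus a "data: ping" keepalive roughly every STREAM_PING_INTERVAL
    # seconds while the bus is quiet.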
class APIConfigView(HomeAssistantView):
"""View to handle Configuration requests."""
url = URL_API_CONFIG
name = "api:config"
@ha.callback
def get(self, request):
"""Get current configuration."""
return self.json(request.app["hass"].config.as_dict())
class APIDiscoveryView(HomeAssistantView):
"""View to provide Discovery information."""
requires_auth = False
url = URL_API_DISCOVERY_INFO
name = "api:discovery"
async def get(self, request):
"""Get discovery information."""
hass = request.app["hass"]
uuid = await hass.helpers.instance_id.async_get()
system_info = await async_get_system_info(hass)
data = {
ATTR_UUID: uuid,
ATTR_BASE_URL: None,
ATTR_EXTERNAL_URL: None,
ATTR_INTERNAL_URL: None,
ATTR_LOCATION_NAME: hass.config.location_name,
ATTR_INSTALLATION_TYPE: system_info[ATTR_INSTALLATION_TYPE],
# always needs authentication
ATTR_REQUIRES_API_PASSWORD: True,
ATTR_VERSION: __version__,
}
try:
data["external_url"] = get_url(hass, allow_internal=False)
except NoURLAvailableError:
pass
try:
data["internal_url"] = get_url(hass, allow_external=False)
except NoURLAvailableError:
pass
# Set old base URL based on external or internal
data["base_url"] = data["external_url"] or data["internal_url"]
return self.json(data)
class APIStatesView(HomeAssistantView):
"""View to handle States requests."""
url = URL_API_STATES
name = "api:states"
@ha.callback
def get(self, request):
"""Get current states."""
user = request["hass_user"]
entity_perm = user.permissions.check_entity
states = [
state
for state in request.app["hass"].states.async_all()
if entity_perm(state.entity_id, "read")
]
return self.json(states)
class APIEntityStateView(HomeAssistantView):
"""View to handle EntityState requests."""
url = "/api/states/{entity_id}"
name = "api:entity-state"
@ha.callback
def get(self, request, entity_id):
"""Retrieve state of entity."""
user = request["hass_user"]
if not user.permissions.check_entity(entity_id, POLICY_READ):
raise Unauthorized(entity_id=entity_id)
state = request.app["hass"].states.get(entity_id)
if state:
return self.json(state)
return self.json_message("Entity not found.", HTTP_NOT_FOUND)
async def post(self, request, entity_id):
"""Update state of entity."""
if not request["hass_user"].is_admin:
raise Unauthorized(entity_id=entity_id)
hass = request.app["hass"]
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON specified.", HTTP_BAD_REQUEST)
new_state = data.get("state")
if new_state is None:
return self.json_message("No state specified.", HTTP_BAD_REQUEST)
attributes = data.get("attributes")
force_update = data.get("force_update", False)
is_new_state = hass.states.get(entity_id) is None
# Write state
hass.states.async_set(
entity_id, new_state, attributes, force_update, self.context(request)
)
# Read the state back for our response
status_code = HTTP_CREATED if is_new_state else HTTP_OK
resp = self.json(hass.states.get(entity_id), status_code)
resp.headers.add("Location", f"/api/states/{entity_id}")
return resp
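    # Illustrative only: a body this handler accepts would be
    #   {"state": "on", "attributes": {"friendly_name": "Hallway"}}
    # and the response is the resulting state, HTTP 201 for a newly created
    # entity_id and HTTP 200 otherwise, with a Location header pointing back
    # at the entity.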
@ha.callback
def delete(self, request, entity_id):
"""Remove entity."""
if not request["hass_user"].is_admin:
raise Unauthorized(entity_id=entity_id)
if request.app["hass"].states.async_remove(entity_id):
return self.json_message("Entity removed.")
return self.json_message("Entity not found.", HTTP_NOT_FOUND)
class APIEventListenersView(HomeAssistantView):
"""View to handle EventListeners requests."""
url = URL_API_EVENTS
name = "api:event-listeners"
@ha.callback
def get(self, request):
"""Get event listeners."""
return self.json(async_events_json(request.app["hass"]))
class APIEventView(HomeAssistantView):
"""View to handle Event requests."""
url = "/api/events/{event_type}"
name = "api:event"
async def post(self, request, event_type):
"""Fire events."""
if not request["hass_user"].is_admin:
raise Unauthorized()
body = await request.text()
try:
event_data = json.loads(body) if body else None
except ValueError:
return self.json_message(
"Event data should be valid JSON.", HTTP_BAD_REQUEST
)
if event_data is not None and not isinstance(event_data, dict):
return self.json_message(
"Event data should be a JSON object", HTTP_BAD_REQUEST
)
# Special case handling for event STATE_CHANGED
# We will try to convert state dicts back to State objects
if event_type == ha.EVENT_STATE_CHANGED and event_data:
for key in ("old_state", "new_state"):
state = ha.State.from_dict(event_data.get(key))
if state:
event_data[key] = state
request.app["hass"].bus.async_fire(
event_type, event_data, ha.EventOrigin.remote, self.context(request)
)
return self.json_message(f"Event {event_type} fired.")
class APIServicesView(HomeAssistantView):
"""View to handle Services requests."""
url = URL_API_SERVICES
name = "api:services"
async def get(self, request):
"""Get registered services."""
services = await async_services_json(request.app["hass"])
return self.json(services)
class APIDomainServicesView(HomeAssistantView):
"""View to handle DomainServices requests."""
url = "/api/services/{domain}/{service}"
name = "api:domain-services"
async def post(self, request, domain, service):
"""Call a service.
Returns a list of changed states.
"""
hass = request.app["hass"]
body = await request.text()
try:
data = json.loads(body) if body else None
except ValueError:
return self.json_message("Data should be valid JSON.", HTTP_BAD_REQUEST)
with AsyncTrackStates(hass) as changed_states:
try:
await hass.services.async_call(
domain, service, data, True, self.context(request)
)
except (vol.Invalid, ServiceNotFound) as ex:
raise HTTPBadRequest() from ex
return self.json(changed_states)
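    # Illustrative only: POST /api/services/light/turn_on with a JSON body
    # such as {"entity_id": "light.kitchen"} calls the service, blocks until
    # it finishes, and returns the list of states that changed while the call
    # was running.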
class APIComponentsView(HomeAssistantView):
"""View to handle Components requests."""
url = URL_API_COMPONENTS
name = "api:components"
@ha.callback
def get(self, request):
"""Get current loaded components."""
return self.json(request.app["hass"].config.components)
class APITemplateView(HomeAssistantView):
"""View to handle Template requests."""
url = URL_API_TEMPLATE
name = "api:template"
async def post(self, request):
"""Render a template."""
if not request["hass_user"].is_admin:
raise Unauthorized()
try:
data = await request.json()
tpl = template.Template(data["template"], request.app["hass"])
return tpl.async_render(variables=data.get("variables"), parse_result=False)
except (ValueError, TemplateError) as ex:
return self.json_message(
f"Error rendering template: {ex}", HTTP_BAD_REQUEST
)
class APIErrorLog(HomeAssistantView):
"""View to fetch the API error log."""
url = URL_API_ERROR_LOG
name = "api:error_log"
async def get(self, request):
"""Retrieve API error log."""
if not request["hass_user"].is_admin:
raise Unauthorized()
return web.FileResponse(request.app["hass"].data[DATA_LOGGING])
async def async_services_json(hass):
"""Generate services data to JSONify."""
descriptions = await async_get_all_descriptions(hass)
return [{"domain": key, "services": value} for key, value in descriptions.items()]
@ha.callback
def async_events_json(hass):
"""Generate event data to JSONify."""
return [
{"event": key, "listener_count": value}
for key, value in hass.bus.async_listeners().items()
]
|
import subprocess
import logging
import os
import sys
import signal
import configparser
import json
import threading
import time
from argparse import ArgumentParser
import queue as q
logger = logging.getLogger("agent")
collector_logger = logging.getLogger("telegraf")
def signal_handler(sig, frame):
""" required for non-tty python runs to interrupt """
logger.warning("Got signal %s, going to stop", sig)
raise KeyboardInterrupt()
def ignore_handler(sig, frame):
logger.warning("Got signal %s, ignoring", sig)
def set_sig_handler():
uncatchable = ['SIG_DFL', 'SIGSTOP', 'SIGKILL']
ignore = ['SIGCHLD', 'SIGCLD']
all_sig = [s for s in dir(signal) if s.startswith("SIG")]
for sig_name in ignore:
try:
sig_num = getattr(signal, sig_name)
signal.signal(sig_num, ignore_handler)
except Exception:
pass
for sig_name in [s for s in all_sig if s not in (uncatchable + ignore)]:
try:
sig_num = getattr(signal, sig_name)
signal.signal(sig_num, signal_handler)
except Exception as ex:
logger.error("Can't set handler for %s, %s", sig_name, ex)
class DataReader(object):
"""generator reads from source line-by-line"""
def __init__(self, filename, pipe=False):
self.buffer = ""
self.closed = False
self.broken = False
self.pipe = pipe
if not self.pipe:
try:
self.monout = open(filename, 'rb')
except Exception:
logger.error("Can't open source file %s: %s", filename, exc_info=True)
self.broken = True
else:
self.monout = filename
def __iter__(self):
while not self.closed:
if self.broken:
data = ''
else:
data = self.monout.readline().decode('utf8')
if data:
parts = data.rsplit('\n', 1)
if len(parts) > 1:
ready_chunk = self.buffer + parts[0] + '\n'
self.buffer = parts[1]
yield ready_chunk
else:
self.buffer += parts[0]
else:
yield None
if not self.pipe:
self.monout.close()
def close(self):
self.closed = True
class Consolidator(object):
"""generator consolidates data from source, cache it by timestamp"""
def __init__(self, sources):
self.sources = sources
self.results = {}
def append_chunk(self, source, chunk):
try:
data = json.loads(chunk)
except ValueError:
logger.error('unable to decode chunk %s', chunk, exc_info=True)
else:
try:
ts = data['timestamp']
self.results.setdefault(ts, {})
for key, value in data['fields'].items():
if data['name'] == 'diskio':
data['name'] = "{metric_name}-{disk_id}".format(
metric_name=data['name'],
disk_id=data['tags']['name'])
elif data['name'] == 'net':
data['name'] = "{metric_name}-{interface}".format(
metric_name=data['name'],
interface=data['tags']['interface'])
elif data['name'] == 'cpu':
data['name'] = "{metric_name}-{cpu_id}".format(
metric_name=data['name'],
cpu_id=data['tags']['cpu'])
key = data['name'] + "_" + key
if key.endswith('_exec_value'):
key = key.replace('_exec_value', '')
self.results[ts][key] = value
except KeyError:
logger.error(
'Malformed json from source %s: %s',
source,
chunk,
exc_info=True)
except BaseException:
logger.error(
                'Something nasty happened in consolidator work',
exc_info=True)
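    # Worked example (illustrative values): a telegraf line like
    #   {"name": "diskio", "tags": {"name": "sda"},
    #    "fields": {"read_time": 5}, "timestamp": 1500000000}
    # ends up as self.results[1500000000]["diskio-sda_read_time"] = 5 after
    # the renaming above.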
def __iter__(self):
while True:
for s in self.sources:
chunk_limit = 10
chunks_done = 0
chunk = next(s)
while chunk and chunks_done < chunk_limit:
self.append_chunk(s, chunk)
chunk = next(s)
if len(self.results) > 2:
logger.debug(
'Now in buffer: %s', list(self.results.keys()))
dump_seconds = sorted(
list(self.results.keys()))[:-2]
for ready_second in dump_seconds:
yield json.dumps({
ready_second: self.results.pop(ready_second, None)
})
time.sleep(0.5)
class Drain(threading.Thread):
"""
Drain a generator to a destination that answers to put(), in a thread
"""
def __init__(self, source, destination):
super(Drain, self).__init__()
self.source = source
self.destination = destination
self._finished = threading.Event()
self._interrupted = threading.Event()
def run(self):
for item in self.source:
self.destination.put(item)
if self._interrupted.is_set():
break
self._finished.set()
def wait(self, timeout=None):
self._finished.wait(timeout=timeout)
def close(self):
self._interrupted.set()
class AgentWorker(threading.Thread):
def __init__(self, telegraf_path):
super(AgentWorker, self).__init__()
self.working_dir = os.path.dirname(__file__)
self.startups = []
self.startup_processes = []
self.shutdowns = []
self.custom_sources = []
self.daemon = True # Thread auto-shutdown
self.finished = False
self.drain = None
self.drain_stdout = None
self.drain_err = None
self.data_reader = None
self.telegraf_path = telegraf_path
self.results = q.Queue()
self.results_stdout = q.Queue()
self.results_err = q.Queue()
@staticmethod
def __popen(cmnd, shell=False):
        cmd_str = cmnd if isinstance(cmnd, str) else ' '.join(cmnd)
        logger.info('Starting telegraf binary:\n%s', cmd_str)
return subprocess.Popen(
cmnd,
bufsize=0,
preexec_fn=os.setsid,
close_fds=True,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE, )
def read_startup_config(self, cfg_file='agent_startup.cfg'):
try:
config = configparser.RawConfigParser(strict=False)
with open(os.path.join(self.working_dir, cfg_file), 'r') as f:
config.read_file(f)
if config.has_section('startup'):
for option in config.options('startup'):
if option.startswith('cmd'):
self.startups.append(config.get('startup', option))
if config.has_section('shutdown'):
for option in config.options('shutdown'):
if option.startswith('cmd'):
self.shutdowns.append(config.get('shutdown', option))
if config.has_section('source'):
for option in config.options('source'):
if option.startswith('file'):
self.custom_sources.append(
config.get('source', option))
logger.info(
'Successfully loaded startup config.\n'
'Startups: %s\n'
'Shutdowns: %s\n', self.startups, self.shutdowns)
except BaseException:
logger.error(
'Error trying to read agent startup config', exc_info=True)
def run(self):
logger.info("Running startup commands")
for cmnd in self.startups:
logger.debug("Run: %s", cmnd)
# fixme: shell=True is insecure, should save startup script and
# launch directly
proc = self.__popen(cmnd, shell=True)
logger.info('Started with pid %d', proc.pid)
self.startup_processes.append(proc)
logger.info('Starting metrics collector..')
# todo: add identificators into {} for python 2.6
args = [self.telegraf_path, '-config',
'{0}/agent.cfg'.format(self.working_dir)]
self.collector = self.__popen(cmnd=args)
logger.info('Started with pid %d', self.collector.pid)
telegraf_output = self.working_dir + '/monitoring.rawdata'
sources = [telegraf_output] + self.custom_sources
for _ in range(10):
self.collector.poll()
            if self.collector.returncode is None:
logger.info("Waiting for telegraf...")
else:
logger.info(
"Telegraf with pid %d ended with code %d",
self.collector.pid, self.collector.returncode)
if os.path.isfile(telegraf_output):
break
time.sleep(1)
self.drain = Drain(
Consolidator([iter(DataReader(f)) for f in sources]), self.results)
self.drain.start()
self.drain_stdout = Drain(
DataReader(
self.collector.stdout, pipe=True), self.results_stdout)
self.drain_stdout.start()
self.drain_err = Drain(
DataReader(
self.collector.stderr, pipe=True), self.results_err)
self.drain_err.start()
while not self.finished:
for _ in range(self.results.qsize()):
try:
data = self.results.get_nowait()
logger.debug(
'send %s bytes of data to collector', len(data))
sys.stdout.write(str(data) + '\n')
sys.stdout.flush()
except q.Empty:
break
except BaseException:
logger.error(
                        'Something nasty happened trying to send data',
exc_info=True)
for _ in range(self.results_stdout.qsize()):
try:
data = self.results_stdout.get_nowait()
if data:
collector_logger.info("STDOUT: %s", data)
except q.Empty:
break
for _ in range(self.results_err.qsize()):
try:
data = self.results_err.get_nowait()
if data:
collector_logger.info("STDERR: %s", data.rstrip('\n'))
except q.Empty:
break
time.sleep(1)
self.drain.close()
self.drain_stdout.close()
self.drain_err.close()
self.stop()
@staticmethod
def proc_stop(proc, kill=False):
proc.poll()
if proc.returncode is None:
try:
if kill:
logger.info("Killing PID %s", proc.pid)
os.killpg(proc.pid, signal.SIGKILL)
else:
logger.debug("Terminating: %s", proc.pid)
os.killpg(proc.pid, signal.SIGTERM)
proc.wait()
logger.info(
'Retcode for PID %s %s', proc.pid, proc.returncode)
except OSError as ex:
if ex.errno == 3:
logger.info("PID %s already died", proc.pid)
def kill(self):
logger.info("Forced stop")
for proc in self.startup_processes:
self.proc_stop(proc, kill=True)
self.proc_stop(self.collector, kill=True)
def stop(self):
logger.info("Terminating startup commands")
for proc in self.startup_processes:
self.proc_stop(proc)
logger.info('Terminating collector process: %s', self.collector)
self.proc_stop(self.collector)
logger.info("Running shutdown commands")
for cmnd in self.shutdowns:
logger.debug("Run: %s", cmnd)
subprocess.call(cmnd, shell=True)
self.finished = True
logger.info("Worker thread finished")
sys.stderr.write('stopped\n')
def kill_old_agents(telegraf_path):
my_pid = os.getpid()
parent = os.getppid()
logger.info('My pid: {0} Parent pid: {1}'.format(my_pid, parent))
    ps_output = subprocess.check_output(['ps', 'aux']).decode('utf-8', errors='replace')
for line in ps_output.splitlines():
if telegraf_path in line:
pid = int(line.split()[1])
logger.info('Found pid: {0}'.format(pid))
if pid not in [my_pid, parent]:
logger.info('Killing process {0}:\n{1}'.format(pid, line))
os.kill(pid, signal.SIGKILL)
def main():
fname = os.path.dirname(__file__) + "/_agent.log"
logging.basicConfig(
level=logging.DEBUG,
filename=fname,
format='%(asctime)s [%(levelname)s] %(name)s:%(lineno)d %(message)s')
parser = ArgumentParser()
parser.add_argument(
"--telegraf",
dest="telegraf_path",
help="telegraf_path",
default="/tmp/telegraf")
parser.add_argument(
"--host",
dest="hostname_path",
help="telegraf_path",
default="/usr/bin/telegraf")
parser.add_argument(
"-k", "--kill-old",
action="store_true",
dest="kill_old"
)
options = parser.parse_args()
logger.info('Init')
customs_script = os.path.dirname(__file__) + '/agent_customs.sh'
# todo: deprecate
if options.kill_old:
kill_old_agents(options.telegraf_path)
try:
logger.info(
'Trying to make telegraf executable: %s', options.telegraf_path)
# 0o755 compatible with old python versions. 744 is NOT enough
os.chmod(options.telegraf_path, 493)
except OSError:
logger.warning(
'Unable to set %s access rights to execute.',
options.telegraf_path,
exc_info=True)
try:
logger.info(
'Trying to make customs script executable: %s', customs_script)
# 0o755 compatible with old python versions. 744 is NOT enough
os.chmod(customs_script, 493)
except OSError:
logger.warning(
'Unable to set %s access rights to execute.',
customs_script,
exc_info=True)
worker = AgentWorker(options.telegraf_path)
worker.read_startup_config()
logger.info('Starting AgentWorker: %s', worker)
worker.start()
try:
logger.debug("Check for any stdin command for shutdown")
cmd = sys.stdin.readline()
if cmd:
logger.info("Stdin cmd received: %s", cmd)
except KeyboardInterrupt:
logger.debug("Interrupted")
except BaseException:
logger.error(
"Something nasty happened while waiting for stop", exc_info=True)
worker.finished = True
agent_finished = False
while not agent_finished:
try:
            if worker.is_alive():
logger.debug("Join the worker thread, waiting for cleanup")
worker.join(10)
                if worker.is_alive():
logger.error(
"Worker have not finished shutdown in 10 seconds, going to exit anyway"
)
worker.kill()
agent_finished = True
else:
agent_finished = True
except BaseException:
logger.info(
"Something nasty happened while waiting for worker shutdown",
exc_info=True)
if __name__ == '__main__':
set_sig_handler()
main()
|
import unittest, sys
from lxml.tests.common_imports import make_doctest, HelperTestCase
try:
import lxml.html.soupparser
BS_INSTALLED = True
except ImportError:
if 'bs4' in sys.modules or 'BeautifulSoup' in sys.modules:
raise # seems we managed to import BS but not soupparser
BS_INSTALLED = False
from lxml.html import tostring
if BS_INSTALLED:
class SoupParserTestCase(HelperTestCase):
soupparser = lxml.html.soupparser
def test_broken_attribute(self):
html = """\
<html><head></head><body>
<form><input type='text' disabled size='10'></form>
</body></html>
"""
root = self.soupparser.fromstring(html)
self.assertTrue(root.find('.//input').get('disabled') is not None)
def test_empty(self):
tree = self.soupparser.fromstring('')
res = b'''<html></html>'''
self.assertEqual(tostring(tree), res)
def test_text(self):
tree = self.soupparser.fromstring('huhu')
res = b'''<html>huhu</html>'''
self.assertEqual(tostring(tree), res)
def test_body(self):
html = '''<body><p>test</p></body>'''
res = b'''<html><body><p>test</p></body></html>'''
tree = self.soupparser.fromstring(html)
self.assertEqual(tostring(tree), res)
def test_head_body(self):
# HTML tag missing, parser should fix that
html = '<head><title>test</title></head><body><p>test</p></body>'
res = b'<html><head><title>test</title></head><body><p>test</p></body></html>'
tree = self.soupparser.fromstring(html)
self.assertEqual(tostring(tree), res)
def test_wrap_html(self):
# <head> outside <html>, parser should fix that
html = '<head><title>title</test></head><html><body/></html>'
res = b'<html><head><title>title</title></head><body></body></html>'
tree = self.soupparser.fromstring(html)
self.assertEqual(tostring(tree), res)
def test_comment_hyphen(self):
# These are really invalid XML as per specification
# https://www.w3.org/TR/REC-xml/#sec-comments
html = b'<html><!-- comment -- with double-hyphen --></html>'
tree = self.soupparser.fromstring(html)
self.assertEqual(tostring(tree), html)
html = b'<html><!-- comment ends with hyphen ---></html>'
tree = self.soupparser.fromstring(html)
self.assertEqual(tostring(tree), html)
def test_comment_pi(self):
html = '''<!-- comment -->
<?test asdf?>
<head><title>test</title></head><body><p>test</p></body>
<!-- another comment -->'''
res = b'''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
<!-- comment --><?test asdf?><html><head><title>test</title></head><body><p>test</p></body></html><!-- another comment -->'''
tree = self.soupparser.fromstring(html).getroottree()
self.assertEqual(tostring(tree, method='html'), res)
def test_doctype1(self):
# Test document type declaration, comments and PI's
# outside the root
html = \
'''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<!--another comment--><html><head><title>My first HTML document</title></head><body><p>Hello world!</p></body></html><?foo bar>'''
res = \
b'''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<!--another comment--><html><head><title>My first HTML document</title></head><body><p>Hello world!</p></body></html><?foo bar?>'''
tree = self.soupparser.fromstring(html).getroottree()
self.assertEqual(tree.docinfo.public_id, "-//W3C//DTD HTML 4.01//EN")
self.assertEqual(tostring(tree), res)
def test_doctype2(self):
# Test document type declaration, comments and PI's
# outside the root
html = \
'''<!DOCTYPE html PUBLIC "-//IETF//DTD HTML//EN">
<!--another comment--><html><head><title>My first HTML document</title></head><body><p>Hello world!</p></body></html><?foo bar?>'''
res = \
b'''<!DOCTYPE html PUBLIC "-//IETF//DTD HTML//EN">
<!--another comment--><html><head><title>My first HTML document</title></head><body><p>Hello world!</p></body></html><?foo bar?>'''
tree = self.soupparser.fromstring(html).getroottree()
self.assertEqual(tree.docinfo.public_id, "-//IETF//DTD HTML//EN")
self.assertEqual(tostring(tree), res)
def test_doctype_html5(self):
# html 5 doctype declaration
html = b'<!DOCTYPE html>\n<html lang="en"></html>'
tree = self.soupparser.fromstring(html).getroottree()
self.assertTrue(tree.docinfo.public_id is None)
self.assertEqual(tostring(tree), html)
def test_suite():
suite = unittest.TestSuite()
if BS_INSTALLED:
suite.addTests([unittest.makeSuite(SoupParserTestCase)])
if sys.version_info[0] < 3:
suite.addTests([make_doctest('../../../../doc/elementsoup.txt')])
return suite
if __name__ == '__main__':
unittest.main()
|
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_DEVICE_CLASS,
CONF_NAME,
DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTR_FORECAST,
ATTR_ICON,
ATTR_LABEL,
ATTRIBUTION,
COORDINATOR,
DOMAIN,
FORECAST_DAYS,
FORECAST_SENSOR_TYPES,
MANUFACTURER,
NAME,
OPTIONAL_SENSORS,
SENSOR_TYPES,
)
PARALLEL_UPDATES = 1
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add AccuWeather entities from a config_entry."""
name = config_entry.data[CONF_NAME]
coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
sensors = []
for sensor in SENSOR_TYPES:
sensors.append(AccuWeatherSensor(name, sensor, coordinator))
if coordinator.forecast:
for sensor in FORECAST_SENSOR_TYPES:
for day in FORECAST_DAYS:
# Some air quality/allergy sensors are only available for certain
# locations.
if sensor in coordinator.data[ATTR_FORECAST][0]:
sensors.append(
AccuWeatherSensor(name, sensor, coordinator, forecast_day=day)
)
async_add_entities(sensors, False)
class AccuWeatherSensor(CoordinatorEntity):
"""Define an AccuWeather entity."""
def __init__(self, name, kind, coordinator, forecast_day=None):
"""Initialize."""
super().__init__(coordinator)
self._name = name
self.kind = kind
self._device_class = None
self._attrs = {ATTR_ATTRIBUTION: ATTRIBUTION}
self._unit_system = "Metric" if self.coordinator.is_metric else "Imperial"
self.forecast_day = forecast_day
@property
def name(self):
"""Return the name."""
if self.forecast_day is not None:
return f"{self._name} {FORECAST_SENSOR_TYPES[self.kind][ATTR_LABEL]} {self.forecast_day}d"
return f"{self._name} {SENSOR_TYPES[self.kind][ATTR_LABEL]}"
@property
def unique_id(self):
"""Return a unique_id for this entity."""
if self.forecast_day is not None:
return f"{self.coordinator.location_key}-{self.kind}-{self.forecast_day}".lower()
return f"{self.coordinator.location_key}-{self.kind}".lower()
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self.coordinator.location_key)},
"name": NAME,
"manufacturer": MANUFACTURER,
"entry_type": "service",
}
@property
def state(self):
"""Return the state."""
if self.forecast_day is not None:
if (
FORECAST_SENSOR_TYPES[self.kind][ATTR_DEVICE_CLASS]
== DEVICE_CLASS_TEMPERATURE
):
return self.coordinator.data[ATTR_FORECAST][self.forecast_day][
self.kind
]["Value"]
if self.kind in ["WindGustDay", "WindGustNight"]:
return self.coordinator.data[ATTR_FORECAST][self.forecast_day][
self.kind
]["Speed"]["Value"]
if self.kind in ["Grass", "Mold", "Ragweed", "Tree", "UVIndex", "Ozone"]:
return self.coordinator.data[ATTR_FORECAST][self.forecast_day][
self.kind
]["Value"]
return self.coordinator.data[ATTR_FORECAST][self.forecast_day][self.kind]
if self.kind == "Ceiling":
return round(self.coordinator.data[self.kind][self._unit_system]["Value"])
if self.kind == "PressureTendency":
return self.coordinator.data[self.kind]["LocalizedText"].lower()
if SENSOR_TYPES[self.kind][ATTR_DEVICE_CLASS] == DEVICE_CLASS_TEMPERATURE:
return self.coordinator.data[self.kind][self._unit_system]["Value"]
if self.kind == "Precipitation":
return self.coordinator.data["PrecipitationSummary"][self.kind][
self._unit_system
]["Value"]
if self.kind == "WindGust":
return self.coordinator.data[self.kind]["Speed"][self._unit_system]["Value"]
return self.coordinator.data[self.kind]
@property
def icon(self):
"""Return the icon."""
if self.forecast_day is not None:
return FORECAST_SENSOR_TYPES[self.kind][ATTR_ICON]
return SENSOR_TYPES[self.kind][ATTR_ICON]
@property
def device_class(self):
"""Return the device_class."""
if self.forecast_day is not None:
return FORECAST_SENSOR_TYPES[self.kind][ATTR_DEVICE_CLASS]
return SENSOR_TYPES[self.kind][ATTR_DEVICE_CLASS]
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self.forecast_day is not None:
return FORECAST_SENSOR_TYPES[self.kind][self._unit_system]
return SENSOR_TYPES[self.kind][self._unit_system]
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.forecast_day is not None:
if self.kind in ["WindGustDay", "WindGustNight"]:
self._attrs["direction"] = self.coordinator.data[ATTR_FORECAST][
self.forecast_day
][self.kind]["Direction"]["English"]
elif self.kind in ["Grass", "Mold", "Ragweed", "Tree", "UVIndex", "Ozone"]:
self._attrs["level"] = self.coordinator.data[ATTR_FORECAST][
self.forecast_day
][self.kind]["Category"]
return self._attrs
if self.kind == "UVIndex":
self._attrs["level"] = self.coordinator.data["UVIndexText"]
elif self.kind == "Precipitation":
self._attrs["type"] = self.coordinator.data["PrecipitationType"]
return self._attrs
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return bool(self.kind not in OPTIONAL_SENSORS)
|
import asyncio
import voluptuous as vol
from homeassistant.auth.permissions.const import CAT_ENTITIES, POLICY_READ
from homeassistant.components.websocket_api.const import ERR_NOT_FOUND
from homeassistant.const import EVENT_STATE_CHANGED, EVENT_TIME_CHANGED, MATCH_ALL
from homeassistant.core import DOMAIN as HASS_DOMAIN, callback
from homeassistant.exceptions import (
HomeAssistantError,
ServiceNotFound,
TemplateError,
Unauthorized,
)
from homeassistant.helpers import config_validation as cv, entity
from homeassistant.helpers.event import TrackTemplate, async_track_template_result
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.helpers.template import Template
from homeassistant.loader import IntegrationNotFound, async_get_integration
from . import const, decorators, messages
# mypy: allow-untyped-calls, allow-untyped-defs
@callback
def async_register_commands(hass, async_reg):
"""Register commands."""
async_reg(hass, handle_subscribe_events)
async_reg(hass, handle_unsubscribe_events)
async_reg(hass, handle_call_service)
async_reg(hass, handle_get_states)
async_reg(hass, handle_get_services)
async_reg(hass, handle_get_config)
async_reg(hass, handle_ping)
async_reg(hass, handle_render_template)
async_reg(hass, handle_manifest_list)
async_reg(hass, handle_manifest_get)
async_reg(hass, handle_entity_source)
async_reg(hass, handle_subscribe_trigger)
async_reg(hass, handle_test_condition)
def pong_message(iden):
"""Return a pong message."""
return {"id": iden, "type": "pong"}
@callback
@decorators.websocket_command(
{
vol.Required("type"): "subscribe_events",
vol.Optional("event_type", default=MATCH_ALL): str,
}
)
def handle_subscribe_events(hass, connection, msg):
"""Handle subscribe events command."""
# Circular dep
# pylint: disable=import-outside-toplevel
from .permissions import SUBSCRIBE_WHITELIST
event_type = msg["event_type"]
if event_type not in SUBSCRIBE_WHITELIST and not connection.user.is_admin:
raise Unauthorized
if event_type == EVENT_STATE_CHANGED:
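# State-changed events are filtered against the user's per-entity read
# permissions; all other event types are forwarded as-is, except time-changed.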
@callback
def forward_events(event):
"""Forward state changed events to websocket."""
if not connection.user.permissions.check_entity(
event.data["entity_id"], POLICY_READ
):
return
connection.send_message(messages.cached_event_message(msg["id"], event))
else:
@callback
def forward_events(event):
"""Forward events to websocket."""
if event.event_type == EVENT_TIME_CHANGED:
return
connection.send_message(messages.cached_event_message(msg["id"], event))
connection.subscriptions[msg["id"]] = hass.bus.async_listen(
event_type, forward_events
)
connection.send_message(messages.result_message(msg["id"]))
@callback
@decorators.websocket_command(
{
vol.Required("type"): "unsubscribe_events",
vol.Required("subscription"): cv.positive_int,
}
)
def handle_unsubscribe_events(hass, connection, msg):
"""Handle unsubscribe events command."""
subscription = msg["subscription"]
if subscription in connection.subscriptions:
connection.subscriptions.pop(subscription)()
connection.send_message(messages.result_message(msg["id"]))
else:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_NOT_FOUND, "Subscription not found."
)
)
@decorators.websocket_command(
{
vol.Required("type"): "call_service",
vol.Required("domain"): str,
vol.Required("service"): str,
vol.Optional("service_data"): dict,
}
)
@decorators.async_response
async def handle_call_service(hass, connection, msg):
"""Handle call service command."""
blocking = True
if msg["domain"] == HASS_DOMAIN and msg["service"] in ["restart", "stop"]:
blocking = False
try:
await hass.services.async_call(
msg["domain"],
msg["service"],
msg.get("service_data"),
blocking,
connection.context(msg),
)
connection.send_message(
messages.result_message(msg["id"], {"context": connection.context(msg)})
)
except ServiceNotFound as err:
if err.domain == msg["domain"] and err.service == msg["service"]:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_NOT_FOUND, "Service not found."
)
)
else:
connection.send_message(
messages.error_message(
msg["id"], const.ERR_HOME_ASSISTANT_ERROR, str(err)
)
)
except HomeAssistantError as err:
connection.logger.exception(err)
connection.send_message(
messages.error_message(msg["id"], const.ERR_HOME_ASSISTANT_ERROR, str(err))
)
except Exception as err: # pylint: disable=broad-except
connection.logger.exception(err)
connection.send_message(
messages.error_message(msg["id"], const.ERR_UNKNOWN_ERROR, str(err))
)
@callback
@decorators.websocket_command({vol.Required("type"): "get_states"})
def handle_get_states(hass, connection, msg):
"""Handle get states command."""
if connection.user.permissions.access_all_entities("read"):
states = hass.states.async_all()
else:
entity_perm = connection.user.permissions.check_entity
states = [
state
for state in hass.states.async_all()
if entity_perm(state.entity_id, "read")
]
connection.send_message(messages.result_message(msg["id"], states))
@decorators.websocket_command({vol.Required("type"): "get_services"})
@decorators.async_response
async def handle_get_services(hass, connection, msg):
"""Handle get services command."""
descriptions = await async_get_all_descriptions(hass)
connection.send_message(messages.result_message(msg["id"], descriptions))
@callback
@decorators.websocket_command({vol.Required("type"): "get_config"})
def handle_get_config(hass, connection, msg):
"""Handle get config command."""
connection.send_message(messages.result_message(msg["id"], hass.config.as_dict()))
@decorators.websocket_command({vol.Required("type"): "manifest/list"})
@decorators.async_response
async def handle_manifest_list(hass, connection, msg):
"""Handle integrations command."""
integrations = await asyncio.gather(
*[
async_get_integration(hass, domain)
for domain in hass.config.components
# Filter out platforms.
if "." not in domain
]
)
connection.send_result(
msg["id"], [integration.manifest for integration in integrations]
)
@decorators.websocket_command(
{vol.Required("type"): "manifest/get", vol.Required("integration"): str}
)
@decorators.async_response
async def handle_manifest_get(hass, connection, msg):
"""Handle integrations command."""
try:
integration = await async_get_integration(hass, msg["integration"])
connection.send_result(msg["id"], integration.manifest)
except IntegrationNotFound:
connection.send_error(msg["id"], const.ERR_NOT_FOUND, "Integration not found")
@callback
@decorators.websocket_command({vol.Required("type"): "ping"})
def handle_ping(hass, connection, msg):
"""Handle ping command."""
connection.send_message(pong_message(msg["id"]))
@decorators.websocket_command(
{
vol.Required("type"): "render_template",
vol.Required("template"): str,
vol.Optional("entity_ids"): cv.entity_ids,
vol.Optional("variables"): dict,
vol.Optional("timeout"): vol.Coerce(float),
}
)
@decorators.async_response
async def handle_render_template(hass, connection, msg):
"""Handle render_template command."""
template_str = msg["template"]
template = Template(template_str, hass)
variables = msg.get("variables")
timeout = msg.get("timeout")
info = None
if timeout:
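# Render once with the timeout applied so templates that raise or run too
# long are rejected before any listener is set up.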
try:
timed_out = await template.async_render_will_timeout(timeout)
except TemplateError as ex:
connection.send_error(msg["id"], const.ERR_TEMPLATE_ERROR, str(ex))
return
if timed_out:
connection.send_error(
msg["id"],
const.ERR_TEMPLATE_ERROR,
f"Exceeded maximum execution time of {timeout}s",
)
return
@callback
def _template_listener(event, updates):
nonlocal info
track_template_result = updates.pop()
result = track_template_result.result
if isinstance(result, TemplateError):
connection.send_error(msg["id"], const.ERR_TEMPLATE_ERROR, str(result))
return
connection.send_message(
messages.event_message(
msg["id"], {"result": result, "listeners": info.listeners} # type: ignore
)
)
try:
info = async_track_template_result(
hass,
[TrackTemplate(template, variables)],
_template_listener,
raise_on_template_error=True,
)
except TemplateError as ex:
connection.send_error(msg["id"], const.ERR_TEMPLATE_ERROR, str(ex))
return
connection.subscriptions[msg["id"]] = info.async_remove
connection.send_result(msg["id"])
hass.loop.call_soon_threadsafe(info.async_refresh)
@callback
@decorators.websocket_command(
{vol.Required("type"): "entity/source", vol.Optional("entity_id"): [cv.entity_id]}
)
def handle_entity_source(hass, connection, msg):
"""Handle entity source command."""
raw_sources = entity.entity_sources(hass)
entity_perm = connection.user.permissions.check_entity
if "entity_id" not in msg:
if connection.user.permissions.access_all_entities("read"):
sources = raw_sources
else:
sources = {
entity_id: source
for entity_id, source in raw_sources.items()
if entity_perm(entity_id, "read")
}
connection.send_message(messages.result_message(msg["id"], sources))
return
sources = {}
for entity_id in msg["entity_id"]:
if not entity_perm(entity_id, "read"):
raise Unauthorized(
context=connection.context(msg),
permission=POLICY_READ,
perm_category=CAT_ENTITIES,
)
source = raw_sources.get(entity_id)
if source is None:
connection.send_error(msg["id"], ERR_NOT_FOUND, "Entity not found")
return
sources[entity_id] = source
connection.send_result(msg["id"], sources)
@callback
@decorators.websocket_command(
{
vol.Required("type"): "subscribe_trigger",
vol.Required("trigger"): cv.TRIGGER_SCHEMA,
vol.Optional("variables"): dict,
}
)
@decorators.require_admin
@decorators.async_response
async def handle_subscribe_trigger(hass, connection, msg):
"""Handle subscribe trigger command."""
# Circular dep
# pylint: disable=import-outside-toplevel
from homeassistant.helpers import trigger
trigger_config = await trigger.async_validate_trigger_config(hass, msg["trigger"])
@callback
def forward_triggers(variables, context=None):
"""Forward events to websocket."""
connection.send_message(
messages.event_message(
msg["id"], {"variables": variables, "context": context}
)
)
connection.subscriptions[msg["id"]] = (
await trigger.async_initialize_triggers(
hass,
trigger_config,
forward_triggers,
const.DOMAIN,
const.DOMAIN,
connection.logger.log,
variables=msg.get("variables"),
)
) or (
# Some triggers won't return an unsub function. Since the caller expects
# a subscription, we're going to fake one.
lambda: None
)
connection.send_result(msg["id"])
@decorators.websocket_command(
{
vol.Required("type"): "test_condition",
vol.Required("condition"): cv.CONDITION_SCHEMA,
vol.Optional("variables"): dict,
}
)
@decorators.require_admin
@decorators.async_response
async def handle_test_condition(hass, connection, msg):
"""Handle test condition command."""
# Circular dep
# pylint: disable=import-outside-toplevel
from homeassistant.helpers import condition
check_condition = await condition.async_from_config(hass, msg["condition"])
connection.send_result(
msg["id"], {"result": check_condition(hass, msg.get("variables"))}
)
|
from datetime import timedelta
from urllib.parse import urlparse
import av
import pytest
from homeassistant.components.stream import request_stream
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed
from tests.components.stream.common import generate_h264_video, preload_stream
@pytest.mark.skip("Flaky in CI")
async def test_hls_stream(hass, hass_client):
"""
Test hls stream.
Purposefully not mocking anything here to test full
integration with the stream component.
"""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Fetch init
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
init_url = playlist_url + "/init.mp4"
init_response = await http_client.get(init_url)
assert init_response.status == 200
# Fetch segment
playlist = await playlist_response.text()
playlist_url = "/".join(parsed_url.path.split("/")[:-1])
segment_url = playlist_url + playlist.splitlines()[-1][1:]
segment_response = await http_client.get(segment_url)
assert segment_response.status == 200
# Stop stream, if it hasn't quit already
stream.stop()
# Ensure playlist not accessible after stream ends
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
@pytest.mark.skip("Flaky in CI")
async def test_stream_timeout(hass, hass_client):
"""Test hls stream timeout."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
stream.add_provider("hls")
# Request stream
url = request_stream(hass, source)
http_client = await hass_client()
# Fetch playlist
parsed_url = urlparse(url)
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
# Fetch again to reset timer
playlist_response = await http_client.get(parsed_url.path)
assert playlist_response.status == 200
# Wait 5 minutes
future = dt_util.utcnow() + timedelta(minutes=5)
async_fire_time_changed(hass, future)
# Ensure playlist not accessible
fail_response = await http_client.get(parsed_url.path)
assert fail_response.status == HTTP_NOT_FOUND
@pytest.mark.skip("Flaky in CI")
async def test_stream_ended(hass):
"""Test hls stream packets ended."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = generate_h264_video()
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
# Request stream
request_stream(hass, source)
# Run it dead
while True:
segment = await track.recv()
if segment is None:
break
segments = segment.sequence
assert segments > 1
assert not track.get_segment()
# Stop stream, if it hasn't quit already
stream.stop()
async def test_stream_keepalive(hass):
"""Test hls stream retries the stream when keepalive=True."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo HLS track
source = "test_stream_keepalive_source"
stream = preload_stream(hass, source)
track = stream.add_provider("hls")
track.num_segments = 2
cur_time = 0
def time_side_effect():
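# Fake time source for the stream worker: every call advances the clock by
# 40 seconds, and once 80 seconds have "elapsed" keepalive is switched off so
# the retry loop (and its thread) can exit.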
nonlocal cur_time
if cur_time >= 80:
stream.keepalive = False # Thread should exit and be joinable.
cur_time += 40
return cur_time
with patch("av.open") as av_open, patch(
"homeassistant.components.stream.worker.time"
) as mock_time:
av_open.side_effect = av.error.InvalidDataError(-2, "error")
mock_time.time.side_effect = time_side_effect
# Request stream
request_stream(hass, source, keepalive=True)
stream._thread.join()
stream._thread = None
assert av_open.call_count == 2
# Stop stream, if it hasn't quit already
stream.stop()
|
import pytest
import sys
import os
import socket
import time
import logging
import plumbum
from copy import deepcopy
from plumbum import RemotePath, SshMachine, CommandNotFound, ProcessExecutionError, local, ProcessTimedOut, NOHUP
from plumbum.lib import six
from plumbum.machines.session import IncorrectLogin, HostPublicKeyUnknown
from plumbum._testtools import skip_without_chown, skip_on_windows
try:
import paramiko
except ImportError:
paramiko = None
else:
from plumbum.machines.paramiko_machine import ParamikoMachine
def strassert(one, two):
assert str(one) == str(two)
#TEST_HOST = "192.168.1.143"
TEST_HOST = "127.0.0.1"
if TEST_HOST not in ("::1", "127.0.0.1", "localhost"):
plumbum.local.env.path.append("c:\\Program Files\\Git\\bin")
@pytest.fixture(scope='session')
def sshpass():
try:
return plumbum.local['sshpass']
except CommandNotFound:
pytest.skip('Test requires sshpass')
@skip_on_windows
def test_connection():
SshMachine(TEST_HOST)
def test_incorrect_login(sshpass):
def connect():
SshMachine(TEST_HOST, password='swordfish',
ssh_opts=['-o', 'PubkeyAuthentication=no',
'-o', 'PreferredAuthentications=password'])
pytest.raises(IncorrectLogin, connect)
def test_hostpubkey_unknown(sshpass):
def connect():
SshMachine(TEST_HOST, password='swordfish',
ssh_opts=['-o', 'UserKnownHostsFile=/dev/null',
'-o', 'UpdateHostKeys=no'])
pytest.raises(HostPublicKeyUnknown, connect)
@skip_on_windows
class TestRemotePath:
def _connect(self):
return SshMachine(TEST_HOST)
def test_name(self):
name = RemotePath(self._connect(), "/some/long/path/to/file.txt").name
assert isinstance(name, six.string_types)
assert "file.txt" == str(name)
def test_dirname(self):
name = RemotePath(self._connect(), "/some/long/path/to/file.txt").dirname
assert isinstance(name, RemotePath)
assert "/some/long/path/to" == str(name)
def test_uri(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
assert "ftp://" == p1.as_uri('ftp')[:6]
assert "ssh://" == p1.as_uri('ssh')[:6]
assert "/some/long/path/to/file.txt" == p1.as_uri()[-27:]
def test_stem(self):
p = RemotePath(self._connect(), "/some/long/path/to/file.txt")
assert p.stem == "file"
p = RemotePath(self._connect(), "/some/long/path/")
assert p.stem == "path"
def test_suffix(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = RemotePath(self._connect(), "file.tar.gz")
assert p1.suffix == ".txt"
assert p1.suffixes == [".txt"]
assert p2.suffix == ".gz"
assert p2.suffixes == [".tar",".gz"]
strassert(p1.with_suffix(".tar.gz"), RemotePath(self._connect(), "/some/long/path/to/file.tar.gz"))
strassert(p2.with_suffix(".other"), RemotePath(self._connect(), "file.tar.other"))
strassert(p2.with_suffix(".other", 2), RemotePath(self._connect(), "file.other"))
strassert(p2.with_suffix(".other", 0), RemotePath(self._connect(), "file.tar.gz.other"))
strassert(p2.with_suffix(".other", None), RemotePath(self._connect(), "file.other"))
def test_newname(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = RemotePath(self._connect(), "file.tar.gz")
strassert(p1.with_name("something.tar"), RemotePath(self._connect(), "/some/long/path/to/something.tar"))
strassert(p2.with_name("something.tar"), RemotePath(self._connect(), "something.tar"))
@skip_without_chown
def test_chown(self):
with self._connect() as rem:
with rem.tempdir() as dir:
p = dir / "foo.txt"
p.write(six.b("hello"))
# because we're connected to localhost, we expect UID and GID to be the same
assert p.uid == os.getuid()
assert p.gid == os.getgid()
p.chown(p.uid.name)
assert p.uid == os.getuid()
def test_parent(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = p1.parent
assert str(p2) == "/some/long/path/to"
def test_mkdir(self):
# (identical to test_local.TestLocalPath.test_mkdir)
with self._connect() as rem:
with rem.tempdir() as tmp:
(tmp / "a").mkdir(exist_ok=False, parents=False)
assert (tmp / "a").exists()
assert (tmp / "a").is_dir()
(tmp / "a").mkdir(exist_ok=True, parents=False)
(tmp / "a").mkdir(exist_ok=True, parents=True)
with pytest.raises(OSError):
(tmp / "a").mkdir(exist_ok=False, parents=False)
with pytest.raises(OSError):
(tmp / "a").mkdir(exist_ok=False, parents=True)
(tmp / "b" / "bb").mkdir(exist_ok=False, parents=True)
assert (tmp / "b" / "bb").exists()
assert (tmp / "b" / "bb").is_dir()
assert not tmp.exists()
@pytest.mark.xfail(reason="mkdir's mode argument is not yet implemented "\
"for remote paths", strict=True)
def test_mkdir_mode(self):
# (identical to test_local.TestLocalPath.test_mkdir_mode)
with self._connect() as rem:
with rem.tempdir() as tmp:
# just verify that mode argument works the same way it does for
# Python's own os.mkdir, which takes into account the umask
# (different from shell mkdir mode argument!); umask on my
# system is 022 by default, so 033 is ok for testing this
try:
(tmp / "pb_333").mkdir(exist_ok=False, parents=False,
mode=0o333)
rem.python('-c', 'import os; os.mkdir({0}, 0o333)'.format(
repr(str(tmp / "py_333"))))
pb_final_mode = oct((tmp / "pb_333").stat().st_mode)
py_final_mode = oct((tmp / "py_333").stat().st_mode)
assert pb_final_mode == py_final_mode
finally:
# we have to revert this so the tempdir deletion works
if (tmp / "pb_333").exists():
(tmp / "pb_333").chmod(0o777)
if (tmp / "py_333").exists():
(tmp / "py_333").chmod(0o777)
assert not tmp.exists()
def test_copy(self):
"""
tests `RemotePath.copy` for the following scenarios:
* copying a simple file from `file_a` to `copy_of_a` succeeds
* copying file `file_a` into a directory `a_dir/copy_of_a` succeeds
* copying a directory `a_dir` over an existing directory path with
`override=False` fails
* copying a directory `a_dir` over an existing directory path with
`override=True` succeeds
"""
with self._connect() as rem:
with rem.tempdir() as tmp:
# setup a file and make sure it exists...
(tmp / "file_a").touch()
assert (tmp / "file_a").exists()
assert (tmp / "file_a").is_file()
# setup a directory for copying into...
(tmp / "a_dir").mkdir(exist_ok=False, parents=False)
assert (tmp / "a_dir").exists()
assert (tmp / "a_dir").is_dir()
# setup a 2nd directory for testing `override=False`
(tmp / "b_dir").mkdir(exist_ok=False, parents=False)
assert (tmp / "b_dir").exists()
assert (tmp / "b_dir").is_dir()
# copying a simple file
(tmp / "file_a").copy(tmp / "copy_of_a")
assert (tmp / "copy_of_a").exists()
assert (tmp / "copy_of_a").is_file()
# copying into a directory
(tmp / "file_a").copy(tmp / "a_dir/copy_of_a")
assert (tmp / "a_dir/copy_of_a").exists()
assert (tmp / "a_dir/copy_of_a").is_file()
# copying a directory on top of an existing directory using
# `override=False` (should fail with TypeError)
with pytest.raises(TypeError):
(tmp / "a_dir").copy(tmp / "b_dir", override=False)
# copying a directory on top of an existing directory using
# `override=True` (should copy transparently)
(tmp / "a_dir").copy(tmp / "b_dir", override=True)
assert "copy_of_a" in (tmp / "b_dir")
assert not tmp.exists()
class BaseRemoteMachineTest(object):
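# Minimal echo server run via the remote Python interpreter: it prints the
# port it listens on, accepts one connection and replies b"hello " + the data.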
TUNNEL_PROG = r"""import sys, socket
s = socket.socket()
s.bind(("", 0))
s.listen(1)
sys.stdout.write("{0}\n".format( s.getsockname()[1]))
sys.stdout.flush()
s2, _ = s.accept()
data = s2.recv(100)
s2.send(b"hello " + data)
s2.close()
s.close()
"""
def test_basic(self):
with self._connect() as rem:
r_ssh = rem["ssh"]
r_ls = rem["ls"]
r_grep = rem["grep"]
lines = r_ls("-a").splitlines()
assert ".bashrc" in lines or ".bash_profile" in lines
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
cmd = r_ssh["localhost", "cd", rem.cwd, "&&", r_ls, "|", r_grep["\\.py"]]
assert "'|'" in str(cmd)
assert "test_remote.py" in cmd()
assert "test_remote.py" in [f.name for f in rem.cwd // "*.py"]
# Testing for #271
def test_double_chdir(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
rem["ls"]()
with rem.cwd("/tmp"):
rem["pwd"]()
def test_glob(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
filenames = [f.name for f in rem.cwd // ("*.py", "*.bash")]
assert "test_remote.py" in filenames
assert "slow_process.bash" in filenames
def test_glob_spaces(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
filenames = [f.name for f in rem.cwd // ("*space.txt")]
assert "file with space.txt" in filenames
filenames = [f.name for f in rem.cwd // ("*with space.txt")]
assert "file with space.txt" in filenames
def test_cmd(self):
with self._connect() as rem:
rem.cmd.ls("/tmp")
@pytest.mark.usefixtures("testdir")
def test_download_upload(self):
with self._connect() as rem:
rem.upload("test_remote.py", "/tmp")
r_ls = rem["ls"]
r_rm = rem["rm"]
assert "test_remote.py" in r_ls("/tmp").splitlines()
rem.download("/tmp/test_remote.py", "/tmp/test_download.txt")
r_rm("/tmp/test_remote.py")
r_rm("/tmp/test_download.txt")
def test_session(self):
with self._connect() as rem:
sh = rem.session()
for _ in range(4):
_, out, _ = sh.run("ls -a")
assert ".bashrc" in out or ".bash_profile" in out
def test_env(self):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError):
rem.python("-c", "import os;os.environ['FOOBAR72']")
with rem.env(FOOBAR72 = "lala"):
with rem.env(FOOBAR72 = "baba"):
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == "baba"
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == "lala"
# path manipulation
with pytest.raises(CommandNotFound):
rem.which("dummy-executable")
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
rem.env.path.insert(0, rem.cwd / "not-in-path")
p = rem.which("dummy-executable")
assert p == rem.cwd / "not-in-path" / "dummy-executable"
@pytest.mark.parametrize(
"env",
["lala", "-Wl,-O2 -Wl,--sort-common", "{{}}", "''", "!@%_-+=:", "'",
"`", "$", "\\"])
def test_env_special_characters(self, env):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError):
rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
rem.env["FOOBAR72"] = env
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == env
def test_read_write(self):
with self._connect() as rem:
with rem.tempdir() as dir:
assert dir.is_dir()
data = six.b("hello world")
(dir / "foo.txt").write(data)
assert (dir / "foo.txt").read() == data
assert not dir.exists()
def test_contains(self):
with self._connect() as rem:
assert "ls" in rem
def test_iter_lines_timeout(self):
with self._connect() as rem:
try:
for i, (out, err) in enumerate(rem["ping"]["-i", 0.5, "127.0.0.1"].popen().iter_lines(timeout=4)):
print("out:", out)
print("err:", err)
except NotImplementedError:
try:
pytest.skip(str(sys.exc_info()[1]))
except AttributeError:
return
except ProcessTimedOut:
assert i > 3
else:
pytest.fail("Expected a timeout")
def test_iter_lines_error(self):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError) as ex:
for i, lines in enumerate(rem["ls"]["--bla"].popen()):
pass
assert i == 1
assert "/bin/ls: " in ex.value.stderr
def test_touch(self):
with self._connect() as rem:
rfile = rem.cwd / 'sillyfile'
assert not rfile.exists()
rfile.touch()
assert rfile.exists()
rfile.delete()
@skip_on_windows
class TestRemoteMachine(BaseRemoteMachineTest):
def _connect(self):
return SshMachine(TEST_HOST)
def test_tunnel(self):
with self._connect() as rem:
p = (rem.python["-u"] << self.TUNNEL_PROG).popen()
try:
port = int(p.stdout.readline().decode("ascii").strip())
except ValueError:
print(p.communicate())
raise
with rem.tunnel(12222, port) as tun:
s = socket.socket()
s.connect(("localhost", 12222))
s.send(six.b("world"))
data = s.recv(100)
s.close()
print(p.communicate())
assert data == b"hello world"
def test_get(self):
with self._connect() as rem:
assert str(rem['ls']) == str(rem.get('ls'))
assert str(rem['ls']) == str(rem.get('not_a_valid_process_234','ls'))
assert 'ls' in rem
assert 'not_a_valid_process_234' not in rem
def test_list_processes(self):
with self._connect() as rem:
assert list(rem.list_processes())
def test_pgrep(self):
with self._connect() as rem:
assert list(rem.pgrep("ssh"))
@pytest.mark.xfail(reason="Randomly does not work on Travis, not sure why")
def test_nohup(self):
with self._connect() as rem:
sleep = rem["sleep"]
sleep["5.793817"] & NOHUP(stdout = None, append=False)
time.sleep(.5)
print(rem["ps"]("aux"))
assert list(rem.pgrep("5.793817"))
time.sleep(6)
assert not list(rem.pgrep("5.793817"))
def test_bound_env(self):
with self._connect() as rem:
printenv = rem["printenv"]
with rem.env(FOO = "hello"):
assert printenv.with_env(BAR = "world")("FOO") == "hello\n"
assert printenv.with_env(BAR = "world")("BAR") == "world\n"
assert printenv.with_env(FOO = "sea", BAR = "world")("FOO") == "sea\n"
assert printenv.with_env(FOO = "sea", BAR = "world")("BAR") == "world\n"
@pytest.mark.skipif('useradd' not in local,
reason = "System does not have useradd (Mac?)")
def test_sshpass(self):
with local.as_root():
local["useradd"]("-m", "-b", "/tmp", "testuser")
try:
with local.as_root():
try:
(local["passwd"] << "123456")("--stdin", "testuser")
except ProcessExecutionError:
# some versions of passwd don't support --stdin, nothing to do in this case
logging.warning("passwd failed")
return
with SshMachine("localhost", user = "testuser", password = "123456") as rem:
assert rem["pwd"]().strip() == "/tmp/testuser"
finally:
with local.as_root():
local["userdel"]("-r", "testuser")
@skip_on_windows
class TestParamikoMachine(BaseRemoteMachineTest):
def _connect(self):
if paramiko is None:
pytest.skip("System does not have paramiko installed")
return ParamikoMachine(TEST_HOST, missing_host_policy = paramiko.AutoAddPolicy())
def test_tunnel(self):
with self._connect() as rem:
p = rem.python["-c", self.TUNNEL_PROG].popen()
try:
port = int(p.stdout.readline().strip())
except ValueError:
print(p.communicate())
raise
s = rem.connect_sock(port)
s.send(b"world")
data = s.recv(100)
s.close()
print(p.communicate())
assert data == b"hello world"
def test_piping(self):
with self._connect() as rem:
try:
cmd = rem["ls"] | rem["cat"]
except NotImplementedError:
pass
else:
pytest.fail("Should not pipe")
@pytest.mark.xfail(reason="Not working yet")
def test_encoding(self):
with self._connect() as rem:
unicode_half = b"\xc2\xbd".decode("utf8")
ret = rem['bash']("-c", 'echo -e "\xC2\xBD"')
assert ret == "%s\n" % unicode_half
ret = list(rem['bash']["-c", 'echo -e "\xC2\xBD"'].popen())
assert ret == [["%s\n" % unicode_half, None]]
def test_path_open_remote_write_local_read(self):
with self._connect() as rem:
# TODO: once Python 2.6 support is dropped, the nested
# with-statements below can be combined using "with x as a, y as b"
with rem.tempdir() as remote_tmpdir:
with local.tempdir() as tmpdir:
assert remote_tmpdir.is_dir()
assert tmpdir.is_dir()
data = six.b("hello world")
with (remote_tmpdir / "bar.txt").open("wb") as f:
f.write(data)
rem.download(
(remote_tmpdir / "bar.txt"),
(tmpdir / "bar.txt")
)
assert (tmpdir / "bar.txt").open("rb").read() == data
assert not remote_tmpdir.exists()
assert not tmpdir.exists()
def test_path_open_local_write_remote_read(self):
with self._connect() as rem:
# TODO: cf. note on Python 2.6 support above
with rem.tempdir() as remote_tmpdir:
with local.tempdir() as tmpdir:
assert remote_tmpdir.is_dir()
assert tmpdir.is_dir()
data = six.b("hello world")
with (tmpdir / "bar.txt").open("wb") as f:
f.write(data)
rem.upload(
(tmpdir / "bar.txt"),
(remote_tmpdir / "bar.txt")
)
assert (remote_tmpdir / "bar.txt").open("rb").read() == data
assert not remote_tmpdir.exists()
assert not tmpdir.exists()
|
import numpy as np
import unittest
import chainer
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.experimental.links import FCISResNet101
from chainercv.experimental.links.model.fcis import FCISTrainChain
from chainercv.utils import mask_to_bbox
from tests.experimental_tests.links_tests.model_tests.fcis_tests.test_fcis \
import _random_array
@testing.parameterize(
{'train': False, 'iter2': True},
{'train': True, 'iter2': False}
)
class TestFCISResNet101(unittest.TestCase):
B = 1
n_fg_class = 20
n_class = n_fg_class + 1
n_anchor = 9
n_train_post_nms = 12
n_test_post_nms = 8
def setUp(self):
proposal_creator_params = {
'n_train_post_nms': self.n_train_post_nms,
'n_test_post_nms': self.n_test_post_nms,
}
self.link = FCISResNet101(
self.n_fg_class, pretrained_model=None,
iter2=self.iter2,
proposal_creator_params=proposal_creator_params)
def check_call(self):
xp = self.link.xp
feat_size = (12, 16)
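# Build the input 16x larger than the feature map, matching the assumed
# feature stride of the ResNet backbone.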
x = chainer.Variable(
xp.random.uniform(
low=-1., high=1.,
size=(self.B, 3, feat_size[0] * 16, feat_size[1] * 16)
).astype(np.float32))
with chainer.using_config('train', self.train):
(roi_ag_seg_scores, roi_ag_locs, roi_cls_scores,
rois, roi_indices) = self.link(x)
n_roi = roi_ag_seg_scores.shape[0]
if self.train:
self.assertGreaterEqual(self.B * self.n_train_post_nms, n_roi)
else:
self.assertGreaterEqual(self.B * self.n_test_post_nms * 2, n_roi)
self.assertIsInstance(roi_ag_seg_scores, chainer.Variable)
self.assertIsInstance(roi_ag_seg_scores.array, xp.ndarray)
self.assertEqual(
roi_ag_seg_scores.shape, (n_roi, 2, 21, 21))
self.assertIsInstance(roi_ag_locs, chainer.Variable)
self.assertIsInstance(roi_ag_locs.array, xp.ndarray)
self.assertEqual(roi_ag_locs.shape, (n_roi, 2, 4))
self.assertIsInstance(roi_cls_scores, chainer.Variable)
self.assertIsInstance(roi_cls_scores.array, xp.ndarray)
self.assertEqual(roi_cls_scores.shape, (n_roi, self.n_class))
self.assertIsInstance(rois, xp.ndarray)
self.assertEqual(rois.shape, (n_roi, 4))
self.assertIsInstance(roi_indices, xp.ndarray)
self.assertEqual(roi_indices.shape, (n_roi,))
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
class TestFCISResNet101Loss(unittest.TestCase):
B = 1
n_fg_class = 20
n_bbox = 3
n_anchor = 9
n_train_post_nms = 12
n_test_post_nms = 8
def setUp(self):
proposal_creator_params = {
'n_train_post_nms': self.n_train_post_nms,
'n_test_post_nms': self.n_test_post_nms,
}
self.model = FCISTrainChain(
FCISResNet101(
self.n_fg_class, pretrained_model=None, iter2=False,
proposal_creator_params=proposal_creator_params))
self.masks = np.random.randint(
0, 2, size=(1, self.n_bbox, 600, 800)).astype(np.bool)
self.labels = np.random.randint(
0, self.n_fg_class, size=(1, self.n_bbox)).astype(np.int32)
self.imgs = _random_array(np, (1, 3, 600, 800))
self.scale = np.array(1.)
def check_call(self, model, imgs, masks, labels, scale):
bboxes = mask_to_bbox(masks[0])[None]
loss = model(imgs, masks, labels, bboxes, scale)
self.assertEqual(loss.shape, ())
def test_call_cpu(self):
self.check_call(
self.model, self.imgs, self.masks, self.labels, self.scale)
@attr.gpu
def test_call_gpu(self):
self.model.to_gpu()
self.check_call(
self.model, cuda.to_gpu(self.imgs),
self.masks, self.labels, self.scale)
@testing.parameterize(*testing.product({
'n_fg_class': [None, 10, 20, 80],
'anchor_scales': [(8, 16, 32), (4, 8, 16, 32)],
'pretrained_model': ['sbd', 'sbd_converted', 'coco', 'coco_converted'],
}))
class TestFCISResNet101Pretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_fg_class': self.n_fg_class,
'anchor_scales': self.anchor_scales,
'pretrained_model': self.pretrained_model,
}
if self.pretrained_model.startswith('sbd'):
valid = self.n_fg_class in [None, 20]
valid = valid and self.anchor_scales == (8, 16, 32)
elif self.pretrained_model.startswith('coco'):
valid = self.n_fg_class in [None, 80]
valid = valid and self.anchor_scales == (4, 8, 16, 32)
if valid:
FCISResNet101(**kwargs)
else:
with self.assertRaises(ValueError):
FCISResNet101(**kwargs)
testing.run_module(__name__, __file__)
|
import logging
import voluptuous as vol
import yeelight
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_HOST, CONF_ID, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import (
CONF_DEVICE,
CONF_MODE_MUSIC,
CONF_MODEL,
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
_async_unique_name,
)
from . import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Yeelight."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Return the options flow."""
return OptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the config flow."""
self._discovered_devices = {}
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if user_input.get(CONF_HOST):
try:
await self._async_try_connect(user_input[CONF_HOST])
return self.async_create_entry(
title=user_input[CONF_HOST],
data=user_input,
)
except CannotConnect:
errors["base"] = "cannot_connect"
except AlreadyConfigured:
return self.async_abort(reason="already_configured")
else:
return await self.async_step_pick_device()
user_input = user_input or {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str}
),
errors=errors,
)
async def async_step_pick_device(self, user_input=None):
"""Handle the step to pick discovered device."""
if user_input is not None:
unique_id = user_input[CONF_DEVICE]
capabilities = self._discovered_devices[unique_id]
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=_async_unique_name(capabilities),
data={CONF_ID: unique_id},
)
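# Collect IDs of devices that already have config entries so discovery
# results for them are skipped below.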
configured_devices = {
entry.data[CONF_ID]
for entry in self._async_current_entries()
if entry.data[CONF_ID]
}
devices_name = {}
# Run 3 times as packets can get lost
for _ in range(3):
devices = await self.hass.async_add_executor_job(yeelight.discover_bulbs)
for device in devices:
capabilities = device["capabilities"]
unique_id = capabilities["id"]
if unique_id in configured_devices:
continue # ignore configured devices
model = capabilities["model"]
host = device["ip"]
name = f"{host} {model} {unique_id}"
self._discovered_devices[unique_id] = capabilities
devices_name[unique_id] = name
# Check if there is at least one device
if not devices_name:
return self.async_abort(reason="no_devices_found")
return self.async_show_form(
step_id="pick_device",
data_schema=vol.Schema({vol.Required(CONF_DEVICE): vol.In(devices_name)}),
)
async def async_step_import(self, user_input=None):
"""Handle import step."""
host = user_input[CONF_HOST]
try:
await self._async_try_connect(host)
except CannotConnect:
_LOGGER.error("Failed to import %s: cannot connect", host)
return self.async_abort(reason="cannot_connect")
except AlreadyConfigured:
return self.async_abort(reason="already_configured")
if CONF_NIGHTLIGHT_SWITCH_TYPE in user_input:
user_input[CONF_NIGHTLIGHT_SWITCH] = (
user_input.pop(CONF_NIGHTLIGHT_SWITCH_TYPE)
== NIGHTLIGHT_SWITCH_TYPE_LIGHT
)
return self.async_create_entry(title=user_input[CONF_NAME], data=user_input)
async def _async_try_connect(self, host):
"""Set up with options."""
for entry in self._async_current_entries():
if entry.data.get(CONF_HOST) == host:
raise AlreadyConfigured
bulb = yeelight.Bulb(host)
try:
capabilities = await self.hass.async_add_executor_job(bulb.get_capabilities)
if capabilities is None: # timeout
_LOGGER.debug("Failed to get capabilities from %s: timeout", host)
else:
_LOGGER.debug("Get capabilities: %s", capabilities)
await self.async_set_unique_id(capabilities["id"])
self._abort_if_unique_id_configured()
return
except OSError as err:
_LOGGER.debug("Failed to get capabilities from %s: %s", host, err)
# Ignore the error since get_capabilities uses UDP discovery packet
# which does not work in all network environments
# Fallback to get properties
try:
await self.hass.async_add_executor_job(bulb.get_properties)
except yeelight.BulbException as err:
_LOGGER.error("Failed to get properties from %s: %s", host, err)
raise CannotConnect from err
_LOGGER.debug("Get properties: %s", bulb.last_properties)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Yeelight."""
def __init__(self, config_entry):
"""Initialize the option flow."""
self._config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle the initial step."""
if user_input is not None:
options = {**self._config_entry.options}
options.update(user_input)
return self.async_create_entry(title="", data=options)
options = self._config_entry.options
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(CONF_MODEL, default=options[CONF_MODEL]): str,
vol.Required(
CONF_TRANSITION,
default=options[CONF_TRANSITION],
): cv.positive_int,
vol.Required(
CONF_MODE_MUSIC, default=options[CONF_MODE_MUSIC]
): bool,
vol.Required(
CONF_SAVE_ON_CHANGE,
default=options[CONF_SAVE_ON_CHANGE],
): bool,
vol.Required(
CONF_NIGHTLIGHT_SWITCH,
default=options[CONF_NIGHTLIGHT_SWITCH],
): bool,
}
),
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class AlreadyConfigured(exceptions.HomeAssistantError):
"""Indicate the ip address is already configured."""
|
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from functools import partial
try:
from urllib.parse import parse_qsl, quote, unquote, urlparse
except ImportError:
from urllib import quote, unquote # noqa
from urlparse import urlparse, parse_qsl # noqa
try:
import ssl
ssl_available = True
except ImportError: # pragma: no cover
ssl_available = False
from .compat import NamedTuple
from ..log import get_logger
safequote = partial(quote, safe='')
logger = get_logger(__name__)
urlparts = NamedTuple('urlparts', [
('scheme', str),
('hostname', str),
('port', int),
('username', str),
('password', str),
('path', str),
('query', Mapping),
])
def parse_url(url):
# type: (str) -> Dict
"""Parse URL into mapping of components."""
scheme, host, port, user, password, path, query = _parse_url(url)
if query:
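# Fold any ssl_* query options into a nested 'ssl' dict, translating
# ssl_cert_reqs strings into the matching ssl module constant.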
keys = [key for key in query.keys() if key.startswith('ssl_')]
for key in keys:
if key == 'ssl_cert_reqs':
query[key] = parse_ssl_cert_reqs(query[key])
if query[key] is None:
logger.warning('Defaulting to insecure SSL behaviour.')
if 'ssl' not in query:
query['ssl'] = {}
query['ssl'][key] = query[key]
del query[key]
return dict(transport=scheme, hostname=host,
port=port, userid=user,
password=password, virtual_host=path, **query)
def url_to_parts(url):
# type: (str) -> urlparts
"""Parse URL into :class:`urlparts` tuple of components."""
scheme = urlparse(url).scheme
schemeless = url[len(scheme) + 3:]
# parse with HTTP URL semantics
parts = urlparse('http://' + schemeless)
path = parts.path or ''
path = path[1:] if path and path[0] == '/' else path
return urlparts(
scheme,
unquote(parts.hostname or '') or None,
parts.port,
unquote(parts.username or '') or None,
unquote(parts.password or '') or None,
unquote(path or '') or None,
dict(parse_qsl(parts.query)),
)
_parse_url = url_to_parts # noqa
def as_url(scheme, host=None, port=None, user=None, password=None,
path=None, query=None, sanitize=False, mask='**'):
# type: (str, str, int, str, str, str, str, bool, str) -> str
"""Generate URL from component parts."""
parts = [f'{scheme}://']
if user or password:
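# Credentials part of the URL; the password is replaced by the mask when
# sanitize is requested.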
if user:
parts.append(safequote(user))
if password:
if sanitize:
parts.extend([':', mask] if mask else [':'])
else:
parts.extend([':', safequote(password)])
parts.append('@')
parts.append(safequote(host) if host else '')
if port:
parts.extend([':', port])
parts.extend(['/', path])
return ''.join(str(part) for part in parts if part)
def sanitize_url(url, mask='**'):
# type: (str, str) -> str
"""Return copy of URL with password removed."""
return as_url(*_parse_url(url), sanitize=True, mask=mask)
def maybe_sanitize_url(url, mask='**'):
# type: (Any, str) -> Any
"""Sanitize url, or do nothing if url undefined."""
if isinstance(url, str) and '://' in url:
return sanitize_url(url, mask)
return url
def parse_ssl_cert_reqs(query_value):
# type: (str) -> Any
"""Given the query parameter for ssl_cert_reqs, return the SSL constant or None."""
if ssl_available:
query_value_to_constant = {
'CERT_REQUIRED': ssl.CERT_REQUIRED,
'CERT_OPTIONAL': ssl.CERT_OPTIONAL,
'CERT_NONE': ssl.CERT_NONE,
'required': ssl.CERT_REQUIRED,
'optional': ssl.CERT_OPTIONAL,
'none': ssl.CERT_NONE,
}
return query_value_to_constant[query_value]
else:
return None
|
from __future__ import print_function
import argparse
import sys
import json
from mlpatches import base
_stash = globals()["_stash"]
from mlpatches import patches
def patch_is_compatible(patch):
"""Return True if the patch is compatible."""
if _stash.PY3:
return patch.PY3
else:
return patch.PY2
def save_config(path):
"""save the current config to path."""
ts = {}
for k in patches.PATCHES:
v = patches.PATCHES[k].enabled
ts[k] = v
with open(path, "w") as f:
json.dump(ts, f)
def load_config(path):
"""load the config from path"""
with open(path, "rU") as f:
tl = json.load(f)
patches.PATCHES["ALL"].disable()
for k in sorted(tl): # sort is important to load groups first
v = tl[k]
p = patches.PATCHES[k]
if v:
p.enable()
else:
p.disable()
def main(ns):
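# When no name is given, fall back to a default per action: "STABLE" for
# enable, "ALL" otherwise; loadconf/saveconf need an explicit path.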
if (ns.name is None):
if ns.action == "enable":
name = "STABLE"
elif ns.action == "disable":
name = "ALL"
elif (ns.action == "loadconf") or (ns.action == "saveconf"):
print(_stash.text_color("Name/Path needs to be specified for this action!", "red"))
sys.exit(2)
else:
name = "ALL"
else:
name = ns.name
if ns.action == "enable":
# enable a patch
if name not in patches.PATCHES:
print(_stash.text_color("Error: Patch '{n}' not found!".format(n=name), "red"))
sys.exit(1)
patch = patches.PATCHES[name]
if not patch_is_compatible(patch):
print(_stash.text_color("Error: Patch '{n}' not compatible with this python version!".format(n=name), "red"))
sys.exit(1)
patch.enable()
elif ns.action == "disable":
# disable a patch
if name not in patches.PATCHES:
print(_stash.text_color("Error: Patch '{n}' not found!".format(n=name), "red"))
sys.exit(1)
patch = patches.PATCHES[name]
if not patch_is_compatible(patch):
print(_stash.text_color("Error: Patch '{n}' not compatible with this python version!".format(n=name), "red"))
sys.exit(1)
patch.disable()
elif ns.action == "list":
# show monkeypatches and their state
print(_stash.text_bold("Available Monkeypatches:"))
mlength = max([len(e) for e in patches.PATCHES.keys()]) + 2
for pn in sorted(patches.PATCHES.keys()):
patch = patches.PATCHES[pn]
if not patch_is_compatible(patch):
continue
if patch.enabled:
t = "[enabled]"
c = "green"
else:
t = "[disabled]"
c = "red"
print("{n}{e}{s}".format(n=pn, e=" " * (mlength - len(pn)), s=_stash.text_color(t, c)))
elif ns.action == "saveconf":
save_config(name)
elif ns.action == "loadconf":
load_config(name)
if __name__ == "__main__":
# main code
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"action",
choices=["enable",
"disable",
"list",
"loadconf",
"saveconf"],
help="What to do",
)
parser.add_argument(
"name",
help="patch to perform action on",
nargs="?",
default=None,
)
ns = parser.parse_args()
main(ns)
|
import sys
import argparse
import os.path
import subprocess
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
os.pardir))
from scripts import utils
def bump_version(version_leap="patch"):
"""Update qutebrowser release version.
Args:
version_leap: define the jump between versions
("major", "minor", "patch")
"""
subprocess.run([sys.executable, '-m', 'bumpversion', version_leap],
check=True)
def show_commit():
subprocess.run(['git', 'show'], check=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Update release version.")
parser.add_argument('bump', action="store",
choices=["major", "minor", "patch"],
help="Update release version")
parser.add_argument('--commands', action="store_true",
help="Only show commands to run post-release.")
args = parser.parse_args()
utils.change_cwd()
if not args.commands:
bump_version(args.bump)
show_commit()
import qutebrowser
version = qutebrowser.__version__
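# Version with its last component replaced by "x" (e.g. "1.2.x"), used in the
# branch commands printed below.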
x_version = '.'.join([str(p) for p in qutebrowser.__version_info__[:-1]] +
['x'])
print("Run the following commands to create a new release:")
print("* git push origin; git push origin v{v}".format(v=version))
if args.bump == 'patch':
print("* git checkout master && git cherry-pick v{v} && "
"git push origin".format(v=version))
else:
print("* git branch v{x} v{v} && git push --set-upstream origin v{x}"
.format(v=version, x=x_version))
print("* Create new release via GitHub (required to upload release "
"artifacts)")
print("* Linux: git fetch && git checkout v{v} && "
"tox -e build-release -- --upload"
.format(v=version))
print("* Windows: git fetch; git checkout v{v}; "
"py -3.7 -m tox -e build-release -- --asciidoc "
"$env:userprofile\\bin\\asciidoc-9.0.2\\asciidoc.py --upload"
.format(v=version))
print("* macOS: git fetch && git checkout v{v} && "
"tox -e build-release -- --upload"
.format(v=version))
|
import os.path
import sys
# pylint: disable=import-error,no-member,useless-suppression
from PyInstaller.utils.win32 import versioninfo as vs
# pylint: enable=import-error,no-member,useless-suppression
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
os.pardir))
import qutebrowser
from scripts import utils
def main():
utils.change_cwd()
out_filename = 'misc/file_version_info.txt'
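# Pad the version tuple with a trailing zero so it has the four numeric
# fields Windows version resources expect.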
filevers = qutebrowser.__version_info__ + (0,)
prodvers = qutebrowser.__version_info__ + (0,)
str_filevers = qutebrowser.__version__
str_prodvers = qutebrowser.__version__
comment_text = qutebrowser.__doc__
copyright_text = qutebrowser.__copyright__
trademark_text = ("qutebrowser is free software under the GNU General "
"Public License")
# https://www.science.co.il/language/Locale-codes.php#definitions
# https://msdn.microsoft.com/en-us/library/windows/desktop/dd317756.aspx
en_us = 1033 # 0x0409
utf_16 = 1200 # 0x04B0
ffi = vs.FixedFileInfo(filevers, prodvers)
kids = [
vs.StringFileInfo([
# 0x0409: MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US)
# 0x04B0: codepage 1200 (UTF-16LE)
vs.StringTable('040904B0', [
vs.StringStruct('Comments', comment_text),
vs.StringStruct('CompanyName', "qutebrowser.org"),
vs.StringStruct('FileDescription', "qutebrowser"),
vs.StringStruct('FileVersion', str_filevers),
vs.StringStruct('InternalName', "qutebrowser"),
vs.StringStruct('LegalCopyright', copyright_text),
vs.StringStruct('LegalTrademarks', trademark_text),
vs.StringStruct('OriginalFilename', "qutebrowser.exe"),
vs.StringStruct('ProductName', "qutebrowser"),
vs.StringStruct('ProductVersion', str_prodvers)
]),
]),
vs.VarFileInfo([vs.VarStruct('Translation', [en_us, utf_16])]),
]
file_version_info = vs.VSVersionInfo(ffi, kids)
with open(out_filename, 'w', encoding='utf-8') as f:
f.write(str(file_version_info))
if __name__ == '__main__':
main()
|
from datetime import timedelta
import logging
from tapsaff import TapsAff
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_LOCATION = "location"
DEFAULT_NAME = "Taps Aff"
SCAN_INTERVAL = timedelta(minutes=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_LOCATION): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Taps Aff binary sensor."""
name = config.get(CONF_NAME)
location = config.get(CONF_LOCATION)
taps_aff_data = TapsAffData(location)
add_entities([TapsAffSensor(taps_aff_data, name)], True)
class TapsAffSensor(BinarySensorEntity):
"""Implementation of a Taps Aff binary sensor."""
def __init__(self, taps_aff_data, name):
"""Initialize the Taps Aff sensor."""
self.data = taps_aff_data
self._name = name
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name}"
@property
def is_on(self):
"""Return true if taps aff."""
return self.data.is_taps_aff
def update(self):
"""Get the latest data."""
self.data.update()
class TapsAffData:
"""Class for handling the data retrieval for pins."""
def __init__(self, location):
"""Initialize the data object."""
self._is_taps_aff = None
self.taps_aff = TapsAff(location)
@property
def is_taps_aff(self):
"""Return true if taps aff."""
return self._is_taps_aff
def update(self):
"""Get the latest data from the Taps Aff API and updates the states."""
try:
self._is_taps_aff = self.taps_aff.is_taps_aff
except RuntimeError:
_LOGGER.error("Update failed. Check configured location")
|
import os
import random
import string
import tarfile
import backports.lzma as lzma
import base
import mock
from docker_registry.core import compat
from docker_registry.lib import layers
from docker_registry import storage
json = compat.json
StringIO = compat.StringIO
# from mock import patch
# from mockredis import mock_strict_redis_client
def comp(n, f, *args, **kwargs):
return (f(*args, **kwargs) for i in xrange(n))
def rndstr(length=5):
palette = string.ascii_uppercase + string.digits
return ''.join(comp(length, random.choice, palette))
def _get_tarfile(filenames):
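# Build an in-memory tar archive with one member per filename, each holding
# 512 bytes of random text.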
tfobj = StringIO()
tar = tarfile.TarFile(fileobj=tfobj, mode='w')
data = rndstr(512)
for filename in filenames:
tarinfo = tarfile.TarInfo(filename)
tarinfo.size = len(data)
io = StringIO()
io.write(data)
io.seek(0)
tar.addfile(tarinfo, io)
tfobj.seek(0)
return tfobj
def _get_xzfile(filenames):
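# Wrap the in-memory tar from _get_tarfile in an xz (LZMA) stream for the
# xz archive tests.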
tar_data = _get_tarfile(filenames)
lzma_fobj = StringIO()
xz_file = lzma.open(lzma_fobj, 'w')
xz_file.write(tar_data.read())
xz_file.close()
lzma_fobj.seek(0)
return lzma_fobj
class TestHelpers(base.TestCase):
@mock.patch.object(layers.cache, 'redis_conn')
@mock.patch.object(layers.diff_queue, 'push')
@mock.patch.object(layers.logger, 'warning')
def test_enqueue_diff(self, logger, diff_queue, redis):
redis.return_value = False
self.assertEqual(logger.call_count, 0)
diff_queue.return_value = mock.MagicMock()
redis.return_value = True
image_id = 'abcd'
layers.enqueue_diff(image_id)
diff_queue.assert_called_once_with(image_id)
self.assertEqual(logger.call_count, 0)
diff_queue.side_effect = layers.cache.redis.exceptions.ConnectionError
layers.enqueue_diff(image_id)
self.assertEqual(logger.call_count, 1)
class TestArchive(base.TestCase):
def setUp(self):
self.archive = layers.Archive(_get_tarfile(list(comp(5, rndstr))))
def test_properties(self):
self.assertEqual(self.archive.seekable(), True)
self.assertEqual(self.archive.readable(), True)
self.assertEqual(self.archive._check_can_seek(), True)
class TestTarFilesInfo(base.TestCase):
def setUp(self):
self.tar_files_info = layers.TarFilesInfo()
def test__init__(self):
self.assertEqual(type(self.tar_files_info.infos), list)
@mock.patch('docker_registry.lib.layers.serialize_tar_info')
def test_append(self, serialize_tar_info):
tar_info = ('test', True)
serialize_tar_info.return_value = tar_info
self.assertEqual(len(self.tar_files_info.infos), 0)
self.assertEqual(self.tar_files_info.append('test'), None)
self.assertNotEqual(len(self.tar_files_info.infos), 0)
self.assertTrue(tar_info in self.tar_files_info.infos)
def test_json(self):
self.assertEqual(type(self.tar_files_info.json()), str)
self.assertEqual(self.tar_files_info.json(), '[]')
class TestLayers(base.TestCase):
def setUp(self):
self.store = storage.load(kind='file')
self.filenames = list(comp(5, rndstr))
def test_tar_archive(self):
tfobj = _get_tarfile(self.filenames)
archive = layers.Archive(tfobj)
tar = tarfile.open(fileobj=archive)
members = tar.getmembers()
for tarinfo in members:
assert tarinfo.name in self.filenames
def test_xz_archive(self):
tfobj = _get_xzfile(self.filenames)
archive = layers.Archive(tfobj)
tar = tarfile.open(fileobj=archive)
members = tar.getmembers()
for tarinfo in members:
assert tarinfo.name in self.filenames
def test_info_serialization(self):
tfobj = _get_tarfile(self.filenames)
archive = layers.Archive(tfobj)
tar = tarfile.open(fileobj=archive)
members = tar.getmembers()
for tarinfo in members:
sinfo = layers.serialize_tar_info(tarinfo)
assert sinfo[0] in self.filenames
assert sinfo[1:] == ('f', False, 512, 0, 420, 0, 0)
tar_info = mock.MagicMock()
expectations = [(".", "/"), ("./", "/"), ("./ab", "/ab")]
for name_in, name_out in expectations:
tar_info.name = name_in
out = layers.serialize_tar_info(tar_info)
self.assertEqual(out[0], name_out)
self.assertEqual(out[2], False)
tar_info.name = "./.wh..wh."
self.assertEqual(layers.serialize_tar_info(tar_info), None)
expectations = [("./.wh.", "/"), ("/.wh.", "/")]
for name_in, name_out in expectations:
tar_info.name = name_in
out = layers.serialize_tar_info(tar_info)
self.assertEqual(out[0], name_out)
self.assertEqual(out[2], True)
def test_tar_serialization(self):
tfobj = _get_tarfile(self.filenames)
archive = layers.Archive(tfobj)
tar = tarfile.open(fileobj=archive)
infos = layers.read_tarfile(tar)
for tarinfo in infos:
assert tarinfo[0] in self.filenames
assert tarinfo[1:] == ('f', False, 512, 0, 420, 0, 0)
def test_layer_cache(self):
layer_id = rndstr(16)
layers.set_image_files_cache(layer_id, "{}")
fetched_json = layers.get_image_files_cache(layer_id)
assert fetched_json == "{}"
def test_tar_from_fobj(self):
tfobj = _get_tarfile(self.filenames)
files = layers.get_image_files_from_fobj(tfobj)
for file in files:
assert file[0] in self.filenames
assert file[1:] == ('f', False, 512, 0, 420, 0, 0)
def test_get_image_files_json_cached(self):
layer_id = rndstr(16)
layers.set_image_files_cache(layer_id, "{}")
files_json = layers.get_image_files_json(layer_id)
assert files_json == "{}"
def test_get_image_files_json(self):
layer_id = rndstr(16)
tfobj = _get_tarfile(self.filenames)
layer_path = self.store.image_layer_path(layer_id)
layer_path = os.path.join(self.store._root_path, layer_path)
os.makedirs(os.path.dirname(layer_path))
with open(layer_path, 'w') as fobj:
fobj.write(tfobj.read())
files_json = layers.get_image_files_json(layer_id)
file_infos = json.loads(files_json)
for info in file_infos:
assert info[0] in self.filenames
assert info[1:] == [u"f", False, 512, 0, 420, 0, 0]
def test_get_file_info_map(self):
files = (
("test", "f", False, 512, 0, 420, 0, 0),
)
map = layers.get_file_info_map(files)
assert "test" in map
assert map['test'] == ("f", False, 512, 0, 420, 0, 0)
def test_image_diff_cache(self):
layer_id = rndstr(16)
layers.set_image_diff_cache(layer_id, layer_id)
diff_json = layers.get_image_diff_cache(layer_id)
assert layer_id == diff_json
def test_image_diff_json(self):
layer_1 = (
("deleted", "f", False, 512, 0, 420, 0, 0),
("changed", "f", False, 512, 0, 420, 0, 0),
)
layer_2 = (
("deleted", "f", True, 512, 0, 420, 0, 0),
("changed", "f", False, 512, 0, 420, 0, 0),
("created", "f", False, 512, 0, 420, 0, 0),
)
layer_1_id = rndstr(16)
layer_2_id = rndstr(16)
ancestry = json.dumps([layer_2_id, layer_1_id])
ancestry_path = self.store.image_ancestry_path(layer_2_id)
self.store.put_content(ancestry_path, ancestry)
layer_1_files_path = self.store.image_files_path(layer_1_id)
self.store.put_content(layer_1_files_path, json.dumps(layer_1))
layer_2_files_path = self.store.image_files_path(layer_2_id)
self.store.put_content(layer_2_files_path, json.dumps(layer_2))
diff_json = layers.get_image_diff_json(layer_2_id)
diff = json.loads(diff_json)
for type in ("deleted", "changed", "created"):
assert type in diff
assert type in diff[type]
@mock.patch('docker_registry.lib.layers.get_image_diff_cache')
def test_get_image_diff_json(self, get_image_diff_cache):
diff_json = 'test'
get_image_diff_cache.return_value = diff_json
self.assertEqual(layers.get_image_diff_json(1), diff_json)
|
import argparse
import json
import logging
from collections import defaultdict
from typing import Dict
from typing import List
from typing import Set
from mypy_extensions import TypedDict
from paasta_tools.marathon_tools import get_marathon_clients
from paasta_tools.marathon_tools import get_marathon_servers
from paasta_tools.marathon_tools import MarathonClient
from paasta_tools.marathon_tools import MarathonClients
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import SystemPaastaConfig
log = logging.getLogger(__name__)
def parse_args(argv) -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Generates links from marathon instances to their respective web dashboard."
)
parser.add_argument(
"-c",
"--cluster",
dest="cluster",
metavar="CLUSTER",
default=None,
help="define a specific cluster to read from",
)
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
args = parser.parse_args(argv)
return args
Marathon_Dashboard_Item = TypedDict(
"Marathon_Dashboard_Item", {"service": str, "instance": str, "shard_url": str}
)
Marathon_Dashboard = Dict[str, List[Marathon_Dashboard_Item]]
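# Illustrative shape of the structure built below (cluster, names and URL are hypothetical):
# {"some-cluster": [{"service": "svc", "instance": "main", "shard_url": "http://marathon.example.com"}]}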
def create_marathon_dashboard(
cluster: str,
soa_dir: str = DEFAULT_SOA_DIR,
marathon_clients: MarathonClients = None,
system_paasta_config: SystemPaastaConfig = None,
) -> Marathon_Dashboard:
try:
instances: List = get_services_for_cluster(
cluster=cluster, instance_type="marathon", soa_dir=soa_dir
)
except FileNotFoundError:
instances = []
dashboard: Marathon_Dashboard = {cluster: []}
if system_paasta_config is None:
system_paasta_config = load_system_paasta_config()
marathon_servers = get_marathon_servers(system_paasta_config=system_paasta_config)
if marathon_clients is None:
marathon_clients = get_marathon_clients(
marathon_servers=marathon_servers, cached=False
)
dashboard_links = system_paasta_config.get_dashboard_links()
marathon_links = dashboard_links.get(cluster, {}).get("Marathon RO")
# e.g. 'http://10.64.97.75:5052': 'http://marathon-norcal-prod.yelpcorp.com'
shard_url_to_marathon_link_dict: Dict[str, str] = {}
if isinstance(marathon_links, list):
# Sanity check and log error if necessary
if len(marathon_links) != len(marathon_servers.current):
log.error(
"len(marathon_links) != len(marathon_servers.current). This may be a cause of concern"
)
for shard_number, shard in enumerate(marathon_servers.current):
shard_url_to_marathon_link_dict[shard.url[0]] = marathon_links[shard_number]
elif isinstance(marathon_links, str):
# In this case, the shard url will be the same for every service instance
static_shard_url = marathon_links.split(" ")[0]
return {
cluster: [
{"service": si[0], "instance": si[1], "shard_url": static_shard_url}
for si in instances
]
}
# Key by service, since we will instantiate one PSCL per service
service_instances_dict: Dict[str, Set[str]] = defaultdict(set)
for si in instances:
service, instance = si[0], si[1]
service_instances_dict[service].add(instance)
for service, instance_set in service_instances_dict.items():
pscl = PaastaServiceConfigLoader(
service=service, soa_dir=soa_dir, load_deployments=False
)
for marathon_service_config in pscl.instance_configs(
cluster, MarathonServiceConfig
):
if marathon_service_config.get_instance() in instance_set:
client: MarathonClient = marathon_clients.get_current_client_for_service(
job_config=marathon_service_config
)
ip_url: str = client.servers[0]
# Convert to a marathon link if possible, else default to the original IP address
shard_url: str = shard_url_to_marathon_link_dict.get(ip_url, ip_url)
service_info: Marathon_Dashboard_Item = {
"service": service,
"instance": marathon_service_config.get_instance(),
"shard_url": shard_url,
}
dashboard[cluster].append(service_info)
return dashboard
def main(argv=None) -> None:
args = parse_args(argv)
dashboard: Marathon_Dashboard = create_marathon_dashboard(
cluster=args.cluster, soa_dir=args.soa_dir
)
print(json.dumps(dashboard))
if __name__ == "__main__":
main()
|
from __future__ import print_function
import errno
import os
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import signal
import subprocess
import time
import traceback
import unittest
import rospkg
from . import junitxml
from . import pmon
from .core import create_xml_runner # noqa: F401
from .core import printerrlog
from .core import printlog
from .core import printlog_bold
from .core import rostest_name_from_path # noqa: F401
from .core import xml_results_file
BARE_TIME_LIMIT = 60.
TIMEOUT_SIGINT = 15.0 # seconds
TIMEOUT_SIGTERM = 2.0 # seconds
class TestTimeoutException(Exception):
pass
class BareTestCase(unittest.TestCase):
def __init__(self, exe, args, retry=0, time_limit=None, test_name=None, text_mode=False, package_name=None):
"""
@param exe: path to executable to run
@type exe: str
@param args: arguments to exe
@type args: [str]
@type retry: int
@param time_limit: (optional) time limit for test. Defaults to BARE_TIME_LIMIT.
@type time_limit: float
@param test_name: (optional) override automatically generated test name
@type test_name: str
@param package_name: (optional) override automatically inferred package name
@type package_name: str
"""
super(BareTestCase, self).__init__()
self.text_mode = text_mode
if package_name:
self.package = package_name
else:
self.package = rospkg.get_package_name(exe)
self.exe = os.path.abspath(exe)
if test_name is None:
self.test_name = os.path.basename(exe)
else:
self.test_name = test_name
# invoke pyunit tests with python executable
if self.exe.endswith('.py'):
self.args = ['python', self.exe] + args
else:
self.args = [self.exe] + args
if text_mode:
self.args = self.args + ['--text']
self.retry = retry
self.time_limit = time_limit or BARE_TIME_LIMIT
self.pmon = None
self.results = junitxml.Result(self.test_name)
def setUp(self):
self.pmon = pmon.start_process_monitor()
def tearDown(self):
if self.pmon is not None:
pmon.shutdown_process_monitor(self.pmon)
self.pmon = None
def runTest(self):
self.failIf(self.package is None, 'unable to determine package of executable')
done = False
while not done:
test_name = self.test_name
printlog('Running test [%s]', test_name)
# setup the test
# - we pass in the output test_file name so we can scrape it
test_file = xml_results_file(self.package, test_name, False)
if os.path.exists(test_file):
printlog('removing previous test results file [%s]', test_file)
os.remove(test_file)
self.args.append('--gtest_output=xml:%s' % test_file)
# run the test, blocks until completion
printlog('running test %s' % test_name)
timeout_failure = False
run_id = None
# TODO: really need different, non-node version of LocalProcess instead of these extra args
process = LocalProcess(run_id, self.package, self.test_name, self.args, os.environ, False, cwd='cwd', is_node=False)
pm = self.pmon
pm.register(process)
success = process.start()
self.assert_(success, 'test failed to start')
# poll until the test terminates or the allotted time is exceeded
timeout_t = time.time() + self.time_limit
try:
while process.is_alive():
# test fails on timeout
if time.time() > timeout_t:
raise TestTimeoutException('test max time allotted')
time.sleep(0.1)
except TestTimeoutException:
if self.retry:
timeout_failure = True
else:
raise
if not timeout_failure:
printlog('test [%s] finished' % test_name)
else:
printerrlog('test [%s] timed out' % test_name)
if self.text_mode:
results = self.results
else:
# load in test_file
if not timeout_failure:
self.assert_(os.path.isfile(test_file), 'test [%s] did not generate test results' % test_name)
printlog('test [%s] results are in [%s]', test_name, test_file)
results = junitxml.read(test_file, test_name)
test_fail = results.num_errors or results.num_failures
else:
test_fail = True
if self.retry > 0 and test_fail:
self.retry -= 1
printlog('test [%s] failed, retrying. Retries left: %s' % (test_name, self.retry))
else:
done = True
self.results = results
printlog('test [%s] results summary: %s errors, %s failures, %s tests',
test_name, results.num_errors, results.num_failures, results.num_tests)
printlog('[ROSTEST] test [%s] done', test_name)
# TODO: this is a straight copy from roslaunch. Need to reduce, refactor
class LocalProcess(pmon.Process):
"""
Process launched on local machine
"""
def __init__(self, run_id, package, name, args, env, log_output, respawn=False, required=False, cwd=None, is_node=True):
"""
@param run_id: unique run ID for this roslaunch. Used to
generate log directory location. run_id may be None if this
feature is not being used.
@type run_id: str
@param package: name of package process is part of
@type package: str
@param name: name of process
@type name: str
@param args: list of arguments to process
@type args: [str]
@param env: environment dictionary for process
@type env: {str : str}
@param log_output: if True, log output streams of process
@type log_output: bool
@param respawn: respawn process if it dies (default is False)
@type respawn: bool
@param cwd: working directory of process, or None
@type cwd: str
@param is_node: (optional) if True, process is ROS node and accepts ROS node command-line arguments. Default: True
@type is_node: bool
"""
super(LocalProcess, self).__init__(package, name, args, env, respawn, required)
self.run_id = run_id
self.popen = None
self.log_output = log_output
self.started = False
self.stopped = False
self.cwd = cwd
self.log_dir = None
self.pid = -1
self.is_node = is_node
# NOTE: in the future, info() is going to have to be sufficient for relaunching a process
def get_info(self):
"""
Get all data about this process in dictionary form
"""
info = super(LocalProcess, self).get_info()
info['pid'] = self.pid
if self.run_id:
info['run_id'] = self.run_id
info['log_output'] = self.log_output
if self.cwd is not None:
info['cwd'] = self.cwd
return info
def _configure_logging(self):
"""
Configure logging of node's log file and stdout/stderr
@return: stdout log file name, stderr log file
name. Values are None if stdout/stderr are not logged.
@rtype: str, str
"""
log_dir = rospkg.get_log_dir(env=os.environ)
if self.run_id:
log_dir = os.path.join(log_dir, self.run_id)
if not os.path.exists(log_dir):
try:
os.makedirs(log_dir)
except OSError as e:
if e.errno == errno.EACCES:
raise RLException('unable to create directory for log file [%s].\nPlease check permissions.' % log_dir)
else:
raise RLException('unable to create directory for log file [%s]: %s' % (log_dir, e.strerror))
# #973: save log dir for error messages
self.log_dir = log_dir
# send stdout/stderr to file. in the case of respawning, we have to
# open in append mode
# note: logfileerr: disabling in favor of stderr appearing in the console.
# will likely reinstate once roserr/rosout is more properly used.
logfileout = logfileerr = None
if self.log_output:
outf, errf = [os.path.join(log_dir, '%s-%s.log' % (self.name, n)) for n in ['stdout', 'stderr']]
if self.respawn:
mode = 'a'
else:
mode = 'w'
logfileout = open(outf, mode)
if is_child_mode():
logfileerr = open(errf, mode)
# #986: pass in logfile name to node
if self.is_node:
# #1595: on respawn, these keep appending
self.args = _cleanup_remappings(self.args, '__log:=')
self.args.append('__log:=%s' % os.path.join(log_dir, '%s.log' % self.name))
return logfileout, logfileerr
def start(self):
"""
Start the process.
@raise pmon.FatalProcessLaunch: if process cannot be started and it
is not likely to ever succeed
"""
super(LocalProcess, self).start()
try:
self.lock.acquire()
self.started = self.stopped = False
full_env = self.env
# _configure_logging() can mutate self.args
try:
logfileout, logfileerr = self._configure_logging()
except Exception as e:
printerrlog('[%s] ERROR: unable to configure logging [%s]' % (self.name, str(e)))
# it's not safe to inherit from this process as
# rostest changes stdout to a StringIO, which is not a
# proper file.
logfileout, logfileerr = subprocess.PIPE, subprocess.PIPE
if self.cwd == 'node':
cwd = os.path.dirname(self.args[0])
elif self.cwd == 'cwd':
cwd = os.getcwd()
elif self.cwd == 'ros-root':
from roslib.rosenv import get_ros_root
cwd = get_ros_root()
else:
cwd = rospkg.get_ros_home()
if not os.path.exists(cwd):
try:
os.makedirs(cwd)
except OSError:
# exist_ok=True
pass
try:
self.popen = subprocess.Popen(self.args, cwd=cwd, stdout=logfileout, stderr=logfileerr, env=full_env, close_fds=True, preexec_fn=os.setsid)
except OSError as e:
self.started = True # must set so is_alive state is correct
if e.errno == errno.ENOEXEC: # Exec format error
raise pmon.FatalProcessLaunch("Unable to launch [%s]. \nIf it is a script, you may be missing a '#!' declaration at the top." % self.name)
elif e.errno == errno.ENOENT: # no such file or directory
raise pmon.FatalProcessLaunch("""Roslaunch got a '%s' error while attempting to run:
%s
Please make sure that all the executables in this command exist and have
executable permission. This is often caused by a bad launch-prefix.""" % (e.strerror, ' '.join(self.args)))
else:
raise pmon.FatalProcessLaunch('unable to launch [%s]: %s' % (' '.join(self.args), e.strerror))
self.started = True
# Check that the process is either still running (poll returns
# None) or that it completed successfully since we launched it
# above (poll returns the return code, 0).
poll_result = self.popen.poll()
if poll_result is None or poll_result == 0:
self.pid = self.popen.pid
printlog_bold('process[%s]: started with pid [%s]' % (self.name, self.pid))
return True
else:
printerrlog('failed to start local process: %s' % (' '.join(self.args)))
return False
finally:
self.lock.release()
def is_alive(self):
"""
@return: True if process is still running
@rtype: bool
"""
if not self.started: # not started yet
return True
if self.stopped or self.popen is None:
return False
self.exit_code = self.popen.poll()
if self.exit_code is not None:
return False
return True
def get_exit_description(self):
"""
@return: human-readable description of exit state
@rtype: str
"""
# #973: include location of output location in message
if self.exit_code is not None:
if self.exit_code:
if self.log_dir:
return 'process has died [pid %s, exit code %s].\nlog files: %s*.log' % (self.pid, self.exit_code, os.path.join(self.log_dir, self.name))
else:
return 'process has died [pid %s, exit code %s]' % (self.pid, self.exit_code)
else:
if self.log_dir:
return 'process has finished cleanly.\nlog file: %s*.log' % (os.path.join(self.log_dir, self.name))
else:
return 'process has finished cleanly'
else:
return 'process has died'
def _stop_unix(self, errors):
"""
UNIX implementation of process killing
@param errors: error messages. stop() will record messages into this list.
@type errors: [str]
"""
self.exit_code = self.popen.poll()
if self.exit_code is not None:
# print "process[%s].stop(): process has already returned %s"%(self.name, self.exit_code)
self.popen = None
self.stopped = True
return
pid = self.popen.pid
pgid = os.getpgid(pid)
try:
# Start with SIGINT and escalate from there.
os.killpg(pgid, signal.SIGINT)
timeout_t = time.time() + TIMEOUT_SIGINT
retcode = self.popen.poll()
while time.time() < timeout_t and retcode is None:
time.sleep(0.1)
retcode = self.popen.poll()
# Escalate non-responsive process
if retcode is None:
printerrlog('[%s] escalating to SIGTERM' % self.name)
timeout_t = time.time() + TIMEOUT_SIGTERM
os.killpg(pgid, signal.SIGTERM)
retcode = self.popen.poll()
while time.time() < timeout_t and retcode is None:
time.sleep(0.2)
retcode = self.popen.poll()
if retcode is None:
printerrlog('[%s] escalating to SIGKILL' % self.name)
errors.append('process[%s, pid %s]: required SIGKILL. May still be running.' % (self.name, pid))
try:
os.killpg(pgid, signal.SIGKILL)
# #2096: don't block on SIGKILL, because this results in more orphaned processes overall
except OSError as e:
if e.errno == errno.ESRCH:  # no such process
printerrlog('no [%s] process with pid [%s]' % (self.name, pid))
else:
printerrlog('errors shutting down [%s]: %s' % (self.name, e))
finally:
self.popen = None
def stop(self, errors=[]):
"""
Stop the process. Record any significant error messages in the errors parameter
@param errors: error messages. stop() will record messages into this list.
@type errors: [str]
"""
super(LocalProcess, self).stop(errors)
self.lock.acquire()
try:
try:
if self.popen is None:
return
# NOTE: currently POSIX-only. Need to add in Windows code once I have a test environment:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/347462
self._stop_unix(errors)
except Exception:
printerrlog('[%s] EXCEPTION %s' % (self.name, traceback.format_exc()))
finally:
self.stopped = True
self.lock.release()
def print_runner_summary(runner_results, junit_results, runner_name='ROSUNIT'):
"""
Print summary of runner results and actual test results to
stdout. For rosunit and rostest, the test is wrapped in an
external runner. The results from this runner are important if the
runner itself has a failure.
@param runner_results: unittest runner result object
@type runner_results: _XMLTestResult
@param junit_results: Parsed JUnit test results
@type junit_results: rosunit.junitxml.Result
"""
# we have two separate result objects, which can be a bit
# confusing. 'runner_results' counts successful _running_ of tests
# (i.e. it doesn't check for actual test success). The 'junit_results'
# object contains the results of the actual tests.
buff = StringIO()
buff.write('[%s]' % (runner_name) + '-' * 71 + '\n\n')
for tc_result in junit_results.test_case_results:
buff.write(tc_result.description)
for tc_result in runner_results.failures:
buff.write('[%s][failed]\n' % tc_result[0]._testMethodName)
buff.write('\nSUMMARY\n')
if runner_results.wasSuccessful() and (junit_results.num_errors + junit_results.num_failures) == 0:
buff.write('\033[32m * RESULT: SUCCESS\033[0m\n')
else:
buff.write('\033[1;31m * RESULT: FAIL\033[0m\n')
# TODO: still some issues with the numbers adding up if tests fail to launch
# number of errors from the inner tests, plus add in count for tests
# that didn't run properly ('result' object).
buff.write(' * TESTS: %s\n' % junit_results.num_tests)
num_errors = junit_results.num_errors+len(runner_results.errors)
if num_errors:
buff.write('\033[1;31m * ERRORS: %s\033[0m\n' % num_errors)
else:
buff.write(' * ERRORS: 0\n')
num_failures = junit_results.num_failures+len(runner_results.failures)
if num_failures:
buff.write('\033[1;31m * FAILURES: %s\033[0m\n' % num_failures)
else:
buff.write(' * FAILURES: 0\n')
if runner_results.failures:
buff.write('\nERROR: The following tests failed to run:\n')
for tc_result in runner_results.failures:
buff.write(' * ' + tc_result[0]._testMethodName + '\n')
print(buff.getvalue())
def _format_errors(errors):
formatted = []
for e in errors:
if '_testMethodName' in e[0].__dict__:
formatted.append(e[0]._testMethodName)
elif 'description' in e[0].__dict__:
formatted.append('%s: %s\n' % (str(e[0].description), str(e[1])))
else:
formatted.append(str(e[0].__dict__))
return formatted
def print_unittest_summary(result):
"""
Print summary of python unittest result to stdout
@param result: test results
"""
buff = StringIO()
buff.write('-------------------------------------------------------------\nSUMMARY:\n')
if result.wasSuccessful():
buff.write('\033[32m * RESULT: SUCCESS\033[0m\n')
else:
buff.write(' * RESULT: FAIL\n')
buff.write(' * TESTS: %s\n' % result.testsRun)
buff.write(' * ERRORS: %s [%s]\n' % (len(result.errors), ', '.join(_format_errors(result.errors))))
buff.write(' * FAILURES: %s [%s]\n' % (len(result.failures), ', '.join(_format_errors(result.failures))))
print(buff.getvalue())
|
import datetime
import sys
import unittest
import mock
from six import StringIO
# These imports are mocked so that we don't need to add them to the
# test dependencies. The script under test for this test module is
# expected to execute only on a client VM which has built tensorflow
# from source.
sys.modules['grpc'] = mock.Mock()
sys.modules['grpc.beta'] = mock.Mock()
sys.modules['grpc.framework'] = mock.Mock()
sys.modules['grpc.framework.interfaces'] = mock.Mock()
sys.modules['grpc.framework.interfaces.face'] = mock.Mock()
sys.modules['grpc.framework.interfaces.face.face'] = mock.Mock()
sys.modules['tensorflow'] = mock.Mock()
sys.modules['tensorflow_serving'] = mock.Mock()
sys.modules['tensorflow_serving.apis'] = mock.Mock()
from perfkitbenchmarker.scripts import tensorflow_serving_client_workload # pylint: disable=g-import-not-at-top,g-bad-import-order
class TestTensorflowServingClientWorkload(unittest.TestCase):
def setUp(self):
flag_values = {
'server': '123:456',
'image_directory': '/fake',
'num_threads': 16,
'runtime': 20,
}
p = mock.patch(tensorflow_serving_client_workload.__name__ + '.FLAGS')
flags_mock = p.start()
flags_mock.configure_mock(**flag_values)
self.addCleanup(p.stop)
os_patch = mock.patch(tensorflow_serving_client_workload.__name__ + '.os')
os_patch.start()
self.addCleanup(os_patch.stop)
self.client_workload = (
tensorflow_serving_client_workload.TfServingClientWorkload())
def testPrintOutput(self):
self.client_workload.num_completed_requests = 10
self.client_workload.num_failed_requests = 2
self.client_workload.latencies = [1.1, 2.2, 3.3]
# Set start_time to an arbitrary datetime, and set end_time to 20 seconds
# after start_time.
self.client_workload.start_time = datetime.datetime(2000, 1, 1, 1, 1, 1, 1)
self.client_workload.end_time = datetime.datetime(2000, 1, 1, 1, 1, 21, 1)
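# With 10 completed requests over the 20 second window above, the expected
# throughput is 10 / 20 = 0.5 requests per second, matching the output below.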
expected_output = """
Completed requests: 10
Failed requests: 2
Runtime: 20.0
Number of threads: 16
Throughput: 0.5
Latency:
1.1
2.2
3.3""".strip()
out = StringIO()
self.client_workload.print_results(out=out)
actual_output = out.getvalue().strip()
self.assertEqual(expected_output, actual_output)
if __name__ == '__main__':
unittest.main()
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.fan import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a fan."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
hass.states.async_set("fan.entity", STATE_ON)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "fan.entity",
"type": "is_on",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_on - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "fan.entity",
"type": "is_off",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_on - event - test_event1"
hass.states.async_set("fan.entity", STATE_OFF)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_off - event - test_event2"
|
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_BOOST,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import DATA_HIVE, DOMAIN, HiveEntity, refresh_system
HIVE_TO_HASS_STATE = {
"SCHEDULE": HVAC_MODE_AUTO,
"MANUAL": HVAC_MODE_HEAT,
"OFF": HVAC_MODE_OFF,
}
HASS_TO_HIVE_STATE = {
HVAC_MODE_AUTO: "SCHEDULE",
HVAC_MODE_HEAT: "MANUAL",
HVAC_MODE_OFF: "OFF",
}
HIVE_TO_HASS_HVAC_ACTION = {
"UNKNOWN": CURRENT_HVAC_OFF,
False: CURRENT_HVAC_IDLE,
True: CURRENT_HVAC_HEAT,
}
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORT_HVAC = [HVAC_MODE_AUTO, HVAC_MODE_HEAT, HVAC_MODE_OFF]
SUPPORT_PRESET = [PRESET_NONE, PRESET_BOOST]
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Hive climate devices."""
if discovery_info is None:
return
session = hass.data.get(DATA_HIVE)
devs = []
for dev in discovery_info:
devs.append(HiveClimateEntity(session, dev))
add_entities(devs)
class HiveClimateEntity(HiveEntity, ClimateEntity):
"""Hive Climate Device."""
def __init__(self, hive_session, hive_device):
"""Initialize the Climate device."""
super().__init__(hive_session, hive_device)
self.thermostat_node_id = hive_device["Thermostat_NodeID"]
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the Climate device."""
friendly_name = "Heating"
if self.node_name is not None:
if self.device_type == "TRV":
friendly_name = self.node_name
else:
friendly_name = f"{self.node_name} {friendly_name}"
return friendly_name
@property
def device_state_attributes(self):
"""Show Device Attributes."""
return self.attributes
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return SUPPORT_HVAC
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
return HIVE_TO_HASS_STATE[self.session.heating.get_mode(self.node_id)]
@property
def hvac_action(self):
"""Return current HVAC action."""
return HIVE_TO_HASS_HVAC_ACTION[
self.session.heating.operational_status(self.node_id, self.device_type)
]
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.session.heating.current_temperature(self.node_id)
@property
def target_temperature(self):
"""Return the target temperature."""
return self.session.heating.get_target_temperature(self.node_id)
@property
def min_temp(self):
"""Return minimum temperature."""
return self.session.heating.min_temperature(self.node_id)
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.session.heating.max_temperature(self.node_id)
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
if (
self.device_type == "Heating"
and self.session.heating.get_boost(self.node_id) == "ON"
):
return PRESET_BOOST
return None
@property
def preset_modes(self):
"""Return a list of available preset modes."""
return SUPPORT_PRESET
@refresh_system
def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
new_mode = HASS_TO_HIVE_STATE[hvac_mode]
self.session.heating.set_mode(self.node_id, new_mode)
@refresh_system
def set_temperature(self, **kwargs):
"""Set new target temperature."""
new_temperature = kwargs.get(ATTR_TEMPERATURE)
if new_temperature is not None:
self.session.heating.set_target_temperature(self.node_id, new_temperature)
@refresh_system
def set_preset_mode(self, preset_mode):
"""Set new preset mode."""
if preset_mode == PRESET_NONE and self.preset_mode == PRESET_BOOST:
self.session.heating.turn_boost_off(self.node_id)
elif preset_mode == PRESET_BOOST:
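# Round the current temperature to the nearest 0.5 degree and boost to 0.5 above it
# (e.g. 19.3 -> 19.5 -> 20.0).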
curtemp = round(self.current_temperature * 2) / 2
temperature = curtemp + 0.5
self.session.heating.turn_boost_on(self.node_id, 30, temperature)
def update(self):
"""Update all Node data from Hive."""
self.session.core.update_data(self.node_id)
self.attributes = self.session.attributes.state_attributes(
self.thermostat_node_id
)
|
try:
from urllib3.connectionpool import HTTPConnection, VerifiedHTTPSConnection
except ImportError:
from requests.packages.urllib3.connectionpool import HTTPConnection, VerifiedHTTPSConnection
from ..stubs import VCRHTTPConnection, VCRHTTPSConnection
# urllib3 defines its own HTTPConnection classes, which requests goes ahead and assumes
# you're using. It includes some polyfills for newer features missing in older pythons.
class VCRRequestsHTTPConnection(VCRHTTPConnection, HTTPConnection):
_baseclass = HTTPConnection
class VCRRequestsHTTPSConnection(VCRHTTPSConnection, VerifiedHTTPSConnection):
_baseclass = VerifiedHTTPSConnection
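# These subclasses are presumably what vcrpy's patcher swaps in for the stock
# urllib3/requests connection classes while a cassette is active, so traffic made
# through `requests` is recorded and replayed like any other HTTP connection.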
|
import hangups
from common import run_example
async def get_conversation(client, args):
request = hangups.hangouts_pb2.GetConversationRequest(
request_header=client.get_request_header(),
conversation_spec=hangups.hangouts_pb2.ConversationSpec(
conversation_id=hangups.hangouts_pb2.ConversationId(
id=args.conversation_id
),
),
include_event=True,
max_events_per_conversation=10,
)
res = await client.get_conversation(request)
print(res)
if __name__ == '__main__':
run_example(get_conversation, '--conversation-id')
|
from homeassistant.components.switch import SwitchEntity
from . import DATA_HIVE, DOMAIN, HiveEntity, refresh_system
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Hive switches."""
if discovery_info is None:
return
session = hass.data.get(DATA_HIVE)
devs = []
for dev in discovery_info:
devs.append(HiveDevicePlug(session, dev))
add_entities(devs)
class HiveDevicePlug(HiveEntity, SwitchEntity):
"""Hive Active Plug."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}
@property
def name(self):
"""Return the name of this Switch device if any."""
return self.node_name
@property
def device_state_attributes(self):
"""Show Device Attributes."""
return self.attributes
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self.session.switch.get_power_usage(self.node_id)
@property
def is_on(self):
"""Return true if switch is on."""
return self.session.switch.get_state(self.node_id)
@refresh_system
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.session.switch.turn_on(self.node_id)
@refresh_system
def turn_off(self, **kwargs):
"""Turn the device off."""
self.session.switch.turn_off(self.node_id)
def update(self):
"""Update all Node data from Hive."""
self.session.core.update_data(self.node_id)
self.attributes = self.session.attributes.state_attributes(self.node_id)
|
from openrazer_daemon.dbus_services import endpoint
@endpoint('razer.device.lighting.logo', 'setLogoStatic', in_sig='yyy')
def set_logo_static_naga_hex_v2(self, red, green, blue):
"""
Set the device to static colour
:param red: Red component
:type red: int
:param green: Green component
:type green: int
:param blue: Blue component
:type blue: int
"""
self.logger.debug("DBus call set_static_effect")
# Notify others
self.send_effect_event('setStatic', red, green, blue)
# remember effect
self.set_persistence("logo", "effect", 'static')
self.zone["logo"]["colors"][0:3] = int(red), int(green), int(blue)
rgb_driver_path = self.get_driver_path('logo_matrix_effect_static')
payload = bytes([red, green, blue])
with open(rgb_driver_path, 'wb') as rgb_driver_file:
rgb_driver_file.write(payload)
@endpoint('razer.device.lighting.logo', 'setLogoSpectrum')
def set_logo_spectrum_naga_hex_v2(self):
"""
Set the device to spectrum mode
"""
self.logger.debug("DBus call set_logo_spectrum")
# Notify others
self.send_effect_event('setSpectrum')
# remember effect
self.set_persistence("logo", "effect", 'spectrum')
effect_driver_path = self.get_driver_path('logo_matrix_effect_spectrum')
with open(effect_driver_path, 'w') as effect_driver_file:
effect_driver_file.write('1')
@endpoint('razer.device.lighting.logo', 'setLogoNone')
def set_logo_none_naga_hex_v2(self):
"""
Set the device to effect none
"""
self.logger.debug("DBus call set_none_effect")
# Notify others
self.send_effect_event('setNone')
# remember effect
self.set_persistence("logo", "effect", 'none')
driver_path = self.get_driver_path('logo_matrix_effect_none')
with open(driver_path, 'w') as driver_file:
driver_file.write('1')
@endpoint('razer.device.lighting.logo', 'setLogoReactive', in_sig='yyyy')
def set_logo_reactive_naga_hex_v2(self, red, green, blue, speed):
"""
Set the device to reactive effect
:param red: Red component
:type red: int
:param green: Green component
:type green: int
:param blue: Blue component
:type blue: int
:param speed: Speed
:type speed: int
"""
self.logger.debug("DBus call set_reactive_effect")
driver_path = self.get_driver_path('logo_matrix_effect_reactive')
# Notify others
self.send_effect_event('setReactive', red, green, blue, speed)
# remember effect
self.set_persistence("logo", "effect", 'reactive')
self.zone["logo"]["colors"][0:3] = int(red), int(green), int(blue)
self.set_persistence("logo", "speed", int(speed))
if speed not in (1, 2, 3, 4):
speed = 4
payload = bytes([speed, red, green, blue])
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
@endpoint('razer.device.lighting.logo', 'setLogoBreathRandom')
def set_logo_breath_random_naga_hex_v2(self):
"""
Set the device to random colour breathing effect
"""
self.logger.debug("DBus call set_breath_random_effect")
# Notify others
self.send_effect_event('setBreathRandom')
# remember effect
self.set_persistence("logo", "effect", 'breathRandom')
driver_path = self.get_driver_path('logo_matrix_effect_breath')
payload = b'1'
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
@endpoint('razer.device.lighting.logo', 'setLogoBreathSingle', in_sig='yyy')
def set_logo_breath_single_naga_hex_v2(self, red, green, blue):
"""
Set the device to single colour breathing effect
:param red: Red component
:type red: int
:param green: Green component
:type green: int
:param blue: Blue component
:type blue: int
"""
self.logger.debug("DBus call set_breath_single_effect")
# Notify others
self.send_effect_event('setBreathSingle', red, green, blue)
# remember effect
self.set_persistence("logo", "effect", 'breathSingle')
self.zone["logo"]["colors"][0:3] = int(red), int(green), int(blue)
driver_path = self.get_driver_path('logo_matrix_effect_breath')
payload = bytes([red, green, blue])
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
@endpoint('razer.device.lighting.logo', 'setLogoBreathDual', in_sig='yyyyyy')
def set_logo_breath_dual_naga_hex_v2(self, red1, green1, blue1, red2, green2, blue2):
"""
Set the device to dual colour breathing effect
:param red1: Red component
:type red1: int
:param green1: Green component
:type green1: int
:param blue1: Blue component
:type blue1: int
:param red2: Red component
:type red2: int
:param green2: Green component
:type green2: int
:param blue2: Blue component
:type blue2: int
"""
self.logger.debug("DBus call set_breath_dual_effect")
# Notify others
self.send_effect_event('setBreathDual', red1, green1, blue1, red2, green2, blue2)
# remember effect
self.set_persistence("logo", "effect", 'breathDual')
self.zone["logo"]["colors"][0:6] = int(red1), int(green1), int(blue1), int(red2), int(green2), int(blue2)
driver_path = self.get_driver_path('logo_matrix_effect_breath')
payload = bytes([red1, green1, blue1, red2, green2, blue2])
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
@endpoint('razer.device.lighting.scroll', 'setScrollStatic', in_sig='yyy')
def set_scroll_static_naga_hex_v2(self, red, green, blue):
"""
Set the device to static colour
:param red: Red component
:type red: int
:param green: Green component
:type green: int
:param blue: Blue component
:type blue: int
"""
self.logger.debug("DBus call set_static_effect")
# Notify others
self.send_effect_event('setStatic', red, green, blue)
# remember effect
self.set_persistence("scroll", "effect", 'static')
self.zone["scroll"]["colors"][0:3] = int(red), int(green), int(blue)
rgb_driver_path = self.get_driver_path('scroll_matrix_effect_static')
payload = bytes([red, green, blue])
with open(rgb_driver_path, 'wb') as rgb_driver_file:
rgb_driver_file.write(payload)
@endpoint('razer.device.lighting.scroll', 'setScrollSpectrum')
def set_scroll_spectrum_naga_hex_v2(self):
"""
Set the device to spectrum mode
"""
self.logger.debug("DBus call set_scroll_spectrum")
# Notify others
self.send_effect_event('setSpectrum')
self.set_persistence("scroll", "effect", 'spectrum')
effect_driver_path = self.get_driver_path('scroll_matrix_effect_spectrum')
with open(effect_driver_path, 'w') as effect_driver_file:
effect_driver_file.write('1')
@endpoint('razer.device.lighting.scroll', 'setScrollNone')
def set_scroll_none_naga_hex_v2(self):
"""
Set the device to effect none
"""
self.logger.debug("DBus call set_none_effect")
# Notify others
self.send_effect_event('setNone')
self.set_persistence("scroll", "effect", 'none')
driver_path = self.get_driver_path('scroll_matrix_effect_none')
with open(driver_path, 'w') as driver_file:
driver_file.write('1')
@endpoint('razer.device.lighting.scroll', 'setScrollReactive', in_sig='yyyy')
def set_scroll_reactive_naga_hex_v2(self, red, green, blue, speed):
"""
Set the device to reactive effect
:param red: Red component
:type red: int
:param green: Green component
:type green: int
:param blue: Blue component
:type blue: int
:param speed: Speed
:type speed: int
"""
self.logger.debug("DBus call set_reactive_effect")
driver_path = self.get_driver_path('scroll_matrix_effect_reactive')
# Notify others
self.send_effect_event('setReactive', red, green, blue, speed)
# remember effect
self.set_persistence("scroll", "effect", 'reactive')
self.zone["scroll"]["colors"][0:3] = int(red), int(green), int(blue)
self.set_persistence("scroll", "speed", int(speed))
if speed not in (1, 2, 3, 4):
speed = 4
payload = bytes([speed, red, green, blue])
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
@endpoint('razer.device.lighting.scroll', 'setScrollBreathRandom')
def set_scroll_breath_random_naga_hex_v2(self):
"""
Set the device to random colour breathing effect
"""
self.logger.debug("DBus call set_breath_random_effect")
# Notify others
self.send_effect_event('setBreathRandom')
# remember effect
self.set_persistence("scroll", "effect", 'breathRandom')
driver_path = self.get_driver_path('scroll_matrix_effect_breath')
payload = b'1'
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
@endpoint('razer.device.lighting.scroll', 'setScrollBreathSingle', in_sig='yyy')
def set_scroll_breath_single_naga_hex_v2(self, red, green, blue):
"""
Set the device to single colour breathing effect
:param red: Red component
:type red: int
:param green: Green component
:type green: int
:param blue: Blue component
:type blue: int
"""
self.logger.debug("DBus call set_breath_single_effect")
# Notify others
self.send_effect_event('setBreathSingle', red, green, blue)
# remember effect
self.set_persistence("scroll", "effect", 'breathSingle')
self.zone["scroll"]["colors"][0:3] = int(red), int(green), int(blue)
driver_path = self.get_driver_path('scroll_matrix_effect_breath')
payload = bytes([red, green, blue])
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
@endpoint('razer.device.lighting.scroll', 'setScrollBreathDual', in_sig='yyyyyy')
def set_scroll_breath_dual_naga_hex_v2(self, red1, green1, blue1, red2, green2, blue2):
"""
Set the device to dual colour breathing effect
:param red1: Red component
:type red1: int
:param green1: Green component
:type green1: int
:param blue1: Blue component
:type blue1: int
:param red2: Red component
:type red2: int
:param green2: Green component
:type green2: int
:param blue2: Blue component
:type blue2: int
"""
self.logger.debug("DBus call set_breath_dual_effect")
# Notify others
self.send_effect_event('setBreathDual', red1, green1, blue1, red2, green2, blue2)
# remember effect
self.set_persistence("scroll", "effect", 'breathDual')
self.zone["scroll"]["colors"][0:6] = int(red1), int(green1), int(blue1), int(red2), int(green2), int(blue2)
driver_path = self.get_driver_path('scroll_matrix_effect_breath')
payload = bytes([red1, green1, blue1, red2, green2, blue2])
with open(driver_path, 'wb') as driver_file:
driver_file.write(payload)
|
import json
from homeassistant.components.wled.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_MAC, CONTENT_TYPE_JSON
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
async def init_integration(
hass: HomeAssistant,
aioclient_mock: AiohttpClientMocker,
rgbw: bool = False,
skip_setup: bool = False,
) -> MockConfigEntry:
"""Set up the WLED integration in Home Assistant."""
fixture = "wled/rgb.json" if not rgbw else "wled/rgbw.json"
data = json.loads(load_fixture(fixture))
aioclient_mock.get(
"http://192.168.1.123:80/json/",
json=data,
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.post(
"http://192.168.1.123:80/json/state",
json=data["state"],
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"http://192.168.1.123:80/json/info",
json=data["info"],
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"http://192.168.1.123:80/json/state",
json=data["state"],
headers={"Content-Type": CONTENT_TYPE_JSON},
)
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_HOST: "192.168.1.123", CONF_MAC: "aabbccddeeff"}
)
entry.add_to_hass(hass)
if not skip_setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
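# Typical usage in a test (sketch): `entry = await init_integration(hass, aioclient_mock)`
# sets up a mocked WLED device at 192.168.1.123 before its entities are exercised.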
|
from xs1_api_client.api_constants import ActuatorType
from homeassistant.helpers.entity import ToggleEntity
from . import ACTUATORS, DOMAIN as COMPONENT_DOMAIN, XS1DeviceEntity
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the XS1 switch platform."""
actuators = hass.data[COMPONENT_DOMAIN][ACTUATORS]
switch_entities = []
for actuator in actuators:
if (actuator.type() == ActuatorType.SWITCH) or (
actuator.type() == ActuatorType.DIMMER
):
switch_entities.append(XS1SwitchEntity(actuator))
add_entities(switch_entities)
class XS1SwitchEntity(XS1DeviceEntity, ToggleEntity):
"""Representation of a XS1 switch actuator."""
@property
def name(self):
"""Return the name of the device if any."""
return self.device.name()
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.value() == 100
def turn_on(self, **kwargs):
"""Turn the device on."""
self.device.turn_on()
def turn_off(self, **kwargs):
"""Turn the device off."""
self.device.turn_off()
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from weblate.trans.forms import LabelForm
from weblate.trans.models import Label
from weblate.trans.util import render
from weblate.utils.views import get_project
@login_required
@never_cache
def project_labels(request, project):
obj = get_project(request, project)
if not request.user.has_perm("project.edit", obj):
raise PermissionDenied()
if request.method == "POST":
form = LabelForm(request.POST)
if form.is_valid():
form.instance.project = obj
form.save()
return redirect("labels", project=project)
else:
form = LabelForm()
return render(
request, "project-labels.html", {"object": obj, "project": obj, "form": form}
)
@login_required
@never_cache
def label_edit(request, project, pk):
obj = get_project(request, project)
if not request.user.has_perm("project.edit", obj):
raise PermissionDenied()
label = get_object_or_404(Label, pk=pk, project=obj)
if request.method == "POST":
form = LabelForm(request.POST, instance=label)
if form.is_valid():
form.save()
return redirect("labels", project=project)
else:
form = LabelForm(instance=label)
return render(
request,
"project-label-edit.html",
{"object": obj, "project": obj, "form": form},
)
@login_required
@never_cache
@require_POST
def label_delete(request, project, pk):
obj = get_project(request, project)
if not request.user.has_perm("project.edit", obj):
raise PermissionDenied()
label = get_object_or_404(Label, pk=pk, project=obj)
label.delete()
return redirect("labels", project=project)
|
import pytest
from homeassistant.components import automation, zone
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service, mock_component
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
hass.loop.run_until_complete(
async_setup_component(
hass,
zone.DOMAIN,
{
"zone": {
"name": "test",
"latitude": 32.880837,
"longitude": -117.237561,
"radius": 250,
}
},
)
)
async def test_if_fires_on_zone_enter(hass, calls):
"""Test for firing on zone enter."""
context = Context()
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.881011, "longitude": -117.234758, "source": "test_source"},
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "geo_location",
"source": "test_source",
"zone": "zone.test",
"event": "enter",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"zone.name",
)
)
},
},
}
},
)
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.880586, "longitude": -117.237564},
context=context,
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert (
calls[0].data["some"]
== "geo_location - geo_location.entity - hello - hello - test"
)
# Set out of zone again so we can trigger call
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.881011, "longitude": -117.234758},
)
await hass.async_block_till_done()
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.880586, "longitude": -117.237564},
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_for_enter_on_zone_leave(hass, calls):
"""Test for not firing on zone leave."""
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "geo_location",
"source": "test_source",
"zone": "zone.test",
"event": "enter",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.881011, "longitude": -117.234758},
)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_zone_leave(hass, calls):
"""Test for firing on zone leave."""
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "geo_location",
"source": "test_source",
"zone": "zone.test",
"event": "leave",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.881011, "longitude": -117.234758, "source": "test_source"},
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_for_leave_on_zone_enter(hass, calls):
"""Test for not firing on zone enter."""
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.881011, "longitude": -117.234758, "source": "test_source"},
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "geo_location",
"source": "test_source",
"zone": "zone.test",
"event": "leave",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.880586, "longitude": -117.237564},
)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_zone_appear(hass, calls):
"""Test for firing if entity appears in zone."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "geo_location",
"source": "test_source",
"zone": "zone.test",
"event": "enter",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"zone.name",
)
)
},
},
}
},
)
# Entity appears in zone without previously existing outside the zone.
context = Context()
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
context=context,
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert (
calls[0].data["some"] == "geo_location - geo_location.entity - - hello - test"
)
async def test_if_fires_on_zone_disappear(hass, calls):
"""Test for firing if entity disappears from zone."""
hass.states.async_set(
"geo_location.entity",
"hello",
{"latitude": 32.880586, "longitude": -117.237564, "source": "test_source"},
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "geo_location",
"source": "test_source",
"zone": "zone.test",
"event": "leave",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"zone.name",
)
)
},
},
}
},
)
# Entity disappears from zone without new coordinates outside the zone.
hass.states.async_remove("geo_location.entity")
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"] == "geo_location - geo_location.entity - hello - - test"
)
|
from __future__ import unicode_literals
import operator
import itertools
import functools
import traceback
from core.CONF import get_conf_dic
from lib.fun.filter import encode_filter
from lib.data.data import pystrs, pyoptions
from lib.fun.fun import finalsavepath, finishprinter, cool
from lib.parse.confparse import elementparser, confmatcher
def build_pattern_dic(source=""):
buffer = []
buffer_size = 512
storepath = finalsavepath("pattern")
pattern_set = patterncore(source)
char_list = [sorted(set(i)) for i in pattern_set.values()]
try:
with open(storepath, "w") as f:
for item in map("".join, itertools.product(*char_list)):
item = pyoptions.head + item + pyoptions.tail
buffer.append(item if pyoptions.encode == "none" else encode_filter(item, encode=pyoptions.encode))
if len(buffer) == buffer_size:
f.write(pyoptions.CRLF.join(buffer) + pyoptions.CRLF)
buffer = []
f.write(pyoptions.CRLF.join(buffer))
finishprinter(storepath)
except Exception:
print(cool.red('[-] Exception as follows:') + pyoptions.CRLF)
print(traceback.format_exc())
def patterncore(resource):
pattern_set = {}
try:
confdicts = elementparser(confmatcher(resource))
except IndexError:
confdicts = {}
exit(cool.red("[-] parse element error, please check your parsing element"))
finalen = len(confdicts[pystrs.conf_head])
for x in range(0, finalen):
# pattern_set_list = confdicts[pystrs.conf_char][x]
# keep parsing head and tail
pattern_set_list = get_conf_dic(int(confdicts[pystrs.conf_minlen][x]),
int(confdicts[pystrs.conf_maxlen][x]),
confdicts[pystrs.conf_char][x],
confdicts[pystrs.conf_encode][x],
confdicts[pystrs.conf_head][x],
confdicts[pystrs.conf_tail][x])
pattern_set[x] = "".join(pattern_set_list)
count = functools.reduce(operator.mul, [len(i) for i in pattern_set.values()], 1)
if count >= pyoptions.count_switcher:
        exit_msg = pyoptions.CRLF + cool.fuchsia("[!] Number of items to build exceeds pyoptions.count_switcher: %s%s"
                                                 "[!] Modify /lib/data/data.py count_switcher to adjust it" %
                                                 (str(pyoptions.count_switcher), pyoptions.CRLF))
exit(exit_msg)
return pattern_set
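# Illustration (not part of the original module) of how build_pattern_dic
# expands the per-position character sets: assuming patterncore() returned
# {0: 'ab', 1: '12'}, char_list becomes [['a', 'b'], ['1', '2']] and
# itertools.product(*char_list) yields the candidate items
#   a1, a2, b1, b2
# each of which is then wrapped with pyoptions.head / pyoptions.tail before
# being buffered and written to the wordlist file.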
|
import json
import random
import requests
import requests.exceptions
import time
class OpenTSDBClient(object):
"""OpenTSDBClient primary client object to connect OpenTSDB.
The :class:`~.OpenTSDBClient` object holds information necessary to
connect to OpenTSDB. Requests can be made to OpenTSDB directly through
the client.
:param host: hostname to connect to OpenTSDB, defaults to 'localhost'
:type host: str
:param port: port to connect to OpenTSDB, defaults to 4242
:type port: int
:param username: user to connect, defaults to 'root'
:type username: str
:param password: password of the user, defaults to 'root'
:type password: str
:param pool_size: urllib3 connection pool size, defaults to 10.
:type pool_size: int
:param ssl: use https instead of http to connect to OpenTSDB, defaults to
False
:type ssl: bool
:param verify_ssl: verify SSL certificates for HTTPS requests, defaults to
False
:type verify_ssl: bool
:param timeout: number of seconds Requests will wait for your client to
establish a connection, defaults to None
:type timeout: int
:param retries: number of retries your client will try before aborting,
        defaults to 3; 0 means retry until success
:type retries: int
:param proxies: HTTP(S) proxy to use for Requests, defaults to {}
:type proxies: dict
:param cert: Path to client certificate information to use for mutual TLS
authentication. You can specify a local cert to use
as a single file containing the private key and the certificate, or as
a tuple of both files’ paths, defaults to None
:type cert: str
:raises ValueError: if cert is provided but ssl is disabled (set to False)
"""
def __init__(
self,
host='localhost',
port=4242,
username='root',
password='root',
ssl=False,
verify_ssl=False,
timeout=None,
retries=3,
proxies=None,
pool_size=10,
cert=None,
):
"""Construct a new OpenTSDBClient object."""
self._host = host
self._port = int(port)
self._username = username
self._password = password
self._timeout = timeout
self._retries = retries
self._verify_ssl = verify_ssl
self._session = requests.Session()
adapter = requests.adapters.HTTPAdapter(
pool_connections=int(pool_size), pool_maxsize=int(pool_size))
self._scheme = "http"
if ssl is True:
self._scheme = "https"
self._session.mount(self._scheme + '://', adapter)
if proxies is None:
self._proxies = {}
else:
self._proxies = proxies
if cert:
if not ssl:
raise ValueError(
"Client certificate provided but ssl is disabled.")
else:
self._session.cert = cert
self._baseurl = "{0}://{1}:{2}".format(
self._scheme, self._host, self._port)
self._headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
def write(self, data, expected_response_code=204):
"""Write data to OpenTSDB.
:param data: the data to be written
:param expected_response_code: the expected response code of the write
operation, defaults to 204
:type expected_response_code: int
:returns: True, if the write operation is successful
:rtype: bool
"""
headers = self._headers
headers['Content-Type'] = 'application/json'
self.request(
url="api/put",
method='POST',
data=data,
expected_response_code=expected_response_code,
headers=headers)
return True
def request(
self,
url,
method='GET',
params=None,
data=None,
expected_response_code=200,
headers=None):
"""Make a HTTP request to the OpenTSDB API.
:param url: the path of the HTTP request
:type url: str
:param method: the HTTP method for the request, defaults to GET
:type method: str
:param params: additional parameters for the request, defaults to None
:type params: dict
:param data: the data of the request, defaults to None
:type data: str
:param expected_response_code: the expected response code of
the request, defaults to 200
:type expected_response_code: int
:param headers: headers to add to the request
:type headers: dict
:returns: the response from the request
:rtype: :class:`requests.Response`
        :raises Exception: if the response code is a server error code (5xx),
            or if it is not the same as `expected_response_code`
"""
url = "{0}/{1}".format(self._baseurl, url)
if headers is None:
headers = self._headers
if params is None:
params = {}
if isinstance(data, (dict, list)):
data = json.dumps(data)
# Try to send the request more than once by default (see #103)
retry = True
_try = 0
while retry:
try:
response = self._session.request(
method=method,
url=url,
auth=(self._username, self._password),
params=params,
data=data,
headers=headers,
proxies=self._proxies,
verify=self._verify_ssl,
timeout=self._timeout)
break
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError, requests.exceptions.Timeout):
_try += 1
if self._retries != 0:
retry = _try < self._retries
if method == "POST":
time.sleep((2**_try) * random.random() / 100.0)
if not retry:
raise
# if there's not an error, there must have been a successful response
if 500 <= response.status_code < 600:
raise Exception(response.content)
elif response.status_code == expected_response_code:
return response
else:
raise Exception(response.content, response.status_code)
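# Minimal usage sketch (not part of the original module). The api/put payload
# shape follows the public OpenTSDB HTTP API; the host, metric and tags below
# are placeholders.
if __name__ == '__main__':
    client = OpenTSDBClient(host='localhost', port=4242,
                            username='root', password='root')
    # api/put accepts a single datapoint object (or a list of them); write()
    # expects a 204 response on success.
    client.write({
        'metric': 'sys.cpu.user',
        'timestamp': int(time.time()),
        'value': 42.5,
        'tags': {'host': 'web01'},
    })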
|
from ..grammar import NonTerminal, Terminal
class Item(object):
"An Earley Item, the atom of the algorithm."
__slots__ = ('s', 'rule', 'ptr', 'start', 'is_complete', 'expect', 'previous', 'node', '_hash')
def __init__(self, rule, ptr, start):
self.is_complete = len(rule.expansion) == ptr
self.rule = rule # rule
self.ptr = ptr # ptr
self.start = start # j
self.node = None # w
if self.is_complete:
self.s = rule.origin
self.expect = None
self.previous = rule.expansion[ptr - 1] if ptr > 0 and len(rule.expansion) else None
else:
self.s = (rule, ptr)
self.expect = rule.expansion[ptr]
self.previous = rule.expansion[ptr - 1] if ptr > 0 and len(rule.expansion) else None
self._hash = hash((self.s, self.start))
def advance(self):
return Item(self.rule, self.ptr + 1, self.start)
def __eq__(self, other):
return self is other or (self.s == other.s and self.start == other.start)
def __hash__(self):
return self._hash
def __repr__(self):
before = ( expansion.name for expansion in self.rule.expansion[:self.ptr] )
after = ( expansion.name for expansion in self.rule.expansion[self.ptr:] )
symbol = "{} ::= {}* {}".format(self.rule.origin.name, ' '.join(before), ' '.join(after))
return '%s (%d)' % (symbol, self.start)
class TransitiveItem(Item):
__slots__ = ('recognized', 'reduction', 'column', 'next_titem')
def __init__(self, recognized, trule, originator, start):
super(TransitiveItem, self).__init__(trule.rule, trule.ptr, trule.start)
self.recognized = recognized
self.reduction = originator
self.column = start
self.next_titem = None
self._hash = hash((self.s, self.start, self.recognized))
def __eq__(self, other):
if not isinstance(other, TransitiveItem):
return False
return self is other or (type(self.s) == type(other.s) and self.s == other.s and self.start == other.start and self.recognized == other.recognized)
def __hash__(self):
return self._hash
def __repr__(self):
before = ( expansion.name for expansion in self.rule.expansion[:self.ptr] )
after = ( expansion.name for expansion in self.rule.expansion[self.ptr:] )
return '{} : {} -> {}* {} ({}, {})'.format(self.recognized.name, self.rule.origin.name, ' '.join(before), ' '.join(after), self.column, self.start)
|
import asynctest
from mock import patch
from paasta_tools.monitoring import check_mesos_outdated_tasks
@patch(
"paasta_tools.monitoring.check_mesos_outdated_tasks.get_mesos_master", autospec=True
)
def test_check_mesos_tasks(mock_get_mesos_master):
mock_get_mesos_master.return_value.state = asynctest.CoroutineMock(
func=asynctest.CoroutineMock(),
return_value={
"slaves": [
{
"id": "4abbb181-fd06-4729-815b-6b55cebdf8ee-S2",
"hostname": "mesos-slave1.example.com",
}
],
"frameworks": [
{
"name": "marathon",
"tasks": [
{
"state": "TASK_RUNNING",
"name": "service.instance.gitlast_SHA.config3f15fefe",
"slave_id": "4abbb181-fd06-4729-815b-6b55cebdf8ee-S2",
"statuses": [
{
"state": "TASK_RUNNING",
"timestamp": 1509392500.9267,
"container_status": {
"container_id": {
"value": "a69b426d-f283-4287-9bee-6b8811386e1a"
}
},
}
],
},
{
"state": "TASK_RUNNING",
"name": "service.instance.gitold_SHA.config3f15fefe",
"slave_id": "4abbb181-fd06-4729-815b-6b55cebdf8ee-S2",
"statuses": [
{
"state": "TASK_RUNNING",
"timestamp": 1509342500.9267,
"container_status": {
"container_id": {
"value": "a69b426d-f283-4287-9bee-6b8811386e1b"
}
},
}
],
},
],
}
],
},
)
output, remedy = check_mesos_outdated_tasks.check_mesos_tasks()
assert len(output) == 1
assert "a69b426d-f283-4287-9bee-6b8811386e1b" in output[0]
assert "old_SHA" in output[0]
|
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import resize_bbox
from chainercv.utils.testing.generate_random_bbox import generate_random_bbox
class TestResizeBbox(unittest.TestCase):
def test_resize_bbox(self):
in_size = (32, 24)
out_size = (in_size[0] * 2, in_size[1] * 4)
bbox = generate_random_bbox(10, in_size, 0, min(in_size))
out = resize_bbox(bbox, in_size=in_size, out_size=out_size)
bbox_expected = bbox.copy()
bbox_expected[:, 0] = bbox[:, 0] * 2
bbox_expected[:, 1] = bbox[:, 1] * 4
bbox_expected[:, 2] = bbox[:, 2] * 2
bbox_expected[:, 3] = bbox[:, 3] * 4
np.testing.assert_equal(out, bbox_expected)
testing.run_module(__name__, __file__)
|
import json
import logging
import pyfttt
import requests
import voluptuous as vol
from homeassistant.const import CONF_WEBHOOK_ID, HTTP_OK
from homeassistant.helpers import config_entry_flow
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
EVENT_RECEIVED = "ifttt_webhook_received"
ATTR_EVENT = "event"
ATTR_TARGET = "target"
ATTR_VALUE1 = "value1"
ATTR_VALUE2 = "value2"
ATTR_VALUE3 = "value3"
CONF_KEY = "key"
SERVICE_PUSH_ALARM_STATE = "push_alarm_state"
SERVICE_TRIGGER = "trigger"
SERVICE_TRIGGER_SCHEMA = vol.Schema(
{
vol.Required(ATTR_EVENT): cv.string,
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_VALUE1): cv.string,
vol.Optional(ATTR_VALUE2): cv.string,
vol.Optional(ATTR_VALUE3): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN): vol.Schema(
{vol.Required(CONF_KEY): vol.Any({cv.string: cv.string}, cv.string)}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the IFTTT service component."""
if DOMAIN not in config:
return True
api_keys = config[DOMAIN][CONF_KEY]
if isinstance(api_keys, str):
api_keys = {"default": api_keys}
def trigger_service(call):
"""Handle IFTTT trigger service calls."""
event = call.data[ATTR_EVENT]
targets = call.data.get(ATTR_TARGET, list(api_keys))
value1 = call.data.get(ATTR_VALUE1)
value2 = call.data.get(ATTR_VALUE2)
value3 = call.data.get(ATTR_VALUE3)
target_keys = {}
for target in targets:
if target not in api_keys:
_LOGGER.error("No IFTTT api key for %s", target)
continue
target_keys[target] = api_keys[target]
try:
for target, key in target_keys.items():
res = pyfttt.send_event(key, event, value1, value2, value3)
if res.status_code != HTTP_OK:
_LOGGER.error("IFTTT reported error sending event to %s", target)
except requests.exceptions.RequestException:
_LOGGER.exception("Error communicating with IFTTT")
hass.services.async_register(
DOMAIN, SERVICE_TRIGGER, trigger_service, schema=SERVICE_TRIGGER_SCHEMA
)
return True
async def handle_webhook(hass, webhook_id, request):
"""Handle webhook callback."""
body = await request.text()
try:
data = json.loads(body) if body else {}
except ValueError:
_LOGGER.error(
"Received invalid data from IFTTT. Data needs to be formatted as JSON: %s",
body,
)
return
if not isinstance(data, dict):
_LOGGER.error(
"Received invalid data from IFTTT. Data needs to be a dictionary: %s", data
)
return
data["webhook_id"] = webhook_id
hass.bus.async_fire(EVENT_RECEIVED, data)
async def async_setup_entry(hass, entry):
"""Configure based on config entry."""
hass.components.webhook.async_register(
DOMAIN, "IFTTT", entry.data[CONF_WEBHOOK_ID], handle_webhook
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
return True
async_remove_entry = config_entry_flow.webhook_async_remove_entry
|
import asyncio
from copy import copy
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_BINARY_SENSORS,
CONF_LIGHTS,
CONF_MAXIMUM,
CONF_MINIMUM,
CONF_NAME,
CONF_PIN,
CONF_SENSORS,
CONF_SWITCHES,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, device_registry as dr
from .board import FirmataBoard
from .const import (
CONF_ARDUINO_INSTANCE_ID,
CONF_ARDUINO_WAIT,
CONF_DIFFERENTIAL,
CONF_INITIAL_STATE,
CONF_NEGATE_STATE,
CONF_PIN_MODE,
CONF_PLATFORM_MAP,
CONF_SAMPLING_INTERVAL,
CONF_SERIAL_BAUD_RATE,
CONF_SERIAL_PORT,
CONF_SLEEP_TUNE,
DOMAIN,
FIRMATA_MANUFACTURER,
PIN_MODE_ANALOG,
PIN_MODE_INPUT,
PIN_MODE_OUTPUT,
PIN_MODE_PULLUP,
PIN_MODE_PWM,
)
_LOGGER = logging.getLogger(__name__)
DATA_CONFIGS = "board_configs"
ANALOG_PIN_SCHEMA = vol.All(cv.string, vol.Match(r"^A[0-9]+$"))
SWITCH_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
# Both digital and analog pins may be used as digital output
vol.Required(CONF_PIN): vol.Any(cv.positive_int, ANALOG_PIN_SCHEMA),
vol.Required(CONF_PIN_MODE): PIN_MODE_OUTPUT,
vol.Optional(CONF_INITIAL_STATE, default=False): cv.boolean,
vol.Optional(CONF_NEGATE_STATE, default=False): cv.boolean,
},
required=True,
)
LIGHT_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
# Both digital and analog pins may be used as PWM/analog output
vol.Required(CONF_PIN): vol.Any(cv.positive_int, ANALOG_PIN_SCHEMA),
vol.Required(CONF_PIN_MODE): PIN_MODE_PWM,
vol.Optional(CONF_INITIAL_STATE, default=0): cv.positive_int,
vol.Optional(CONF_MINIMUM, default=0): cv.positive_int,
vol.Optional(CONF_MAXIMUM, default=255): cv.positive_int,
},
required=True,
)
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
# Both digital and analog pins may be used as digital input
vol.Required(CONF_PIN): vol.Any(cv.positive_int, ANALOG_PIN_SCHEMA),
vol.Required(CONF_PIN_MODE): vol.Any(PIN_MODE_INPUT, PIN_MODE_PULLUP),
vol.Optional(CONF_NEGATE_STATE, default=False): cv.boolean,
},
required=True,
)
SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
# Currently only analog input sensor is implemented
vol.Required(CONF_PIN): ANALOG_PIN_SCHEMA,
vol.Required(CONF_PIN_MODE): PIN_MODE_ANALOG,
# Default differential is 40 to avoid a flood of messages on initial setup
# in case pin is unplugged. Firmata responds really really fast
vol.Optional(CONF_DIFFERENTIAL, default=40): vol.All(
cv.positive_int, vol.Range(min=1)
),
},
required=True,
)
BOARD_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_SERIAL_PORT): cv.string,
vol.Optional(CONF_SERIAL_BAUD_RATE): cv.positive_int,
vol.Optional(CONF_ARDUINO_INSTANCE_ID): cv.positive_int,
vol.Optional(CONF_ARDUINO_WAIT): cv.positive_int,
vol.Optional(CONF_SLEEP_TUNE): vol.All(
vol.Coerce(float), vol.Range(min=0.0001)
),
vol.Optional(CONF_SAMPLING_INTERVAL): cv.positive_int,
vol.Optional(CONF_SWITCHES): [SWITCH_SCHEMA],
vol.Optional(CONF_LIGHTS): [LIGHT_SCHEMA],
vol.Optional(CONF_BINARY_SENSORS): [BINARY_SENSOR_SCHEMA],
vol.Optional(CONF_SENSORS): [SENSOR_SCHEMA],
},
required=True,
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [BOARD_CONFIG_SCHEMA])}, extra=vol.ALLOW_EXTRA
)
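# Illustrative board configuration (an assumption, not from the original
# source): a structure BOARD_CONFIG_SCHEMA would accept, shown as the Python
# data the validator sees (in YAML it would live under the firmata key).
#   [
#       {
#           CONF_SERIAL_PORT: "/dev/ttyACM0",
#           CONF_SWITCHES: [
#               {CONF_NAME: "Relay", CONF_PIN: 13, CONF_PIN_MODE: PIN_MODE_OUTPUT},
#           ],
#           CONF_SENSORS: [
#               {CONF_NAME: "Light level", CONF_PIN: "A0", CONF_PIN_MODE: PIN_MODE_ANALOG},
#           ],
#       }
#   ]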
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the Firmata domain."""
# Delete specific entries that no longer exist in the config
if hass.config_entries.async_entries(DOMAIN):
for entry in hass.config_entries.async_entries(DOMAIN):
remove = True
for board in config[DOMAIN]:
if entry.data[CONF_SERIAL_PORT] == board[CONF_SERIAL_PORT]:
remove = False
break
if remove:
await hass.config_entries.async_remove(entry.entry_id)
# Setup new entries and update old entries
for board in config[DOMAIN]:
firmata_config = copy(board)
existing_entry = False
for entry in hass.config_entries.async_entries(DOMAIN):
if board[CONF_SERIAL_PORT] == entry.data[CONF_SERIAL_PORT]:
existing_entry = True
firmata_config[CONF_NAME] = entry.data[CONF_NAME]
hass.config_entries.async_update_entry(entry, data=firmata_config)
break
if not existing_entry:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=firmata_config,
)
)
return True
async def async_setup_entry(
hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> bool:
"""Set up a Firmata board for a config entry."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
_LOGGER.debug(
"Setting up Firmata id %s, name %s, config %s",
config_entry.entry_id,
config_entry.data[CONF_NAME],
config_entry.data,
)
board = FirmataBoard(config_entry.data)
if not await board.async_setup():
return False
hass.data[DOMAIN][config_entry.entry_id] = board
async def handle_shutdown(event) -> None:
"""Handle shutdown of board when Home Assistant shuts down."""
        # Ensure board was not already removed before shutdown
if config_entry.entry_id in hass.data[DOMAIN]:
await board.async_reset()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, handle_shutdown)
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={},
identifiers={(DOMAIN, board.name)},
manufacturer=FIRMATA_MANUFACTURER,
name=board.name,
sw_version=board.firmware_version,
)
for (conf, platform) in CONF_PLATFORM_MAP.items():
if conf in config_entry.data:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
return True
async def async_unload_entry(
    hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> bool:
"""Shutdown and close a Firmata board for a config entry."""
_LOGGER.debug("Closing Firmata board %s", config_entry.data[CONF_NAME])
unload_entries = []
for (conf, platform) in CONF_PLATFORM_MAP.items():
if conf in config_entry.data:
unload_entries.append(
hass.config_entries.async_forward_entry_unload(config_entry, platform)
)
results = []
if unload_entries:
results = await asyncio.gather(*unload_entries)
results.append(await hass.data[DOMAIN].pop(config_entry.entry_id).async_reset())
return False not in results
|
import textwrap
import pytest
from qutebrowser.config import configexc
from qutebrowser.utils import usertypes
def test_validation_error():
e = configexc.ValidationError('val', 'msg')
assert e.option is None
assert str(e) == "Invalid value 'val' - msg"
@pytest.mark.parametrize('deleted, renamed, expected', [
(False, None, "No option 'opt'"),
(True, None, "No option 'opt' (this option was removed from qutebrowser)"),
(False, 'new', "No option 'opt' (this option was renamed to 'new')"),
])
def test_no_option_error(deleted, renamed, expected):
e = configexc.NoOptionError('opt', deleted=deleted, renamed=renamed)
assert e.option == 'opt'
assert str(e) == expected
def test_no_option_error_clash():
with pytest.raises(AssertionError):
configexc.NoOptionError('opt', deleted=True, renamed='foo')
def test_no_autoconfig_error():
e = configexc.NoAutoconfigError('opt')
expected = "The opt setting can only be set in config.py!"
assert str(e) == expected
@pytest.mark.parametrize('raw_backends', [
None,
{'QtWebEngine': 'Qt 5.11', 'QtWebKit': False}
])
def test_backend_error(raw_backends):
e = configexc.BackendError('foo', usertypes.Backend.QtWebKit, raw_backends)
expected = "The foo setting is not available with the QtWebKit backend!"
assert str(e) == expected
def test_backend_error_condition():
e = configexc.BackendError('foo', usertypes.Backend.QtWebEngine,
{'QtWebEngine': 'Qt 5.11', 'QtWebKit': True})
expected = "The foo setting needs Qt 5.11 with the QtWebEngine backend!"
assert str(e) == expected
def test_no_pattern_error():
e = configexc.NoPatternError('foo')
expected = "The foo setting does not support URL patterns!"
assert str(e) == expected
def test_desc_with_text():
"""Test ConfigErrorDesc.with_text."""
old = configexc.ConfigErrorDesc("Error text", Exception("Exception text"))
new = old.with_text("additional text")
assert str(new) == 'Error text (additional text): Exception text'
@pytest.fixture
def errors():
"""Get a ConfigFileErrors object."""
err1 = configexc.ConfigErrorDesc("Error text 1", Exception("Exception 1"))
err2 = configexc.ConfigErrorDesc("Error text 2", Exception("Exception 2"),
"Fake traceback")
return configexc.ConfigFileErrors("config.py", [err1, err2])
def test_config_file_errors_str(errors):
assert str(errors).splitlines() == [
'Errors occurred while reading config.py:',
' Error text 1: Exception 1',
' Error text 2 - Exception: Exception 2',
]
def test_config_file_errors_html(errors):
html = errors.to_html()
assert textwrap.dedent(html) == textwrap.dedent("""
Errors occurred while reading config.py:
<ul>
<li>
<b>Error text 1</b>: Exception 1
</li>
<li>
<b>Error text 2</b>: Exception 2
<pre>
Fake traceback
</pre>
</li>
</ul>
""")
# Make sure the traceback is not indented
assert '<pre>\nFake traceback\n' in html
def test_config_file_errors_fatal():
err = configexc.ConfigErrorDesc("Text", Exception("Text"))
errors = configexc.ConfigFileErrors("state", [err], fatal=True)
assert errors.fatal
|
import csv
import typing
import numpy as np
import pandas as pd
import matchzoo as mz
class Embedding(object):
"""
Embedding class.
Examples::
>>> import matchzoo as mz
>>> train_raw = mz.datasets.toy.load_data()
>>> pp = mz.preprocessors.NaivePreprocessor()
>>> train = pp.fit_transform(train_raw, verbose=0)
>>> vocab_unit = mz.build_vocab_unit(train, verbose=0)
>>> term_index = vocab_unit.state['term_index']
>>> embed_path = mz.datasets.embeddings.EMBED_RANK
To load from a file:
>>> embedding = mz.embedding.load_from_file(embed_path)
>>> matrix = embedding.build_matrix(term_index)
>>> matrix.shape[0] == len(term_index)
True
To build your own:
>>> data = pd.DataFrame(data=[[0, 1], [2, 3]], index=['A', 'B'])
>>> embedding = mz.Embedding(data)
>>> matrix = embedding.build_matrix({'A': 2, 'B': 1, '_PAD': 0})
>>> matrix.shape == (3, 2)
True
"""
def __init__(self, data: pd.DataFrame):
"""
Embedding.
:param data: DataFrame to use as term to vector mapping.
"""
self._data = data
@property
def input_dim(self) -> int:
""":return Embedding input dimension."""
return self._data.shape[0]
@property
def output_dim(self) -> int:
""":return Embedding output dimension."""
return self._data.shape[1]
def build_matrix(
self,
term_index: typing.Union[
dict, mz.preprocessors.units.Vocabulary.TermIndex],
initializer=lambda: np.random.uniform(-0.2, 0.2)
) -> np.ndarray:
"""
Build a matrix using `term_index`.
:param term_index: A `dict` or `TermIndex` to build with.
        :param initializer: A callable that returns a default value for missing
            terms in data. (default: a random uniform distribution in the
            range `(-0.2, 0.2)`)
:return: A matrix.
"""
input_dim = len(term_index)
matrix = np.empty((input_dim, self.output_dim))
for index in np.ndindex(*matrix.shape):
matrix[index] = initializer()
valid_keys = set(self._data.index)
for term, index in term_index.items():
if term in valid_keys:
matrix[index] = self._data.loc[term]
return matrix
def load_from_file(file_path: str, mode: str = 'word2vec') -> Embedding:
"""
Load embedding from `file_path`.
:param file_path: Path to file.
:param mode: Embedding file format mode, one of 'word2vec' or 'glove'.
(default: 'word2vec')
:return: An :class:`matchzoo.embedding.Embedding` instance.
"""
if mode == 'word2vec':
data = pd.read_csv(file_path,
sep=" ",
index_col=0,
header=None,
skiprows=1)
elif mode == 'glove':
data = pd.read_csv(file_path,
sep=" ",
index_col=0,
header=None,
quoting=csv.QUOTE_NONE)
else:
raise TypeError(f"{mode} is not a supported embedding type."
f"`word2vec` or `glove` expected.")
return Embedding(data)
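# Illustrative note (an assumption, not from the original source): the two
# modes above map to the usual text formats. A word2vec-style file starts with
# a "<vocab_size> <dim>" header line (hence skiprows=1), e.g.
#     2 3
#     apple 0.1 0.2 0.3
#     banana 0.4 0.5 0.6
# while a GloVe-style file has no header and begins directly with
# "word v1 v2 ..." rows.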
|
import diamond.collector
class PingCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(PingCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the ping binary',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PingCollector, self).get_default_config()
config.update({
'path': 'ping',
'bin': '/bin/ping',
})
return config
def collect(self):
for key in self.config.keys():
if key[:7] == "target_":
host = self.config[key]
metric_name = host.replace('.', '_')
ping = self.run_command(['-nq', '-c 1', host])
ping = ping[0].strip().split("\n")[-1]
# Linux
if ping.startswith('rtt'):
ping = ping.split()[3].split('/')[0]
metric_value = float(ping)
# OS X
elif ping.startswith('round-trip '):
ping = ping.split()[3].split('/')[0]
metric_value = float(ping)
# Unknown
else:
metric_value = 10000
self.publish(metric_name, metric_value, precision=3)
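# Illustrative summary lines (assumed typical ping output, not from the
# original source) that collect() parses; the first field of the min/avg/max
# group is what gets published:
#   Linux: "rtt min/avg/max/mdev = 0.042/0.051/0.067/0.010 ms"          -> 0.042
#   OS X:  "round-trip min/avg/max/stddev = 0.042/0.051/0.067/0.010 ms" -> 0.042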
|
import os
import unittest
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.windows_packages import diskspd
class DiskspdBenchmarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
def getDataContents(self, file_name):
path = os.path.join(os.path.dirname(__file__), '..', 'data', file_name)
with open(path) as fp:
contents = fp.read()
return contents
def setUp(self):
self.result_xml = self.getDataContents('diskspd_result.xml')
def testDiskSpdParsing(self):
single_sample = diskspd.ParseDiskSpdResults(self.result_xml, {},
'ReadSpeed')
expected_metadata = {'DisableAffinity': 'false',
'MaxFileSize': '0',
'BlockSize': '65536',
'IdlePercent': '96.80',
'CompletionRoutines': 'false',
'StrideSize': '65536',
'RandomAccess': 'false',
'Weight': '1',
'UserPercent': '0.25',
'WriteBytes': 0,
'Warmup': '5',
'Pattern': 'sequential',
'IOPriority': '3',
'ThreadsPerFile': '4',
'KernelPercent': '2.95',
'ReadIops': 3030,
'BytesCount': 5961809920,
'InterlockedSequential': 'false',
'MeasureLatency': 'false',
'WriteRatio': '0',
'FileSize': '838860800',
'BaseFileOffset': '0',
'Cooldown': '0',
'IOCount': 90970,
'UseLargePages': 'false',
'UsagePercent': '3.20',
'SequentialScan': 'true',
'TotalIops': 3030,
'WriteSpeed': 0,
'ProcCount': '4',
'WriteCount': 0,
'ReadSpeed': 189,
'TestTimeSeconds': '30.02',
'ThreadCount': '4',
'ReadBytes': 5961809920,
'TemporaryFile': 'false',
'ReadCount': 90970,
'Duration': '30',
'ThreadStride': '0',
'TotalSpeed': 189,
'RandSeed': '0',
'RequestCount': '0',
'Path': 'C:\\scratch\\testfile.dat',
'WriteThrough': 'true',
'CalculateIopsStdDev': 'false',
'IoBucketDuration': '1000',
'ParallelAsyncIO': 'false',
'Throughput': '0',
'DisableOSCache': 'true',
'WriteIops': 0}
sample_list = [
sample.Sample('ReadSpeed', 189, 'MB/s',
expected_metadata)
]
self.assertSampleListsEqualUpToTimestamp([single_sample], sample_list)
if __name__ == '__main__':
unittest.main()
|
import os
from babelfish import Language, language_converters
import pytest
from vcr import VCR
from subliminal.providers.shooter import ShooterSubtitle, ShooterProvider
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
match_on=['method', 'scheme', 'host', 'port', 'path', 'body'],
cassette_library_dir=os.path.realpath(os.path.join('tests', 'cassettes', 'shooter')))
def test_get_matches_movie_hash(movies):
subtitle = ShooterSubtitle(Language('zho'), '314f454ab464775498ae6f1f5ad813a9;fdaa8b702d8936feba2122e93ba5c44f;'
'0a6935e3436aa7db5597ef67a2c494e3;4d269733f36ddd49f71e92732a462fe5', None)
matches = subtitle.get_matches(movies['man_of_steel'])
assert matches == {'hash'}
@pytest.mark.converter
def test_converter_convert_alpha3():
assert language_converters['shooter'].convert('zho') == 'chn'
@pytest.mark.converter
def test_converter_reverse():
assert language_converters['shooter'].reverse('chn') == ('zho',)
@pytest.mark.integration
@vcr.use_cassette
def test_query_movie(movies):
language = Language('zho')
video = movies['man_of_steel']
with ShooterProvider() as provider:
subtitles = provider.query(language, video.name, video.hashes['shooter'])
assert len(subtitles) == 3
@pytest.mark.integration
@vcr.use_cassette
def test_query_movie_no_hash(movies):
language = Language('zho')
video = movies['enders_game']
with ShooterProvider() as provider:
subtitles = provider.query(language, video.name)
assert len(subtitles) == 0
@pytest.mark.integration
@vcr.use_cassette
def test_query_episode(episodes):
video = episodes['bbt_s07e05']
language = Language('zho')
with ShooterProvider() as provider:
subtitles = provider.query(language, video.name, video.hashes['shooter'])
assert len(subtitles) == 3
@pytest.mark.integration
@vcr.use_cassette
def test_query_episode_no_hash(episodes):
video = episodes['dallas_2012_s01e03']
language = Language('zho')
with ShooterProvider() as provider:
subtitles = provider.query(language, video.name)
assert len(subtitles) == 1
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles(movies):
video = movies['man_of_steel']
languages = {Language('eng'), Language('zho')}
with ShooterProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert len(subtitles) == 6
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(movies):
video = movies['man_of_steel']
languages = {Language('eng'), Language('zho')}
with ShooterProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
provider.download_subtitle(subtitles[0])
assert subtitles[0].content is not None
|
import gc
import sys
import time
import asyncio
from flexx.event import loop
from flexx import app
from flexx.event.both_tester import FakeStream, smart_compare
async def roundtrip(*sessions):
""" Coroutine to await a roundtrip to all given sessions.
"""
ok = []
def up():
ok.append(1)
for session in sessions:
session.call_after_roundtrip(up)
# timeout = time.time() + 5.0
while len(ok) < len(sessions):
await asyncio.sleep(0.02)
loop.iter()
def launch(cls, *args, **kwargs):
""" Shorthand for app.launch() that also returns session.
"""
# from flexx import app
c = app.App(cls, *args, **kwargs).launch('firefox-app')
return c, c.session
def filter_stdout(text):
py_lines = []
js_lines = []
for line in text.strip().splitlines():
if 'JS: ' in line:
js_lines.append(line.split('JS: ', 1)[1])
elif not line.startswith(('[I', '[D')):
py_lines.append(line)
return '\n'.join(py_lines), '\n'.join(js_lines)
def run_live(func):
""" Decorator to run a live test.
"""
def runner():
# Run with a fresh server and loop
loop.reset()
#asyncio_loop = asyncio.get_event_loop()
asyncio_loop = asyncio.new_event_loop()
app.create_server(port=0, loop=asyncio_loop)
print('running', func.__name__, '...', end='')
orig_stdout = sys.stdout
orig_stderr = sys.stderr
fake_stdout = FakeStream()
sys.stdout = sys.stderr = fake_stdout
t0 = time.time()
try:
# Call function - it could be a co-routine
cr = func()
if asyncio.iscoroutine(cr):
asyncio_loop.run_until_complete(cr)
gc.collect()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
# Clean up / shut down
print('done in %f seconds' % (time.time()-t0))
for appname in app.manager.get_app_names():
if 'default' not in appname:
sessions = app.manager.get_connections(appname)
for session in sessions:
if session.app is not None:
session.app.dispose()
session.close()
loop.reset()
# Get reference text
pyresult, jsresult = filter_stdout(fake_stdout.getvalue())
reference = '\n'.join(line[4:] for line in func.__doc__.splitlines())
parts = reference.split('-'*10)
pyref = parts[0].strip(' \n')
jsref = parts[-1].strip(' \n-')
# Compare
smart_compare(func, ('Python', pyresult, pyref),
('JavaScript', jsresult, jsref))
return runner
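# Minimal usage sketch (an assumption, not part of the original module): a live
# test decorated with run_live puts its expected output in the docstring, with
# the Python reference above a line of ten dashes and the JavaScript reference
# below it, matching the '-' * 10 split performed above.
#
#   @run_live
#   async def test_example():
#       """
#       expected python output
#       ----------
#       expected javascript output
#       """
#       component, session = launch(SomeComponent)  # SomeComponent is hypothetical
#       ...
#       await roundtrip(session)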
|
import bz2
import hashlib
import logging
import struct
import sys
import zlib
from io import BytesIO
logger = logging.getLogger(__name__)
VERSION = 2, 2, 1
__version__ = '.'.join(str(i) for i in VERSION)
# %% The encoder and decoder implementation
# Shorthands
spack = struct.pack
strunpack = struct.unpack
def lencode(x):
""" Encode an unsigned integer into a variable sized blob of bytes.
"""
# We could support 16 bit and 32 bit as well, but the gain is low, since
# 9 bytes for collections with over 250 elements is marginal anyway.
if x <= 250:
return spack('<B', x)
else:
return spack('<BQ', 253, x)
# Include len decoder for completeness; we've inlined it for performance.
def lendecode(f):
""" Decode an unsigned integer from a file.
"""
n = strunpack('<B', f.read(1))[0]
if n == 253: n = strunpack('<Q', f.read(8))[0] # noqa
return n
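# Worked example (illustration, not part of the original module):
#   lencode(7)    -> b'\x07'                       (a single size byte)
#   lencode(1000) -> b'\xfd' + spack('<Q', 1000)   (marker 253 + little-endian uint64)
# lendecode() reverses this: a leading byte of 253 means the real length
# follows as an 8-byte little-endian integer.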
def encode_type_id(b, ext_id):
""" Encode the type identifier, with or without extension id.
"""
if ext_id is not None:
bb = ext_id.encode('UTF-8')
return b.upper() + lencode(len(bb)) + bb # noqa
else:
return b # noqa
class BsdfLiteSerializer(object):
""" Instances of this class represent a BSDF encoder/decoder.
This is a lite variant of the Python BSDF serializer. It does not support
lazy loading or streaming, but is otherwise fully functional, including
support for custom extensions.
It acts as a placeholder for a set of extensions and encoding/decoding
options. Options for encoding:
* compression (int or str): ``0`` or "no" for no compression (default),
``1`` or "zlib" for Zlib compression (same as zip files and PNG), and
``2`` or "bz2" for Bz2 compression (more compact but slower writing).
Note that some BSDF implementations (e.g. JavaScript) may not support
compression.
* use_checksum (bool): whether to include a checksum with binary blobs.
* float64 (bool): Whether to write floats as 64 bit (default) or 32 bit.
"""
def __init__(self, extensions=None, **options):
self._extensions = {} # name -> extension
self._extensions_by_cls = {} # cls -> (name, extension.encode)
if extensions is None:
extensions = standard_extensions
for extension in extensions:
self.add_extension(extension)
self._parse_options(**options)
def _parse_options(self, compression=0, use_checksum=False, float64=True):
# Validate compression
if isinstance(compression, str):
m = {'no': 0, 'zlib': 1, 'bz2': 2}
compression = m.get(compression.lower(), compression)
if compression not in (0, 1, 2):
raise TypeError('Compression must be 0, 1, 2, '
'"no", "zlib", or "bz2"')
self._compression = compression
# Other encoding args
self._use_checksum = bool(use_checksum)
self._float64 = bool(float64)
def add_extension(self, extension_class):
""" Add an extension to this serializer instance, which must be
a subclass of Extension. Can be used as a decorator.
"""
# Check class
if not (isinstance(extension_class, type) and
issubclass(extension_class, Extension)):
            raise TypeError('add_extension() expects an Extension class.')
extension = extension_class()
# Get name
name = extension.name
if not isinstance(name, str):
raise TypeError('Extension name must be str.')
if len(name) == 0 or len(name) > 250:
raise NameError('Extension names must be nonempty and shorter '
'than 251 chars.')
if name in self._extensions:
logger.warning('BSDF warning: overwriting extension "%s", '
'consider removing first' % name)
# Get classes
cls = extension.cls
if not cls:
clss = []
elif isinstance(cls, (tuple, list)):
clss = cls
else:
clss = [cls]
for cls in clss:
if not isinstance(cls, type):
raise TypeError('Extension classes must be types.')
# Store
for cls in clss:
self._extensions_by_cls[cls] = name, extension.encode
self._extensions[name] = extension
return extension_class
def remove_extension(self, name):
""" Remove a converted by its unique name.
"""
if not isinstance(name, str):
raise TypeError('Extension name must be str.')
if name in self._extensions:
self._extensions.pop(name)
for cls in list(self._extensions_by_cls.keys()):
if self._extensions_by_cls[cls][0] == name:
self._extensions_by_cls.pop(cls)
def _encode(self, f, value, ext_id):
""" Main encoder function.
"""
x = encode_type_id
if value is None:
f.write(x(b'v', ext_id)) # V for void
elif value is True:
f.write(x(b'y', ext_id)) # Y for yes
elif value is False:
f.write(x(b'n', ext_id)) # N for no
elif isinstance(value, int):
if -32768 <= value <= 32767:
f.write(x(b'h', ext_id) + spack('h', value)) # H for ...
else:
f.write(x(b'i', ext_id) + spack('<q', value)) # I for int
elif isinstance(value, float):
if self._float64:
f.write(x(b'd', ext_id) + spack('<d', value)) # D for double
else:
f.write(x(b'f', ext_id) + spack('<f', value)) # f for float
elif isinstance(value, str):
bb = value.encode('UTF-8')
f.write(x(b's', ext_id) + lencode(len(bb))) # S for str
f.write(bb)
elif isinstance(value, (list, tuple)):
f.write(x(b'l', ext_id) + lencode(len(value))) # L for list
for v in value:
self._encode(f, v, None)
elif isinstance(value, dict):
f.write(x(b'm', ext_id) + lencode(len(value))) # M for mapping
for key, v in value.items():
assert isinstance(key, str)
name_b = key.encode('UTF-8')
f.write(lencode(len(name_b)))
f.write(name_b)
self._encode(f, v, None)
elif isinstance(value, bytes):
f.write(x(b'b', ext_id)) # B for blob
# Compress
compression = self._compression
if compression == 0:
compressed = value
elif compression == 1:
compressed = zlib.compress(value, 9)
elif compression == 2:
compressed = bz2.compress(value, 9)
else:
assert False, 'Unknown compression identifier'
# Get sizes
data_size = len(value)
used_size = len(compressed)
extra_size = 0
allocated_size = used_size + extra_size
# Write sizes - write at least in a size that allows resizing
if allocated_size <= 250 and compression == 0:
f.write(spack('<B', allocated_size))
f.write(spack('<B', used_size))
f.write(lencode(data_size))
else:
f.write(spack('<BQ', 253, allocated_size))
f.write(spack('<BQ', 253, used_size))
f.write(spack('<BQ', 253, data_size))
# Compression and checksum
f.write(spack('B', compression))
if self._use_checksum:
f.write(b'\xff' + hashlib.md5(compressed).digest())
else:
f.write(b'\x00')
# Byte alignment (only necessary for uncompressed data)
if compression == 0:
alignment = 8 - (f.tell() + 1) % 8 # +1 for the byte to write
f.write(spack('<B', alignment)) # padding for byte alignment
f.write(b'\x00' * alignment)
else:
f.write(spack('<B', 0))
# The actual data and extra space
f.write(compressed)
f.write(b'\x00' * (allocated_size - used_size))
elif getattr(value, "shape", None) == () and str(
getattr(value, "dtype", "")
).startswith(("uint", "int", "float")):
# Implicit conversion of numpy scalars
if 'int' in str(value.dtype):
value = int(value)
if -32768 <= value <= 32767:
f.write(x(b'h', ext_id) + spack('h', value))
else:
f.write(x(b'i', ext_id) + spack('<q', value))
else:
value = float(value)
if self._float64:
f.write(x(b'd', ext_id) + spack('<d', value))
else:
f.write(x(b'f', ext_id) + spack('<f', value))
else:
if ext_id is not None:
raise ValueError(
                    'Extension %s wrongfully encodes object to another '
'extension object (though it may encode to a list/dict '
'that contains other extension objects).' % ext_id)
# Try if the value is of a type we know
ex = self._extensions_by_cls.get(value.__class__, None)
# Maybe its a subclass of a type we know
if ex is None:
for name, c in self._extensions.items():
if c.match(self, value):
ex = name, c.encode
break
else:
ex = None
# Success or fail
if ex is not None:
ext_id2, extension_encode = ex
self._encode(f, extension_encode(self, value), ext_id2)
else:
t = ('Class %r is not a valid base BSDF type, nor is it '
'handled by an extension.')
raise TypeError(t % value.__class__.__name__)
def _decode(self, f):
""" Main decoder function.
"""
# Get value
char = f.read(1)
c = char.lower()
# Conversion (uppercase value identifiers signify converted values)
if not char:
raise EOFError()
elif char != c:
n = strunpack('<B', f.read(1))[0]
# if n == 253: n = strunpack('<Q', f.read(8))[0] # noqa - noneed
ext_id = f.read(n).decode('UTF-8')
else:
ext_id = None
if c == b'v':
value = None
elif c == b'y':
value = True
elif c == b'n':
value = False
elif c == b'h':
value = strunpack('<h', f.read(2))[0]
elif c == b'i':
value = strunpack('<q', f.read(8))[0]
elif c == b'f':
value = strunpack('<f', f.read(4))[0]
elif c == b'd':
value = strunpack('<d', f.read(8))[0]
elif c == b's':
n_s = strunpack('<B', f.read(1))[0]
if n_s == 253: n_s = strunpack('<Q', f.read(8))[0] # noqa
value = f.read(n_s).decode('UTF-8')
elif c == b'l':
n = strunpack('<B', f.read(1))[0]
if n >= 254:
# Streaming
closed = n == 254
n = strunpack('<Q', f.read(8))[0]
if closed:
value = [self._decode(f) for i in range(n)]
else:
value = []
try:
while True:
value.append(self._decode(f))
except EOFError:
pass
else:
# Normal
if n == 253: n = strunpack('<Q', f.read(8))[0] # noqa
value = [self._decode(f) for i in range(n)]
elif c == b'm':
value = dict()
n = strunpack('<B', f.read(1))[0]
if n == 253: n = strunpack('<Q', f.read(8))[0] # noqa
for i in range(n):
n_name = strunpack('<B', f.read(1))[0]
if n_name == 253: n_name = strunpack('<Q', f.read(8))[0] # noqa
assert n_name > 0
name = f.read(n_name).decode('UTF-8')
value[name] = self._decode(f)
elif c == b'b':
# Read blob header data (5 to 42 bytes)
# Size
allocated_size = strunpack('<B', f.read(1))[0]
if allocated_size == 253: allocated_size = strunpack('<Q', f.read(8))[0] # noqa
used_size = strunpack('<B', f.read(1))[0]
if used_size == 253: used_size = strunpack('<Q', f.read(8))[0] # noqa
data_size = strunpack('<B', f.read(1))[0]
if data_size == 253: data_size = strunpack('<Q', f.read(8))[0] # noqa
# Compression and checksum
compression = strunpack('<B', f.read(1))[0]
has_checksum = strunpack('<B', f.read(1))[0]
if has_checksum:
checksum = f.read(16) # noqa - not used yet
# Skip alignment
alignment = strunpack('<B', f.read(1))[0]
f.read(alignment)
# Get data
compressed = f.read(used_size)
# Skip remaining space
f.read(allocated_size - used_size)
# Decompress
if compression == 0:
value = compressed
elif compression == 1:
value = zlib.decompress(compressed)
elif compression == 2:
value = bz2.decompress(compressed)
else:
raise RuntimeError('Invalid compression %i' % compression)
else:
raise RuntimeError('Parse error %r' % char)
        # Convert value if we have an extension for it
if ext_id is not None:
extension = self._extensions.get(ext_id, None)
if extension is not None:
value = extension.decode(self, value)
else:
logger.warning('BSDF warning: no extension found for %r' % ext_id)
return value
def encode(self, ob):
""" Save the given object to bytes.
"""
f = BytesIO()
self.save(f, ob)
return f.getvalue()
def save(self, f, ob):
""" Write the given object to the given file object.
"""
f.write(b'BSDF')
f.write(struct.pack('<B', VERSION[0]))
f.write(struct.pack('<B', VERSION[1]))
self._encode(f, ob, None)
def decode(self, bb):
""" Load the data structure that is BSDF-encoded in the given bytes.
"""
f = BytesIO(bb)
return self.load(f)
def load(self, f):
""" Load a BSDF-encoded object from the given file object.
"""
# Check magic string
if f.read(4) != b'BSDF':
            raise RuntimeError('This does not look like a BSDF file.')
# Check version
major_version = strunpack('<B', f.read(1))[0]
minor_version = strunpack('<B', f.read(1))[0]
file_version = '%i.%i' % (major_version, minor_version)
if major_version != VERSION[0]: # major version should be 2
t = ('Reading file with different major version (%s) '
'from the implementation (%s).')
raise RuntimeError(t % (file_version, __version__))
if minor_version > VERSION[1]: # minor should be < ours
t = ('BSDF warning: reading file with higher minor version (%s) '
'than the implementation (%s).')
logger.warning(t % (file_version, __version__))
return self._decode(f)
# %% Standard extensions
# Defining extensions as a dict would be more compact and feel lighter, but
# that would only allow lambdas, which is too limiting, e.g. for ndarray
# extension.
class Extension(object):
""" Base class to implement BSDF extensions for special data types.
Extension classes are provided to the BSDF serializer, which
instantiates the class. That way, the extension can be somewhat dynamic:
e.g. the NDArrayExtension exposes the ndarray class only when numpy
is imported.
    An extension instance must have two attributes. These can be attributes of
the class, or of the instance set in ``__init__()``:
* name (str): the name by which encoded values will be identified.
* cls (type): the type (or list of types) to match values with.
This is optional, but it makes the encoder select extensions faster.
Further, it needs 3 methods:
* `match(serializer, value) -> bool`: return whether the extension can
convert the given value. The default is ``isinstance(value, self.cls)``.
* `encode(serializer, value) -> encoded_value`: the function to encode a
value to more basic data types.
* `decode(serializer, encoded_value) -> value`: the function to decode an
encoded value back to its intended representation.
"""
name = ''
cls = ()
def __repr__(self):
return '<BSDF extension %r at 0x%s>' % (self.name, hex(id(self)))
def match(self, s, v):
return isinstance(v, self.cls)
def encode(self, s, v):
raise NotImplementedError()
def decode(self, s, v):
raise NotImplementedError()
class ComplexExtension(Extension):
name = 'c'
cls = complex
def encode(self, s, v):
return (v.real, v.imag)
def decode(self, s, v):
return complex(v[0], v[1])
class NDArrayExtension(Extension):
name = 'ndarray'
def __init__(self):
if 'numpy' in sys.modules:
import numpy as np
self.cls = np.ndarray
def match(self, s, v): # e.g. for nd arrays in JS
return (hasattr(v, 'shape') and
hasattr(v, 'dtype') and
hasattr(v, 'tobytes'))
def encode(self, s, v):
return dict(shape=v.shape,
dtype=str(v.dtype),
data=v.tobytes())
def decode(self, s, v):
try:
import numpy as np
except ImportError:
return v
a = np.frombuffer(v['data'], dtype=v['dtype'])
a.shape = v['shape']
return a
standard_extensions = [ComplexExtension, NDArrayExtension]
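# Minimal usage sketch (not part of the original module): round-trip a value
# and register a custom extension following the Extension contract described
# above. SetExtension is an illustration, not a standard extension.
if __name__ == '__main__':
    class SetExtension(Extension):
        name = 'set'
        cls = set
        def encode(self, s, v):
            return list(v)
        def decode(self, s, v):
            return set(v)
    serializer = BsdfLiteSerializer(compression='zlib')
    serializer.add_extension(SetExtension)
    blob = serializer.encode({'numbers': {1, 2, 3}, 'z': 3 + 4j})
    assert serializer.decode(blob) == {'numbers': {1, 2, 3}, 'z': 3 + 4j}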
|
import re
try: # pragma: no cover
from collections import OrderedDict as _dict
except ImportError:
_dict = dict
def isidentifier(s):
# http://stackoverflow.com/questions/2544972/
if not isinstance(s, str):
return False
return re.match(r'^\w+$', s, re.UNICODE) and re.match(r'^[0-9]', s) is None
class Dict(_dict):
""" A dict in which the items can be get/set as attributes.
This provides a lean way to represent structured data, and works
well in combination with autocompletion. Keys can be anything that
are otherwise valid keys, but keys that are not valid identifiers
or that are methods of the dict class (e.g. 'items' or 'copy')
can only be get/set in the classic way.
Example:
.. code-block:: python
>> d = Dict(foo=3)
>> d.foo
3
>> d['foo'] = 4
>> d.foo
4
>> d.bar = 5
>> d.bar
5
"""
__reserved_names__ = dir(_dict()) # Also from OrderedDict
__pure_names__ = dir(dict())
__slots__ = []
def __repr__(self):
identifier_items = []
nonidentifier_items = []
for key, val in self.items():
if isidentifier(key):
identifier_items.append('%s=%r' % (key, val))
else:
nonidentifier_items.append('(%r, %r)' % (key, val))
if nonidentifier_items:
return 'Dict([%s], %s)' % (', '.join(nonidentifier_items),
', '.join(identifier_items))
else:
return 'Dict(%s)' % (', '.join(identifier_items))
def __getattribute__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in self:
return self[key]
else:
raise
def __setattr__(self, key, val):
if key in Dict.__reserved_names__:
# Either let OrderedDict do its work, or disallow
if key not in Dict.__pure_names__: # pragma: no cover
return _dict.__setattr__(self, key, val)
else:
raise AttributeError('Reserved name, this key can only ' +
'be set via ``d[%r] = X``' % key)
else:
# if isinstance(val, dict): val = Dict(val) -> no, makes a copy!
self[key] = val
def __dir__(self):
names = [k for k in self.keys() if isidentifier(k)]
return Dict.__reserved_names__ + names
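# Minimal usage sketch (not part of the original module): attribute access and
# the reserved-name rule described in the class docstring.
if __name__ == '__main__':
    d = Dict(foo=3)
    d.bar = 5
    assert (d.foo, d['bar']) == (3, 5)
    try:
        d.items = 1  # 'items' is a dict method, so attribute-style set fails
    except AttributeError:
        d['items'] = 1  # ...but classic item syntax still works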
|
from datetime import datetime
import json
import os
import coverage
from tests.coveragetest import UsingModulesMixin, CoverageTest
class JsonReportTest(UsingModulesMixin, CoverageTest):
"""Tests of the JSON reports from coverage.py."""
def _assert_expected_json_report(self, cov, expected_result):
"""
        Helper for tests that handles the common ceremony so the tests can clearly show the
        consequences of setting various arguments.
"""
self.make_file("a.py", """\
a = {'b': 1}
if a.get('a'):
b = 1
""")
a = self.start_import_stop(cov, "a")
output_path = os.path.join(self.temp_dir, "a.json")
cov.json_report(a, outfile=output_path)
with open(output_path) as result_file:
parsed_result = json.load(result_file)
self.assert_recent_datetime(
datetime.strptime(parsed_result['meta']['timestamp'], "%Y-%m-%dT%H:%M:%S.%f")
)
del (parsed_result['meta']['timestamp'])
assert parsed_result == expected_result
def test_branch_coverage(self):
cov = coverage.Coverage(branch=True)
expected_result = {
'meta': {
"version": coverage.__version__,
"branch_coverage": True,
"show_contexts": False,
},
'files': {
'a.py': {
'executed_lines': [1, 2],
'missing_lines': [3],
'excluded_lines': [],
'summary': {
'missing_lines': 1,
'covered_lines': 2,
'num_statements': 3,
'num_branches': 2,
'excluded_lines': 0,
'num_partial_branches': 1,
'covered_branches': 1,
'missing_branches': 1,
'percent_covered': 60.0,
}
}
},
'totals': {
'missing_lines': 1,
'covered_lines': 2,
'num_statements': 3,
'num_branches': 2,
'excluded_lines': 0,
'num_partial_branches': 1,
'percent_covered': 60.0,
'covered_branches': 1,
'missing_branches': 1,
}
}
self._assert_expected_json_report(cov, expected_result)
def test_simple_line_coverage(self):
cov = coverage.Coverage()
expected_result = {
'meta': {
"version": coverage.__version__,
"branch_coverage": False,
"show_contexts": False,
},
'files': {
'a.py': {
'executed_lines': [1, 2],
'missing_lines': [3],
'excluded_lines': [],
'summary': {
'excluded_lines': 0,
'missing_lines': 1,
'covered_lines': 2,
'num_statements': 3,
'percent_covered': 66.66666666666667
}
}
},
'totals': {
'excluded_lines': 0,
'missing_lines': 1,
'covered_lines': 2,
'num_statements': 3,
'percent_covered': 66.66666666666667
}
}
self._assert_expected_json_report(cov, expected_result)
def run_context_test(self, relative_files):
"""A helper for two tests below."""
self.make_file("config", """\
[run]
relative_files = {}
[json]
show_contexts = True
""".format(relative_files))
cov = coverage.Coverage(context="cool_test", config_file="config")
expected_result = {
'meta': {
"version": coverage.__version__,
"branch_coverage": False,
"show_contexts": True,
},
'files': {
'a.py': {
'executed_lines': [1, 2],
'missing_lines': [3],
'excluded_lines': [],
"contexts": {
"1": [
"cool_test"
],
"2": [
"cool_test"
]
},
'summary': {
'excluded_lines': 0,
'missing_lines': 1,
'covered_lines': 2,
'num_statements': 3,
'percent_covered': 66.66666666666667
}
}
},
'totals': {
'excluded_lines': 0,
'missing_lines': 1,
'covered_lines': 2,
'num_statements': 3,
'percent_covered': 66.66666666666667
}
}
self._assert_expected_json_report(cov, expected_result)
def test_context_non_relative(self):
self.run_context_test(relative_files=False)
def test_context_relative(self):
self.run_context_test(relative_files=True)
|
import asyncio
from datetime import timedelta
import logging
from pyControl4.error_handling import C4Exception
from pyControl4.light import C4Light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from . import Control4Entity, get_items_of_category
from .const import CONF_DIRECTOR, CONTROL4_ENTITY_TYPE, DOMAIN
from .director_utils import director_update_data
_LOGGER = logging.getLogger(__name__)
CONTROL4_CATEGORY = "lights"
CONTROL4_NON_DIMMER_VAR = "LIGHT_STATE"
CONTROL4_DIMMER_VAR = "LIGHT_LEVEL"
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up Control4 lights from a config entry."""
entry_data = hass.data[DOMAIN][entry.entry_id]
scan_interval = entry_data[CONF_SCAN_INTERVAL]
_LOGGER.debug(
"Scan interval = %s",
scan_interval,
)
async def async_update_data_non_dimmer():
"""Fetch data from Control4 director for non-dimmer lights."""
try:
return await director_update_data(hass, entry, CONTROL4_NON_DIMMER_VAR)
except C4Exception as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
async def async_update_data_dimmer():
"""Fetch data from Control4 director for dimmer lights."""
try:
return await director_update_data(hass, entry, CONTROL4_DIMMER_VAR)
except C4Exception as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
non_dimmer_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="light",
update_method=async_update_data_non_dimmer,
update_interval=timedelta(seconds=scan_interval),
)
dimmer_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="light",
update_method=async_update_data_dimmer,
update_interval=timedelta(seconds=scan_interval),
)
# Fetch initial data so we have data when entities subscribe
await non_dimmer_coordinator.async_refresh()
await dimmer_coordinator.async_refresh()
items_of_category = await get_items_of_category(hass, entry, CONTROL4_CATEGORY)
entity_list = []
for item in items_of_category:
try:
if item["type"] == CONTROL4_ENTITY_TYPE:
item_name = item["name"]
item_id = item["id"]
item_parent_id = item["parentId"]
item_manufacturer = None
item_device_name = None
item_model = None
for parent_item in items_of_category:
if parent_item["id"] == item_parent_id:
item_manufacturer = parent_item["manufacturer"]
item_device_name = parent_item["name"]
item_model = parent_item["model"]
else:
continue
except KeyError:
_LOGGER.exception(
"Unknown device properties received from Control4: %s",
item,
)
continue
if item_id in dimmer_coordinator.data:
item_is_dimmer = True
item_coordinator = dimmer_coordinator
elif item_id in non_dimmer_coordinator.data:
item_is_dimmer = False
item_coordinator = non_dimmer_coordinator
else:
director = entry_data[CONF_DIRECTOR]
item_variables = await director.getItemVariables(item_id)
_LOGGER.warning(
"Couldn't get light state data for %s, skipping setup. Available variables from Control4: %s",
item_name,
item_variables,
)
continue
entity_list.append(
Control4Light(
entry_data,
entry,
item_coordinator,
item_name,
item_id,
item_device_name,
item_manufacturer,
item_model,
item_parent_id,
item_is_dimmer,
)
)
async_add_entities(entity_list, True)
class Control4Light(Control4Entity, LightEntity):
"""Control4 light entity."""
def __init__(
self,
entry_data: dict,
entry: ConfigEntry,
coordinator: DataUpdateCoordinator,
name: str,
idx: int,
device_name: str,
device_manufacturer: str,
device_model: str,
device_id: int,
is_dimmer: bool,
):
"""Initialize Control4 light entity."""
super().__init__(
entry_data,
entry,
coordinator,
name,
idx,
device_name,
device_manufacturer,
device_model,
device_id,
)
self._is_dimmer = is_dimmer
def create_api_object(self):
"""Create a pyControl4 device object.
This exists so the director token used is always the latest one, without needing to re-init the entire entity.
"""
return C4Light(self.entry_data[CONF_DIRECTOR], self._idx)
@property
def is_on(self):
"""Return whether this light is on or off."""
return self.coordinator.data[self._idx]["value"] > 0
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self._is_dimmer:
return round(self.coordinator.data[self._idx]["value"] * 2.55)
return None
@property
def supported_features(self) -> int:
"""Flag supported features."""
flags = 0
if self._is_dimmer:
flags |= SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
return flags
async def async_turn_on(self, **kwargs) -> None:
"""Turn the entity on."""
c4_light = self.create_api_object()
if self._is_dimmer:
if ATTR_TRANSITION in kwargs:
transition_length = kwargs[ATTR_TRANSITION] * 1000
else:
transition_length = 0
if ATTR_BRIGHTNESS in kwargs:
brightness = (kwargs[ATTR_BRIGHTNESS] / 255) * 100
else:
brightness = 100
await c4_light.rampToLevel(brightness, transition_length)
else:
transition_length = 0
await c4_light.setLevel(100)
if transition_length == 0:
transition_length = 1000
delay_time = (transition_length / 1000) + 0.7
_LOGGER.debug("Delaying light update by %s seconds", delay_time)
await asyncio.sleep(delay_time)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs) -> None:
"""Turn the entity off."""
c4_light = self.create_api_object()
if self._is_dimmer:
if ATTR_TRANSITION in kwargs:
transition_length = kwargs[ATTR_TRANSITION] * 1000
else:
transition_length = 0
await c4_light.rampToLevel(0, transition_length)
else:
transition_length = 0
await c4_light.setLevel(0)
if transition_length == 0:
transition_length = 1500
delay_time = (transition_length / 1000) + 0.7
_LOGGER.debug("Delaying light update by %s seconds", delay_time)
await asyncio.sleep(delay_time)
await self.coordinator.async_request_refresh()
|
import asyncio
from itertools import product
import logging
from homeassistant.const import ATTR_ENTITY_ID, __version__
from homeassistant.util.decorator import Registry
from .const import (
ERR_DEVICE_OFFLINE,
ERR_PROTOCOL_ERROR,
ERR_UNKNOWN_ERROR,
EVENT_COMMAND_RECEIVED,
EVENT_QUERY_RECEIVED,
EVENT_SYNC_RECEIVED,
)
from .error import SmartHomeError
from .helpers import GoogleEntity, RequestData, async_get_entities
HANDLERS = Registry()
_LOGGER = logging.getLogger(__name__)
async def async_handle_message(hass, config, user_id, message, source):
"""Handle incoming API messages."""
data = RequestData(
config, user_id, source, message["requestId"], message.get("devices")
)
response = await _process(hass, data, message)
if response and "errorCode" in response["payload"]:
_LOGGER.error("Error handling message %s: %s", message, response["payload"])
return response
async def _process(hass, data, message):
"""Process a message."""
inputs: list = message.get("inputs")
if len(inputs) != 1:
return {
"requestId": data.request_id,
"payload": {"errorCode": ERR_PROTOCOL_ERROR},
}
handler = HANDLERS.get(inputs[0].get("intent"))
if handler is None:
return {
"requestId": data.request_id,
"payload": {"errorCode": ERR_PROTOCOL_ERROR},
}
try:
result = await handler(hass, data, inputs[0].get("payload"))
except SmartHomeError as err:
return {"requestId": data.request_id, "payload": {"errorCode": err.code}}
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error")
return {
"requestId": data.request_id,
"payload": {"errorCode": ERR_UNKNOWN_ERROR},
}
if result is None:
return None
return {"requestId": data.request_id, "payload": result}
@HANDLERS.register("action.devices.SYNC")
async def async_devices_sync(hass, data, payload):
"""Handle action.devices.SYNC request.
https://developers.google.com/assistant/smarthome/develop/process-intents#SYNC
"""
hass.bus.async_fire(
EVENT_SYNC_RECEIVED,
{"request_id": data.request_id, "source": data.source},
context=data.context,
)
agent_user_id = data.config.get_agent_user_id(data.context)
entities = async_get_entities(hass, data.config)
results = await asyncio.gather(
*(
entity.sync_serialize(agent_user_id)
for entity in entities
if entity.should_expose()
),
return_exceptions=True,
)
devices = []
for entity, result in zip(entities, results):
if isinstance(result, Exception):
_LOGGER.error("Error serializing %s", entity.entity_id, exc_info=result)
else:
devices.append(result)
response = {"agentUserId": agent_user_id, "devices": devices}
await data.config.async_connect_agent_user(agent_user_id)
_LOGGER.debug("Syncing entities response: %s", response)
return response
@HANDLERS.register("action.devices.QUERY")
async def async_devices_query(hass, data, payload):
"""Handle action.devices.QUERY request.
https://developers.google.com/assistant/smarthome/develop/process-intents#QUERY
"""
devices = {}
for device in payload.get("devices", []):
devid = device["id"]
state = hass.states.get(devid)
hass.bus.async_fire(
EVENT_QUERY_RECEIVED,
{
"request_id": data.request_id,
ATTR_ENTITY_ID: devid,
"source": data.source,
},
context=data.context,
)
if not state:
# If we can't find a state, the device is offline
devices[devid] = {"online": False}
continue
entity = GoogleEntity(hass, data.config, state)
try:
devices[devid] = entity.query_serialize()
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error serializing query for %s", state)
devices[devid] = {"online": False}
return {"devices": devices}
async def _entity_execute(entity, data, executions):
"""Execute all commands for an entity.
Returns a dict if a special result needs to be set.
"""
for execution in executions:
try:
await entity.execute(data, execution)
except SmartHomeError as err:
return {
"ids": [entity.entity_id],
"status": "ERROR",
**err.to_response(),
}
return None
@HANDLERS.register("action.devices.EXECUTE")
async def handle_devices_execute(hass, data, payload):
"""Handle action.devices.EXECUTE request.
https://developers.google.com/assistant/smarthome/develop/process-intents#EXECUTE
"""
entities = {}
executions = {}
results = {}
for command in payload["commands"]:
for device, execution in product(command["devices"], command["execution"]):
entity_id = device["id"]
hass.bus.async_fire(
EVENT_COMMAND_RECEIVED,
{
"request_id": data.request_id,
ATTR_ENTITY_ID: entity_id,
"execution": execution,
"source": data.source,
},
context=data.context,
)
            # Happens if an error occurred; skip this entity for further processing
if entity_id in results:
continue
if entity_id in entities:
executions[entity_id].append(execution)
continue
state = hass.states.get(entity_id)
if state is None:
results[entity_id] = {
"ids": [entity_id],
"status": "ERROR",
"errorCode": ERR_DEVICE_OFFLINE,
}
continue
entities[entity_id] = GoogleEntity(hass, data.config, state)
executions[entity_id] = [execution]
execute_results = await asyncio.gather(
*[
_entity_execute(entities[entity_id], data, executions[entity_id])
for entity_id in executions
]
)
for entity_id, result in zip(executions, execute_results):
if result is not None:
results[entity_id] = result
final_results = list(results.values())
for entity in entities.values():
if entity.entity_id in results:
continue
entity.async_update()
final_results.append(
{
"ids": [entity.entity_id],
"status": "SUCCESS",
"states": entity.query_serialize(),
}
)
return {"commands": final_results}
@HANDLERS.register("action.devices.DISCONNECT")
async def async_devices_disconnect(hass, data: RequestData, payload):
"""Handle action.devices.DISCONNECT request.
https://developers.google.com/assistant/smarthome/develop/process-intents#DISCONNECT
"""
await data.config.async_disconnect_agent_user(data.context.user_id)
return None
@HANDLERS.register("action.devices.IDENTIFY")
async def async_devices_identify(hass, data: RequestData, payload):
"""Handle action.devices.IDENTIFY request.
https://developers.google.com/assistant/smarthome/develop/local#implement_the_identify_handler
"""
return {
"device": {
"id": data.config.get_agent_user_id(data.context),
"isLocalOnly": True,
"isProxy": True,
"deviceInfo": {
"hwVersion": "UNKNOWN_HW_VERSION",
"manufacturer": "Home Assistant",
"model": "Home Assistant",
"swVersion": __version__,
},
}
}
@HANDLERS.register("action.devices.REACHABLE_DEVICES")
async def async_devices_reachable(hass, data: RequestData, payload):
"""Handle action.devices.REACHABLE_DEVICES request.
https://developers.google.com/actions/smarthome/create#actiondevicesdisconnect
"""
google_ids = {dev["id"] for dev in (data.devices or [])}
return {
"devices": [
entity.reachable_device_serialize()
for entity in async_get_entities(hass, data.config)
if entity.entity_id in google_ids and entity.should_expose_local()
]
}
def turned_off_response(message):
"""Return a device turned off response."""
return {
"requestId": message.get("requestId"),
"payload": {"errorCode": "deviceTurnedOff"},
}
|
import json
import pytest
import homeassistant.components.sensor as sensor
from homeassistant.const import CONF_NAME
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import AsyncMock, patch
from tests.common import assert_setup_component, load_fixture
REPLY = json.loads(load_fixture("yandex_transport_reply.json"))
@pytest.fixture
def mock_requester():
"""Create a mock for YandexMapsRequester."""
with patch("aioymaps.YandexMapsRequester") as requester:
instance = requester.return_value
instance.get_stop_info = AsyncMock(return_value=REPLY)
yield instance
STOP_ID = "stop__9639579"
ROUTES = ["194", "т36", "т47", "м10"]
NAME = "test_name"
TEST_CONFIG = {
"sensor": {
"platform": "yandex_transport",
"stop_id": "stop__9639579",
"routes": ROUTES,
"name": NAME,
}
}
FILTERED_ATTRS = {
"т36": ["18:25", "18:42", "18:46"],
"т47": ["18:35", "18:37", "18:40", "18:42"],
"м10": ["18:20", "18:27", "18:29", "18:41", "18:43"],
"stop_name": "7-й автобусный парк",
"attribution": "Data provided by maps.yandex.ru",
}
RESULT_STATE = dt_util.utc_from_timestamp(1583421540).isoformat(timespec="seconds")
async def assert_setup_sensor(hass, config, count=1):
"""Set up the sensor and assert it's been created."""
with assert_setup_component(count):
assert await async_setup_component(hass, sensor.DOMAIN, config)
await hass.async_block_till_done()
async def test_setup_platform_valid_config(hass, mock_requester):
"""Test that sensor is set up properly with valid config."""
await assert_setup_sensor(hass, TEST_CONFIG)
async def test_setup_platform_invalid_config(hass, mock_requester):
"""Check an invalid configuration."""
await assert_setup_sensor(
hass, {"sensor": {"platform": "yandex_transport", "stopid": 1234}}, count=0
)
async def test_name(hass, mock_requester):
"""Return the name if set in the configuration."""
await assert_setup_sensor(hass, TEST_CONFIG)
state = hass.states.get("sensor.test_name")
assert state.name == TEST_CONFIG["sensor"][CONF_NAME]
async def test_state(hass, mock_requester):
"""Return the contents of _state."""
await assert_setup_sensor(hass, TEST_CONFIG)
state = hass.states.get("sensor.test_name")
assert state.state == RESULT_STATE
async def test_filtered_attributes(hass, mock_requester):
"""Return the contents of attributes."""
await assert_setup_sensor(hass, TEST_CONFIG)
state = hass.states.get("sensor.test_name")
state_attrs = {key: state.attributes[key] for key in FILTERED_ATTRS}
assert state_attrs == FILTERED_ATTRS
|
import pytest
import numpy as np
import math
import os
import shutil
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, load_model # type: ignore
import tensorflow as tf
from tensornetwork.tn_keras.layers import DenseDecomp
from tensornetwork.tn_keras.layers import DenseMPO
from tensornetwork.tn_keras.layers import DenseCondenser
from tensornetwork.tn_keras.layers import DenseExpander
from tensornetwork.tn_keras.layers import DenseEntangler
from tensorflow.keras.layers import Dense # type: ignore
@pytest.fixture(params=[512])
def dummy_data(request):
np.random.seed(42)
# Generate dummy data for use in tests
data = np.random.randint(10, size=(1000, request.param))
labels = np.concatenate((np.ones((500, 1)), np.zeros((500, 1))), axis=0)
return data, labels
@pytest.fixture(params=[
'DenseDecomp', 'DenseMPO', 'DenseCondenser', 'DenseExpander',
'DenseEntangler', 'DenseEntanglerAsymmetric'
])
def make_model(dummy_data, request):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
if request.param == 'DenseMPO':
model = Sequential()
model.add(
DenseMPO(data.shape[1],
num_nodes=int(math.log(int(data.shape[1]), 8)),
bond_dim=8,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseDecomp':
model = Sequential()
model.add(
DenseDecomp(512,
decomp_size=128,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseCondenser':
model = Sequential()
model.add(
DenseCondenser(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseExpander':
model = Sequential()
model.add(
DenseExpander(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseEntangler':
num_legs = 3
leg_dim = round(data.shape[-1]**(1. / num_legs))
assert leg_dim**num_legs == data.shape[-1]
model = Sequential()
model.add(
DenseEntangler(leg_dim**num_legs,
num_legs=num_legs,
num_levels=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseEntanglerAsymmetric':
num_legs = 3
leg_dim = round(data.shape[-1]**(1. / num_legs))
assert leg_dim**num_legs == data.shape[-1]
model = Sequential()
model.add(
DenseEntangler((leg_dim * 2)**num_legs,
num_legs=num_legs,
num_levels=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
return model
def test_train(dummy_data, make_model):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
tf.random.set_seed(0)
data, labels = dummy_data
model = make_model
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the model for 10 epochs
history = model.fit(data, labels, epochs=10, batch_size=32)
# Check that loss decreases and accuracy increases
assert history.history['loss'][0] > history.history['loss'][-1]
assert history.history['accuracy'][0] < history.history['accuracy'][-1]
def test_weights_change(dummy_data, make_model):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
tf.random.set_seed(0)
data, labels = dummy_data
model = make_model
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
before = model.get_weights()
model.fit(data, labels, epochs=5, batch_size=32)
after = model.get_weights()
# Make sure every layer's weights changed
for i, _ in enumerate(before):
assert (after[i] != before[i]).any()
def test_output_shape(dummy_data, make_model):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
data = K.constant(data)
input_shape = data.shape
model = make_model
actual_output_shape = model(data).shape
expected_output_shape = model.compute_output_shape(input_shape)
np.testing.assert_equal(expected_output_shape, actual_output_shape)
@pytest.fixture(params=[(100, 10, 10, 512), (100, 512), (20, 10, 512)])
def high_dim_data(request):
np.random.seed(42)
# Generate dummy data for use in tests
data = np.random.randint(10, size=request.param)
return data
@pytest.fixture(params=[
'DenseDecomp', 'DenseMPO', 'DenseCondenser', 'DenseExpander',
'DenseEntangler'
])
def make_high_dim_model(high_dim_data, request):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data = high_dim_data
if request.param == 'DenseMPO':
model = Sequential()
model.add(
DenseMPO(data.shape[-1],
num_nodes=int(math.log(int(data.shape[-1]), 8)),
bond_dim=8,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
elif request.param == 'DenseDecomp':
model = Sequential()
model.add(
DenseDecomp(512,
decomp_size=128,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
elif request.param == 'DenseCondenser':
model = Sequential()
model.add(
DenseCondenser(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
elif request.param == 'DenseExpander':
model = Sequential()
model.add(
DenseExpander(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
elif request.param == 'DenseEntangler':
num_legs = 3
leg_dim = round(data.shape[-1]**(1. / num_legs))
assert leg_dim**num_legs == data.shape[-1]
model = Sequential()
model.add(
DenseEntangler(leg_dim**num_legs,
num_legs=num_legs,
num_levels=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
return data, model
def test_higher_dim_input_output_shape(make_high_dim_model):
# pylint: disable=redefined-outer-name
data, model = make_high_dim_model
actual_output_shape = model(data).shape
expected_output_shape = model.compute_output_shape(data.shape)
np.testing.assert_equal(expected_output_shape, actual_output_shape)
def test_decomp_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
output_dim = 256
decomp_size = 128
model = Sequential()
model.add(
DenseDecomp(output_dim,
decomp_size=decomp_size,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
# num_params = a_params + b_params + bias_params
expected_num_parameters = (data.shape[1] * decomp_size) + (
decomp_size * output_dim) + output_dim
np.testing.assert_equal(expected_num_parameters, model.count_params())
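# Worked example for the 512-dim dummy data above:
# 512 * 128 + 128 * 256 + 256 = 65,536 + 32,768 + 256 = 98,560 trainable parameters.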
def test_mpo_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
output_dim = data.shape[1]
num_nodes = int(math.log(data.shape[1], 8))
bond_dim = 8
model = Sequential()
model.add(
DenseMPO(output_dim,
num_nodes=num_nodes,
bond_dim=bond_dim,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
in_leg_dim = math.ceil(data.shape[1]**(1. / num_nodes))
out_leg_dim = math.ceil(output_dim**(1. / num_nodes))
# num_params = num_edge_node_params + num_middle_node_params + bias_params
expected_num_parameters = (2 * in_leg_dim * bond_dim * out_leg_dim) + (
(num_nodes - 2) * in_leg_dim * bond_dim * bond_dim *
out_leg_dim) + output_dim
np.testing.assert_equal(expected_num_parameters, model.count_params())
def test_condenser_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
exp_base = 2
num_nodes = 3
model = Sequential()
model.add(
DenseCondenser(exp_base=exp_base,
num_nodes=num_nodes,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
output_dim = data.shape[-1] // (exp_base**num_nodes)
# num_params = (num_nodes * num_node_params) + num_bias_params
expected_num_parameters = (num_nodes * output_dim * output_dim *
exp_base) + output_dim
np.testing.assert_equal(expected_num_parameters, model.count_params())
def test_expander_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
exp_base = 2
num_nodes = 3
model = Sequential()
model.add(
DenseExpander(exp_base=exp_base,
num_nodes=num_nodes,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
output_dim = data.shape[-1] * (exp_base**num_nodes)
# num_params = (num_nodes * num_node_params) + num_bias_params
expected_num_parameters = (num_nodes * data.shape[-1] * data.shape[-1] *
exp_base) + output_dim
np.testing.assert_equal(expected_num_parameters, model.count_params())
def test_entangler_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
num_legs = 3
num_levels = 3
leg_dim = round(data.shape[-1]**(1. / num_legs))
assert leg_dim**num_legs == data.shape[-1]
model = Sequential()
model.add(
DenseEntangler(leg_dim**num_legs,
num_legs=num_legs,
num_levels=num_levels,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
# num_params = entangler_node_params + bias_params
expected_num_parameters = num_levels * (num_legs - 1) * (leg_dim**4) + (
leg_dim**num_legs)
np.testing.assert_equal(expected_num_parameters, model.count_params())
@pytest.mark.parametrize('num_levels', list(range(1, 4)))
@pytest.mark.parametrize('num_legs', list(range(2, 6)))
@pytest.mark.parametrize('leg_dims', [(4, 8), (8, 4)])
def test_entangler_asymmetric_num_parameters_output_shape(num_legs,
num_levels,
leg_dims):
leg_dim, out_leg_dim = leg_dims
data_shape = (leg_dim ** num_legs,)
model = Sequential()
model.add(
DenseEntangler(out_leg_dim**num_legs,
num_legs=num_legs,
num_levels=num_levels,
use_bias=True,
activation='relu',
input_shape=data_shape))
primary = leg_dim
secondary = out_leg_dim
if leg_dim > out_leg_dim:
primary, secondary = secondary, primary
expected_num_parameters = (num_levels - 1) * (num_legs - 1) * (primary**4) + (
num_legs - 2) * primary**3 * secondary + primary**2 * secondary**2 + (
out_leg_dim**num_legs)
np.testing.assert_equal(expected_num_parameters, model.count_params())
data = np.random.randint(10, size=(10, data_shape[0]))
out = model(data)
np.testing.assert_equal(out.shape, (data.shape[0], out_leg_dim**num_legs))
def test_config(make_model):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
model = make_model
expected_num_parameters = model.layers[0].count_params()
# Serialize model and use config to create new layer
model_config = model.get_config()
layer_config = model_config['layers'][1]['config']
if 'mpo' in model.layers[0].name:
new_model = DenseMPO.from_config(layer_config)
elif 'decomp' in model.layers[0].name:
new_model = DenseDecomp.from_config(layer_config)
elif 'condenser' in model.layers[0].name:
new_model = DenseCondenser.from_config(layer_config)
elif 'expander' in model.layers[0].name:
new_model = DenseExpander.from_config(layer_config)
elif 'entangler' in model.layers[0].name:
new_model = DenseEntangler.from_config(layer_config)
# Build the layer so we can count params below
new_model.build(layer_config['batch_input_shape'])
# Check that original layer had same num params as layer built from config
np.testing.assert_equal(expected_num_parameters, new_model.count_params())
def test_model_save(dummy_data, make_model, tmp_path):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, labels = dummy_data
model = make_model
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the model for 5 epochs
model.fit(data, labels, epochs=5, batch_size=32)
for save_path in ['test_model', 'test_model.h5']:
# Save model to a SavedModel folder or h5 file, then load model
save_path = tmp_path / save_path
model.save(save_path)
loaded_model = load_model(save_path)
# Clean up SavedModel folder
if os.path.isdir(save_path):
shutil.rmtree(save_path)
# Clean up h5 file
if os.path.exists(save_path):
os.remove(save_path)
# Compare model predictions and loaded_model predictions
np.testing.assert_equal(model.predict(data), loaded_model.predict(data))
|
import unittest
from absl import flags
from absl.testing import flagsaver
import mock
from tests import pkb_common_test_case
from perfkitbenchmarker.traces import tcpdump
FLAGS = flags.FLAGS
_OUTPUT_FILE = '/tmp/x.pcap'
# All vm.RemoteCommand calls that launch tcpdump look like this
_CMD_FORMAT = ('sudo tcpdump -n -w {output_file} {{command}} '
'> /dev/null 2>&1 & echo $!').format(output_file=_OUTPUT_FILE)
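# With the default flags this format string renders to (see testDefaults below):
#   sudo tcpdump -n -w /tmp/x.pcap -s 96 not port \(22\) > /dev/null 2>&1 & echo $!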
class TcpdumpTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(TcpdumpTestCase, self).setUp()
self.enter_context(mock.patch('os.path.isdir', return_value=True))
def assertRunLine(self, command):
expected_command = _CMD_FORMAT.format(command=command)
collector = tcpdump._CreateCollector(FLAGS)
actual_command = collector._CollectorRunCommand(None, _OUTPUT_FILE)
self.assertEqual(expected_command, actual_command)
def testDefaults(self):
self.assertRunLine(r'-s 96 not port \(22\)')
@flagsaver.flagsaver(tcpdump_snaplen=10)
def testSnaplen(self):
self.assertRunLine(r'-s 10 not port \(22\)')
@flagsaver.flagsaver(tcpdump_snaplen=0)
def testNoSnaplen(self):
self.assertRunLine(r'not port \(22\)')
@flagsaver.flagsaver(tcpdump_packet_count=12)
def testCount(self):
self.assertRunLine(r'-s 96 -c 12 not port \(22\)')
@flagsaver.flagsaver(tcpdump_ignore_ports=[53, 80])
def testIgnorePorts(self):
self.assertRunLine(r'-s 96 not port \(53 or 80\)')
@flagsaver.flagsaver(tcpdump_include_ports=[22, 443])
def testIncludePorts(self):
self.assertRunLine(r'-s 96 port \(22 or 443\)')
@flagsaver.flagsaver(tcpdump_ignore_ports=[])
def testIncludeAll(self):
self.assertRunLine(r'-s 96')
def testKillCommand(self):
collector = tcpdump._CreateCollector(FLAGS)
vm = mock.Mock()
vm.RemoteCommand.return_value = ('pid1234', '')
collector._StartOnVm(vm)
vm.RemoteCommand.reset_mock()
collector._StopOnVm(vm, 'roleA')
vm.RemoteCommand.assert_called_with(
'sudo kill -s INT pid1234; sleep 3', ignore_failure=True)
if __name__ == '__main__':
unittest.main()
|
import os.path as op
import pytest
from mne.datasets.testing import data_path
from mne.io import read_raw_nirx
from mne.preprocessing.nirs import optical_density, beer_lambert_law,\
_fnirs_check_bads, _fnirs_spread_bads
from mne.datasets import testing
fname_nirx_15_0 = op.join(data_path(download=False),
'NIRx', 'nirscout', 'nirx_15_0_recording')
fname_nirx_15_2 = op.join(data_path(download=False),
'NIRx', 'nirscout', 'nirx_15_2_recording')
fname_nirx_15_2_short = op.join(data_path(download=False),
'NIRx', 'nirscout',
'nirx_15_2_recording_w_short')
@testing.requires_testing_data
@pytest.mark.parametrize('fname', ([fname_nirx_15_2_short, fname_nirx_15_2,
fname_nirx_15_0]))
def test_fnirs_check_bads(fname):
"""Test checking of bad markings."""
# No bad channels, so these should all pass
raw = read_raw_nirx(fname)
_fnirs_check_bads(raw)
raw = optical_density(raw)
_fnirs_check_bads(raw)
raw = beer_lambert_law(raw)
_fnirs_check_bads(raw)
# Mark pairs of bad channels, so these should all pass
raw = read_raw_nirx(fname)
raw.info['bads'] = raw.ch_names[0:2]
_fnirs_check_bads(raw)
raw = optical_density(raw)
_fnirs_check_bads(raw)
raw = beer_lambert_law(raw)
_fnirs_check_bads(raw)
# Mark single channel as bad, so these should all fail
raw = read_raw_nirx(fname)
raw.info['bads'] = raw.ch_names[0:1]
pytest.raises(RuntimeError, _fnirs_check_bads, raw)
raw = optical_density(raw)
pytest.raises(RuntimeError, _fnirs_check_bads, raw)
raw = beer_lambert_law(raw)
pytest.raises(RuntimeError, _fnirs_check_bads, raw)
@testing.requires_testing_data
@pytest.mark.parametrize('fname', ([fname_nirx_15_2_short, fname_nirx_15_2,
fname_nirx_15_0]))
def test_fnirs_spread_bads(fname):
"""Test checking of bad markings."""
# Test spreading upwards in frequency and on raw data
raw = read_raw_nirx(fname)
raw.info['bads'] = ['S1_D1 760']
raw = _fnirs_spread_bads(raw)
assert raw.info['bads'] == ['S1_D1 760', 'S1_D1 850']
# Test spreading downwards in frequency and on od data
raw = optical_density(raw)
raw.info['bads'] = raw.ch_names[5:6]
raw = _fnirs_spread_bads(raw)
assert raw.info['bads'] == raw.ch_names[4:6]
# Test spreading multiple bads and on chroma data
raw = beer_lambert_law(raw)
raw.info['bads'] = [raw.ch_names[x] for x in [1, 8]]
print(raw.info['bads'])
raw = _fnirs_spread_bads(raw)
print(raw.info['bads'])
assert raw.info['bads'] == [raw.ch_names[x] for x in [0, 1, 8, 9]]
|
import asyncio
from datetime import datetime, timedelta
import enum
from functools import wraps
import random
import re
import socket
import string
import threading
from types import MappingProxyType
from typing import (
Any,
Callable,
Coroutine,
Iterable,
KeysView,
Optional,
TypeVar,
Union,
)
import slugify as unicode_slug
from .dt import as_local, utcnow
T = TypeVar("T")
U = TypeVar("U") # pylint: disable=invalid-name
ENUM_T = TypeVar("ENUM_T", bound=enum.Enum) # pylint: disable=invalid-name
RE_SANITIZE_FILENAME = re.compile(r"(~|\.\.|/|\\)")
RE_SANITIZE_PATH = re.compile(r"(~|\.(\.)+)")
def sanitize_filename(filename: str) -> str:
r"""Sanitize a filename by removing .. / and \\."""
return RE_SANITIZE_FILENAME.sub("", filename)
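# e.g. sanitize_filename("../../secret") -> "secret" (".." and "/" are stripped out)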
def sanitize_path(path: str) -> str:
"""Sanitize a path by removing ~ and .."""
return RE_SANITIZE_PATH.sub("", path)
def slugify(text: str, *, separator: str = "_") -> str:
"""Slugify a given text."""
return unicode_slug.slugify(text, separator=separator)
def repr_helper(inp: Any) -> str:
"""Help creating a more readable string representation of objects."""
if isinstance(inp, (dict, MappingProxyType)):
return ", ".join(
f"{repr_helper(key)}={repr_helper(item)}" for key, item in inp.items()
)
if isinstance(inp, datetime):
return as_local(inp).isoformat()
return str(inp)
def convert(
value: Optional[T], to_type: Callable[[T], U], default: Optional[U] = None
) -> Optional[U]:
"""Convert value to to_type, returns default if fails."""
try:
return default if value is None else to_type(value)
except (ValueError, TypeError):
# If value could not be converted
return default
def ensure_unique_string(
preferred_string: str, current_strings: Union[Iterable[str], KeysView[str]]
) -> str:
"""Return a string that is not present in current_strings.
    If the preferred string exists, _2, _3, ... is appended until the result is unique.
"""
test_string = preferred_string
current_strings_set = set(current_strings)
tries = 1
while test_string in current_strings_set:
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
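# e.g. ensure_unique_string("light", ["light", "light_2"]) -> "light_3"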
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip() -> str:
"""Try to determine the local IP address of the machine."""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Use Google Public DNS server to determine own IP
sock.connect(("8.8.8.8", 80))
return sock.getsockname()[0] # type: ignore
except OSError:
try:
return socket.gethostbyname(socket.gethostname())
except socket.gaierror:
return "127.0.0.1"
finally:
sock.close()
# Taken from http://stackoverflow.com/a/23728630
def get_random_string(length: int = 10) -> str:
"""Return a random string with letters and digits."""
generator = random.SystemRandom()
source_chars = string.ascii_letters + string.digits
return "".join(generator.choice(source_chars) for _ in range(length))
class OrderedEnum(enum.Enum):
"""Taken from Python 3.4.0 docs."""
# https://github.com/PyCQA/pylint/issues/2306
# pylint: disable=comparison-with-callable
def __ge__(self, other: ENUM_T) -> bool:
"""Return the greater than element."""
if self.__class__ is other.__class__:
return bool(self.value >= other.value)
return NotImplemented
def __gt__(self, other: ENUM_T) -> bool:
"""Return the greater element."""
if self.__class__ is other.__class__:
return bool(self.value > other.value)
return NotImplemented
def __le__(self, other: ENUM_T) -> bool:
"""Return the lower than element."""
if self.__class__ is other.__class__:
return bool(self.value <= other.value)
return NotImplemented
def __lt__(self, other: ENUM_T) -> bool:
"""Return the lower element."""
if self.__class__ is other.__class__:
return bool(self.value < other.value)
return NotImplemented
class Throttle:
"""A class for throttling the execution of tasks.
This method decorator adds a cooldown to a method to prevent it from being
    called more than once within the timedelta interval `min_time` after it
returned its result.
Calling a method a second time during the interval will return None.
Pass keyword argument `no_throttle=True` to the wrapped method to make
the call not throttled.
Decorator takes in an optional second timedelta interval to throttle the
'no_throttle' calls.
Adds a datetime attribute `last_call` to the method.
"""
def __init__(
self, min_time: timedelta, limit_no_throttle: Optional[timedelta] = None
) -> None:
"""Initialize the throttle."""
self.min_time = min_time
self.limit_no_throttle = limit_no_throttle
def __call__(self, method: Callable) -> Callable:
"""Caller for the throttle."""
# Make sure we return a coroutine if the method is async.
if asyncio.iscoroutinefunction(method):
async def throttled_value() -> None:
"""Stand-in function for when real func is being throttled."""
return None
else:
def throttled_value() -> None: # type: ignore
"""Stand-in function for when real func is being throttled."""
return None
if self.limit_no_throttle is not None:
method = Throttle(self.limit_no_throttle)(method)
# Different methods that can be passed in:
# - a function
# - an unbound function on a class
# - a method (bound function on a class)
# We want to be able to differentiate between function and unbound
# methods (which are considered functions).
# All methods have the classname in their qualname separated by a '.'
# Functions have a '.' in their qualname if defined inline, but will
# be prefixed by '.<locals>.' so we strip that out.
is_func = (
not hasattr(method, "__self__")
and "." not in method.__qualname__.split(".<locals>.")[-1]
)
@wraps(method)
def wrapper(*args: Any, **kwargs: Any) -> Union[Callable, Coroutine]:
"""Wrap that allows wrapped to be called only once per min_time.
If we cannot acquire the lock, it is running so return None.
"""
if hasattr(method, "__self__"):
host = getattr(method, "__self__")
elif is_func:
host = wrapper
else:
host = args[0] if args else wrapper
# pylint: disable=protected-access # to _throttle
if not hasattr(host, "_throttle"):
host._throttle = {}
if id(self) not in host._throttle:
host._throttle[id(self)] = [threading.Lock(), None]
throttle = host._throttle[id(self)]
# pylint: enable=protected-access
if not throttle[0].acquire(False):
return throttled_value()
# Check if method is never called or no_throttle is given
force = kwargs.pop("no_throttle", False) or not throttle[1]
try:
if force or utcnow() - throttle[1] > self.min_time:
result = method(*args, **kwargs)
throttle[1] = utcnow()
return result # type: ignore
return throttled_value()
finally:
throttle[0].release()
return wrapper
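# Illustrative usage (a sketch, not part of the original module; `do_request` is a
# hypothetical helper):
#
#     class Fetcher:
#         @Throttle(timedelta(minutes=5))
#         def update(self):
#             return do_request()
#
#     fetcher = Fetcher()
#     fetcher.update()                  # runs and records the call time
#     fetcher.update()                  # within min_time -> returns None
#     fetcher.update(no_throttle=True)  # bypasses the throttle and runs again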
|
import ipaddress
import logging
import re
from pdunehd import DuneHDPlayer
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_HOST
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
def host_valid(host):
"""Return True if hostname or IP address is valid."""
try:
        if ipaddress.ip_address(host).version in (4, 6):
return True
except ValueError:
if len(host) > 253:
return False
allowed = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in host.split("."))
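# e.g. host_valid("192.168.1.10") and host_valid("dune-hd.local") return True, while
# host_valid("bad..name") returns False because the empty label fails the regex.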
class DuneHDConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Dune HD integration."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize."""
self.host = None
async def init_device(self, host):
"""Initialize Dune HD player."""
player = DuneHDPlayer(host)
state = await self.hass.async_add_executor_job(player.update_state)
if not state:
raise CannotConnect()
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if host_valid(user_input[CONF_HOST]):
self.host = user_input[CONF_HOST]
try:
if self.host_already_configured(self.host):
raise AlreadyConfigured()
await self.init_device(self.host)
except CannotConnect:
errors[CONF_HOST] = "cannot_connect"
except AlreadyConfigured:
errors[CONF_HOST] = "already_configured"
else:
return self.async_create_entry(title=self.host, data=user_input)
else:
errors[CONF_HOST] = "invalid_host"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Required(CONF_HOST, default=""): str}),
errors=errors,
)
async def async_step_import(self, user_input=None):
"""Handle configuration by yaml file."""
self.host = user_input[CONF_HOST]
if self.host_already_configured(self.host):
return self.async_abort(reason="already_configured")
try:
await self.init_device(self.host)
except CannotConnect:
_LOGGER.error("Import aborted, cannot connect to %s", self.host)
return self.async_abort(reason="cannot_connect")
else:
return self.async_create_entry(title=self.host, data=user_input)
def host_already_configured(self, host):
"""See if we already have a dunehd entry matching user input configured."""
existing_hosts = {
entry.data[CONF_HOST] for entry in self._async_current_entries()
}
return host in existing_hosts
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class AlreadyConfigured(exceptions.HomeAssistantError):
"""Error to indicate device is already configured."""
|
import socket
import time
import mock
from behave import given
from behave import then
from behave import when
from marathon import MarathonHttpError
from paasta_tools import bounce_lib
from paasta_tools import drain_lib
from paasta_tools import marathon_tools
from paasta_tools import mesos_maintenance
from paasta_tools import setup_marathon_job
from paasta_tools.bounce_lib import get_happy_tasks
def which_id(context, which):
if which == "new":
return context.new_id
elif which == "old":
return context.old_app_config["id"]
@given(
'a new {state} app to be deployed, with bounce strategy "{bounce_method}" and drain method "{drain_method}"'
)
def given_a_new_app_to_be_deployed(context, state, bounce_method, drain_method):
given_a_new_app_to_be_deployed_constraints(
context, state, bounce_method, drain_method, constraints=str([])
)
@given(
"a new {state} app to be deployed, "
+ 'with bounce strategy "{bounce_method}" '
+ 'and drain method "{drain_method}" '
+ "and host_port {host_port:d} "
+ "and {net} networking "
+ "and {instances:d} instances"
)
def given_a_new_app_to_be_deployed_host_port_net(
context, state, bounce_method, drain_method, host_port, net, instances
):
given_a_new_app_to_be_deployed_constraints(
context=context,
state=state,
bounce_method=bounce_method,
drain_method=drain_method,
constraints=str([]),
host_port=host_port,
net=net,
instances=instances,
)
@given(
"a new {state} app to be deployed, "
+ 'with bounce strategy "{bounce_method}" '
+ 'and drain method "{drain_method}" '
+ "and constraints {constraints}"
)
def given_a_new_app_to_be_deployed_constraints(
context,
state,
bounce_method,
drain_method,
constraints,
host_port=0,
net="bridge",
instances=2,
):
constraints = eval(constraints)
if state == "healthy":
cmd = "/bin/true"
elif state == "unhealthy":
cmd = "/bin/false"
else:
        raise ValueError("can't start test app with unknown state %s" % state)
context.service = "bounce"
context.instance = "test1"
context.new_id = "bounce.test1.newapp.confighash"
context.new_marathon_service_config = marathon_tools.MarathonServiceConfig(
service=context.service,
cluster=context.cluster,
instance=context.instance,
config_dict={
"cmd": "/bin/sleep 300",
"instances": instances,
"healthcheck_mode": "cmd",
"healthcheck_cmd": cmd,
"bounce_method": str(bounce_method),
"drain_method": str(drain_method),
"cpus": 0.1,
"mem": 100,
"disk": 10,
"constraints": constraints,
"host_port": host_port,
"net": net,
},
branch_dict={
"docker_image": "busybox",
"desired_state": "start",
"force_bounce": None,
},
)
context.current_client = context.marathon_clients.get_current_client_for_service(
context.new_marathon_service_config
)
@given("an old app to be destroyed")
def given_an_old_app_to_be_destroyed(context):
given_an_old_app_to_be_destroyed_constraints(context, str([]))
@given("an old app to be destroyed with constraints {constraints}")
def given_an_old_app_to_be_destroyed_constraints(context, constraints):
constraints = eval(constraints)
old_app_name = "bounce.test1.oldapp.confighash"
context.old_ids = [old_app_name]
context.old_app_config = {
"id": old_app_name,
"cmd": "/bin/sleep 300",
"instances": 2,
"container": {
"type": "DOCKER",
"docker": {"network": "BRIDGE", "image": "busybox"},
},
"backoff_seconds": 1,
"backoff_factor": 1,
"constraints": constraints,
}
bounce_lib.create_marathon_app(
old_app_name, context.old_app_config, context.current_client
)
@when("there are exactly {num:d} {which} {state} tasks")
def when_there_are_exactly_num_which_tasks(context, num, which, state):
there_are_num_which_tasks(context, num, which, state, True)
@when("there are {num:d} {which} {state} tasks")
def when_there_are_num_which_tasks(context, num, which, state):
there_are_num_which_tasks(context, num, which, state, False)
def there_are_num_which_tasks(context, num, which, state, exact):
context.max_tasks = num
app_id = which_id(context, which)
# 180 * 0.5 = 90 seconds
for _ in range(180):
app = context.current_client.get_app(app_id, embed_tasks=True)
happy_tasks = get_happy_tasks(
app, context.service, "fake_nerve_ns", context.system_paasta_config
)
happy_count = len(happy_tasks)
if state == "healthy":
if exact:
if happy_count == context.max_tasks:
return
else:
if happy_count >= context.max_tasks:
return
elif state == "unhealthy":
if exact:
if len(app.tasks) - happy_count == context.max_tasks:
return
else:
if len(app.tasks) - happy_count >= context.max_tasks:
return
time.sleep(0.5)
raise Exception(
"timed out waiting for %d %s tasks on %s; there are %d"
% (context.max_tasks, state, app_id, len(app.tasks))
)
@when("setup_service is initiated")
def when_setup_service_initiated(context):
with mock.patch(
"paasta_tools.bounce_lib.get_happy_tasks",
autospec=True,
# Wrap function call so we can select a subset of tasks or test
# intermediate steps, like when an app is not completely up
side_effect=lambda app, _, __, ___, **kwargs: get_happy_tasks(
app, context.service, "fake_nerve_ns", context.system_paasta_config
)[: context.max_tasks],
), mock.patch(
"paasta_tools.bounce_lib.bounce_lock_zookeeper", autospec=True
), mock.patch(
"paasta_tools.bounce_lib.time.sleep", autospec=True
), mock.patch(
"paasta_tools.setup_marathon_job.load_system_paasta_config", autospec=True
) as mock_load_system_paasta_config, mock.patch(
"paasta_tools.setup_marathon_job._log", autospec=True
), mock.patch(
"paasta_tools.marathon_tools.get_config_hash",
autospec=True,
return_value="confighash",
), mock.patch(
"paasta_tools.marathon_tools.get_code_sha_from_dockerurl",
autospec=True,
return_value="newapp",
), mock.patch(
"paasta_tools.utils.InstanceConfig.get_docker_url",
autospec=True,
return_value="busybox",
), mock.patch(
"paasta_tools.mesos_maintenance.get_principal", autospec=True
) as mock_get_principal, mock.patch(
"paasta_tools.mesos_maintenance.get_secret", autospec=True
) as mock_get_secret, mock.patch(
"paasta_tools.mesos_maintenance.get_mesos_leader",
autospec=True,
return_value="mesosmaster",
):
credentials = mesos_maintenance.load_credentials(
mesos_secrets="/etc/mesos-slave-secret"
)
mock_get_principal.return_value = credentials.principal
mock_get_secret.return_value = credentials.secret
mock_load_system_paasta_config.return_value.get_cluster = mock.Mock(
return_value=context.cluster
)
# 120 * 0.5 = 60 seconds
for _ in range(120):
try:
marathon_apps_with_clients = marathon_tools.get_marathon_apps_with_clients(
clients=context.marathon_clients.get_all_clients(), embed_tasks=True
)
(code, message, bounce_again) = setup_marathon_job.setup_service(
service=context.service,
instance=context.instance,
clients=context.marathon_clients,
marathon_apps_with_clients=marathon_apps_with_clients,
job_config=context.new_marathon_service_config,
soa_dir="/nail/etc/services",
)
assert code == 0, message
return
except MarathonHttpError:
time.sleep(0.5)
raise Exception(
"Unable to acquire app lock for setup_marathon_job.setup_service"
)
@when("the {which} app is down to {num} instances")
def when_the_which_app_is_down_to_num_instances(context, which, num):
app_id = which_id(context, which)
while True:
tasks = context.current_client.list_tasks(app_id)
if len([t for t in tasks if t.started_at]) <= int(num):
return
time.sleep(0.5)
@then("the {which} app should be running")
def then_the_which_app_should_be_running(context, which):
assert (
marathon_tools.is_app_id_running(
which_id(context, which), context.current_client
)
is True
)
@then("the {which} app should be configured to have {num} instances")
def then_the_which_app_should_be_configured_to_have_num_instances(
context, which, num, retries=10
):
app_id = which_id(context, which)
for _ in range(retries):
app = context.current_client.get_app(app_id)
if app.instances == int(num):
return
time.sleep(0.5)
raise ValueError(
"Expected there to be %d instances, but there were %d"
% (int(num), app.instances)
)
@then("the {which} app should be gone")
def then_the_which_app_should_be_gone(context, which):
assert (
marathon_tools.is_app_id_running(
which_id(context, which), context.current_client
)
is False
)
@when("we wait a bit for the {which} app to disappear")
def and_we_wait_a_bit_for_the_app_to_disappear(context, which):
""" Marathon will not make the app disappear until after all the tasks have died
https://github.com/mesosphere/marathon/issues/1431 """
for _ in range(10):
if (
marathon_tools.is_app_id_running(
which_id(context, which), context.current_client
)
is True
):
time.sleep(0.5)
else:
return True
# It better not be running by now!
assert (
marathon_tools.is_app_id_running(
which_id(context, which), context.current_client
)
is False
)
@when("a task has drained")
def when_a_task_has_drained(context):
"""Tell the TestDrainMethod to mark a task as safe to kill.
Normal drain methods, like hacheck, require waiting for something to happen in the background. The bounce code can
cause a task to go from up -> draining, but the draining->drained transition normally happens outside of Paasta.
With TestDrainMethod, we can control the draining->drained transition to emulate that external code, and that's what
this step does.
"""
drain_lib.TestDrainMethod.mark_arbitrary_task_as_safe_to_kill()
@then("it should be discoverable on port {host_port:d}")
def should_be_discoverable_on_port(context, host_port):
all_discovered = {}
for slave_ip in socket.gethostbyname_ex("mesosslave")[2]:
with mock.patch(
"paasta_tools.mesos_tools.socket.getfqdn",
return_value=slave_ip,
autospec=True,
):
discovered = marathon_tools.marathon_services_running_here()
all_discovered[slave_ip] = discovered
if discovered == [("bounce", "test1", host_port)]:
return
raise Exception(
"Did not find bounce.test1 in marathon_services_running_here for any of our slaves: %r",
all_discovered,
)
@then("it should be discoverable on any port")
def should_be_discoverable_on_any_port(context):
return should_be_discoverable_on_port(context, mock.ANY)
|
from unittest.mock import patch
from homeassistant.components.elgato.light import ElgatoError
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import HomeAssistant
from tests.common import mock_coro
from tests.components.elgato import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_light_state(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the creation and values of the Elgato Key Lights."""
await init_integration(hass, aioclient_mock)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
    # State of the Elgato Key Light
state = hass.states.get("light.frenck")
assert state
assert state.attributes.get(ATTR_BRIGHTNESS) == 54
assert state.attributes.get(ATTR_COLOR_TEMP) == 297
assert state.state == STATE_ON
entry = entity_registry.async_get("light.frenck")
assert entry
assert entry.unique_id == "CN11A1A00001"
async def test_light_change_state(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the change of state of a Elgato Key Light device."""
await init_integration(hass, aioclient_mock)
state = hass.states.get("light.frenck")
assert state.state == STATE_ON
with patch(
"homeassistant.components.elgato.light.Elgato.light",
return_value=mock_coro(),
) as mock_light:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: "light.frenck",
ATTR_BRIGHTNESS: 255,
ATTR_COLOR_TEMP: 100,
},
blocking=True,
)
await hass.async_block_till_done()
assert len(mock_light.mock_calls) == 1
mock_light.assert_called_with(on=True, brightness=100, temperature=100)
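        # ATTR_BRIGHTNESS 255 (full) is rescaled by the integration to the
        # device's 0-100 brightness range, hence brightness=100 here.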
with patch(
"homeassistant.components.elgato.light.Elgato.light",
return_value=mock_coro(),
) as mock_light:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.frenck"},
blocking=True,
)
await hass.async_block_till_done()
assert len(mock_light.mock_calls) == 1
mock_light.assert_called_with(on=False)
async def test_light_unavailable(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test error/unavailable handling of an Elgato Key Light."""
await init_integration(hass, aioclient_mock)
with patch(
"homeassistant.components.elgato.light.Elgato.light",
side_effect=ElgatoError,
):
with patch(
"homeassistant.components.elgato.light.Elgato.state",
side_effect=ElgatoError,
):
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.frenck"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("light.frenck")
assert state.state == STATE_UNAVAILABLE
|
from homeassistant.components.cover import (
DEVICE_CLASS_BLIND,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
CoverEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
from .devolo_multi_level_switch import DevoloMultiLevelSwitchDeviceEntity
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Get all cover devices and setup them via config entry."""
entities = []
for gateway in hass.data[DOMAIN][entry.entry_id]["gateways"]:
for device in gateway.multi_level_switch_devices:
for multi_level_switch in device.multi_level_switch_property:
if multi_level_switch.startswith("devolo.Blinds"):
entities.append(
DevoloCoverDeviceEntity(
homecontrol=gateway,
device_instance=device,
element_uid=multi_level_switch,
)
)
async_add_entities(entities, False)
class DevoloCoverDeviceEntity(DevoloMultiLevelSwitchDeviceEntity, CoverEntity):
"""Representation of a cover device within devolo Home Control."""
@property
def current_cover_position(self):
"""Return the current position. 0 is closed. 100 is open."""
return self._value
@property
def device_class(self):
"""Return the class of the device."""
return DEVICE_CLASS_BLIND
@property
def is_closed(self):
"""Return if the blind is closed or not."""
return not bool(self._value)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
def open_cover(self, **kwargs):
"""Open the blind."""
self._multi_level_switch_property.set(100)
def close_cover(self, **kwargs):
"""Close the blind."""
self._multi_level_switch_property.set(0)
def set_cover_position(self, **kwargs):
"""Set the blind to the given position."""
self._multi_level_switch_property.set(kwargs["position"])
|
import asyncio
from ipaddress import ip_address
from operator import itemgetter
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import get_local_ip
from .const import (
CONF_LOCAL_IP,
CONFIG_ENTRY_ST,
CONFIG_ENTRY_UDN,
DISCOVERY_LOCATION,
DISCOVERY_ST,
DISCOVERY_UDN,
DISCOVERY_USN,
DOMAIN,
DOMAIN_CONFIG,
DOMAIN_COORDINATORS,
DOMAIN_DEVICES,
DOMAIN_LOCAL_IP,
LOGGER as _LOGGER,
)
from .device import Device
NOTIFICATION_ID = "upnp_notification"
NOTIFICATION_TITLE = "UPnP/IGD Setup"
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Optional(CONF_LOCAL_IP): vol.All(ip_address, cv.string)})},
extra=vol.ALLOW_EXTRA,
)
async def async_discover_and_construct(
hass: HomeAssistantType, udn: str = None, st: str = None
) -> Device:
"""Discovery devices and construct a Device for one."""
# pylint: disable=invalid-name
discovery_infos = await Device.async_discover(hass)
_LOGGER.debug("Discovered devices: %s", discovery_infos)
if not discovery_infos:
_LOGGER.info("No UPnP/IGD devices discovered")
return None
if udn:
# Get the discovery info with specified UDN/ST.
filtered = [di for di in discovery_infos if di[DISCOVERY_UDN] == udn]
if st:
            filtered = [di for di in filtered if di[DISCOVERY_ST] == st]
if not filtered:
_LOGGER.warning(
'Wanted UPnP/IGD device with UDN/ST "%s"/"%s" not found, aborting',
udn,
st,
)
return None
# Ensure we're always taking the latest, if we filtered only on UDN.
filtered = sorted(filtered, key=itemgetter(DISCOVERY_ST), reverse=True)
discovery_info = filtered[0]
else:
# Get the first/any.
discovery_info = discovery_infos[0]
if len(discovery_infos) > 1:
device_name = discovery_info.get(
DISCOVERY_USN, discovery_info.get(DISCOVERY_LOCATION, "")
)
_LOGGER.info("Detected multiple UPnP/IGD devices, using: %s", device_name)
location = discovery_info[DISCOVERY_LOCATION]
return await Device.async_create_device(hass, location)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up UPnP component."""
_LOGGER.debug("async_setup, config: %s", config)
conf_default = CONFIG_SCHEMA({DOMAIN: {}})[DOMAIN]
conf = config.get(DOMAIN, conf_default)
local_ip = await hass.async_add_executor_job(get_local_ip)
hass.data[DOMAIN] = {
DOMAIN_CONFIG: conf,
DOMAIN_COORDINATORS: {},
DOMAIN_DEVICES: {},
DOMAIN_LOCAL_IP: conf.get(CONF_LOCAL_IP, local_ip),
}
# Only start if set up via configuration.yaml.
if DOMAIN in config:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool:
"""Set up UPnP/IGD device from a config entry."""
_LOGGER.debug("async_setup_entry, config_entry: %s", config_entry.data)
# Discover and construct.
udn = config_entry.data.get(CONFIG_ENTRY_UDN)
st = config_entry.data.get(CONFIG_ENTRY_ST) # pylint: disable=invalid-name
try:
device = await async_discover_and_construct(hass, udn, st)
except asyncio.TimeoutError as err:
raise ConfigEntryNotReady from err
if not device:
_LOGGER.info("Unable to create UPnP/IGD, aborting")
raise ConfigEntryNotReady
# Save device.
hass.data[DOMAIN][DOMAIN_DEVICES][device.udn] = device
# Ensure entry has a unique_id.
if not config_entry.unique_id:
hass.config_entries.async_update_entry(
entry=config_entry,
unique_id=device.unique_id,
)
# Create device registry entry.
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_UPNP, device.udn)},
identifiers={(DOMAIN, device.udn)},
name=device.name,
manufacturer=device.manufacturer,
model=device.model_name,
)
# Create sensors.
_LOGGER.debug("Enabling sensors")
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
return True
async def async_unload_entry(
hass: HomeAssistantType, config_entry: ConfigEntry
) -> bool:
"""Unload a UPnP/IGD device from a config entry."""
udn = config_entry.data.get(CONFIG_ENTRY_UDN)
if udn in hass.data[DOMAIN][DOMAIN_DEVICES]:
del hass.data[DOMAIN][DOMAIN_DEVICES][udn]
if udn in hass.data[DOMAIN][DOMAIN_COORDINATORS]:
del hass.data[DOMAIN][DOMAIN_COORDINATORS][udn]
_LOGGER.debug("Deleting sensors")
return await hass.config_entries.async_forward_entry_unload(config_entry, "sensor")
|
import logging
from gsp import GstreamerPlayer
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import CONF_NAME, EVENT_HOMEASSISTANT_STOP, STATE_IDLE
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_PIPELINE = "pipeline"
DOMAIN = "gstreamer"
SUPPORT_GSTREAMER = (
SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_PAUSE
| SUPPORT_PLAY_MEDIA
| SUPPORT_NEXT_TRACK
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_PIPELINE): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Gstreamer platform."""
name = config.get(CONF_NAME)
pipeline = config.get(CONF_PIPELINE)
player = GstreamerPlayer(pipeline)
def _shutdown(call):
"""Quit the player on shutdown."""
player.quit()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)
add_entities([GstreamerDevice(player, name)])
class GstreamerDevice(MediaPlayerEntity):
"""Representation of a Gstreamer device."""
def __init__(self, player, name):
"""Initialize the Gstreamer device."""
self._player = player
self._name = name or DOMAIN
self._state = STATE_IDLE
self._volume = None
self._duration = None
self._uri = None
self._title = None
self._artist = None
self._album = None
def update(self):
"""Update properties."""
self._state = self._player.state
self._volume = self._player.volume
self._duration = self._player.duration
self._uri = self._player.uri
self._title = self._player.title
self._album = self._player.album
self._artist = self._player.artist
def set_volume_level(self, volume):
"""Set the volume level."""
self._player.volume = volume
def play_media(self, media_type, media_id, **kwargs):
"""Play media."""
if media_type != MEDIA_TYPE_MUSIC:
_LOGGER.error("invalid media type")
return
self._player.queue(media_id)
def media_play(self):
"""Play."""
self._player.play()
def media_pause(self):
"""Pause."""
self._player.pause()
def media_next_track(self):
"""Next track."""
self._player.next()
@property
def media_content_id(self):
"""Content ID of currently playing media."""
return self._uri
@property
def content_type(self):
"""Content type of currently playing media."""
return MEDIA_TYPE_MUSIC
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def volume_level(self):
"""Return the volume level."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_GSTREAMER
@property
def state(self):
"""Return the state of the player."""
return self._state
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._duration
@property
def media_title(self):
"""Media title."""
return self._title
@property
def media_artist(self):
"""Media artist."""
return self._artist
@property
def media_album_name(self):
"""Media album."""
return self._album
|
import json
import logging
import re
import requests
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_OK,
HTTP_UNAUTHORIZED,
)
import homeassistant.helpers.config_validation as cv
CONF_HTTP_ID = "http_id"
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): vol.Any(cv.boolean, cv.isfile),
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_HTTP_ID): cv.string,
}
)
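# Illustrative configuration.yaml entry (a sketch based on the schema above;
# the http_id value is an assumption and must match the token used by your
# Tomato router's web interface):
#
#   device_tracker:
#     - platform: tomato
#       host: 192.168.1.1
#       username: admin
#       password: !secret tomato_password
#       http_id: TID0123456789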
def get_scanner(hass, config):
"""Validate the configuration and returns a Tomato scanner."""
return TomatoDeviceScanner(config[DOMAIN])
class TomatoDeviceScanner(DeviceScanner):
"""This class queries a wireless router running Tomato firmware."""
def __init__(self, config):
"""Initialize the scanner."""
host, http_id = config[CONF_HOST], config[CONF_HTTP_ID]
port = config.get(CONF_PORT)
username, password = config[CONF_USERNAME], config[CONF_PASSWORD]
self.ssl, self.verify_ssl = config[CONF_SSL], config[CONF_VERIFY_SSL]
if port is None:
port = 443 if self.ssl else 80
self.req = requests.Request(
"POST",
"http{}://{}:{}/update.cgi".format("s" if self.ssl else "", host, port),
data={"_http_id": http_id, "exec": "devlist"},
auth=requests.auth.HTTPBasicAuth(username, password),
).prepare()
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.last_results = {"wldev": [], "dhcpd_lease": []}
self.success_init = self._update_tomato_info()
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_tomato_info()
return [item[1] for item in self.last_results["wldev"]]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [
item[0] for item in self.last_results["dhcpd_lease"] if item[2] == device
]
if not filter_named or not filter_named[0]:
return None
return filter_named[0]
def _update_tomato_info(self):
"""Ensure the information from the Tomato router is up to date.
        Return a boolean indicating whether the scan was successful.
"""
_LOGGER.info("Scanning")
try:
if self.ssl:
response = requests.Session().send(
self.req, timeout=3, verify=self.verify_ssl
)
else:
response = requests.Session().send(self.req, timeout=3)
# Calling and parsing the Tomato api here. We only need the
# wldev and dhcpd_lease values.
if response.status_code == HTTP_OK:
for param, value in self.parse_api_pattern.findall(response.text):
if param in ("wldev", "dhcpd_lease"):
self.last_results[param] = json.loads(value.replace("'", '"'))
return True
if response.status_code == HTTP_UNAUTHORIZED:
# Authentication error
                _LOGGER.error(
"Failed to authenticate, please check your username and password"
)
return False
except requests.exceptions.ConnectionError:
# We get this if we could not connect to the router or
# an invalid http_id was supplied.
_LOGGER.exception(
"Failed to connect to the router or invalid http_id supplied"
)
return False
except requests.exceptions.Timeout:
            # We get this if the request to the router timed out.
_LOGGER.exception("Connection to the router timed out")
return False
except ValueError:
# If JSON decoder could not parse the response.
_LOGGER.exception("Failed to parse response from router")
return False
|
import logging
from typing import Dict, Union
import voluptuous as vol
from withings_api.common import AuthScope
from homeassistant import config_entries
from homeassistant.components.withings import const
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.util import slugify
class WithingsFlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=const.DOMAIN
):
"""Handle a config flow."""
DOMAIN = const.DOMAIN
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
# Temporarily holds authorization data during the profile step.
_current_data: Dict[str, Union[None, str, int]] = {}
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {
"scope": ",".join(
[
AuthScope.USER_INFO.value,
AuthScope.USER_METRICS.value,
AuthScope.USER_ACTIVITY.value,
AuthScope.USER_SLEEP_EVENTS.value,
]
)
}
async def async_oauth_create_entry(self, data: dict) -> dict:
"""Override the create entry so user can select a profile."""
self._current_data = data
return await self.async_step_profile(data)
async def async_step_profile(self, data: dict) -> dict:
"""Prompt the user to select a user profile."""
errors = {}
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
reauth_profile = (
self.context.get(const.PROFILE)
if self.context.get("source") == "reauth"
else None
)
profile = data.get(const.PROFILE) or reauth_profile
if profile:
existing_entries = [
config_entry
for config_entry in self.hass.config_entries.async_entries(const.DOMAIN)
if slugify(config_entry.data.get(const.PROFILE)) == slugify(profile)
]
if reauth_profile or not existing_entries:
new_data = {**self._current_data, **data, const.PROFILE: profile}
self._current_data = {}
return await self.async_step_finish(new_data)
errors["base"] = "already_configured"
return self.async_show_form(
step_id="profile",
data_schema=vol.Schema({vol.Required(const.PROFILE): str}),
errors=errors,
)
async def async_step_reauth(self, data: dict = None) -> dict:
"""Prompt user to re-authenticate."""
if data is not None:
return await self.async_step_user()
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
placeholders = {const.PROFILE: self.context["profile"]}
self.context.update({"title_placeholders": placeholders})
return self.async_show_form(
step_id="reauth",
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
description_placeholders=placeholders,
)
async def async_step_finish(self, data: dict) -> dict:
"""Finish the flow."""
self._current_data = {}
await self.async_set_unique_id(
str(data["token"]["userid"]), raise_on_progress=False
)
self._abort_if_unique_id_configured(data)
return self.async_create_entry(title=data[const.PROFILE], data=data)
|
import pytest
from homeassistant.components.NEW_DOMAIN import DOMAIN
import homeassistant.components.automation as automation
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a NEW_DOMAIN."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set("NEW_DOMAIN.entity", STATE_OFF)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "NEW_DOMAIN.entity",
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"turn_on - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "NEW_DOMAIN.entity",
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"turn_off - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
]
},
)
# Fake that the entity is turning on.
hass.states.async_set("NEW_DOMAIN.entity", STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "turn_on - device - {} - off - on - None".format(
"NEW_DOMAIN.entity"
)
# Fake that the entity is turning off.
hass.states.async_set("NEW_DOMAIN.entity", STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "turn_off - device - {} - on - off - None".format(
"NEW_DOMAIN.entity"
)
|
from __future__ import print_function
import collections
import json
import re
import sys
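# Illustrative input line (an assumption inferred from the parsing below): each
# line of the samples file is a single JSON object whose 'labels' field embeds
# '|<data_label>:<value>|' and '|histogram:{...}|' segments, e.g. with
# metric_name='latency' and data_label='stage':
#   {"metric": "latency", "labels": "|stage:run|histogram:{\"0.5\": 10, \"1.0\": 3}|"}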
def main():
if len(sys.argv) != 4:
print('usage: %s samples_file.json metric_name data_label' % sys.argv[0])
sys.exit(1)
latency_histogram_by_label = collections.defaultdict(
lambda: collections.defaultdict(int))
total_samples = collections.defaultdict(int)
with open(sys.argv[1]) as samples_file:
for line in samples_file:
sample = json.loads(line)
if sample['metric'] == sys.argv[2]:
labels = sample['labels']
regex = r'\|%s:(.*?)\|' % sys.argv[3]
label = re.search(regex, labels).group(1)
histogram = json.loads(
re.search(r'\|histogram:(.*?)\|', labels).group(1))
        for bucket, count in histogram.items():
          latency_histogram_by_label[label][float(bucket)] += int(count)
          total_samples[label] += int(count)
  for label, histogram in latency_histogram_by_label.items():
running_count = 0
for bucket in sorted(histogram):
running_count += histogram[bucket]
percentile = 100.0 * running_count / total_samples[label]
print(','.join((label, str(bucket), str(percentile))))
if __name__ == '__main__':
main()
|
from __future__ import unicode_literals
import hashlib
def md5_encode(item):
    """Return the 32-character hexadecimal MD5 digest of a string."""
    try:
        return hashlib.md5(item.encode("utf-8")).hexdigest()
    except (AttributeError, UnicodeError):
        # Non-string input, or input that cannot be encoded as UTF-8.
        return ''
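# Minimal usage sketch (not part of the original module): md5_encode always
# returns a 32-character hexadecimal digest for string input.
if __name__ == '__main__':
    digest = md5_encode('example')
    assert len(digest) == 32
    print(digest)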
|
from qstrader.broker.fee_model.zero_fee_model import ZeroFeeModel
class AssetMock(object):
def __init__(self):
pass
class BrokerMock(object):
def __init__(self):
pass
def test_commission_is_zero_uniformly():
"""
Tests that each method returns zero commission,
irrespective of asset, consideration or broker.
"""
zbc = ZeroFeeModel()
asset = AssetMock()
quantity = 100
consideration = 1000.0
broker = BrokerMock()
assert zbc._calc_commission(asset, quantity, consideration, broker=broker) == 0.0
assert zbc._calc_tax(asset, quantity, consideration, broker=broker) == 0.0
assert zbc.calc_total_cost(asset, quantity, consideration, broker=broker) == 0.0
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import logging
import os
import platform
import posixpath
import random
import re
import string
import subprocess
import tempfile
import threading
import time
from absl import flags
import jinja2
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import temp_dir
from six.moves import range
FLAGS = flags.FLAGS
PRIVATE_KEYFILE = 'perfkitbenchmarker_keyfile'
PUBLIC_KEYFILE = 'perfkitbenchmarker_keyfile.pub'
# The temporary directory on VMs. We cannot reuse GetTempDir()
# because run_uri will not be available at time of module load and we need
# to use this directory as a base for other module level constants.
VM_TMP_DIR = '/tmp/pkb'
# Default timeout for issuing a command.
DEFAULT_TIMEOUT = 300
# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1
WINDOWS = 'nt'
DARWIN = 'Darwin'
PASSWORD_LENGTH = 15
OUTPUT_STDOUT = 0
OUTPUT_STDERR = 1
OUTPUT_EXIT_CODE = 2
_SIMULATE_MAINTENANCE_SEMAPHORE = threading.Semaphore(0)
flags.DEFINE_integer('default_timeout', TIMEOUT, 'The default timeout for '
'retryable commands in seconds.')
flags.DEFINE_integer('burn_cpu_seconds', 0,
'Amount of time in seconds to burn cpu on vm before '
'starting benchmark')
flags.DEFINE_integer('burn_cpu_threads', 1, 'Number of threads to use to '
'burn cpu before starting benchmark.')
flags.DEFINE_integer('background_cpu_threads', None,
'Number of threads of background cpu usage while '
'running a benchmark')
flags.DEFINE_integer('background_network_mbits_per_sec', None,
'Number of megabits per second of background '
'network traffic to generate during the run phase '
'of the benchmark')
flags.DEFINE_boolean('simulate_maintenance', False,
'Whether to simulate VM maintenance during the benchmark. '
'This requires both benchmark and provider support.')
flags.DEFINE_integer('simulate_maintenance_delay', 0,
'The number of seconds to wait to start simulating '
'maintenance.')
flags.DEFINE_boolean('ssh_reuse_connections', True,
'Whether to reuse SSH connections rather than '
'reestablishing a connection for each remote command.')
# We set this to the short value of 5 seconds so that the cluster boot benchmark
# can measure a fast connection when bringing up a VM. This avoids retries that
# may not be as quick as every 5 seconds when specifying a larger value.
flags.DEFINE_integer('ssh_connect_timeout', 5, 'timeout for SSH connection.',
lower_bound=0)
flags.DEFINE_string('ssh_control_path', None,
'Overrides the default ControlPath setting for ssh '
'connections if --ssh_reuse_connections is set. This can '
'be helpful on systems whose default temporary directory '
'path is too long (sockets have a max path length) or a '
'version of ssh that doesn\'t support the %h token. See '
'ssh documentation on the ControlPath setting for more '
'detailed information.')
flags.DEFINE_string('ssh_control_persist', '30m',
'Setting applied to ssh connections if '
'--ssh_reuse_connections is set. Sets how long the '
'connections persist before they are removed. '
'See ssh documentation about the ControlPersist setting '
'for more detailed information.')
flags.DEFINE_integer('ssh_server_alive_interval', 30,
'Value for ssh -o ServerAliveInterval. Use with '
'--ssh_server_alive_count_max to configure how long to '
'wait for unresponsive servers.')
flags.DEFINE_integer('ssh_server_alive_count_max', 10,
'Value for ssh -o ServerAliveCountMax. Use with '
'--ssh_server_alive_interval to configure how long to '
'wait for unresponsive servers.')
class IpAddressSubset(object):
"""Enum of options for --ip_addresses."""
REACHABLE = 'REACHABLE'
BOTH = 'BOTH'
INTERNAL = 'INTERNAL'
EXTERNAL = 'EXTERNAL'
ALL = (REACHABLE, BOTH, INTERNAL, EXTERNAL)
flags.DEFINE_enum('ip_addresses', IpAddressSubset.REACHABLE,
IpAddressSubset.ALL,
'For networking tests: use both internal and external '
'IP addresses (BOTH), internal and external only if '
'the receiving VM is reachable by internal IP (REACHABLE), '
'external IP only (EXTERNAL) or internal IP only (INTERNAL)')
flags.DEFINE_enum('background_network_ip_type', IpAddressSubset.EXTERNAL,
(IpAddressSubset.INTERNAL, IpAddressSubset.EXTERNAL),
'IP address type to use when generating background network '
'traffic')
class IpAddressMetadata(object):
INTERNAL = 'internal'
EXTERNAL = 'external'
def GetTempDir():
"""Returns the tmp dir of the current run."""
return temp_dir.GetRunDirPath()
def PrependTempDir(file_name):
"""Returns the file name prepended with the tmp dir of the current run."""
return os.path.join(GetTempDir(), file_name)
def GenTempDir():
"""Creates the tmp dir for the current run if it does not already exist."""
temp_dir.CreateTemporaryDirectories()
def SSHKeyGen():
"""Create PerfKitBenchmarker SSH keys in the tmp dir of the current run."""
if not os.path.isdir(GetTempDir()):
GenTempDir()
if not os.path.isfile(GetPrivateKeyPath()):
create_cmd = ['ssh-keygen',
'-t', 'rsa',
'-N', '',
'-m', 'PEM',
'-q',
'-f', PrependTempDir(PRIVATE_KEYFILE)]
IssueCommand(create_cmd)
def GetPrivateKeyPath():
return PrependTempDir(PRIVATE_KEYFILE)
def GetPublicKeyPath():
return PrependTempDir(PUBLIC_KEYFILE)
def GetSshOptions(ssh_key_filename, connect_timeout=None):
"""Return common set of SSH and SCP options."""
options = [
'-2',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-o', 'IdentitiesOnly=yes',
'-o', 'PreferredAuthentications=publickey',
'-o', 'PasswordAuthentication=no',
'-o', 'ConnectTimeout=%d' % (
connect_timeout or FLAGS.ssh_connect_timeout),
'-o', 'GSSAPIAuthentication=no',
'-o', 'ServerAliveInterval=%d' % FLAGS.ssh_server_alive_interval,
'-o', 'ServerAliveCountMax=%d' % FLAGS.ssh_server_alive_count_max,
'-i', ssh_key_filename
]
if FLAGS.use_ipv6:
options.append('-6')
if FLAGS.ssh_reuse_connections:
control_path = (FLAGS.ssh_control_path or
os.path.join(temp_dir.GetSshConnectionsDir(), '%h'))
options.extend([
'-o', 'ControlPath="%s"' % control_path,
'-o', 'ControlMaster=auto',
'-o', 'ControlPersist=%s' % FLAGS.ssh_control_persist
])
options.extend(FLAGS.ssh_options)
return options
# TODO(skschneider): Remove at least RunParallelProcesses and RunParallelThreads
# from this file (update references to call directly into background_tasks).
RunParallelProcesses = background_tasks.RunParallelProcesses
RunParallelThreads = background_tasks.RunParallelThreads
RunThreaded = background_tasks.RunThreaded
def Retry(poll_interval=POLL_INTERVAL, max_retries=MAX_RETRIES,
timeout=None, fuzz=FUZZ, log_errors=True,
retryable_exceptions=None):
"""A function decorator that will retry when exceptions are thrown.
Args:
poll_interval: The time between tries in seconds. This is the maximum poll
interval when fuzz is specified.
max_retries: The maximum number of retries before giving up. If -1, this
means continue until the timeout is reached. The function will stop
retrying when either max_retries is met or timeout is reached.
timeout: The timeout for all tries in seconds. If -1, this means continue
until max_retries is met. The function will stop retrying when either
max_retries is met or timeout is reached.
fuzz: The amount of randomness in the sleep time. This is used to
keep threads from all retrying at the same time. At 0, this
means sleep exactly poll_interval seconds. At 1, this means
sleep anywhere from 0 to poll_interval seconds.
log_errors: A boolean describing whether errors should be logged.
retryable_exceptions: A tuple of exceptions that should be retried. By
default, this is None, which indicates that all exceptions should
be retried.
Returns:
A function that wraps functions in retry logic. It can be
used as a decorator.
"""
if retryable_exceptions is None:
retryable_exceptions = Exception
def Wrap(f):
"""Wraps the supplied function with retry logic."""
def WrappedFunction(*args, **kwargs):
"""Holds the retry logic."""
local_timeout = FLAGS.default_timeout if timeout is None else timeout
if local_timeout >= 0:
deadline = time.time() + local_timeout
else:
deadline = float('inf')
tries = 0
while True:
try:
tries += 1
return f(*args, **kwargs)
except retryable_exceptions as e:
fuzz_multiplier = 1 - fuzz + random.random() * fuzz
sleep_time = poll_interval * fuzz_multiplier
if ((time.time() + sleep_time) >= deadline or
(max_retries >= 0 and tries > max_retries)):
raise
else:
if log_errors:
logging.info('Retrying exception running %s: %s', f.__name__, e)
time.sleep(sleep_time)
return WrappedFunction
return Wrap
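# Illustrative use of the Retry decorator (a sketch; the decorated function and
# its arguments are assumptions): retry a flaky command every ~5 seconds, at
# most 3 times, and only when IOError is raised. Any other exception type
# propagates immediately.
#
#   @Retry(poll_interval=5, max_retries=3, retryable_exceptions=(IOError,))
#   def _FetchMetadata():
#     return IssueCommand(['curl', 'http://metadata.example/'])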
class _BoxedObject(object):
"""Box a value in a reference so it is modifiable inside an inner function.
In python3 the nonlocal keyword could be used instead - but for python2
there is no support for modifying an external scoped variable value.
"""
def __init__(self, initial_value):
self.value = initial_value
def _ReadIssueCommandOutput(tf_out, tf_err):
"""Reads IssueCommand Output from stdout and stderr."""
tf_out.seek(0)
stdout = tf_out.read().decode('ascii', 'ignore')
tf_err.seek(0)
stderr = tf_err.read().decode('ascii', 'ignore')
return stdout, stderr
def IssueCommand(cmd, force_info_log=False, suppress_warning=False,
env=None, timeout=DEFAULT_TIMEOUT, cwd=None,
raise_on_failure=True, suppress_failure=None,
raise_on_timeout=True):
"""Tries running the provided command once.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
force_info_log: A boolean indicating whether the command result should
always be logged at the info level. Command results will always be
logged at the debug level if they aren't logged at another level.
suppress_warning: A boolean indicating whether the results should
not be logged at the info level in the event of a non-zero
return code. When force_info_log is True, the output is logged
regardless of suppress_warning's value.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
timeout: Timeout for the command in seconds. If the command has not finished
before the timeout is reached, it will be killed. Set timeout to None to
let the command run indefinitely. If the subprocess is killed, the
return code will indicate an error, and stdout and stderr will
contain what had already been written to them before the process was
killed.
cwd: Directory in which to execute the command.
raise_on_failure: A boolean indicating if non-zero return codes should raise
IssueCommandError.
suppress_failure: A function passed (stdout, stderr, ret_code) for non-zero
return codes to determine if the failure should be suppressed e.g. a
delete command which fails because the item to be deleted does not
exist.
    raise_on_timeout: A boolean indicating if killing the process due to the
      timeout being hit should raise an IssueCommandTimeoutError.
Returns:
A tuple of stdout, stderr, and retcode from running the provided command.
Raises:
IssueCommandError: When raise_on_failure=True and retcode is non-zero.
IssueCommandTimeoutError: When raise_on_timeout=True and
command duration exceeds timeout
"""
if env:
logging.debug('Environment variables: %s', env)
# Force conversion to string so you get a nice log statement before hitting a
# type error or NPE. subprocess will still catch it.
full_cmd = ' '.join(str(w) for w in cmd)
logging.info('Running: %s', full_cmd)
time_file_path = '/usr/bin/time'
running_on_windows = RunningOnWindows()
running_on_darwin = RunningOnDarwin()
should_time = (not (running_on_windows or running_on_darwin) and
os.path.isfile(time_file_path) and FLAGS.time_commands)
shell_value = running_on_windows
with tempfile.TemporaryFile() as tf_out, \
tempfile.TemporaryFile() as tf_err, \
tempfile.NamedTemporaryFile(mode='r') as tf_timing:
cmd_to_use = cmd
if should_time:
cmd_to_use = [time_file_path,
'-o', tf_timing.name,
'--quiet',
'-f', ', WallTime:%Es, CPU:%Us, MaxMemory:%Mkb '] + cmd
process = subprocess.Popen(cmd_to_use, env=env, shell=shell_value,
stdin=subprocess.PIPE, stdout=tf_out,
stderr=tf_err, cwd=cwd)
did_timeout = _BoxedObject(False)
was_killed = _BoxedObject(False)
def _KillProcess():
did_timeout.value = True
if not raise_on_timeout:
logging.warning('IssueCommand timed out after %d seconds. '
'Killing command "%s".', timeout, full_cmd)
process.kill()
was_killed.value = True
timer = threading.Timer(timeout, _KillProcess)
timer.start()
try:
process.wait()
finally:
timer.cancel()
stdout, stderr = _ReadIssueCommandOutput(tf_out, tf_err)
timing_output = ''
if should_time:
timing_output = tf_timing.read().rstrip('\n')
debug_text = ('Ran: {%s}\nReturnCode:%s%s\nSTDOUT: %s\nSTDERR: %s' %
(full_cmd, process.returncode, timing_output, stdout, stderr))
if force_info_log or (process.returncode and not suppress_warning):
logging.info(debug_text)
else:
logging.debug(debug_text)
    # Raise the timeout error regardless of raise_on_failure, since the intent
    # of raise_on_failure is to ignore expected errors from the invoked command,
    # not errors from PKB infrastructure.
if did_timeout.value and raise_on_timeout:
debug_text = (
'{0}\nIssueCommand timed out after {1} seconds. '
'{2} by perfkitbenchmarker.'.format(
debug_text, timeout,
'Process was killed' if was_killed.value else
'Process may have been killed'))
raise errors.VmUtil.IssueCommandTimeoutError(debug_text)
elif process.returncode and (raise_on_failure or suppress_failure):
if (suppress_failure and
suppress_failure(stdout, stderr, process.returncode)):
        # The failure is suppressible; rewrite stderr and the return code as
        # passing, since some callers treat either as a failure, e.g.
        # perfkitbenchmarker.providers.aws.util.IssueRetryableCommand().
return stdout, '', 0
raise errors.VmUtil.IssueCommandError(debug_text)
return stdout, stderr, process.returncode
def IssueBackgroundCommand(cmd, stdout_path, stderr_path, env=None):
"""Run the provided command once in the background.
Args:
cmd: Command to be run, as expected by subprocess.Popen.
stdout_path: Redirect stdout here. Overwritten.
stderr_path: Redirect stderr here. Overwritten.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
"""
logging.debug('Environment variables: %s', env)
full_cmd = ' '.join(cmd)
logging.info('Spawning: %s', full_cmd)
outfile = open(stdout_path, 'w')
errfile = open(stderr_path, 'w')
shell_value = RunningOnWindows()
subprocess.Popen(cmd, env=env, shell=shell_value,
stdout=outfile, stderr=errfile, close_fds=True)
@Retry()
def IssueRetryableCommand(cmd, env=None):
"""Tries running the provided command until it succeeds or times out.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
env: An alternate environment to pass to the Popen command.
Returns:
A tuple of stdout and stderr from running the provided command.
"""
stdout, stderr, retcode = IssueCommand(cmd, env=env, raise_on_failure=False)
if retcode:
debug_text = ('Ran: {%s}\nReturnCode:%s\nSTDOUT: %s\nSTDERR: %s' %
(' '.join(cmd), retcode, stdout, stderr))
raise errors.VmUtil.CalledProcessException(
'Command returned a non-zero exit code:\n{}'.format(debug_text))
return stdout, stderr
def ParseTimeCommandResult(command_result):
"""Parse command result and get time elapsed.
Note this parses the output of bash's time builtin, not /usr/bin/time or other
implementations. You may need to run something like bash -c "time ./command"
to produce parseable output.
Args:
command_result: The result after executing a remote time command.
Returns:
Time taken for the command.
"""
time_data = re.findall(r'real\s+(\d+)m(\d+.\d+)', command_result)
time_in_seconds = 60 * float(time_data[0][0]) + float(time_data[0][1])
return time_in_seconds
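# For example (illustrative), the bash time builtin output
#   real    1m30.25s
# parses to 90.25 seconds.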
def ShouldRunOnExternalIpAddress(ip_type=None):
"""Returns whether a test should be run on an instance's external IP."""
ip_type_to_check = ip_type or FLAGS.ip_addresses
return ip_type_to_check in (IpAddressSubset.EXTERNAL, IpAddressSubset.BOTH,
IpAddressSubset.REACHABLE)
def ShouldRunOnInternalIpAddress(sending_vm, receiving_vm, ip_type=None):
"""Returns whether a test should be run on an instance's internal IP.
Based on the command line flag --ip_addresses. Internal IP addresses are used
when:
* --ip_addresses=BOTH or --ip-addresses=INTERNAL
* --ip_addresses=REACHABLE and 'sending_vm' can ping 'receiving_vm' on its
internal IP.
Args:
sending_vm: VirtualMachine. The client.
receiving_vm: VirtualMachine. The server.
ip_type: optional ip_type to use instead of what is set in the FLAGS
Returns:
Whether a test should be run on an instance's internal IP.
"""
ip_type_to_check = ip_type or FLAGS.ip_addresses
return (ip_type_to_check in (IpAddressSubset.BOTH, IpAddressSubset.INTERNAL)
or (ip_type_to_check == IpAddressSubset.REACHABLE and
sending_vm.IsReachable(receiving_vm)))
def GetLastRunUri():
"""Returns the last run_uri used (or None if it can't be determined)."""
runs_dir_path = temp_dir.GetAllRunsDirPath()
try:
dir_names = next(os.walk(runs_dir_path))[1]
except StopIteration:
# The runs directory was not found.
return None
if not dir_names:
# No run subdirectories were found in the runs directory.
return None
# Return the subdirectory with the most recent modification time.
return max(dir_names,
key=lambda d: os.path.getmtime(os.path.join(runs_dir_path, d)))
@contextlib.contextmanager
def NamedTemporaryFile(mode='w+b', prefix='tmp', suffix='', dir=None,
delete=True):
"""Behaves like tempfile.NamedTemporaryFile.
The existing tempfile.NamedTemporaryFile has the annoying property on
Windows that it cannot be opened a second time while it is already open.
This makes it impossible to use it with a "with" statement in a cross platform
compatible way. This serves a similar role, but allows the file to be closed
within a "with" statement without causing the file to be unlinked until the
context exits.
Args:
mode: see mode in tempfile.NamedTemporaryFile.
prefix: see prefix in tempfile.NamedTemporaryFile.
suffix: see suffix in tempfile.NamedTemporaryFile.
dir: see dir in tempfile.NamedTemporaryFile.
delete: see delete in NamedTemporaryFile.
Yields:
A cross platform file-like object which is "with" compatible.
"""
f = tempfile.NamedTemporaryFile(mode=mode, prefix=prefix, suffix=suffix,
dir=dir, delete=False)
try:
yield f
finally:
if not f.closed:
f.close()
if delete:
os.unlink(f.name)
def GenerateSSHConfig(vms, vm_groups):
"""Generates an SSH config file to simplify connecting to the specified VMs.
Writes a file to GetTempDir()/ssh_config with an SSH configuration for each VM
provided in the arguments. Users can then SSH with any of the following:
ssh -F <ssh_config_path> <vm_name>
ssh -F <ssh_config_path> vm<vm_index>
ssh -F <ssh_config_path> <group_name>-<index>
Args:
vms: list of BaseVirtualMachines.
vm_groups: dict mapping VM group name string to list of BaseVirtualMachines.
"""
target_file = os.path.join(GetTempDir(), 'ssh_config')
template_path = data.ResourcePath('ssh_config.j2')
environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
with open(template_path) as fp:
template = environment.from_string(fp.read())
with open(target_file, 'w') as ofp:
ofp.write(template.render({'vms': vms, 'vm_groups': vm_groups}))
ssh_options = [' ssh -F {0} {1}'.format(target_file, pattern)
for pattern in ('<vm_name>', 'vm<index>',
'<group_name>-<index>')]
logging.info('ssh to VMs in this benchmark by name with:\n%s',
'\n'.join(ssh_options))
def RunningOnWindows():
"""Returns True if PKB is running on Windows."""
return os.name == WINDOWS
def RunningOnDarwin():
"""Returns True if PKB is running on a Darwin OS machine."""
return os.name != WINDOWS and platform.system() == DARWIN
def ExecutableOnPath(executable_name):
"""Return True if the given executable can be found on the path."""
cmd = ['where'] if RunningOnWindows() else ['which']
cmd.append(executable_name)
shell_value = RunningOnWindows()
process = subprocess.Popen(cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.communicate()
if process.returncode:
return False
return True
def GenerateRandomWindowsPassword(password_length=PASSWORD_LENGTH):
"""Generates a password that meets Windows complexity requirements."""
# The special characters have to be recognized by the Azure CLI as
# special characters. This greatly limits the set of characters
# that we can safely use. See
# https://github.com/Azure/azure-xplat-cli/blob/master/lib/commands/arm/vm/vmOsProfile._js#L145
special_chars = '*!@#$%+='
# Ensure that the password contains at least one of each 4 required
# character types starting with letters to avoid starting with chars which
# are problematic on the command line e.g. @.
prefix = [random.choice(string.ascii_lowercase),
random.choice(string.ascii_uppercase),
random.choice(string.digits),
random.choice(special_chars)]
password = [
random.choice(string.ascii_letters + string.digits + special_chars)
for _ in range(password_length - 4)]
return ''.join(prefix + password)
def StartSimulatedMaintenance():
"""Initiates the simulated maintenance event."""
if FLAGS.simulate_maintenance:
_SIMULATE_MAINTENANCE_SEMAPHORE.release()
def SetupSimulatedMaintenance(vm):
"""Called ready VM for simulated maintenance."""
if FLAGS.simulate_maintenance:
def _SimulateMaintenance():
_SIMULATE_MAINTENANCE_SEMAPHORE.acquire()
time.sleep(FLAGS.simulate_maintenance_delay)
vm.SimulateMaintenanceEvent()
t = threading.Thread(target=_SimulateMaintenance)
t.daemon = True
t.start()
def CopyFileBetweenVms(filename, src_vm, src_path, dest_vm, dest_path):
"""Copies a file from the src_vm to the dest_vm."""
with tempfile.NamedTemporaryFile() as tf:
temp_path = tf.name
src_vm.RemoteCopy(
temp_path, os.path.join(src_path, filename), copy_to=False)
dest_vm.RemoteCopy(
temp_path, os.path.join(dest_path, filename), copy_to=True)
def ReplaceText(vm, current_value, new_value, file_name, regex_char='/'):
"""Replaces text <current_value> with <new_value> in remote <file_name>."""
vm.RemoteCommand('sed -i -r "s{regex_char}{current_value}{regex_char}'
'{new_value}{regex_char}" {file}'.format(
regex_char=regex_char,
current_value=current_value,
new_value=new_value,
file=file_name))
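# For example (illustrative), ReplaceText(vm, 'Port 22', 'Port 2222',
# '/etc/ssh/sshd_config') runs:
#   sed -i -r "s/Port 22/Port 2222/" /etc/ssh/sshd_config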
def DictionaryToEnvString(dictionary, joiner=' '):
"""Convert a dictionary to a space sperated 'key=value' string.
Args:
dictionary: the key-value dictionary to be convert
joiner: string to separate the entries in the returned value.
Returns:
a string representing the dictionary
"""
return joiner.join(
f'{key}={value}' for key, value in sorted(dictionary.items()))
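# For example (illustrative), DictionaryToEnvString({'B': 2, 'A': 1}) returns
# 'A=1 B=2'; passing joiner=',' would yield 'A=1,B=2'.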
def CreateRemoteFile(vm, file_contents, file_path):
"""Creates a file on the remote server."""
with NamedTemporaryFile(mode='w') as tf:
tf.write(file_contents)
tf.close()
parent_dir = posixpath.dirname(file_path)
vm.RemoteCommand(f'[ -d {parent_dir} ] || mkdir -p {parent_dir}')
vm.PushFile(tf.name, file_path)
|
import logging
import re
import pexpect
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_DEVICES_REGEX = re.compile(
r"(?P<name>([^\s]+)?)\s+"
+ r"(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s+"
+ r"(?P<mac>([0-9a-f]{2}[:-]){5}([0-9a-f]{2}))\s+"
)
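# Illustrative "show clients" line matched by the regex above (column spacing
# and values are assumptions):
#   my-laptop    192.168.1.23    aa:bb:cc:dd:ee:ff ...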
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a Aruba scanner."""
scanner = ArubaDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class ArubaDeviceScanner(DeviceScanner):
"""This class queries a Aruba Access Point for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.last_results = {}
# Test the router is accessible.
data = self.get_aruba_data()
self.success_init = data is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client["mac"] for client in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if not self.last_results:
return None
for client in self.last_results:
if client["mac"] == device:
return client["name"]
return None
def _update_info(self):
"""Ensure the information from the Aruba Access Point is up to date.
        Return a boolean indicating whether the scan was successful.
"""
if not self.success_init:
return False
data = self.get_aruba_data()
if not data:
return False
self.last_results = data.values()
return True
def get_aruba_data(self):
"""Retrieve data from Aruba Access Point and return parsed result."""
connect = f"ssh {self.username}@{self.host}"
ssh = pexpect.spawn(connect)
query = ssh.expect(
[
"password:",
pexpect.TIMEOUT,
pexpect.EOF,
"continue connecting (yes/no)?",
"Host key verification failed.",
"Connection refused",
"Connection timed out",
],
timeout=120,
)
if query == 1:
_LOGGER.error("Timeout")
return
if query == 2:
_LOGGER.error("Unexpected response from router")
return
if query == 3:
ssh.sendline("yes")
ssh.expect("password:")
elif query == 4:
_LOGGER.error("Host key changed")
return
elif query == 5:
_LOGGER.error("Connection refused by server")
return
elif query == 6:
_LOGGER.error("Connection timed out")
return
ssh.sendline(self.password)
ssh.expect("#")
ssh.sendline("show clients")
ssh.expect("#")
devices_result = ssh.before.split(b"\r\n")
ssh.sendline("exit")
devices = {}
for device in devices_result:
match = _DEVICES_REGEX.search(device.decode("utf-8"))
if match:
devices[match.group("ip")] = {
"ip": match.group("ip"),
"mac": match.group("mac").upper(),
"name": match.group("name"),
}
return devices
|
from test import unittest
from test import run_only
from mock import Mock
from mock import patch
import configobj
import diamond.handler.riemann as mod
from diamond.metric import Metric
try:
from riemann_client.client import Client
riemann_client = True
except ImportError:
riemann_client = None
def run_only_if_riemann_client_is_available(func):
def pred():
return riemann_client is not None
return run_only(func, pred)
def fake_connect(self):
# used for 'we can connect' tests
self.transport = Mock()
class TestRiemannHandler(unittest.TestCase):
def setUp(self):
self.__connect_method = mod.RiemannHandler
mod.RiemannHandler._connect = fake_connect
def tearDown(self):
# restore the override
mod.RiemannHandler._connect = self.__connect_method
@run_only_if_riemann_client_is_available
@patch('riemann_client.transport.TCPTransport.connect', Mock())
@patch('riemann_client.client.Client.send_event', Mock())
def test_metric_to_riemann_event(self):
config = configobj.ConfigObj()
config['host'] = 'localhost'
config['port'] = 5555
handler = mod.RiemannHandler(config)
metric = Metric('servers.com.example.www.cpu.total.idle',
0,
timestamp=1234567,
host='com.example.www')
handler.process(metric)
for call in handler.client.send_event.mock_calls:
event = Client.create_dict(call[1][0])
self.assertEqual(event, {
'host': u'com.example.www',
'service': u'servers.cpu.total.idle',
            'time': 1234567,
'metric_f': 0.0,
})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import os
import os.path
import pytest
from coverage import files
from coverage.files import (
TreeMatcher, FnmatchMatcher, ModuleMatcher, PathAliases,
find_python_files, abs_file, actual_path, flat_rootname, fnmatches_to_regex,
)
from coverage.misc import CoverageException
from coverage import env
from tests.coveragetest import CoverageTest
class FilesTest(CoverageTest):
"""Tests of coverage.files."""
def abs_path(self, p):
"""Return the absolute path for `p`."""
return os.path.join(abs_file(os.getcwd()), os.path.normpath(p))
def test_simple(self):
self.make_file("hello.py")
files.set_relative_directory()
self.assertEqual(files.relative_filename(u"hello.py"), u"hello.py")
a = self.abs_path("hello.py")
self.assertNotEqual(a, "hello.py")
self.assertEqual(files.relative_filename(a), "hello.py")
def test_peer_directories(self):
self.make_file("sub/proj1/file1.py")
self.make_file("sub/proj2/file2.py")
a1 = self.abs_path("sub/proj1/file1.py")
a2 = self.abs_path("sub/proj2/file2.py")
d = os.path.normpath("sub/proj1")
self.chdir(d)
files.set_relative_directory()
self.assertEqual(files.relative_filename(a1), "file1.py")
self.assertEqual(files.relative_filename(a2), a2)
def test_filepath_contains_absolute_prefix_twice(self):
# https://github.com/nedbat/coveragepy/issues/194
# Build a path that has two pieces matching the absolute path prefix.
# Technically, this test doesn't do that on Windows, but drive
# letters make that impractical to achieve.
files.set_relative_directory()
d = abs_file(os.curdir)
trick = os.path.splitdrive(d)[1].lstrip(os.path.sep)
rel = os.path.join('sub', trick, 'file1.py')
self.assertEqual(files.relative_filename(abs_file(rel)), rel)
def test_canonical_filename_ensure_cache_hit(self):
self.make_file("sub/proj1/file1.py")
d = actual_path(self.abs_path("sub/proj1"))
self.chdir(d)
files.set_relative_directory()
canonical_path = files.canonical_filename('sub/proj1/file1.py')
self.assertEqual(canonical_path, self.abs_path('file1.py'))
# After the filename has been converted, it should be in the cache.
self.assertIn('sub/proj1/file1.py', files.CANONICAL_FILENAME_CACHE)
self.assertEqual(
files.canonical_filename('sub/proj1/file1.py'),
self.abs_path('file1.py'))
@pytest.mark.parametrize("original, flat", [
(u"a/b/c.py", u"a_b_c_py"),
(u"c:\\foo\\bar.html", u"_foo_bar_html"),
(u"Montréal/☺/conf.py", u"Montréal_☺_conf_py"),
( # original:
u"c:\\lorem\\ipsum\\quia\\dolor\\sit\\amet\\consectetur\\adipisci\\velit\\sed\\quia\\non"
u"\\numquam\\eius\\modi\\tempora\\incidunt\\ut\\labore\\et\\dolore\\magnam\\aliquam"
u"\\quaerat\\voluptatem\\ut\\enim\\ad\\minima\\veniam\\quis\\nostrum\\exercitationem"
u"\\ullam\\corporis\\suscipit\\laboriosam\\Montréal\\☺\\my_program.py",
# flat:
u"re_et_dolore_magnam_aliquam_quaerat_voluptatem_ut_enim_ad_minima_veniam_quis_"
u"nostrum_exercitationem_ullam_corporis_suscipit_laboriosam_Montréal_☺_my_program_py_"
u"97eaca41b860faaa1a21349b1f3009bb061cf0a8"
),
])
def test_flat_rootname(original, flat):
assert flat_rootname(original) == flat
@pytest.mark.parametrize(
"patterns, case_insensitive, partial,"
"matches,"
"nomatches",
[
(
["abc", "xyz"], False, False,
["abc", "xyz"],
["ABC", "xYz", "abcx", "xabc", "axyz", "xyza"],
),
(
["abc", "xyz"], True, False,
["abc", "xyz", "Abc", "XYZ", "AbC"],
["abcx", "xabc", "axyz", "xyza"],
),
(
["abc/hi.py"], True, False,
["abc/hi.py", "ABC/hi.py", r"ABC\hi.py"],
["abc_hi.py", "abc/hi.pyc"],
),
(
[r"abc\hi.py"], True, False,
[r"abc\hi.py", r"ABC\hi.py"],
["abc/hi.py", "ABC/hi.py", "abc_hi.py", "abc/hi.pyc"],
),
(
["abc/*/hi.py"], True, False,
["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"],
["abc/hi.py", "abc/hi.pyc"],
),
(
["abc/[a-f]*/hi.py"], True, False,
["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"],
["abc/zoo/hi.py", "abc/hi.py", "abc/hi.pyc"],
),
(
["abc/"], True, True,
["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"],
["abcd/foo.py", "xabc/hi.py"],
),
])
def test_fnmatches_to_regex(patterns, case_insensitive, partial, matches, nomatches):
regex = fnmatches_to_regex(patterns, case_insensitive=case_insensitive, partial=partial)
for s in matches:
assert regex.match(s)
for s in nomatches:
assert not regex.match(s)
class MatcherTest(CoverageTest):
"""Tests of file matchers."""
def setUp(self):
super(MatcherTest, self).setUp()
files.set_relative_directory()
def assertMatches(self, matcher, filepath, matches):
"""The `matcher` should agree with `matches` about `filepath`."""
canonical = files.canonical_filename(filepath)
self.assertEqual(
matcher.match(canonical), matches,
"File %s should have matched as %s" % (filepath, matches)
)
def test_tree_matcher(self):
matches_to_try = [
(self.make_file("sub/file1.py"), True),
(self.make_file("sub/file2.c"), True),
(self.make_file("sub2/file3.h"), False),
(self.make_file("sub3/file4.py"), True),
(self.make_file("sub3/file5.c"), False),
]
trees = [
files.canonical_filename("sub"),
files.canonical_filename("sub3/file4.py"),
]
tm = TreeMatcher(trees)
self.assertEqual(tm.info(), trees)
for filepath, matches in matches_to_try:
self.assertMatches(tm, filepath, matches)
def test_module_matcher(self):
matches_to_try = [
('test', True),
('trash', False),
('testing', False),
('test.x', True),
('test.x.y.z', True),
('py', False),
('py.t', False),
('py.test', True),
('py.testing', False),
('py.test.buz', True),
('py.test.buz.baz', True),
('__main__', False),
('mymain', True),
('yourmain', False),
]
modules = ['test', 'py.test', 'mymain']
mm = ModuleMatcher(modules)
self.assertEqual(
mm.info(),
modules
)
for modulename, matches in matches_to_try:
self.assertEqual(
mm.match(modulename),
matches,
modulename,
)
def test_fnmatch_matcher(self):
matches_to_try = [
(self.make_file("sub/file1.py"), True),
(self.make_file("sub/file2.c"), False),
(self.make_file("sub2/file3.h"), True),
(self.make_file("sub3/file4.py"), True),
(self.make_file("sub3/file5.c"), False),
]
fnm = FnmatchMatcher(["*.py", "*/sub2/*"])
self.assertEqual(fnm.info(), ["*.py", "*/sub2/*"])
for filepath, matches in matches_to_try:
self.assertMatches(fnm, filepath, matches)
def test_fnmatch_matcher_overload(self):
fnm = FnmatchMatcher(["*x%03d*.txt" % i for i in range(500)])
self.assertMatches(fnm, "x007foo.txt", True)
self.assertMatches(fnm, "x123foo.txt", True)
self.assertMatches(fnm, "x798bar.txt", False)
def test_fnmatch_windows_paths(self):
# We should be able to match Windows paths even if we are running on
# a non-Windows OS.
fnm = FnmatchMatcher(["*/foo.py"])
self.assertMatches(fnm, r"dir\foo.py", True)
fnm = FnmatchMatcher([r"*\foo.py"])
self.assertMatches(fnm, r"dir\foo.py", True)
class PathAliasesTest(CoverageTest):
"""Tests for coverage/files.py:PathAliases"""
run_in_temp_dir = False
def assert_mapped(self, aliases, inp, out):
"""Assert that `inp` mapped through `aliases` produces `out`.
`out` is canonicalized first, since aliases always produce
canonicalized paths.
"""
aliases.pprint()
print(inp)
print(out)
self.assertEqual(aliases.map(inp), files.canonical_filename(out))
def assert_unchanged(self, aliases, inp):
"""Assert that `inp` mapped through `aliases` is unchanged."""
self.assertEqual(aliases.map(inp), inp)
def test_noop(self):
aliases = PathAliases()
self.assert_unchanged(aliases, '/ned/home/a.py')
def test_nomatch(self):
aliases = PathAliases()
aliases.add('/home/*/src', './mysrc')
self.assert_unchanged(aliases, '/home/foo/a.py')
def test_wildcard(self):
aliases = PathAliases()
aliases.add('/ned/home/*/src', './mysrc')
self.assert_mapped(aliases, '/ned/home/foo/src/a.py', './mysrc/a.py')
aliases = PathAliases()
aliases.add('/ned/home/*/src/', './mysrc')
self.assert_mapped(aliases, '/ned/home/foo/src/a.py', './mysrc/a.py')
def test_no_accidental_match(self):
aliases = PathAliases()
aliases.add('/home/*/src', './mysrc')
self.assert_unchanged(aliases, '/home/foo/srcetc')
def test_multiple_patterns(self):
aliases = PathAliases()
aliases.add('/home/*/src', './mysrc')
aliases.add('/lib/*/libsrc', './mylib')
self.assert_mapped(aliases, '/home/foo/src/a.py', './mysrc/a.py')
self.assert_mapped(aliases, '/lib/foo/libsrc/a.py', './mylib/a.py')
def test_cant_have_wildcard_at_end(self):
aliases = PathAliases()
msg = "Pattern must not end with wildcards."
with self.assertRaisesRegex(CoverageException, msg):
aliases.add("/ned/home/*", "fooey")
with self.assertRaisesRegex(CoverageException, msg):
aliases.add("/ned/home/*/", "fooey")
with self.assertRaisesRegex(CoverageException, msg):
aliases.add("/ned/home/*/*/", "fooey")
def test_no_accidental_munging(self):
aliases = PathAliases()
aliases.add(r'c:\Zoo\boo', 'src/')
aliases.add('/home/ned$', 'src/')
self.assert_mapped(aliases, r'c:\Zoo\boo\foo.py', 'src/foo.py')
self.assert_mapped(aliases, r'/home/ned$/foo.py', 'src/foo.py')
def test_paths_are_os_corrected(self):
aliases = PathAliases()
aliases.add('/home/ned/*/src', './mysrc')
aliases.add(r'c:\ned\src', './mysrc')
self.assert_mapped(aliases, r'C:\Ned\src\sub\a.py', './mysrc/sub/a.py')
aliases = PathAliases()
aliases.add('/home/ned/*/src', r'.\mysrc')
aliases.add(r'c:\ned\src', r'.\mysrc')
self.assert_mapped(aliases, r'/home/ned/foo/src/sub/a.py', r'.\mysrc\sub\a.py')
def test_windows_on_linux(self):
# https://github.com/nedbat/coveragepy/issues/618
lin = "*/project/module/"
win = "*\\project\\module\\"
# Try the paths in both orders.
for paths in [[lin, win], [win, lin]]:
aliases = PathAliases()
for path in paths:
aliases.add(path, "project/module")
self.assert_mapped(
aliases,
"C:\\a\\path\\somewhere\\coveragepy_test\\project\\module\\tests\\file.py",
"project/module/tests/file.py"
)
def test_linux_on_windows(self):
# https://github.com/nedbat/coveragepy/issues/618
lin = "*/project/module/"
win = "*\\project\\module\\"
# Try the paths in both orders.
for paths in [[lin, win], [win, lin]]:
aliases = PathAliases()
for path in paths:
aliases.add(path, "project\\module")
self.assert_mapped(
aliases,
"C:/a/path/somewhere/coveragepy_test/project/module/tests/file.py",
"project\\module\\tests\\file.py"
)
def test_multiple_wildcard(self):
aliases = PathAliases()
aliases.add('/home/jenkins/*/a/*/b/*/django', './django')
self.assert_mapped(
aliases,
'/home/jenkins/xx/a/yy/b/zz/django/foo/bar.py',
'./django/foo/bar.py'
)
def test_leading_wildcard(self):
aliases = PathAliases()
aliases.add('*/d1', './mysrc1')
aliases.add('*/d2', './mysrc2')
self.assert_mapped(aliases, '/foo/bar/d1/x.py', './mysrc1/x.py')
self.assert_mapped(aliases, '/foo/bar/d2/y.py', './mysrc2/y.py')
def test_dot(self):
cases = ['.', '..', '../other']
if not env.WINDOWS:
# The root test case was added for the manylinux Docker images,
# and I'm not sure how it should work on Windows, so skip it.
cases += ['/']
for d in cases:
aliases = PathAliases()
aliases.add(d, '/the/source')
the_file = os.path.join(d, 'a.py')
the_file = os.path.expanduser(the_file)
the_file = os.path.abspath(os.path.realpath(the_file))
assert '~' not in the_file # to be sure the test is pure.
self.assert_mapped(aliases, the_file, '/the/source/a.py')
class FindPythonFilesTest(CoverageTest):
"""Tests of `find_python_files`."""
def test_find_python_files(self):
self.make_file("sub/a.py")
self.make_file("sub/b.py")
self.make_file("sub/x.c") # nope: not .py
self.make_file("sub/ssub/__init__.py")
self.make_file("sub/ssub/s.py")
self.make_file("sub/ssub/~s.py") # nope: editor effluvia
self.make_file("sub/lab/exp.py") # nope: no __init__.py
self.make_file("sub/windows.pyw")
py_files = set(find_python_files("sub"))
self.assert_same_files(py_files, [
"sub/a.py", "sub/b.py",
"sub/ssub/__init__.py", "sub/ssub/s.py",
"sub/windows.pyw",
])
class WindowsFileTest(CoverageTest):
"""Windows-specific tests of file name handling."""
run_in_temp_dir = False
def setUp(self):
if not env.WINDOWS:
self.skipTest("Only need to run Windows tests on Windows.")
super(WindowsFileTest, self).setUp()
def test_actual_path(self):
self.assertEqual(actual_path(r'c:\Windows'), actual_path(r'C:\wINDOWS'))
|
from homeassistant.const import HTTP_FORBIDDEN, HTTP_UNAUTHORIZED
def is_invalid_auth_code(http_status_code):
"""HTTP status codes that mean invalid auth."""
if http_status_code in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):
return True
return False
def percent_conv(val):
"""Convert an actual percentage (0.0-1.0) to 0-100 scale."""
return round(val * 100.0, 1)
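# For example (illustrative), percent_conv(0.4567) returns 45.7, and
# is_invalid_auth_code(401) returns True.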
|
from homeassistant import config_entries, setup
from homeassistant.components.coronavirus.const import DOMAIN, OPTION_WORLDWIDE
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"country": OPTION_WORLDWIDE},
)
assert result2["type"] == "create_entry"
assert result2["title"] == "Worldwide"
assert result2["result"].unique_id == OPTION_WORLDWIDE
assert result2["data"] == {
"country": OPTION_WORLDWIDE,
}
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 4
|
import unittest
from unittest import mock
from homeassistant.components import dyson
from .common import load_mock_device
from tests.common import get_test_home_assistant
def _get_dyson_account_device_available():
"""Return a valid device provide by Dyson web services."""
device = mock.Mock()
load_mock_device(device)
device.connect = mock.Mock(return_value=True)
device.auto_connect = mock.Mock(return_value=True)
return device
def _get_dyson_account_device_not_available():
"""Return an invalid device provide by Dyson web services."""
device = mock.Mock()
load_mock_device(device)
device.connect = mock.Mock(return_value=False)
device.auto_connect = mock.Mock(return_value=False)
return device
def _get_dyson_account_device_error():
"""Return an invalid device raising OSError while connecting."""
device = mock.Mock()
load_mock_device(device)
device.connect = mock.Mock(side_effect=OSError("Network error"))
return device
class DysonTest(unittest.TestCase):
"""Dyson parent component test class."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
self.hass.stop()
@mock.patch("libpurecool.dyson.DysonAccount.login", return_value=False)
def test_dyson_login_failed(self, mocked_login):
"""Test if Dyson connection failed."""
dyson.setup(
self.hass,
{
dyson.DOMAIN: {
dyson.CONF_USERNAME: "email",
dyson.CONF_PASSWORD: "password",
dyson.CONF_LANGUAGE: "FR",
}
},
)
assert mocked_login.call_count == 1
@mock.patch("libpurecool.dyson.DysonAccount.devices", return_value=[])
@mock.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
def test_dyson_login(self, mocked_login, mocked_devices):
"""Test valid connection to dyson web service."""
dyson.setup(
self.hass,
{
dyson.DOMAIN: {
dyson.CONF_USERNAME: "email",
dyson.CONF_PASSWORD: "password",
dyson.CONF_LANGUAGE: "FR",
}
},
)
assert mocked_login.call_count == 1
assert mocked_devices.call_count == 1
assert len(self.hass.data[dyson.DYSON_DEVICES]) == 0
@mock.patch("homeassistant.helpers.discovery.load_platform")
@mock.patch(
"libpurecool.dyson.DysonAccount.devices",
return_value=[_get_dyson_account_device_available()],
)
@mock.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
def test_dyson_custom_conf(self, mocked_login, mocked_devices, mocked_discovery):
"""Test device connection using custom configuration."""
dyson.setup(
self.hass,
{
dyson.DOMAIN: {
dyson.CONF_USERNAME: "email",
dyson.CONF_PASSWORD: "password",
dyson.CONF_LANGUAGE: "FR",
dyson.CONF_DEVICES: [
{"device_id": "XX-XXXXX-XX", "device_ip": "192.168.0.1"}
],
}
},
)
assert mocked_login.call_count == 1
assert mocked_devices.call_count == 1
assert len(self.hass.data[dyson.DYSON_DEVICES]) == 1
assert mocked_discovery.call_count == 5
@mock.patch(
"libpurecool.dyson.DysonAccount.devices",
return_value=[_get_dyson_account_device_not_available()],
)
@mock.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
def test_dyson_custom_conf_device_not_available(self, mocked_login, mocked_devices):
"""Test device connection with an invalid device."""
dyson.setup(
self.hass,
{
dyson.DOMAIN: {
dyson.CONF_USERNAME: "email",
dyson.CONF_PASSWORD: "password",
dyson.CONF_LANGUAGE: "FR",
dyson.CONF_DEVICES: [
{"device_id": "XX-XXXXX-XX", "device_ip": "192.168.0.1"}
],
}
},
)
assert mocked_login.call_count == 1
assert mocked_devices.call_count == 1
assert len(self.hass.data[dyson.DYSON_DEVICES]) == 0
@mock.patch(
"libpurecool.dyson.DysonAccount.devices",
return_value=[_get_dyson_account_device_error()],
)
@mock.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
def test_dyson_custom_conf_device_error(self, mocked_login, mocked_devices):
"""Test device connection with device raising an exception."""
dyson.setup(
self.hass,
{
dyson.DOMAIN: {
dyson.CONF_USERNAME: "email",
dyson.CONF_PASSWORD: "password",
dyson.CONF_LANGUAGE: "FR",
dyson.CONF_DEVICES: [
{"device_id": "XX-XXXXX-XX", "device_ip": "192.168.0.1"}
],
}
},
)
assert mocked_login.call_count == 1
assert mocked_devices.call_count == 1
assert len(self.hass.data[dyson.DYSON_DEVICES]) == 0
@mock.patch("homeassistant.helpers.discovery.load_platform")
@mock.patch(
"libpurecool.dyson.DysonAccount.devices",
return_value=[_get_dyson_account_device_available()],
)
@mock.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
def test_dyson_custom_conf_with_unknown_device(
self, mocked_login, mocked_devices, mocked_discovery
):
"""Test device connection with custom conf and unknown device."""
dyson.setup(
self.hass,
{
dyson.DOMAIN: {
dyson.CONF_USERNAME: "email",
dyson.CONF_PASSWORD: "password",
dyson.CONF_LANGUAGE: "FR",
dyson.CONF_DEVICES: [
{"device_id": "XX-XXXXX-XY", "device_ip": "192.168.0.1"}
],
}
},
)
assert mocked_login.call_count == 1
assert mocked_devices.call_count == 1
assert len(self.hass.data[dyson.DYSON_DEVICES]) == 0
assert mocked_discovery.call_count == 0
@mock.patch("homeassistant.helpers.discovery.load_platform")
@mock.patch(
"libpurecool.dyson.DysonAccount.devices",
return_value=[_get_dyson_account_device_available()],
)
@mock.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
def test_dyson_discovery(self, mocked_login, mocked_devices, mocked_discovery):
"""Test device connection using discovery."""
dyson.setup(
self.hass,
{
dyson.DOMAIN: {
dyson.CONF_USERNAME: "email",
dyson.CONF_PASSWORD: "password",
dyson.CONF_LANGUAGE: "FR",
dyson.CONF_TIMEOUT: 5,
dyson.CONF_RETRY: 2,
}
},
)
assert mocked_login.call_count == 1
assert mocked_devices.call_count == 1
assert len(self.hass.data[dyson.DYSON_DEVICES]) == 1
assert mocked_discovery.call_count == 5
@mock.patch(
"libpurecool.dyson.DysonAccount.devices",
return_value=[_get_dyson_account_device_not_available()],
)
@mock.patch("libpurecool.dyson.DysonAccount.login", return_value=True)
def test_dyson_discovery_device_not_available(self, mocked_login, mocked_devices):
"""Test device connection with discovery and invalid device."""
dyson.setup(
self.hass,
{
dyson.DOMAIN: {
dyson.CONF_USERNAME: "email",
dyson.CONF_PASSWORD: "password",
dyson.CONF_LANGUAGE: "FR",
dyson.CONF_TIMEOUT: 5,
dyson.CONF_RETRY: 2,
}
},
)
assert mocked_login.call_count == 1
assert mocked_devices.call_count == 1
assert len(self.hass.data[dyson.DYSON_DEVICES]) == 0
|
from typing import Optional
from aioesphomeapi import BinarySensorInfo, BinarySensorState
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import EsphomeEntity, platform_async_setup_entry
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up ESPHome binary sensors based on a config entry."""
await platform_async_setup_entry(
hass,
entry,
async_add_entities,
component_key="binary_sensor",
info_type=BinarySensorInfo,
entity_type=EsphomeBinarySensor,
state_type=BinarySensorState,
)
class EsphomeBinarySensor(EsphomeEntity, BinarySensorEntity):
"""A binary sensor implementation for ESPHome."""
@property
def _static_info(self) -> BinarySensorInfo:
return super()._static_info
@property
def _state(self) -> Optional[BinarySensorState]:
return super()._state
@property
def is_on(self) -> Optional[bool]:
"""Return true if the binary sensor is on."""
if self._static_info.is_status_binary_sensor:
# Status binary sensors indicate the connected state,
# so in their case what is usually _availability_ is the state.
return self._entry_data.available
if self._state is None:
return None
if self._state.missing_state:
return None
return self._state.state
@property
def device_class(self) -> str:
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._static_info.device_class
@property
def available(self) -> bool:
"""Return True if entity is available."""
if self._static_info.is_status_binary_sensor:
return True
return super().available
|
import asyncio
from ssl import SSLContext
import sys
from typing import Any, Awaitable, Optional, Union, cast
import aiohttp
from aiohttp import web
from aiohttp.hdrs import CONTENT_TYPE, USER_AGENT
from aiohttp.web_exceptions import HTTPBadGateway, HTTPGatewayTimeout
import async_timeout
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE, __version__
from homeassistant.core import Event, callback
from homeassistant.helpers.frame import warn_use
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import bind_hass
from homeassistant.util import ssl as ssl_util
DATA_CONNECTOR = "aiohttp_connector"
DATA_CONNECTOR_NOTVERIFY = "aiohttp_connector_notverify"
DATA_CLIENTSESSION = "aiohttp_clientsession"
DATA_CLIENTSESSION_NOTVERIFY = "aiohttp_clientsession_notverify"
SERVER_SOFTWARE = "HomeAssistant/{0} aiohttp/{1} Python/{2[0]}.{2[1]}".format(
__version__, aiohttp.__version__, sys.version_info
)
@callback
@bind_hass
def async_get_clientsession(
hass: HomeAssistantType, verify_ssl: bool = True
) -> aiohttp.ClientSession:
"""Return default aiohttp ClientSession.
This method must be run in the event loop.
"""
if verify_ssl:
key = DATA_CLIENTSESSION
else:
key = DATA_CLIENTSESSION_NOTVERIFY
if key not in hass.data:
hass.data[key] = async_create_clientsession(hass, verify_ssl)
return cast(aiohttp.ClientSession, hass.data[key])
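# Illustrative sketch (not part of the original module): inside a coroutine that
# has access to `hass`, the shared session is fetched once and used like any
# other aiohttp ClientSession, e.g.:
#
#     session = async_get_clientsession(hass)
#     resp = await session.get("https://example.com/api")  # hypothetical URL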
@callback
@bind_hass
def async_create_clientsession(
hass: HomeAssistantType,
verify_ssl: bool = True,
auto_cleanup: bool = True,
**kwargs: Any,
) -> aiohttp.ClientSession:
"""Create a new ClientSession with kwargs, i.e. for cookies.
If auto_cleanup is False, you need to call detach() after the session
returned is no longer used. Default is True, the session will be
automatically detached on homeassistant_stop.
This method must be run in the event loop.
"""
connector = _async_get_connector(hass, verify_ssl)
clientsession = aiohttp.ClientSession(
connector=connector,
headers={USER_AGENT: SERVER_SOFTWARE},
**kwargs,
)
clientsession.close = warn_use( # type: ignore
clientsession.close, "closes the Home Assistant aiohttp session"
)
if auto_cleanup:
_async_register_clientsession_shutdown(hass, clientsession)
return clientsession
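# Illustrative sketch (not part of the original module): creating a dedicated
# session with its own cookie jar and cleaning it up manually instead of relying
# on the automatic shutdown listener (URL is hypothetical):
#
#     session = async_create_clientsession(
#         hass, auto_cleanup=False, cookie_jar=aiohttp.CookieJar(unsafe=True)
#     )
#     try:
#         await session.get("https://example.com")
#     finally:
#         session.detach()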
@bind_hass
async def async_aiohttp_proxy_web(
hass: HomeAssistantType,
request: web.BaseRequest,
web_coro: Awaitable[aiohttp.ClientResponse],
buffer_size: int = 102400,
timeout: int = 10,
) -> Optional[web.StreamResponse]:
"""Stream websession request to aiohttp web response."""
try:
with async_timeout.timeout(timeout):
req = await web_coro
except asyncio.CancelledError:
# The user cancelled the request
return None
except asyncio.TimeoutError as err:
# Timeout trying to start the web request
raise HTTPGatewayTimeout() from err
except aiohttp.ClientError as err:
# Something went wrong with the connection
raise HTTPBadGateway() from err
try:
return await async_aiohttp_proxy_stream(
hass, request, req.content, req.headers.get(CONTENT_TYPE)
)
finally:
req.close()
@bind_hass
async def async_aiohttp_proxy_stream(
hass: HomeAssistantType,
request: web.BaseRequest,
stream: aiohttp.StreamReader,
content_type: Optional[str],
buffer_size: int = 102400,
timeout: int = 10,
) -> web.StreamResponse:
"""Stream a stream to aiohttp web response."""
response = web.StreamResponse()
if content_type is not None:
response.content_type = content_type
await response.prepare(request)
try:
while True:
with async_timeout.timeout(timeout):
data = await stream.read(buffer_size)
if not data:
break
await response.write(data)
except (asyncio.TimeoutError, aiohttp.ClientError):
# Something went wrong fetching data, closed connection
pass
return response
@callback
def _async_register_clientsession_shutdown(
hass: HomeAssistantType, clientsession: aiohttp.ClientSession
) -> None:
"""Register ClientSession close on Home Assistant shutdown.
This method must be run in the event loop.
"""
@callback
def _async_close_websession(event: Event) -> None:
"""Close websession."""
clientsession.detach()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _async_close_websession)
@callback
def _async_get_connector(
hass: HomeAssistantType, verify_ssl: bool = True
) -> aiohttp.BaseConnector:
"""Return the connector pool for aiohttp.
This method must be run in the event loop.
"""
key = DATA_CONNECTOR if verify_ssl else DATA_CONNECTOR_NOTVERIFY
if key in hass.data:
return cast(aiohttp.BaseConnector, hass.data[key])
if verify_ssl:
ssl_context: Union[bool, SSLContext] = ssl_util.client_context()
else:
ssl_context = False
connector = aiohttp.TCPConnector(enable_cleanup_closed=True, ssl=ssl_context)
hass.data[key] = connector
async def _async_close_connector(event: Event) -> None:
"""Close connector pool."""
await connector.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _async_close_connector)
return connector
|
import io
import os
import lxml.html
from nikola import shortcodes as sc
from nikola.plugin_categories import PageCompiler
from nikola.utils import LocaleBorg, makedirs, map_metadata, write_metadata
class CompileHtml(PageCompiler):
"""Compile HTML into HTML."""
name = "html"
friendly_name = "HTML"
supports_metadata = True
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
"""Compile HTML into HTML strings, with shortcode support."""
if not is_two_file:
_, data = self.split_metadata(data, post, lang)
new_data, shortcodes = sc.extract_shortcodes(data)
return self.site.apply_shortcodes_uuid(new_data, shortcodes, filename=source_path, extra_context={'post': post})
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
"""Compile the source file into HTML and save as dest."""
makedirs(os.path.dirname(dest))
with io.open(dest, "w+", encoding="utf-8") as out_file:
with io.open(source, "r", encoding="utf-8-sig") as in_file:
data = in_file.read()
data, shortcode_deps = self.compile_string(data, source, is_two_file, post, lang)
out_file.write(data)
if post is None:
if shortcode_deps:
self.logger.error(
"Cannot save dependencies for post {0} (post unknown)",
source)
else:
post._depfile[dest] += shortcode_deps
return True
def create_post(self, path, **kw):
"""Create a new post."""
content = kw.pop('content', None)
onefile = kw.pop('onefile', False)
# is_page is not used by create_post as of now.
kw.pop('is_page', False)
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with io.open(path, "w+", encoding="utf-8") as fd:
if onefile:
fd.write(write_metadata(metadata, comment_wrap=True, site=self.site, compiler=self))
fd.write(content)
def read_metadata(self, post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
"""Read the metadata from a post's meta tags, and return a metadata dict."""
if lang is None:
lang = LocaleBorg().current_lang
source_path = post.translated_source_path(lang)
with io.open(source_path, 'r', encoding='utf-8-sig') as inf:
data = inf.read()
metadata = {}
try:
doc = lxml.html.document_fromstring(data)
except lxml.etree.ParserError as e:
# Issue #374 -> #2851
if str(e) == "Document is empty":
return {}
# let other errors raise
raise
title_tag = doc.find('*//title')
if title_tag is not None and title_tag.text:
metadata['title'] = title_tag.text
meta_tags = doc.findall('*//meta')
for tag in meta_tags:
k = tag.get('name', '').lower()
if not k:
continue
elif k == 'keywords':
k = 'tags'
content = tag.get('content')
if content:
metadata[k] = content
map_metadata(metadata, 'html_metadata', self.site.config)
return metadata
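# Illustrative sketch (not part of the original plugin): given a post whose HTML
# source contains
#
#     <title>My post</title>
#     <meta name="keywords" content="python,nikola">
#     <meta name="author" content="Jane Doe">
#
# read_metadata() would return roughly
# {'title': 'My post', 'tags': 'python,nikola', 'author': 'Jane Doe'}
# before map_metadata applies any site-configured remapping.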
|
import numpy as np
import pytest
# pylint: disable=line-too-long
from tensornetwork.contractors.custom_path_solvers.nconinterface import ncon_solver, ncon_to_adj, ord_to_ncon, ncon_cost_check
@pytest.mark.parametrize('chi', range(2, 6))
def test_ncon_solver(chi):
# test against network with known cost
chi = np.random.randint(2, 10)
u = np.random.rand(chi, chi, chi, chi)
w = np.random.rand(chi, chi, chi)
ham = np.random.rand(chi, chi, chi, chi, chi, chi)
tensors = [u, u, w, w, w, ham, u, u, w, w, w]
connects = [[1, 3, 10, 11], [4, 7, 12, 13], [8, 10, -4], [11, 12, -5],
[13, 14, -6], [2, 5, 6, 3, 4, 7], [1, 2, 9, 17], [5, 6, 16, 15],
[8, 9, -1], [17, 16, -2], [15, 14, -3]]
con_order, costs, is_optimal = ncon_solver(tensors, connects, max_branch=None)
flat_connects = np.concatenate(connects)
inds = np.sort(np.unique(flat_connects[flat_connects > 0]))
ex_cost = np.log10(2 * chi**9 + 4 * chi**8 + 2 * chi**6 + 2 * chi**5)
assert np.allclose(costs, ex_cost)
assert is_optimal
assert np.array_equal(inds, np.sort(con_order))
@pytest.mark.parametrize('num_closed', range(1, 20))
def test_ncon_solver2(num_closed):
chi = 4
N = 10
A = np.zeros([chi, chi, chi, chi, chi, chi])
tensors = [A] * N
num_open = 4 * N - 2 * num_closed
cl_inds = 1 + np.arange(num_closed)
op_inds = -1 - np.arange(num_open)
connects = [0] * N
perm = np.argsort(np.sin(range(4 * N)))
comb_inds = np.concatenate((op_inds, cl_inds, cl_inds))[perm]
for k in range(N):
if k < (N - 1):
connect_temp = np.concatenate((comb_inds[4 * k:4 * (k + 1)],
[num_closed + k + 1, num_closed + k + 2]))
else:
connect_temp = np.concatenate(
(comb_inds[4 * k:4 * (k + 1)], [num_closed + k + 1, num_closed + 1]))
connects[k] = list(connect_temp[np.argsort(np.random.rand(6))])
max_branch = 1000
con_order, costs, _ = ncon_solver(tensors, connects, max_branch=max_branch)
ex_cost = ncon_cost_check(tensors, connects, con_order)
assert np.allclose(costs, ex_cost)
assert np.array_equal(np.arange(num_closed + N) + 1, np.sort(con_order))
@pytest.mark.parametrize('chi', range(2, 6))
@pytest.mark.parametrize('N', range(2, 7))
def test_ncon_to_adj(chi, N):
A = np.zeros([chi, chi])
tensors = [A] * N
connects = [0] * N
for k in range(N):
if k == 0:
connects[k] = [-1, 1]
elif k == (N - 1):
connects[k] = [k, -2]
else:
connects[k] = [k, k + 1]
log_adj = ncon_to_adj(tensors, connects)
ex_log_adj = np.zeros([N, N])
ex_log_adj[:(N - 1), 1:] = np.diag(np.log10(chi) * np.ones([N - 1]))
ex_log_adj += ex_log_adj.T
ex_log_adj[0, 0] = np.log10(chi)
ex_log_adj[-1, -1] = np.log10(chi)
assert np.allclose(log_adj, ex_log_adj)
@pytest.mark.parametrize('num_closed', range(1, 16))
def test_ord_to_ncon(num_closed):
N = 8
num_open = 4 * N - 2 * num_closed
cl_inds = 1 + np.arange(num_closed)
op_inds = -1 - np.arange(num_open)
connects = [0] * N
perm = np.argsort(np.random.rand(4 * N))
comb_inds = np.concatenate((op_inds, cl_inds, cl_inds))[perm]
for k in range(N):
if k < (N - 1):
connect_temp = np.concatenate((comb_inds[4 * k:4 * (k + 1)],
[num_closed + k + 1, num_closed + k + 2]))
else:
connect_temp = np.concatenate(
(comb_inds[4 * k:4 * (k + 1)], [num_closed + k + 1, num_closed + 1]))
connects[k] = list(connect_temp[np.argsort(np.random.rand(6))])
order = np.zeros([2, N - 1], dtype=int)
for k in range(N - 1):
temp_loc = np.random.randint(0, N - k - 1)
order[0, k] = temp_loc
order[1, k] = np.random.randint(temp_loc + 1, N - k)
con_order = ord_to_ncon(connects, order)
assert np.array_equal(np.sort(con_order), np.arange(num_closed + N) + 1)
@pytest.mark.parametrize('chi', range(2, 6))
def test_ncon_cost_check(chi):
# test against network with known cost
u = np.random.rand(chi, chi, chi, chi)
w = np.random.rand(chi, chi, chi)
ham = np.random.rand(chi, chi, chi, chi, chi, chi)
tensors = [u, u, w, w, w, ham, u, u, w, w, w]
connects = [[1, 3, 10, 11], [4, 7, 12, 13], [8, 10, -4], [11, 12, -5],
[13, 14, -6], [2, 5, 6, 3, 4, 7], [1, 2, 9, 17], [5, 6, 16, 15],
[8, 9, -1], [17, 16, -2], [15, 14, -3]]
con_order = [4, 7, 17, 5, 6, 11, 3, 12, 14, 1, 2, 16, 8, 9, 10, 13, 15]
cost = ncon_cost_check(tensors, connects, con_order)
ex_cost = np.log10(2 * chi**9 + 4 * chi**8 + 2 * chi**6 + 2 * chi**5)
assert np.allclose(cost, ex_cost)
@pytest.mark.parametrize('chi', range(2, 6))
def test_ncon_cost_check2(chi):
# test against network with known cost (includes traces and inner products)
A = np.zeros([chi, chi, chi, chi])
B = np.zeros([chi, chi, chi, chi, chi, chi])
C = np.zeros([chi, chi, chi])
D = np.zeros([chi, chi])
tensors = [A, B, C, D]
connects = [[1, 2, 3, 1], [2, 4, 4, 5, 6, 6], [3, 5, -1], [-2, -3]]
con_order = [1, 2, 3, 4, 5, 6]
cost = ncon_cost_check(tensors, connects, con_order)
ex_cost = np.log10(3 * chi**3)
assert np.allclose(cost, ex_cost)
|
import os
import cherrypy
from cherrypy.test import helper
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
class VirtualHostTest(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self):
return 'Hello, world'
@cherrypy.expose
def dom4(self):
return 'Under construction'
@cherrypy.expose
def method(self, value):
return 'You sent %s' % value
class VHost:
def __init__(self, sitename):
self.sitename = sitename
@cherrypy.expose
def index(self):
return 'Welcome to %s' % self.sitename
@cherrypy.expose
def vmethod(self, value):
return 'You sent %s' % value
@cherrypy.expose
def url(self):
return cherrypy.url('nextpage')
# Test static as a handler (section must NOT include vhost prefix)
static = cherrypy.tools.staticdir.handler(
section='/static', dir=curdir)
root = Root()
root.mydom2 = VHost('Domain 2')
root.mydom3 = VHost('Domain 3')
hostmap = {'www.mydom2.com': '/mydom2',
'www.mydom3.com': '/mydom3',
'www.mydom4.com': '/dom4',
}
cherrypy.tree.mount(root, config={
'/': {
'request.dispatch': cherrypy.dispatch.VirtualHost(**hostmap)
},
# Test static in config (section must include vhost prefix)
'/mydom2/static2': {
'tools.staticdir.on': True,
'tools.staticdir.root': curdir,
'tools.staticdir.dir': 'static',
'tools.staticdir.index': 'index.html',
},
})
def testVirtualHost(self):
self.getPage('/', [('Host', 'www.mydom1.com')])
self.assertBody('Hello, world')
self.getPage('/mydom2/', [('Host', 'www.mydom1.com')])
self.assertBody('Welcome to Domain 2')
self.getPage('/', [('Host', 'www.mydom2.com')])
self.assertBody('Welcome to Domain 2')
self.getPage('/', [('Host', 'www.mydom3.com')])
self.assertBody('Welcome to Domain 3')
self.getPage('/', [('Host', 'www.mydom4.com')])
self.assertBody('Under construction')
# Test GET, POST, and positional params
self.getPage('/method?value=root')
self.assertBody('You sent root')
self.getPage('/vmethod?value=dom2+GET', [('Host', 'www.mydom2.com')])
self.assertBody('You sent dom2 GET')
self.getPage('/vmethod', [('Host', 'www.mydom3.com')], method='POST',
body='value=dom3+POST')
self.assertBody('You sent dom3 POST')
self.getPage('/vmethod/pos', [('Host', 'www.mydom3.com')])
self.assertBody('You sent pos')
# Test that cherrypy.url uses the browser url, not the virtual url
self.getPage('/url', [('Host', 'www.mydom2.com')])
self.assertBody('%s://www.mydom2.com/nextpage' % self.scheme)
def test_VHost_plus_Static(self):
# Test static as a handler
self.getPage('/static/style.css', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/css;charset=utf-8')
# Test static in config
self.getPage('/static2/dirback.jpg', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertHeaderIn('Content-Type', ['image/jpeg', 'image/pjpeg'])
# Test static config with "index" arg
self.getPage('/static2/', [('Host', 'www.mydom2.com')])
self.assertStatus('200 OK')
self.assertBody('Hello, world\r\n')
# Since tools.trailing_slash is on by default, this should redirect
self.getPage('/static2', [('Host', 'www.mydom2.com')])
self.assertStatus(301)
|
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ps4
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_GAME,
)
from homeassistant.components.ps4.const import (
ATTR_MEDIA_IMAGE_URL,
COMMANDS,
CONFIG_ENTRY_VERSION as VERSION,
DEFAULT_REGION,
DOMAIN,
PS4_DATA,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ATTR_LOCKED,
CONF_HOST,
CONF_NAME,
CONF_REGION,
CONF_TOKEN,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from homeassistant.util import location
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry, mock_registry
MOCK_HOST = "192.168.0.1"
MOCK_NAME = "test_ps4"
MOCK_REGION = "Some Region"
MOCK_CREDS = "1234567890A"
MOCK_DEVICE = {CONF_HOST: MOCK_HOST, CONF_NAME: MOCK_NAME, CONF_REGION: MOCK_REGION}
MOCK_DATA = {CONF_TOKEN: MOCK_CREDS, "devices": [MOCK_DEVICE]}
MOCK_FLOW_RESULT = {
"version": VERSION,
"handler": DOMAIN,
"type": data_entry_flow.RESULT_TYPE_CREATE_ENTRY,
"title": "test_ps4",
"data": MOCK_DATA,
}
MOCK_ENTRY_ID = "SomeID"
MOCK_CONFIG = MockConfigEntry(domain=DOMAIN, data=MOCK_DATA, entry_id=MOCK_ENTRY_ID)
MOCK_LOCATION = location.LocationInfo(
"0.0.0.0",
"US",
"United States",
"CA",
"California",
"San Diego",
"92122",
"America/Los_Angeles",
32.8594,
-117.2073,
True,
)
MOCK_DEVICE_VERSION_1 = {
CONF_HOST: MOCK_HOST,
CONF_NAME: MOCK_NAME,
CONF_REGION: "Some Region",
}
MOCK_DATA_VERSION_1 = {CONF_TOKEN: MOCK_CREDS, "devices": [MOCK_DEVICE_VERSION_1]}
MOCK_DEVICE_ID = "somedeviceid"
MOCK_ENTRY_VERSION_1 = MockConfigEntry(
domain=DOMAIN, data=MOCK_DATA_VERSION_1, entry_id=MOCK_ENTRY_ID, version=1
)
MOCK_UNIQUE_ID = "someuniqueid"
MOCK_ID = "CUSA00123"
MOCK_URL = "http://someurl.jpeg"
MOCK_TITLE = "Some Title"
MOCK_TYPE = MEDIA_TYPE_GAME
MOCK_GAMES_DATA_OLD_STR_FORMAT = {"mock_id": "mock_title", "mock_id2": "mock_title2"}
MOCK_GAMES_DATA = {
ATTR_LOCKED: False,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
ATTR_MEDIA_IMAGE_URL: MOCK_URL,
ATTR_MEDIA_TITLE: MOCK_TITLE,
}
MOCK_GAMES_DATA_LOCKED = {
ATTR_LOCKED: True,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
ATTR_MEDIA_IMAGE_URL: MOCK_URL,
ATTR_MEDIA_TITLE: MOCK_TITLE,
}
MOCK_GAMES = {MOCK_ID: MOCK_GAMES_DATA}
MOCK_GAMES_LOCKED = {MOCK_ID: MOCK_GAMES_DATA_LOCKED}
async def test_ps4_integration_setup(hass):
"""Test PS4 integration is setup."""
await ps4.async_setup(hass, {})
await hass.async_block_till_done()
assert hass.data[PS4_DATA].protocol is not None
async def test_creating_entry_sets_up_media_player(hass):
"""Test setting up PS4 loads the media player."""
mock_flow = "homeassistant.components.ps4.PlayStation4FlowHandler.async_step_user"
with patch(
"homeassistant.components.ps4.media_player.async_setup_entry",
return_value=True,
) as mock_setup, patch(mock_flow, return_value=MOCK_FLOW_RESULT):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
async def test_config_flow_entry_migrate(hass):
"""Test that config flow entry is migrated correctly."""
# Start with the config entry at Version 1.
manager = hass.config_entries
mock_entry = MOCK_ENTRY_VERSION_1
mock_entry.add_to_manager(manager)
mock_e_registry = mock_registry(hass)
mock_entity_id = f"media_player.ps4_{MOCK_UNIQUE_ID}"
mock_e_entry = mock_e_registry.async_get_or_create(
"media_player",
"ps4",
MOCK_UNIQUE_ID,
config_entry=mock_entry,
device_id=MOCK_DEVICE_ID,
)
assert len(mock_e_registry.entities) == 1
assert mock_e_entry.entity_id == mock_entity_id
assert mock_e_entry.unique_id == MOCK_UNIQUE_ID
with patch(
"homeassistant.util.location.async_detect_location_info",
return_value=MOCK_LOCATION,
), patch(
"homeassistant.helpers.entity_registry.async_get_registry",
return_value=mock_e_registry,
):
await ps4.async_migrate_entry(hass, mock_entry)
await hass.async_block_till_done()
assert len(mock_e_registry.entities) == 1
for entity in mock_e_registry.entities.values():
mock_entity = entity
# Test that entity_id remains the same.
assert mock_entity.entity_id == mock_entity_id
assert mock_entity.device_id == MOCK_DEVICE_ID
# Test that the last four characters of the credentials are appended to the unique_id.
assert mock_entity.unique_id == "{}_{}".format(MOCK_UNIQUE_ID, MOCK_CREDS[-4:])
# Test that config entry is at the current version.
assert mock_entry.version == VERSION
assert mock_entry.data[CONF_TOKEN] == MOCK_CREDS
assert mock_entry.data["devices"][0][CONF_HOST] == MOCK_HOST
assert mock_entry.data["devices"][0][CONF_NAME] == MOCK_NAME
assert mock_entry.data["devices"][0][CONF_REGION] == DEFAULT_REGION
async def test_media_player_is_setup(hass):
"""Test media_player is setup correctly."""
await setup_mock_component(hass)
assert len(hass.data[PS4_DATA].devices) == 1
async def setup_mock_component(hass):
"""Set up Mock Media Player."""
entry = MockConfigEntry(domain=ps4.DOMAIN, data=MOCK_DATA, version=VERSION)
entry.add_to_manager(hass.config_entries)
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
def test_games_reformat_to_dict(hass):
"""Test old data format is converted to new format."""
with patch(
"homeassistant.components.ps4.load_json",
return_value=MOCK_GAMES_DATA_OLD_STR_FORMAT,
), patch("homeassistant.components.ps4.save_json", side_effect=MagicMock()), patch(
"os.path.isfile", return_value=True
):
mock_games = ps4.load_games(hass, MOCK_ENTRY_ID)
# New format is a nested dict.
assert isinstance(mock_games, dict)
assert mock_games["mock_id"][ATTR_MEDIA_TITLE] == "mock_title"
assert mock_games["mock_id2"][ATTR_MEDIA_TITLE] == "mock_title2"
for mock_game in mock_games:
mock_data = mock_games[mock_game]
assert isinstance(mock_data, dict)
assert mock_data
assert mock_data[ATTR_MEDIA_IMAGE_URL] is None
assert mock_data[ATTR_LOCKED] is False
assert mock_data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_GAME
def test_load_games(hass):
"""Test that games are loaded correctly."""
with patch(
"homeassistant.components.ps4.load_json", return_value=MOCK_GAMES
), patch("homeassistant.components.ps4.save_json", side_effect=MagicMock()), patch(
"os.path.isfile", return_value=True
):
mock_games = ps4.load_games(hass, MOCK_ENTRY_ID)
assert isinstance(mock_games, dict)
mock_data = mock_games[MOCK_ID]
assert isinstance(mock_data, dict)
assert mock_data[ATTR_MEDIA_TITLE] == MOCK_TITLE
assert mock_data[ATTR_MEDIA_IMAGE_URL] == MOCK_URL
assert mock_data[ATTR_LOCKED] is False
assert mock_data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_GAME
def test_loading_games_returns_dict(hass):
"""Test that loading games always returns a dict."""
with patch(
"homeassistant.components.ps4.load_json", side_effect=HomeAssistantError
), patch("homeassistant.components.ps4.save_json", side_effect=MagicMock()), patch(
"os.path.isfile", return_value=True
):
mock_games = ps4.load_games(hass, MOCK_ENTRY_ID)
assert isinstance(mock_games, dict)
assert not mock_games
with patch(
"homeassistant.components.ps4.load_json", return_value="Some String"
), patch("homeassistant.components.ps4.save_json", side_effect=MagicMock()), patch(
"os.path.isfile", return_value=True
):
mock_games = ps4.load_games(hass, MOCK_ENTRY_ID)
assert isinstance(mock_games, dict)
assert not mock_games
with patch("homeassistant.components.ps4.load_json", return_value=[]), patch(
"homeassistant.components.ps4.save_json", side_effect=MagicMock()
), patch("os.path.isfile", return_value=True):
mock_games = ps4.load_games(hass, MOCK_ENTRY_ID)
assert isinstance(mock_games, dict)
assert not mock_games
async def test_send_command(hass):
"""Test send_command service."""
await setup_mock_component(hass)
mock_func = "{}{}".format(
"homeassistant.components.ps4", ".media_player.PS4Device.async_send_command"
)
mock_devices = hass.data[PS4_DATA].devices
assert len(mock_devices) == 1
mock_entity = mock_devices[0]
assert mock_entity.entity_id == f"media_player.{MOCK_NAME}"
# Test that all commands call service function.
with patch(mock_func, return_value=True) as mock_service:
for mock_command in COMMANDS:
await hass.services.async_call(
DOMAIN,
"send_command",
{ATTR_ENTITY_ID: mock_entity.entity_id, ATTR_COMMAND: mock_command},
)
await hass.async_block_till_done()
assert len(mock_service.mock_calls) == len(COMMANDS)
|
import argparse
import getpass
import os
from homeassistant.util.yaml import _SECRET_NAMESPACE
# mypy: allow-untyped-defs
REQUIREMENTS = ["keyring==21.2.0", "keyrings.alt==3.4.0"]
def run(args):
"""Handle keyring script."""
parser = argparse.ArgumentParser(
description=(
"Modify Home Assistant secrets in the default keyring. "
"Use the secrets in configuration files with: "
"!secret <name>"
)
)
parser.add_argument("--script", choices=["keyring"])
parser.add_argument(
"action",
choices=["get", "set", "del", "info"],
help="Get, set or delete a secret",
)
parser.add_argument("name", help="Name of the secret", nargs="?", default=None)
import keyring # pylint: disable=import-outside-toplevel
# pylint: disable=import-outside-toplevel
from keyring.util import platform_ as platform
args = parser.parse_args(args)
if args.action == "info":
keyr = keyring.get_keyring()
print("Keyring version {}\n".format(REQUIREMENTS[0].split("==")[1]))
print(f"Active keyring : {keyr.__module__}")
config_name = os.path.join(platform.config_root(), "keyringrc.cfg")
print(f"Config location : {config_name}")
print(f"Data location : {platform.data_root()}\n")
elif args.name is None:
parser.print_help()
return 1
if args.action == "set":
entered_secret = getpass.getpass(f"Please enter the secret for {args.name}: ")
keyring.set_password(_SECRET_NAMESPACE, args.name, entered_secret)
print(f"Secret {args.name} set successfully")
elif args.action == "get":
the_secret = keyring.get_password(_SECRET_NAMESPACE, args.name)
if the_secret is None:
print(f"Secret {args.name} not found")
else:
print(f"Secret {args.name}={the_secret}")
elif args.action == "del":
try:
keyring.delete_password(_SECRET_NAMESPACE, args.name)
print(f"Deleted secret {args.name}")
except keyring.errors.PasswordDeleteError:
print(f"Secret {args.name} not found")
|
__docformat__ = "restructuredtext en"
import os
import sys
import logging
from six import string_types
from logilab.common.textutils import colorize_ansi
def set_log_methods(cls, logger):
"""bind standard logger's methods as methods on the class"""
cls.__logger = logger
for attr in ('debug', 'info', 'warning', 'error', 'critical', 'exception'):
setattr(cls, attr, getattr(logger, attr))
def xxx_cyan(record):
if 'XXX' in record.message:
return 'cyan'
class ColorFormatter(logging.Formatter):
"""
A color Formatter for the logging standard module.
By default, colorize CRITICAL and ERROR in red, WARNING in magenta, INFO in
green and DEBUG in yellow.
self.colors is customizable via the 'color' constructor argument (dictionary).
self.colorfilters is a list of functions that get the LogRecord
and return a color name or None.
"""
def __init__(self, fmt=None, datefmt=None, colors=None):
logging.Formatter.__init__(self, fmt, datefmt)
self.colorfilters = []
self.colors = {'CRITICAL': 'red',
'ERROR': 'red',
'WARNING': 'magenta',
'INFO': 'green',
'DEBUG': 'yellow',
}
if colors is not None:
assert isinstance(colors, dict)
self.colors.update(colors)
def format(self, record):
msg = logging.Formatter.format(self, record)
if record.levelname in self.colors:
color = self.colors[record.levelname]
return colorize_ansi(msg, color)
else:
for cf in self.colorfilters:
color = cf(record)
if color:
return colorize_ansi(msg, color)
return msg
def set_color_formatter(logger=None, **kw):
"""
Install a color formatter on the 'logger'. If not given, it
defaults to the root logger.
Any additional keyword will be passed as-is to the ColorFormatter
constructor.
"""
if logger is None:
logger = logging.getLogger()
if not logger.handlers:
logging.basicConfig()
format_msg = logger.handlers[0].formatter._fmt
fmt = ColorFormatter(format_msg, **kw)
fmt.colorfilters.append(xxx_cyan)
logger.handlers[0].setFormatter(fmt)
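# Illustrative sketch (not part of the original module): install the color
# formatter on the root logger and override one of the default level colors;
# keyword arguments are passed straight to the ColorFormatter constructor.
#
#     import logging
#     logging.basicConfig()
#     set_color_formatter(colors={'INFO': 'blue'})
#     logging.getLogger().info('colorized blue on ANSI-capable terminals')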
LOG_FORMAT = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def get_handler(debug=False, syslog=False, logfile=None, rotation_parameters=None):
"""get an apropriate handler according to given parameters"""
if os.environ.get('APYCOT_ROOT'):
handler = logging.StreamHandler(sys.stdout)
if debug:
handler = logging.StreamHandler()
elif logfile is None:
if syslog:
from logging import handlers
handler = handlers.SysLogHandler()
else:
handler = logging.StreamHandler()
else:
try:
if rotation_parameters is None:
if os.name == 'posix' and sys.version_info >= (2, 6):
from logging.handlers import WatchedFileHandler
handler = WatchedFileHandler(logfile)
else:
handler = logging.FileHandler(logfile)
else:
from logging.handlers import TimedRotatingFileHandler
handler = TimedRotatingFileHandler(
logfile, **rotation_parameters)
except IOError:
handler = logging.StreamHandler()
return handler
def get_threshold(debug=False, logthreshold=None):
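"""Return the logging level to use, derived from debug and logthreshold."""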
if logthreshold is None:
if debug:
logthreshold = logging.DEBUG
else:
logthreshold = logging.ERROR
elif isinstance(logthreshold, string_types):
logthreshold = getattr(logging, THRESHOLD_MAP.get(logthreshold,
logthreshold))
return logthreshold
def _colorable_terminal():
isatty = hasattr(sys.__stdout__, 'isatty') and sys.__stdout__.isatty()
if not isatty:
return False
if os.name == 'nt':
try:
from colorama import init as init_win32_colors
except ImportError:
return False
init_win32_colors()
return True
def get_formatter(logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT):
if _colorable_terminal():
fmt = ColorFormatter(logformat, logdateformat)
def col_fact(record):
if 'XXX' in record.message:
return 'cyan'
if 'kick' in record.message:
return 'red'
fmt.colorfilters.append(col_fact)
else:
fmt = logging.Formatter(logformat, logdateformat)
return fmt
def init_log(debug=False, syslog=False, logthreshold=None, logfile=None,
logformat=LOG_FORMAT, logdateformat=LOG_DATE_FORMAT, fmt=None,
rotation_parameters=None, handler=None):
"""init the log service"""
logger = logging.getLogger()
if handler is None:
handler = get_handler(debug, syslog, logfile, rotation_parameters)
# logging only offers addHandler and removeHandler while we would like a
# setHandler method, so do it this way :$
logger.handlers = [handler]
logthreshold = get_threshold(debug, logthreshold)
logger.setLevel(logthreshold)
if fmt is None:
if debug:
fmt = get_formatter(logformat=logformat, logdateformat=logdateformat)
else:
fmt = logging.Formatter(logformat, logdateformat)
handler.setFormatter(fmt)
return handler
# map logilab.common.logger thresholds to logging thresholds
THRESHOLD_MAP = {'LOG_DEBUG': 'DEBUG',
'LOG_INFO': 'INFO',
'LOG_NOTICE': 'INFO',
'LOG_WARN': 'WARNING',
'LOG_WARNING': 'WARNING',
'LOG_ERR': 'ERROR',
'LOG_ERROR': 'ERROR',
'LOG_CRIT': 'CRITICAL',
}
|
import copy
import datetime
import tempfile
from absl import flags
from perfkitbenchmarker import beam_benchmark_helper
from perfkitbenchmarker import beam_pipeline_options
from perfkitbenchmarker import configs
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import kubernetes_helper
from perfkitbenchmarker import sample
from perfkitbenchmarker.dpb_service import BaseDpbService
BENCHMARK_NAME = 'beam_integration_benchmark'
BENCHMARK_CONFIG = """
beam_integration_benchmark:
description: Run word count on dataflow and dataproc
dpb_service:
service_type: dataflow
worker_group:
vm_spec:
GCP:
machine_type: n1-standard-1
boot_disk_size: 500
AWS:
machine_type: m3.medium
disk_spec:
GCP:
disk_type: nodisk
AWS:
disk_size: 500
disk_type: gp2
worker_count: 2
"""
DEFAULT_JAVA_IT_CLASS = 'org.apache.beam.examples.WordCountIT'
DEFAULT_PYTHON_IT_MODULE = ('apache_beam.examples.wordcount_it_test:'
'WordCountIT.test_wordcount_it')
flags.DEFINE_string('beam_it_class', None, 'Path to IT class')
flags.DEFINE_string('beam_it_args', None, 'Args to provide to the IT.'
' Deprecated & replaced by beam_it_options')
flags.DEFINE_string('beam_it_options', None, 'Pipeline Options sent to the'
' integration test.')
flags.DEFINE_string('beam_kubernetes_scripts', None, 'A local path to the'
' Kubernetes scripts to run which will instantiate a'
' datastore.')
flags.DEFINE_string('beam_options_config_file', None, 'A local path to the'
' yaml file defining static and dynamic pipeline options to'
' use for this benchmark run.')
FLAGS = flags.FLAGS
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config_spec):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.errors.Config.InvalidValue: If no Beam args are provided.
NotImplementedError: If an invalid runner is specified.
"""
if FLAGS.beam_it_options is None and FLAGS.beam_it_args is None:
raise errors.Config.InvalidValue(
'No options provided. To run with default class (WordCountIT), must'
' provide --beam_it_options=--tempRoot=<temp dir,'
' e.g. gs://my-dir/temp>.')
if FLAGS.beam_sdk is None:
raise errors.Config.InvalidValue(
'No sdk provided. To run Beam integration benchmark, the test must'
' specify which sdk is used in the pipeline. For example, java/python.')
if benchmark_config_spec.dpb_service.service_type != dpb_service.DATAFLOW:
raise NotImplementedError('Currently only works against Dataflow.')
if (FLAGS.beam_it_options and
(not FLAGS.beam_it_options.endswith(']') or
not FLAGS.beam_it_options.startswith('['))):
raise Exception("beam_it_options must be of form"
" [\"--option=value\",\"--option2=val2\"]")
def Prepare(benchmark_spec):
beam_benchmark_helper.InitializeBeamRepo(benchmark_spec)
benchmark_spec.always_call_cleanup = True
kubernetes_helper.CreateAllFiles(getKubernetesScripts())
pass
def getKubernetesScripts():
if FLAGS.beam_kubernetes_scripts:
scripts = FLAGS.beam_kubernetes_scripts.split(',')
return scripts
else:
return []
def Run(benchmark_spec):
# Get handle to the dpb service
dpb_service_instance = benchmark_spec.dpb_service
# Create a file handle to contain the response from running the job on
# the dpb service
stdout_file = tempfile.NamedTemporaryFile(suffix='.stdout',
prefix='beam_integration_benchmark',
delete=False)
stdout_file.close()
if FLAGS.beam_it_class is None:
if FLAGS.beam_sdk == beam_benchmark_helper.BEAM_JAVA_SDK:
classname = DEFAULT_JAVA_IT_CLASS
elif FLAGS.beam_sdk == beam_benchmark_helper.BEAM_PYTHON_SDK:
classname = DEFAULT_PYTHON_IT_MODULE
else:
raise NotImplementedError('Unsupported Beam SDK: %s.' % FLAGS.beam_sdk)
else:
classname = FLAGS.beam_it_class
static_pipeline_options, dynamic_pipeline_options = \
beam_pipeline_options.ReadPipelineOptionConfigFile()
job_arguments = beam_pipeline_options.GenerateAllPipelineOptions(
FLAGS.beam_it_args, FLAGS.beam_it_options,
static_pipeline_options,
dynamic_pipeline_options)
job_type = BaseDpbService.BEAM_JOB_TYPE
results = []
metadata = copy.copy(dpb_service_instance.GetMetadata())
start = datetime.datetime.now()
dpb_service_instance.SubmitJob(
classname=classname,
job_arguments=job_arguments,
job_stdout_file=stdout_file,
job_type=job_type)
end_time = datetime.datetime.now()
run_time = (end_time - start).total_seconds()
results.append(sample.Sample('run_time', run_time, 'seconds', metadata))
return results
def Cleanup(benchmark_spec):
kubernetes_helper.DeleteAllFiles(getKubernetesScripts())
pass
|
import asyncio
from ssl import SSLContext
from typing import List, Optional, Union
from aiohttp import web
from yarl import URL
class HomeAssistantTCPSite(web.BaseSite):
"""HomeAssistant specific aiohttp Site.
Vanilla TCPSite accepts only str as host. However, the underlying asyncio's
create_server() implementation does take a list of strings to bind to multiple
host IPs. To support multiple server_host entries (e.g. to enable dual-stack
explicitly), we would like to pass a list of strings, so we bring our own
implementation inspired by TCPSite.
Custom TCPSite can be dropped when https://github.com/aio-libs/aiohttp/pull/4894
is merged.
"""
__slots__ = ("_host", "_port", "_reuse_address", "_reuse_port", "_hosturl")
def __init__(
self,
runner: "web.BaseRunner",
host: Union[None, str, List[str]],
port: int,
*,
shutdown_timeout: float = 60.0,
ssl_context: Optional[SSLContext] = None,
backlog: int = 128,
reuse_address: Optional[bool] = None,
reuse_port: Optional[bool] = None,
) -> None: # noqa: D107
super().__init__(
runner,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
)
self._host = host
self._port = port
self._reuse_address = reuse_address
self._reuse_port = reuse_port
@property
def name(self) -> str: # noqa: D102
scheme = "https" if self._ssl_context else "http"
host = self._host[0] if isinstance(self._host, list) else "0.0.0.0"
return str(URL.build(scheme=scheme, host=host, port=self._port))
async def start(self) -> None: # noqa: D102
await super().start()
loop = asyncio.get_running_loop()
server = self._runner.server
assert server is not None
self._server = await loop.create_server(
server,
self._host,
self._port,
ssl=self._ssl_context,
backlog=self._backlog,
reuse_address=self._reuse_address,
reuse_port=self._reuse_port,
)
|
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.misc import autoupdate, httpclient
INVALID_JSON = ['{"invalid": { "json"}', '{"wrong": "keys"}']
class HTTPGetStub(httpclient.HTTPClient):
"""A stub class for HTTPClient.
Attributes:
url: The last URL requested via get().
_success: Whether get() will emit a success signal.
"""
def __init__(self, success=True, json=None):
super().__init__()
self.url = None
self._success = success
if json:
self._json = json
else:
self._json = '{"info": {"version": "test"}}'
def get(self, url):
self.url = url
if self._success:
self.success.emit(self._json)
else:
self.error.emit("error")
def test_constructor(qapp):
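"""Test that PyPIVersionClient creates an HTTPClient by default."""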
client = autoupdate.PyPIVersionClient()
assert isinstance(client._client, httpclient.HTTPClient)
def test_get_version_success(qtbot):
"""Test get_version() when success is emitted."""
http_stub = HTTPGetStub(success=True)
client = autoupdate.PyPIVersionClient(client=http_stub)
with qtbot.assertNotEmitted(client.error):
with qtbot.waitSignal(client.success):
client.get_version('test')
assert http_stub.url == QUrl(client.API_URL.format('test'))
def test_get_version_error(qtbot):
"""Test get_version() when error is emitted."""
http_stub = HTTPGetStub(success=False)
client = autoupdate.PyPIVersionClient(client=http_stub)
with qtbot.assertNotEmitted(client.success):
with qtbot.waitSignal(client.error):
client.get_version('test')
@pytest.mark.parametrize('json', INVALID_JSON)
def test_invalid_json(qtbot, json):
"""Test on_client_success() with invalid JSON."""
http_stub = HTTPGetStub(json=json)
client = autoupdate.PyPIVersionClient(client=http_stub)
client.get_version('test')
with qtbot.assertNotEmitted(client.success):
with qtbot.waitSignal(client.error):
client.get_version('test')
|
import logging
import blebox_uniapi
import pytest
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_WHITE_VALUE,
)
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.util import color
from .conftest import async_setup_entity, mock_feature
from tests.async_mock import AsyncMock, PropertyMock
ALL_LIGHT_FIXTURES = ["dimmer", "wlightbox_s", "wlightbox"]
@pytest.fixture(name="dimmer")
def dimmer_fixture():
"""Return a default light entity mock."""
feature = mock_feature(
"lights",
blebox_uniapi.light.Light,
unique_id="BleBox-dimmerBox-1afe34e750b8-brightness",
full_name="dimmerBox-brightness",
device_class=None,
brightness=65,
is_on=True,
supports_color=False,
supports_white=False,
)
product = feature.product
type(product).name = PropertyMock(return_value="My dimmer")
type(product).model = PropertyMock(return_value="dimmerBox")
return (feature, "light.dimmerbox_brightness")
async def test_dimmer_init(dimmer, hass, config):
"""Test cover default state."""
_, entity_id = dimmer
entry = await async_setup_entity(hass, config, entity_id)
assert entry.unique_id == "BleBox-dimmerBox-1afe34e750b8-brightness"
state = hass.states.get(entity_id)
assert state.name == "dimmerBox-brightness"
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
assert supported_features & SUPPORT_BRIGHTNESS
assert state.attributes[ATTR_BRIGHTNESS] == 65
assert state.state == STATE_ON
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My dimmer"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "dimmerBox"
assert device.sw_version == "1.23"
async def test_dimmer_update(dimmer, hass, config):
"""Test light updating."""
feature_mock, entity_id = dimmer
def initial_update():
feature_mock.brightness = 53
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_BRIGHTNESS] == 53
assert state.state == STATE_ON
async def test_dimmer_on(dimmer, hass, config):
"""Test light on."""
feature_mock, entity_id = dimmer
def initial_update():
feature_mock.is_on = False
feature_mock.brightness = 0 # off
feature_mock.sensible_on_value = 254
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
def turn_on(brightness):
assert brightness == 254
feature_mock.brightness = 254 # on
feature_mock.is_on = True # on
feature_mock.async_on = AsyncMock(side_effect=turn_on)
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{"entity_id": entity_id},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 254
async def test_dimmer_on_with_brightness(dimmer, hass, config):
"""Test light on with a brightness value."""
feature_mock, entity_id = dimmer
def initial_update():
feature_mock.is_on = False
feature_mock.brightness = 0 # off
feature_mock.sensible_on_value = 254
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
def turn_on(brightness):
assert brightness == 202
feature_mock.brightness = 202 # on
feature_mock.is_on = True # on
feature_mock.async_on = AsyncMock(side_effect=turn_on)
def apply(value, brightness):
assert value == 254
return brightness
feature_mock.apply_brightness = apply
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{"entity_id": entity_id, ATTR_BRIGHTNESS: 202},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_BRIGHTNESS] == 202
assert state.state == STATE_ON
async def test_dimmer_off(dimmer, hass, config):
"""Test light off."""
feature_mock, entity_id = dimmer
def initial_update():
feature_mock.is_on = True
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_ON
def turn_off():
feature_mock.is_on = False
feature_mock.brightness = 0 # off
feature_mock.async_off = AsyncMock(side_effect=turn_off)
await hass.services.async_call(
"light",
SERVICE_TURN_OFF,
{"entity_id": entity_id},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
assert ATTR_BRIGHTNESS not in state.attributes
@pytest.fixture(name="wlightbox_s")
def wlightboxs_fixture():
"""Return a default light entity mock."""
feature = mock_feature(
"lights",
blebox_uniapi.light.Light,
unique_id="BleBox-wLightBoxS-1afe34e750b8-color",
full_name="wLightBoxS-color",
device_class=None,
brightness=None,
is_on=None,
supports_color=False,
supports_white=False,
)
product = feature.product
type(product).name = PropertyMock(return_value="My wLightBoxS")
type(product).model = PropertyMock(return_value="wLightBoxS")
return (feature, "light.wlightboxs_color")
async def test_wlightbox_s_init(wlightbox_s, hass, config):
"""Test cover default state."""
_, entity_id = wlightbox_s
entry = await async_setup_entity(hass, config, entity_id)
assert entry.unique_id == "BleBox-wLightBoxS-1afe34e750b8-color"
state = hass.states.get(entity_id)
assert state.name == "wLightBoxS-color"
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
assert supported_features & SUPPORT_BRIGHTNESS
assert ATTR_BRIGHTNESS not in state.attributes
assert state.state == STATE_OFF
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My wLightBoxS"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "wLightBoxS"
assert device.sw_version == "1.23"
async def test_wlightbox_s_update(wlightbox_s, hass, config):
"""Test light updating."""
feature_mock, entity_id = wlightbox_s
def initial_update():
feature_mock.brightness = 0xAB
feature_mock.is_on = True
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
state = hass.states.get(entity_id)
assert state.state == STATE_ON
assert state.attributes[ATTR_BRIGHTNESS] == 0xAB
async def test_wlightbox_s_on(wlightbox_s, hass, config):
"""Test light on."""
feature_mock, entity_id = wlightbox_s
def initial_update():
feature_mock.is_on = False
feature_mock.sensible_on_value = 254
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
def turn_on(brightness):
assert brightness == 254
feature_mock.brightness = 254 # on
feature_mock.is_on = True # on
feature_mock.async_on = AsyncMock(side_effect=turn_on)
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{"entity_id": entity_id},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_BRIGHTNESS] == 254
assert state.state == STATE_ON
@pytest.fixture(name="wlightbox")
def wlightbox_fixture():
"""Return a default light entity mock."""
feature = mock_feature(
"lights",
blebox_uniapi.light.Light,
unique_id="BleBox-wLightBox-1afe34e750b8-color",
full_name="wLightBox-color",
device_class=None,
is_on=None,
supports_color=True,
supports_white=True,
white_value=None,
rgbw_hex=None,
)
product = feature.product
type(product).name = PropertyMock(return_value="My wLightBox")
type(product).model = PropertyMock(return_value="wLightBox")
return (feature, "light.wlightbox_color")
async def test_wlightbox_init(wlightbox, hass, config):
"""Test cover default state."""
_, entity_id = wlightbox
entry = await async_setup_entity(hass, config, entity_id)
assert entry.unique_id == "BleBox-wLightBox-1afe34e750b8-color"
state = hass.states.get(entity_id)
assert state.name == "wLightBox-color"
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
assert supported_features & SUPPORT_WHITE_VALUE
assert supported_features & SUPPORT_COLOR
assert ATTR_WHITE_VALUE not in state.attributes
assert ATTR_HS_COLOR not in state.attributes
assert ATTR_BRIGHTNESS not in state.attributes
assert state.state == STATE_OFF
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My wLightBox"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "wLightBox"
assert device.sw_version == "1.23"
async def test_wlightbox_update(wlightbox, hass, config):
"""Test light updating."""
feature_mock, entity_id = wlightbox
def initial_update():
feature_mock.is_on = True
feature_mock.rgbw_hex = "fa00203A"
feature_mock.white_value = 0x3A
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_HS_COLOR] == (352.32, 100.0)
assert state.attributes[ATTR_WHITE_VALUE] == 0x3A
assert state.state == STATE_ON
async def test_wlightbox_on_via_just_whiteness(wlightbox, hass, config):
"""Test light on."""
feature_mock, entity_id = wlightbox
def initial_update():
feature_mock.is_on = False
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
def turn_on(value):
feature_mock.is_on = True
assert value == "f1e2d3c7"
feature_mock.white_value = 0xC7 # on
feature_mock.rgbw_hex = "f1e2d3c7"
feature_mock.async_on = AsyncMock(side_effect=turn_on)
def apply_white(value, white):
assert value == "f1e2d305"
assert white == 0xC7
return "f1e2d3c7"
feature_mock.apply_white = apply_white
feature_mock.sensible_on_value = "f1e2d305"
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{"entity_id": entity_id, ATTR_WHITE_VALUE: 0xC7},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.state == STATE_ON
assert state.attributes[ATTR_WHITE_VALUE] == 0xC7
assert state.attributes[ATTR_HS_COLOR] == color.color_RGB_to_hs(0xF1, 0xE2, 0xD3)
async def test_wlightbox_on_via_reset_whiteness(wlightbox, hass, config):
"""Test light on."""
feature_mock, entity_id = wlightbox
def initial_update():
feature_mock.is_on = False
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
def turn_on(value):
feature_mock.is_on = True
feature_mock.white_value = 0x0
assert value == "f1e2d300"
feature_mock.rgbw_hex = "f1e2d300"
feature_mock.async_on = AsyncMock(side_effect=turn_on)
def apply_white(value, white):
assert value == "f1e2d305"
assert white == 0x0
return "f1e2d300"
feature_mock.apply_white = apply_white
feature_mock.sensible_on_value = "f1e2d305"
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{"entity_id": entity_id, ATTR_WHITE_VALUE: 0x0},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.state == STATE_ON
assert state.attributes[ATTR_WHITE_VALUE] == 0x0
assert state.attributes[ATTR_HS_COLOR] == color.color_RGB_to_hs(0xF1, 0xE2, 0xD3)
async def test_wlightbox_on_via_just_hsl_color(wlightbox, hass, config):
"""Test light on."""
feature_mock, entity_id = wlightbox
def initial_update():
feature_mock.is_on = False
feature_mock.rgbw_hex = "00000000"
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
hs_color = color.color_RGB_to_hs(0xFF, 0xA1, 0xB2)
def turn_on(value):
feature_mock.is_on = True
assert value == "ffa1b2e4"
feature_mock.white_value = 0xE4
feature_mock.rgbw_hex = value
feature_mock.async_on = AsyncMock(side_effect=turn_on)
def apply_color(value, color_value):
assert value == "c1a2e3e4"
assert color_value == "ffa0b1"
return "ffa1b2e4"
feature_mock.apply_color = apply_color
feature_mock.sensible_on_value = "c1a2e3e4"
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{"entity_id": entity_id, ATTR_HS_COLOR: hs_color},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_HS_COLOR] == hs_color
assert state.attributes[ATTR_WHITE_VALUE] == 0xE4
assert state.state == STATE_ON
async def test_wlightbox_on_to_last_color(wlightbox, hass, config):
"""Test light on."""
feature_mock, entity_id = wlightbox
def initial_update():
feature_mock.is_on = False
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_OFF
def turn_on(value):
feature_mock.is_on = True
assert value == "f1e2d3e4"
feature_mock.white_value = 0xE4
feature_mock.rgbw_hex = value
feature_mock.async_on = AsyncMock(side_effect=turn_on)
feature_mock.sensible_on_value = "f1e2d3e4"
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{"entity_id": entity_id},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_WHITE_VALUE] == 0xE4
assert state.attributes[ATTR_HS_COLOR] == color.color_RGB_to_hs(0xF1, 0xE2, 0xD3)
assert state.state == STATE_ON
async def test_wlightbox_off(wlightbox, hass, config):
"""Test light off."""
feature_mock, entity_id = wlightbox
def initial_update():
feature_mock.is_on = True
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
state = hass.states.get(entity_id)
assert state.state == STATE_ON
def turn_off():
feature_mock.is_on = False
feature_mock.white_value = 0x0
feature_mock.rgbw_hex = "00000000"
feature_mock.async_off = AsyncMock(side_effect=turn_off)
await hass.services.async_call(
"light",
SERVICE_TURN_OFF,
{"entity_id": entity_id},
blocking=True,
)
state = hass.states.get(entity_id)
assert ATTR_WHITE_VALUE not in state.attributes
assert ATTR_HS_COLOR not in state.attributes
assert state.state == STATE_OFF
@pytest.mark.parametrize("feature", ALL_LIGHT_FIXTURES, indirect=["feature"])
async def test_update_failure(feature, hass, config, caplog):
"""Test that update failures are logged."""
caplog.set_level(logging.ERROR)
feature_mock, entity_id = feature
feature_mock.async_update = AsyncMock(side_effect=blebox_uniapi.error.ClientError)
await async_setup_entity(hass, config, entity_id)
assert f"Updating '{feature_mock.full_name}' failed: " in caplog.text
@pytest.mark.parametrize("feature", ALL_LIGHT_FIXTURES, indirect=["feature"])
async def test_turn_on_failure(feature, hass, config, caplog):
"""Test that turn_on failures are logged."""
caplog.set_level(logging.ERROR)
feature_mock, entity_id = feature
feature_mock.async_on = AsyncMock(side_effect=blebox_uniapi.error.BadOnValueError)
await async_setup_entity(hass, config, entity_id)
feature_mock.sensible_on_value = 123
await hass.services.async_call(
"light",
SERVICE_TURN_ON,
{"entity_id": entity_id},
blocking=True,
)
assert (
f"Turning on '{feature_mock.full_name}' failed: Bad value 123 ()" in caplog.text
)
|
from test import CollectorTestCase
from test import get_collector_config
from snmpinterface import SNMPInterfaceCollector
class TestSNMPInterfaceCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('SNMPInterfaceCollector', {
})
self.collector = SNMPInterfaceCollector(config, None)
def test_import(self):
self.assertTrue(SNMPInterfaceCollector)
|
import numpy as np
from ..utils import read_str
def _unpack_matrix(fid, rows, cols, dtype, out_dtype):
"""Unpack matrix."""
dtype = np.dtype(dtype)
string = fid.read(int(dtype.itemsize * rows * cols))
out = np.frombuffer(string, dtype=dtype).reshape(
rows, cols).astype(out_dtype)
return out
def _unpack_simple(fid, dtype, out_dtype):
"""Unpack a NumPy type."""
dtype = np.dtype(dtype)
string = fid.read(dtype.itemsize)
out = np.frombuffer(string, dtype=dtype).astype(out_dtype)
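# a single value was read; return it as a scalar rather than a 1-element array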
if len(out) > 0:
out = out[0]
return out
def read_char(fid, count=1):
"""Read character from bti file."""
return _unpack_simple(fid, '>S%s' % count, 'S')
def read_bool(fid):
"""Read bool value from bti file."""
return _unpack_simple(fid, '>?', bool)
def read_uint8(fid):
"""Read unsigned 8bit integer from bti file."""
return _unpack_simple(fid, '>u1', np.uint8)
def read_int8(fid):
"""Read 8bit integer from bti file."""
return _unpack_simple(fid, '>i1', np.int8)
def read_uint16(fid):
"""Read unsigned 16bit integer from bti file."""
return _unpack_simple(fid, '>u2', np.uint16)
def read_int16(fid):
"""Read 16bit integer from bti file."""
return _unpack_simple(fid, '>i2', np.int16)
def read_uint32(fid):
"""Read unsigned 32bit integer from bti file."""
return _unpack_simple(fid, '>u4', np.uint32)
def read_int32(fid):
"""Read 32bit integer from bti file."""
return _unpack_simple(fid, '>i4', np.int32)
def read_uint64(fid):
"""Read unsigned 64bit integer from bti file."""
return _unpack_simple(fid, '>u8', np.uint64)
def read_int64(fid):
"""Read 64bit integer from bti file."""
return _unpack_simple(fid, '>i8', np.int64)
def read_float(fid):
"""Read 32bit float from bti file."""
return _unpack_simple(fid, '>f4', np.float32)
def read_double(fid):
"""Read 64bit float from bti file."""
return _unpack_simple(fid, '>f8', np.float64)
def read_int16_matrix(fid, rows, cols):
"""Read 16bit integer matrix from bti file."""
return _unpack_matrix(fid, rows, cols, dtype='>i2',
out_dtype=np.int16)
def read_float_matrix(fid, rows, cols):
"""Read 32bit float matrix from bti file."""
return _unpack_matrix(fid, rows, cols, dtype='>f4',
out_dtype=np.float32)
def read_double_matrix(fid, rows, cols):
"""Read 64bit float matrix from bti file."""
return _unpack_matrix(fid, rows, cols, dtype='>f8',
out_dtype=np.float64)
def read_transform(fid):
"""Read 64bit float matrix transform from bti file."""
return read_double_matrix(fid, rows=4, cols=4)
def read_dev_header(x):
"""Create a dev header."""
return dict(size=read_int32(x), checksum=read_int32(x),
reserved=read_str(x, 32))
|
import asyncio
from datetime import timedelta
import functools
import logging
from typing import Optional
import aiohttp
from async_upnp_client import UpnpFactory
from async_upnp_client.aiohttp import AiohttpNotifyServer, AiohttpSessionRequester
from async_upnp_client.profiles.dlna import DeviceState, DmrDevice
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_IMAGE,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_TVSHOW,
MEDIA_TYPE_VIDEO,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_NAME,
CONF_URL,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import get_local_ip
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DLNA_DMR_DATA = "dlna_dmr"
DEFAULT_NAME = "DLNA Digital Media Renderer"
DEFAULT_LISTEN_PORT = 8301
CONF_LISTEN_IP = "listen_ip"
CONF_LISTEN_PORT = "listen_port"
CONF_CALLBACK_URL_OVERRIDE = "callback_url_override"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_LISTEN_IP): cv.string,
vol.Optional(CONF_LISTEN_PORT, default=DEFAULT_LISTEN_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CALLBACK_URL_OVERRIDE): cv.url,
}
)
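# Map Home Assistant media types to UPnP item classes used in media metadata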
HOME_ASSISTANT_UPNP_CLASS_MAPPING = {
MEDIA_TYPE_MUSIC: "object.item.audioItem",
MEDIA_TYPE_TVSHOW: "object.item.videoItem",
MEDIA_TYPE_MOVIE: "object.item.videoItem",
MEDIA_TYPE_VIDEO: "object.item.videoItem",
MEDIA_TYPE_EPISODE: "object.item.videoItem",
MEDIA_TYPE_CHANNEL: "object.item.videoItem",
MEDIA_TYPE_IMAGE: "object.item.imageItem",
MEDIA_TYPE_PLAYLIST: "object.item.playlistItem",
}
UPNP_CLASS_DEFAULT = "object.item"
HOME_ASSISTANT_UPNP_MIME_TYPE_MAPPING = {
MEDIA_TYPE_MUSIC: "audio/*",
MEDIA_TYPE_TVSHOW: "video/*",
MEDIA_TYPE_MOVIE: "video/*",
MEDIA_TYPE_VIDEO: "video/*",
MEDIA_TYPE_EPISODE: "video/*",
MEDIA_TYPE_CHANNEL: "video/*",
MEDIA_TYPE_IMAGE: "image/*",
MEDIA_TYPE_PLAYLIST: "playlist/*",
}
def catch_request_errors():
"""Catch asyncio.TimeoutError, aiohttp.ClientError errors."""
def call_wrapper(func):
"""Call wrapper for decorator."""
@functools.wraps(func)
async def wrapper(self, *args, **kwargs):
"""Catch asyncio.TimeoutError, aiohttp.ClientError errors."""
try:
return await func(self, *args, **kwargs)
except (asyncio.TimeoutError, aiohttp.ClientError):
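# log and swallow the error; the wrapped call then returns None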
_LOGGER.error("Error during call %s", func.__name__)
return wrapper
return call_wrapper
async def async_start_event_handler(
hass: HomeAssistantType,
server_host: str,
server_port: int,
requester,
callback_url_override: Optional[str] = None,
):
"""Register notify view."""
hass_data = hass.data[DLNA_DMR_DATA]
if "event_handler" in hass_data:
return hass_data["event_handler"]
# start event handler
server = AiohttpNotifyServer(
requester,
listen_port=server_port,
listen_host=server_host,
callback_url=callback_url_override,
)
await server.start_server()
_LOGGER.info("UPNP/DLNA event handler listening, url: %s", server.callback_url)
hass_data["notify_server"] = server
hass_data["event_handler"] = server.event_handler
# register for graceful shutdown
async def async_stop_server(event):
"""Stop server."""
_LOGGER.debug("Stopping UPNP/DLNA event handler")
await server.stop_server()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_server)
return hass_data["event_handler"]
async def async_setup_platform(
hass: HomeAssistantType, config, async_add_entities, discovery_info=None
):
"""Set up DLNA DMR platform."""
if config.get(CONF_URL) is not None:
url = config[CONF_URL]
name = config.get(CONF_NAME)
elif discovery_info is not None:
url = discovery_info["ssdp_description"]
name = discovery_info.get("name")
if DLNA_DMR_DATA not in hass.data:
hass.data[DLNA_DMR_DATA] = {}
if "lock" not in hass.data[DLNA_DMR_DATA]:
hass.data[DLNA_DMR_DATA]["lock"] = asyncio.Lock()
# build upnp/aiohttp requester
session = async_get_clientsession(hass)
requester = AiohttpSessionRequester(session, True)
# ensure event handler has been started
async with hass.data[DLNA_DMR_DATA]["lock"]:
server_host = config.get(CONF_LISTEN_IP)
if server_host is None:
server_host = get_local_ip()
server_port = config.get(CONF_LISTEN_PORT, DEFAULT_LISTEN_PORT)
callback_url_override = config.get(CONF_CALLBACK_URL_OVERRIDE)
event_handler = await async_start_event_handler(
hass, server_host, server_port, requester, callback_url_override
)
# create upnp device
factory = UpnpFactory(requester, disable_state_variable_validation=True)
try:
upnp_device = await factory.async_create_device(url)
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
raise PlatformNotReady() from err
# wrap with DmrDevice
dlna_device = DmrDevice(upnp_device, event_handler)
# create our own device
device = DlnaDmrDevice(dlna_device, name)
_LOGGER.debug("Adding device: %s", device)
async_add_entities([device], True)
class DlnaDmrDevice(MediaPlayerEntity):
"""Representation of a DLNA DMR device."""
def __init__(self, dmr_device, name=None):
"""Initialize DLNA DMR device."""
self._device = dmr_device
self._name = name
self._available = False
self._subscription_renew_time = None
async def async_added_to_hass(self):
"""Handle addition."""
self._device.on_event = self._on_event
# Register unsubscribe on stop
bus = self.hass.bus
bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self._async_on_hass_stop)
@property
def available(self):
"""Device is available."""
return self._available
async def _async_on_hass_stop(self, event):
"""Event handler on Home Assistant stop."""
async with self.hass.data[DLNA_DMR_DATA]["lock"]:
await self._device.async_unsubscribe_services()
async def async_update(self):
"""Retrieve the latest data."""
was_available = self._available
try:
await self._device.async_update()
self._available = True
except (asyncio.TimeoutError, aiohttp.ClientError):
self._available = False
_LOGGER.debug("Device unavailable")
return
# do we need to (re-)subscribe?
now = dt_util.utcnow()
should_renew = (
self._subscription_renew_time and now >= self._subscription_renew_time
)
if should_renew or (not was_available and self._available):
try:
timeout = await self._device.async_subscribe_services()
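# renew halfway through the subscription period so it does not lapse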
self._subscription_renew_time = dt_util.utcnow() + timeout / 2
except (asyncio.TimeoutError, aiohttp.ClientError):
self._available = False
_LOGGER.debug("Could not (re)subscribe")
def _on_event(self, service, state_variables):
"""State variable(s) changed, let home-assistant know."""
self.schedule_update_ha_state()
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported_features = 0
if self._device.has_volume_level:
supported_features |= SUPPORT_VOLUME_SET
if self._device.has_volume_mute:
supported_features |= SUPPORT_VOLUME_MUTE
if self._device.has_play:
supported_features |= SUPPORT_PLAY
if self._device.has_pause:
supported_features |= SUPPORT_PAUSE
if self._device.has_stop:
supported_features |= SUPPORT_STOP
if self._device.has_previous:
supported_features |= SUPPORT_PREVIOUS_TRACK
if self._device.has_next:
supported_features |= SUPPORT_NEXT_TRACK
if self._device.has_play_media:
supported_features |= SUPPORT_PLAY_MEDIA
if self._device.has_seek_rel_time:
supported_features |= SUPPORT_SEEK
return supported_features
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._device.volume_level
@catch_request_errors()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._device.async_set_volume_level(volume)
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._device.is_volume_muted
@catch_request_errors()
async def async_mute_volume(self, mute):
"""Mute the volume."""
desired_mute = bool(mute)
await self._device.async_mute_volume(desired_mute)
@catch_request_errors()
async def async_media_pause(self):
"""Send pause command."""
if not self._device.can_pause:
_LOGGER.debug("Cannot do Pause")
return
await self._device.async_pause()
@catch_request_errors()
async def async_media_play(self):
"""Send play command."""
if not self._device.can_play:
_LOGGER.debug("Cannot do Play")
return
await self._device.async_play()
@catch_request_errors()
async def async_media_stop(self):
"""Send stop command."""
if not self._device.can_stop:
_LOGGER.debug("Cannot do Stop")
return
await self._device.async_stop()
@catch_request_errors()
async def async_media_seek(self, position):
"""Send seek command."""
if not self._device.can_seek_rel_time:
_LOGGER.debug("Cannot do Seek/rel_time")
return
time = timedelta(seconds=position)
await self._device.async_seek_rel_time(time)
@catch_request_errors()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
title = "Home Assistant"
mime_type = HOME_ASSISTANT_UPNP_MIME_TYPE_MAPPING.get(media_type, media_type)
upnp_class = HOME_ASSISTANT_UPNP_CLASS_MAPPING.get(
media_type, UPNP_CLASS_DEFAULT
)
# Stop current playing media
if self._device.can_stop:
await self.async_media_stop()
# Queue media
await self._device.async_set_transport_uri(
media_id, title, mime_type, upnp_class
)
await self._device.async_wait_for_can_play()
# If already playing, no need to call Play
if self._device.state == DeviceState.PLAYING:
return
# Play it
await self.async_media_play()
@catch_request_errors()
async def async_media_previous_track(self):
"""Send previous track command."""
if not self._device.can_previous:
_LOGGER.debug("Cannot do Previous")
return
await self._device.async_previous()
@catch_request_errors()
async def async_media_next_track(self):
"""Send next track command."""
if not self._device.can_next:
_LOGGER.debug("Cannot do Next")
return
await self._device.async_next()
@property
def media_title(self):
"""Title of current playing media."""
return self._device.media_title
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._device.media_image_url
@property
def state(self):
"""State of the player."""
if not self._available:
return STATE_OFF
if self._device.state is None:
return STATE_ON
if self._device.state == DeviceState.PLAYING:
return STATE_PLAYING
if self._device.state == DeviceState.PAUSED:
return STATE_PAUSED
return STATE_IDLE
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._device.media_duration
@property
def media_position(self):
"""Position of current playing media in seconds."""
return self._device.media_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self._device.media_position_updated_at
@property
def name(self) -> str:
"""Return the name of the device."""
if self._name:
return self._name
return self._device.name
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return self._device.udn
|
import logging
from devolo_home_control_api.mydevolo import Mydevolo
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from .const import ( # pylint:disable=unused-import
CONF_HOMECONTROL,
CONF_MYDEVOLO,
DEFAULT_MPRM,
DEFAULT_MYDEVOLO,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class DevoloHomeControlFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a devolo HomeControl config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
def __init__(self):
"""Initialize devolo Home Control flow."""
self.data_schema = {
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
if self.show_advanced_options:
self.data_schema = {
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_MYDEVOLO, default=DEFAULT_MYDEVOLO): str,
vol.Required(CONF_HOMECONTROL, default=DEFAULT_MPRM): str,
}
if user_input is None:
return self._show_form(user_input)
user = user_input[CONF_USERNAME]
password = user_input[CONF_PASSWORD]
try:
mydevolo = Mydevolo.get_instance()
except SyntaxError:
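# no Mydevolo instance exists yet, so create a fresh one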
mydevolo = Mydevolo()
mydevolo.user = user
mydevolo.password = password
if self.show_advanced_options:
mydevolo.url = user_input[CONF_MYDEVOLO]
mprm = user_input[CONF_HOMECONTROL]
else:
mydevolo.url = DEFAULT_MYDEVOLO
mprm = DEFAULT_MPRM
credentials_valid = await self.hass.async_add_executor_job(
mydevolo.credentials_valid
)
if not credentials_valid:
return self._show_form({"base": "invalid_auth"})
_LOGGER.debug("Credentials valid")
gateway_ids = await self.hass.async_add_executor_job(mydevolo.get_gateway_ids)
await self.async_set_unique_id(gateway_ids[0])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title="devolo Home Control",
data={
CONF_PASSWORD: password,
CONF_USERNAME: user,
CONF_MYDEVOLO: mydevolo.url,
CONF_HOMECONTROL: mprm,
},
)
@callback
def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(self.data_schema),
errors=errors if errors else {},
)
|
import os
import os.path as op
from shutil import copyfile
import numpy as np
from scipy import sparse
import pytest
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
from mne.datasets import testing
from mne import (read_surface, write_surface, decimate_surface, pick_types,
dig_mri_distances)
from mne.surface import (read_morph_map, _compute_nearest, _tessellate_sphere,
fast_cross_3d, get_head_surf, read_curvature,
get_meg_helmet_surf, _normal_orth, _read_patch)
from mne.utils import (_TempDir, requires_vtk, catch_logging,
run_tests_if_main, object_diff, requires_freesurfer)
from mne.io import read_info
from mne.io.constants import FIFF
from mne.transforms import _get_trans
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
rng = np.random.RandomState(0)
def test_helmet():
"""Test loading helmet surfaces."""
base_dir = op.join(op.dirname(__file__), '..', 'io')
fname_raw = op.join(base_dir, 'tests', 'data', 'test_raw.fif')
fname_kit_raw = op.join(base_dir, 'kit', 'tests', 'data',
'test_bin_raw.fif')
fname_bti_raw = op.join(base_dir, 'bti', 'tests', 'data',
'exported4D_linux_raw.fif')
fname_ctf_raw = op.join(base_dir, 'tests', 'data', 'test_ctf_raw.fif')
fname_trans = op.join(base_dir, 'tests', 'data',
'sample-audvis-raw-trans.txt')
trans = _get_trans(fname_trans)[0]
new_info = read_info(fname_raw)
artemis_info = new_info.copy()
for pick in pick_types(new_info, meg=True):
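# give new_info an unrecognized coil type and artemis_info the Artemis123 gradiometer type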
new_info['chs'][pick]['coil_type'] = 9999
artemis_info['chs'][pick]['coil_type'] = \
FIFF.FIFFV_COIL_ARTEMIS123_GRAD
for info, n, name in [(read_info(fname_raw), 304, '306m'),
(read_info(fname_kit_raw), 150, 'KIT'), # Delaunay
(read_info(fname_bti_raw), 304, 'Magnes'),
(read_info(fname_ctf_raw), 342, 'CTF'),
(new_info, 102, 'unknown'), # Delaunay
(artemis_info, 102, 'ARTEMIS123'), # Delaunay
]:
with catch_logging() as log:
helmet = get_meg_helmet_surf(info, trans, verbose=True)
log = log.getvalue()
assert name in log
assert_equal(len(helmet['rr']), n)
assert_equal(len(helmet['rr']), len(helmet['nn']))
@testing.requires_testing_data
def test_head():
"""Test loading the head surface."""
surf_1 = get_head_surf('sample', subjects_dir=subjects_dir)
surf_2 = get_head_surf('sample', 'head', subjects_dir=subjects_dir)
assert len(surf_1['rr']) < len(surf_2['rr']) # BEM vs dense head
pytest.raises(TypeError, get_head_surf, subject=None,
subjects_dir=subjects_dir)
def test_fast_cross_3d():
"""Test cross product with lots of elements."""
x = rng.rand(100000, 3)
y = rng.rand(1, 3)
z = np.cross(x, y)
zz = fast_cross_3d(x, y)
assert_array_equal(z, zz)
# broadcasting and non-2D
zz = fast_cross_3d(x[:, np.newaxis], y[0])
assert_array_equal(z, zz[:, 0])
def test_compute_nearest():
"""Test nearest neighbor searches."""
x = rng.randn(500, 3)
x /= np.sqrt(np.sum(x ** 2, axis=1))[:, None]
nn_true = rng.permutation(np.arange(500, dtype=np.int64))[:20]
y = x[nn_true]
nn1 = _compute_nearest(x, y, method='BallTree')
nn2 = _compute_nearest(x, y, method='cKDTree')
nn3 = _compute_nearest(x, y, method='cdist')
assert_array_equal(nn_true, nn1)
assert_array_equal(nn_true, nn2)
assert_array_equal(nn_true, nn3)
# test distance support
nnn1 = _compute_nearest(x, y, method='BallTree', return_dists=True)
nnn2 = _compute_nearest(x, y, method='cKDTree', return_dists=True)
nnn3 = _compute_nearest(x, y, method='cdist', return_dists=True)
assert_array_equal(nnn1[0], nn_true)
assert_array_equal(nnn1[1], np.zeros_like(nn1)) # all dists should be 0
assert_equal(len(nnn1), len(nnn2))
for nn1, nn2, nn3 in zip(nnn1, nnn2, nnn3):
assert_array_equal(nn1, nn2)
assert_array_equal(nn1, nn3)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_morph_maps():
"""Test reading and creating morph maps."""
# make a new fake subjects_dir
tempdir = _TempDir()
for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
os.mkdir(op.join(tempdir, subject))
os.mkdir(op.join(tempdir, subject, 'surf'))
regs = ('reg', 'left_right') if subject == 'fsaverage_ds' else ('reg',)
for hemi in ['lh', 'rh']:
for reg in regs:
args = [subject, 'surf', hemi + '.sphere.' + reg]
copyfile(op.join(subjects_dir, *args),
op.join(tempdir, *args))
for subject_from, subject_to, xhemi in (
('fsaverage_ds', 'sample_ds', False),
('fsaverage_ds', 'fsaverage_ds', True)):
# trigger the creation of morph-maps dir and create the map
with catch_logging() as log:
mmap = read_morph_map(subject_from, subject_to, tempdir,
xhemi=xhemi, verbose=True)
log = log.getvalue()
assert 'does not exist' in log
assert 'Creating' in log
mmap2 = read_morph_map(subject_from, subject_to, subjects_dir,
xhemi=xhemi)
assert_equal(len(mmap), len(mmap2))
for m1, m2 in zip(mmap, mmap2):
# deal with sparse matrix stuff
diff = (m1 - m2).data
assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)
# This will also trigger creation, but it's trivial
with pytest.warns(None):
mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
for mm in mmap:
assert (mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0
@testing.requires_testing_data
def test_io_surface():
"""Test reading and writing of Freesurfer surface mesh files."""
tempdir = _TempDir()
fname_quad = op.join(data_path, 'subjects', 'bert', 'surf',
'lh.inflated.nofix')
fname_tri = op.join(data_path, 'subjects', 'sample', 'bem',
'inner_skull.surf')
for fname in (fname_quad, fname_tri):
with pytest.warns(None): # no volume info
pts, tri, vol_info = read_surface(fname, read_metadata=True)
write_surface(op.join(tempdir, 'tmp'), pts, tri, volume_info=vol_info,
overwrite=True)
with pytest.warns(None): # no volume info
c_pts, c_tri, c_vol_info = read_surface(op.join(tempdir, 'tmp'),
read_metadata=True)
assert_array_equal(pts, c_pts)
assert_array_equal(tri, c_tri)
assert_equal(object_diff(vol_info, c_vol_info), '')
if fname != fname_tri: # don't bother testing wavefront for the bigger
continue
# Test writing/reading a Wavefront .obj file
write_surface(op.join(tempdir, 'tmp.obj'), pts, tri, volume_info=None,
overwrite=True)
c_pts, c_tri = read_surface(op.join(tempdir, 'tmp.obj'),
read_metadata=False)
assert_array_equal(pts, c_pts)
assert_array_equal(tri, c_tri)
# reading patches (just a smoke test, let the flatmap viz tests be more
# complete)
fname_patch = op.join(
data_path, 'subjects', 'fsaverage', 'surf', 'rh.cortex.patch.flat')
_read_patch(fname_patch)
@testing.requires_testing_data
def test_read_curv():
"""Test reading curvature data."""
fname_curv = op.join(data_path, 'subjects', 'fsaverage', 'surf', 'lh.curv')
fname_surf = op.join(data_path, 'subjects', 'fsaverage', 'surf',
'lh.inflated')
bin_curv = read_curvature(fname_curv)
rr = read_surface(fname_surf)[0]
assert len(bin_curv) == len(rr)
assert np.logical_or(bin_curv == 0, bin_curv == 1).all()
@requires_vtk
def test_decimate_surface_vtk():
"""Test triangular surface decimation."""
points = np.array([[-0.00686118, -0.10369860, 0.02615170],
[-0.00713948, -0.10370162, 0.02614874],
[-0.00686208, -0.10368247, 0.02588313],
[-0.00713987, -0.10368724, 0.02587745]])
tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, 0]])
for n_tri in [4, 3, 2]: # quadric decimation creates even numbered output.
_, this_tris = decimate_surface(points, tris, n_tri)
assert len(this_tris) == (n_tri if not n_tri % 2 else 2)
with pytest.raises(ValueError, match='exceeds number of original'):
decimate_surface(points, tris, len(tris) + 1)
nirvana = 5
tris = np.array([[0, 1, 2], [1, 2, 3], [0, 3, 1], [1, 2, nirvana]])
pytest.raises(ValueError, decimate_surface, points, tris, n_tri)
@requires_freesurfer('mris_sphere')
def test_decimate_surface_sphere():
"""Test sphere mode of decimation."""
rr, tris = _tessellate_sphere(3)
assert len(rr) == 66
assert len(tris) == 128
for kind, n_tri in [('ico', 20), ('oct', 32)]:
with catch_logging() as log:
_, tris_new = decimate_surface(
rr, tris, n_tri, method='sphere', verbose=True)
log = log.getvalue()
assert 'Freesurfer' in log
assert kind in log
assert len(tris_new) == n_tri
@pytest.mark.parametrize('dig_kinds, exclude, count, bounds, outliers', [
('auto', False, 72, (0.001, 0.002), 0),
(('eeg', 'extra', 'cardinal', 'hpi'), False, 146, (0.002, 0.003), 1),
(('eeg', 'extra', 'cardinal', 'hpi'), True, 139, (0.001, 0.002), 0),
])
@testing.requires_testing_data
def test_dig_mri_distances(dig_kinds, exclude, count, bounds, outliers):
"""Test the trans obtained by coregistration."""
info = read_info(fname_raw)
dists = dig_mri_distances(info, fname_trans, 'sample', subjects_dir,
dig_kinds=dig_kinds, exclude_frontal=exclude)
assert dists.shape == (count,)
assert bounds[0] < np.mean(dists) < bounds[1]
assert np.sum(dists > 0.03) == outliers
def test_normal_orth():
"""Test _normal_orth."""
nns = np.eye(3)
for nn in nns:
ori = _normal_orth(nn)
assert_allclose(ori[2], nn, atol=1e-12)
run_tests_if_main()
|
import io
from setuptools import setup, find_packages # pylint: disable=no-name-in-module,import-error
def dependencies(path):
with open(path) as f:
return f.read().splitlines()
with io.open("README.md", encoding='utf-8') as infile:
long_description = infile.read()
setup(
name='halo',
packages=find_packages(exclude=('tests', 'examples')),
version='0.0.29',
license='MIT',
description='Beautiful terminal spinners in Python',
long_description=long_description,
long_description_content_type="text/markdown",
author='Manraj Singh',
author_email='[email protected]',
url='https://github.com/manrajgrover/halo',
keywords=[
"console",
"loading",
"indicator",
"progress",
"cli",
"spinner",
"spinners",
"terminal",
"term",
"busy",
"wait",
"idle"
],
install_requires=dependencies('requirements.txt'),
tests_require=dependencies('requirements-dev.txt'),
include_package_data=True,
extras_require={
'ipython': [
'IPython==5.7.0',
'ipywidgets==7.1.0',
]
}
)
|
from unittest import TestCase
import numpy as np
import pandas as pd
from scattertext import whitespace_nlp_with_sentences
from scattertext.features.FeatsFromScoredLexicon import FeatsFromScoredLexicon
class TestFeatsFromScoredLexicon(TestCase):
def test_main(self):
lexicon_df = pd.DataFrame({'activation': {'a': 1.3846,
'abandon': 2.375,
'abandoned': 2.1,
'abandonment': 2.0,
'abated': 1.3333},
'imagery': {'a': 1.0,
'abandon': 2.4,
'abandoned': 3.0,
'abandonment': 1.4,
'abated': 1.2},
'pleasantness': {'a': 2.0,
'abandon': 1.0,
'abandoned': 1.1429,
'abandonment': 1.0,
'abated': 1.6667}})
with self.assertRaises(AssertionError):
FeatsFromScoredLexicon(3)
feats_from_scored_lexicon = FeatsFromScoredLexicon(lexicon_df)
self.assertEqual(set(feats_from_scored_lexicon.get_top_model_term_lists().keys()),
set(['activation', 'imagery', 'pleasantness']))
features = feats_from_scored_lexicon.get_doc_metadata(whitespace_nlp_with_sentences('I abandoned a wallet.'))
np.testing.assert_almost_equal(features[['activation', 'imagery', 'pleasantness']],
np.array([1.74230, 2.00000, 1.57145]))
|
import asyncio
from datetime import timedelta
import logging
from aiohttp import ClientConnectionError
from async_timeout import timeout
from pydaikin.daikin_base import Appliance
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_HOSTS, CONF_PASSWORD
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import Throttle
from . import config_flow # noqa: F401
from .const import CONF_UUID, KEY_MAC, TIMEOUT
_LOGGER = logging.getLogger(__name__)
DOMAIN = "daikin"
PARALLEL_UPDATES = 0
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
COMPONENT_TYPES = ["climate", "sensor", "switch"]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN, invalidation_version="0.113.0"),
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_HOSTS, default=[]): vol.All(
cv.ensure_list, [cv.string]
)
}
)
},
),
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Establish connection with Daikin."""
if DOMAIN not in config:
return True
hosts = config[DOMAIN][CONF_HOSTS]
if not hosts:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}
)
)
for host in hosts:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_HOST: host}
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Establish connection with Daikin."""
conf = entry.data
# For backwards compat, set unique ID
if entry.unique_id is None or ".local" in entry.unique_id:
hass.config_entries.async_update_entry(entry, unique_id=conf[KEY_MAC])
daikin_api = await daikin_api_setup(
hass,
conf[CONF_HOST],
conf.get(CONF_API_KEY),
conf.get(CONF_UUID),
conf.get(CONF_PASSWORD),
)
if not daikin_api:
return False
hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: daikin_api})
for component in COMPONENT_TYPES:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
await asyncio.wait(
[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in COMPONENT_TYPES
]
)
hass.data[DOMAIN].pop(config_entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return True
async def daikin_api_setup(hass, host, key, uuid, password):
"""Create a Daikin instance only once."""
session = hass.helpers.aiohttp_client.async_get_clientsession()
try:
with timeout(TIMEOUT):
device = await Appliance.factory(
host, session, key=key, uuid=uuid, password=password
)
except asyncio.TimeoutError as err:
_LOGGER.debug("Connection to %s timed out", host)
raise ConfigEntryNotReady from err
except ClientConnectionError as err:
_LOGGER.debug("ClientConnectionError to %s", host)
raise ConfigEntryNotReady from err
except Exception: # pylint: disable=broad-except
_LOGGER.error("Unexpected error creating device %s", host)
return None
api = DaikinApi(device)
return api
class DaikinApi:
"""Keep the Daikin instance in one place and centralize the update."""
def __init__(self, device: Appliance):
"""Initialize the Daikin Handle."""
self.device = device
self.name = device.values.get("name", "Daikin AC")
self.ip_address = device.device_ip
self._available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
"""Pull the latest data from Daikin."""
try:
await self.device.update_status()
self._available = True
except ClientConnectionError:
_LOGGER.warning("Connection failed for %s", self.ip_address)
self._available = False
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def device_info(self):
"""Return a device description for device registry."""
info = self.device.values
return {
"connections": {(CONNECTION_NETWORK_MAC, self.device.mac)},
"manufacturer": "Daikin",
"model": info.get("model"),
"name": info.get("name"),
"sw_version": info.get("ver", "").replace("_", "."),
}
|
from openzwavemqtt.const import ATTR_POSITION, ATTR_VALUE
from openzwavemqtt.exceptions import InvalidValueError, NotFoundError, WrongTypeError
import pytest
from .common import setup_ozw
async def test_services(hass, light_data, sent_messages):
"""Test services on lock."""
await setup_ozw(hass, fixture=light_data)
# Test set_config_parameter list by label
await hass.services.async_call(
"ozw",
"set_config_parameter",
{"node_id": 39, "parameter": 1, "value": "Disable"},
blocking=True,
)
assert len(sent_messages) == 1
msg = sent_messages[0]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 0, "ValueIDKey": 281475641245716}
# Test set_config_parameter list by index int
await hass.services.async_call(
"ozw",
"set_config_parameter",
{"node_id": 39, "parameter": 1, "value": 1},
blocking=True,
)
assert len(sent_messages) == 2
msg = sent_messages[1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 1, "ValueIDKey": 281475641245716}
# Test set_config_parameter int
await hass.services.async_call(
"ozw",
"set_config_parameter",
{"node_id": 39, "parameter": 3, "value": 55},
blocking=True,
)
assert len(sent_messages) == 3
msg = sent_messages[2]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 55, "ValueIDKey": 844425594667027}
# Test set_config_parameter invalid list int
with pytest.raises(NotFoundError):
assert await hass.services.async_call(
"ozw",
"set_config_parameter",
{"node_id": 39, "parameter": 1, "value": 12},
blocking=True,
)
assert len(sent_messages) == 3
# Test set_config_parameter invalid list value
with pytest.raises(NotFoundError):
assert await hass.services.async_call(
"ozw",
"set_config_parameter",
{"node_id": 39, "parameter": 1, "value": "Blah"},
blocking=True,
)
assert len(sent_messages) == 3
# Test set_config_parameter invalid list value type
with pytest.raises(WrongTypeError):
assert await hass.services.async_call(
"ozw",
"set_config_parameter",
{
"node_id": 39,
"parameter": 1,
"value": {ATTR_VALUE: True, ATTR_POSITION: 1},
},
blocking=True,
)
assert len(sent_messages) == 3
# Test set_config_parameter int out of range
with pytest.raises(InvalidValueError):
assert await hass.services.async_call(
"ozw",
"set_config_parameter",
{"node_id": 39, "parameter": 3, "value": 2147483657},
blocking=True,
)
assert len(sent_messages) == 3
# Test set_config_parameter short
await hass.services.async_call(
"ozw",
"set_config_parameter",
{"node_id": 39, "parameter": 81, "value": 3000},
blocking=True,
)
assert len(sent_messages) == 4
msg = sent_messages[3]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 3000, "ValueIDKey": 22799473778098198}
# Test set_config_parameter byte
await hass.services.async_call(
"ozw",
"set_config_parameter",
{"node_id": 39, "parameter": 16, "value": 20},
blocking=True,
)
assert len(sent_messages) == 5
msg = sent_messages[4]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 20, "ValueIDKey": 4503600291905553}
|
from __future__ import print_function
import configobj
import optparse
import os
import shutil
import sys
import tempfile
import traceback
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), 'src')))
def getIncludePaths(path):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
sys.path.append(os.path.dirname(cPath))
elif os.path.isdir(cPath):
getIncludePaths(cPath)
collectors = {}
def getCollectors(path):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
modname = f[:-3]
if modname.startswith('Test'):
continue
if modname.startswith('test'):
continue
try:
# Import the module
module = __import__(modname, globals(), locals(), ['*'])
# Find the name
for attr in dir(module):
if not attr.endswith('Collector'):
continue
cls = getattr(module, attr)
if cls.__module__ != modname:
continue
if cls.__name__ not in collectors:
collectors[cls.__name__] = module
except Exception:
print("Failed to import module: %s. %s" % (
modname, traceback.format_exc()))
collectors[modname] = False
elif os.path.isdir(cPath):
getCollectors(cPath)
handlers = {}
def getHandlers(path, name=None):
for f in os.listdir(path):
cPath = os.path.abspath(os.path.join(path, f))
if os.path.isfile(cPath) and len(f) > 3 and f.endswith('.py'):
modname = f[:-3]
if name and f is not "%s.py" % name:
break
try:
# Import the module
module = __import__(modname, globals(), locals(), ['*'])
# Find the name
for attr in dir(module):
if ((not attr.endswith('Handler') or
attr.startswith('Handler'))):
continue
cls = getattr(module, attr)
if cls.__name__ not in handlers:
handlers[cls.__name__] = module
except Exception:
print("Failed to import module: %s. %s" % (
modname, traceback.format_exc()))
handlers[modname] = False
elif os.path.isdir(cPath):
getHandlers(cPath)
def writeDocHeader(docFile):
docFile.write("<!--")
docFile.write("This file was generated from the python source\n")
docFile.write("Please edit the source to make changes\n")
docFile.write("-->\n")
def writeDocString(docFile, name, doc):
docFile.write("%s\n" % (name))
docFile.write("=====\n")
if doc is None:
print("No __doc__ string for %s!" % name)
doc = ''
docFile.write("%s\n" % doc)
def writeDocOptionsHeader(docFile):
docFile.write("#### Options\n")
docFile.write("\n")
docFile.write("Setting | Default | Description | Type\n")
docFile.write("--------|---------|-------------|-----\n")
def writeDocOptions(docFile, options, default_options):
for option in sorted(options.keys()):
defaultOption = ''
defaultOptionType = ''
if option in default_options:
defaultOptionType = default_options[option].__class__.__name__
if isinstance(default_options[option], list):
defaultOption = ', '.join(map(str, default_options[option]))
defaultOption += ','
else:
defaultOption = str(default_options[option])
docFile.write("%s | %s | %s | %s\n"
% (option,
defaultOption,
options[option].replace("\n", '<br>\n'),
defaultOptionType))
def writeDoc(items, type_name, doc_path):
for item in sorted(items.keys()):
# Skip configuring the basic item object
if item == type_name:
continue
if item.startswith('Test'):
continue
print("Processing %s..." % (item))
if not hasattr(items[item], item):
continue
cls = getattr(items[item], item)
item_options = None
default_options = None
try:
tmpfile = None
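# collectors need the diamond config; handlers only need a log file path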
if type_name is "Collector":
obj = cls(config=config, handlers={})
elif type_name is "Handler":
tmpfile = tempfile.mkstemp()
obj = cls({'log_file': tmpfile[1]})
item_options = obj.get_default_config_help()
default_options = obj.get_default_config()
if type_name is "Handler":
os.remove(tmpfile[1])
except Exception as e:
print("Caught Exception {}".format(e))
docFile = open(os.path.join(doc_path, item + ".md"), 'w')
writeDocHeader(docFile)
writeDocString(docFile, item, items[item].__doc__)
writeDocOptionsHeader(docFile)
if item_options:
writeDocOptions(docFile, item_options, default_options)
if type_name is "Collector":
docFile.write("\n")
docFile.write("#### Example Output\n")
docFile.write("\n")
docFile.write("```\n")
docFile.write("__EXAMPLESHERE__\n")
docFile.write("```\n")
docFile.write("\n")
docFile.close()
##########################################################################
if __name__ == "__main__":
# Initialize Options
parser = optparse.OptionParser()
parser.add_option("-c", "--configfile",
dest="configfile",
default="/etc/diamond/diamond.conf",
help="Path to the config file")
parser.add_option("-C", "--collector",
dest="collector",
default=None,
help="Configure a single collector")
parser.add_option("-H", "--handler",
dest="handler",
default=None,
help="Configure a single handler")
parser.add_option("-p", "--print",
action="store_true",
dest="dump",
default=False,
help="Just print the defaults")
# Parse Command Line Args
(options, args) = parser.parse_args()
# Initialize Config
if os.path.exists(options.configfile):
config = configobj.ConfigObj(os.path.abspath(options.configfile))
else:
print("ERROR: Config file: %s does not exist." % (
options.configfile), file=sys.stderr)
print(("Please run python config.py -c /path/to/diamond.conf"),
file=sys.stderr)
parser.print_help(sys.stderr)
sys.exit(1)
docs_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'docs'))
if options.collector or (not options.collector and not options.handler):
collector_path = config['server']['collectors_path']
collectors_doc_path = os.path.join(docs_path, "collectors")
getIncludePaths(collector_path)
if options.collector:
single_collector_path = os.path.join(collector_path,
options.collector)
getCollectors(single_collector_path)
else:
# Ugly hack for snmp collector overrides
getCollectors(os.path.join(collector_path, 'snmp'))
getCollectors(collector_path)
shutil.rmtree(collectors_doc_path)
os.mkdir(collectors_doc_path)
writeDoc(collectors, "Collector", collectors_doc_path)
if options.handler or (not options.collector and not options.handler):
handler_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'src',
'diamond',
'handler'))
handlers_doc_path = os.path.join(docs_path, "handlers")
getIncludePaths(handler_path)
if options.handler:
getHandlers(handler_path, name=options.handler)
else:
getHandlers(handler_path)
shutil.rmtree(handlers_doc_path)
os.mkdir(handlers_doc_path)
writeDoc(handlers, "Handler", handlers_doc_path)
|