from homeassistant import auth, data_entry_flow
from homeassistant.auth.mfa_modules import auth_mfa_module_from_config
from homeassistant.auth.models import Credentials
from tests.common import MockUser
async def test_validate(hass):
"""Test validating pin."""
auth_module = await auth_mfa_module_from_config(
hass,
{
"type": "insecure_example",
"data": [{"user_id": "test-user", "pin": "123456"}],
},
)
result = await auth_module.async_validate("test-user", {"pin": "123456"})
assert result is True
result = await auth_module.async_validate("test-user", {"pin": "invalid"})
assert result is False
result = await auth_module.async_validate("invalid-user", {"pin": "123456"})
assert result is False
async def test_setup_user(hass):
"""Test setup user."""
auth_module = await auth_mfa_module_from_config(
hass, {"type": "insecure_example", "data": []}
)
await auth_module.async_setup_user("test-user", {"pin": "123456"})
assert len(auth_module._data) == 1
result = await auth_module.async_validate("test-user", {"pin": "123456"})
assert result is True
async def test_depose_user(hass):
"""Test despose user."""
auth_module = await auth_mfa_module_from_config(
hass,
{
"type": "insecure_example",
"data": [{"user_id": "test-user", "pin": "123456"}],
},
)
assert len(auth_module._data) == 1
await auth_module.async_depose_user("test-user")
assert len(auth_module._data) == 0
async def test_is_user_setup(hass):
"""Test is user setup."""
auth_module = await auth_mfa_module_from_config(
hass,
{
"type": "insecure_example",
"data": [{"user_id": "test-user", "pin": "123456"}],
},
)
assert await auth_module.async_is_user_setup("test-user") is True
assert await auth_module.async_is_user_setup("invalid-user") is False
async def test_login(hass):
"""Test login flow with auth module."""
hass.auth = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "123456"}],
}
],
)
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(hass.auth)
await hass.auth.async_link_user(
user,
Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
),
)
provider = hass.auth.auth_providers[0]
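    # Walk the login flow: bad credentials are rejected with "invalid_auth",
    # valid credentials advance to the MFA "pin" step, and a correct pin
    # finishes the flow with a create-entry result for the linked user.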
result = await hass.auth.login_flow.async_init((provider.type, provider.id))
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"username": "incorrect-user", "password": "test-pass"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["base"] == "invalid_auth"
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"username": "test-user", "password": "incorrect-pass"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["base"] == "invalid_auth"
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "mfa"
assert result["data_schema"].schema.get("pin") == str
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"pin": "invalid-code"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["base"] == "invalid_code"
result = await hass.auth.login_flow.async_configure(
result["flow_id"], {"pin": "123456"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"].id == "mock-user"
async def test_setup_flow(hass):
"""Test validating pin."""
auth_module = await auth_mfa_module_from_config(
hass,
{
"type": "insecure_example",
"data": [{"user_id": "test-user", "pin": "123456"}],
},
)
flow = await auth_module.async_setup_flow("new-user")
result = await flow.async_step_init()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await flow.async_step_init({"pin": "abcdefg"})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert auth_module._data[1]["user_id"] == "new-user"
assert auth_module._data[1]["pin"] == "abcdefg"
|
import os
import tempfile
import unittest
import homeassistant.components.notify as notify
from homeassistant.setup import async_setup_component, setup_component
from tests.async_mock import patch
from tests.common import assert_setup_component, get_test_home_assistant
class TestCommandLine(unittest.TestCase):
"""Test the command line notifications."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop down everything that was started."""
self.hass.stop()
def test_setup(self):
"""Test setup."""
with assert_setup_component(1) as handle_config:
assert setup_component(
self.hass,
"notify",
{
"notify": {
"name": "test",
"platform": "command_line",
"command": "echo $(cat); exit 1",
}
},
)
assert handle_config[notify.DOMAIN]
def test_bad_config(self):
"""Test set up the platform with bad/missing configuration."""
config = {notify.DOMAIN: {"name": "test", "platform": "command_line"}}
with assert_setup_component(0) as handle_config:
assert setup_component(self.hass, notify.DOMAIN, config)
assert not handle_config[notify.DOMAIN]
def test_command_line_output(self):
"""Test the command line output."""
with tempfile.TemporaryDirectory() as tempdirname:
filename = os.path.join(tempdirname, "message.txt")
message = "one, two, testing, testing"
with assert_setup_component(1) as handle_config:
assert setup_component(
self.hass,
notify.DOMAIN,
{
"notify": {
"name": "test",
"platform": "command_line",
"command": f"echo $(cat) > {filename}",
}
},
)
assert handle_config[notify.DOMAIN]
assert self.hass.services.call(
"notify", "test", {"message": message}, blocking=True
)
with open(filename) as fil:
# the echo command adds a line break
assert fil.read() == f"{message}\n"
@patch("homeassistant.components.command_line.notify._LOGGER.error")
    def test_error_for_non_zero_exit_code(self, mock_error):
        """Test if an error is logged for non-zero exit codes."""
with assert_setup_component(1) as handle_config:
assert setup_component(
self.hass,
notify.DOMAIN,
{
"notify": {
"name": "test",
"platform": "command_line",
"command": "echo $(cat); exit 1",
}
},
)
assert handle_config[notify.DOMAIN]
assert self.hass.services.call(
"notify", "test", {"message": "error"}, blocking=True
)
assert mock_error.call_count == 1
async def test_timeout(hass, caplog):
"""Test we do not block forever."""
assert await async_setup_component(
hass,
notify.DOMAIN,
{
"notify": {
"name": "test",
"platform": "command_line",
"command": "sleep 10000",
"command_timeout": 0.0000001,
}
},
)
await hass.async_block_till_done()
assert await hass.services.async_call(
"notify", "test", {"message": "error"}, blocking=True
)
await hass.async_block_till_done()
assert "Timeout" in caplog.text
|
import pytest
from redbot.core import data_manager
__all__ = ["cleanup_datamanager", "data_mgr_config", "cog_instance"]
@pytest.fixture(autouse=True)
def cleanup_datamanager():
data_manager.basic_config = None
@pytest.fixture()
def data_mgr_config(tmpdir):
default = data_manager.basic_config_default.copy()
default["BASE_DIR"] = str(tmpdir)
return default
@pytest.fixture()
def cog_instance():
thing = type("CogTest", (object,), {})
return thing()
|
import asyncio
import os
import pytest
from homeassistant.components import onboarding
from homeassistant.components.onboarding import const, views
from homeassistant.const import HTTP_FORBIDDEN
from homeassistant.setup import async_setup_component
from . import mock_storage
from tests.async_mock import patch
from tests.common import CLIENT_ID, CLIENT_REDIRECT_URI, register_auth_provider
from tests.components.met.conftest import mock_weather # noqa: F401
@pytest.fixture(autouse=True)
def always_mock_weather(mock_weather): # noqa: F811
"""Mock the Met weather provider."""
pass
@pytest.fixture(autouse=True)
def auth_active(hass):
"""Ensure auth is always active."""
hass.loop.run_until_complete(
register_auth_provider(hass, {"type": "homeassistant"})
)
@pytest.fixture(name="rpi")
async def rpi_fixture(hass, aioclient_mock, mock_supervisor):
"""Mock core info with rpi."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "machine": "raspberrypi3"},
},
)
assert await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
@pytest.fixture(name="no_rpi")
async def no_rpi_fixture(hass, aioclient_mock, mock_supervisor):
"""Mock core info with rpi."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "machine": "odroid-n2"},
},
)
assert await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
@pytest.fixture(name="mock_supervisor")
async def mock_supervisor_fixture(hass, aioclient_mock):
"""Mock supervisor."""
aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"})
with patch.dict(os.environ, {"HASSIO": "127.0.0.1"}), patch(
"homeassistant.components.hassio.HassIO.is_connected",
return_value=True,
), patch(
"homeassistant.components.hassio.HassIO.get_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_host_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_ingress_panels",
return_value={"panels": {}},
), patch.dict(
os.environ, {"HASSIO_TOKEN": "123456"}
):
yield
async def test_onboarding_progress(hass, hass_storage, aiohttp_client):
"""Test fetching progress."""
mock_storage(hass_storage, {"done": ["hello"]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await aiohttp_client(hass.http.app)
with patch.object(views, "STEPS", ["hello", "world"]):
resp = await client.get("/api/onboarding")
assert resp.status == 200
data = await resp.json()
assert len(data) == 2
assert data[0] == {"step": "hello", "done": True}
assert data[1] == {"step": "world", "done": False}
async def test_onboarding_user_already_done(hass, hass_storage, aiohttp_client):
"""Test creating a new user when user step already done."""
mock_storage(hass_storage, {"done": [views.STEP_USER]})
with patch.object(onboarding, "STEPS", ["hello", "world"]):
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await aiohttp_client(hass.http.app)
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test Name",
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == HTTP_FORBIDDEN
async def test_onboarding_user(hass, hass_storage, aiohttp_client):
"""Test creating a new user."""
assert await async_setup_component(hass, "person", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await aiohttp_client(hass.http.app)
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test Name",
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == 200
assert const.STEP_USER in hass_storage[const.DOMAIN]["data"]["done"]
data = await resp.json()
assert "auth_code" in data
users = await hass.auth.async_get_users()
assert len(users) == 1
user = users[0]
assert user.name == "Test Name"
assert len(user.credentials) == 1
assert user.credentials[0].data["username"] == "test-user"
assert len(hass.data["person"][1].async_items()) == 1
# Validate refresh token 1
resp = await client.post(
"/auth/token",
data={
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code": data["auth_code"],
},
)
assert resp.status == 200
tokens = await resp.json()
assert (
await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
)
# Validate created areas
area_registry = await hass.helpers.area_registry.async_get_registry()
assert len(area_registry.areas) == 3
assert sorted([area.name for area in area_registry.async_list_areas()]) == [
"Bedroom",
"Kitchen",
"Living Room",
]
async def test_onboarding_user_invalid_name(hass, hass_storage, aiohttp_client):
"""Test not providing name."""
mock_storage(hass_storage, {"done": []})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await aiohttp_client(hass.http.app)
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == 400
async def test_onboarding_user_race(hass, hass_storage, aiohttp_client):
"""Test race condition on creating new user."""
mock_storage(hass_storage, {"done": ["hello"]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await aiohttp_client(hass.http.app)
resp1 = client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test 1",
"username": "1-user",
"password": "1-pass",
"language": "en",
},
)
resp2 = client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test 2",
"username": "2-user",
"password": "2-pass",
"language": "es",
},
)
res1, res2 = await asyncio.gather(resp1, resp2)
assert sorted([res1.status, res2.status]) == [200, HTTP_FORBIDDEN]
async def test_onboarding_integration(hass, hass_storage, hass_client):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": CLIENT_REDIRECT_URI},
)
assert resp.status == 200
data = await resp.json()
assert "auth_code" in data
# Validate refresh token
resp = await client.post(
"/auth/token",
data={
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code": data["auth_code"],
},
)
assert resp.status == 200
assert const.STEP_INTEGRATION in hass_storage[const.DOMAIN]["data"]["done"]
tokens = await resp.json()
assert (
await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
)
# Onboarding refresh token and new refresh token
for user in await hass.auth.async_get_users():
assert len(user.refresh_tokens) == 2, user
async def test_onboarding_integration_invalid_redirect_uri(
hass, hass_storage, hass_client
):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": "http://invalid-redirect.uri"},
)
assert resp.status == 400
# We will still mark the last step as done because there is nothing left.
assert const.STEP_INTEGRATION in hass_storage[const.DOMAIN]["data"]["done"]
# Only refresh token from onboarding should be there
for user in await hass.auth.async_get_users():
assert len(user.refresh_tokens) == 1, user
async def test_onboarding_integration_requires_auth(hass, hass_storage, aiohttp_client):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await aiohttp_client(hass.http.app)
resp = await client.post(
"/api/onboarding/integration", json={"client_id": CLIENT_ID}
)
assert resp.status == 401
async def test_onboarding_core_sets_up_met(hass, hass_storage, hass_client):
"""Test finishing the core step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("weather")) == 1
async def test_onboarding_core_sets_up_rpi_power(
hass, hass_storage, hass_client, aioclient_mock, rpi
):
"""Test that the core step sets up rpi_power on RPi."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.rpi_power.config_flow.new_under_voltage"
), patch("homeassistant.components.rpi_power.binary_sensor.new_under_voltage"):
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
rpi_power_state = hass.states.get("binary_sensor.rpi_power_status")
assert rpi_power_state
async def test_onboarding_core_no_rpi_power(
hass, hass_storage, hass_client, aioclient_mock, no_rpi
):
"""Test that the core step do not set up rpi_power on non RPi."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.rpi_power.config_flow.new_under_voltage"
), patch("homeassistant.components.rpi_power.binary_sensor.new_under_voltage"):
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
rpi_power_state = hass.states.get("binary_sensor.rpi_power_status")
assert not rpi_power_state
|
from io import BytesIO
import os
import os.path as op
from functools import reduce, partial
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
import pytest
from mne.datasets import testing
from mne.io import read_raw_fif, read_raw_bti
from mne.io._digitization import _make_bti_dig_points
from mne.io.bti.bti import (_read_config,
_read_bti_header, _get_bti_dev_t,
_correct_trans, _get_bti_info,
_loc_to_coil_trans, _convert_coil_trans,
_check_nan_dev_head_t, _rename_channels)
from mne.io.bti.bti import _read_head_shape
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.pick import pick_info
from mne.io.constants import FIFF
from mne import pick_types
from mne.utils import assert_dig_allclose, run_tests_if_main
from mne.transforms import Transform, combine_transforms, invert_transform
base_dir = op.join(op.abspath(op.dirname(__file__)), 'data')
archs = 'linux', 'solaris'
pdf_fnames = [op.join(base_dir, 'test_pdf_%s' % a) for a in archs]
config_fnames = [op.join(base_dir, 'test_config_%s' % a) for a in archs]
hs_fnames = [op.join(base_dir, 'test_hs_%s' % a) for a in archs]
exported_fnames = [op.join(base_dir, 'exported4D_%s_raw.fif' % a)
for a in archs]
tmp_raw_fname = op.join(base_dir, 'tmp_raw.fif')
fname_2500 = op.join(testing.data_path(download=False), 'BTi', 'erm_HFH',
'c,rfDC')
fname_sim = op.join(testing.data_path(download=False), 'BTi', '4Dsim',
'c,rfDC')
fname_sim_filt = op.join(testing.data_path(download=False), 'BTi', '4Dsim',
'c,rfDC,fn50,o')
# the 4D exporter doesn't export all channels, so we confine our comparison
NCH = 248
@testing.requires_testing_data
def test_read_2500():
"""Test reading data from 2500 system."""
_test_raw_reader(read_raw_bti, pdf_fname=fname_2500, head_shape_fname=None)
def test_read_config():
"""Test read bti config file."""
# for config in config_fname, config_solaris_fname:
for config in config_fnames:
cfg = _read_config(config)
assert all('unknown' not in block.lower() and block != ''
for block in cfg['user_blocks'])
def test_crop_append():
"""Test crop and append raw."""
raw = _test_raw_reader(
read_raw_bti, pdf_fname=pdf_fnames[0],
config_fname=config_fnames[0], head_shape_fname=hs_fnames[0])
y, t = raw[:]
t0, t1 = 0.25 * t[-1], 0.75 * t[-1]
mask = (t0 <= t) * (t <= t1)
raw_ = raw.copy().crop(t0, t1)
y_, _ = raw_[:]
assert (y_.shape[1] == mask.sum())
assert (y_.shape[0] == y.shape[0])
def test_transforms():
"""Test transformations."""
bti_trans = (0.0, 0.02, 0.11)
bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
for pdf, config, hs, in zip(pdf_fnames, config_fnames, hs_fnames):
raw = read_raw_bti(pdf, config, hs, preload=False)
dev_ctf_t = raw.info['dev_ctf_t']
dev_head_t_old = raw.info['dev_head_t']
ctf_head_t = raw.info['ctf_head_t']
# 1) get BTI->Neuromag
bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t(0.0, bti_trans))
# 2) get Neuromag->BTI head
t = combine_transforms(invert_transform(bti_dev_t), dev_ctf_t,
'meg', 'ctf_head')
# 3) get Neuromag->head
dev_head_t_new = combine_transforms(t, ctf_head_t, 'meg', 'head')
assert_array_equal(dev_head_t_new['trans'], dev_head_t_old['trans'])
@pytest.mark.slowtest
def test_raw():
"""Test bti conversion to Raw object."""
for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
exported_fnames):
# rx = 2 if 'linux' in pdf else 0
pytest.raises(ValueError, read_raw_bti, pdf, 'eggs', preload=False)
pytest.raises(ValueError, read_raw_bti, pdf, config, 'spam',
preload=False)
if op.exists(tmp_raw_fname):
os.remove(tmp_raw_fname)
ex = read_raw_fif(exported, preload=True)
ra = read_raw_bti(pdf, config, hs, preload=False)
assert ('RawBTi' in repr(ra))
assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
assert_array_almost_equal(ex.info['dev_head_t']['trans'],
ra.info['dev_head_t']['trans'], 7)
assert len(ex.info['dig']) in (3563, 5154)
assert_dig_allclose(ex.info, ra.info, limit=100)
coil1, coil2 = [np.concatenate([d['loc'].flatten()
for d in r_.info['chs'][:NCH]])
for r_ in (ra, ex)]
assert_array_almost_equal(coil1, coil2, 7)
loc1, loc2 = [np.concatenate([d['loc'].flatten()
for d in r_.info['chs'][:NCH]])
for r_ in (ra, ex)]
assert_allclose(loc1, loc2)
assert_allclose(ra[:NCH][0], ex[:NCH][0])
assert_array_equal([c['range'] for c in ra.info['chs'][:NCH]],
[c['range'] for c in ex.info['chs'][:NCH]])
assert_array_equal([c['cal'] for c in ra.info['chs'][:NCH]],
[c['cal'] for c in ex.info['chs'][:NCH]])
assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])
# check our transforms
for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
if ex.info[key] is None:
pass
else:
assert (ra.info[key] is not None)
for ent in ('to', 'from', 'trans'):
assert_allclose(ex.info[key][ent],
ra.info[key][ent])
ra.save(tmp_raw_fname)
re = read_raw_fif(tmp_raw_fname)
print(re)
for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
assert (isinstance(re.info[key], dict))
this_t = re.info[key]['trans']
assert_equal(this_t.shape, (4, 4))
            # check that the transform matrix is not the identity
assert (not np.allclose(this_t, np.eye(4)))
os.remove(tmp_raw_fname)
def test_info_no_rename_no_reorder_no_pdf():
"""Test private renaming, reordering and partial construction option."""
for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
info, bti_info = _get_bti_info(
pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
ecg_ch='E31', eog_ch=('E63', 'E64'),
rename_channels=False, sort_by_ch_name=False)
info2, bti_info = _get_bti_info(
pdf_fname=None, config_fname=config, head_shape_fname=hs,
rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
ecg_ch='E31', eog_ch=('E63', 'E64'),
rename_channels=False, sort_by_ch_name=False)
assert_equal(info['ch_names'],
[ch['ch_name'] for ch in info['chs']])
assert_equal([n for n in info['ch_names'] if n.startswith('A')][:5],
['A22', 'A2', 'A104', 'A241', 'A138'])
assert_equal([n for n in info['ch_names'] if n.startswith('A')][-5:],
['A133', 'A158', 'A44', 'A134', 'A216'])
info = pick_info(info, pick_types(info, meg=True, stim=True,
resp=True))
info2 = pick_info(info2, pick_types(info2, meg=True, stim=True,
resp=True))
assert (info['sfreq'] is not None)
assert (info['lowpass'] is not None)
assert (info['highpass'] is not None)
assert (info['meas_date'] is not None)
assert_equal(info2['sfreq'], None)
assert_equal(info2['lowpass'], None)
assert_equal(info2['highpass'], None)
assert_equal(info2['meas_date'], None)
        assert_equal(info['ch_names'], info2['ch_names'])
for key in ['dev_ctf_t', 'dev_head_t', 'ctf_head_t']:
assert_array_equal(info[key]['trans'], info2[key]['trans'])
assert_array_equal(
np.array([ch['loc'] for ch in info['chs']]),
np.array([ch['loc'] for ch in info2['chs']]))
# just check reading data | corner case
raw1 = read_raw_bti(
pdf_fname=pdf, config_fname=config, head_shape_fname=None,
sort_by_ch_name=False, preload=True)
# just check reading data | corner case
raw2 = read_raw_bti(
pdf_fname=pdf, config_fname=config, head_shape_fname=None,
rename_channels=False,
sort_by_ch_name=True, preload=True)
sort_idx = [raw1.bti_ch_labels.index(ch) for ch in raw2.bti_ch_labels]
raw1._data = raw1._data[sort_idx]
assert_array_equal(raw1._data, raw2._data)
assert_array_equal(raw2.bti_ch_labels, raw2.ch_names)
def test_no_conversion():
"""Test bti no-conversion option."""
get_info = partial(
_get_bti_info,
rotation_x=0.0, translation=(0.0, 0.02, 0.11), convert=False,
ecg_ch='E31', eog_ch=('E63', 'E64'),
rename_channels=False, sort_by_ch_name=False)
for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
raw_info, _ = get_info(pdf, config, hs, convert=False)
raw_info_con = read_raw_bti(
pdf_fname=pdf, config_fname=config, head_shape_fname=hs,
convert=True, preload=False).info
pick_info(raw_info_con,
pick_types(raw_info_con, meg=True, ref_meg=True),
copy=False)
pick_info(raw_info,
pick_types(raw_info, meg=True, ref_meg=True), copy=False)
bti_info = _read_bti_header(pdf, config)
dev_ctf_t = _correct_trans(bti_info['bti_transform'][0])
assert_array_equal(dev_ctf_t, raw_info['dev_ctf_t']['trans'])
assert_array_equal(raw_info['dev_head_t']['trans'], np.eye(4))
assert_array_equal(raw_info['ctf_head_t']['trans'], np.eye(4))
nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs)
dig, t, _ = _make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points,
convert=False, use_hpi=False)
assert_array_equal(t['trans'], np.eye(4))
for ii, (old, new, con) in enumerate(zip(
dig, raw_info['dig'], raw_info_con['dig'])):
assert_equal(old['ident'], new['ident'])
assert_array_equal(old['r'], new['r'])
assert (not np.allclose(old['r'], con['r']))
if ii > 10:
break
ch_map = {ch['chan_label']: ch['loc'] for ch in bti_info['chs']}
for ii, ch_label in enumerate(raw_info['ch_names']):
if not ch_label.startswith('A'):
continue
t1 = ch_map[ch_label] # correction already performed in bti_info
t2 = raw_info['chs'][ii]['loc']
t3 = raw_info_con['chs'][ii]['loc']
assert_allclose(t1, t2, atol=1e-15)
assert (not np.allclose(t1, t3))
idx_a = raw_info_con['ch_names'].index('MEG 001')
idx_b = raw_info['ch_names'].index('A22')
assert_equal(
raw_info_con['chs'][idx_a]['coord_frame'],
FIFF.FIFFV_COORD_DEVICE)
assert_equal(
raw_info['chs'][idx_b]['coord_frame'],
FIFF.FIFFV_MNE_COORD_4D_HEAD)
def test_bytes_io():
"""Test bti bytes-io API."""
for pdf, config, hs in zip(pdf_fnames, config_fnames, hs_fnames):
raw = read_raw_bti(pdf, config, hs, convert=True, preload=False)
with open(pdf, 'rb') as fid:
pdf = BytesIO(fid.read())
with open(config, 'rb') as fid:
config = BytesIO(fid.read())
with open(hs, 'rb') as fid:
hs = BytesIO(fid.read())
raw2 = read_raw_bti(pdf, config, hs, convert=True, preload=False)
repr(raw2)
assert_array_equal(raw[:][0], raw2[:][0])
def test_setup_headshape():
"""Test reading bti headshape."""
for hs in hs_fnames:
nasion, lpa, rpa, hpi, dig_points = _read_head_shape(hs)
dig, t, _ = _make_bti_dig_points(nasion, lpa, rpa, hpi, dig_points)
expected = {'kind', 'ident', 'r'}
found = set(reduce(lambda x, y: list(x) + list(y),
[d.keys() for d in dig]))
assert (not expected - found)
def test_nan_trans():
"""Test unlikely case that the device to head transform is empty."""
for ii, pdf_fname in enumerate(pdf_fnames):
bti_info = _read_bti_header(
pdf_fname, config_fnames[ii], sort_by_ch_name=True)
dev_ctf_t = Transform('ctf_meg', 'ctf_head',
_correct_trans(bti_info['bti_transform'][0]))
# reading params
convert = True
rotation_x = 0.
translation = (0.0, 0.02, 0.11)
bti_dev_t = _get_bti_dev_t(rotation_x, translation)
bti_dev_t = Transform('ctf_meg', 'meg', bti_dev_t)
ecg_ch = 'E31'
eog_ch = ('E63', 'E64')
# read parts of info to get trans
bti_ch_names = list()
for ch in bti_info['chs']:
ch_name = ch['name']
if not ch_name.startswith('A'):
ch_name = ch.get('chan_label', ch_name)
bti_ch_names.append(ch_name)
neuromag_ch_names = _rename_channels(
bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch)
ch_mapping = zip(bti_ch_names, neuromag_ch_names)
# add some nan in some locations!
dev_ctf_t['trans'][:, 3] = np.nan
_check_nan_dev_head_t(dev_ctf_t)
for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping):
loc = bti_info['chs'][idx]['loc']
if loc is not None:
if convert:
t = _loc_to_coil_trans(bti_info['chs'][idx]['loc'])
t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t)
@testing.requires_testing_data
@pytest.mark.parametrize('fname', (fname_sim, fname_sim_filt))
@pytest.mark.parametrize('preload', (True, False))
def test_bti_ch_data(fname, preload):
"""Test for gh-6048."""
read_raw_bti(fname, preload=preload) # used to fail with ascii decode err
run_tests_if_main()
|
from test import CollectorTestCase
from test import get_collector_config
from test import run_only
from test import unittest
from mock import patch
from pgbouncer import PgbouncerCollector
##########################################################################
def run_only_if_psycopg2_is_available(func):
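    """Skip the decorated test unless psycopg2 can be imported."""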
try:
import psycopg2
except ImportError:
psycopg2 = None
pred = lambda: psycopg2 is not None
return run_only(func, pred)
class TestPgbouncerCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('PgbouncerCollector', {})
self.collector = PgbouncerCollector(config, None)
def test_import(self):
self.assertTrue(PgbouncerCollector)
@run_only_if_psycopg2_is_available
@patch.object(PgbouncerCollector, '_get_stats_by_database')
@patch.object(PgbouncerCollector, 'publish')
def test_default(self, publish, _get_stats_by_database):
_get_stats_by_database.return_value = {'foo': {'bar': 42}}
self.collector.collect()
_get_stats_by_database.assert_called_with(
'localhost', '6432', 'postgres', '')
self.assertPublished(publish, 'default.foo.bar', 42)
@run_only_if_psycopg2_is_available
@patch.object(PgbouncerCollector, '_get_stats_by_database')
@patch.object(PgbouncerCollector, 'publish')
def test_instance_names(self, publish, _get_stats_by_database):
def side_effect(host, port, user, password):
if (host, port) == ('127.0.0.1', '6432'):
return {'foo': {'bar': 42}}
elif (host, port) == ('localhost', '6433'):
return {'foo': {'baz': 24}}
_get_stats_by_database.side_effect = side_effect
config = get_collector_config('PgbouncerCollector', {
'instances': {
'alpha': {
'host': '127.0.0.1',
'port': '6432',
},
'beta': {
'host': 'localhost',
'port': '6433',
},
}
})
collector = PgbouncerCollector(config, None)
collector.collect()
self.assertPublished(publish, 'alpha.foo.bar', 42)
self.assertPublished(publish, 'beta.foo.baz', 24)
@run_only_if_psycopg2_is_available
@patch.object(PgbouncerCollector, '_get_stats_by_database')
def test_override_user_password(self, _get_stats_by_database):
_get_stats_by_database.return_value = {}
config = get_collector_config('PgbouncerCollector', {
'instances': {
'test1': {
'host': '127.0.0.1',
'port': '6433',
'password': 'foobar',
},
'test2': {
'host': '127.0.0.2',
'port': '6432',
'user': 'pgbouncer',
}
}
})
collector = PgbouncerCollector(config, None)
collector.collect()
_get_stats_by_database.assert_any_call(
'127.0.0.1', '6433', 'postgres', 'foobar')
_get_stats_by_database.assert_any_call(
'127.0.0.2', '6432', 'pgbouncer', '')
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import WiLightDevice
from .const import (
DOMAIN,
ITEM_LIGHT,
LIGHT_COLOR,
LIGHT_DIMMER,
LIGHT_ON_OFF,
SUPPORT_NONE,
)
def entities_from_discovered_wilight(hass, api_device):
"""Parse configuration and add WiLight light entities."""
entities = []
for item in api_device.items:
if item["type"] != ITEM_LIGHT:
continue
index = item["index"]
item_name = item["name"]
if item["sub_type"] == LIGHT_ON_OFF:
entity = WiLightLightOnOff(api_device, index, item_name)
elif item["sub_type"] == LIGHT_DIMMER:
entity = WiLightLightDimmer(api_device, index, item_name)
elif item["sub_type"] == LIGHT_COLOR:
entity = WiLightLightColor(api_device, index, item_name)
else:
continue
entities.append(entity)
return entities
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up WiLight lights from a config entry."""
parent = hass.data[DOMAIN][entry.entry_id]
# Handle a discovered WiLight device.
entities = entities_from_discovered_wilight(hass, parent.api)
async_add_entities(entities)
class WiLightLightOnOff(WiLightDevice, LightEntity):
"""Representation of a WiLights light on-off."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_NONE
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
class WiLightLightDimmer(WiLightDevice, LightEntity):
"""Representation of a WiLights light dimmer."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int(self._status.get("brightness", 0))
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on,set brightness if needed."""
# Dimmer switches use a range of [0, 255] to control
# brightness. Level 255 might mean to set it to previous value
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
await self._client.set_brightness(self._index, brightness)
else:
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
def wilight_to_hass_hue(value):
"""Convert wilight hue 1..255 to hass 0..360 scale."""
return min(360, round((value * 360) / 255, 3))
def hass_to_wilight_hue(value):
"""Convert hass hue 0..360 to wilight 1..255 scale."""
return min(255, round((value * 255) / 360))
def wilight_to_hass_saturation(value):
"""Convert wilight saturation 1..255 to hass 0..100 scale."""
return min(100, round((value * 100) / 255, 3))
def hass_to_wilight_saturation(value):
"""Convert hass saturation 0..100 to wilight 1..255 scale."""
return min(255, round((value * 255) / 100))
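# Illustrative round trips for the conversions above (worked examples, not from
# the original source):
#   wilight_to_hass_hue(255) == 360.0          hass_to_wilight_hue(360) == 255
#   wilight_to_hass_saturation(255) == 100.0   hass_to_wilight_saturation(100) == 255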
class WiLightLightColor(WiLightDevice, LightEntity):
"""Representation of a WiLights light rgb."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int(self._status.get("brightness", 0))
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return [
wilight_to_hass_hue(int(self._status.get("hue", 0))),
wilight_to_hass_saturation(int(self._status.get("saturation", 0))),
]
@property
def is_on(self):
"""Return true if device is on."""
return self._status.get("on")
async def async_turn_on(self, **kwargs):
"""Turn the device on,set brightness if needed."""
# Brightness use a range of [0, 255] to control
# Hue use a range of [0, 360] to control
# Saturation use a range of [0, 100] to control
if ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
hue = hass_to_wilight_hue(kwargs[ATTR_HS_COLOR][0])
saturation = hass_to_wilight_saturation(kwargs[ATTR_HS_COLOR][1])
await self._client.set_hsb_color(self._index, hue, saturation, brightness)
elif ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR not in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
await self._client.set_brightness(self._index, brightness)
elif ATTR_BRIGHTNESS not in kwargs and ATTR_HS_COLOR in kwargs:
hue = hass_to_wilight_hue(kwargs[ATTR_HS_COLOR][0])
saturation = hass_to_wilight_saturation(kwargs[ATTR_HS_COLOR][1])
await self._client.set_hs_color(self._index, hue, saturation)
else:
await self._client.turn_on(self._index)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._client.turn_off(self._index)
|
import datetime
from typing import TYPE_CHECKING, Optional, Union
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
from homeassistant.core import callback
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from .typing import HomeAssistantType
if TYPE_CHECKING:
import astral # pylint: disable=unused-import
DATA_LOCATION_CACHE = "astral_location_cache"
@callback
@bind_hass
def get_astral_location(hass: HomeAssistantType) -> "astral.Location":
"""Get an astral location for the current Home Assistant configuration."""
from astral import Location # pylint: disable=import-outside-toplevel
latitude = hass.config.latitude
longitude = hass.config.longitude
timezone = str(hass.config.time_zone)
elevation = hass.config.elevation
info = ("", "", latitude, longitude, timezone, elevation)
# Cache astral locations so they aren't recreated with the same args
if DATA_LOCATION_CACHE not in hass.data:
hass.data[DATA_LOCATION_CACHE] = {}
if info not in hass.data[DATA_LOCATION_CACHE]:
hass.data[DATA_LOCATION_CACHE][info] = Location(info)
return hass.data[DATA_LOCATION_CACHE][info]
@callback
@bind_hass
def get_astral_event_next(
hass: HomeAssistantType,
event: str,
utc_point_in_time: Optional[datetime.datetime] = None,
offset: Optional[datetime.timedelta] = None,
) -> datetime.datetime:
"""Calculate the next specified solar event."""
location = get_astral_location(hass)
return get_location_astral_event_next(location, event, utc_point_in_time, offset)
@callback
def get_location_astral_event_next(
location: "astral.Location",
event: str,
utc_point_in_time: Optional[datetime.datetime] = None,
offset: Optional[datetime.timedelta] = None,
) -> datetime.datetime:
"""Calculate the next specified solar event."""
from astral import AstralError # pylint: disable=import-outside-toplevel
if offset is None:
offset = datetime.timedelta()
if utc_point_in_time is None:
utc_point_in_time = dt_util.utcnow()
mod = -1
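    # Start one day in the past and walk forward a day at a time until the
    # event lands strictly after utc_point_in_time; AstralError means the
    # event does not occur on that date (e.g. polar day/night), so skip it.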
while True:
try:
next_dt: datetime.datetime = (
getattr(location, event)(
dt_util.as_local(utc_point_in_time).date()
+ datetime.timedelta(days=mod),
local=False,
)
+ offset
)
if next_dt > utc_point_in_time:
return next_dt
except AstralError:
pass
mod += 1
@callback
@bind_hass
def get_astral_event_date(
hass: HomeAssistantType,
event: str,
date: Union[datetime.date, datetime.datetime, None] = None,
) -> Optional[datetime.datetime]:
"""Calculate the astral event time for the specified date."""
from astral import AstralError # pylint: disable=import-outside-toplevel
location = get_astral_location(hass)
if date is None:
date = dt_util.now().date()
if isinstance(date, datetime.datetime):
date = dt_util.as_local(date).date()
try:
return getattr(location, event)(date, local=False) # type: ignore
except AstralError:
# Event never occurs for specified date.
return None
@callback
@bind_hass
def is_up(
hass: HomeAssistantType, utc_point_in_time: Optional[datetime.datetime] = None
) -> bool:
"""Calculate if the sun is currently up."""
if utc_point_in_time is None:
utc_point_in_time = dt_util.utcnow()
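    # The sun is up exactly when the next sunset comes before the next
    # sunrise (i.e. the sunrise we are waiting for is tomorrow's).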
next_sunrise = get_astral_event_next(hass, SUN_EVENT_SUNRISE, utc_point_in_time)
next_sunset = get_astral_event_next(hass, SUN_EVENT_SUNSET, utc_point_in_time)
return next_sunrise > next_sunset
|
import os
import unittest
from unittest import mock
def get_test_path(file_path):
"""
return the path of a file with "Tests" depending of the current location of the execution
:return: string path
"""
current_path = os.getcwd()
if "/Tests" in current_path:
return current_path + os.sep + file_path
else:
return current_path + os.sep + "Tests" + os.sep + file_path
class TestTestUtils(unittest.TestCase):
def test_get_test_path(self):
# Tests is in path
with mock.patch('Tests.utils.utils.os.getcwd', return_value='/home/user/Documents/kalliope/Tests'):
expected = "/home/user/Documents/kalliope/Tests/file"
self.assertEqual(expected, get_test_path("file"))
# Tests not in path
with mock.patch('Tests.utils.utils.os.getcwd', return_value='/home/user/Documents/kalliope'):
expected = "/home/user/Documents/kalliope/Tests/file"
self.assertEqual(expected, get_test_path("file"))
|
from datetime import timedelta
import logging
from locationsharinglib import Service
from locationsharinglib.locationsharinglibexceptions import InvalidCookies
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA, SOURCE_TYPE_GPS
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
ATTR_ID,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_time_interval
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt as dt_util, slugify
_LOGGER = logging.getLogger(__name__)
ATTR_ADDRESS = "address"
ATTR_FULL_NAME = "full_name"
ATTR_LAST_SEEN = "last_seen"
ATTR_NICKNAME = "nickname"
CONF_MAX_GPS_ACCURACY = "max_gps_accuracy"
CREDENTIALS_FILE = ".google_maps_location_sharing.cookies"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_MAX_GPS_ACCURACY, default=100000): vol.Coerce(float),
}
)
def setup_scanner(hass, config: ConfigType, see, discovery_info=None):
"""Set up the Google Maps Location sharing scanner."""
scanner = GoogleMapsScanner(hass, config, see)
return scanner.success_init
class GoogleMapsScanner:
"""Representation of an Google Maps location sharing account."""
def __init__(self, hass, config: ConfigType, see) -> None:
"""Initialize the scanner."""
self.see = see
self.username = config[CONF_USERNAME]
self.max_gps_accuracy = config[CONF_MAX_GPS_ACCURACY]
self.scan_interval = config.get(CONF_SCAN_INTERVAL) or timedelta(seconds=60)
self._prev_seen = {}
credfile = f"{hass.config.path(CREDENTIALS_FILE)}.{slugify(self.username)}"
try:
self.service = Service(credfile, self.username)
self._update_info()
track_time_interval(hass, self._update_info, self.scan_interval)
self.success_init = True
except InvalidCookies:
_LOGGER.error(
"The cookie file provided does not provide a valid session. Please create another one and try again"
)
self.success_init = False
def _update_info(self, now=None):
for person in self.service.get_all_people():
try:
dev_id = f"google_maps_{slugify(person.id)}"
except TypeError:
_LOGGER.warning("No location(s) shared with this account")
return
if (
self.max_gps_accuracy is not None
and person.accuracy > self.max_gps_accuracy
):
_LOGGER.info(
"Ignoring %s update because expected GPS "
"accuracy %s is not met: %s",
person.nickname,
self.max_gps_accuracy,
person.accuracy,
)
continue
last_seen = dt_util.as_utc(person.datetime)
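            # Drop updates whose timestamp is older than the newest update
            # already recorded for this device.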
if last_seen < self._prev_seen.get(dev_id, last_seen):
_LOGGER.warning(
"Ignoring %s update because timestamp "
"is older than last timestamp",
person.nickname,
)
_LOGGER.debug("%s < %s", last_seen, self._prev_seen[dev_id])
continue
self._prev_seen[dev_id] = last_seen
attrs = {
ATTR_ADDRESS: person.address,
ATTR_FULL_NAME: person.full_name,
ATTR_ID: person.id,
ATTR_LAST_SEEN: last_seen,
ATTR_NICKNAME: person.nickname,
ATTR_BATTERY_CHARGING: person.charging,
ATTR_BATTERY_LEVEL: person.battery_level,
}
self.see(
dev_id=dev_id,
gps=(person.latitude, person.longitude),
picture=person.picture_url,
source_type=SOURCE_TYPE_GPS,
gps_accuracy=person.accuracy,
attributes=attrs,
)
|
import pytest
from mock import patch, sentinel, Mock, MagicMock
from pymongo.errors import AutoReconnect, OperationFailure, DuplicateKeyError, ServerSelectionTimeoutError
from arctic.decorators import mongo_retry, _get_host
from arctic.hooks import register_log_exception_hook
def test_mongo_retry():
retries = [2]
self = MagicMock()
self._arctic_lib.arctic.mongo_host = sentinel.host
self._collection.database.client.nodes = set([('a', 12)])
self._arctic_lib.get_name.return_value = sentinel.lib_name
with patch('arctic.decorators._handle_error', autospec=True) as he:
@mongo_retry
def foo(self):
if retries[0] == 2:
retries[0] -= 1
raise OperationFailure('error')
elif retries[0] == 1:
retries[0] -= 1
raise AutoReconnect('error')
return "success"
foo(self)
assert he.call_count == 2
assert isinstance(he.call_args_list[0][0][1], OperationFailure)
assert he.call_args_list[0][0][2] == 1
assert he.call_args_list[0][1] == {'mnodes': ['a:12'],
'mhost': 'sentinel.host',
'l': sentinel.lib_name}
assert isinstance(he.call_args_list[1][0][1], AutoReconnect)
assert he.call_args_list[1][0][2] == 2
def test_mongo_retry_hook_changes():
retries = [2]
self = MagicMock()
hook1 = Mock()
register_log_exception_hook(hook1)
hook2 = Mock()
@mongo_retry
def foo(self):
if retries[0] == 2:
retries[0] -= 1
raise OperationFailure('error')
elif retries[0] == 1:
register_log_exception_hook(hook2)
retries[0] -= 1
raise AutoReconnect('error')
return "success"
foo(self)
assert hook1.call_count == 1
assert hook2.call_count == 1
def test_mongo_retry_fails():
error = OperationFailure('error')
retries = [16]
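    # With 16 consecutive failures the decorator exhausts its retries: the
    # assertions below expect 15 logged attempts before the error propagates.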
with patch('arctic.decorators._log_exception', autospec=True) as le:
@mongo_retry
def foo():
if retries[0]:
retries[0] -= 1
raise error
return "success"
with pytest.raises(OperationFailure):
foo()
assert le.call_count == 15
assert le.call_args[0][0] == 'foo'
assert le.call_args[0][1] == error
def test_retry_nested():
error = OperationFailure('error')
with patch('arctic.decorators._log_exception', autospec=True) as le:
@mongo_retry
def foo():
@mongo_retry
def bar():
raise error
try:
bar()
except:
raise error
with pytest.raises(OperationFailure):
foo()
assert le.call_count == 15
assert le.call_args[0][0] == 'bar'
assert le.call_args[0][1] == error
def test_all_other_exceptions_logged():
with patch('arctic.decorators._log_exception', autospec=True) as le:
def foo():
raise Exception("Unexpected Error")
foo.__module__ = 'arctic.foo'
foo = mongo_retry(foo)
with pytest.raises(Exception) as e:
foo()
assert "Unexpected Error" in str(e.value)
assert le.call_count == 1
assert le.call_args[0][0] == "foo"
def test_other_exceptions_not_logged_outside_of_arctic():
with patch('arctic.decorators._log_exception', autospec=True) as le:
@mongo_retry
def foo():
raise Exception("Unexpected Error")
with pytest.raises(Exception) as e:
foo()
assert "Unexpected Error" in str(e.value)
assert le.call_count == 0
@pytest.mark.xfail(reason="CS-8393 Mongo server reports auth failure when servers flip")
def test_auth_failure_no_retry():
error = OperationFailure('unauthorized for db:arctic_jblackburn')
with patch('arctic.decorators._log_exception', autospec=True) as le:
@mongo_retry
def foo():
raise error
with pytest.raises(OperationFailure) as e:
foo()
assert 'OperationFailure: unauthorized for db:arctic_jblackburn' in str(e.value)
assert le.call_count == 1
def test_duplicate_key_failure_no_retry():
error = DuplicateKeyError('duplicate key')
with patch('arctic.decorators._log_exception', autospec=True) as le:
@mongo_retry
def foo():
raise error
with pytest.raises(OperationFailure) as e:
foo()
assert 'duplicate key' in str(e.value)
assert le.call_count == 1
def test_ServerSelectionTimeoutError_no_retry():
error = ServerSelectionTimeoutError('some error')
with patch('arctic.decorators._log_exception', autospec=True) as le:
@mongo_retry
def foo():
raise error
with pytest.raises(ServerSelectionTimeoutError) as e:
foo()
assert 'some error' in str(e.value)
assert le.call_count == 1
def test_get_host():
store = Mock()
store._arctic_lib.arctic.mongo_host = sentinel.host
store._collection.database.client.nodes = set([('a', 12)])
store._arctic_lib.get_name.return_value = sentinel.lib_name
assert _get_host(store) == {'mhost': 'sentinel.host',
'mnodes': ['a:12'],
'l': sentinel.lib_name,
}
def test_get_host_list():
store = Mock()
store._arctic_lib.arctic.mongo_host = sentinel.host
store._collection.database.client.nodes = set([('a', 12)])
store._arctic_lib.get_name.return_value = sentinel.lib_name
assert _get_host([store]) == {'mhost': 'sentinel.host',
'mnodes': ['a:12'],
'l': sentinel.lib_name,
}
def test_get_host_not_a_vs():
store = MagicMock()
store._arctic_lib.get_name.side_effect = AttributeError("Hello")
assert _get_host(store) == {}
store._arctic_lib.get_name.side_effect = ValueError("Hello")
assert _get_host(store) == {}
|
from typing import Any
from homeassistant.components.scene import Scene
from homeassistant.helpers import entity_platform
from . import UpbEntity
from .const import DOMAIN, UPB_BLINK_RATE_SCHEMA, UPB_BRIGHTNESS_RATE_SCHEMA
SERVICE_LINK_DEACTIVATE = "link_deactivate"
SERVICE_LINK_FADE_STOP = "link_fade_stop"
SERVICE_LINK_GOTO = "link_goto"
SERVICE_LINK_FADE_START = "link_fade_start"
SERVICE_LINK_BLINK = "link_blink"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the UPB link based on a config entry."""
upb = hass.data[DOMAIN][config_entry.entry_id]["upb"]
unique_id = config_entry.entry_id
async_add_entities(UpbLink(upb.links[link], unique_id, upb) for link in upb.links)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_LINK_DEACTIVATE, {}, "async_link_deactivate"
)
platform.async_register_entity_service(
SERVICE_LINK_FADE_STOP, {}, "async_link_fade_stop"
)
platform.async_register_entity_service(
SERVICE_LINK_GOTO, UPB_BRIGHTNESS_RATE_SCHEMA, "async_link_goto"
)
platform.async_register_entity_service(
SERVICE_LINK_FADE_START, UPB_BRIGHTNESS_RATE_SCHEMA, "async_link_fade_start"
)
platform.async_register_entity_service(
SERVICE_LINK_BLINK, UPB_BLINK_RATE_SCHEMA, "async_link_blink"
)
class UpbLink(UpbEntity, Scene):
"""Representation of an UPB Link."""
async def async_activate(self, **kwargs: Any) -> None:
"""Activate the task."""
self._element.activate()
async def async_link_deactivate(self):
"""Activate the task."""
self._element.deactivate()
async def async_link_goto(self, rate, brightness=None, brightness_pct=None):
"""Activate the task."""
if brightness is not None:
brightness_pct = round(brightness / 2.55)
self._element.goto(brightness_pct, rate)
async def async_link_fade_start(self, rate, brightness=None, brightness_pct=None):
"""Start dimming a link."""
if brightness is not None:
brightness_pct = round(brightness / 2.55)
self._element.fade_start(brightness_pct, rate)
async def async_link_fade_stop(self):
"""Stop dimming a link."""
self._element.fade_stop()
async def async_link_blink(self, blink_rate):
"""Blink a link."""
blink_rate = int(blink_rate * 60)
self._element.blink(blink_rate)
|
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import wrk2
FLAGS = flags.FLAGS
_FLAG_FORMAT_DESCRIPTION = (
'The format is "target_request_rate:duration:threads:connections", with '
'each value being per client (so running with 2 clients would double the '
'target rate, threads, and connections (but not duration since they are '
'run concurrently)). The target request rate is measured in requests per '
'second and the duration is measured in seconds. Increasing the duration '
'or connections does not impact the aggregate target rate for the client.')
flags.DEFINE_string('nginx_conf', None,
'The path to an Nginx config file that should be applied '
'to the server instead of the default one.')
flags.DEFINE_integer('nginx_content_size', 1024,
'The size of the content Nginx will serve in bytes. '
                     'Larger files stress the network between the VMs.')
flags.DEFINE_list('nginx_load_configs', ['100:60:1:1'],
'For each load spec in the list, wrk2 will be run once '
'against Nginx with those parameters. ' +
_FLAG_FORMAT_DESCRIPTION)
flags.DEFINE_boolean('nginx_throttle', False,
'If True, skip running the nginx_load_configs and run '
'wrk2 once aiming to throttle the nginx server.')
flags.DEFINE_string('nginx_client_machine_type', None,
'Machine type to use for the wrk2 client if different '
'from nginx server machine type.')
flags.DEFINE_string('nginx_server_machine_type', None,
'Machine type to use for the nginx server if different '
'from wrk2 client machine type.')
def _ValidateLoadConfigs(load_configs):
"""Validate that each load config has all required values."""
if not load_configs:
return False
for config in load_configs:
config_values = config.split(':')
if len(config_values) != 4:
return False
for value in config_values:
if not (value.isdigit() and int(value) > 0):
return False
return True
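# Illustrative checks (worked examples, not from the original source):
#   _ValidateLoadConfigs(['100:60:1:1']) is True, while a config with a
#   missing field ('100:60:1') or a non-positive value ('100:0:1:1') fails.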
flags.register_validator(
'nginx_load_configs', _ValidateLoadConfigs,
'Malformed load config. ' + _FLAG_FORMAT_DESCRIPTION)
BENCHMARK_NAME = 'nginx'
BENCHMARK_CONFIG = """
nginx:
description: Benchmarks Nginx server performance.
vm_groups:
clients:
vm_spec: *default_single_core
vm_count: null
server:
vm_spec: *default_dual_core
"""
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS.nginx_client_machine_type:
vm_spec = config['vm_groups']['clients']['vm_spec']
vm_spec[FLAGS.cloud]['machine_type'] = FLAGS.nginx_client_machine_type
if FLAGS.nginx_server_machine_type:
vm_spec = config['vm_groups']['server']['vm_spec']
vm_spec[FLAGS.cloud]['machine_type'] = FLAGS.nginx_server_machine_type
return config
def _ConfigureNginx(server):
content_path = '/var/www/html/random_content'
server.RemoteCommand('sudo mkdir -p /var/www/html') # create folder if needed
server.RemoteCommand('sudo dd bs=1 count=%s if=/dev/urandom of=%s' %
(FLAGS.nginx_content_size, content_path))
if FLAGS.nginx_conf:
server.PushDataFile(FLAGS.nginx_conf)
server.RemoteCommand('sudo cp %s /etc/nginx/nginx.conf' % FLAGS.nginx_conf)
server.RemoteCommand('sudo service nginx restart')
def Prepare(benchmark_spec):
"""Install Nginx on the server and a load generator on the clients.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
clients = benchmark_spec.vm_groups['clients']
server = benchmark_spec.vm_groups['server'][0]
server.Install('nginx')
_ConfigureNginx(server)
vm_util.RunThreaded(lambda vm: vm.Install('wrk2'), clients)
def _RunMultiClient(clients, target, rate, connections, duration, threads):
"""Run multiple instances of wrk2 against a single target."""
results = []
num_clients = len(clients)
def _RunSingleClient(client, client_number):
"""Run wrk2 from a single client."""
client_results = list(wrk2.Run(
client, target, rate, connections=connections,
duration=duration, threads=threads))
for result in client_results:
result.metadata.update({'client_number': client_number})
results.extend(client_results)
args = [((client, i), {}) for i, client in enumerate(clients)]
vm_util.RunThreaded(_RunSingleClient, args)
requests = 0
errors = 0
max_latency = 0.0
# TODO(ehankland): Since wrk2 keeps an HDR histogram of latencies, we should
# be able to merge them and compute aggregate percentiles.
for result in results:
if result.metric == 'requests':
requests += result.value
elif result.metric == 'errors':
errors += result.value
elif result.metric == 'p100 latency':
max_latency = max(max_latency, result.value)
error_rate = errors / requests
metadata = {
'connections': connections * num_clients,
'threads': threads * num_clients,
'duration': duration,
'target_rate': rate * num_clients,
'nginx_throttle': FLAGS.nginx_throttle,
}
results += [
sample.Sample('achieved_rate', requests / duration, '', metadata),
sample.Sample('aggregate requests', requests, '', metadata),
sample.Sample('aggregate errors', errors, '', metadata),
sample.Sample('aggregate error_rate', error_rate, '', metadata),
sample.Sample('aggregate p100 latency', max_latency, '', metadata)
]
return results
def Run(benchmark_spec):
"""Run a benchmark against the Nginx server.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
clients = benchmark_spec.vm_groups['clients']
server = benchmark_spec.vm_groups['server'][0]
results = []
target = 'http://%s/random_content' % server.internal_ip
if FLAGS.nginx_throttle:
return _RunMultiClient(
clients,
target,
rate=1000000, # 1M aggregate requests/sec should max out requests.
connections=clients[0].NumCpusForBenchmark() * 10,
duration=60,
threads=clients[0].NumCpusForBenchmark())
for config in FLAGS.nginx_load_configs:
rate, duration, threads, connections = list(map(int, config.split(':')))
results += _RunMultiClient(clients, target, rate,
connections, duration, threads)
return results
def Cleanup(benchmark_spec):
"""Cleanup Nginx and load generators.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
del benchmark_spec
|
import os
import time
import cloudpickle
import numpy as np
import spacy
import tensorflow as tf
from sklearn.preprocessing import LabelBinarizer
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
np.random.seed(1)
class TfIntentClassifier:
def __init__(self):
self.model = None
self.nlp = spacy.load('en')
self.label_encoder = LabelBinarizer()
self.graph = None
def train(self, X, y, models_dir=None, verbose=True):
"""
Train intent classifier for given training data
:param X:
:param y:
:param models_dir:
:param verbose:
:return:
"""
def create_model():
"""
Define and return tensorflow model.
"""
model = Sequential()
model.add(Dense(256, activation=tf.nn.relu,
input_shape=(vocab_size,)))
model.add(Dropout(0.2))
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(num_labels, activation=tf.nn.softmax))
"""
tried:
loss functions => categorical_crossentropy, binary_crossentropy
optimizers => adam, rmsprop
"""
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
return model
# spacy context vector size
vocab_size = 384
# create spacy doc vector matrix
x_train = np.array([list(self.nlp(x).vector) for x in X])
num_labels = len(set(y))
self.label_encoder.fit(y)
y_train = self.label_encoder.transform(y)
del self.model
tf.keras.backend.clear_session()
time.sleep(3)
self.model = create_model()
# start training
self.model.fit(x_train, y_train, shuffle=True, epochs=300, verbose=1)
if models_dir:
tf.keras.models.save_model(
self.model,
os.path.join(models_dir, "tf_intent_model.hd5")
)
if verbose:
print("TF Model written out to {}"
.format(os.path.join(models_dir, "tf_intent_model.hd5")))
cloudpickle.dump(self.label_encoder, open(
os.path.join(models_dir, "labels.pkl"), 'wb'))
if verbose:
print("Labels written out to {}"
.format(os.path.join(models_dir, "labels.pkl")))
def load(self, models_dir):
try:
del self.model
tf.keras.backend.clear_session()
self.model = tf.keras.models.load_model(
os.path.join(models_dir, "tf_intent_model.hd5"), compile=True)
self.graph = tf.get_default_graph()
print("Tf model loaded")
with open(os.path.join(models_dir, "labels.pkl"), 'rb') as f:
self.label_encoder = cloudpickle.load(f)
print("Labels model loaded")
except IOError:
return False
def predict(self, text):
"""
Predict class label for given model
:param text:
:return:
"""
return self.process(text)
def predict_proba(self, x):
"""Given a bow vector of an input text, predict most probable label.
Returns only the most likely label.
:param x: raw input text
:return: tuple of first, the most probable label and second,
its probability"""
x_predict = [self.nlp(x).vector]
with self.graph.as_default():
pred_result = self.model.predict(np.array([x_predict[0]]))
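        # np.argsort sorts ascending, so np.fliplr reverses each row to put
        # the most probable label index first.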
sorted_indices = np.fliplr(np.argsort(pred_result, axis=1))
return sorted_indices, pred_result[:, sorted_indices]
def process(self, x, return_type="intent", INTENT_RANKING_LENGTH=5):
"""Returns the most likely intent and
its probability for the input text."""
if not self.model:
print("no class")
intent = None
intent_ranking = []
else:
intents, probabilities = self.predict_proba(x)
intents = [self.label_encoder.classes_[intent]
for intent in intents.flatten()]
probabilities = probabilities.flatten()
if len(intents) > 0 and len(probabilities) > 0:
ranking = list(zip(list(intents), list(probabilities)))
ranking = ranking[:INTENT_RANKING_LENGTH]
intent = {"intent": intents[0],
"confidence": float("%.2f" % probabilities[0])}
intent_ranking = [{"intent": intent_name,
"confidence": float("%.2f" % score)}
for intent_name, score in ranking]
else:
intent = {"name": None, "confidence": 0.0}
intent_ranking = []
if return_type == "intent":
return intent
else:
return intent_ranking
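# Minimal usage sketch (hypothetical data, not part of the original module):
#   clf = TfIntentClassifier()
#   clf.train(["hello there", "see you later"], ["greet", "goodbye"])
#   print(clf.process("hi"))  # e.g. {'intent': 'greet', 'confidence': 0.97}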
|
from kombu.mixins import ConsumerMixin
from kombu.log import get_logger
from kombu.utils.functional import reprcall
from .queues import task_queues
logger = get_logger(__name__)
class Worker(ConsumerMixin):
def __init__(self, connection):
self.connection = connection
def get_consumers(self, Consumer, channel):
return [Consumer(queues=task_queues,
accept=['pickle', 'json'],
callbacks=[self.process_task])]
def process_task(self, body, message):
fun = body['fun']
args = body['args']
kwargs = body['kwargs']
logger.info('Got task: %s', reprcall(fun.__name__, args, kwargs))
try:
fun(*args, **kwargs)
except Exception as exc:
logger.error('task raised exception: %r', exc)
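        # NOTE: the message is acked even if the task raised, so failed
        # tasks are not redelivered.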
message.ack()
if __name__ == '__main__':
from kombu import Connection
from kombu.utils.debug import setup_logging
# setup root logger
setup_logging(loglevel='INFO', loggers=[''])
with Connection('amqp://guest:guest@localhost:5672//') as conn:
try:
worker = Worker(conn)
worker.run()
except KeyboardInterrupt:
print('bye bye')
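# Hedged sketch of the companion `queues.py` imported above (queue/exchange
# names are assumptions, not shown in this file):
#   from kombu import Exchange, Queue
#   task_exchange = Exchange('tasks', type='direct')
#   task_queues = [Queue('hipri', task_exchange, routing_key='hipri')]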
|
import io
import pytest
import warnings
import socket
from time import monotonic
from unittest.mock import MagicMock, Mock, patch
from kombu import Connection
from kombu.compression import compress
from kombu.exceptions import ResourceError, ChannelError
from kombu.transport import virtual
from kombu.utils.uuid import uuid
PRINT_FQDN = 'builtins.print'
def client(**kwargs):
return Connection(transport='kombu.transport.virtual:Transport', **kwargs)
def memory_client():
return Connection(transport='memory')
def test_BrokerState():
s = virtual.BrokerState()
assert hasattr(s, 'exchanges')
t = virtual.BrokerState(exchanges=16)
assert t.exchanges == 16
class test_QoS:
def setup(self):
self.q = virtual.QoS(client().channel(), prefetch_count=10)
def teardown(self):
self.q._on_collect.cancel()
def test_constructor(self):
assert self.q.channel
assert self.q.prefetch_count
assert not self.q._delivered.restored
assert self.q._on_collect
def test_restore_visible__interface(self):
qos = virtual.QoS(client().channel())
qos.restore_visible()
def test_can_consume(self, stdouts):
stderr = io.StringIO()
_restored = []
class RestoreChannel(virtual.Channel):
do_restore = True
def _restore(self, message):
_restored.append(message)
assert self.q.can_consume()
for i in range(self.q.prefetch_count - 1):
self.q.append(i, uuid())
assert self.q.can_consume()
self.q.append(i + 1, uuid())
assert not self.q.can_consume()
tag1 = next(iter(self.q._delivered))
self.q.ack(tag1)
assert self.q.can_consume()
tag2 = uuid()
self.q.append(i + 2, tag2)
assert not self.q.can_consume()
self.q.reject(tag2)
assert self.q.can_consume()
self.q.channel = RestoreChannel(self.q.channel.connection)
tag3 = uuid()
self.q.append(i + 3, tag3)
self.q.reject(tag3, requeue=True)
self.q._flush()
assert self.q._delivered
assert not self.q._delivered.restored
self.q.restore_unacked_once(stderr=stderr)
assert _restored == [11, 9, 8, 7, 6, 5, 4, 3, 2, 1]
assert self.q._delivered.restored
assert not self.q._delivered
self.q.restore_unacked_once(stderr=stderr)
self.q._delivered.restored = False
self.q.restore_unacked_once(stderr=stderr)
assert stderr.getvalue()
assert not stdouts.stdout.getvalue()
self.q.restore_at_shutdown = False
self.q.restore_unacked_once()
def test_get(self):
self.q._delivered['foo'] = 1
assert self.q.get('foo') == 1
class test_Message:
def test_create(self):
c = client().channel()
data = c.prepare_message('the quick brown fox...')
tag = data['properties']['delivery_tag'] = uuid()
message = c.message_to_python(data)
assert isinstance(message, virtual.Message)
assert message is c.message_to_python(message)
if message.errors:
message._reraise_error()
assert message.body == b'the quick brown fox...'
        assert message.delivery_tag == tag
def test_create_no_body(self):
virtual.Message(channel=Mock(), payload={
'body': None,
'properties': {'delivery_tag': 1},
})
def test_serializable(self):
c = client().channel()
body, content_type = compress('the quick brown fox...', 'gzip')
data = c.prepare_message(body, headers={'compression': content_type})
tag = data['properties']['delivery_tag'] = uuid()
message = c.message_to_python(data)
dict_ = message.serializable()
assert dict_['body'] == b'the quick brown fox...'
assert dict_['properties']['delivery_tag'] == tag
assert 'compression' not in dict_['headers']
class test_AbstractChannel:
def test_get(self):
with pytest.raises(NotImplementedError):
virtual.AbstractChannel()._get('queue')
def test_put(self):
with pytest.raises(NotImplementedError):
virtual.AbstractChannel()._put('queue', 'm')
def test_size(self):
assert virtual.AbstractChannel()._size('queue') == 0
def test_purge(self):
with pytest.raises(NotImplementedError):
virtual.AbstractChannel()._purge('queue')
def test_delete(self):
with pytest.raises(NotImplementedError):
virtual.AbstractChannel()._delete('queue')
def test_new_queue(self):
assert virtual.AbstractChannel()._new_queue('queue') is None
def test_has_queue(self):
assert virtual.AbstractChannel()._has_queue('queue')
def test_poll(self):
cycle = Mock(name='cycle')
assert virtual.AbstractChannel()._poll(cycle, Mock())
cycle.get.assert_called()
class test_Channel:
def setup(self):
self.channel = client().channel()
def teardown(self):
if self.channel._qos is not None:
self.channel._qos._on_collect.cancel()
def test_exceeds_channel_max(self):
c = client()
t = c.transport
avail = t._avail_channel_ids = Mock(name='_avail_channel_ids')
avail.pop.side_effect = IndexError()
with pytest.raises(ResourceError):
virtual.Channel(t)
def test_exchange_bind_interface(self):
with pytest.raises(NotImplementedError):
self.channel.exchange_bind('dest', 'src', 'key')
def test_exchange_unbind_interface(self):
with pytest.raises(NotImplementedError):
self.channel.exchange_unbind('dest', 'src', 'key')
def test_queue_unbind_interface(self):
self.channel.queue_unbind('dest', 'ex', 'key')
def test_management(self):
m = self.channel.connection.client.get_manager()
assert m
m.get_bindings()
m.close()
def test_exchange_declare(self):
c = self.channel
with pytest.raises(ChannelError):
c.exchange_declare('test_exchange_declare', 'direct',
durable=True, auto_delete=True, passive=True)
c.exchange_declare('test_exchange_declare', 'direct',
durable=True, auto_delete=True)
c.exchange_declare('test_exchange_declare', 'direct',
durable=True, auto_delete=True, passive=True)
assert 'test_exchange_declare' in c.state.exchanges
# can declare again with same values
c.exchange_declare('test_exchange_declare', 'direct',
durable=True, auto_delete=True)
assert 'test_exchange_declare' in c.state.exchanges
# using different values raises NotEquivalentError
with pytest.raises(virtual.NotEquivalentError):
c.exchange_declare('test_exchange_declare', 'direct',
durable=False, auto_delete=True)
def test_exchange_delete(self, ex='test_exchange_delete'):
class PurgeChannel(virtual.Channel):
purged = []
def _purge(self, queue):
self.purged.append(queue)
c = PurgeChannel(self.channel.connection)
c.exchange_declare(ex, 'direct', durable=True, auto_delete=True)
assert ex in c.state.exchanges
assert not c.state.has_binding(ex, ex, ex) # no bindings yet
c.exchange_delete(ex)
assert ex not in c.state.exchanges
c.exchange_declare(ex, 'direct', durable=True, auto_delete=True)
c.queue_declare(ex)
c.queue_bind(ex, ex, ex)
assert c.state.has_binding(ex, ex, ex)
c.exchange_delete(ex)
assert not c.state.has_binding(ex, ex, ex)
assert ex in c.purged
def test_queue_delete__if_empty(self, n='test_queue_delete__if_empty'):
class PurgeChannel(virtual.Channel):
purged = []
size = 30
def _purge(self, queue):
self.purged.append(queue)
def _size(self, queue):
return self.size
c = PurgeChannel(self.channel.connection)
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
# tests code path that returns if queue already bound.
c.queue_bind(n, n, n)
c.queue_delete(n, if_empty=True)
assert c.state.has_binding(n, n, n)
c.size = 0
c.queue_delete(n, if_empty=True)
assert not c.state.has_binding(n, n, n)
assert n in c.purged
def test_queue_purge(self, n='test_queue_purge'):
class PurgeChannel(virtual.Channel):
purged = []
def _purge(self, queue):
self.purged.append(queue)
c = PurgeChannel(self.channel.connection)
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
c.queue_purge(n)
assert n in c.purged
def test_basic_publish__anon_exchange(self):
c = memory_client().channel()
msg = MagicMock(name='msg')
c.encode_body = Mock(name='c.encode_body')
c.encode_body.return_value = (1, 2)
c._put = Mock(name='c._put')
c.basic_publish(msg, None, 'rkey', kw=1)
c._put.assert_called_with('rkey', msg, kw=1)
def test_basic_publish_unique_delivery_tags(self, n='test_uniq_tag'):
c1 = memory_client().channel()
c2 = memory_client().channel()
for c in (c1, c2):
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
m1 = c1.prepare_message('George Costanza')
m2 = c2.prepare_message('Elaine Marie Benes')
c1.basic_publish(m1, n, n)
c2.basic_publish(m2, n, n)
r1 = c1.message_to_python(c1.basic_get(n))
r2 = c2.message_to_python(c2.basic_get(n))
assert r1.delivery_tag != r2.delivery_tag
with pytest.raises(ValueError):
int(r1.delivery_tag)
with pytest.raises(ValueError):
int(r2.delivery_tag)
def test_basic_publish__get__consume__restore(self,
n='test_basic_publish'):
c = memory_client().channel()
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
c.queue_declare(n + '2')
c.queue_bind(n + '2', n, n)
messages = []
c.connection._deliver = Mock(name='_deliver')
def on_deliver(message, queue):
messages.append(message)
c.connection._deliver.side_effect = on_deliver
m = c.prepare_message('nthex quick brown fox...')
c.basic_publish(m, n, n)
r1 = c.message_to_python(c.basic_get(n))
assert r1
assert r1.body == b'nthex quick brown fox...'
assert c.basic_get(n) is None
consumer_tag = uuid()
c.basic_consume(n + '2', False,
consumer_tag=consumer_tag, callback=lambda *a: None)
assert n + '2' in c._active_queues
c.drain_events()
r2 = c.message_to_python(messages[-1])
assert r2.body == b'nthex quick brown fox...'
assert r2.delivery_info['exchange'] == n
assert r2.delivery_info['routing_key'] == n
with pytest.raises(virtual.Empty):
c.drain_events()
c.basic_cancel(consumer_tag)
c._restore(r2)
r3 = c.message_to_python(c.basic_get(n))
assert r3
assert r3.body == b'nthex quick brown fox...'
assert c.basic_get(n) is None
def test_basic_ack(self):
class MockQoS(virtual.QoS):
was_acked = False
def ack(self, delivery_tag):
self.was_acked = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_ack('foo')
assert self.channel._qos.was_acked
def test_basic_recover__requeue(self):
class MockQoS(virtual.QoS):
was_restored = False
def restore_unacked(self):
self.was_restored = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_recover(requeue=True)
assert self.channel._qos.was_restored
def test_restore_unacked_raises_BaseException(self):
q = self.channel.qos
q._flush = Mock()
q._delivered = {1: 1}
q.channel._restore = Mock()
q.channel._restore.side_effect = SystemExit
errors = q.restore_unacked()
assert isinstance(errors[0][0], SystemExit)
assert errors[0][1] == 1
assert not q._delivered
@patch('kombu.transport.virtual.base.emergency_dump_state')
@patch(PRINT_FQDN)
def test_restore_unacked_once_when_unrestored(self, print_,
emergency_dump_state):
q = self.channel.qos
q._flush = Mock()
class State(dict):
restored = False
q._delivered = State({1: 1})
ru = q.restore_unacked = Mock()
exc = None
try:
raise KeyError()
except KeyError as exc_:
exc = exc_
ru.return_value = [(exc, 1)]
self.channel.do_restore = True
q.restore_unacked_once()
print_.assert_called()
emergency_dump_state.assert_called()
def test_basic_recover(self):
with pytest.raises(NotImplementedError):
self.channel.basic_recover(requeue=False)
def test_basic_reject(self):
class MockQoS(virtual.QoS):
was_rejected = False
def reject(self, delivery_tag, requeue=False):
self.was_rejected = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_reject('foo')
assert self.channel._qos.was_rejected
def test_basic_qos(self):
self.channel.basic_qos(prefetch_count=128)
assert self.channel._qos.prefetch_count == 128
def test_lookup__undeliverable(self, n='test_lookup__undeliverable'):
warnings.resetwarnings()
with warnings.catch_warnings(record=True) as log:
assert self.channel._lookup(n, n, 'ae.undeliver') == [
'ae.undeliver',
]
assert log
assert 'could not be delivered' in log[0].message.args[0]
def test_context(self):
x = self.channel.__enter__()
assert x is self.channel
x.__exit__()
assert x.closed
def test_cycle_property(self):
assert self.channel.cycle
def test_flow(self):
with pytest.raises(NotImplementedError):
self.channel.flow(False)
def test_close_when_no_connection(self):
self.channel.connection = None
self.channel.close()
assert self.channel.closed
def test_drain_events_has_get_many(self):
c = self.channel
c._get_many = Mock()
c._poll = Mock()
c._consumers = [1]
c._qos = Mock()
c._qos.can_consume.return_value = True
c.drain_events(timeout=10.0)
c._get_many.assert_called_with(c._active_queues, timeout=10.0)
def test_get_exchanges(self):
self.channel.exchange_declare(exchange='unique_name')
assert self.channel.get_exchanges()
def test_basic_cancel_not_in_active_queues(self):
c = self.channel
c._consumers.add('x')
c._tag_to_queue['x'] = 'foo'
c._active_queues = Mock()
c._active_queues.remove.side_effect = ValueError()
c.basic_cancel('x')
c._active_queues.remove.assert_called_with('foo')
def test_basic_cancel_unknown_ctag(self):
assert self.channel.basic_cancel('unknown-tag') is None
def test_list_bindings(self):
c = self.channel
c.exchange_declare(exchange='unique_name')
c.queue_declare(queue='q')
c.queue_bind(queue='q', exchange='unique_name', routing_key='rk')
assert ('q', 'unique_name', 'rk') in list(c.list_bindings())
def test_after_reply_message_received(self):
c = self.channel
c.queue_delete = Mock()
c.after_reply_message_received('foo')
c.queue_delete.assert_called_with('foo')
def test_queue_delete_unknown_queue(self):
assert self.channel.queue_delete('xiwjqjwel') is None
def test_queue_declare_passive(self):
has_queue = self.channel._has_queue = Mock()
has_queue.return_value = False
with pytest.raises(ChannelError):
self.channel.queue_declare(queue='21wisdjwqe', passive=True)
def test_get_message_priority(self):
def _message(priority):
return self.channel.prepare_message(
'the message with priority', priority=priority,
)
assert self.channel._get_message_priority(_message(5)) == 5
assert self.channel._get_message_priority(
_message(self.channel.min_priority - 10)
) == self.channel.min_priority
assert self.channel._get_message_priority(
_message(self.channel.max_priority + 10),
) == self.channel.max_priority
assert self.channel._get_message_priority(
_message('foobar'),
) == self.channel.default_priority
assert self.channel._get_message_priority(
_message(2), reverse=True,
) == self.channel.max_priority - 2
class test_Transport:
def setup(self):
self.transport = client().transport
def test_custom_polling_interval(self):
x = client(transport_options={'polling_interval': 32.3})
assert x.transport.polling_interval == 32.3
def test_timeout_over_polling_interval(self):
x = client(transport_options=dict(polling_interval=60))
start = monotonic()
with pytest.raises(socket.timeout):
x.transport.drain_events(x, timeout=.5)
assert monotonic() - start < 60
def test_close_connection(self):
c1 = self.transport.create_channel(self.transport)
c2 = self.transport.create_channel(self.transport)
assert len(self.transport.channels) == 2
self.transport.close_connection(self.transport)
assert not self.transport.channels
        del c1  # so pyflakes doesn't complain
        del c2
def test_drain_channel(self):
channel = self.transport.create_channel(self.transport)
with pytest.raises(virtual.Empty):
self.transport._drain_channel(channel, Mock())
def test__deliver__no_queue(self):
with pytest.raises(KeyError):
self.transport._deliver(Mock(name='msg'), queue=None)
def test__reject_inbound_message(self):
channel = Mock(name='channel')
self.transport.channels = [None, channel]
self.transport._reject_inbound_message({'foo': 'bar'})
channel.Message.assert_called_with({'foo': 'bar'}, channel=channel)
channel.qos.append.assert_called_with(
channel.Message(), channel.Message().delivery_tag,
)
channel.basic_reject.assert_called_with(
channel.Message().delivery_tag, requeue=True,
)
def test_on_message_ready(self):
channel = Mock(name='channel')
msg = Mock(name='msg')
callback = Mock(name='callback')
self.transport._callbacks = {'q1': callback}
self.transport.on_message_ready(channel, msg, queue='q1')
callback.assert_called_with(msg)
def test_on_message_ready__no_queue(self):
with pytest.raises(KeyError):
self.transport.on_message_ready(
Mock(name='channel'), Mock(name='msg'), queue=None)
def test_on_message_ready__no_callback(self):
self.transport._callbacks = {}
with pytest.raises(KeyError):
self.transport.on_message_ready(
Mock(name='channel'), Mock(name='msg'), queue='q1')
|
import os.path
import cherrypy
class Page:
# Store the page title in a class attribute
title = 'Untitled Page'
def header(self):
return '''
<html>
<head>
<title>%s</title>
            </head>
<body>
<h2>%s</h2>
''' % (self.title, self.title)
def footer(self):
return '''
</body>
</html>
'''
# Note that header and footer don't get their exposed attributes
# set to True. This isn't necessary since the user isn't supposed
# to call header or footer directly; instead, we'll call them from
# within the actually exposed handler methods defined in this
# class' subclasses.
class HomePage(Page):
# Different title for this page
title = 'Tutorial 5'
def __init__(self):
# create a subpage
self.another = AnotherPage()
@cherrypy.expose
def index(self):
# Note that we call the header and footer methods inherited
# from the Page class!
return self.header() + '''
<p>
Isn't this exciting? There's
<a href="./another/">another page</a>, too!
</p>
''' + self.footer()
class AnotherPage(Page):
title = 'Another Page'
@cherrypy.expose
def index(self):
return self.header() + '''
<p>
And this is the amazing second page!
</p>
''' + self.footer()
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to HomePage().index().
cherrypy.quickstart(HomePage(), config=tutconf)
|
from simplipy import API
from simplipy.errors import (
InvalidCredentialsError,
PendingAuthorizationError,
SimplipyError,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_CODE, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from . import async_get_client_id
from .const import DOMAIN, LOGGER # pylint: disable=unused-import
class SimpliSafeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a SimpliSafe config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize the config flow."""
self.full_data_schema = vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_CODE): str,
}
)
self.password_data_schema = vol.Schema({vol.Required(CONF_PASSWORD): str})
self._code = None
self._password = None
self._username = None
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Define the config flow to handle options."""
return SimpliSafeOptionsFlowHandler(config_entry)
async def _async_get_simplisafe_api(self):
"""Get an authenticated SimpliSafe API client."""
client_id = await async_get_client_id(self.hass)
websession = aiohttp_client.async_get_clientsession(self.hass)
return await API.login_via_credentials(
self._username,
self._password,
client_id=client_id,
session=websession,
)
async def _async_login_during_step(self, *, step_id, form_schema):
"""Attempt to log into the API from within a config flow step."""
errors = {}
try:
simplisafe = await self._async_get_simplisafe_api()
except PendingAuthorizationError:
LOGGER.info("Awaiting confirmation of MFA email click")
return await self.async_step_mfa()
except InvalidCredentialsError:
errors = {"base": "invalid_auth"}
except SimplipyError as err:
LOGGER.error("Unknown error while logging into SimpliSafe: %s", err)
errors = {"base": "unknown"}
if errors:
return self.async_show_form(
step_id=step_id,
data_schema=form_schema,
errors=errors,
)
return await self.async_step_finish(
{
CONF_USERNAME: self._username,
CONF_TOKEN: simplisafe.refresh_token,
CONF_CODE: self._code,
}
)
async def async_step_finish(self, user_input=None):
"""Handle finish config entry setup."""
existing_entry = await self.async_set_unique_id(self._username)
if existing_entry:
self.hass.config_entries.async_update_entry(existing_entry, data=user_input)
return self.async_abort(reason="reauth_successful")
return self.async_create_entry(title=self._username, data=user_input)
async def async_step_mfa(self, user_input=None):
"""Handle multi-factor auth confirmation."""
if user_input is None:
return self.async_show_form(step_id="mfa")
try:
simplisafe = await self._async_get_simplisafe_api()
except PendingAuthorizationError:
LOGGER.error("Still awaiting confirmation of MFA email click")
return self.async_show_form(
step_id="mfa", errors={"base": "still_awaiting_mfa"}
)
return await self.async_step_finish(
{
CONF_USERNAME: self._username,
CONF_TOKEN: simplisafe.refresh_token,
CONF_CODE: self._code,
}
)
async def async_step_reauth(self, config):
"""Handle configuration by re-auth."""
self._code = config.get(CONF_CODE)
self._username = config[CONF_USERNAME]
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
"""Handle re-auth completion."""
if not user_input:
return self.async_show_form(
step_id="reauth_confirm", data_schema=self.password_data_schema
)
self._password = user_input[CONF_PASSWORD]
return await self._async_login_during_step(
step_id="reauth_confirm", form_schema=self.password_data_schema
)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return self.async_show_form(
step_id="user", data_schema=self.full_data_schema
)
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
self._code = user_input.get(CONF_CODE)
self._password = user_input[CONF_PASSWORD]
self._username = user_input[CONF_USERNAME]
return await self._async_login_during_step(
step_id="user", form_schema=self.full_data_schema
)
class SimpliSafeOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a SimpliSafe options flow."""
def __init__(self, config_entry):
"""Initialize."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_CODE,
default=self.config_entry.options.get(CONF_CODE),
): str
}
),
)
|
import gettext
import os
import re
import sys
from io import BytesIO
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email as validate_email_django
from django.utils.translation import gettext as _
from PIL import Image
from weblate.trans.util import cleanup_path
USERNAME_MATCHER = re.compile(r"^[\w@+-][\w.@+-]*$")
# Reject some suspicious e-mail addresses, based on checks enforced by Exim MTA
EMAIL_BLACKLIST = re.compile(r"^([./|]|.*([@%!`#&?]|/\.\./))")
# Matches Git condition on "name consists only of disallowed characters"
CRUD_RE = re.compile(r"^[.,;:<>\"'\\]+$")
ALLOWED_IMAGES = {"image/jpeg", "image/png", "image/apng", "image/gif"}
# File formats we do not accept on translation/glossary upload
FORBIDDEN_EXTENSIONS = {
".png",
".jpg",
".gif",
".svg",
".doc",
".rtf",
".xls",
".docx",
".py",
".js",
".exe",
".dll",
".zip",
}
def validate_re(value, groups=None, allow_empty=True):
try:
compiled = re.compile(value)
except re.error as error:
raise ValidationError(_("Compilation failed: {0}").format(error))
if not allow_empty and compiled.match(""):
raise ValidationError(
_("The regular expression can not match an empty string.")
)
if not groups:
return
for group in groups:
if group not in compiled.groupindex:
raise ValidationError(
_(
'Regular expression is missing named group "{0}", '
"the simplest way to define it is {1}."
).format(group, f"(?P<{group}>.*)")
)
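# Example (illustrative): validate_re(r"(?P<component>\w+)", groups=["component"])
# passes, while validate_re(r"\w+", groups=["component"]) raises ValidationError.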
def validate_re_nonempty(value):
return validate_re(value, allow_empty=False)
def validate_bitmap(value):
"""Validate bitmap, based on django.forms.fields.ImageField."""
if value is None:
return
# Ensure we have image object and content type
# Pretty much copy from django.forms.fields.ImageField:
# We need to get a file object for Pillow. We might have a path or we
# might have to read the data into memory.
if hasattr(value, "temporary_file_path"):
content = value.temporary_file_path()
else:
if hasattr(value, "read"):
content = BytesIO(value.read())
else:
content = BytesIO(value["content"])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(content)
# verify() must be called immediately after the constructor.
image.verify()
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
value.file.content_type = Image.MIME.get(image.format)
except Exception:
# Pillow doesn't recognize it as an image.
raise ValidationError(_("Invalid image!"), code="invalid_image").with_traceback(
sys.exc_info()[2]
)
if hasattr(value.file, "seek") and callable(value.file.seek):
value.file.seek(0)
# Check image type
if value.file.content_type not in ALLOWED_IMAGES:
image.close()
raise ValidationError(_("Unsupported image type: %s") % value.file.content_type)
# Check dimensions
width, height = image.size
if width > 2000 or height > 2000:
image.close()
raise ValidationError(_("The image is too big, please crop or scale it down."))
image.close()
def clean_fullname(val):
"""Remove special characters from user full name."""
if not val:
return val
val = val.strip()
for i in range(0x20):
val = val.replace(chr(i), "")
return val
def validate_fullname(val):
if val != clean_fullname(val):
raise ValidationError(
_("Please avoid using special characters in the full name.")
)
    # Reject full names consisting only of characters that Git disallows
if CRUD_RE.match(val):
raise ValidationError(_("Name consists only of disallowed characters."))
return val
def validate_file_extension(value):
"""Simple extension based validation for uploads."""
ext = os.path.splitext(value.name)[1]
if ext.lower() in FORBIDDEN_EXTENSIONS:
raise ValidationError(_("Unsupported file format."))
return value
def validate_username(value):
if value.startswith("."):
raise ValidationError(_("The username can not start with a full stop."))
if not USERNAME_MATCHER.match(value):
raise ValidationError(
_(
"Username may only contain letters, "
"numbers or the following characters: @ . + - _"
)
)
def validate_email(value):
try:
validate_email_django(value)
except ValidationError:
raise ValidationError(_("Enter a valid e-mail address."))
user_part = value.rsplit("@", 1)[0]
if EMAIL_BLACKLIST.match(user_part):
raise ValidationError(_("Enter a valid e-mail address."))
if not re.match(settings.REGISTRATION_EMAIL_MATCH, value):
raise ValidationError(_("This e-mail address is disallowed."))
def validate_plural_formula(value):
try:
gettext.c2py(value if value else "0")
except ValueError as error:
raise ValidationError(_("Could not evaluate plural formula: {}").format(error))
def validate_filename(value):
if "../" in value or "..\\" in value:
raise ValidationError(
_("The filename can not contain reference to a parent directory.")
)
if os.path.isabs(value):
raise ValidationError(_("The filename can not be an absolute path."))
cleaned = cleanup_path(value)
if value != cleaned:
raise ValidationError(
_(
"The filename should be as simple as possible. "
"Maybe you want to use: {}"
).format(cleaned)
)
def validate_slug(value):
"""Prohibits some special values."""
# This one is used as wildcard in the URL for widgets and translate pages
if value == "-":
raise ValidationError(_("This name is prohibited"))
def validate_language_aliases(value):
"""Validates language aliases - comma separated semi colon values."""
if not value:
return
for part in value.split(","):
if part.count(":") != 1:
raise ValidationError(_("Syntax error in language aliases."))
|
import logging
import os
import pytest
from qutebrowser.browser.webengine import spell
from qutebrowser.utils import usertypes
def test_version(message_mock, caplog):
"""Tests parsing dictionary version from its file name."""
assert spell.version('en-US-8-0.bdic') == (8, 0)
assert spell.version('pl-PL-3-0.bdic') == (3, 0)
with caplog.at_level(logging.WARNING):
assert spell.version('malformed_filename') is None
msg = message_mock.getmsg(usertypes.MessageLevel.warning)
expected = ("Found a dictionary with a malformed name: malformed_filename")
assert msg.text == expected
def test_local_filename_dictionary_does_not_exist(monkeypatch):
"""Tests retrieving local filename when the dir doesn't exits."""
monkeypatch.setattr(
spell, 'dictionary_dir', lambda: '/some-non-existing-dir')
assert not spell.local_filename('en-US')
def test_local_filename_dictionary_not_installed(tmpdir, monkeypatch):
"""Tests retrieving local filename when the dict not installed."""
monkeypatch.setattr(spell, 'dictionary_dir', lambda: str(tmpdir))
assert not spell.local_filename('en-US')
def test_local_filename_not_installed_malformed(tmpdir, monkeypatch, caplog):
"""Tests retrieving local filename when the only file is malformed."""
monkeypatch.setattr(spell, 'dictionary_dir', lambda: str(tmpdir))
(tmpdir / 'en-US.bdic').ensure()
with caplog.at_level(logging.WARNING):
assert not spell.local_filename('en-US')
def test_local_filename_dictionary_installed(tmpdir, monkeypatch):
"""Tests retrieving local filename when the dict installed."""
monkeypatch.setattr(spell, 'dictionary_dir', lambda: str(tmpdir))
for lang_file in ['en-US-11-0.bdic', 'en-US-7-1.bdic', 'pl-PL-3-0.bdic']:
(tmpdir / lang_file).ensure()
assert spell.local_filename('en-US') == 'en-US-11-0.bdic'
assert spell.local_filename('pl-PL') == 'pl-PL-3-0.bdic'
def test_local_filename_installed_malformed(tmpdir, monkeypatch, caplog):
"""Tests retrieving local filename when the dict installed.
In this usecase, another existing file is malformed."""
monkeypatch.setattr(spell, 'dictionary_dir', lambda: str(tmpdir))
for lang_file in ['en-US-11-0.bdic', 'en-US-7-1.bdic', 'en-US.bdic']:
(tmpdir / lang_file).ensure()
with caplog.at_level(logging.WARNING):
assert spell.local_filename('en-US') == 'en-US-11-0.bdic'
class TestInit:
ENV = 'QTWEBENGINE_DICTIONARIES_PATH'
@pytest.fixture(autouse=True)
def remove_envvar(self, monkeypatch):
monkeypatch.delenv(self.ENV, raising=False)
@pytest.fixture
def dict_dir(self, data_tmpdir):
return data_tmpdir / 'qtwebengine_dictionaries'
def test_init(self, dict_dir):
spell.init()
assert os.environ[self.ENV] == str(dict_dir)
|
from abc import ABC, abstractmethod
from typing import Optional
import discord
from redbot.core import Config, commands
from redbot.core.bot import Red
class MixinMeta(ABC):
"""
Base class for well behaved type hint detection with composite class.
Basically, to keep developers sane when not all attributes are defined in each mixin.
"""
def __init__(self, *_args):
self.config: Config
self.bot: Red
self.cache: dict
@staticmethod
@abstractmethod
async def _voice_perm_check(
ctx: commands.Context, user_voice_state: Optional[discord.VoiceState], **perms: bool
) -> bool:
raise NotImplementedError()
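# Hedged illustration of the intended pattern (cog and mixin names are
# hypothetical): concrete cogs inherit their mixins together with MixinMeta,
# so the annotations above satisfy type checkers across all mixins:
#   class MyCog(SomeMixin, MixinMeta):
#       ...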
|
import json
import urllib
import xmlrpc.client
from .util import read_body
import logging
log = logging.getLogger(__name__)
def method(r1, r2):
assert r1.method == r2.method, "{} != {}".format(r1.method, r2.method)
def uri(r1, r2):
assert r1.uri == r2.uri, "{} != {}".format(r1.uri, r2.uri)
def host(r1, r2):
assert r1.host == r2.host, "{} != {}".format(r1.host, r2.host)
def scheme(r1, r2):
assert r1.scheme == r2.scheme, "{} != {}".format(r1.scheme, r2.scheme)
def port(r1, r2):
assert r1.port == r2.port, "{} != {}".format(r1.port, r2.port)
def path(r1, r2):
assert r1.path == r2.path, "{} != {}".format(r1.path, r2.path)
def query(r1, r2):
assert r1.query == r2.query, "{} != {}".format(r1.query, r2.query)
def raw_body(r1, r2):
assert read_body(r1) == read_body(r2)
def body(r1, r2):
transformer = _get_transformer(r1)
r2_transformer = _get_transformer(r2)
if transformer != r2_transformer:
transformer = _identity
assert transformer(read_body(r1)) == transformer(read_body(r2))
def headers(r1, r2):
assert r1.headers == r2.headers, "{} != {}".format(r1.headers, r2.headers)
def _header_checker(value, header="Content-Type"):
def checker(headers):
_header = headers.get(header, "")
if isinstance(_header, bytes):
_header = _header.decode("utf-8")
return value in _header.lower()
return checker
def _transform_json(body):
# Request body is always a byte string, but json.loads() wants a text
# string. RFC 7159 says the default encoding is UTF-8 (although UTF-16
# and UTF-32 are also allowed: hmmmmm).
if body:
return json.loads(body.decode("utf-8"))
_xml_header_checker = _header_checker("text/xml")
_xmlrpc_header_checker = _header_checker("xmlrpc", header="User-Agent")
_checker_transformer_pairs = (
(
_header_checker("application/x-www-form-urlencoded"),
lambda body: urllib.parse.parse_qs(body.decode("ascii")),
),
(_header_checker("application/json"), _transform_json),
    (lambda headers: _xml_header_checker(headers) and _xmlrpc_header_checker(headers), xmlrpc.client.loads),
)
def _identity(x):
return x
def _get_transformer(request):
for checker, transformer in _checker_transformer_pairs:
if checker(request.headers):
return transformer
else:
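        # No checker matched: compare bodies unchanged.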
return _identity
def requests_match(r1, r2, matchers):
successes, failures = get_matchers_results(r1, r2, matchers)
if failures:
log.debug("Requests {} and {} differ.\n" "Failure details:\n" "{}".format(r1, r2, failures))
return len(failures) == 0
def _evaluate_matcher(matcher_function, *args):
"""
Evaluate the result of a given matcher as a boolean with an assertion error message if any.
It handles two types of matcher :
- a matcher returning a boolean value.
- a matcher that only makes an assert, returning None or raises an assertion error.
"""
assertion_message = None
try:
match = matcher_function(*args)
match = True if match is None else match
except AssertionError as e:
match = False
assertion_message = str(e)
return match, assertion_message
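# Example (illustrative): both matcher styles are handled.
#   _evaluate_matcher(lambda r1, r2: r1.method == r2.method, r1, r2)
#       -> (True, None) or (False, None)
#   _evaluate_matcher(method, r1, r2)  # assert-style matcher defined above
#       -> (False, "GET != POST") when the assertion fails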
def get_matchers_results(r1, r2, matchers):
"""
Get the comparison results of two requests as two list.
The first returned list represents the matchers names that passed.
The second list is the failed matchers as a string with failed assertion details if any.
"""
matches_success, matches_fails = [], []
for m in matchers:
matcher_name = m.__name__
match, assertion_message = _evaluate_matcher(m, r1, r2)
if match:
matches_success.append(matcher_name)
else:
assertion_message = get_assertion_message(assertion_message)
matches_fails.append((matcher_name, assertion_message))
return matches_success, matches_fails
def get_assertion_message(assertion_details):
"""
Get a detailed message about the failing matcher.
"""
return assertion_details
|
import os
import tempfile
import unittest
from pysignalclirestapi import SignalCliRestApi
import requests_mock
import homeassistant.components.signal_messenger.notify as signalmessenger
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
BASE_COMPONENT = "notify"
async def test_signal_messenger_init(hass):
"""Test that service loads successfully."""
config = {
BASE_COMPONENT: {
"name": "test",
"platform": "signal_messenger",
"url": "http://127.0.0.1:8080",
"number": "+43443434343",
"recipients": ["+435565656565"],
}
}
with patch("pysignalclirestapi.SignalCliRestApi.send_message", return_value=None):
assert await async_setup_component(hass, BASE_COMPONENT, config)
await hass.async_block_till_done()
# Test that service loads successfully
assert hass.services.has_service(BASE_COMPONENT, "test")
class TestSignalMessenger(unittest.TestCase):
"""Test the signal_messenger notify."""
def setUp(self):
"""Set up things to be run when tests are started."""
recipients = ["+435565656565"]
number = "+43443434343"
client = SignalCliRestApi("http://127.0.0.1:8080", number)
self._signalmessenger = signalmessenger.SignalNotificationService(
recipients, client
)
@requests_mock.Mocker()
def test_send_message(self, mock):
"""Test send message."""
message = "Testing Signal Messenger platform :)"
mock.register_uri(
"POST",
"http://127.0.0.1:8080/v2/send",
status_code=201,
)
mock.register_uri(
"GET",
"http://127.0.0.1:8080/v1/about",
status_code=200,
json={"versions": ["v1", "v2"]},
)
with self.assertLogs(
"homeassistant.components.signal_messenger.notify", level="DEBUG"
) as context:
self._signalmessenger.send_message(message)
self.assertIn("Sending signal message", context.output[0])
self.assertTrue(mock.called)
self.assertEqual(mock.call_count, 2)
@requests_mock.Mocker()
def test_send_message_should_show_deprecation_warning(self, mock):
"""Test send message."""
message = "Testing Signal Messenger platform with attachment :)"
mock.register_uri(
"POST",
"http://127.0.0.1:8080/v2/send",
status_code=201,
)
mock.register_uri(
"GET",
"http://127.0.0.1:8080/v1/about",
status_code=200,
json={"versions": ["v1", "v2"]},
)
with self.assertLogs(
"homeassistant.components.signal_messenger.notify", level="WARNING"
) as context:
with tempfile.NamedTemporaryFile(
suffix=".png", prefix=os.path.basename(__file__)
) as tf:
data = {"data": {"attachment": tf.name}}
self._signalmessenger.send_message(message, **data)
self.assertIn(
"The 'attachment' option is deprecated, please replace it with 'attachments'. This option will become invalid in version 0.108",
context.output[0],
)
self.assertTrue(mock.called)
self.assertEqual(mock.call_count, 2)
@requests_mock.Mocker()
def test_send_message_with_attachment(self, mock):
"""Test send message."""
message = "Testing Signal Messenger platform :)"
mock.register_uri(
"POST",
"http://127.0.0.1:8080/v2/send",
status_code=201,
)
mock.register_uri(
"GET",
"http://127.0.0.1:8080/v1/about",
status_code=200,
json={"versions": ["v1", "v2"]},
)
with self.assertLogs(
"homeassistant.components.signal_messenger.notify", level="DEBUG"
) as context:
with tempfile.NamedTemporaryFile(
suffix=".png", prefix=os.path.basename(__file__)
) as tf:
data = {"data": {"attachments": [tf.name]}}
self._signalmessenger.send_message(message, **data)
self.assertIn("Sending signal message", context.output[0])
self.assertTrue(mock.called)
self.assertEqual(mock.call_count, 2)
|
import pytest
import vcr
from urllib.request import urlopen
def test_once_record_mode(tmpdir, httpbin):
testfile = str(tmpdir.join("recordmode.yml"))
with vcr.use_cassette(testfile, record_mode=vcr.mode.ONCE):
# cassette file doesn't exist, so create.
urlopen(httpbin.url).read()
with vcr.use_cassette(testfile, record_mode=vcr.mode.ONCE):
# make the same request again
urlopen(httpbin.url).read()
# the first time, it's played from the cassette.
# but, try to access something else from the same cassette, and an
# exception is raised.
with pytest.raises(Exception):
urlopen(httpbin.url + "/get").read()
def test_once_record_mode_two_times(tmpdir, httpbin):
testfile = str(tmpdir.join("recordmode.yml"))
with vcr.use_cassette(testfile, record_mode=vcr.mode.ONCE):
# get two of the same file
urlopen(httpbin.url).read()
urlopen(httpbin.url).read()
with vcr.use_cassette(testfile, record_mode=vcr.mode.ONCE):
# do it again
urlopen(httpbin.url).read()
urlopen(httpbin.url).read()
def test_once_mode_three_times(tmpdir, httpbin):
testfile = str(tmpdir.join("recordmode.yml"))
with vcr.use_cassette(testfile, record_mode=vcr.mode.ONCE):
# get three of the same file
urlopen(httpbin.url).read()
urlopen(httpbin.url).read()
urlopen(httpbin.url).read()
def test_new_episodes_record_mode(tmpdir, httpbin):
testfile = str(tmpdir.join("recordmode.yml"))
with vcr.use_cassette(testfile, record_mode=vcr.mode.NEW_EPISODES):
# cassette file doesn't exist, so create.
urlopen(httpbin.url).read()
with vcr.use_cassette(testfile, record_mode=vcr.mode.NEW_EPISODES) as cass:
# make the same request again
urlopen(httpbin.url).read()
# all responses have been played
assert cass.all_played
# in the "new_episodes" record mode, we can add more requests to
        # a cassette without repercussions.
urlopen(httpbin.url + "/get").read()
# one of the responses has been played
assert cass.play_count == 1
# not all responses have been played
assert not cass.all_played
with vcr.use_cassette(testfile, record_mode=vcr.mode.NEW_EPISODES) as cass:
# the cassette should now have 2 responses
assert len(cass.responses) == 2
def test_new_episodes_record_mode_two_times(tmpdir, httpbin):
testfile = str(tmpdir.join("recordmode.yml"))
url = httpbin.url + "/bytes/1024"
with vcr.use_cassette(testfile, record_mode=vcr.mode.NEW_EPISODES):
# cassette file doesn't exist, so create.
original_first_response = urlopen(url).read()
with vcr.use_cassette(testfile, record_mode=vcr.mode.NEW_EPISODES):
# make the same request again
assert urlopen(url).read() == original_first_response
# in the "new_episodes" record mode, we can add the same request
# to the cassette without repercussions
original_second_response = urlopen(url).read()
with vcr.use_cassette(testfile, record_mode=vcr.mode.ONCE):
# make the same request again
assert urlopen(url).read() == original_first_response
assert urlopen(url).read() == original_second_response
# now that we are back in once mode, this should raise
# an error.
with pytest.raises(Exception):
urlopen(url).read()
def test_all_record_mode(tmpdir, httpbin):
testfile = str(tmpdir.join("recordmode.yml"))
with vcr.use_cassette(testfile, record_mode=vcr.mode.ALL):
# cassette file doesn't exist, so create.
urlopen(httpbin.url).read()
with vcr.use_cassette(testfile, record_mode=vcr.mode.ALL) as cass:
# make the same request again
urlopen(httpbin.url).read()
# in the "all" record mode, we can add more requests to
        # a cassette without repercussions.
urlopen(httpbin.url + "/get").read()
# The cassette was never actually played, even though it existed.
# that's because, in "all" mode, the requests all go directly to
# the source and bypass the cassette.
assert cass.play_count == 0
def test_none_record_mode(tmpdir, httpbin):
# Cassette file doesn't exist, yet we are trying to make a request.
# raise hell.
testfile = str(tmpdir.join("recordmode.yml"))
with vcr.use_cassette(testfile, record_mode=vcr.mode.NONE):
with pytest.raises(Exception):
urlopen(httpbin.url).read()
def test_none_record_mode_with_existing_cassette(tmpdir, httpbin):
# create a cassette file
testfile = str(tmpdir.join("recordmode.yml"))
with vcr.use_cassette(testfile, record_mode=vcr.mode.ALL):
urlopen(httpbin.url).read()
# play from cassette file
with vcr.use_cassette(testfile, record_mode=vcr.mode.NONE) as cass:
urlopen(httpbin.url).read()
assert cass.play_count == 1
# but if I try to hit the net, raise an exception.
with pytest.raises(Exception):
urlopen(httpbin.url + "/get").read()
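# Summary of the modes exercised above: ONCE records only when the cassette
# does not exist yet, NEW_EPISODES appends unseen requests, ALL always
# re-records and never plays back, and NONE only plays back existing
# cassettes and never records.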
|
from .common import MQTTMessage, setup_ozw
from tests.common import async_capture_events
async def test_scenes(hass, generic_data, sent_messages):
"""Test setting up config entry."""
receive_message = await setup_ozw(hass, fixture=generic_data)
events = async_capture_events(hass, "ozw.scene_activated")
# Publish fake scene event on mqtt
message = MQTTMessage(
topic="OpenZWave/1/node/39/instance/1/commandclass/43/value/562950622511127/",
payload={
"Label": "Scene",
"Value": 16,
"Units": "",
"Min": -2147483648,
"Max": 2147483647,
"Type": "Int",
"Instance": 1,
"CommandClass": "COMMAND_CLASS_SCENE_ACTIVATION",
"Index": 0,
"Node": 7,
"Genre": "User",
"Help": "",
"ValueIDKey": 122339347,
"ReadOnly": False,
"WriteOnly": False,
"ValueSet": False,
"ValuePolled": False,
"ChangeVerified": False,
"Event": "valueChanged",
"TimeStamp": 1579630367,
},
)
message.encode()
receive_message(message)
# wait for the event
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["scene_value_id"] == 16
# Publish fake central scene event on mqtt
message = MQTTMessage(
topic="OpenZWave/1/node/39/instance/1/commandclass/91/value/281476005806100/",
payload={
"Label": "Scene 1",
"Value": {
"List": [
{"Value": 0, "Label": "Inactive"},
{"Value": 1, "Label": "Pressed 1 Time"},
{"Value": 2, "Label": "Key Released"},
{"Value": 3, "Label": "Key Held down"},
],
"Selected": "Pressed 1 Time",
"Selected_id": 1,
},
"Units": "",
"Min": 0,
"Max": 0,
"Type": "List",
"Instance": 1,
"CommandClass": "COMMAND_CLASS_CENTRAL_SCENE",
"Index": 1,
"Node": 61,
"Genre": "User",
"Help": "",
"ValueIDKey": 281476005806100,
"ReadOnly": False,
"WriteOnly": False,
"ValueSet": False,
"ValuePolled": False,
"ChangeVerified": False,
"Event": "valueChanged",
"TimeStamp": 1579640710,
},
)
message.encode()
receive_message(message)
# wait for the event
await hass.async_block_till_done()
assert len(events) == 2
assert events[1].data["scene_id"] == 1
assert events[1].data["scene_label"] == "Scene 1"
assert events[1].data["scene_value_label"] == "Pressed 1 Time"
|
import asyncio
from homeassistant.components import camera, image_processing as ip
from homeassistant.components.openalpr_cloud.image_processing import OPENALPR_API_URL
from homeassistant.core import callback
from homeassistant.setup import setup_component
from tests.async_mock import PropertyMock, patch
from tests.common import assert_setup_component, get_test_home_assistant, load_fixture
from tests.components.image_processing import common
class TestOpenAlprCloudSetup:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_platform(self):
"""Set up platform with one entity."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
assert self.hass.states.get("image_processing.openalpr_demo_camera")
def test_setup_platform_name(self):
"""Set up platform with one entity and set name."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera", "name": "test local"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
assert self.hass.states.get("image_processing.test_local")
def test_setup_platform_without_api_key(self):
"""Set up platform with one entity without api_key."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"region": "eu",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(0, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
def test_setup_platform_without_region(self):
"""Set up platform with one entity without region."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(0, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
class TestOpenAlprCloud:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera", "name": "test local"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with patch(
"homeassistant.components.openalpr_cloud.image_processing."
"OpenAlprCloudEntity.should_poll",
new_callable=PropertyMock(return_value=False),
):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
self.alpr_events = []
@callback
def mock_alpr_event(event):
"""Mock event."""
self.alpr_events.append(event)
self.hass.bus.listen("image_processing.found_plate", mock_alpr_event)
self.params = {
"secret_key": "sk_abcxyz123456",
"tasks": "plate",
"return_image": 0,
"country": "eu",
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_openalpr_process_image(self, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
aioclient_mock.post(
OPENALPR_API_URL,
params=self.params,
text=load_fixture("alpr_cloud.json"),
status=200,
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
state = self.hass.states.get("image_processing.test_local")
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 5
assert state.attributes.get("vehicles") == 1
assert state.state == "H786P0J"
event_data = [
event.data
for event in self.alpr_events
if event.data.get("plate") == "H786P0J"
]
assert len(event_data) == 1
assert event_data[0]["plate"] == "H786P0J"
assert event_data[0]["confidence"] == float(90.436699)
assert event_data[0]["entity_id"] == "image_processing.test_local"
def test_openalpr_process_image_api_error(self, aioclient_mock):
"""Set up and scan a picture and test api error."""
aioclient_mock.post(
OPENALPR_API_URL,
params=self.params,
text="{'error': 'error message'}",
status=400,
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 0
def test_openalpr_process_image_api_timeout(self, aioclient_mock):
"""Set up and scan a picture and test api error."""
aioclient_mock.post(
OPENALPR_API_URL, params=self.params, exc=asyncio.TimeoutError()
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 0
|
import logging
from aiopvapi.helpers.aiorequest import AioRequest
import async_timeout
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from . import async_get_device_info
from .const import DEVICE_NAME, DEVICE_SERIAL_NUMBER, HUB_EXCEPTIONS
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str})
HAP_SUFFIX = "._hap._tcp.local."
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
hub_address = data[CONF_HOST]
websession = async_get_clientsession(hass)
pv_request = AioRequest(hub_address, loop=hass.loop, websession=websession)
try:
async with async_timeout.timeout(10):
device_info = await async_get_device_info(pv_request)
except HUB_EXCEPTIONS as err:
raise CannotConnect from err
if not device_info:
raise CannotConnect
# Return info that you want to store in the config entry.
return {
"title": device_info[DEVICE_NAME],
"unique_id": device_info[DEVICE_SERIAL_NUMBER],
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Hunter Douglas PowerView."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the powerview config flow."""
self.powerview_config = {}
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
if self._host_already_configured(user_input[CONF_HOST]):
return self.async_abort(reason="already_configured")
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if not errors:
await self.async_set_unique_id(info["unique_id"])
return self.async_create_entry(
title=info["title"], data={CONF_HOST: user_input[CONF_HOST]}
)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input=None):
"""Handle the initial step."""
return await self.async_step_user(user_input)
async def async_step_homekit(self, homekit_info):
"""Handle HomeKit discovery."""
# If we already have the host configured do
# not open connections to it if we can avoid it.
if self._host_already_configured(homekit_info[CONF_HOST]):
return self.async_abort(reason="already_configured")
try:
info = await validate_input(self.hass, homekit_info)
except CannotConnect:
return self.async_abort(reason="cannot_connect")
except Exception: # pylint: disable=broad-except
return self.async_abort(reason="unknown")
await self.async_set_unique_id(info["unique_id"], raise_on_progress=False)
self._abort_if_unique_id_configured({CONF_HOST: homekit_info["host"]})
name = homekit_info["name"]
if name.endswith(HAP_SUFFIX):
name = name[: -len(HAP_SUFFIX)]
self.powerview_config = {
CONF_HOST: homekit_info["host"],
CONF_NAME: name,
}
return await self.async_step_link()
async def async_step_link(self, user_input=None):
"""Attempt to link with Powerview."""
if user_input is not None:
return self.async_create_entry(
title=self.powerview_config[CONF_NAME],
data={CONF_HOST: self.powerview_config[CONF_HOST]},
)
return self.async_show_form(
step_id="link", description_placeholders=self.powerview_config
)
def _host_already_configured(self, host):
"""See if we already have a hub with the host address configured."""
existing_hosts = {
entry.data[CONF_HOST]
for entry in self._async_current_entries()
if CONF_HOST in entry.data
}
return host in existing_hosts
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
import textwrap
from coverage import env
from coverage.misc import NotPython
from coverage.parser import PythonParser
from tests.coveragetest import CoverageTest, xfail
from tests.helpers import arcz_to_arcs
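# Note on the compact "arcz" notation used with arcz_to_arcs() below: each
# space-separated pair encodes one arc, with each character naming a line
# (1-9, then A=10, B=11, ...), "." meaning -1 (module entry/exit), and a
# leading "-" negating the next character. So ".1 14 4." decodes to the
# arcs (-1, 1), (1, 4), (4, -1). (Summary of tests.helpers.arcz_to_arcs.)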
class PythonParserTest(CoverageTest):
"""Tests for coverage.py's Python code parsing."""
run_in_temp_dir = False
def parse_source(self, text):
"""Parse `text` as source, and return the `PythonParser` used."""
if env.PY2:
text = text.decode("ascii")
text = textwrap.dedent(text)
parser = PythonParser(text=text, exclude="nocover")
parser.parse_source()
return parser
def test_exit_counts(self):
parser = self.parse_source("""\
# check some basic branch counting
class Foo:
def foo(self, a):
if a:
return 5
else:
return 7
class Bar:
pass
""")
self.assertEqual(parser.exit_counts(), {
2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1
})
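# Reading the expectation above: exit_counts() maps each statement line to
# its number of exits. Line 4 (`if a:`) has two exits, one per branch;
# every other statement has exactly one.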
def test_generator_exit_counts(self):
# https://github.com/nedbat/coveragepy/issues/324
parser = self.parse_source("""\
def gen(input):
for n in input:
yield (i * 2 for i in range(n))
list(gen([1,2,3]))
""")
self.assertEqual(parser.exit_counts(), {
1:1, # def -> list
2:2, # for -> yield; for -> exit
3:2, # yield -> for; genexp exit
5:1, # list -> exit
})
def test_try_except(self):
parser = self.parse_source("""\
try:
a = 2
except ValueError:
a = 4
except ZeroDivideError:
a = 6
except:
a = 8
b = 9
""")
self.assertEqual(parser.exit_counts(), {
1: 1, 2:1, 3:2, 4:1, 5:2, 6:1, 7:1, 8:1, 9:1
})
def test_excluded_classes(self):
parser = self.parse_source("""\
class Foo:
def __init__(self):
pass
if len([]): # nocover
class Bar:
pass
""")
self.assertEqual(parser.exit_counts(), {
1:0, 2:1, 3:1
})
def test_missing_branch_to_excluded_code(self):
parser = self.parse_source("""\
if fooey:
a = 2
else: # nocover
a = 4
b = 5
""")
self.assertEqual(parser.exit_counts(), { 1:1, 2:1, 5:1 })
parser = self.parse_source("""\
def foo():
if fooey:
a = 3
else:
a = 5
b = 6
""")
self.assertEqual(parser.exit_counts(), { 1:1, 2:2, 3:1, 5:1, 6:1 })
parser = self.parse_source("""\
def foo():
if fooey:
a = 3
else: # nocover
a = 5
b = 6
""")
self.assertEqual(parser.exit_counts(), { 1:1, 2:1, 3:1, 6:1 })
def test_indentation_error(self):
msg = (
"Couldn't parse '<code>' as Python source: "
"'unindent does not match any outer indentation level' at line 3"
)
with self.assertRaisesRegex(NotPython, msg):
_ = self.parse_source("""\
0 spaces
2
1
""")
def test_token_error(self):
msg = "Couldn't parse '<code>' as Python source: 'EOF in multi-line string' at line 1"
with self.assertRaisesRegex(NotPython, msg):
_ = self.parse_source("""\
'''
""")
@xfail(
env.PYPY3 and env.PYPYVERSION == (7, 3, 0),
"https://bitbucket.org/pypy/pypy/issues/3139",
)
def test_decorator_pragmas(self):
parser = self.parse_source("""\
# 1
@foo(3) # nocover
@bar
def func(x, y=5):
return 6
class Foo: # this is the only statement.
'''9'''
@foo # nocover
def __init__(self):
'''12'''
return 13
@foo( # nocover
16,
17,
)
def meth(self):
return 20
@foo( # nocover
23
)
def func(x=25):
return 26
""")
raw_statements = set([3, 4, 5, 6, 8, 9, 10, 13, 15, 16, 17, 20, 22, 23, 25, 26])
if env.PYBEHAVIOR.trace_decorated_def:
raw_statements.update([11, 19])
self.assertEqual(parser.raw_statements, raw_statements)
self.assertEqual(parser.statements, set([8]))
def test_class_decorator_pragmas(self):
parser = self.parse_source("""\
class Foo(object):
def __init__(self):
self.x = 3
@foo # nocover
class Bar(object):
def __init__(self):
self.x = 8
""")
self.assertEqual(parser.raw_statements, set([1, 2, 3, 5, 6, 7, 8]))
self.assertEqual(parser.statements, set([1, 2, 3]))
def test_empty_decorated_function(self):
parser = self.parse_source("""\
def decorator(func):
return func
@decorator
def foo(self):
'''Docstring'''
@decorator
def bar(self):
pass
""")
if env.PYBEHAVIOR.trace_decorated_def:
expected_statements = {1, 2, 4, 5, 8, 9, 10}
expected_arcs = set(arcz_to_arcs(".1 14 45 58 89 9. .2 2. -8A A-8"))
expected_exits = {1: 1, 2: 1, 4: 1, 5: 1, 8: 1, 9: 1, 10: 1}
else:
expected_statements = {1, 2, 4, 8, 10}
expected_arcs = set(arcz_to_arcs(".1 14 48 8. .2 2. -8A A-8"))
expected_exits = {1: 1, 2: 1, 4: 1, 8: 1, 10: 1}
if (not env.PYPY) and (env.PYVERSION >= (3, 7, 0, 'beta', 5)):
# 3.7 changed how functions with only docstrings are numbered.
expected_arcs.update(set(arcz_to_arcs("-46 6-4")))
expected_exits.update({6: 1})
self.assertEqual(expected_statements, parser.statements)
self.assertEqual(expected_arcs, parser.arcs())
self.assertEqual(expected_exits, parser.exit_counts())
class ParserMissingArcDescriptionTest(CoverageTest):
"""Tests for PythonParser.missing_arc_description."""
run_in_temp_dir = False
def parse_text(self, source):
"""Parse Python source, and return the parser object."""
parser = PythonParser(text=textwrap.dedent(source))
parser.parse_source()
return parser
def test_missing_arc_description(self):
# This code is never run, so the actual values don't matter.
parser = self.parse_text(u"""\
if x:
print(2)
print(3)
def func5():
for x in range(6):
if x == 7:
break
def func10():
while something(11):
thing(12)
more_stuff(13)
""")
self.assertEqual(
parser.missing_arc_description(1, 2),
"line 1 didn't jump to line 2, because the condition on line 1 was never true"
)
self.assertEqual(
parser.missing_arc_description(1, 3),
"line 1 didn't jump to line 3, because the condition on line 1 was never false"
)
self.assertEqual(
parser.missing_arc_description(6, -5),
"line 6 didn't return from function 'func5', "
"because the loop on line 6 didn't complete"
)
self.assertEqual(
parser.missing_arc_description(6, 7),
"line 6 didn't jump to line 7, because the loop on line 6 never started"
)
self.assertEqual(
parser.missing_arc_description(11, 12),
"line 11 didn't jump to line 12, because the condition on line 11 was never true"
)
self.assertEqual(
parser.missing_arc_description(11, 13),
"line 11 didn't jump to line 13, because the condition on line 11 was never false"
)
def test_missing_arc_descriptions_for_small_callables(self):
parser = self.parse_text(u"""\
callables = [
lambda: 2,
(x for x in range(3)),
{x:1 for x in range(4)},
{x for x in range(5)},
]
x = 7
""")
self.assertEqual(
parser.missing_arc_description(2, -2),
"line 2 didn't finish the lambda on line 2"
)
self.assertEqual(
parser.missing_arc_description(3, -3),
"line 3 didn't finish the generator expression on line 3"
)
self.assertEqual(
parser.missing_arc_description(4, -4),
"line 4 didn't finish the dictionary comprehension on line 4"
)
self.assertEqual(
parser.missing_arc_description(5, -5),
"line 5 didn't finish the set comprehension on line 5"
)
def test_missing_arc_descriptions_for_exceptions(self):
parser = self.parse_text(u"""\
try:
pass
except ZeroDivideError:
print("whoops")
except ValueError:
print("yikes")
""")
self.assertEqual(
parser.missing_arc_description(3, 4),
"line 3 didn't jump to line 4, because the exception caught by line 3 didn't happen"
)
self.assertEqual(
parser.missing_arc_description(5, 6),
"line 5 didn't jump to line 6, because the exception caught by line 5 didn't happen"
)
def test_missing_arc_descriptions_for_finally(self):
parser = self.parse_text(u"""\
def function():
for i in range(2):
try:
if something(4):
break
else:
if something(7):
continue
else:
continue
if also_this(11):
return 12
else:
raise Exception(14)
finally:
this_thing(16)
that_thing(17)
""")
if env.PYBEHAVIOR.finally_jumps_back:
self.assertEqual(
parser.missing_arc_description(16, 5),
"line 16 didn't jump to line 5, because the break on line 5 wasn't executed"
)
self.assertEqual(
parser.missing_arc_description(5, 17),
"line 5 didn't jump to line 17, because the break on line 5 wasn't executed"
)
self.assertEqual(
parser.missing_arc_description(16, 8),
"line 16 didn't jump to line 8, because the continue on line 8 wasn't executed"
)
self.assertEqual(
parser.missing_arc_description(8, 2),
"line 8 didn't jump to line 2, because the continue on line 8 wasn't executed"
)
self.assertEqual(
parser.missing_arc_description(16, 12),
"line 16 didn't jump to line 12, because the return on line 12 wasn't executed"
)
self.assertEqual(
parser.missing_arc_description(12, -1),
"line 12 didn't return from function 'function', "
"because the return on line 12 wasn't executed"
)
self.assertEqual(
parser.missing_arc_description(16, -1),
"line 16 didn't except from function 'function', "
"because the raise on line 14 wasn't executed"
)
else:
self.assertEqual(
parser.missing_arc_description(16, 17),
"line 16 didn't jump to line 17, because the break on line 5 wasn't executed"
)
self.assertEqual(
parser.missing_arc_description(16, 2),
"line 16 didn't jump to line 2, "
"because the continue on line 8 wasn't executed"
" or "
"the continue on line 10 wasn't executed"
)
self.assertEqual(
parser.missing_arc_description(16, -1),
"line 16 didn't except from function 'function', "
"because the raise on line 14 wasn't executed"
" or "
"line 16 didn't return from function 'function', "
"because the return on line 12 wasn't executed"
)
def test_missing_arc_descriptions_bug460(self):
parser = self.parse_text(u"""\
x = 1
d = {
3: lambda: [],
4: lambda: [],
}
x = 6
""")
self.assertEqual(
parser.missing_arc_description(2, -3),
"line 3 didn't finish the lambda on line 3",
)
class ParserFileTest(CoverageTest):
"""Tests for coverage.py's code parsing from files."""
def parse_file(self, filename):
"""Parse `text` as source, and return the `PythonParser` used."""
parser = PythonParser(filename=filename, exclude="nocover")
parser.parse_source()
return parser
def test_line_endings(self):
text = """\
# check some basic branch counting
class Foo:
def foo(self, a):
if a:
return 5
else:
return 7
class Bar:
pass
"""
counts = { 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1 }
name_endings = (("unix", "\n"), ("dos", "\r\n"), ("mac", "\r"))
for fname, newline in name_endings:
fname = fname + ".py"
self.make_file(fname, text, newline=newline)
parser = self.parse_file(fname)
self.assertEqual(
parser.exit_counts(),
counts,
"Wrong for %r" % fname
)
def test_encoding(self):
self.make_file("encoded.py", """\
coverage = "\xe7\xf6v\xear\xe3g\xe9"
""")
parser = self.parse_file("encoded.py")
self.assertEqual(parser.exit_counts(), {1: 1})
def test_missing_line_ending(self):
# Test that the set of statements is the same even if a final
# multi-line statement has no final newline.
# https://github.com/nedbat/coveragepy/issues/293
self.make_file("normal.py", """\
out, err = subprocess.Popen(
[sys.executable, '-c', 'pass'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
""")
parser = self.parse_file("normal.py")
self.assertEqual(parser.statements, set([1]))
self.make_file("abrupt.py", """\
out, err = subprocess.Popen(
[sys.executable, '-c', 'pass'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()""") # no final newline.
# Double-check that some test helper wasn't being helpful.
with open("abrupt.py") as f:
self.assertEqual(f.read()[-1], ")")
parser = self.parse_file("abrupt.py")
self.assertEqual(parser.statements, set([1]))
|
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock, patch
MOCK_CONFIG = {
"sensor": {
"platform": "openerz",
"name": "test_name",
"zip": 1234,
"waste_type": "glass",
}
}
async def test_sensor_state(hass):
"""Test whether default waste type set properly."""
with patch(
"homeassistant.components.openerz.sensor.OpenERZConnector"
) as patched_connector:
pickup_instance = MagicMock()
pickup_instance.find_next_pickup.return_value = "2020-12-12"
patched_connector.return_value = pickup_instance
await async_setup_component(hass, SENSOR_DOMAIN, MOCK_CONFIG)
await hass.async_block_till_done()
entity_id = "sensor.test_name"
test_openerz_state = hass.states.get(entity_id)
assert test_openerz_state.state == "2020-12-12"
assert test_openerz_state.name == "test_name"
pickup_instance.find_next_pickup.assert_called_once()
|
import io
import os
import shutil
from ..helper import cd
__all__ = ["add_post_without_text", "append_config", "cd", "create_simple_post", "patch_config"]
def add_post_without_text(directory):
"""Add a post without text."""
# File for Issue #374 (empty post text)
create_simple_post(directory, "empty.txt", "foobar")
def create_simple_post(directory, filename, title_slug, text='', date='2013-03-06 19:08:15'):
"""Create a simple post in a given directory."""
path = os.path.join(directory, filename)
text_processed = '\n' + text if text else ''
with io.open(path, "w+", encoding="utf8") as outf:
outf.write(
"""
.. title: {0}
.. slug: {0}
.. date: {1}
{2}""".format(title_slug, date, text_processed)
)
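# Illustrative only: create_simple_post(d, "hello.txt", "hello-world", "Hi!")
# writes a file whose body (after a leading newline) is:
#
#     .. title: hello-world
#     .. slug: hello-world
#     .. date: 2013-03-06 19:08:15
#
#     Hi!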
def copy_example_post(destination_dir):
"""Copy a modified version of the example post into the site."""
test_dir = os.path.abspath(os.path.dirname(__file__))
source_file = os.path.join(test_dir, "..", "data", "1-nolinks.rst")
destination = os.path.join(destination_dir, "1.rst")
shutil.copy(source_file, destination)
def append_config(config_dir, appendix):
"""Append text to the config file."""
config_path = os.path.join(config_dir, "conf.py")
with io.open(config_path, "a", encoding="utf8") as outf:
outf.write(appendix)
def patch_config(config_dir, *replacements):
"""Patch the config file with new values (find and replace)."""
config_path = os.path.join(config_dir, "conf.py")
with io.open(config_path, "r", encoding="utf-8") as inf:
data = inf.read()
for old, new in replacements:
data = data.replace(old, new)
with io.open(config_path, "w+", encoding="utf8") as outf:
outf.write(data)
outf.flush()
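# Illustrative only:
# patch_config(d, ('BLOG_TITLE = "Demo Site"', 'BLOG_TITLE = "My Site"'))
# rewrites conf.py with every occurrence of the first string replaced by the
# second; multiple (old, new) pairs are applied in order.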
|
import asyncio
from collections import deque
from datetime import datetime, timedelta
import functools as ft
import logging
import re
import sys
from typing import Any, Callable, Container, List, Optional, Set, Union, cast
from homeassistant.components import zone as zone_cmp
from homeassistant.components.device_automation import (
async_get_device_automation_platform,
)
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_ABOVE,
CONF_AFTER,
CONF_ATTRIBUTE,
CONF_BEFORE,
CONF_BELOW,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_STATE,
CONF_VALUE_TEMPLATE,
CONF_WEEKDAY,
CONF_ZONE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
WEEKDAYS,
)
from homeassistant.core import HomeAssistant, State, callback
from homeassistant.exceptions import HomeAssistantError, TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.sun import get_astral_event_date
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as dt_util
FROM_CONFIG_FORMAT = "{}_from_config"
ASYNC_FROM_CONFIG_FORMAT = "async_{}_from_config"
_LOGGER = logging.getLogger(__name__)
INPUT_ENTITY_ID = re.compile(
r"^input_(?:select|text|number|boolean|datetime)\.(?!.+__)(?!_)[\da-z_]+(?<!_)$"
)
ConditionCheckerType = Callable[[HomeAssistant, TemplateVarsType], bool]
async def async_from_config(
hass: HomeAssistant,
config: Union[ConfigType, Template],
config_validation: bool = True,
) -> ConditionCheckerType:
"""Turn a condition configuration into a method.
Should be run on the event loop.
"""
if isinstance(config, Template):
# We got a condition template, wrap it in a configuration to pass along.
config = {
CONF_CONDITION: "template",
CONF_VALUE_TEMPLATE: config,
}
condition = config.get(CONF_CONDITION)
for fmt in (ASYNC_FROM_CONFIG_FORMAT, FROM_CONFIG_FORMAT):
factory = getattr(sys.modules[__name__], fmt.format(condition), None)
if factory:
break
if factory is None:
raise HomeAssistantError(f'Invalid condition "{condition}" specified {config}')
# Check for partials to properly determine if coroutine function
check_factory = factory
while isinstance(check_factory, ft.partial):
check_factory = check_factory.func
if asyncio.iscoroutinefunction(check_factory):
return cast(
ConditionCheckerType, await factory(hass, config, config_validation)
)
return cast(ConditionCheckerType, factory(config, config_validation))
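# Illustrative only: for config {"condition": "state", ...} the loop above
# looks for an "async_state_from_config" factory first, then falls back to
# "state_from_config" (defined later in this module), so both sync and async
# condition factories are resolved purely by naming convention.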
async def async_and_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Create multi condition matcher using 'AND'."""
if config_validation:
config = cv.AND_CONDITION_SCHEMA(config)
checks = [
await async_from_config(hass, entry, False) for entry in config["conditions"]
]
def if_and_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test and condition."""
try:
for check in checks:
if not check(hass, variables):
return False
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning("Error during and-condition: %s", ex)
return False
return True
return if_and_condition
async def async_or_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Create multi condition matcher using 'OR'."""
if config_validation:
config = cv.OR_CONDITION_SCHEMA(config)
checks = [
await async_from_config(hass, entry, False) for entry in config["conditions"]
]
def if_or_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test and condition."""
try:
for check in checks:
if check(hass, variables):
return True
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning("Error during or-condition: %s", ex)
return False
return if_or_condition
async def async_not_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Create multi condition matcher using 'NOT'."""
if config_validation:
config = cv.NOT_CONDITION_SCHEMA(config)
checks = [
await async_from_config(hass, entry, False) for entry in config["conditions"]
]
def if_not_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test not condition."""
try:
for check in checks:
if check(hass, variables):
return False
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning("Error during not-condition: %s", ex)
return True
return if_not_condition
def numeric_state(
hass: HomeAssistant,
entity: Union[None, str, State],
below: Optional[Union[float, str]] = None,
above: Optional[Union[float, str]] = None,
value_template: Optional[Template] = None,
variables: TemplateVarsType = None,
) -> bool:
"""Test a numeric state condition."""
return run_callback_threadsafe(
hass.loop,
async_numeric_state,
hass,
entity,
below,
above,
value_template,
variables,
).result()
def async_numeric_state(
hass: HomeAssistant,
entity: Union[None, str, State],
below: Optional[Union[float, str]] = None,
above: Optional[Union[float, str]] = None,
value_template: Optional[Template] = None,
variables: TemplateVarsType = None,
attribute: Optional[str] = None,
) -> bool:
"""Test a numeric state condition."""
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None or (attribute is not None and attribute not in entity.attributes):
return False
value: Any = None
if value_template is None:
if attribute is None:
value = entity.state
else:
value = entity.attributes.get(attribute)
else:
variables = dict(variables or {})
variables["state"] = entity
try:
value = value_template.async_render(variables)
except TemplateError as ex:
_LOGGER.error("Template error: %s", ex)
return False
if value in (STATE_UNAVAILABLE, STATE_UNKNOWN):
return False
try:
fvalue = float(value)
except ValueError:
_LOGGER.warning(
"Value cannot be processed as a number: %s (Offending entity: %s)",
value,
entity,
)
return False
if below is not None:
if isinstance(below, str):
below_entity = hass.states.get(below)
if (
not below_entity
or below_entity.state in (STATE_UNAVAILABLE, STATE_UNKNOWN)
or fvalue >= float(below_entity.state)
):
return False
elif fvalue >= below:
return False
if above is not None:
if isinstance(above, str):
above_entity = hass.states.get(above)
if (
not above_entity
or above_entity.state in (STATE_UNAVAILABLE, STATE_UNKNOWN)
or fvalue <= float(above_entity.state)
):
return False
elif fvalue <= above:
return False
return True
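# Illustrative only (hypothetical entity): async_numeric_state(hass,
# "sensor.temperature", below=30, above=20) is true when 20 < value < 30.
# `below`/`above` may also be entity ids, in which case the bound is read
# from that entity's current state.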
def async_numeric_state_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.NUMERIC_STATE_CONDITION_SCHEMA(config)
entity_ids = config.get(CONF_ENTITY_ID, [])
attribute = config.get(CONF_ATTRIBUTE)
below = config.get(CONF_BELOW)
above = config.get(CONF_ABOVE)
value_template = config.get(CONF_VALUE_TEMPLATE)
def if_numeric_state(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test numeric state condition."""
if value_template is not None:
value_template.hass = hass
return all(
async_numeric_state(
hass, entity_id, below, above, value_template, variables, attribute
)
for entity_id in entity_ids
)
return if_numeric_state
def state(
hass: HomeAssistant,
entity: Union[None, str, State],
req_state: Any,
for_period: Optional[timedelta] = None,
attribute: Optional[str] = None,
) -> bool:
"""Test if state matches requirements.
Async friendly.
"""
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None or (attribute is not None and attribute not in entity.attributes):
return False
assert isinstance(entity, State)
if attribute is None:
value: Any = entity.state
else:
value = entity.attributes.get(attribute)
if not isinstance(req_state, list):
req_state = [req_state]
is_state = False
for req_state_value in req_state:
state_value = req_state_value
if (
isinstance(req_state_value, str)
and INPUT_ENTITY_ID.match(req_state_value) is not None
):
state_entity = hass.states.get(req_state_value)
if not state_entity:
continue
state_value = state_entity.state
is_state = value == state_value
if is_state:
break
if for_period is None or not is_state:
return is_state
return dt_util.utcnow() - for_period > entity.last_changed
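# Illustrative only (hypothetical entity): state(hass, "light.kitchen", "on",
# for_period=timedelta(minutes=5)) is true only if the entity currently
# matches "on" *and* last changed more than five minutes ago. req_state
# entries matching INPUT_ENTITY_ID (e.g. "input_select.mode") are
# dereferenced to that helper's current state before comparing.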
def state_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.STATE_CONDITION_SCHEMA(config)
entity_ids = config.get(CONF_ENTITY_ID, [])
req_states: Union[str, List[str]] = config.get(CONF_STATE, [])
for_period = config.get("for")
attribute = config.get(CONF_ATTRIBUTE)
if not isinstance(req_states, list):
req_states = [req_states]
def if_state(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Test if condition."""
return all(
state(hass, entity_id, req_states, for_period, attribute)
for entity_id in entity_ids
)
return if_state
def sun(
hass: HomeAssistant,
before: Optional[str] = None,
after: Optional[str] = None,
before_offset: Optional[timedelta] = None,
after_offset: Optional[timedelta] = None,
) -> bool:
"""Test if current time matches sun requirements."""
utcnow = dt_util.utcnow()
today = dt_util.as_local(utcnow).date()
before_offset = before_offset or timedelta(0)
after_offset = after_offset or timedelta(0)
sunrise_today = get_astral_event_date(hass, SUN_EVENT_SUNRISE, today)
sunset_today = get_astral_event_date(hass, SUN_EVENT_SUNSET, today)
sunrise = sunrise_today
sunset = sunset_today
if today > dt_util.as_local(
cast(datetime, sunrise_today)
).date() and SUN_EVENT_SUNRISE in (before, after):
tomorrow = dt_util.as_local(utcnow + timedelta(days=1)).date()
sunrise_tomorrow = get_astral_event_date(hass, SUN_EVENT_SUNRISE, tomorrow)
sunrise = sunrise_tomorrow
if today > dt_util.as_local(
cast(datetime, sunset_today)
).date() and SUN_EVENT_SUNSET in (before, after):
tomorrow = dt_util.as_local(utcnow + timedelta(days=1)).date()
sunset_tomorrow = get_astral_event_date(hass, SUN_EVENT_SUNSET, tomorrow)
sunset = sunset_tomorrow
if sunrise is None and SUN_EVENT_SUNRISE in (before, after):
# There is no sunrise today
return False
if sunset is None and SUN_EVENT_SUNSET in (before, after):
# There is no sunset today
return False
if before == SUN_EVENT_SUNRISE and utcnow > cast(datetime, sunrise) + before_offset:
return False
if before == SUN_EVENT_SUNSET and utcnow > cast(datetime, sunset) + before_offset:
return False
if after == SUN_EVENT_SUNRISE and utcnow < cast(datetime, sunrise) + after_offset:
return False
if after == SUN_EVENT_SUNSET and utcnow < cast(datetime, sunset) + after_offset:
return False
return True
def sun_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with sun based condition."""
if config_validation:
config = cv.SUN_CONDITION_SCHEMA(config)
before = config.get("before")
after = config.get("after")
before_offset = config.get("before_offset")
after_offset = config.get("after_offset")
def time_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate time based if-condition."""
return sun(hass, before, after, before_offset, after_offset)
return time_if
def template(
hass: HomeAssistant, value_template: Template, variables: TemplateVarsType = None
) -> bool:
"""Test if template condition matches."""
return run_callback_threadsafe(
hass.loop, async_template, hass, value_template, variables
).result()
def async_template(
hass: HomeAssistant, value_template: Template, variables: TemplateVarsType = None
) -> bool:
"""Test if template condition matches."""
try:
value = value_template.async_render(variables)
except TemplateError as ex:
_LOGGER.error("Error during template condition: %s", ex)
return False
if isinstance(value, bool):
return value
if isinstance(value, str):
return value.lower() == "true"
return False
def async_template_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.TEMPLATE_CONDITION_SCHEMA(config)
value_template = cast(Template, config.get(CONF_VALUE_TEMPLATE))
def template_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate template based if-condition."""
value_template.hass = hass
return async_template(hass, value_template, variables)
return template_if
def time(
hass: HomeAssistant,
before: Optional[Union[dt_util.dt.time, str]] = None,
after: Optional[Union[dt_util.dt.time, str]] = None,
weekday: Union[None, str, Container[str]] = None,
) -> bool:
"""Test if local time condition matches.
Handle the fact that time is continuous and we may be testing for
a period that crosses midnight. In that case it is easier to test
for the opposite. "(23:59 <= now < 00:01)" would be the same as
"not (00:01 <= now < 23:59)".
"""
now = dt_util.now()
now_time = now.time()
if after is None:
after = dt_util.dt.time(0)
elif isinstance(after, str):
after_entity = hass.states.get(after)
if not after_entity:
return False
after = dt_util.dt.time(
after_entity.attributes.get("hour", 23),
after_entity.attributes.get("minute", 59),
after_entity.attributes.get("second", 59),
)
if before is None:
before = dt_util.dt.time(23, 59, 59, 999999)
elif isinstance(before, str):
before_entity = hass.states.get(before)
if not before_entity:
return False
before = dt_util.dt.time(
before_entity.attributes.get("hour", 23),
before_entity.attributes.get("minute", 59),
before_entity.attributes.get("second", 59),
999999,
)
if after < before:
if not after <= now_time < before:
return False
else:
if before <= now_time < after:
return False
if weekday is not None:
now_weekday = WEEKDAYS[now.weekday()]
if (
isinstance(weekday, str)
and weekday != now_weekday
or now_weekday not in weekday
):
return False
return True
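# Worked example of the midnight-crossing branch above: with after=23:59:00
# and before=00:01:00 we have after >= before, so the code tests the
# complement. now=00:00:30 passes (00:01 <= 00:00:30 is false), while
# now=12:00:00 fails because 00:01 <= 12:00 < 23:59 holds.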
def time_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with time based condition."""
if config_validation:
config = cv.TIME_CONDITION_SCHEMA(config)
before = config.get(CONF_BEFORE)
after = config.get(CONF_AFTER)
weekday = config.get(CONF_WEEKDAY)
def time_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate time based if-condition."""
return time(hass, before, after, weekday)
return time_if
def zone(
hass: HomeAssistant,
zone_ent: Union[None, str, State],
entity: Union[None, str, State],
) -> bool:
"""Test if zone-condition matches.
Async friendly.
"""
if isinstance(zone_ent, str):
zone_ent = hass.states.get(zone_ent)
if zone_ent is None:
return False
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None:
return False
latitude = entity.attributes.get(ATTR_LATITUDE)
longitude = entity.attributes.get(ATTR_LONGITUDE)
if latitude is None or longitude is None:
return False
return zone_cmp.in_zone(
zone_ent, latitude, longitude, entity.attributes.get(ATTR_GPS_ACCURACY, 0)
)
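# Illustrative only (hypothetical entities): zone(hass, "zone.home",
# "device_tracker.phone") is true when the tracker's latitude/longitude
# (widened by its reported GPS accuracy) fall inside the home zone, as
# computed by zone_cmp.in_zone().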
def zone_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with zone based condition."""
if config_validation:
config = cv.ZONE_CONDITION_SCHEMA(config)
entity_ids = config.get(CONF_ENTITY_ID, [])
zone_entity_ids = config.get(CONF_ZONE, [])
def if_in_zone(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Test if condition."""
return all(
any(
zone(hass, zone_entity_id, entity_id)
for zone_entity_id in zone_entity_ids
)
for entity_id in entity_ids
)
return if_in_zone
async def async_device_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Test a device condition."""
if config_validation:
config = cv.DEVICE_CONDITION_SCHEMA(config)
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
return cast(
ConditionCheckerType,
platform.async_condition_from_config(config, config_validation), # type: ignore
)
async def async_validate_condition_config(
hass: HomeAssistant, config: Union[ConfigType, Template]
) -> Union[ConfigType, Template]:
"""Validate config."""
if isinstance(config, Template):
return config
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
conditions = []
for sub_cond in config["conditions"]:
sub_cond = await async_validate_condition_config(hass, sub_cond)
conditions.append(sub_cond)
config["conditions"] = conditions
if condition == "device":
config = cv.DEVICE_CONDITION_SCHEMA(config)
assert not isinstance(config, Template)
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
return cast(ConfigType, platform.CONDITION_SCHEMA(config)) # type: ignore
return config
@callback
def async_extract_entities(config: Union[ConfigType, Template]) -> Set[str]:
"""Extract entities from a condition."""
referenced: Set[str] = set()
to_process = deque([config])
while to_process:
config = to_process.popleft()
if isinstance(config, Template):
continue
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
to_process.extend(config["conditions"])
continue
entity_ids = config.get(CONF_ENTITY_ID)
if isinstance(entity_ids, str):
entity_ids = [entity_ids]
if entity_ids is not None:
referenced.update(entity_ids)
return referenced
@callback
def async_extract_devices(config: Union[ConfigType, Template]) -> Set[str]:
"""Extract devices from a condition."""
referenced = set()
to_process = deque([config])
while to_process:
config = to_process.popleft()
if isinstance(config, Template):
continue
condition = config[CONF_CONDITION]
if condition in ("and", "not", "or"):
to_process.extend(config["conditions"])
continue
if condition != "device":
continue
device_id = config.get(CONF_DEVICE_ID)
if device_id is not None:
referenced.add(device_id)
return referenced
|
from datetime import timedelta
import logging
from pyeight.eight import EightSleep
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_BINARY_SENSORS,
CONF_PASSWORD,
CONF_SENSORS,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
CONF_PARTNER = "partner"
DATA_EIGHT = "eight_sleep"
DEFAULT_PARTNER = False
DOMAIN = "eight_sleep"
HEAT_ENTITY = "heat"
USER_ENTITY = "user"
HEAT_SCAN_INTERVAL = timedelta(seconds=60)
USER_SCAN_INTERVAL = timedelta(seconds=300)
SIGNAL_UPDATE_HEAT = "eight_heat_update"
SIGNAL_UPDATE_USER = "eight_user_update"
NAME_MAP = {
"left_current_sleep": "Left Sleep Session",
"left_current_sleep_fitness": "Left Sleep Fitness",
"left_last_sleep": "Left Previous Sleep Session",
"left_bed_state": "Left Bed State",
"left_presence": "Left Bed Presence",
"left_bed_temp": "Left Bed Temperature",
"left_sleep_stage": "Left Sleep Stage",
"right_current_sleep": "Right Sleep Session",
"right_current_sleep_fitness": "Right Sleep Fitness",
"right_last_sleep": "Right Previous Sleep Session",
"right_bed_state": "Right Bed State",
"right_presence": "Right Bed Presence",
"right_bed_temp": "Right Bed Temperature",
"right_sleep_stage": "Right Sleep Stage",
"room_temp": "Room Temperature",
}
SENSORS = [
"current_sleep",
"current_sleep_fitness",
"last_sleep",
"bed_state",
"bed_temp",
"sleep_stage",
]
SERVICE_HEAT_SET = "heat_set"
ATTR_TARGET_HEAT = "target"
ATTR_HEAT_DURATION = "duration"
VALID_TARGET_HEAT = vol.All(vol.Coerce(int), vol.Clamp(min=-100, max=100))
VALID_DURATION = vol.All(vol.Coerce(int), vol.Clamp(min=0, max=28800))
SERVICE_EIGHT_SCHEMA = vol.Schema(
{
ATTR_ENTITY_ID: cv.entity_ids,
ATTR_TARGET_HEAT: VALID_TARGET_HEAT,
ATTR_HEAT_DURATION: VALID_DURATION,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PARTNER, default=DEFAULT_PARTNER): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
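# Example configuration.yaml entry accepted by CONFIG_SCHEMA (credentials
# are placeholders):
#
# eight_sleep:
#   username: YOUR_EMAIL
#   password: YOUR_PASSWORD
#   partner: true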
async def async_setup(hass, config):
"""Set up the Eight Sleep component."""
conf = config.get(DOMAIN)
user = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
partner = conf.get(CONF_PARTNER)
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant")
return False
timezone = str(hass.config.time_zone)
eight = EightSleep(user, password, timezone, partner, None, hass.loop)
hass.data[DATA_EIGHT] = eight
# Authenticate, build sensors
success = await eight.start()
if not success:
# Authentication failed, cannot continue
return False
async def async_update_heat_data(now):
"""Update heat data from eight in HEAT_SCAN_INTERVAL."""
await eight.update_device_data()
async_dispatcher_send(hass, SIGNAL_UPDATE_HEAT)
async_track_point_in_utc_time(
hass, async_update_heat_data, utcnow() + HEAT_SCAN_INTERVAL
)
async def async_update_user_data(now):
"""Update user data from eight in USER_SCAN_INTERVAL."""
await eight.update_user_data()
async_dispatcher_send(hass, SIGNAL_UPDATE_USER)
async_track_point_in_utc_time(
hass, async_update_user_data, utcnow() + USER_SCAN_INTERVAL
)
await async_update_heat_data(None)
await async_update_user_data(None)
# Load sub components
sensors = []
binary_sensors = []
if eight.users:
for user in eight.users:
obj = eight.users[user]
for sensor in SENSORS:
sensors.append(f"{obj.side}_{sensor}")
binary_sensors.append(f"{obj.side}_presence")
sensors.append("room_temp")
else:
# No users, cannot continue
return False
hass.async_create_task(
discovery.async_load_platform(
hass, "sensor", DOMAIN, {CONF_SENSORS: sensors}, config
)
)
hass.async_create_task(
discovery.async_load_platform(
hass, "binary_sensor", DOMAIN, {CONF_BINARY_SENSORS: binary_sensors}, config
)
)
async def async_service_handler(service):
"""Handle eight sleep service calls."""
params = service.data.copy()
sensor = params.pop(ATTR_ENTITY_ID, None)
target = params.pop(ATTR_TARGET_HEAT, None)
duration = params.pop(ATTR_HEAT_DURATION, 0)
for sens in sensor:
side = sens.split("_")[1]
userid = eight.fetch_userid(side)
usrobj = eight.users[userid]
await usrobj.set_heating_level(target, duration)
async_dispatcher_send(hass, SIGNAL_UPDATE_HEAT)
# Register services
hass.services.async_register(
DOMAIN, SERVICE_HEAT_SET, async_service_handler, schema=SERVICE_EIGHT_SCHEMA
)
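# Illustrative service call handled above (the entity name is hypothetical):
#
# service: eight_sleep.heat_set
# data:
#   entity_id: sensor.eight_left_bed_temp
#   target: 50
#   duration: 3600
#
# The handler derives the bed side from the entity name ("left" here) and
# forwards the target level and duration to that user's heating control.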
async def stop_eight(event):
"""Handle stopping eight api session."""
await eight.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_eight)
return True
class EightSleepUserEntity(Entity):
"""The Eight Sleep device entity."""
def __init__(self, eight):
"""Initialize the data object."""
self._eight = eight
async def async_added_to_hass(self):
"""Register update dispatcher."""
@callback
def async_eight_user_update():
"""Update callback."""
self.async_schedule_update_ha_state(True)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_USER, async_eight_user_update
)
)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
class EightSleepHeatEntity(Entity):
"""The Eight Sleep device entity."""
def __init__(self, eight):
"""Initialize the data object."""
self._eight = eight
async def async_added_to_hass(self):
"""Register update dispatcher."""
@callback
def async_eight_heat_update():
"""Update callback."""
self.async_schedule_update_ha_state(True)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_HEAT, async_eight_heat_update
)
)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
|
import logging
from datetime import datetime, timezone
import click
import dateutil.parser
from twtxt.models import Tweet
logger = logging.getLogger(__name__)
def make_aware(dt):
"""Appends tzinfo and assumes UTC, if datetime object has no tzinfo already."""
return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
def parse_iso8601(string):
"""Parse string using dateutil.parser."""
return make_aware(dateutil.parser.parse(string))
def parse_tweets(raw_tweets, source, now=None):
"""
Parses a list of raw tweet lines from a twtxt file
and returns a list of :class:`Tweet` objects.
:param list raw_tweets: list of raw tweet lines
:param Source source: the source of the given tweets
:param Datetime now: the current datetime
:returns: a list of parsed tweets :class:`Tweet` objects
:rtype: list
"""
if now is None:
now = datetime.now(timezone.utc)
tweets = []
for line in raw_tweets:
try:
tweet = parse_tweet(line, source, now)
except (ValueError, OverflowError) as e:
logger.debug("{0} - {1}".format(source.url, e))
else:
tweets.append(tweet)
return tweets
def parse_tweet(raw_tweet, source, now=None):
"""
Parses a single raw tweet line from a twtxt file
and returns a :class:`Tweet` object.
:param str raw_tweet: a single raw tweet line
:param Source source: the source of the given tweet
:param Datetime now: the current datetime
:returns: the parsed tweet
:rtype: Tweet
"""
if now is None:
now = datetime.now(timezone.utc)
raw_created_at, text = raw_tweet.split("\t", 1)
created_at = parse_iso8601(raw_created_at)
if created_at > now:
raise ValueError("Tweet is from the future")
return Tweet(click.unstyle(text.strip()), created_at, source)
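# Illustrative only, with a made-up line:
#
#   line = "2016-02-04T13:30:00+00:00\tHello twtxt world!"
#   tweet = parse_tweet(line, source)
#
# splits on the first tab, parses the ISO 8601 timestamp (assumed UTC if no
# offset is given), and rejects timestamps in the future.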
|
import attr
from PyQt5.QtCore import Qt
@attr.s
class Key:
"""A key with expected values.
Attributes:
attribute: The name of the Qt::Key attribute ('Foo' -> Qt.Key_Foo)
name: The name returned by str(KeyInfo) with that key.
text: The text returned by KeyInfo.text().
uppertext: The text returned by KeyInfo.text() with shift.
member: The numeric value.
"""
attribute = attr.ib()
name = attr.ib(None)
text = attr.ib('')
uppertext = attr.ib('')
member = attr.ib(None)
qtest = attr.ib(True)
def __attrs_post_init__(self):
if self.attribute:
self.member = getattr(Qt, 'Key_' + self.attribute, None)
if self.name is None:
self.name = self.attribute
@attr.s
class Modifier:
"""A modifier with expected values.
Attributes:
attribute: The name of the Qt::KeyboardModifier attribute
('Shift' -> Qt.ShiftModifier)
name: The name returned by str(KeyInfo) with that modifier.
member: The numeric value.
"""
attribute = attr.ib()
name = attr.ib(None)
member = attr.ib(None)
def __attrs_post_init__(self):
self.member = getattr(Qt, self.attribute + 'Modifier')
if self.name is None:
self.name = self.attribute
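# Illustrative only: Key('Escape') resolves member to Qt.Key_Escape and keeps
# name 'Escape'; Modifier('Shift') resolves member to Qt.ShiftModifier. The
# tables below rely on exactly this resolution.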
# From enum Key in qt5/qtbase/src/corelib/global/qnamespace.h
KEYS = [
### misc keys
Key('Escape', text='\x1b', uppertext='\x1b'),
Key('Tab', text='\t', uppertext='\t'),
Key('Backtab', qtest=False), # Qt assumes VT (vertical tab)
Key('Backspace', text='\b', uppertext='\b'),
Key('Return', text='\r', uppertext='\r'),
Key('Enter', text='\r', uppertext='\r'),
Key('Insert', 'Ins'),
Key('Delete', 'Del'),
Key('Pause'),
Key('Print'), # print screen
Key('SysReq'),
Key('Clear'),
### cursor movement
Key('Home'),
Key('End'),
Key('Left'),
Key('Up'),
Key('Right'),
Key('Down'),
Key('PageUp', 'PgUp'),
Key('PageDown', 'PgDown'),
### modifiers
Key('Shift'),
Key('Control'),
Key('Meta'),
Key('Alt'),
Key('CapsLock'),
Key('NumLock'),
Key('ScrollLock'),
### function keys
Key('F1'),
Key('F2'),
Key('F3'),
Key('F4'),
Key('F5'),
Key('F6'),
Key('F7'),
Key('F8'),
Key('F9'),
Key('F10'),
Key('F11'),
Key('F12'),
Key('F13'),
Key('F14'),
Key('F15'),
Key('F16'),
Key('F17'),
Key('F18'),
Key('F19'),
Key('F20'),
Key('F21'),
Key('F22'),
Key('F23'),
Key('F24'),
# F25 .. F35 only on X11
Key('F25'),
Key('F26'),
Key('F27'),
Key('F28'),
Key('F29'),
Key('F30'),
Key('F31'),
Key('F32'),
Key('F33'),
Key('F34'),
Key('F35'),
### extra keys
Key('Super_L', 'Super L'),
Key('Super_R', 'Super R'),
Key('Menu'),
Key('Hyper_L', 'Hyper L'),
Key('Hyper_R', 'Hyper R'),
Key('Help'),
Key('Direction_L', 'Direction L'),
Key('Direction_R', 'Direction R'),
### 7 bit printable ASCII
Key('Space', text=' ', uppertext=' '),
Key('Any', 'Space', text=' ', uppertext=' '), # Same value
Key('Exclam', '!', text='!', uppertext='!'),
Key('QuoteDbl', '"', text='"', uppertext='"'),
Key('NumberSign', '#', text='#', uppertext='#'),
Key('Dollar', '$', text='$', uppertext='$'),
Key('Percent', '%', text='%', uppertext='%'),
Key('Ampersand', '&', text='&', uppertext='&'),
Key('Apostrophe', "'", text="'", uppertext="'"),
Key('ParenLeft', '(', text='(', uppertext='('),
Key('ParenRight', ')', text=')', uppertext=')'),
Key('Asterisk', '*', text='*', uppertext='*'),
Key('Plus', '+', text='+', uppertext='+'),
Key('Comma', ',', text=',', uppertext=','),
Key('Minus', '-', text='-', uppertext='-'),
Key('Period', '.', text='.', uppertext='.'),
Key('Slash', '/', text='/', uppertext='/'),
Key('0', text='0', uppertext='0'),
Key('1', text='1', uppertext='1'),
Key('2', text='2', uppertext='2'),
Key('3', text='3', uppertext='3'),
Key('4', text='4', uppertext='4'),
Key('5', text='5', uppertext='5'),
Key('6', text='6', uppertext='6'),
Key('7', text='7', uppertext='7'),
Key('8', text='8', uppertext='8'),
Key('9', text='9', uppertext='9'),
Key('Colon', ':', text=':', uppertext=':'),
Key('Semicolon', ';', text=';', uppertext=';'),
Key('Less', '<', text='<', uppertext='<'),
Key('Equal', '=', text='=', uppertext='='),
Key('Greater', '>', text='>', uppertext='>'),
Key('Question', '?', text='?', uppertext='?'),
Key('At', '@', text='@', uppertext='@'),
Key('A', text='a', uppertext='A'),
Key('B', text='b', uppertext='B'),
Key('C', text='c', uppertext='C'),
Key('D', text='d', uppertext='D'),
Key('E', text='e', uppertext='E'),
Key('F', text='f', uppertext='F'),
Key('G', text='g', uppertext='G'),
Key('H', text='h', uppertext='H'),
Key('I', text='i', uppertext='I'),
Key('J', text='j', uppertext='J'),
Key('K', text='k', uppertext='K'),
Key('L', text='l', uppertext='L'),
Key('M', text='m', uppertext='M'),
Key('N', text='n', uppertext='N'),
Key('O', text='o', uppertext='O'),
Key('P', text='p', uppertext='P'),
Key('Q', text='q', uppertext='Q'),
Key('R', text='r', uppertext='R'),
Key('S', text='s', uppertext='S'),
Key('T', text='t', uppertext='T'),
Key('U', text='u', uppertext='U'),
Key('V', text='v', uppertext='V'),
Key('W', text='w', uppertext='W'),
Key('X', text='x', uppertext='X'),
Key('Y', text='y', uppertext='Y'),
Key('Z', text='z', uppertext='Z'),
Key('BracketLeft', '[', text='[', uppertext='['),
Key('Backslash', '\\', text='\\', uppertext='\\'),
Key('BracketRight', ']', text=']', uppertext=']'),
Key('AsciiCircum', '^', text='^', uppertext='^'),
Key('Underscore', '_', text='_', uppertext='_'),
Key('QuoteLeft', '`', text='`', uppertext='`'),
Key('BraceLeft', '{', text='{', uppertext='{'),
Key('Bar', '|', text='|', uppertext='|'),
Key('BraceRight', '}', text='}', uppertext='}'),
Key('AsciiTilde', '~', text='~', uppertext='~'),
Key('nobreakspace', ' ', text=' ', uppertext=' '),
Key('exclamdown', '¡', text='¡', uppertext='¡'),
Key('cent', '¢', text='¢', uppertext='¢'),
Key('sterling', '£', text='£', uppertext='£'),
Key('currency', '¤', text='¤', uppertext='¤'),
Key('yen', '¥', text='¥', uppertext='¥'),
Key('brokenbar', '¦', text='¦', uppertext='¦'),
Key('section', '§', text='§', uppertext='§'),
Key('diaeresis', '¨', text='¨', uppertext='¨'),
Key('copyright', '©', text='©', uppertext='©'),
Key('ordfeminine', 'ª', text='ª', uppertext='ª'),
Key('guillemotleft', '«', text='«', uppertext='«'),
Key('notsign', '¬', text='¬', uppertext='¬'),
Key('hyphen', '\xad', text='\xad', uppertext='\xad'),  # soft hyphen (U+00AD)
Key('registered', '®', text='®', uppertext='®'),
Key('macron', '¯', text='¯', uppertext='¯'),
Key('degree', '°', text='°', uppertext='°'),
Key('plusminus', '±', text='±', uppertext='±'),
Key('twosuperior', '²', text='²', uppertext='²'),
Key('threesuperior', '³', text='³', uppertext='³'),
Key('acute', '´', text='´', uppertext='´'),
Key('mu', 'Μ', text='μ', uppertext='Μ', qtest=False), # Qt assumes U+00B5 instead of U+03BC
Key('paragraph', '¶', text='¶', uppertext='¶'),
Key('periodcentered', '·', text='·', uppertext='·'),
Key('cedilla', '¸', text='¸', uppertext='¸'),
Key('onesuperior', '¹', text='¹', uppertext='¹'),
Key('masculine', 'º', text='º', uppertext='º'),
Key('guillemotright', '»', text='»', uppertext='»'),
Key('onequarter', '¼', text='¼', uppertext='¼'),
Key('onehalf', '½', text='½', uppertext='½'),
Key('threequarters', '¾', text='¾', uppertext='¾'),
Key('questiondown', '¿', text='¿', uppertext='¿'),
Key('Agrave', 'À', text='à', uppertext='À'),
Key('Aacute', 'Á', text='á', uppertext='Á'),
Key('Acircumflex', 'Â', text='â', uppertext='Â'),
Key('Atilde', 'Ã', text='ã', uppertext='Ã'),
Key('Adiaeresis', 'Ä', text='ä', uppertext='Ä'),
Key('Aring', 'Å', text='å', uppertext='Å'),
Key('AE', 'Æ', text='æ', uppertext='Æ'),
Key('Ccedilla', 'Ç', text='ç', uppertext='Ç'),
Key('Egrave', 'È', text='è', uppertext='È'),
Key('Eacute', 'É', text='é', uppertext='É'),
Key('Ecircumflex', 'Ê', text='ê', uppertext='Ê'),
Key('Ediaeresis', 'Ë', text='ë', uppertext='Ë'),
Key('Igrave', 'Ì', text='ì', uppertext='Ì'),
Key('Iacute', 'Í', text='í', uppertext='Í'),
Key('Icircumflex', 'Î', text='î', uppertext='Î'),
Key('Idiaeresis', 'Ï', text='ï', uppertext='Ï'),
Key('ETH', 'Ð', text='ð', uppertext='Ð'),
Key('Ntilde', 'Ñ', text='ñ', uppertext='Ñ'),
Key('Ograve', 'Ò', text='ò', uppertext='Ò'),
Key('Oacute', 'Ó', text='ó', uppertext='Ó'),
Key('Ocircumflex', 'Ô', text='ô', uppertext='Ô'),
Key('Otilde', 'Õ', text='õ', uppertext='Õ'),
Key('Odiaeresis', 'Ö', text='ö', uppertext='Ö'),
Key('multiply', '×', text='×', uppertext='×'),
Key('Ooblique', 'Ø', text='ø', uppertext='Ø'),
Key('Ugrave', 'Ù', text='ù', uppertext='Ù'),
Key('Uacute', 'Ú', text='ú', uppertext='Ú'),
Key('Ucircumflex', 'Û', text='û', uppertext='Û'),
Key('Udiaeresis', 'Ü', text='ü', uppertext='Ü'),
Key('Yacute', 'Ý', text='ý', uppertext='Ý'),
Key('THORN', 'Þ', text='þ', uppertext='Þ'),
Key('ssharp', 'ß', text='ß', uppertext='ß'),
Key('division', '÷', text='÷', uppertext='÷'),
Key('ydiaeresis', 'Ÿ', text='ÿ', uppertext='Ÿ'),
### International input method support (X keycode - 0xEE00, the
### definition follows Qt/Embedded 2.3.7) Only interesting if
### you are writing your own input method
### International & multi-key character composition
Key('AltGr', qtest=False),
Key('Multi_key', 'Multi key', qtest=False), # Multi-key character compose
Key('Codeinput', 'Code input', qtest=False),
Key('SingleCandidate', 'Single Candidate', qtest=False),
Key('MultipleCandidate', 'Multiple Candidate', qtest=False),
Key('PreviousCandidate', 'Previous Candidate', qtest=False),
### Misc Functions
Key('Mode_switch', 'Mode switch', qtest=False), # Character set switch
# Key('script_switch'), # Alias for mode_switch
### Japanese keyboard support
Key('Kanji', qtest=False), # Kanji, Kanji convert
Key('Muhenkan', qtest=False), # Cancel Conversion
# Key('Henkan_Mode', qtest=False), # Start/Stop Conversion
Key('Henkan', qtest=False), # Alias for Henkan_Mode
Key('Romaji', qtest=False), # to Romaji
Key('Hiragana', qtest=False), # to Hiragana
Key('Katakana', qtest=False), # to Katakana
Key('Hiragana_Katakana', 'Hiragana Katakana', qtest=False), # Hiragana/Katakana toggle
Key('Zenkaku', qtest=False), # to Zenkaku
Key('Hankaku', qtest=False), # to Hankaku
Key('Zenkaku_Hankaku', 'Zenkaku Hankaku', qtest=False), # Zenkaku/Hankaku toggle
Key('Touroku', qtest=False), # Add to Dictionary
Key('Massyo', qtest=False), # Delete from Dictionary
Key('Kana_Lock', 'Kana Lock', qtest=False),
Key('Kana_Shift', 'Kana Shift', qtest=False),
Key('Eisu_Shift', 'Eisu Shift', qtest=False), # Alphanumeric Shift
Key('Eisu_toggle', 'Eisu toggle', qtest=False), # Alphanumeric toggle
# Key('Kanji_Bangou', qtest=False), # Codeinput
# Key('Zen_Koho', qtest=False), # Multiple/All Candidate(s)
# Key('Mae_Koho', qtest=False), # Previous Candidate
### Korean keyboard support
###
### In fact, many users from Korea need only 2 keys, Key_Hangul and
### Key_Hangul_Hanja. But rest of the keys are good for future.
Key('Hangul', qtest=False), # Hangul start/stop(toggle),
Key('Hangul_Start', 'Hangul Start', qtest=False), # Hangul start
Key('Hangul_End', 'Hangul End', qtest=False), # Hangul end, English start
Key('Hangul_Hanja', 'Hangul Hanja', qtest=False), # Start Hangul->Hanja Conversion
Key('Hangul_Jamo', 'Hangul Jamo', qtest=False), # Hangul Jamo mode
Key('Hangul_Romaja', 'Hangul Romaja', qtest=False), # Hangul Romaja mode
# Key('Hangul_Codeinput', 'Hangul Codeinput', qtest=False),# Hangul code input mode
Key('Hangul_Jeonja', 'Hangul Jeonja', qtest=False), # Jeonja mode
Key('Hangul_Banja', 'Hangul Banja', qtest=False), # Banja mode
Key('Hangul_PreHanja', 'Hangul PreHanja', qtest=False), # Pre Hanja conversion
Key('Hangul_PostHanja', 'Hangul PostHanja', qtest=False), # Post Hanja conversion
# Key('Hangul_SingleCandidate', 'Hangul SingleCandidate', qtest=False), # Single candidate
# Key('Hangul_MultipleCandidate', 'Hangul MultipleCandidate', qtest=False), # Multiple candidate
# Key('Hangul_PreviousCandidate', 'Hangul PreviousCandidate', qtest=False), # Previous candidate
Key('Hangul_Special', 'Hangul Special', qtest=False), # Special symbols
# Key('Hangul_switch', 'Hangul switch', qtest=False), # Alias for mode_switch
# dead keys (X keycode - 0xED00 to avoid the conflict, qtest=False),
Key('Dead_Grave', '`', qtest=False),
Key('Dead_Acute', '´', qtest=False),
Key('Dead_Circumflex', '^', qtest=False),
Key('Dead_Tilde', '~', qtest=False),
Key('Dead_Macron', '¯', qtest=False),
Key('Dead_Breve', '˘', qtest=False),
Key('Dead_Abovedot', '˙', qtest=False),
Key('Dead_Diaeresis', '¨', qtest=False),
Key('Dead_Abovering', '˚', qtest=False),
Key('Dead_Doubleacute', '˝', qtest=False),
Key('Dead_Caron', 'ˇ', qtest=False),
Key('Dead_Cedilla', '¸', qtest=False),
Key('Dead_Ogonek', '˛', qtest=False),
Key('Dead_Iota', 'Iota', qtest=False),
Key('Dead_Voiced_Sound', 'Voiced Sound', qtest=False),
Key('Dead_Semivoiced_Sound', 'Semivoiced Sound', qtest=False),
Key('Dead_Belowdot', 'Belowdot', qtest=False),
Key('Dead_Hook', 'Hook', qtest=False),
Key('Dead_Horn', 'Horn', qtest=False),
Key('Dead_Stroke', '̵', qtest=False),
Key('Dead_Abovecomma', '̓', qtest=False),
Key('Dead_Abovereversedcomma', '̔', qtest=False),
Key('Dead_Doublegrave', '̏', qtest=False),
Key('Dead_Belowring', '̥', qtest=False),
Key('Dead_Belowmacron', '̱', qtest=False),
Key('Dead_Belowcircumflex', '̭', qtest=False),
Key('Dead_Belowtilde', '̰', qtest=False),
Key('Dead_Belowbreve', '̮', qtest=False),
Key('Dead_Belowdiaeresis', '̤', qtest=False),
Key('Dead_Invertedbreve', '̑', qtest=False),
Key('Dead_Belowcomma', '̦', qtest=False),
Key('Dead_Currency', '¤', qtest=False),
Key('Dead_a', 'a', qtest=False),
Key('Dead_A', 'A', qtest=False),
Key('Dead_e', 'e', qtest=False),
Key('Dead_E', 'E', qtest=False),
Key('Dead_i', 'i', qtest=False),
Key('Dead_I', 'I', qtest=False),
Key('Dead_o', 'o', qtest=False),
Key('Dead_O', 'O', qtest=False),
Key('Dead_u', 'u', qtest=False),
Key('Dead_U', 'U', qtest=False),
Key('Dead_Small_Schwa', 'ə', qtest=False),
Key('Dead_Capital_Schwa', 'Ə', qtest=False),
Key('Dead_Greek', 'Greek', qtest=False),
Key('Dead_Lowline', '̲', qtest=False),
Key('Dead_Aboveverticalline', '̍', qtest=False),
Key('Dead_Belowverticalline', '\u0329', qtest=False),
Key('Dead_Longsolidusoverlay', '̸', qtest=False),
### multimedia/internet keys - ignored by default - see QKeyEvent c'tor
Key('Back'),
Key('Forward'),
Key('Stop'),
Key('Refresh'),
Key('VolumeDown', 'Volume Down'),
Key('VolumeMute', 'Volume Mute'),
Key('VolumeUp', 'Volume Up'),
Key('BassBoost', 'Bass Boost'),
Key('BassUp', 'Bass Up'),
Key('BassDown', 'Bass Down'),
Key('TrebleUp', 'Treble Up'),
Key('TrebleDown', 'Treble Down'),
Key('MediaPlay', 'Media Play'),
Key('MediaStop', 'Media Stop'),
Key('MediaPrevious', 'Media Previous'),
Key('MediaNext', 'Media Next'),
Key('MediaRecord', 'Media Record'),
Key('MediaPause', 'Media Pause', qtest=False),
Key('MediaTogglePlayPause', 'Toggle Media Play/Pause', qtest=False),
Key('HomePage', 'Home Page'),
Key('Favorites'),
Key('Search'),
Key('Standby'),
Key('OpenUrl', 'Open URL'),
Key('LaunchMail', 'Launch Mail'),
Key('LaunchMedia', 'Launch Media'),
Key('Launch0', 'Launch (0)'),
Key('Launch1', 'Launch (1)'),
Key('Launch2', 'Launch (2)'),
Key('Launch3', 'Launch (3)'),
Key('Launch4', 'Launch (4)'),
Key('Launch5', 'Launch (5)'),
Key('Launch6', 'Launch (6)'),
Key('Launch7', 'Launch (7)'),
Key('Launch8', 'Launch (8)'),
Key('Launch9', 'Launch (9)'),
Key('LaunchA', 'Launch (A)'),
Key('LaunchB', 'Launch (B)'),
Key('LaunchC', 'Launch (C)'),
Key('LaunchD', 'Launch (D)'),
Key('LaunchE', 'Launch (E)'),
Key('LaunchF', 'Launch (F)'),
Key('MonBrightnessUp', 'Monitor Brightness Up', qtest=False),
Key('MonBrightnessDown', 'Monitor Brightness Down', qtest=False),
Key('KeyboardLightOnOff', 'Keyboard Light On/Off', qtest=False),
Key('KeyboardBrightnessUp', 'Keyboard Brightness Up', qtest=False),
Key('KeyboardBrightnessDown', 'Keyboard Brightness Down', qtest=False),
Key('PowerOff', 'Power Off', qtest=False),
Key('WakeUp', 'Wake Up', qtest=False),
Key('Eject', qtest=False),
Key('ScreenSaver', 'Screensaver', qtest=False),
Key('WWW', qtest=False),
Key('Memo', 'Memo', qtest=False),
Key('LightBulb', qtest=False),
Key('Shop', qtest=False),
Key('History', qtest=False),
Key('AddFavorite', 'Add Favorite', qtest=False),
Key('HotLinks', 'Hot Links', qtest=False),
Key('BrightnessAdjust', 'Adjust Brightness', qtest=False),
Key('Finance', qtest=False),
Key('Community', qtest=False),
Key('AudioRewind', 'Media Rewind', qtest=False),
Key('BackForward', 'Back Forward', qtest=False),
Key('ApplicationLeft', 'Application Left', qtest=False),
Key('ApplicationRight', 'Application Right', qtest=False),
Key('Book', qtest=False),
Key('CD', qtest=False),
Key('Calculator', qtest=False),
Key('ToDoList', 'To Do List', qtest=False),
Key('ClearGrab', 'Clear Grab', qtest=False),
Key('Close', qtest=False),
Key('Copy', qtest=False),
Key('Cut', qtest=False),
Key('Display', qtest=False), # Output switch key
Key('DOS', qtest=False),
Key('Documents', qtest=False),
Key('Excel', 'Spreadsheet', qtest=False),
Key('Explorer', 'Browser', qtest=False),
Key('Game', qtest=False),
Key('Go', qtest=False),
Key('iTouch', qtest=False),
Key('LogOff', 'Logoff', qtest=False),
Key('Market', qtest=False),
Key('Meeting', qtest=False),
Key('MenuKB', 'Keyboard Menu', qtest=False),
Key('MenuPB', 'Menu PB', qtest=False),
Key('MySites', 'My Sites', qtest=False),
Key('News', qtest=False),
Key('OfficeHome', 'Home Office', qtest=False),
Key('Option', qtest=False),
Key('Paste', qtest=False),
Key('Phone', qtest=False),
Key('Calendar', qtest=False),
Key('Reply', qtest=False),
Key('Reload', qtest=False),
Key('RotateWindows', 'Rotate Windows', qtest=False),
Key('RotationPB', 'Rotation PB', qtest=False),
Key('RotationKB', 'Rotation KB', qtest=False),
Key('Save', qtest=False),
Key('Send', qtest=False),
Key('Spell', 'Spellchecker', qtest=False),
Key('SplitScreen', 'Split Screen', qtest=False),
Key('Support', qtest=False),
Key('TaskPane', 'Task Panel', qtest=False),
Key('Terminal', qtest=False),
Key('Tools', qtest=False),
Key('Travel', qtest=False),
Key('Video', qtest=False),
Key('Word', 'Word Processor', qtest=False),
Key('Xfer', 'XFer', qtest=False),
Key('ZoomIn', 'Zoom In', qtest=False),
Key('ZoomOut', 'Zoom Out', qtest=False),
Key('Away', qtest=False),
Key('Messenger', qtest=False),
Key('WebCam', qtest=False),
Key('MailForward', 'Mail Forward', qtest=False),
Key('Pictures', qtest=False),
Key('Music', qtest=False),
Key('Battery', qtest=False),
Key('Bluetooth', qtest=False),
Key('WLAN', 'Wireless', qtest=False),
Key('UWB', 'Ultra Wide Band', qtest=False),
Key('AudioForward', 'Media Fast Forward', qtest=False),
Key('AudioRepeat', 'Audio Repeat', qtest=False), # Toggle repeat mode
Key('AudioRandomPlay', 'Audio Random Play', qtest=False), # Toggle shuffle mode
Key('Subtitle', qtest=False),
Key('AudioCycleTrack', 'Audio Cycle Track', qtest=False),
Key('Time', qtest=False),
Key('Hibernate', qtest=False),
Key('View', qtest=False),
Key('TopMenu', 'Top Menu', qtest=False),
Key('PowerDown', 'Power Down', qtest=False),
Key('Suspend', qtest=False),
Key('ContrastAdjust', 'Contrast Adjust', qtest=False),
Key('LaunchG', 'Launch (G)', qtest=False),
Key('LaunchH', 'Launch (H)', qtest=False),
Key('TouchpadToggle', 'Touchpad Toggle', qtest=False),
Key('TouchpadOn', 'Touchpad On', qtest=False),
Key('TouchpadOff', 'Touchpad Off', qtest=False),
Key('MicMute', 'Microphone Mute', qtest=False),
Key('Red', qtest=False),
Key('Green', qtest=False),
Key('Yellow', qtest=False),
Key('Blue', qtest=False),
Key('ChannelUp', 'Channel Up', qtest=False),
Key('ChannelDown', 'Channel Down', qtest=False),
Key('Guide', qtest=False),
Key('Info', qtest=False),
Key('Settings', qtest=False),
Key('MicVolumeUp', 'Microphone Volume Up', qtest=False),
Key('MicVolumeDown', 'Microphone Volume Down', qtest=False),
Key('New', qtest=False),
Key('Open', qtest=False),
Key('Find', qtest=False),
Key('Undo', qtest=False),
Key('Redo', qtest=False),
Key('MediaLast', 'Media Last', qtest=False),
### Keypad navigation keys
Key('Select', qtest=False),
Key('Yes', qtest=False),
Key('No', qtest=False),
### Newer misc keys
Key('Cancel', qtest=False),
Key('Printer', qtest=False),
Key('Execute', qtest=False),
Key('Sleep', qtest=False),
Key('Play', qtest=False), # Not the same as Key_MediaPlay
Key('Zoom', qtest=False),
# Key('Jisho', qtest=False), # IME: Dictionary key
# Key('Oyayubi_Left', qtest=False), # IME: Left Oyayubi key
# Key('Oyayubi_Right', qtest=False), # IME: Right Oyayubi key
Key('Exit', qtest=False),
# Device keys
Key('Context1', qtest=False),
Key('Context2', qtest=False),
Key('Context3', qtest=False),
Key('Context4', qtest=False),
Key('Call', qtest=False), # set absolute state to in a call (do not toggle state)
Key('Hangup', qtest=False), # set absolute state to hang up (do not toggle state)
Key('Flip', qtest=False),
Key('ToggleCallHangup', 'Toggle Call/Hangup', qtest=False), # a toggle key for answering, or hanging up, based on current call state
Key('VoiceDial', 'Voice Dial', qtest=False),
Key('LastNumberRedial', 'Last Number Redial', qtest=False),
Key('Camera', 'Camera Shutter', qtest=False),
Key('CameraFocus', 'Camera Focus', qtest=False),
Key('unknown', 'Unknown', qtest=False),
# 0x0 is used by Qt for unknown keys...
Key(attribute='', name='nil', member=0x0, qtest=False),
]
MODIFIERS = [
Modifier('Shift'),
Modifier('Control', 'Ctrl'),
Modifier('Alt'),
Modifier('Meta'),
Modifier('Keypad', 'Num'),
Modifier('GroupSwitch', 'AltGr'),
]
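# Illustrative sketch (an assumption, not part of the tables above): records
# like these Key entries can be folded into an attribute -> display-name map,
# falling back to the attribute when no explicit name is given. The namedtuple
# stands in for the real Key class, which this file constructs with
# attribute, name, member and qtest fields.
from collections import namedtuple

_ExampleKey = namedtuple('_ExampleKey', ['attribute', 'name', 'qtest'])

_example_keys = [
    _ExampleKey('VolumeDown', 'Volume Down', True),
    _ExampleKey('Back', None, True),  # no display name given
]

_name_for_attr = {k.attribute: k.name or k.attribute for k in _example_keys}
assert _name_for_attr == {'VolumeDown': 'Volume Down', 'Back': 'Back'}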
|
from urllib.parse import urlparse
from pyheos import HeosError
from homeassistant import data_entry_flow
from homeassistant.components import heos, ssdp
from homeassistant.components.heos.config_flow import HeosFlowHandler
from homeassistant.components.heos.const import DATA_DISCOVERED_HOSTS, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_SSDP
from homeassistant.const import CONF_HOST
from tests.async_mock import patch
async def test_flow_aborts_already_setup(hass, config_entry):
"""Test flow aborts when entry already setup."""
config_entry.add_to_hass(hass)
flow = HeosFlowHandler()
flow.hass = hass
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_no_host_shows_form(hass):
"""Test form is shown when host not provided."""
flow = HeosFlowHandler()
flow.hass = hass
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
async def test_cannot_connect_shows_error_form(hass, controller):
"""Test form is shown with error when cannot connect."""
controller.connect.side_effect = HeosError()
result = await hass.config_entries.flow.async_init(
heos.DOMAIN, context={"source": "user"}, data={CONF_HOST: "127.0.0.1"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"][CONF_HOST] == "cannot_connect"
assert controller.connect.call_count == 1
assert controller.disconnect.call_count == 1
controller.connect.reset_mock()
controller.disconnect.reset_mock()
async def test_create_entry_when_host_valid(hass, controller):
"""Test result type is create entry when host is valid."""
data = {CONF_HOST: "127.0.0.1"}
with patch("homeassistant.components.heos.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_init(
heos.DOMAIN, context={"source": "user"}, data=data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == DOMAIN
assert result["title"] == "Controller (127.0.0.1)"
assert result["data"] == data
assert controller.connect.call_count == 1
assert controller.disconnect.call_count == 1
async def test_create_entry_when_friendly_name_valid(hass, controller):
"""Test result type is create entry when friendly name is valid."""
hass.data[DATA_DISCOVERED_HOSTS] = {"Office (127.0.0.1)": "127.0.0.1"}
data = {CONF_HOST: "Office (127.0.0.1)"}
with patch("homeassistant.components.heos.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_init(
heos.DOMAIN, context={"source": "user"}, data=data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == DOMAIN
assert result["title"] == "Controller (127.0.0.1)"
assert result["data"] == {CONF_HOST: "127.0.0.1"}
assert controller.connect.call_count == 1
assert controller.disconnect.call_count == 1
assert DATA_DISCOVERED_HOSTS not in hass.data
async def test_discovery_shows_create_form(hass, controller, discovery_data):
"""Test discovery shows form to confirm setup and subsequent abort."""
await hass.config_entries.flow.async_init(
heos.DOMAIN, context={"source": "ssdp"}, data=discovery_data
)
await hass.async_block_till_done()
flows_in_progress = hass.config_entries.flow.async_progress()
assert flows_in_progress[0]["context"]["unique_id"] == DOMAIN
assert len(flows_in_progress) == 1
assert hass.data[DATA_DISCOVERED_HOSTS] == {"Office (127.0.0.1)": "127.0.0.1"}
port = urlparse(discovery_data[ssdp.ATTR_SSDP_LOCATION]).port
discovery_data[ssdp.ATTR_SSDP_LOCATION] = f"http://127.0.0.2:{port}/"
discovery_data[ssdp.ATTR_UPNP_FRIENDLY_NAME] = "Bedroom"
await hass.config_entries.flow.async_init(
heos.DOMAIN, context={"source": "ssdp"}, data=discovery_data
)
await hass.async_block_till_done()
flows_in_progress = hass.config_entries.flow.async_progress()
assert flows_in_progress[0]["context"]["unique_id"] == DOMAIN
assert len(flows_in_progress) == 1
assert hass.data[DATA_DISCOVERED_HOSTS] == {
"Office (127.0.0.1)": "127.0.0.1",
"Bedroom (127.0.0.2)": "127.0.0.2",
}
async def test_discovery_flow_aborts_already_setup(
hass, controller, discovery_data, config_entry
):
"""Test discovery flow aborts when entry already setup."""
config_entry.add_to_hass(hass)
flow = HeosFlowHandler()
flow.hass = hass
result = await flow.async_step_ssdp(discovery_data)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_discovery_sets_the_unique_id(hass, controller, discovery_data):
"""Test discovery sets the unique id."""
port = urlparse(discovery_data[ssdp.ATTR_SSDP_LOCATION]).port
discovery_data[ssdp.ATTR_SSDP_LOCATION] = f"http://127.0.0.2:{port}/"
discovery_data[ssdp.ATTR_UPNP_FRIENDLY_NAME] = "Bedroom"
await hass.config_entries.flow.async_init(
heos.DOMAIN, context={"source": SOURCE_SSDP}, data=discovery_data
)
await hass.async_block_till_done()
flows_in_progress = hass.config_entries.flow.async_progress()
assert flows_in_progress[0]["context"]["unique_id"] == DOMAIN
assert len(flows_in_progress) == 1
assert hass.data[DATA_DISCOVERED_HOSTS] == {"Bedroom (127.0.0.2)": "127.0.0.2"}
async def test_import_sets_the_unique_id(hass, controller):
"""Test import sets the unique id."""
with patch("homeassistant.components.heos.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_init(
heos.DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: "127.0.0.2"},
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == DOMAIN
|
import logging
import unittest
import numpy as np
from gensim.parsing.preprocessing import \
remove_stopwords, strip_punctuation2, strip_tags, strip_short, strip_numeric, strip_non_alphanum, \
strip_multiple_whitespaces, split_alphanum, stem_text
# several documents
doc1 = """C'est un trou de verdure où chante une rivière,
Accrochant follement aux herbes des haillons
D'argent ; où le soleil, de la montagne fière,
Luit : c'est un petit val qui mousse de rayons."""
doc2 = """Un soldat jeune, bouche ouverte, tête nue,
Et la nuque baignant dans le frais cresson bleu,
Dort ; il est étendu dans l'herbe, sous la nue,
Pâle dans son lit vert où la lumière pleut."""
doc3 = """Les pieds dans les glaïeuls, il dort. Souriant comme
Sourirait un enfant malade, il fait un somme :
Nature, berce-le chaudement : il a froid."""
doc4 = """Les parfums ne font pas frissonner sa narine ;
Il dort dans le soleil, la main sur sa poitrine,
Tranquille. Il a deux trous rouges au côté droit."""
doc5 = """While it is quite useful to be able to search a
large collection of documents almost instantly for a joint
occurrence of a collection of exact words,
for many searching purposes, a little fuzziness would help. """
dataset = [strip_punctuation2(x.lower()) for x in [doc1, doc2, doc3, doc4]]
# doc1 and doc2 have class 0, doc3 and doc4 have class 1
classes = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
class TestPreprocessing(unittest.TestCase):
def testStripNumeric(self):
self.assertEqual(strip_numeric("salut les amis du 59"), "salut les amis du ")
def testStripShort(self):
self.assertEqual(strip_short("salut les amis du 59", 3), "salut les amis")
def testStripTags(self):
self.assertEqual(strip_tags("<i>Hello</i> <b>World</b>!"), "Hello World!")
def testStripMultipleWhitespaces(self):
self.assertEqual(strip_multiple_whitespaces("salut les\r\nloulous!"), "salut les loulous!")
def testStripNonAlphanum(self):
self.assertEqual(strip_non_alphanum("toto nf-kappa titi"), "toto nf kappa titi")
def testSplitAlphanum(self):
self.assertEqual(split_alphanum("toto diet1 titi"), "toto diet 1 titi")
self.assertEqual(split_alphanum("toto 1diet titi"), "toto 1 diet titi")
def testStripStopwords(self):
self.assertEqual(remove_stopwords("the world is square"), "world square")
def testStemText(self):
target = \
"while it is quit us to be abl to search a larg " + \
"collect of document almost instantli for a joint occurr " + \
"of a collect of exact words, for mani search purposes, " + \
"a littl fuzzi would help."
self.assertEqual(stem_text(doc5), target)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
unittest.main()
|
from pyecobee.const import ECOBEE_STATE_CALIBRATING, ECOBEE_STATE_UNKNOWN
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.entity import Entity
from .const import _LOGGER, DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_FAHRENHEIT],
"humidity": ["Humidity", PERCENTAGE],
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up ecobee (temperature and humidity) sensors."""
data = hass.data[DOMAIN]
dev = []
for index in range(len(data.ecobee.thermostats)):
for sensor in data.ecobee.get_remote_sensors(index):
for item in sensor["capability"]:
if item["type"] not in ("temperature", "humidity"):
continue
dev.append(EcobeeSensor(data, sensor["name"], item["type"], index))
async_add_entities(dev, True)
class EcobeeSensor(Entity):
"""Representation of an Ecobee sensor."""
def __init__(self, data, sensor_name, sensor_type, sensor_index):
"""Initialize the sensor."""
self.data = data
self._name = f"{sensor_name} {SENSOR_TYPES[sensor_type][0]}"
self.sensor_name = sensor_name
self.type = sensor_type
self.index = sensor_index
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the Ecobee sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique identifier for this sensor."""
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] == self.sensor_name:
if "code" in sensor:
return f"{sensor['code']}-{self.device_class}"
thermostat = self.data.ecobee.get_thermostat(self.index)
return f"{thermostat['identifier']}-{sensor['id']}-{self.device_class}"
@property
def device_info(self):
"""Return device information for this sensor."""
identifier = None
model = None
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] != self.sensor_name:
continue
if "code" in sensor:
identifier = sensor["code"]
model = "ecobee Room Sensor"
else:
thermostat = self.data.ecobee.get_thermostat(self.index)
identifier = thermostat["identifier"]
try:
model = (
f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
)
except KeyError:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link and provide the following information: "
"https://github.com/home-assistant/core/issues/27172 "
"Unrecognized model number: %s",
thermostat["name"],
thermostat["modelNumber"],
)
break
if identifier is not None and model is not None:
return {
"identifiers": {(DOMAIN, identifier)},
"name": self.sensor_name,
"manufacturer": MANUFACTURER,
"model": model,
}
return None
@property
def device_class(self):
"""Return the device class of the sensor."""
if self.type in (DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE):
return self.type
return None
@property
def state(self):
"""Return the state of the sensor."""
if self._state in [
ECOBEE_STATE_CALIBRATING,
ECOBEE_STATE_UNKNOWN,
"unknown",
]:
return None
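        # ecobee reports temperatures in tenths of a degree Fahrenheit,
        # e.g. an API value of 723 corresponds to 72.3 °F.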
if self.type == "temperature":
return float(self._state) / 10
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
async def async_update(self):
"""Get the latest state of the sensor."""
await self.data.update()
for sensor in self.data.ecobee.get_remote_sensors(self.index):
if sensor["name"] != self.sensor_name:
continue
for item in sensor["capability"]:
if item["type"] != self.type:
continue
self._state = item["value"]
break
|
import hashlib
import os.path
from ssl import CertificateError
from urllib.parse import quote
from django.conf import settings
from django.contrib.staticfiles import finders
from django.core.cache import InvalidCacheBackendError, caches
from django.urls import reverse
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import gettext, pgettext
from weblate.utils.errors import report_error
from weblate.utils.requests import request
def avatar_for_email(email, size=80):
"""Generate url for avatar."""
# Safely handle blank e-mail
if not email:
email = "[email protected]"
mail_hash = hashlib.md5(email.lower().encode()).hexdigest() # nosec
return "{}avatar/{}?d={}&s={}".format(
settings.AVATAR_URL_PREFIX,
mail_hash,
quote(settings.AVATAR_DEFAULT_IMAGE),
str(size),
)
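# Illustrative sketch (an assumption, not part of this module): the hashing
# scheme above in isolation, without the Django settings dependency.
# Gravatar-style services address avatars by the md5 hex digest of the
# lower-cased e-mail address.
def _example_avatar_hash(email: str) -> str:
    """Return the 32-character hex digest used in the avatar URL path."""
    return hashlib.md5(email.lower().encode()).hexdigest()  # nosec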
def get_fallback_avatar_url(size):
"""Return URL of fallback avatar."""
return os.path.join(settings.STATIC_URL, f"weblate-{size}.png")
def get_fallback_avatar(size):
"""Return fallback avatar."""
filename = finders.find(f"weblate-{size}.png")
with open(filename, "rb") as handle:
return handle.read()
def get_avatar_image(user, size):
"""Return avatar image from cache (if available) or download it."""
cache_key = "-".join(("avatar-img", user.username, str(size)))
# Try using avatar specific cache if available
try:
cache = caches["avatar"]
except InvalidCacheBackendError:
cache = caches["default"]
image = cache.get(cache_key)
if image is None:
try:
image = download_avatar_image(user.email, size)
cache.set(cache_key, image)
except (OSError, CertificateError):
report_error(
extra_data={"avatar": user.username},
cause="Failed to fetch avatar",
)
return get_fallback_avatar(size)
return image
def download_avatar_image(email, size):
"""Download avatar image from remote server."""
url = avatar_for_email(email, size)
response = request("get", url, timeout=1.0)
return response.content
def get_user_display(user, icon: bool = True, link: bool = False):
"""Nicely format user for display."""
# Did we get any user?
if user is None:
# None user, probably remotely triggered action
username = full_name = pgettext("No known user", "None")
else:
# Get full name
full_name = user.full_name
# Use user name if full name is empty
if full_name.strip() == "":
full_name = user.username
username = user.username
# Escape HTML
full_name = escape(full_name)
username = escape(username)
# Icon requested?
if icon and settings.ENABLE_AVATARS:
if user is None or user.email == "[email protected]":
avatar = get_fallback_avatar_url(32)
else:
avatar = reverse("user_avatar", kwargs={"user": user.username, "size": 32})
alt = escape(gettext("User avatar"))
username = f'<img src="{avatar}" class="avatar w32" alt="{alt}" /> {username}'
if link and user is not None:
return mark_safe(
'<a href="{link}" title="{name}">{username}</a>'.format(
name=full_name, username=username, link=user.get_absolute_url()
)
)
return mark_safe(f'<span title="{full_name}">{username}</span>')
|
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.time_frequency import fit_iir_model_raw
from mne.viz import plot_sparse_source_estimates
from mne.simulation import simulate_sparse_stc, simulate_evoked
print(__doc__)
###############################################################################
# Load real data as templates
data_path = sample.data_path()
raw = mne.io.read_raw_fif(data_path + '/MEG/sample/sample_audvis_raw.fif')
proj = mne.read_proj(data_path + '/MEG/sample/sample_audvis_ecg-proj.fif')
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = mne.read_forward_solution(fwd_fname)
fwd = mne.pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = mne.read_cov(cov_fname)
info = mne.io.read_info(ave_fname)
label_names = ['Aud-lh', 'Aud-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
for ln in label_names]
###############################################################################
# Generate source time courses from 2 dipoles and the corresponding evoked data
times = np.arange(300, dtype=np.float64) / raw.info['sfreq'] - 0.1
rng = np.random.RandomState(42)
def data_fun(times):
"""Function to generate random source time courses"""
return (50e-9 * np.sin(30. * times) *
np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01))
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
random_state=42, labels=labels, data_fun=data_fun)
###############################################################################
# Generate noisy evoked data
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1]
nave = 100 # simulate average of 100 epochs
evoked = simulate_evoked(fwd, stc, info, cov, nave=nave, use_cps=True,
iir_filter=iir_filter)
###############################################################################
# Plot
plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=True)
plt.figure()
plt.psd(evoked.data[0])
evoked.plot(time_unit='s')
|
import time
import mock
from behave import given
from behave import then
from behave import when
from itest_utils import get_service_connection_string
from paasta_tools import drain_lib
@given("a working hacheck container")
def a_working_hacheck_container(context):
connection_string = get_service_connection_string("hacheck")
context.hacheck_host, context.hacheck_port = connection_string.split(":")
context.hacheck_port = int(context.hacheck_port)
@given("a working httpdrain container")
def a_working_httpdrain_container(context):
connection_string = get_service_connection_string("httpdrain")
context.hacheck_host, context.hacheck_port = connection_string.split(":")
context.hacheck_port = int(context.hacheck_port)
@given("a fake task to drain")
def a_fake_task_to_drain(context):
context.fake_task = mock.Mock(
id="fake_task_for_itest",
host=context.hacheck_host,
ports=[context.hacheck_port],
)
@given("a HacheckDrainMethod object with delay {delay}")
def a_HacheckDrainMethod_object_with_delay(context, delay):
context.drain_method = drain_lib.HacheckDrainMethod(
service="service",
instance="instance",
registrations=["one", "two"],
delay=delay,
hacheck_port=context.hacheck_port,
)
@given("a HTTPDrainMethod object")
def a_HttpDrainMethod_object(context):
context.drain_method = drain_lib.HTTPDrainMethod(
service="service",
instance="instance",
registrations=["one", "two"],
drain={
"url_format": "http://{host}:{port}/drain?nerve_ns={nerve_ns}",
"method": "GET",
"success_codes": 200,
},
stop_draining={
"url_format": "http://{host}:{port}/drain/stop?nerve_ns={nerve_ns}",
"method": "GET",
"success_codes": 200,
},
is_draining={
"url_format": "http://{host}:{port}/drain/status?nerve_ns={nerve_ns}",
"method": "GET",
"success_codes": 200,
},
is_safe_to_kill={
"url_format": "http://{host}:{port}/drain/safe_to_kill?nerve_ns={nerve_ns}",
"method": "GET",
"success_codes": 200,
},
)
@when("we down a task")
def we_down_a_service(context):
context.down_time = time.time()
context.event_loop.run_until_complete(context.drain_method.drain(context.fake_task))
@when("we up a task")
def we_up_a_service(context):
context.event_loop.run_until_complete(
context.drain_method.stop_draining(context.fake_task)
)
@then("the task should be downed")
def the_task_should_be_downed(context):
assert context.event_loop.run_until_complete(
context.drain_method.is_draining(context.fake_task)
)
@then("the task should not be downed")
def the_task_should_not_be_downed(context):
assert not context.event_loop.run_until_complete(
context.drain_method.is_draining(context.fake_task)
)
@then("the hacheck task should be safe to kill after {wait_time} seconds")
def hacheck_should_be_safe_to_kill(context, wait_time):
with mock.patch(
"time.time", return_value=(context.down_time + float(wait_time)), autospec=True
):
assert context.event_loop.run_until_complete(
context.drain_method.is_safe_to_kill(context.fake_task)
)
@then("the task should be safe to kill after {wait_time} seconds")
def should_be_safe_to_kill(context, wait_time):
time.sleep(int(wait_time))
assert context.event_loop.run_until_complete(
context.drain_method.is_safe_to_kill(context.fake_task)
)
@then("the task should not be safe to kill after {wait_time} seconds")
def should_not_be_safe_to_kill(context, wait_time):
with mock.patch(
"time.time", return_value=(context.down_time + float(wait_time)), autospec=True
):
assert not context.event_loop.run_until_complete(
context.drain_method.is_safe_to_kill(context.fake_task)
)
@then("every registration should be {status} in hacheck")
def every_registration_should_be_down(context, status):
res = context.event_loop.run_until_complete(
context.drain_method.for_each_registration(
context.fake_task, drain_lib.get_spool
)
)
assert [r["state"] == status for r in res]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import logging
import tensorflow as tf
class AsyncCheckpointSaverHook(tf.contrib.tpu.AsyncCheckpointSaverHook):
"""Saves checkpoints every N steps in a asynchronous thread.
This is the same as tf.contrib.tpu.AsyncCheckpointSaverHook but guarantees
that there will be a checkpoint every `save_steps` steps. This helps to have
eval results at fixed step counts, even when training is paused between
regular checkpoint intervals.
"""
def after_create_session(self, session, coord):
super(AsyncCheckpointSaverHook, self).after_create_session(session, coord)
# Interruptions to the training job can cause non-regular checkpoints
# (between every_steps). Modify last triggered step to point to the last
# regular checkpoint step to make sure we trigger on the next regular
# checkpoint step.
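    # Worked example: with every_steps=500 and a restored global step of
    # 1234, the last regular checkpoint step is 1234 - (1234 % 500) = 1000,
    # so the next trigger lands on step 1500 as expected.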
step = session.run(self._global_step_tensor)
every_steps = self._timer._every_steps # pylint: disable=protected-access
last_triggered_step = step - step % every_steps
self._timer.update_last_triggered_step(last_triggered_step)
class EveryNSteps(tf.train.SessionRunHook):
""""Base class for hooks that execute callbacks every N steps.
class MyHook(EveryNSteps):
def __init__(self, every_n_steps):
super(MyHook, self).__init__(every_n_steps)
def every_n_steps_after_run(self, step, run_context, run_values):
# Your Implementation
  If you override begin(), end(), before_run() or after_run(), make sure to
  call super() at the beginning.
"""
def __init__(self, every_n_steps):
"""Initializes an `EveryNSteps` hook.
Args:
every_n_steps: `int`, the number of steps to allow between callbacks.
"""
self._timer = tf.train.SecondOrStepTimer(every_steps=every_n_steps)
self._global_step_tensor = None
def begin(self):
self._global_step_tensor = tf.train.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step must be created to use EveryNSteps.")
def before_run(self, run_context): # pylint: disable=unused-argument
"""Overrides `SessionRunHook.before_run`.
Args:
run_context: A `SessionRunContext` object.
Returns:
None or a `SessionRunArgs` object.
"""
return tf.train.SessionRunArgs({"global_step": self._global_step_tensor})
def after_run(self, run_context, run_values):
"""Overrides `SessionRunHook.after_run`.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
step = run_values.results["global_step"]
if self._timer.should_trigger_for_step(step):
self.every_n_steps_after_run(step, run_context, run_values)
self._timer.update_last_triggered_step(step)
def end(self, sess):
step = sess.run(self._global_step_tensor)
self.every_n_steps_after_run(step, None, None)
def every_n_steps_after_run(self, step, run_context, run_values):
"""Callback after every n"th call to run().
Args:
step: Current global_step value.
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
raise NotImplementedError("Subclasses of EveryNSteps should implement "
"every_n_steps_after_run().")
class ReportProgressHook(EveryNSteps):
"""SessionRunHook that reports progress to a `TaskManager` instance."""
def __init__(self, task_manager, max_steps, every_n_steps=100):
"""Create a new instance of ReportProgressHook.
Args:
task_manager: A `TaskManager` instance that implements report_progress().
max_steps: Maximum number of training steps.
every_n_steps: How frequently the hook should report progress.
"""
super(ReportProgressHook, self).__init__(every_n_steps=every_n_steps)
logging.info("Creating ReportProgressHook to report progress every %d "
"steps.", every_n_steps)
self.max_steps = max_steps
self.task_manager = task_manager
self.start_time = None
self.start_step = None
def every_n_steps_after_run(self, step, run_context, run_values):
if self.start_time is None:
# First call.
self.start_time = time.time()
self.start_step = step
return
time_elapsed = time.time() - self.start_time
steps_per_sec = float(step - self.start_step) / time_elapsed
eta_seconds = (self.max_steps - step) / (steps_per_sec + 0.0000001)
message = "{:.1f}% @{:d}, {:.1f} steps/s, ETA: {:.0f} min".format(
100 * step / self.max_steps, step, steps_per_sec, eta_seconds / 60)
logging.info("Reporting progress: %s", message)
self.task_manager.report_progress(message)
|
from django.contrib.auth.models import Group as DjangoGroup
from weblate.auth.data import SELECTION_ALL, SELECTION_MANUAL
from weblate.auth.models import Group, Role, User
from weblate.lang.models import Language
from weblate.trans.models import ComponentList, Project
from weblate.trans.tests.test_views import FixtureTestCase
class ModelTest(FixtureTestCase):
def setUp(self):
super().setUp()
self.project.access_control = Project.ACCESS_PRIVATE
self.project.save()
self.translation = self.get_translation()
self.group = Group.objects.create(name="Test", language_selection=SELECTION_ALL)
self.group.projects.add(self.project)
def test_project(self):
# No permissions
self.assertFalse(self.user.can_access_project(self.project))
self.assertFalse(self.user.has_perm("unit.edit", self.translation))
# Access permission on adding to group
self.user.clear_cache()
self.user.groups.add(self.group)
self.assertTrue(self.user.can_access_project(self.project))
self.assertFalse(self.user.has_perm("unit.edit", self.translation))
# Translate permission on adding role to group
self.user.clear_cache()
self.group.roles.add(Role.objects.get(name="Power user"))
self.assertTrue(self.user.can_access_project(self.project))
self.assertTrue(self.user.has_perm("unit.edit", self.translation))
def test_component(self):
self.group.projects.remove(self.project)
# Add user to group of power users
self.user.groups.add(self.group)
self.group.roles.add(Role.objects.get(name="Power user"))
# No permissions as component list is empty
self.assertFalse(self.user.can_access_project(self.project))
self.assertFalse(self.user.has_perm("unit.edit", self.translation))
# Permissions should exist after adding to a component list
self.user.clear_cache()
self.group.components.add(self.component)
self.assertTrue(self.user.can_access_project(self.project))
self.assertTrue(self.user.has_perm("unit.edit", self.translation))
def test_componentlist(self):
# Add user to group of power users
self.user.groups.add(self.group)
self.group.roles.add(Role.objects.get(name="Power user"))
# Assign component list to a group
clist = ComponentList.objects.create(name="Test", slug="test")
self.group.componentlists.add(clist)
# No permissions as component list is empty
self.assertFalse(self.user.can_access_project(self.project))
self.assertFalse(self.user.has_perm("unit.edit", self.translation))
# Permissions should exist after adding to a component list
self.user.clear_cache()
clist.components.add(self.component)
self.assertTrue(self.user.can_access_project(self.project))
self.assertTrue(self.user.has_perm("unit.edit", self.translation))
def test_languages(self):
# Add user to group with german language
self.user.groups.add(self.group)
self.group.language_selection = SELECTION_MANUAL
self.group.save()
self.group.roles.add(Role.objects.get(name="Power user"))
self.group.languages.set(Language.objects.filter(code="de"), clear=True)
# Permissions should deny access
self.user.clear_cache()
self.assertTrue(self.user.can_access_project(self.project))
self.assertFalse(self.user.has_perm("unit.edit", self.translation))
# Adding Czech language should unlock it
self.user.clear_cache()
self.group.languages.add(Language.objects.get(code="cs"))
self.assertTrue(self.user.can_access_project(self.project))
self.assertTrue(self.user.has_perm("unit.edit", self.translation))
def test_groups(self):
# Add test group
self.user.groups.add(self.group)
self.assertEqual(self.user.groups.count(), 3)
# Add same named Django group
self.user.groups.add(DjangoGroup.objects.create(name="Test"))
self.assertEqual(self.user.groups.count(), 3)
# Add different Django group
self.user.groups.add(DjangoGroup.objects.create(name="Second"))
self.assertEqual(self.user.groups.count(), 4)
# Remove Weblate group
self.user.groups.remove(Group.objects.get(name="Test"))
self.assertEqual(self.user.groups.count(), 3)
# Remove Django group
self.user.groups.remove(DjangoGroup.objects.get(name="Second"))
self.assertEqual(self.user.groups.count(), 2)
# Set Weblate group
self.user.groups.set(Group.objects.filter(name="Test"))
self.assertEqual(self.user.groups.count(), 1)
# Set Django group
self.user.groups.set(DjangoGroup.objects.filter(name="Second"))
self.assertEqual(self.user.groups.count(), 1)
def test_user(self):
# Create user with Django User fields
user = User.objects.create(
first_name="First", last_name="Last", is_staff=True, is_superuser=True
)
self.assertEqual(user.full_name, "First Last")
self.assertEqual(user.is_superuser, True)
|
from __future__ import print_function
import itertools
import re
import os
import urwid
class FlagFileWidget(urwid.TreeWidget):
# apply an attribute to the expand/unexpand icons
unexpanded_icon = urwid.AttrMap(urwid.TreeWidget.unexpanded_icon,
'dirmark')
expanded_icon = urwid.AttrMap(urwid.TreeWidget.expanded_icon,
'dirmark')
def __init__(self, node):
self.__super.__init__(node)
# insert an extra AttrWrap for our own use
self._w = urwid.AttrWrap(self._w, None)
self.flagged = False
self.update_w()
def selectable(self):
return True
def keypress(self, size, key):
"""allow subclasses to intercept keystrokes"""
key = self.__super.keypress(size, key)
if key:
key = self.unhandled_keys(size, key)
return key
def unhandled_keys(self, size, key):
"""
Override this method to intercept keystrokes in subclasses.
Default behavior: Toggle flagged on space, ignore other keys.
"""
if key == " ":
self.flagged = not self.flagged
self.update_w()
else:
return key
def update_w(self):
"""Update the attributes of self.widget based on self.flagged.
"""
if self.flagged:
self._w.attr = 'flagged'
self._w.focus_attr = 'flagged focus'
else:
self._w.attr = 'body'
self._w.focus_attr = 'focus'
class FileTreeWidget(FlagFileWidget):
"""Widget for individual files."""
def __init__(self, node):
self.__super.__init__(node)
path = node.get_value()
add_widget(path, self)
def get_display_text(self):
return self.get_node().get_key()
class EmptyWidget(urwid.TreeWidget):
"""A marker for expanded directories with no contents."""
def get_display_text(self):
return ('flag', '(empty directory)')
class ErrorWidget(urwid.TreeWidget):
"""A marker for errors reading directories."""
def get_display_text(self):
return ('error', "(error/permission denied)")
class DirectoryWidget(FlagFileWidget):
"""Widget for a directory."""
def __init__(self, node):
self.__super.__init__(node)
path = node.get_value()
add_widget(path, self)
self.expanded = starts_expanded(path)
self.update_expanded_icon()
def get_display_text(self):
node = self.get_node()
if node.get_depth() == 0:
return "/"
else:
return node.get_key()
class FileNode(urwid.TreeNode):
"""Metadata storage for individual files"""
def __init__(self, path, parent=None):
depth = path.count(dir_sep())
key = os.path.basename(path)
urwid.TreeNode.__init__(self, path, key=key, parent=parent, depth=depth)
def load_parent(self):
parentname, myname = os.path.split(self.get_value())
parent = DirectoryNode(parentname)
parent.set_child_node(self.get_key(), self)
return parent
def load_widget(self):
return FileTreeWidget(self)
class EmptyNode(urwid.TreeNode):
def load_widget(self):
return EmptyWidget(self)
class ErrorNode(urwid.TreeNode):
def load_widget(self):
return ErrorWidget(self)
class DirectoryNode(urwid.ParentNode):
"""Metadata storage for directories"""
def __init__(self, path, parent=None):
if path == dir_sep():
depth = 0
key = None
else:
depth = path.count(dir_sep())
key = os.path.basename(path)
urwid.ParentNode.__init__(self, path, key=key, parent=parent,
depth=depth)
def load_parent(self):
parentname, myname = os.path.split(self.get_value())
parent = DirectoryNode(parentname)
parent.set_child_node(self.get_key(), self)
return parent
def load_child_keys(self):
dirs = []
files = []
try:
path = self.get_value()
# separate dirs and files
for a in os.listdir(path):
if os.path.isdir(os.path.join(path,a)):
dirs.append(a)
else:
files.append(a)
        except OSError:
depth = self.get_depth() + 1
self._children[None] = ErrorNode(self, parent=self, key=None,
depth=depth)
return [None]
# sort dirs and files
dirs.sort(key=alphabetize)
files.sort(key=alphabetize)
# store where the first file starts
self.dir_count = len(dirs)
# collect dirs and files together again
keys = dirs + files
if len(keys) == 0:
            depth = self.get_depth() + 1
self._children[None] = EmptyNode(self, parent=self, key=None,
depth=depth)
keys = [None]
return keys
def load_child_node(self, key):
"""Return either a FileNode or DirectoryNode"""
index = self.get_child_index(key)
if key is None:
return EmptyNode(None)
else:
path = os.path.join(self.get_value(), key)
if index < self.dir_count:
return DirectoryNode(path, parent=self)
else:
path = os.path.join(self.get_value(), key)
return FileNode(path, parent=self)
def load_widget(self):
return DirectoryWidget(self)
class DirectoryBrowser:
palette = [
('body', 'black', 'light gray'),
('flagged', 'black', 'dark green', ('bold','underline')),
('focus', 'light gray', 'dark blue', 'standout'),
('flagged focus', 'yellow', 'dark cyan',
('bold','standout','underline')),
('head', 'yellow', 'black', 'standout'),
('foot', 'light gray', 'black'),
('key', 'light cyan', 'black','underline'),
('title', 'white', 'black', 'bold'),
('dirmark', 'black', 'dark cyan', 'bold'),
('flag', 'dark gray', 'light gray'),
('error', 'dark red', 'light gray'),
]
footer_text = [
('title', "Directory Browser"), " ",
('key', "UP"), ",", ('key', "DOWN"), ",",
('key', "PAGE UP"), ",", ('key', "PAGE DOWN"),
" ",
('key', "SPACE"), " ",
('key', "+"), ",",
('key', "-"), " ",
('key', "LEFT"), " ",
('key', "HOME"), " ",
('key', "END"), " ",
('key', "Q"),
]
def __init__(self):
cwd = os.getcwd()
store_initial_cwd(cwd)
self.header = urwid.Text("")
self.listbox = urwid.TreeListBox(urwid.TreeWalker(DirectoryNode(cwd)))
self.listbox.offset_rows = 1
self.footer = urwid.AttrWrap(urwid.Text(self.footer_text),
'foot')
self.view = urwid.Frame(
urwid.AttrWrap(self.listbox, 'body'),
header=urwid.AttrWrap(self.header, 'head'),
footer=self.footer)
def main(self):
"""Run the program."""
self.loop = urwid.MainLoop(self.view, self.palette,
unhandled_input=self.unhandled_input)
self.loop.run()
# on exit, write the flagged filenames to the console
names = [escape_filename_sh(x) for x in get_flagged_names()]
print(" ".join(names))
def unhandled_input(self, k):
# update display of focus directory
if k in ('q','Q'):
raise urwid.ExitMainLoop()
def main():
DirectoryBrowser().main()
#######
# global cache of widgets
_widget_cache = {}
def add_widget(path, widget):
"""Add the widget for a given path"""
_widget_cache[path] = widget
def get_flagged_names():
"""Return a list of all filenames marked as flagged."""
l = []
for w in _widget_cache.values():
if w.flagged:
l.append(w.get_node().get_value())
return l
######
# store path components of initial current working directory
_initial_cwd = []
def store_initial_cwd(name):
"""Store the initial current working directory path components."""
global _initial_cwd
_initial_cwd = name.split(dir_sep())
def starts_expanded(name):
"""Return True if directory is a parent of initial cwd."""
    if name == dir_sep():
return True
l = name.split(dir_sep())
if len(l) > len(_initial_cwd):
return False
if l != _initial_cwd[:len(l)]:
return False
return True
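# Worked example: with an initial cwd of /home/user, _initial_cwd is
# ['', 'home', 'user'], so starts_expanded('/home') splits to ['', 'home']
# and returns True, while starts_expanded('/tmp') returns False.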
def escape_filename_sh(name):
"""Return a hopefully safe shell-escaped version of a filename."""
# check whether we have unprintable characters
for ch in name:
if ord(ch) < 32:
# found one so use the ansi-c escaping
return escape_filename_sh_ansic(name)
# all printable characters, so return a double-quoted version
    name = name.replace('\\', '\\\\')
    name = name.replace('"', '\\"')
    name = name.replace('`', '\\`')
    name = name.replace('$', '\\$')
    return '"' + name + '"'
def escape_filename_sh_ansic(name):
"""Return an ansi-c shell-escaped version of a filename."""
    out = []
# gather the escaped characters into a list
for ch in name:
if ord(ch) < 32:
out.append("\\x%02x"% ord(ch))
elif ch == '\\':
out.append('\\\\')
else:
out.append(ch)
# slap them back together in an ansi-c quote $'...'
return "$'" + "".join(out) + "'"
SPLIT_RE = re.compile(r'[a-zA-Z]+|\d+')
def alphabetize(s):
L = []
for isdigit, group in itertools.groupby(SPLIT_RE.findall(s), key=lambda x: x.isdigit()):
if isdigit:
for n in group:
L.append(('', int(n)))
else:
L.append((''.join(group).lower(), 0))
return L
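# Quick usage sketch: alphabetize() yields a natural sort key, so numeric
# runs compare by value rather than lexicographically.
# sorted(['file10', 'file2', 'file1'], key=alphabetize)
#   -> ['file1', 'file2', 'file10']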
def dir_sep():
"""Return the separator used in this os."""
return getattr(os.path,'sep','/')
if __name__=="__main__":
main()
|
import argparse
import asyncio
from collections import OrderedDict
from collections.abc import Mapping, Sequence
from glob import glob
import logging
import os
from typing import Any, Callable, Dict, List, Tuple
from unittest.mock import patch
from homeassistant import bootstrap, core
from homeassistant.config import get_default_config_dir
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.check_config import async_check_ha_config_file
import homeassistant.util.yaml.loader as yaml_loader
# mypy: allow-untyped-calls, allow-untyped-defs
REQUIREMENTS = ("colorlog==4.2.1",)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=protected-access
MOCKS: Dict[str, Tuple[str, Callable]] = {
"load": ("homeassistant.util.yaml.loader.load_yaml", yaml_loader.load_yaml),
"load*": ("homeassistant.config.load_yaml", yaml_loader.load_yaml),
"secrets": ("homeassistant.util.yaml.loader.secret_yaml", yaml_loader.secret_yaml),
}
SILENCE = ("homeassistant.scripts.check_config.yaml_loader.clear_secret_cache",)
PATCHES: Dict[str, Any] = {}
C_HEAD = "bold"
ERROR_STR = "General Errors"
def color(the_color, *args, reset=None):
"""Color helper."""
# pylint: disable=import-outside-toplevel
from colorlog.escape_codes import escape_codes, parse_colors
try:
if not args:
            assert reset is None, "You cannot reset if nothing is being printed"
return parse_colors(the_color)
return parse_colors(the_color) + " ".join(args) + escape_codes[reset or "reset"]
except KeyError as k:
raise ValueError(f"Invalid color {k!s} in {the_color}") from k
def run(script_args: List) -> int:
"""Handle check config commandline script."""
parser = argparse.ArgumentParser(description="Check Home Assistant configuration.")
parser.add_argument("--script", choices=["check_config"])
parser.add_argument(
"-c",
"--config",
default=get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"-i",
"--info",
nargs="?",
default=None,
const="all",
help="Show a portion of the config",
)
parser.add_argument(
"-f", "--files", action="store_true", help="Show used configuration files"
)
parser.add_argument(
"-s", "--secrets", action="store_true", help="Show secret information"
)
args, unknown = parser.parse_known_args()
if unknown:
print(color("red", "Unknown arguments:", ", ".join(unknown)))
config_dir = os.path.join(os.getcwd(), args.config)
print(color("bold", "Testing configuration at", config_dir))
res = check(config_dir, args.secrets)
domain_info: List[str] = []
if args.info:
domain_info = args.info.split(",")
if args.files:
print(color(C_HEAD, "yaml files"), "(used /", color("red", "not used") + ")")
deps = os.path.join(config_dir, "deps")
yaml_files = [
f
for f in glob(os.path.join(config_dir, "**/*.yaml"), recursive=True)
if not f.startswith(deps)
]
for yfn in sorted(yaml_files):
the_color = "" if yfn in res["yaml_files"] else "red"
print(color(the_color, "-", yfn))
if res["except"]:
print(color("bold_white", "Failed config"))
for domain, config in res["except"].items():
domain_info.append(domain)
print(" ", color("bold_red", domain + ":"), color("red", "", reset="red"))
dump_dict(config, reset="red")
print(color("reset"))
if domain_info:
if "all" in domain_info:
print(color("bold_white", "Successful config (all)"))
for domain, config in res["components"].items():
print(" ", color(C_HEAD, domain + ":"))
dump_dict(config)
else:
print(color("bold_white", "Successful config (partial)"))
for domain in domain_info:
if domain == ERROR_STR:
continue
print(" ", color(C_HEAD, domain + ":"))
dump_dict(res["components"].get(domain))
if args.secrets:
flatsecret: Dict[str, str] = {}
for sfn, sdict in res["secret_cache"].items():
sss = []
for skey in sdict:
if skey in flatsecret:
_LOGGER.error(
"Duplicated secrets in files %s and %s", flatsecret[skey], sfn
)
flatsecret[skey] = sfn
sss.append(color("green", skey) if skey in res["secrets"] else skey)
print(color(C_HEAD, "Secrets from", sfn + ":"), ", ".join(sss))
print(color(C_HEAD, "Used Secrets:"))
for skey, sval in res["secrets"].items():
if sval is None:
print(" -", skey + ":", color("red", "not found"))
continue
print(
" -",
skey + ":",
sval,
color("cyan", "[from:", flatsecret.get(skey, "keyring") + "]"),
)
return len(res["except"])
def check(config_dir, secrets=False):
"""Perform a check by mocking hass load functions."""
logging.getLogger("homeassistant.loader").setLevel(logging.CRITICAL)
res: Dict[str, Any] = {
"yaml_files": OrderedDict(), # yaml_files loaded
"secrets": OrderedDict(), # secret cache and secrets loaded
"except": OrderedDict(), # exceptions raised (with config)
#'components' is a HomeAssistantConfig # noqa: E265
"secret_cache": None,
}
# pylint: disable=possibly-unused-variable
def mock_load(filename):
"""Mock hass.util.load_yaml to save config file names."""
res["yaml_files"][filename] = True
return MOCKS["load"][1](filename)
# pylint: disable=possibly-unused-variable
def mock_secrets(ldr, node):
"""Mock _get_secrets."""
try:
val = MOCKS["secrets"][1](ldr, node)
except HomeAssistantError:
val = None
res["secrets"][node.value] = val
return val
# Patches to skip functions
for sil in SILENCE:
PATCHES[sil] = patch(sil)
# Patches with local mock functions
for key, val in MOCKS.items():
if not secrets and key == "secrets":
continue
# The * in the key is removed to find the mock_function (side_effect)
# This allows us to use one side_effect to patch multiple locations
mock_function = locals()[f"mock_{key.replace('*', '')}"]
PATCHES[key] = patch(val[0], side_effect=mock_function)
# Start all patches
for pat in PATCHES.values():
pat.start()
if secrets:
# Ensure !secrets point to the patched function
yaml_loader.yaml.SafeLoader.add_constructor("!secret", yaml_loader.secret_yaml)
try:
res["components"] = asyncio.run(async_check_config(config_dir))
res["secret_cache"] = OrderedDict(yaml_loader.__SECRET_CACHE)
for err in res["components"].errors:
domain = err.domain or ERROR_STR
res["except"].setdefault(domain, []).append(err.message)
if err.config:
res["except"].setdefault(domain, []).append(err.config)
except Exception as err: # pylint: disable=broad-except
print(color("red", "Fatal error while loading config:"), str(err))
res["except"].setdefault(ERROR_STR, []).append(str(err))
finally:
# Stop all patches
for pat in PATCHES.values():
pat.stop()
if secrets:
# Ensure !secrets point to the original function
yaml_loader.yaml.SafeLoader.add_constructor(
"!secret", yaml_loader.secret_yaml
)
bootstrap.clear_secret_cache()
return res
async def async_check_config(config_dir):
"""Check the HA config."""
hass = core.HomeAssistant()
hass.config.config_dir = config_dir
components = await async_check_ha_config_file(hass)
await hass.async_stop(force=True)
return components
def line_info(obj, **kwargs):
"""Display line config source."""
if hasattr(obj, "__config_file__"):
return color(
"cyan", f"[source {obj.__config_file__}:{obj.__line__ or '?'}]", **kwargs
)
return "?"
def dump_dict(layer, indent_count=3, listi=False, **kwargs):
"""Display a dict.
    A friendlier version of print(yaml_loader.yaml.dump(config)).
"""
def sort_dict_key(val):
"""Return the dict key for sorting."""
key = str(val[0]).lower()
return "0" if key == "platform" else key
indent_str = indent_count * " "
if listi or isinstance(layer, list):
indent_str = indent_str[:-1] + "-"
if isinstance(layer, Mapping):
for key, value in sorted(layer.items(), key=sort_dict_key):
if isinstance(value, (dict, list)):
print(indent_str, str(key) + ":", line_info(value, **kwargs))
dump_dict(value, indent_count + 2)
else:
print(indent_str, str(key) + ":", value)
indent_str = indent_count * " "
if isinstance(layer, Sequence):
for i in layer:
if isinstance(i, dict):
dump_dict(i, indent_count + 2, True)
else:
print(" ", indent_str, i)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from perfkitbenchmarker import sample
from six.moves import range
class SampleTestCase(unittest.TestCase):
def testMetadataOptional(self):
instance = sample.Sample(metric='Test', value=1.0, unit='Mbps')
self.assertDictEqual({}, instance.metadata)
def testProvidedMetadataSet(self):
metadata = {'origin': 'unit test'}
instance = sample.Sample(metric='Test', value=1.0, unit='Mbps',
metadata=metadata.copy())
self.assertDictEqual(metadata, instance.metadata)
def testNoneValueShouldBeZero(self):
instance = sample.Sample(metric='Test', value=None, unit='Mbps')
self.assertIsInstance(instance.value, float)
self.assertEqual(0.0, instance.value)
def testValuesShouldBeFloats(self):
instance = sample.Sample(metric='Test', value=1, unit='Mbps')
self.assertIsInstance(instance.value, float)
self.assertEqual(1.0, instance.value)
instance = sample.Sample(metric='Test', value=1.0, unit='Mbps')
self.assertIsInstance(instance.value, float)
self.assertEqual(1.0, instance.value)
instance = sample.Sample(metric='Test', value='1', unit='Mbps')
self.assertIsInstance(instance.value, float)
self.assertEqual(1.0, instance.value)
instance = sample.Sample(metric='Test', value='1.0', unit='Mbps')
self.assertIsInstance(instance.value, float)
self.assertEqual(1.0, instance.value)
class TestPercentileCalculator(unittest.TestCase):
def testPercentileCalculator(self):
numbers = list(range(0, 1001))
percentiles = sample.PercentileCalculator(numbers,
percentiles=[0, 1, 99.9, 100])
self.assertEqual(percentiles['p0'], 0)
self.assertEqual(percentiles['p1'], 10)
self.assertEqual(percentiles['p99.9'], 999)
self.assertEqual(percentiles['p100'], 1000)
self.assertEqual(percentiles['average'], 500)
# 4 percentiles we requested, plus average and stddev
self.assertEqual(len(percentiles), 6)
def testNoNumbers(self):
with self.assertRaises(ValueError):
sample.PercentileCalculator([], percentiles=[0, 1, 99])
def testOutOfRangePercentile(self):
with self.assertRaises(ValueError):
sample.PercentileCalculator([3], percentiles=[-1])
def testWrongTypePercentile(self):
with self.assertRaises(ValueError):
sample.PercentileCalculator([3], percentiles=['a'])
if __name__ == '__main__':
unittest.main()
|
from typing import Tuple, Dict, Optional, List, Union
from re import findall
import discord
from discord.ext.commands.view import StringView
from redbot.core import commands, Config
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
_ = Translator("Alias", __file__)
class ArgParseError(Exception):
pass
class AliasEntry:
"""An object containing all required information about an alias"""
name: str
command: Union[Tuple[str], str]
creator: int
guild: Optional[int]
uses: int
def __init__(
self, name: str, command: Union[Tuple[str], str], creator: int, guild: Optional[int]
):
super().__init__()
self.name = name
self.command = command
self.creator = creator
self.guild = guild
self.uses = 0
def inc(self):
"""
Increases the `uses` stat by 1.
:return: new use count
"""
self.uses += 1
return self.uses
    def get_extra_args_from_alias(self, message: discord.Message, prefix: str) -> List[str]:
        """
        When an alias is executed by a user in chat this function tries
        to get any extra arguments passed in with the call.
        Whitespace will be trimmed from both ends.
        :param message: the message that invoked the alias
        :param prefix: the prefix the alias was invoked with
        :return: a list of extra argument strings
        """
known_content_length = len(prefix) + len(self.name)
extra = message.content[known_content_length:]
view = StringView(extra)
view.skip_ws()
extra = []
while not view.eof:
prev = view.index
word = view.get_quoted_word()
if len(word) < view.index - prev:
word = "".join((view.buffer[prev], word, view.buffer[view.index - 1]))
extra.append(word)
view.skip_ws()
return extra
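    # Worked example: for prefix "!" and alias name "greet", the message
    # '!greet hello "two words"' yields ['hello', '"two words"'] -- quoted
    # words keep their surrounding quotes because get_quoted_word() strips
    # them and the loop above re-attaches the original boundary characters.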
def to_json(self) -> dict:
return {
"name": self.name,
"command": self.command,
"creator": self.creator,
"guild": self.guild,
"uses": self.uses,
}
@classmethod
def from_json(cls, data: dict):
ret = cls(data["name"], data["command"], data["creator"], data["guild"])
ret.uses = data.get("uses", 0)
return ret
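# Quick usage sketch (not part of the cog): AliasEntry round-trips cleanly
# through its JSON helpers, which is what AliasCache relies on below.
# entry = AliasEntry("hi", "greet", creator=1234, guild=None)
# entry.inc()                                   # uses == 1
# AliasEntry.from_json(entry.to_json()).uses    # -> 1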
class AliasCache:
def __init__(self, config: Config, cache_enabled: bool = True):
self.config = config
self._cache_enabled = cache_enabled
self._loaded = False
self._aliases: Dict[Optional[int], Dict[str, AliasEntry]] = {None: {}}
async def anonymize_aliases(self, user_id: int):
async with self.config.entries() as global_aliases:
for a in global_aliases:
if a.get("creator", 0) == user_id:
a["creator"] = 0xDE1
if self._cache_enabled:
self._aliases[None][a["name"]] = AliasEntry.from_json(a)
all_guilds = await self.config.all_guilds()
async for guild_id, guild_data in AsyncIter(all_guilds.items(), steps=100):
for a in guild_data["entries"]:
if a.get("creator", 0) == user_id:
break
else:
continue
# basically, don't build a context manager without a need.
async with self.config.guild_from_id(guild_id).entries() as entry_list:
for a in entry_list:
if a.get("creator", 0) == user_id:
a["creator"] = 0xDE1
if self._cache_enabled:
self._aliases[guild_id][a["name"]] = AliasEntry.from_json(a)
async def load_aliases(self):
if not self._cache_enabled:
self._loaded = True
return
for alias in await self.config.entries():
self._aliases[None][alias["name"]] = AliasEntry.from_json(alias)
all_guilds = await self.config.all_guilds()
async for guild_id, guild_data in AsyncIter(all_guilds.items(), steps=100):
if guild_id not in self._aliases:
self._aliases[guild_id] = {}
for alias in guild_data["entries"]:
self._aliases[guild_id][alias["name"]] = AliasEntry.from_json(alias)
self._loaded = True
async def get_aliases(self, ctx: commands.Context) -> List[AliasEntry]:
"""Returns all possible aliases with the given context"""
global_aliases: List[AliasEntry] = []
server_aliases: List[AliasEntry] = []
global_aliases = await self.get_global_aliases()
        if ctx.guild:
server_aliases = await self.get_guild_aliases(ctx.guild)
return global_aliases + server_aliases
async def get_guild_aliases(self, guild: discord.Guild) -> List[AliasEntry]:
"""Returns all guild specific aliases"""
aliases: List[AliasEntry] = []
if self._cache_enabled:
if guild.id in self._aliases:
for _, alias in self._aliases[guild.id].items():
aliases.append(alias)
else:
aliases = [AliasEntry.from_json(d) for d in await self.config.guild(guild).entries()]
return aliases
async def get_global_aliases(self) -> List[AliasEntry]:
"""Returns all global specific aliases"""
aliases: List[AliasEntry] = []
if self._cache_enabled:
for _, alias in self._aliases[None].items():
aliases.append(alias)
else:
aliases = [AliasEntry.from_json(d) for d in await self.config.entries()]
return aliases
async def get_alias(
self, guild: Optional[discord.Guild], alias_name: str
) -> Optional[AliasEntry]:
"""Returns an AliasEntry object if the provided alias_name is a registered alias"""
server_aliases: List[AliasEntry] = []
if self._cache_enabled:
if alias_name in self._aliases[None]:
return self._aliases[None][alias_name]
if guild is not None:
if guild.id in self._aliases:
if alias_name in self._aliases[guild.id]:
return self._aliases[guild.id][alias_name]
else:
if guild:
server_aliases = [
                    AliasEntry.from_json(d) for d in await self.config.guild(guild).entries()
]
global_aliases = [AliasEntry.from_json(d) for d in await self.config.entries()]
all_aliases = global_aliases + server_aliases
for alias in all_aliases:
if alias.name == alias_name:
return alias
return None
async def add_alias(
self, ctx: commands.Context, alias_name: str, command: str, global_: bool = False
) -> AliasEntry:
indices = findall(r"{(\d*)}", command)
if indices:
try:
                indices = [int(a) for a in indices]
            except ValueError:
raise ArgParseError(_("Arguments must be specified with a number."))
low = min(indices)
indices = [a - low for a in indices]
high = max(indices)
gaps = set(indices).symmetric_difference(range(high + 1))
if gaps:
raise ArgParseError(
_("Arguments must be sequential. Missing arguments: ")
+ ", ".join(str(i + low) for i in gaps)
)
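            # Renumber the placeholders so they start at zero: positional
            # argument j of format() is the string "{j - low}", so e.g.
            # "{2} {3}" is rewritten to "{0} {1}".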
command = command.format(*(f"{{{i}}}" for i in range(-low, high + low + 1)))
if global_:
alias = AliasEntry(alias_name, command, ctx.author.id, None)
settings = self.config
if self._cache_enabled:
self._aliases[None][alias.name] = alias
else:
alias = AliasEntry(alias_name, command, ctx.author.id, ctx.guild.id)
settings = self.config.guild(ctx.guild)
if self._cache_enabled:
if ctx.guild.id not in self._aliases:
self._aliases[ctx.guild.id] = {}
self._aliases[ctx.guild.id][alias.name] = alias
async with settings.entries() as curr_aliases:
curr_aliases.append(alias.to_json())
return alias
async def delete_alias(
self, ctx: commands.Context, alias_name: str, global_: bool = False
) -> bool:
if global_:
settings = self.config
else:
settings = self.config.guild(ctx.guild)
async with settings.entries() as aliases:
for alias in aliases:
if alias["name"] == alias_name:
aliases.remove(alias)
if self._cache_enabled:
if global_:
del self._aliases[None][alias_name]
else:
del self._aliases[ctx.guild.id][alias_name]
return True
return False
|
from lemur import database
from lemur.authorizations.models import Authorization
def get(authorization_id):
"""
Retrieve dns authorization by ID
"""
return database.get(Authorization, authorization_id)
def create(account_number, domains, dns_provider_type, options=None):
"""
Creates a new dns authorization.
"""
authorization = Authorization(account_number, domains, dns_provider_type, options)
return database.create(authorization)
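# Hypothetical usage sketch (IDs and values are made up), assuming an
# application context with the lemur database configured:
#     authz = create("123456789012", ["example.com"], "route53")
#     fetched = get(authz.id)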
|
from flask import Flask, jsonify, request
from flask_jwt import JWT, jwt_required, current_identity, JWTError
from werkzeug.security import safe_str_cmp
from flasgger import Swagger
class User(object):
def __init__(self, user_id, username, password):
self.id = user_id
self.username = username
self.password = password
def __str__(self):
return "User(id='%s')" % self.id
users = [
User(1, 'guest', 'secret'),
]
username_table = {u.username: u for u in users}
userid_table = {u.id: u for u in users}
def authenticate(username, password):
user = username_table.get(username, None)
if user and safe_str_cmp(user.password.encode('utf-8'), password.encode('utf-8')):
return user
def identity(payload):
user_id = payload['identity']
return userid_table.get(user_id, None)
app = Flask(__name__)
app.debug = True
app.config["SECRET_KEY"] = "super-secret"
app.config["SWAGGER"] = {
"title": "Swagger JWT Authentiation App",
"uiversion": 3,
}
app.config['JWT_AUTH_URL_RULE'] = '/api/auth'
app.config['JWT_AUTH_HEADER_NAME'] = 'JWTAuthorization'
swag = Swagger(app,
template={
"openapi": "3.0.0",
"info": {
"title": "Swagger Basic Auth App",
"version": "1.0",
},
"consumes": [
"application/x-www-form-urlencoded",
],
"produces": [
"application/json",
],
},
)
def jwt_request_handler():
auth_header_name = app.config['JWT_AUTH_HEADER_NAME']
auth_header_value = request.headers.get(auth_header_name, None)
auth_header_prefix = app.config['JWT_AUTH_HEADER_PREFIX']
if not auth_header_value:
return
parts = auth_header_value.split()
if parts[0].lower() != auth_header_prefix.lower():
raise JWTError('Invalid JWT header', 'Unsupported authorization type')
elif len(parts) == 1:
raise JWTError('Invalid JWT header', 'Token missing')
elif len(parts) > 2:
raise JWTError('Invalid JWT header', 'Token contains spaces')
return parts[1]
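# With this handler installed, clients must send the token in the configured
# custom header instead of the default "Authorization" header, e.g.:
#     JWTAuthorization: JWT <access_token>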
jwt = JWT(app, authenticate, identity)
jwt.request_handler(jwt_request_handler)
@app.route("/login", methods=["POST"])
def login():
"""
User authenticate method.
---
description: Authenticate user with supplied credentials.
parameters:
- name: username
in: formData
type: string
required: true
- name: password
in: formData
type: string
required: true
responses:
200:
description: User successfully logged in.
        401:
description: User login failed.
"""
try:
username = request.form.get("username")
password = request.form.get("password")
user = authenticate(username, password)
if not user:
raise Exception("User not found!")
resp = jsonify({"message": "User authenticated"})
resp.status_code = 200
access_token = jwt.jwt_encode_callback(user)
# add token to response headers - so SwaggerUI can use it
resp.headers.extend({'jwt-token': access_token})
    except Exception:
resp = jsonify({"message": "Bad username and/or password"})
resp.status_code = 401
return resp
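# Example request against a local run of this app (default Flask port
# assumed); the token comes back in the "jwt-token" response header:
#     curl -X POST -d "username=guest&password=secret" http://localhost:5000/login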
@app.route("/protected", methods=["GET"])
@jwt_required()
def protected():
"""
Protected content method.
---
    description: Protected content method. Cannot be seen without a valid token.
responses:
200:
description: User successfully accessed the content.
"""
resp = jsonify({"protected": "{} - you saw me!".format(current_identity)})
resp.status_code = 200
return resp
if __name__ == '__main__':
app.run()
|
from __future__ import unicode_literals
import datetime
from lib.fun.fun import cool
from lib.data.data import pyoptions
from lib.fun.decorator import magic
from rules.BaseTrick import dateshaper
def birthday_magic(*args):
"""[begin_date] [end_date], date format: [yyyyMMdd or ddMMyyyy(--dmy option)]"""
args = list(args[0])
if len(args) == 3:
begin_date = args[1]
end_date = args[2]
else:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.plugins_info.get(args[0]))))
def check_date(dt, desc="datetime", ymd_format=True):
"""
:return: year month day
"""
if len(dt) != 8 or not dt.isdigit():
exit(cool.fuchsia("[!] {} format:[{}], such as:{}{}".format(desc, "yyyMMdd" if ymd_format else "ddMMyyyy", "20150806" if ymd_format else "08062015", pyoptions.CRLF)))
if ymd_format:
if int(dt[4:6]) > 12 or int(dt[4:6]) < 1 or int(dt[6:8]) > 31 or int(dt[6:8]) < 1:
exit(cool.fuchsia("[!] {} date format: 1<= month <=12 and 1<= day <=31{}".format(desc, pyoptions.CRLF)))
else:
return int(dt[0:4]), int(dt[4:6]), int(dt[6:8])
else:
if int(dt[2:4]) > 12 or int(dt[2:4]) < 1 or int(dt[0:2]) > 31 or int(dt[0:2]) < 1:
exit(cool.fuchsia("[!] {} date format: 1<= month <=12 and 1<= day <=31{}".format(desc, pyoptions.CRLF)))
else:
return int(dt[4:8]), int(dt[2:4]), int(dt[0:2])
    def check_range(s, e):
        # s and e are (year, month, day) tuples, so compare them directly
        if s > e:
            exit(cool.fuchsia("[!] Start date should not be later than end date" + pyoptions.CRLF))
        else:
            return True
@magic
def birthday():
start_valid = check_date(begin_date, desc="Start datetime", ymd_format=pyoptions.ymd_format)
end_valid = check_date(end_date, desc="End datetime", ymd_format=pyoptions.ymd_format)
valid = check_range(start_valid, end_valid) if start_valid and end_valid else False
if valid:
res = []
begin = datetime.datetime.strptime(begin_date, "%Y%m%d" if pyoptions.ymd_format else "%d%m%Y")
end = datetime.datetime.strptime(end_date, "%Y%m%d" if pyoptions.ymd_format else "%d%m%Y")
while begin <= end:
date_str = begin.strftime("%Y%m%d" if pyoptions.ymd_format else "%d%m%Y")
res.extend(dateshaper(date_str))
begin += datetime.timedelta(days=1)
return res
|
from datetime import timedelta
import logging
from typing import Any, Callable, Iterable, List
from directv import DIRECTV, DIRECTVError
from homeassistant.components.remote import ATTR_NUM_REPEATS, RemoteEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import DIRECTVEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=2)
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List, bool], None],
) -> None:
"""Load DirecTV remote based on a config entry."""
dtv = hass.data[DOMAIN][entry.entry_id]
entities = []
for location in dtv.device.locations:
entities.append(
DIRECTVRemote(
dtv=dtv,
name=str.title(location.name),
address=location.address,
)
)
async_add_entities(entities, True)
class DIRECTVRemote(DIRECTVEntity, RemoteEntity):
"""Device that sends commands to a DirecTV receiver."""
def __init__(self, *, dtv: DIRECTV, name: str, address: str = "0") -> None:
"""Initialize DirecTV remote."""
super().__init__(
dtv=dtv,
name=name,
address=address,
)
self._available = False
self._is_on = True
@property
def available(self):
"""Return if able to retrieve information from device or not."""
return self._available
@property
def unique_id(self):
"""Return a unique ID."""
if self._address == "0":
return self.dtv.device.info.receiver_id
return self._address
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._is_on
async def async_update(self) -> None:
"""Update device state."""
status = await self.dtv.status(self._address)
if status in ("active", "standby"):
self._available = True
self._is_on = status == "active"
else:
self._available = False
self._is_on = False
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the device on."""
await self.dtv.remote("poweron", self._address)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the device off."""
await self.dtv.remote("poweroff", self._address)
async def async_send_command(self, command: Iterable[str], **kwargs: Any) -> None:
"""Send a command to a device.
Supported keys: power, poweron, poweroff, format,
pause, rew, replay, stop, advance, ffwd, record,
play, guide, active, list, exit, back, menu, info,
up, down, left, right, select, red, green, yellow,
blue, chanup, chandown, prev, 0, 1, 2, 3, 4, 5,
6, 7, 8, 9, dash, enter
"""
num_repeats = kwargs[ATTR_NUM_REPEATS]
for _ in range(num_repeats):
for single_command in command:
try:
await self.dtv.remote(single_command, self._address)
except DIRECTVError:
_LOGGER.exception(
"Sending command %s to device %s failed",
single_command,
self._device_id,
)
|
import unittest
import numpy as np
from numpy.distutils.system_info import get_info
class TestNumpy(unittest.TestCase):
def test_array(self):
array = np.array([1, 3])
self.assertEqual((2,), array.shape)
# Numpy must be linked to the MKL. (Occasionally, a third-party package will muck up the installation
# and numpy will be reinstalled with an OpenBLAS backing.)
    def test_mkl(self):
        # get_info returns an empty dict when the requested library is not
        # found, so assert that the MKL info is actually present.
        self.assertTrue(get_info("blas_mkl"))
|
import os.path as op
import numpy as np
import pytest
from numpy.testing import assert_array_equal
import mne
from mne.utils import requires_good_network
from mne.utils import requires_pandas, requires_version
from mne.datasets.sleep_physionet import age, temazepam
from mne.datasets.sleep_physionet._utils import _update_sleep_temazepam_records
from mne.datasets.sleep_physionet._utils import _update_sleep_age_records
from mne.datasets.sleep_physionet._utils import AGE_SLEEP_RECORDS
from mne.datasets.sleep_physionet._utils import TEMAZEPAM_SLEEP_RECORDS
@pytest.fixture(scope='session')
def physionet_tmpdir(tmpdir_factory):
"""Fixture exposing a temporary directory for testing."""
return str(tmpdir_factory.mktemp('physionet_files'))
class _FakeFetch:
def __init__(self):
self.call_args_list = list()
def __call__(self, *args, **kwargs):
self.call_args_list.append((args, kwargs))
@property
def call_count(self):
return len(self.call_args_list)
def _keep_basename_only(path_structure):
return np.vectorize(op.basename)(np.array(path_structure))
def _get_expected_url(name):
base = 'https://physionet.org/physiobank/database/sleep-edfx/'
    middle = 'sleep-cassette/' if name.startswith('SC') else 'sleep-telemetry/'
    return base + middle + '/' + name
def _get_expected_path(base, name):
return op.join(base, name)
def _check_mocked_function_calls(mocked_func, call_fname_hash_pairs,
base_path):
# Check mocked_func has been called the right amount of times.
assert mocked_func.call_count == len(call_fname_hash_pairs)
# Check it has been called with the right parameters in the right
# order.
for idx, current in enumerate(call_fname_hash_pairs):
call_args, call_kwargs = mocked_func.call_args_list[idx]
assert call_args[0] == _get_expected_url(current['name'])
assert call_args[1] == _get_expected_path(base_path, current['name'])
assert call_kwargs['hash_'] == current['hash']
assert call_kwargs['hash_type'] == 'sha1'
assert call_kwargs['print_destination'] is False
@pytest.mark.timeout(60)
@pytest.mark.xfail(strict=False)
@requires_good_network
@requires_pandas
@requires_version('xlrd', '0.9')
def test_run_update_age_records(tmpdir):
"""Test Sleep Physionet URL handling."""
import pandas as pd
fname = op.join(str(tmpdir), "records.csv")
_update_sleep_age_records(fname)
data = pd.read_csv(fname)
pd.testing.assert_frame_equal(data, pd.read_csv(AGE_SLEEP_RECORDS))
@pytest.mark.parametrize('subject', [39, 68, 69, 78, 79, 83])
def test_sleep_physionet_age_missing_subjects(physionet_tmpdir, subject,
download_is_error):
"""Test handling of missing subjects in Sleep Physionet age fetcher."""
params = {'path': physionet_tmpdir, 'update_path': False}
with pytest.raises(
ValueError, match='This dataset contains subjects 0 to 82'):
age.fetch_data(
subjects=[subject], recording=[1], on_missing='raise', **params)
with pytest.warns(RuntimeWarning,
match='This dataset contains subjects 0 to 82'):
age.fetch_data(
subjects=[subject], recording=[1], on_missing='warn', **params)
paths = age.fetch_data(
subjects=[subject], recording=[1], on_missing='ignore', **params)
assert paths == []
@pytest.mark.parametrize('subject,recording', [(13, 2), (36, 1), (52, 1)])
def test_sleep_physionet_age_missing_recordings(physionet_tmpdir, subject,
recording, download_is_error):
"""Test handling of missing recordings in Sleep Physionet age fetcher."""
params = {'path': physionet_tmpdir, 'update_path': False}
with pytest.raises(
ValueError, match=f'Requested recording {recording} for subject'):
age.fetch_data(subjects=[subject], recording=[recording],
on_missing='raise', **params)
with pytest.warns(RuntimeWarning,
match=f'Requested recording {recording} for subject'):
age.fetch_data(subjects=[subject], recording=[recording],
on_missing='warn', **params)
paths = age.fetch_data(subjects=[subject], recording=[recording],
on_missing='ignore', **params)
assert paths == []
def test_sleep_physionet_age(physionet_tmpdir, monkeypatch, download_is_error):
"""Test Sleep Physionet URL handling."""
# check download_is_error patching
params = {'path': physionet_tmpdir, 'update_path': False}
with pytest.raises(AssertionError, match='Test should not download'):
age.fetch_data(subjects=[0], recording=[1], **params)
# then patch
my_func = _FakeFetch()
monkeypatch.setattr(
mne.datasets.sleep_physionet._utils, '_fetch_file', my_func)
paths = age.fetch_data(subjects=[0], recording=[1], **params)
assert_array_equal(_keep_basename_only(paths),
[['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf']])
paths = age.fetch_data(subjects=[0, 1], recording=[1], **params)
assert_array_equal(_keep_basename_only(paths),
[['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'],
['SC4011E0-PSG.edf', 'SC4011EH-Hypnogram.edf']])
paths = age.fetch_data(subjects=[0], recording=[1, 2], **params)
assert_array_equal(_keep_basename_only(paths),
[['SC4001E0-PSG.edf', 'SC4001EC-Hypnogram.edf'],
['SC4002E0-PSG.edf', 'SC4002EC-Hypnogram.edf']])
EXPECTED_CALLS = (
{'name': 'SC4001E0-PSG.edf',
'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'},
{'name': 'SC4001EC-Hypnogram.edf',
'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'},
{'name': 'SC4001E0-PSG.edf',
'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'},
{'name': 'SC4001EC-Hypnogram.edf',
'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'},
{'name': 'SC4011E0-PSG.edf',
'hash': '4d17451f7847355bcab17584de05e7e1df58c660'},
{'name': 'SC4011EH-Hypnogram.edf',
'hash': 'd582a3cbe2db481a362af890bc5a2f5ca7c878dc'},
{'name': 'SC4001E0-PSG.edf',
'hash': 'adabd3b01fc7bb75c523a974f38ee3ae4e57b40f'},
{'name': 'SC4001EC-Hypnogram.edf',
'hash': '21c998eadc8b1e3ea6727d3585186b8f76e7e70b'},
{'name': 'SC4002E0-PSG.edf',
'hash': 'c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d'},
{'name': 'SC4002EC-Hypnogram.edf',
'hash': '386230188a3552b1fc90bba0fb7476ceaca174b6'})
base_path = age.data_path(path=physionet_tmpdir)
_check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path)
@pytest.mark.xfail(strict=False)
@requires_good_network
@requires_pandas
@requires_version('xlrd', '0.9')
def test_run_update_temazepam_records(tmpdir):
"""Test Sleep Physionet URL handling."""
import pandas as pd
fname = op.join(str(tmpdir), "records.csv")
_update_sleep_temazepam_records(fname)
data = pd.read_csv(fname)
pd.testing.assert_frame_equal(
data, pd.read_csv(TEMAZEPAM_SLEEP_RECORDS))
def test_sleep_physionet_temazepam(physionet_tmpdir, monkeypatch):
"""Test Sleep Physionet URL handling."""
my_func = _FakeFetch()
monkeypatch.setattr(
mne.datasets.sleep_physionet._utils, '_fetch_file', my_func)
params = {'path': physionet_tmpdir, 'update_path': False}
paths = temazepam.fetch_data(subjects=[0], **params)
assert_array_equal(_keep_basename_only(paths),
[['ST7011J0-PSG.edf', 'ST7011JP-Hypnogram.edf']])
EXPECTED_CALLS = (
{'name': 'ST7011J0-PSG.edf',
'hash': 'b9d11484126ebff1884034396d6a20c62c0ef48d'},
{'name': 'ST7011JP-Hypnogram.edf',
'hash': 'ff28e5e01296cefed49ae0c27cfb3ebc42e710bf'})
base_path = temazepam.data_path(path=physionet_tmpdir)
_check_mocked_function_calls(my_func, EXPECTED_CALLS, base_path)
with pytest.raises(
ValueError, match='This dataset contains subjects 0 to 21'):
paths = temazepam.fetch_data(subjects=[22], **params)
|
from homeassistant.components.sonarr.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
SOURCE_REAUTH,
)
from homeassistant.const import CONF_SOURCE
from homeassistant.core import HomeAssistant
from tests.async_mock import patch
from tests.components.sonarr import setup_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_config_entry_not_ready(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the configuration entry not ready."""
entry = await setup_integration(hass, aioclient_mock, connection_error=True)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_config_entry_reauth(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the configuration entry needing to be re-authenticated."""
with patch.object(hass.config_entries.flow, "async_init") as mock_flow_init:
entry = await setup_integration(hass, aioclient_mock, invalid_auth=True)
assert entry.state == ENTRY_STATE_SETUP_ERROR
mock_flow_init.assert_called_once_with(
DOMAIN,
context={CONF_SOURCE: SOURCE_REAUTH},
data={"config_entry_id": entry.entry_id, **entry.data},
)
async def test_unload_config_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the configuration entry unloading."""
with patch(
"homeassistant.components.sonarr.sensor.async_setup_entry",
return_value=True,
):
entry = await setup_integration(hass, aioclient_mock)
assert hass.data[DOMAIN]
assert entry.entry_id in hass.data[DOMAIN]
assert entry.state == ENTRY_STATE_LOADED
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.entry_id not in hass.data[DOMAIN]
assert entry.state == ENTRY_STATE_NOT_LOADED
|
from mlpatches.base import FunctionPatch, PatchGroup
from mlpatches.os_popen import popen, popen2, popen3, popen4, system
from mlpatches.os_process import getpid, getppid, kill
# define patches
class PopenPatch(FunctionPatch):
PY2 = True
PY3 = False
module = "os"
function = "popen"
replacement = popen
class Popen2Patch(FunctionPatch):
PY2 = True
PY3 = False
module = "os"
function = "popen2"
replacement = popen2
class Popen3Patch(FunctionPatch):
PY2 = True
PY3 = False
module = "os"
function = "popen3"
replacement = popen3
class Popen4Patch(FunctionPatch):
PY2 = True
PY3 = False
module = "os"
function = "popen4"
replacement = popen4
class SystemPatch(FunctionPatch):
PY2 = True
PY3 = False
module = "os"
function = "system"
replacement = system
class GetpidPatch(FunctionPatch):
PY2 = True
PY3 = True
module = "os"
function = "getpid"
replacement = getpid
class GetppidPatch(FunctionPatch):
PY2 = True
PY3 = True
module = "os"
function = "getppid"
replacement = getppid
class KillPatch(FunctionPatch):
PY2 = True
PY3 = True
module = "os"
function = "kill"
replacement = kill
# create patch instances
POPEN_PATCH = PopenPatch()
POPEN2_PATCH = Popen2Patch()
POPEN3_PATCH = Popen3Patch()
POPEN4_PATCH = Popen4Patch()
SYSTEM_PATCH = SystemPatch()
GETPID_PATCH = GetpidPatch()
GETPPID_PATCH = GetppidPatch()
KILL_PATCH = KillPatch()
# define groups
class PopenPatches(PatchGroup):
"""all popen patches."""
patches = [
POPEN_PATCH,
POPEN2_PATCH,
POPEN3_PATCH,
POPEN4_PATCH,
]
class ProcessingPatches(PatchGroup):
"""all patches to emulate prcessing behavior"""
patches = [
GETPID_PATCH,
GETPPID_PATCH,
KILL_PATCH,
]
class OsPatches(PatchGroup):
"""all os patches."""
patches = [
POPEN_PATCH,
POPEN2_PATCH,
POPEN3_PATCH,
POPEN4_PATCH,
SYSTEM_PATCH,
GETPID_PATCH,
GETPPID_PATCH,
KILL_PATCH,
]
# create group instances
POPEN_PATCHES = PopenPatches()
OS_PATCHES = OsPatches()
PROCESSING_PATCHES = ProcessingPatches()
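# Hypothetical usage sketch (enable()/disable() assumed from mlpatches.base):
#     OS_PATCHES.enable()   # apply every os-level patch in the group
#     ...
#     OS_PATCHES.disable()  # restore the original functions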
|
from gitless import core
from . import helpers, pprint
def parser(subparsers, repo):
desc = 'merge the divergent changes of one branch onto another'
merge_parser = subparsers.add_parser(
'merge', help=desc, description=desc.capitalize(), aliases=['mg'])
group = merge_parser.add_mutually_exclusive_group()
group.add_argument(
'src', nargs='?', help='the source branch to read changes from')
group.add_argument(
'-a', '--abort', help='abort the merge in progress', action='store_true')
merge_parser.set_defaults(func=main)
def main(args, repo):
current_b = repo.current_branch
if args.abort:
current_b.abort_merge()
pprint.ok('Merge aborted successfully')
return True
src_branch = helpers.get_branch_or_use_upstream(args.src, 'src', repo)
try:
current_b.merge(src_branch, op_cb=pprint.OP_CB)
pprint.ok('Merge succeeded')
  except core.ApplyFailedError:
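    # The merge commit itself was created; re-applying the stashed local
    # changes is what failed, so report success before propagating the error.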
pprint.ok('Merge succeeded')
    raise
return True
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import mock
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import providers
import six
class ProviderBenchmarkChecks(unittest.TestCase):
def setUp(self):
p = mock.patch.object(providers, '_imported_providers', new=set())
p.start()
self.addCleanup(p.stop)
def _VerifyProviderBenchmarkSupport(self, cloud, benchmark, support_expected):
providers.LoadProvider(cloud)
provider_info_class = provider_info.GetProviderInfoClass(cloud)
supported = provider_info_class.IsBenchmarkSupported(benchmark)
fmt_args = ('', ' not') if support_expected else (' not', '')
self.assertEqual(supported, support_expected, (
'Expected provider {provider} {0}to support benchmark {benchmark}, but '
'it did{1}.'.format(*fmt_args, provider=cloud, benchmark=benchmark)))
def testIperfSupport(self):
expected = {providers.GCP: True, providers.DIGITALOCEAN: True}
for cloud, support_expected in six.iteritems(expected):
self._VerifyProviderBenchmarkSupport(cloud, 'iperf', support_expected)
def testMYSQLSupport(self):
expected = {providers.GCP: True, providers.DIGITALOCEAN: False}
for cloud, support_expected in six.iteritems(expected):
self._VerifyProviderBenchmarkSupport(cloud, 'mysql_service',
support_expected)
if __name__ == '__main__':
unittest.main()
|
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.time_frequency import morlet
from mne.preprocessing.ctps_ import (ctps, _prob_kuiper,
_compute_normalized_phase)
###############################################################################
# Generate testing signal
tmin = -0.3
sfreq = 1000. # Hz
tstep = 1. / sfreq
n_samples = 600
times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
# Generate times series from Morlet wavelet
single_trial = np.zeros((1, len(times)))
Ws = morlet(sfreq, [3], n_cycles=[1])
single_trial[0][:len(Ws[0])] = np.real(Ws[0])
roll_to = 300 - 265 # shift data to center of time window
single_trial = np.roll(single_trial, roll_to)
rng = np.random.RandomState(42)
def get_data(n_trials, j_extent):
"""Generate ground truth and testing data."""
ground_truth = np.tile(single_trial, n_trials)
my_shape = n_trials, 1, 600
random_data = rng.random_sample(my_shape)
rand_ints = rng.randint(-j_extent, j_extent, n_trials)
jittered_data = np.array([np.roll(single_trial, i) for i in rand_ints])
data = np.concatenate([ground_truth.reshape(my_shape),
jittered_data.reshape(my_shape),
random_data.reshape(my_shape)], 1)
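    # channel 0 holds perfectly phase-locked copies, channel 1 jittered
    # copies, channel 2 pure noise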
assert data.shape == (n_trials, 3, 600)
return data
# vary extent of jittering --> creates phaselocks at the borders if
# 2 * extent != n_samples
iter_test_ctps = enumerate(zip([400, 400], [150, 300], [0.6, 0.2]))
def test_ctps():
"""Test basic ctps functionality."""
for ii, (n_trials, j_extent, pk_max) in iter_test_ctps:
data = get_data(n_trials, j_extent)
ks_dyn, pk_dyn, phase_trial = ctps(data)
data2 = _compute_normalized_phase(data)
ks_dyn2, pk_dyn2, phase_trial2 = ctps(data2, is_raw=False)
for a, b in zip([ks_dyn, pk_dyn, phase_trial],
[ks_dyn2, pk_dyn2, data2]):
assert_array_equal(a, b)
assert (a.min() >= 0)
assert (a.max() <= 1)
assert (b.min() >= 0)
assert (b.max() <= 1)
# test for normalization
assert ((pk_dyn.min() > 0.0) or (pk_dyn.max() < 1.0))
# test shapes
assert (phase_trial.shape == data.shape)
assert (pk_dyn.shape == data.shape[1:])
        # test ground_truth + jittered + random case
assert (pk_dyn[0].max() == 1.0)
assert (len(np.unique(pk_dyn[0])) == 1.0)
assert (pk_dyn[1].max() < pk_max)
assert (pk_dyn[2].max() > 0.3)
if ii < 1:
pytest.raises(ValueError, ctps, data[:, :, :, None])
assert (_prob_kuiper(1.0, 400) == 1.0)
            # test vectorization: a vector input should match repeated scalar calls
            assert_array_equal(_prob_kuiper(np.array([1.0, 1.0]), 400),
                               [_prob_kuiper(1.0, 400)] * 2)
assert (_prob_kuiper(0.1, 400) < 0.1)
|
from typing import cast, Dict, List, Optional, Tuple
from qutebrowser.commands import runners
from qutebrowser.api import cmdutils
from qutebrowser.keyinput import modeman
from qutebrowser.utils import message, objreg, usertypes
_CommandType = Tuple[str, int] # command, type
macro_recorder = cast('MacroRecorder', None)
class MacroRecorder:
"""An object for recording and running keyboard macros.
Attributes:
_macros: A list of commands for each macro register.
_recording_macro: The register to which a macro is being recorded.
_macro_count: The count passed to run_macro_command for each window.
Stored for use by run_macro, which may be called from
keyinput/modeparsers.py after a key input.
        _last_register: The register of the macro that ran last.
"""
def __init__(self) -> None:
self._macros: Dict[str, List[_CommandType]] = {}
self._recording_macro: Optional[str] = None
self._macro_count: Dict[int, int] = {}
self._last_register: Optional[str] = None
@cmdutils.register(instance='macro-recorder', name='record-macro')
@cmdutils.argument('win_id', value=cmdutils.Value.win_id)
    def record_macro_command(self, win_id: int, register: Optional[str] = None) -> None:
"""Start or stop recording a macro.
Args:
register: Which register to store the macro in.
"""
if self._recording_macro is None:
if register is None:
mode_manager = modeman.instance(win_id)
mode_manager.enter(usertypes.KeyMode.record_macro,
'record_macro')
else:
self.record_macro(register)
else:
message.info("Macro '{}' recorded.".format(self._recording_macro))
self._recording_macro = None
def record_macro(self, register: str) -> None:
"""Start recording a macro."""
message.info("Recording macro '{}'...".format(register))
self._macros[register] = []
self._recording_macro = register
@cmdutils.register(instance='macro-recorder', name='run-macro')
@cmdutils.argument('win_id', value=cmdutils.Value.win_id)
@cmdutils.argument('count', value=cmdutils.Value.count)
def run_macro_command(self, win_id: int,
count: int = 1,
                          register: Optional[str] = None) -> None:
"""Run a recorded macro.
Args:
count: How many times to run the macro.
register: Which macro to run.
"""
self._macro_count[win_id] = count
if register is None:
mode_manager = modeman.instance(win_id)
mode_manager.enter(usertypes.KeyMode.run_macro, 'run_macro')
else:
self.run_macro(win_id, register)
def run_macro(self, win_id: int, register: str) -> None:
"""Run a recorded macro."""
if register == '@':
if self._last_register is None:
raise cmdutils.CommandError("No previous macro")
register = self._last_register
self._last_register = register
if register not in self._macros:
raise cmdutils.CommandError(
"No macro recorded in '{}'!".format(register))
commandrunner = runners.CommandRunner(win_id)
for _ in range(self._macro_count[win_id]):
for cmd in self._macros[register]:
commandrunner.run_safely(*cmd)
def record_command(self, text: str, count: int) -> None:
"""Record a command if a macro is being recorded."""
if self._recording_macro is not None:
self._macros[self._recording_macro].append((text, count))
def init() -> None:
"""Initialize the MacroRecorder."""
global macro_recorder
macro_recorder = MacroRecorder()
objreg.register('macro-recorder', macro_recorder, command_only=True)
|
from kalliope.core.Models.settings.SettingsEntry import SettingsEntry
class Resources(SettingsEntry):
"""
"""
def __init__(self, neuron_folder=None, stt_folder=None, tts_folder=None, trigger_folder=None, signal_folder=None):
super(Resources, self).__init__("Resources")
self.neuron_folder = neuron_folder
self.stt_folder = stt_folder
self.tts_folder = tts_folder
self.trigger_folder = trigger_folder
self.signal_folder = signal_folder
def __str__(self):
return str(self.serialize())
def serialize(self):
"""
        Serialize this object in a proper way.
        :return: A dict describing the resources
:rtype: Dict
"""
return {
'neuron_folder': self.neuron_folder,
'stt_folder': self.stt_folder,
'tts_folder': self.tts_folder,
'trigger_folder': self.trigger_folder,
'signal_folder': self.signal_folder
}
def __eq__(self, other):
"""
        Compare this object to another Resources instance.
        :param other: the object to compare against
        :return: True if both objects hold the same attributes
"""
return self.__dict__ == other.__dict__
|
import time
import unittest
from queue import Empty
import mock
from pytest import fixture
from pytest import raises
from paasta_tools.deployd.common import DelayDeadlineQueue
from paasta_tools.deployd.common import exponential_back_off
from paasta_tools.deployd.common import get_marathon_clients_from_config
from paasta_tools.deployd.common import get_service_instances_needing_update
from paasta_tools.deployd.common import PaastaQueue
from paasta_tools.deployd.common import PaastaThread
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.marathon_tools import MarathonClients
from paasta_tools.mesos.exceptions import NoSlavesAvailableError
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import NoDockerImageError
class TestPaastaThread(unittest.TestCase):
def setUp(self):
self.thread = PaastaThread()
def test_log(self):
self.thread.log.info("HAAAALP ME")
class TestPaastaQueue(unittest.TestCase):
def setUp(self):
self.queue = PaastaQueue("AtThePostOffice")
def test_log(self):
self.queue.log.info("HAAAALP ME")
def test_put(self):
with mock.patch(
"paasta_tools.deployd.common.Queue.put", autospec=True
) as mock_q_put:
self.queue.put("human")
mock_q_put.assert_called_with(self.queue, "human")
def make_si(wait_until, bounce_by):
"""Just using mock.Mock(wait_until=wait_until, bounce_by=bounce_by) mostly works, but our PriorityQueues
occasionally will compare two ServiceInstances directly, and Mocks aren't comparable unless you define an __eq__."""
return ServiceInstance(
service="service",
instance="instance",
bounce_by=bounce_by,
wait_until=wait_until,
watcher="watcher",
failures=0,
processed_count=0,
enqueue_time=1,
bounce_start_time=1,
)
class TestDelayDeadlineQueue:
@fixture
def queue(self):
yield DelayDeadlineQueue()
def test_log(self, queue):
queue.log.info("HAAAALP ME")
def test_put(self, queue):
with mock.patch.object(
queue.unavailable_service_instances,
"put",
wraps=queue.unavailable_service_instances.put,
) as mock_unavailable_service_instances_put:
si1 = make_si(wait_until=6, bounce_by=4)
queue.put(si1)
mock_unavailable_service_instances_put.assert_called_with((6, 4, si1))
mock_unavailable_service_instances_put.reset_mock()
si2 = make_si(wait_until=3, bounce_by=4)
queue.put(si2)
mock_unavailable_service_instances_put.assert_called_with((3, 4, si2))
def test_get_empty(self, queue):
with raises(Empty):
with queue.get(block=False) as result:
print(f"Should have raised, got {result}")
start_time = time.time()
with raises(Empty):
with queue.get(timeout=0.01) as result:
print(f"Should have raised, got {result}")
assert time.time() > start_time + 0.01
def test_get(self, queue):
with mock.patch.object(
queue.available_service_instances, "get", autospec=True
) as mock_available_service_instances_get:
mock_available_service_instances_get.side_effect = [(2, "human"), Empty]
with queue.get(block=False) as result:
assert result == "human"
def test_dont_block_indefinitely_when_wait_until_is_in_future(self, queue):
"""Regression test for a specific bug in the first implementation of DelayDeadlineQueue"""
# First, put an item with a distant wait_until
queue.put(make_si(wait_until=time.time() + 100, bounce_by=time.time() + 100))
# an immediate get should fail.
with raises(Empty):
with queue.get(block=False) as result:
print(f"Should have raised, got {result}")
# a get with a short timeout should fail.
with raises(Empty):
with queue.get(timeout=0.0001) as result:
print(f"Should have raised, got {result}")
wait_until = time.time() + 0.01
queue.put(make_si(wait_until=wait_until, bounce_by=wait_until))
# but if we wait a short while it should return.
with queue.get(
timeout=1.0
): # This timeout is only there so that if this test fails it doesn't take forever.
pass
assert time.time() > wait_until
def test_exponential_back_off():
assert exponential_back_off(0, 60, 2, 6000) == 60
assert exponential_back_off(2, 60, 2, 6000) == 240
assert exponential_back_off(99, 60, 2, 6000) == 6000
def test_get_service_instances_needing_update():
with mock.patch(
"paasta_tools.deployd.common.get_all_marathon_apps", autospec=True
) as mock_get_marathon_apps, mock.patch(
"paasta_tools.deployd.common.load_marathon_service_config_no_cache",
autospec=True,
) as mock_load_marathon_service_config:
mock_marathon_apps = [
mock.Mock(id="/universe.c137.c1.g1", instances=2),
mock.Mock(id="/universe.c138.c1.g1", instances=2),
]
mock_get_marathon_apps.return_value = mock_marathon_apps
mock_service_instances = [("universe", "c137"), ("universe", "c138")]
mock_configs = [
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c137.c1.g1", "instances": 2}
)
),
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c138.c2.g2", "instances": 2}
)
),
]
mock_load_marathon_service_config.side_effect = mock_configs
mock_client = mock.Mock(servers=["foo"])
fake_clients = MarathonClients(current=[mock_client], previous=[mock_client])
ret = get_service_instances_needing_update(
fake_clients, mock_service_instances, "westeros-prod"
)
assert mock_get_marathon_apps.called
calls = [
mock.call(
service="universe",
instance="c137",
cluster="westeros-prod",
soa_dir=DEFAULT_SOA_DIR,
),
mock.call(
service="universe",
instance="c138",
cluster="westeros-prod",
soa_dir=DEFAULT_SOA_DIR,
),
]
mock_load_marathon_service_config.assert_has_calls(calls)
assert ret == [("universe", "c138", mock.ANY, "/universe.c138.c2.g2")]
mock_configs = [
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c137.c1.g1", "instances": 3}
)
),
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c138.c2.g2", "instances": 2}
)
),
]
mock_load_marathon_service_config.side_effect = mock_configs
mock_client = mock.Mock(servers=["foo"])
fake_clients = MarathonClients(current=[mock_client], previous=[mock_client])
ret = get_service_instances_needing_update(
fake_clients, mock_service_instances, "westeros-prod"
)
assert ret == [
("universe", "c137", mock.ANY, "/universe.c137.c1.g1"),
("universe", "c138", mock.ANY, "/universe.c138.c2.g2"),
]
mock_configs = [
mock.Mock(
format_marathon_app_dict=mock.Mock(side_effect=NoDockerImageError)
),
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c138.c2.g2", "instances": 2}
)
),
]
mock_load_marathon_service_config.side_effect = mock_configs
mock_client = mock.Mock(servers=["foo"])
fake_clients = MarathonClients(current=[mock_client], previous=[mock_client])
ret = get_service_instances_needing_update(
fake_clients, mock_service_instances, "westeros-prod"
)
assert ret == [("universe", "c138", mock.ANY, "/universe.c138.c2.g2")]
mock_configs = [
mock.Mock(
format_marathon_app_dict=mock.Mock(side_effect=NoSlavesAvailableError)
),
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c138.c2.g2", "instances": 2}
)
),
]
mock_load_marathon_service_config.side_effect = mock_configs
mock_client = mock.Mock(servers=["foo"])
fake_clients = MarathonClients(current=[mock_client], previous=[mock_client])
ret = get_service_instances_needing_update(
fake_clients, mock_service_instances, "westeros-prod"
)
assert ret == [("universe", "c138", mock.ANY, "/universe.c138.c2.g2")]
mock_configs = [
mock.Mock(
format_marathon_app_dict=mock.Mock(side_effect=InvalidJobNameError)
),
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c138.c2.g2", "instances": 2}
)
),
]
mock_load_marathon_service_config.side_effect = mock_configs
mock_client = mock.Mock(servers=["foo"])
fake_clients = MarathonClients(current=[mock_client], previous=[mock_client])
ret = get_service_instances_needing_update(
fake_clients, mock_service_instances, "westeros-prod"
)
assert ret == [("universe", "c138", mock.ANY, "/universe.c138.c2.g2")]
mock_configs = [
mock.Mock(
format_marathon_app_dict=mock.Mock(side_effect=NoDeploymentsAvailable)
),
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c138.c2.g2", "instances": 2}
)
),
]
mock_load_marathon_service_config.side_effect = mock_configs
mock_client = mock.Mock(servers=["foo"])
fake_clients = MarathonClients(current=[mock_client], previous=[mock_client])
ret = get_service_instances_needing_update(
fake_clients, mock_service_instances, "westeros-prod"
)
assert ret == [("universe", "c138", mock.ANY, "/universe.c138.c2.g2")]
mock_configs = [
mock.Mock(format_marathon_app_dict=mock.Mock(side_effect=Exception)),
mock.Mock(
format_marathon_app_dict=mock.Mock(
return_value={"id": "universe.c138.c2.g2", "instances": 2}
)
),
]
mock_load_marathon_service_config.side_effect = mock_configs
mock_client = mock.Mock(servers=["foo"])
fake_clients = MarathonClients(current=[mock_client], previous=[mock_client])
ret = get_service_instances_needing_update(
fake_clients, mock_service_instances, "westeros-prod"
)
assert ret == [("universe", "c138", mock.ANY, "/universe.c138.c2.g2")]
def test_get_marathon_clients_from_config():
with mock.patch(
"paasta_tools.deployd.common.load_system_paasta_config", autospec=True
), mock.patch(
"paasta_tools.deployd.common.get_marathon_servers", autospec=True
), mock.patch(
"paasta_tools.deployd.common.get_marathon_clients", autospec=True
) as mock_marathon_clients:
assert get_marathon_clients_from_config() == mock_marathon_clients.return_value
|
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import mock
import pytest
from paasta_tools import check_marathon_services_replication
from paasta_tools.utils import compose_job_id
check_marathon_services_replication.log = mock.Mock()
@pytest.fixture
def instance_config():
service = "fake_service"
instance = "fake_instance"
job_id = compose_job_id(service, instance)
mock_instance_config = mock.Mock(
service=service,
instance=instance,
cluster="fake_cluster",
soa_dir="fake_soa_dir",
job_id=job_id,
)
mock_instance_config.get_replication_crit_percentage.return_value = 90
mock_instance_config.get_registrations.return_value = [job_id]
return mock_instance_config
def test_check_service_replication_for_normal_smartstack(instance_config):
instance_config.get_instances.return_value = 100
all_tasks = []
with mock.patch(
"paasta_tools.check_marathon_services_replication.get_proxy_port_for_instance",
autospec=True,
return_value=666,
), mock.patch(
"paasta_tools.monitoring_tools.check_replication_for_instance", autospec=True,
) as mock_check_replication_for_service:
check_marathon_services_replication.check_service_replication(
instance_config=instance_config,
all_tasks_or_pods=all_tasks,
replication_checker=None,
)
mock_check_replication_for_service.assert_called_once_with(
instance_config=instance_config,
expected_count=100,
replication_checker=None,
)
def test_check_service_replication_for_smartstack_with_different_namespace(
instance_config,
):
instance_config.get_instances.return_value = 100
all_tasks = []
with mock.patch(
"paasta_tools.check_marathon_services_replication.get_proxy_port_for_instance",
autospec=True,
return_value=666,
), mock.patch(
"paasta_tools.monitoring_tools.check_replication_for_instance", autospec=True,
) as mock_check_replication_for_service, mock.patch(
"paasta_tools.check_marathon_services_replication.check_healthy_marathon_tasks_for_service_instance",
autospec=True,
) as mock_check_healthy_marathon_tasks:
instance_config.get_registrations.return_value = ["some-random-other-namespace"]
check_marathon_services_replication.check_service_replication(
instance_config=instance_config,
all_tasks_or_pods=all_tasks,
replication_checker=None,
)
assert not mock_check_replication_for_service.called
mock_check_healthy_marathon_tasks.assert_called_once_with(
instance_config=instance_config, expected_count=100, all_tasks=[]
)
def test_check_service_replication_for_non_smartstack(instance_config):
instance_config.get_instances.return_value = 100
with mock.patch(
"paasta_tools.check_marathon_services_replication.get_proxy_port_for_instance",
autospec=True,
return_value=None,
), mock.patch(
"paasta_tools.check_marathon_services_replication.check_healthy_marathon_tasks_for_service_instance",
autospec=True,
) as mock_check_healthy_marathon_tasks:
check_marathon_services_replication.check_service_replication(
instance_config=instance_config,
all_tasks_or_pods=[],
replication_checker=None,
)
mock_check_healthy_marathon_tasks.assert_called_once_with(
instance_config=instance_config, expected_count=100, all_tasks=[]
)
def _make_fake_task(app_id, **kwargs):
kwargs.setdefault("started_at", datetime(1991, 7, 5, 6, 13, 0, tzinfo=timezone.utc))
return mock.Mock(app_id=app_id, **kwargs)
def test_filter_healthy_marathon_instances_for_short_app_id_correctly_counts_alive_tasks():
fakes = []
for i in range(0, 4):
fake_task = _make_fake_task(f"/service.instance.foo{i}.bar{i}")
mock_result = mock.Mock(alive=i % 2 == 0)
fake_task.health_check_results = [mock_result]
fakes.append(fake_task)
actual = check_marathon_services_replication.filter_healthy_marathon_instances_for_short_app_id(
app_id="service.instance", all_tasks=fakes
)
assert actual == 2
def test_filter_healthy_marathon_instances_for_short_app_id_considers_new_tasks_not_healthy_yet():
one_minute = timedelta(minutes=1)
fakes = []
for i in range(0, 4):
fake_task = _make_fake_task(
f"/service.instance.foo{i}.bar{i}",
# when i == 0, produces a task that has just started (not healthy yet)
# otherwise produces a task that was started over a minute ago (healthy)
started_at=datetime.now(timezone.utc) - one_minute * i,
)
mock_result = mock.Mock(alive=True)
fake_task.health_check_results = [mock_result]
fakes.append(fake_task)
actual = check_marathon_services_replication.filter_healthy_marathon_instances_for_short_app_id(
all_tasks=fakes, app_id="service.instance"
)
assert actual == 3
def test_get_healthy_marathon_instances_for_short_app_id_considers_none_start_time_unhealthy():
fake_task = _make_fake_task("/service.instance.foo.bar", started_at=None)
mock_result = mock.Mock(alive=True)
fake_task.health_check_results = [mock_result]
fakes = [fake_task]
actual = check_marathon_services_replication.filter_healthy_marathon_instances_for_short_app_id(
all_tasks=fakes, app_id="service.instance"
)
assert actual == 0
@mock.patch(
"paasta_tools.monitoring_tools.send_replication_event_if_under_replication",
autospec=True,
)
@mock.patch(
"paasta_tools.check_marathon_services_replication.filter_healthy_marathon_instances_for_short_app_id",
autospec=True,
) # noqa
def test_check_healthy_marathon_tasks_for_service_instance(
mock_healthy_instances,
mock_send_replication_event_if_under_replication,
instance_config,
):
mock_healthy_instances.return_value = 2
check_marathon_services_replication.check_healthy_marathon_tasks_for_service_instance(
instance_config=instance_config, expected_count=10, all_tasks=mock.Mock()
)
mock_send_replication_event_if_under_replication.assert_called_once_with(
instance_config=instance_config, expected_count=10, num_available=2
)
|
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Count
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import translation
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from django.utils.translation.trans_real import parse_accept_lang_header
from django.views.decorators.cache import never_cache
from weblate.accounts.models import Profile
from weblate.lang.models import Language
from weblate.trans.forms import ReportsForm, SearchForm
from weblate.trans.models import Component, ComponentList, Project, Translation
from weblate.trans.models.translation import GhostTranslation
from weblate.trans.util import render
from weblate.utils import messages
from weblate.utils.stats import prefetch_stats
def get_untranslated(base, limit=None):
"""Filter untranslated."""
result = []
for item in prefetch_stats(base):
if item.stats.translated != item.stats.all:
result.append(item)
if limit and len(result) >= limit:
return result
return result
def get_suggestions(request, user, user_has_languages, base, filtered=False):
"""Return suggested translations for user."""
if not filtered:
non_alerts = base.annotate(alert_count=Count("component__alert__pk")).filter(
alert_count=0
)
result = get_suggestions(request, user, user_has_languages, non_alerts, True)
if result:
return result
if user_has_languages:
# Remove user subscriptions
result = get_untranslated(
base.exclude(component__project__in=user.profile.watched.all()), 10
)
if result:
return result
return get_untranslated(base, 10)
def guess_user_language(request, translations):
"""Guess user language for translations.
    It tries the following:
- Use session language.
- Parse Accept-Language header.
- Fallback to random language.
"""
# Session language
session_lang = translation.get_language()
if session_lang and session_lang != "en":
try:
return Language.objects.get(code=session_lang)
except Language.DoesNotExist:
pass
    # Accept-Language HTTP header; for most browsers it consists of the
    # browser language with a higher rank and the OS language with a lower
    # rank, so it still might be a usable guess
accept = request.META.get("HTTP_ACCEPT_LANGUAGE", "")
for accept_lang, _unused in parse_accept_lang_header(accept):
if accept_lang == "en":
continue
try:
return Language.objects.get(code=accept_lang)
except Language.DoesNotExist:
continue
# Random language from existing translations, we do not want to list all
# languages by default
try:
return translations.order_by("?")[0].language
except IndexError:
# There are no existing translations
return None
def get_user_translations(request, user, user_has_languages):
"""Get list of translations in user languages.
Works also for anonymous users based on current UI language.
"""
result = (
Translation.objects.prefetch()
.filter_access(user)
.order_by("component__priority", "component__project__name", "component__name")
)
if user_has_languages:
result = result.filter(language__in=user.profile.languages.all())
else:
# Filter based on session language
tmp = result.filter(language=guess_user_language(request, result))
if tmp:
return tmp
return result
def redirect_single_project(user):
if isinstance(settings.SINGLE_PROJECT, str):
target = project = Project.objects.get(slug=settings.SINGLE_PROJECT)
elif Component.objects.count() == 1:
target = Component.objects.get()
project = target.project
elif Project.objects.count() == 1:
target = project = Project.objects.get()
else:
raise ImproperlyConfigured("SINGLE_PROJECT enabled, but no project found")
if not user.is_authenticated and not user.can_access_project(project):
return redirect(f"{settings.LOGIN_URL}?next={target.get_absolute_url()}")
return redirect(target)
@never_cache
def home(request):
"""Home page handler serving different views based on user."""
user = request.user
# This is used on Hosted Weblate to handle removed translation projects.
# The redirect itself is done in the http server.
if "removed" in request.GET:
messages.warning(
request,
_(
"The project you were looking for has been removed, "
"however you are welcome to contribute to other ones."
),
)
if "show_set_password" in request.session:
messages.warning(
request,
_(
"You have activated your account, now you should set "
"the password to be able to sign in next time."
),
)
return redirect("password")
    # Warn about a missing full name or e-mail (usually caused by migration
    # of users from an older system)
if user.is_authenticated and (not user.full_name or not user.email):
messages.warning(
request,
mark_safe(
'<a href="{}">{}</a>'.format(
reverse("profile") + "#account",
escape(_("Please set your full name and e-mail in your profile.")),
)
),
)
# Redirect to single project or component
if settings.SINGLE_PROJECT:
return redirect_single_project(user)
if not user.is_authenticated:
return dashboard_anonymous(request)
return dashboard_user(request)
def fetch_componentlists(user, user_translations):
componentlists = list(
ComponentList.objects.filter(
show_dashboard=True,
components__project_id__in=user.allowed_project_ids,
)
.distinct()
.order()
)
for componentlist in componentlists:
components = componentlist.components.filter_access(user)
# Force fetching the query now
list(components)
translations = prefetch_stats(
list(user_translations.filter(component__in=components))
)
# Show ghost translations for user languages
existing = {
(translation.component.slug, translation.language.code)
for translation in translations
}
languages = user.profile.languages.all()
for component in components:
for language in languages:
if (
component.slug,
language.code,
) in existing or not component.can_add_new_language(user):
continue
translations.append(GhostTranslation(component, language))
componentlist.translations = translations
# Filter out component lists with translations
# This will remove the ones where user doesn't have access to anything
return [c for c in componentlists if c.translations]
def dashboard_user(request):
"""Home page of Weblate for authenticated user."""
user = request.user
user_has_languages = user.is_authenticated and user.profile.languages.exists()
user_translations = get_user_translations(request, user, user_has_languages)
suggestions = get_suggestions(request, user, user_has_languages, user_translations)
usersubscriptions = None
componentlists = fetch_componentlists(request.user, user_translations)
active_tab_id = user.profile.dashboard_view
active_tab_slug = Profile.DASHBOARD_SLUGS.get(active_tab_id)
if (
active_tab_id == Profile.DASHBOARD_COMPONENT_LIST
and user.profile.dashboard_component_list
):
active_tab_slug = user.profile.dashboard_component_list.tab_slug()
if user.is_authenticated:
usersubscriptions = user_translations.filter_access(user).filter(
component__project__in=user.watched_projects
)
if user.profile.hide_completed:
usersubscriptions = get_untranslated(usersubscriptions)
for componentlist in componentlists:
componentlist.translations = get_untranslated(
componentlist.translations
)
usersubscriptions = prefetch_stats(usersubscriptions)
return render(
request,
"dashboard/user.html",
{
"allow_index": True,
"suggestions": suggestions,
"search_form": SearchForm(request.user),
"usersubscriptions": usersubscriptions,
"componentlists": componentlists,
"all_componentlists": prefetch_stats(
ComponentList.objects.filter(
components__project_id__in=request.user.allowed_project_ids
)
.distinct()
.order()
),
"active_tab_slug": active_tab_slug,
"reports_form": ReportsForm(),
},
)
def dashboard_anonymous(request):
"""Home page of Weblate showing list of projects for anonymous user."""
top_project_ids = cache.get("dashboard-anonymous-projects")
if top_project_ids is None:
top_projects = sorted(
prefetch_stats(request.user.allowed_projects),
key=lambda prj: -prj.stats.monthly_changes,
)[:20]
cache.set("dashboard-anonymous-projects", {p.id for p in top_projects}, 3600)
else:
# The allowed_projects is already fetched, so filter it in Python
# instead of doing additional query
top_projects = [
p for p in request.user.allowed_projects if p.id in top_project_ids
]
return render(
request,
"dashboard/anonymous.html",
{
"top_projects": top_projects,
"all_projects": len(request.user.allowed_projects),
},
)
|
import json
import logging
import os
from babelfish import Language, language_converters
from requests import Session
from . import Provider
from ..subtitle import Subtitle, fix_line_ending
logger = logging.getLogger(__name__)
language_converters.register('shooter = subliminal.converters.shooter:ShooterConverter')
class ShooterSubtitle(Subtitle):
"""Shooter Subtitle."""
provider_name = 'shooter'
def __init__(self, language, hash, download_link):
super(ShooterSubtitle, self).__init__(language)
self.hash = hash
self.download_link = download_link
@property
def id(self):
return self.download_link
@property
def info(self):
return self.hash
def get_matches(self, video):
matches = set()
# hash
if 'shooter' in video.hashes and video.hashes['shooter'] == self.hash:
matches.add('hash')
return matches
class ShooterProvider(Provider):
"""Shooter Provider."""
languages = {Language(l) for l in ['eng', 'zho']}
server_url = 'https://www.shooter.cn/api/subapi.php'
subtitle_class = ShooterSubtitle
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = self.user_agent
def terminate(self):
self.session.close()
def query(self, language, filename, hash=None):
# query the server
params = {'filehash': hash, 'pathinfo': os.path.realpath(filename), 'format': 'json', 'lang': language.shooter}
logger.debug('Searching subtitles %r', params)
r = self.session.post(self.server_url, params=params, timeout=10)
r.raise_for_status()
# handle subtitles not found
if r.content == b'\xff':
logger.debug('No subtitles found')
return []
# parse the subtitles
results = json.loads(r.text)
subtitles = [self.subtitle_class(language, hash, t['Link']) for s in results for t in s['Files']]
return subtitles
def list_subtitles(self, video, languages):
return [s for l in languages for s in self.query(l, video.name, video.hashes.get('shooter'))]
def download_subtitle(self, subtitle):
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(subtitle.download_link, timeout=10)
r.raise_for_status()
subtitle.content = fix_line_ending(r.content)
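# Hedged usage sketch for ShooterProvider (requires network access to
# shooter.cn; the file path below is a made-up example, and without a
# precomputed shooter hash the 'hash' match will not be scored):
if __name__ == '__main__':
    provider = ShooterProvider()
    provider.initialize()
    try:
        found = provider.query(Language('zho'), '/media/Movie.2019.mkv')
        logger.info('Found %d subtitle(s)', len(found))
        if found:
            provider.download_subtitle(found[0])
    finally:
        provider.terminate()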
|
from hashlib import sha1
import logging
import os
from homeassistant.components.mailbox import CONTENT_TYPE_MPEG, Mailbox, StreamError
from homeassistant.util import dt
_LOGGER = logging.getLogger(__name__)
MAILBOX_NAME = "DemoMailbox"
async def async_get_handler(hass, config, discovery_info=None):
"""Set up the Demo mailbox."""
return DemoMailbox(hass, MAILBOX_NAME)
class DemoMailbox(Mailbox):
"""Demo Mailbox."""
def __init__(self, hass, name):
"""Initialize Demo mailbox."""
super().__init__(hass, name)
self._messages = {}
txt = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
for idx in range(0, 10):
msgtime = int(dt.as_timestamp(dt.utcnow()) - 3600 * 24 * (10 - idx))
msgtxt = f"Message {idx + 1}. {txt * (1 + idx * (idx % 2))}"
msgsha = sha1(msgtxt.encode("utf-8")).hexdigest()
msg = {
"info": {
"origtime": msgtime,
"callerid": "John Doe <212-555-1212>",
"duration": "10",
},
"text": msgtxt,
"sha": msgsha,
}
self._messages[msgsha] = msg
@property
def media_type(self):
"""Return the supported media type."""
return CONTENT_TYPE_MPEG
@property
def can_delete(self):
"""Return if messages can be deleted."""
return True
@property
def has_media(self):
"""Return if messages have attached media files."""
return True
async def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
if msgid not in self._messages:
raise StreamError("Message not found")
audio_path = os.path.join(os.path.dirname(__file__), "tts.mp3")
with open(audio_path, "rb") as file:
return file.read()
async def async_get_messages(self):
"""Return a list of the current messages."""
return sorted(
self._messages.values(),
key=lambda item: item["info"]["origtime"],
reverse=True,
)
async def async_delete(self, msgid):
"""Delete the specified messages."""
if msgid in self._messages:
_LOGGER.info("Deleting: %s", msgid)
del self._messages[msgid]
self.async_update()
return True
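# A small self-contained sketch of the bookkeeping above: message ids are the
# SHA-1 hex digest of the message text, and listings sort newest-first by
# info["origtime"]. The sample values are made up for illustration.
if __name__ == "__main__":
    demo = {}
    for origtime, text in [(100, "old"), (200, "new")]:
        key = sha1(text.encode("utf-8")).hexdigest()
        demo[key] = {"info": {"origtime": origtime}, "text": text, "sha": key}
    newest_first = sorted(
        demo.values(), key=lambda item: item["info"]["origtime"], reverse=True
    )
    assert [item["text"] for item in newest_first] == ["new", "old"]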
|
from serial import SerialException
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.monoprice.const import (
CONF_SOURCE_1,
CONF_SOURCE_4,
CONF_SOURCE_5,
CONF_SOURCES,
DOMAIN,
)
from homeassistant.const import CONF_PORT
from tests.async_mock import patch
from tests.common import MockConfigEntry
CONFIG = {
CONF_PORT: "/test/port",
CONF_SOURCE_1: "one",
CONF_SOURCE_4: "four",
CONF_SOURCE_5: " ",
}
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.monoprice.config_flow.get_async_monoprice",
return_value=True,
), patch(
"homeassistant.components.monoprice.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.monoprice.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], CONFIG
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == CONFIG[CONF_PORT]
assert result2["data"] == {
CONF_PORT: CONFIG[CONF_PORT],
CONF_SOURCES: {"1": CONFIG[CONF_SOURCE_1], "4": CONFIG[CONF_SOURCE_4]},
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.monoprice.config_flow.get_async_monoprice",
side_effect=SerialException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], CONFIG
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_generic_exception(hass):
"""Test we handle cannot generic exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.monoprice.config_flow.get_async_monoprice",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], CONFIG
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_options_flow(hass):
"""Test config flow options."""
conf = {CONF_PORT: "/test/port", CONF_SOURCES: {"4": "four"}}
config_entry = MockConfigEntry(
domain=DOMAIN,
data=conf,
)
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.monoprice.async_setup_entry", return_value=True
):
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_SOURCE_1: "one", CONF_SOURCE_4: "", CONF_SOURCE_5: "five"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options[CONF_SOURCES] == {"1": "one", "5": "five"}
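# The options flow exercised above drops blank source names and re-keys the
# remainder by zone number. A standalone sketch of that filtering rule (the
# helper name and key layout are assumptions for illustration):
#
#   def _filter_sources(user_input: dict) -> dict:
#       return {
#           key.rsplit("_", 1)[1]: name
#           for key, name in user_input.items()
#           if name.strip()
#       }
#
#   assert _filter_sources(
#       {"source_1": "one", "source_4": "", "source_5": "five"}
#   ) == {"1": "one", "5": "five"}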
|
import typing
import csv
from pathlib import Path
import keras
import pandas as pd
import matchzoo
_url = "https://download.microsoft.com/download/E/5/F/" \
"E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip"
def load_data(
stage: str = 'train',
task: str = 'ranking',
filtered: bool = False,
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load WikiQA data.
:param stage: One of `train`, `dev`, and `test`.
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance.
    :param filtered: Whether to remove questions without correct answers.
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
        is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f'WikiQA-{stage}.tsv')
data_pack = _read_data(file_path)
if filtered and stage in ('dev', 'test'):
ref_path = data_root.joinpath(f'WikiQA-{stage}.ref')
filter_ref_path = data_root.joinpath(f'WikiQA-{stage}-filtered.ref')
with open(filter_ref_path, mode='r') as f:
filtered_ids = set([line.split()[0] for line in f])
filtered_lines = []
with open(ref_path, mode='r') as f:
for idx, line in enumerate(f.readlines()):
if line.split()[0] in filtered_ids:
filtered_lines.append(idx)
data_pack = data_pack[filtered_lines]
if task == 'ranking':
task = matchzoo.tasks.Ranking()
if task == 'classification':
task = matchzoo.tasks.Classification()
if isinstance(task, matchzoo.tasks.Ranking):
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
data_pack.one_hot_encode_label(task.num_classes, inplace=True)
if return_classes:
return data_pack, [False, True]
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def _download_data():
ref_path = keras.utils.data_utils.get_file(
'wikiqa', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='wiki_qa'
)
return Path(ref_path).parent.joinpath('WikiQACorpus')
def _read_data(path):
table = pd.read_csv(path, sep='\t', header=0, quoting=csv.QUOTE_NONE)
df = pd.DataFrame({
'text_left': table['Question'],
'text_right': table['Sentence'],
'id_left': table['QuestionID'],
'id_right': table['SentenceID'],
'label': table['Label']
})
return matchzoo.pack(df)
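# Hedged usage sketch for load_data(); the first call downloads the WikiQA
# corpus, so it needs network access and a working matchzoo install:
if __name__ == '__main__':
    train_pack = load_data(stage='train', task='ranking')
    dev_pack, classes = load_data(
        stage='dev', task='classification', return_classes=True
    )
    print(type(train_pack).__name__, classes)  # classes is [False, True]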
|
import argparse
import os
import shutil
import signal
import sys
import time
import logging
from openrazer_daemon.daemon import RazerDaemon, __version__
from subprocess import check_output
from time import sleep
from daemonize import Daemonize
# Basically copied from https://github.com/jleclanche/python-xdg/blob/master/xdg/basedir.py
HOME = os.path.expanduser("~")
XDG_DATA_HOME = os.environ.get("XDG_DATA_HOME", os.path.join(HOME, ".local", "share"))
XDG_CONFIG_HOME = os.environ.get("XDG_CONFIG_HOME", os.path.join(HOME, ".config"))
RAZER_DATA_HOME = os.path.join(XDG_DATA_HOME, "openrazer")
XDG_RUNTIME_DIR = os.environ.get("XDG_RUNTIME_DIR", RAZER_DATA_HOME)
RAZER_CONFIG_HOME = os.path.join(XDG_CONFIG_HOME, "openrazer")
RAZER_RUNTIME_DIR = XDG_RUNTIME_DIR
EXAMPLE_CONF_FILE = '/usr/share/openrazer/razer.conf.example'
CONF_FILE = os.path.join(RAZER_CONFIG_HOME, 'razer.conf')
PERSISTENCE_FILE = os.path.join(RAZER_CONFIG_HOME, 'persistence.conf')
LOG_PATH = os.path.join(RAZER_DATA_HOME, 'logs')
args = None
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose logging')
    parser.add_argument('-F', '--foreground', action='store_true', help="Don't fork; stay in the foreground")
parser.add_argument('-r', '--respawn', action='store_true', help='Stop any existing daemon first, if one is running.')
parser.add_argument('-s', '--stop', action='store_true', help='Gracefully stop the existing daemon.')
parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__))
parser.add_argument('--as-root', action='store_true', help='Allow the daemon to be started as root')
parser.add_argument('--config', type=str, help='Location of the config file', default=CONF_FILE)
parser.add_argument('--persistence', type=str, help='Location to file for storing device persistence data', default=PERSISTENCE_FILE)
parser.add_argument('--run-dir', type=str, help='Location of the run directory', default=RAZER_RUNTIME_DIR)
parser.add_argument('--log-dir', type=str, help='Location of the log directory', default=LOG_PATH)
parser.add_argument('--test-dir', type=str, help='Directory containing test driver structure')
return parser.parse_args()
def stop_daemon(args):
pidfile = os.path.join(args.run_dir, 'openrazer-daemon.pid')
try:
with open(pidfile) as f:
pid = int(f.readline().strip())
# if we have psutil, check that the process name matches the
# pidfile. Otherwise we might terminate a process that's not
# ours.
try:
import psutil
try:
if psutil.Process(pid).name() != "openrazer-daemon":
raise ProcessLookupError()
except psutil.NoSuchProcess:
raise ProcessLookupError()
except ImportError:
print("Module psutil is missing, not checking for process name")
os.kill(pid, signal.SIGTERM)
pid_exists = True
delay = 3000
while delay > 0:
delay -= 100
try:
time.sleep(0.1)
os.kill(pid, 0)
except ProcessLookupError:
print("Process {} stopped".format(pid))
pid_exists = False
break
# if we have to kill it, we probably need to remove the
# pidfile too, otherwise we rely on it to clean up properly
if pid_exists:
print("Process {} is hung, sending SIGKILL".format(pid))
os.kill(pid, signal.SIGKILL)
os.remove(pidfile)
except FileNotFoundError:
print("No pidfile found, assuming openrazer-daemon is not running")
except ProcessLookupError:
print("pidfile exists but no process is running. Remove {} and continue".format(pidfile))
def install_example_config_file(config_file):
"""
Installs the example config file
"""
if os.path.exists(config_file):
return
try:
os.makedirs(os.path.dirname(config_file), exist_ok=True)
if os.path.exists(EXAMPLE_CONF_FILE):
shutil.copy(EXAMPLE_CONF_FILE, config_file)
else:
            print('Can\'t find "{0}"'.format(EXAMPLE_CONF_FILE), file=sys.stderr)
except NotADirectoryError as e:
print("Failed to create {}".format(e.filename), file=sys.stderr)
sys.exit(1)
def init_persistence_config(persistence_file):
"""
Creates a new file for persistence, if it does not exist.
"""
if os.path.exists(persistence_file):
return
try:
os.makedirs(os.path.dirname(persistence_file), exist_ok=True)
with open(persistence_file, "w") as f:
f.writelines("")
except NotADirectoryError as e:
print("Failed to create {}".format(e.filename), file=sys.stderr)
sys.exit(1)
def run_daemon():
global args
daemon = RazerDaemon(verbose=args.verbose,
log_dir=args.log_dir,
console_log=args.foreground,
config_file=args.config,
persistence_file=args.persistence,
test_dir=args.test_dir)
try:
daemon.run()
except KeyboardInterrupt:
daemon.logger.debug("Exited on user request")
except Exception as err:
daemon.logger.exception("Caught exception", exc_info=err)
def run():
global args
logger = None
args = parse_args()
if args.stop:
stop_daemon(args)
sys.exit(0)
if os.getuid() == 0:
if args.as_root:
print("The daemon is being run as root.")
else:
print("The daemon should not be run as root. If you have a good reason to do so, use the --as-root flag.")
sys.exit(1)
if args.respawn:
stop_daemon(args)
time.sleep(3)
# daemonize logs exceptions to its logger (which defaults to the syslog)
# and does not make them appear on stdout/stderr. If we're in foreground
# mode, override that logger with our own.
if args.foreground:
logger = logging.getLogger('run-daemon')
if args.verbose:
logger.setLevel(logging.DEBUG)
install_example_config_file(args.config)
init_persistence_config(args.persistence)
os.makedirs(args.run_dir, exist_ok=True)
daemon = Daemonize(app="openrazer-daemon",
pid=os.path.join(args.run_dir, "openrazer-daemon.pid"),
action=run_daemon,
foreground=args.foreground,
verbose=args.verbose,
chdir=args.run_dir,
logger=logger)
daemon.start()
if __name__ == "__main__":
run()
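# For reference, the liveness probe used in stop_daemon() above relies on
# signal 0: os.kill(pid, 0) raises ProcessLookupError when no such process
# exists and delivers nothing otherwise. A tiny standalone sketch:
#
#   import os
#   def pid_alive(pid: int) -> bool:
#       try:
#           os.kill(pid, 0)
#       except ProcessLookupError:
#           return False
#       except PermissionError:
#           return True  # exists, but owned by another user
#       return True
#
#   print(pid_alive(os.getpid()))  # True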
|
from homeassistant.components.surepetcare.const import DOMAIN
from homeassistant.setup import async_setup_component
from . import MOCK_API_DATA, MOCK_CONFIG, _patch_sensor_setup
EXPECTED_ENTITY_IDS = {
"binary_sensor.pet_flap_pet_flap_connectivity": "household-id-13576-connectivity",
"binary_sensor.pet_flap_cat_flap_connectivity": "household-id-13579-connectivity",
"binary_sensor.feeder_feeder_connectivity": "household-id-12345-connectivity",
"binary_sensor.pet_pet": "household-id-24680",
"binary_sensor.hub_hub": "household-id-hub-id",
}
async def test_binary_sensors(hass, surepetcare) -> None:
"""Test the generation of unique ids."""
instance = surepetcare.return_value
instance.data = MOCK_API_DATA
instance.get_data.return_value = MOCK_API_DATA
with _patch_sensor_setup():
assert await async_setup_component(hass, DOMAIN, MOCK_CONFIG)
await hass.async_block_till_done()
entity_registry = await hass.helpers.entity_registry.async_get_registry()
state_entity_ids = hass.states.async_entity_ids()
for entity_id, unique_id in EXPECTED_ENTITY_IDS.items():
assert entity_id in state_entity_ids
entity = entity_registry.async_get(entity_id)
assert entity.unique_id == unique_id
|
import asyncio
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
from gios import ApiError, Gios, InvalidSensorsData, NoStationError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import CONF_STATION_ID, DEFAULT_NAME, DOMAIN # pylint:disable=unused-import
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_STATION_ID): int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): str,
}
)
class GiosFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for GIOS."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
try:
await self.async_set_unique_id(
user_input[CONF_STATION_ID], raise_on_progress=False
)
self._abort_if_unique_id_configured()
websession = async_get_clientsession(self.hass)
with timeout(30):
gios = Gios(user_input[CONF_STATION_ID], websession)
await gios.update()
return self.async_create_entry(
title=user_input[CONF_STATION_ID],
data=user_input,
)
except (ApiError, ClientConnectorError, asyncio.TimeoutError):
errors["base"] = "cannot_connect"
except NoStationError:
errors[CONF_STATION_ID] = "wrong_station_id"
except InvalidSensorsData:
errors[CONF_STATION_ID] = "invalid_sensors_data"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
|
import time
from tempfile import NamedTemporaryFile
import itest_utils
import mock
from behave import given
from behave import then
from behave import when
import paasta_tools
from paasta_tools import marathon_tools
from paasta_tools.utils import _run
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import load_system_paasta_config
APP_ID = "test--marathon--app.instance.git01234567.configabcdef01"
@when("we create a trivial marathon app")
def create_trivial_marathon_app(context):
app_config = {
"id": APP_ID,
"cmd": "/bin/sleep 30",
"container": {
"type": "DOCKER",
"docker": {"network": "BRIDGE", "image": "busybox"},
},
"instances": 3,
"constraints": [["hostname", "UNIQUE"]],
}
paasta_tools.bounce_lib.create_marathon_app(
app_config["id"], app_config, context.marathon_clients.current[0]
)
@then("we should see it running in marathon")
def list_marathon_apps_has_trivial_app(context):
actual = paasta_tools.marathon_tools.list_all_marathon_app_ids(
context.marathon_clients.current[0]
)
assert APP_ID in actual
assert context.marathon_clients.current[0].get_app("/%s" % APP_ID)
@then("it should show up in marathon_services_running_here")
def marathon_services_running_here_works(context):
with mock.patch(
"paasta_tools.mesos_tools.socket.getfqdn",
return_value="mesosslave",
autospec=True,
):
discovered = paasta_tools.marathon_tools.marathon_services_running_here()
assert discovered == [("test_marathon_app", "instance", mock.ANY)]
@when("the task has started")
def when_the_task_has_started(context):
# 120 * 0.5 = 60 seconds
for _ in range(120):
app = context.marathon_clients.current[0].get_app(APP_ID)
happy_count = app.tasks_running
if happy_count >= 3:
return
time.sleep(0.5)
raise Exception("timed out waiting for task to start")
@when('we run the marathon app "{job_id}" with "{instances:d}" instances')
def run_marathon_app(context, job_id, instances):
(service, instance, _, __) = decompose_job_id(job_id)
job_config = marathon_tools.load_marathon_service_config(
service=service,
instance=instance,
cluster=load_system_paasta_config().get_cluster(),
soa_dir=context.soa_dir,
)
app_id = job_config.format_marathon_app_dict()["id"]
app_config = {
"id": app_id,
"cmd": "/bin/sleep 1m",
"container": {
"type": "DOCKER",
"docker": {"network": "BRIDGE", "image": "busybox"},
},
"instances": instances,
"constraints": [["hostname", "UNIQUE"]],
}
paasta_tools.bounce_lib.create_marathon_app(
app_id=app_id,
config=app_config,
client=context.marathon_clients.get_current_client_for_service(job_config),
)
@given('a capacity check overrides file with contents "{contents}"')
def write_overrides_file(context, contents):
with NamedTemporaryFile(mode="w", delete=False) as f:
f.write(contents)
context.overridefile = f.name
@then(
'capacity_check "{check_type}" --crit "{crit:d}" --warn "{warn:d}" should return "{status}" with code "{code:d}"'
)
def capacity_check_status_crit_warn(context, check_type, crit, warn, status, code):
print(check_type, crit, warn)
cmd = f"../paasta_tools/monitoring/check_capacity.py {check_type} --crit {crit} --warn {warn}"
print("Running cmd %s" % cmd)
exit_code, output = _run(cmd)
print(output)
assert exit_code == code
assert status in output
@then('capacity_check "{check_type}" should return "{status}" with code "{code:d}"')
def capacity_check_type_status(context, check_type, status, code):
cmd = "../paasta_tools/monitoring/check_capacity.py %s" % check_type
print("Running cmd %s" % cmd)
exit_code, output = _run(cmd)
print(output)
assert exit_code == code
assert status in output
@then(
'capacity_check with override file "{check_type}" and attributes "{attrs}" '
'should return "{status}" with code "{code:d}"'
)
def capacity_check_type_status_overrides(context, check_type, attrs, status, code):
cmd = "../paasta_tools/monitoring/check_capacity.py {} --overrides {} --attributes {}".format(
check_type, context.overridefile, attrs
)
print("Running cmd %s" % cmd)
exit_code, output = _run(cmd)
print(output)
assert exit_code == code
assert status in output
@when('we wait for "{job_id}" to launch exactly {task_count:d} tasks')
def wait_launch_tasks(context, job_id, task_count):
(service, instance, _, __) = decompose_job_id(job_id)
job_config = marathon_tools.load_marathon_service_config(
service=service,
instance=instance,
cluster=load_system_paasta_config().get_cluster(),
soa_dir=context.soa_dir,
)
app_id = job_config.format_marathon_app_dict()["id"]
client = context.marathon_clients.get_current_client_for_service(job_config)
itest_utils.wait_for_app_to_launch_tasks(
client, app_id, task_count, exact_matches_only=True
)
|
class Framework:
def __init__(self, items):
self.__items = items
def __getitem__(self, name):
return self.__items[name]
def __str__(self):
return f"{self.name}:{self.id}"
def get(self, name, default=None):
try:
return self[name]
except KeyError:
return default
@property
def id(self):
return self["id"]
@property
def name(self):
return self["name"]
@property
def hostname(self):
return self["hostname"]
@property
def active(self):
return self["active"]
@property
def task_count(self):
return len(self["tasks"])
@property
def user(self):
return self["user"]
@property
def cpu_allocated(self):
return self._resource_allocated("cpus")
@property
def mem_allocated(self):
return self._resource_allocated("mem")
@property
def disk_allocated(self):
return self._resource_allocated("disk")
def _resource_allocated(self, resource):
return self["resources"][resource]
def __eq__(self, other):
return self.__items == other.__items
def __ne__(self, other):
        return not self.__eq__(other)
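# Minimal usage sketch; the payload mirrors the Mesos "frameworks" JSON shape
# this class expects (an assumption based on the keys accessed above):
if __name__ == "__main__":
    fw = Framework({
        "id": "marathon-1",
        "name": "marathon",
        "hostname": "master-1",
        "active": True,
        "user": "root",
        "tasks": [{}, {}],
        "resources": {"cpus": 4.0, "mem": 2048.0, "disk": 0.0},
    })
    print(fw, fw.task_count, fw.cpu_allocated)  # marathon:marathon-1 2 4.0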
|
from __future__ import absolute_import
import unittest
from lxml import etree
from .common_imports import HelperTestCase
from lxml.doctestcompare import LXMLOutputChecker, PARSE_HTML, PARSE_XML
class DummyInput:
def __init__(self, **kw):
for name, value in kw.items():
setattr(self, name, value)
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class DoctestCompareTest(HelperTestCase):
_checker = LXMLOutputChecker()
def compare(self, want, got, html=False):
if html:
options = PARSE_HTML
else:
options = PARSE_XML
parse = self._checker.get_parser(want, got, options)
want_doc = parse(want)
got_doc = parse(got)
return self._checker.collect_diff(
want_doc, got_doc, html, indent=0).lstrip()
def assert_diff(self, want, got, diff, html=False):
self.assertEqual(self.compare(want, got, html), diff)
def assert_nodiff(self, want, got, html=False):
root = etree.fromstring(want)
root.tail = '\n'
indent(root)
diff = etree.tostring(
root, encoding='unicode', method=html and 'html' or 'xml')
self.assert_diff(want, got, diff, html=html)
def test_equal_input(self):
self.assert_nodiff(
'<p title="expected">Expected</p>',
'<p title="expected">Expected</p>')
def test_differing_tags(self):
self.assert_diff(
'<p title="expected">Expected</p>',
'<b title="expected">Expected</b>',
'<p (got: b) title="expected">Expected</p (got: b)>\n')
def test_tags_upper_lower_case(self):
self.assert_diff(
'<p title="expected">Expected</p>',
'<P title="expected">Expected</P>',
'<p (got: P) title="expected">Expected</p (got: P)>\n')
def test_tags_upper_lower_case_html(self):
self.assert_nodiff(
'<html><body><p title="expected">Expected</p></body></html>',
'<HTML><BODY><P title="expected">Expected</P></BODY></HTML>',
html=True)
def test_differing_attributes(self):
self.assert_diff(
'<p title="expected">Expected</p>',
'<p title="actual">Actual</p>',
'<p title="expected (got: actual)">Expected (got: Actual)</p>\n')
def test_extra_children(self):
# https://bugs.launchpad.net/lxml/+bug/1238503
self.assert_diff(
'<p><span>One</span></p>',
'<p><span>One</span><b>Two</b><em>Three</em></p>',
'<p>\n'
' <span>One</span>\n'
' +<b>Two</b>\n'
' +<em>Three</em>\n'
'</p>\n')
def test_missing_children(self):
self.assert_diff(
'<p><span>One</span><b>Two</b><em>Three</em></p>',
'<p><span>One</span></p>',
'<p>\n'
' <span>One</span>\n'
' -<b>Two</b>\n'
' -<em>Three</em>\n'
'</p>\n')
def test_extra_attributes(self):
self.assert_diff(
'<p><span class="foo">Text</span></p>',
'<p><span class="foo" id="bar">Text</span></p>',
'<p>\n'
' <span class="foo" +id="bar">Text</span>\n'
'</p>\n')
def test_missing_attributes(self):
self.assert_diff(
'<p><span class="foo" id="bar">Text</span></p>',
'<p><span class="foo">Text</span></p>',
'<p>\n'
' <span class="foo" -id="bar">Text</span>\n'
'</p>\n')
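    def test_check_output_api(self):
        # Hedged extra check: LXMLOutputChecker subclasses
        # doctest.OutputChecker, so check_output() can also be used directly;
        # it returns True when want/got parse to equivalent trees.
        checker = LXMLOutputChecker()
        self.assertTrue(checker.check_output('<p>x</p>', '<p>x</p>', PARSE_XML))
        self.assertFalse(checker.check_output('<p>x</p>', '<b>x</b>', PARSE_XML))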
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(DoctestCompareTest)])
return suite
if __name__ == '__main__':
unittest.main()
|
import logging
import pyloopenergy
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_ELEC = "electricity"
CONF_GAS = "gas"
CONF_ELEC_SERIAL = "electricity_serial"
CONF_ELEC_SECRET = "electricity_secret"
CONF_GAS_SERIAL = "gas_serial"
CONF_GAS_SECRET = "gas_secret"
CONF_GAS_CALORIFIC = "gas_calorific"
CONF_GAS_TYPE = "gas_type"
DEFAULT_CALORIFIC = 39.11
DEFAULT_UNIT = "kW"
ELEC_SCHEMA = vol.Schema(
{
vol.Required(CONF_ELEC_SERIAL): cv.string,
vol.Required(CONF_ELEC_SECRET): cv.string,
}
)
GAS_TYPE_SCHEMA = vol.In([CONF_UNIT_SYSTEM_METRIC, CONF_UNIT_SYSTEM_IMPERIAL])
GAS_SCHEMA = vol.Schema(
{
vol.Required(CONF_GAS_SERIAL): cv.string,
vol.Required(CONF_GAS_SECRET): cv.string,
vol.Optional(CONF_GAS_TYPE, default=CONF_UNIT_SYSTEM_METRIC): GAS_TYPE_SCHEMA,
vol.Optional(CONF_GAS_CALORIFIC, default=DEFAULT_CALORIFIC): vol.Coerce(float),
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_ELEC): ELEC_SCHEMA, vol.Optional(CONF_GAS): GAS_SCHEMA}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Loop Energy sensors."""
elec_config = config.get(CONF_ELEC)
gas_config = config.get(CONF_GAS, {})
controller = pyloopenergy.LoopEnergy(
elec_config.get(CONF_ELEC_SERIAL),
elec_config.get(CONF_ELEC_SECRET),
gas_config.get(CONF_GAS_SERIAL),
gas_config.get(CONF_GAS_SECRET),
gas_config.get(CONF_GAS_TYPE),
gas_config.get(CONF_GAS_CALORIFIC),
)
def stop_loopenergy(event):
"""Shutdown loopenergy thread on exit."""
_LOGGER.info("Shutting down loopenergy")
controller.terminate()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_loopenergy)
sensors = [LoopEnergyElec(controller)]
if gas_config.get(CONF_GAS_SERIAL):
sensors.append(LoopEnergyGas(controller))
add_entities(sensors)
class LoopEnergyDevice(Entity):
"""Implementation of an Loop Energy base sensor."""
def __init__(self, controller):
"""Initialize the sensor."""
self._state = None
self._unit_of_measurement = DEFAULT_UNIT
self._controller = controller
self._name = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def _callback(self):
self.schedule_update_ha_state(True)
class LoopEnergyElec(LoopEnergyDevice):
"""Implementation of an Loop Energy Electricity sensor."""
def __init__(self, controller):
"""Initialize the sensor."""
super().__init__(controller)
self._name = "Power Usage"
async def async_added_to_hass(self):
"""Subscribe to updates."""
self._controller.subscribe_elecricity(self._callback)
def update(self):
"""Get the cached Loop energy reading."""
self._state = round(self._controller.electricity_useage, 2)
class LoopEnergyGas(LoopEnergyDevice):
"""Implementation of an Loop Energy Gas sensor."""
def __init__(self, controller):
"""Initialize the sensor."""
super().__init__(controller)
self._name = "Gas Usage"
async def async_added_to_hass(self):
"""Subscribe to updates."""
self._controller.subscribe_gas(self._callback)
def update(self):
"""Get the cached Loop gas reading."""
self._state = round(self._controller.gas_useage, 2)
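# A quick runnable sketch of the voluptuous schemas above; the serials and
# secrets are made-up values, and GAS_SCHEMA fills in its defaults (metric
# gas_type, 39.11 calorific value):
if __name__ == "__main__":
    print(ELEC_SCHEMA({CONF_ELEC_SERIAL: "00001234", CONF_ELEC_SECRET: "s3cret"}))
    print(GAS_SCHEMA({CONF_GAS_SERIAL: "00005678", CONF_GAS_SECRET: "s3cret"}))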
|
from typing import Iterable, Optional, MutableMapping
from PyQt5.QtWidgets import QApplication, QLineEdit
from qutebrowser.api import cmdutils
class _ReadlineBridge:
"""Bridge which provides readline-like commands for the current QLineEdit.
Attributes:
_deleted: Mapping from widgets to their last deleted text.
"""
def __init__(self) -> None:
self._deleted: MutableMapping[QLineEdit, str] = {}
def _widget(self) -> Optional[QLineEdit]:
"""Get the currently active QLineEdit."""
w = QApplication.instance().focusWidget()
if isinstance(w, QLineEdit):
return w
else:
return None
    def _dispatch(self, name: str, *,
                  mark: Optional[bool] = None,
                  delete: bool = False) -> None:
widget = self._widget()
if widget is None:
return
method = getattr(widget, name)
if mark is None:
method()
else:
method(mark)
if delete:
self._deleted[widget] = widget.selectedText()
widget.del_()
def backward_char(self) -> None:
self._dispatch('cursorBackward', mark=False)
def forward_char(self) -> None:
self._dispatch('cursorForward', mark=False)
def backward_word(self) -> None:
self._dispatch('cursorWordBackward', mark=False)
def forward_word(self) -> None:
self._dispatch('cursorWordForward', mark=False)
def beginning_of_line(self) -> None:
self._dispatch('home', mark=False)
def end_of_line(self) -> None:
self._dispatch('end', mark=False)
def unix_line_discard(self) -> None:
self._dispatch('home', mark=True, delete=True)
def kill_line(self) -> None:
self._dispatch('end', mark=True, delete=True)
def _rubout(self, delim: Iterable[str]) -> None:
"""Delete backwards using the characters in delim as boundaries."""
widget = self._widget()
if widget is None:
return
cursor_position = widget.cursorPosition()
text = widget.text()
target_position = cursor_position
is_boundary = True
while is_boundary and target_position > 0:
is_boundary = text[target_position - 1] in delim
target_position -= 1
is_boundary = False
while not is_boundary and target_position > 0:
is_boundary = text[target_position - 1] in delim
target_position -= 1
moveby = cursor_position - target_position - 1
widget.cursorBackward(True, moveby)
self._deleted[widget] = widget.selectedText()
widget.del_()
def unix_word_rubout(self) -> None:
self._rubout([' '])
def unix_filename_rubout(self) -> None:
self._rubout([' ', '/'])
def backward_kill_word(self) -> None:
self._dispatch('cursorWordBackward', mark=True, delete=True)
def kill_word(self) -> None:
self._dispatch('cursorWordForward', mark=True, delete=True)
def yank(self) -> None:
"""Paste previously deleted text."""
widget = self._widget()
if widget is None or widget not in self._deleted:
return
widget.insert(self._deleted[widget])
def delete_char(self) -> None:
self._dispatch('del_')
def backward_delete_char(self) -> None:
self._dispatch('backspace')
bridge = _ReadlineBridge()
_register = cmdutils.register(
modes=[cmdutils.KeyMode.command, cmdutils.KeyMode.prompt])
@_register
def rl_backward_char() -> None:
"""Move back a character.
This acts like readline's backward-char.
"""
bridge.backward_char()
@_register
def rl_forward_char() -> None:
"""Move forward a character.
This acts like readline's forward-char.
"""
bridge.forward_char()
@_register
def rl_backward_word() -> None:
"""Move back to the start of the current or previous word.
This acts like readline's backward-word.
"""
bridge.backward_word()
@_register
def rl_forward_word() -> None:
"""Move forward to the end of the next word.
This acts like readline's forward-word.
"""
bridge.forward_word()
@_register
def rl_beginning_of_line() -> None:
"""Move to the start of the line.
This acts like readline's beginning-of-line.
"""
bridge.beginning_of_line()
@_register
def rl_end_of_line() -> None:
"""Move to the end of the line.
This acts like readline's end-of-line.
"""
bridge.end_of_line()
@_register
def rl_unix_line_discard() -> None:
"""Remove chars backward from the cursor to the beginning of the line.
This acts like readline's unix-line-discard.
"""
bridge.unix_line_discard()
@_register
def rl_kill_line() -> None:
"""Remove chars from the cursor to the end of the line.
This acts like readline's kill-line.
"""
bridge.kill_line()
@_register
def rl_unix_word_rubout() -> None:
"""Remove chars from the cursor to the beginning of the word.
This acts like readline's unix-word-rubout. Whitespace is used as a
word delimiter.
"""
bridge.unix_word_rubout()
@_register
def rl_unix_filename_rubout() -> None:
"""Remove chars from the cursor to the previous path separator.
This acts like readline's unix-filename-rubout.
"""
bridge.unix_filename_rubout()
@_register
def rl_backward_kill_word() -> None:
"""Remove chars from the cursor to the beginning of the word.
This acts like readline's backward-kill-word. Any non-alphanumeric
character is considered a word delimiter.
"""
bridge.backward_kill_word()
@_register
def rl_kill_word() -> None:
"""Remove chars from the cursor to the end of the current word.
This acts like readline's kill-word.
"""
bridge.kill_word()
@_register
def rl_yank() -> None:
"""Paste the most recently deleted text.
This acts like readline's yank.
"""
bridge.yank()
@_register
def rl_delete_char() -> None:
"""Delete the character after the cursor.
This acts like readline's delete-char.
"""
bridge.delete_char()
@_register
def rl_backward_delete_char() -> None:
"""Delete the character before the cursor.
This acts like readline's backward-delete-char.
"""
bridge.backward_delete_char()
|
from __future__ import print_function
import argparse
import sys
from os import getcwd, mkdir, remove, rename
from shutil import rmtree
import requests
cwd = getcwd()
documentsIndex = cwd.index("Documents")
documentsIndex += len("Documents")
ROOT = cwd[:documentsIndex]
class stansi: # Collection of Stash's ANSI escape codes.
bold = u"\x9b1m"
underscore = u"\x9b4m"
attr_end = u"\x9b0m"
fore_red = u"\x9b31m"
fore_green = u"\x9b32m"
fore_brown = u"\x9b33m"
fore_blue = u"\x9b34m"
fore_pink = u"\x9b35m"
fore_cyan = u"\x9b36m"
fore_white = u"\x9b37m"
fore_end = u"\x9b39m"
back_red = u"\x9b41m"
back_green = u"\x9b42m"
back_brown = u"\x9b43m"
back_blue = u"\x9b44m"
back_pink = u"\x9b45m"
back_cyan = u"\x9b46m"
back_white = u"\x9b47m"
back_end = u"\x9b49m"
def Red(text):
return stansi.fore_red + text + stansi.fore_end
def Blue(text):
return stansi.fore_blue + text + stansi.fore_end
def Green(text):
return stansi.fore_green + text + stansi.fore_end
def Cyan(text):
return stansi.fore_cyan + text + stansi.fore_end
class SWConfig(object): # Parser for the config files such as the repository listing.
def __init__(self, content):
self.data = {}
for line in content.splitlines():
key = line.split("=")[0]
value = line.split("=")[1]
self.data[key] = value
def __getitem__(self, key):
return self.data[key]
def keys(self):
return self.data.keys()
def download_package(url, package_name):  # Handles the installation of package directories (since they're no longer tarfiles)
content_listing = ["bin.py", "meta.latte"]
mkdir(ROOT + "/" + package_name)
for item in content_listing:
requested = requests.get(url + "/" + package_name + "/" + item)
content = requested.text
requested.close()
if content == "404: Not Found\n":
print(Red("ERROR") + ": Package not found.")
sys.exit()
opened = open(ROOT + "/" + package_name + "/" + item, "w")
opened.write(content)
opened.close()
def main(sargs):
parser = argparse.ArgumentParser()
parser.add_argument("method", help="What action to perform (install, remove, etc)", type=str)
parser.add_argument("package", help="Name of package", type=str)
args = parser.parse_args(sargs)
try:
opened = open(".latte-repos.swconf", "r")
opened.close()
    except IOError:
opened = open(".latte-repos.swconf", "w")
print(Red("WARNING") + ": Repository listing doesn't exist, rebuilding to default...")
opened.write("universe=https://raw.githubusercontent.com/Seanld/latte-universe/master")
opened.close()
repo_listing_opened = open(".latte-repos.swconf", "r")
listing_content = repo_listing_opened.read()
repo_listing_opened.close()
REPOSITORIES = SWConfig(listing_content)
if args.method == "install":
packageSplitted = args.package.split("/")
try:
package_name = packageSplitted[1]
repo_to_use = REPOSITORIES[packageSplitted[0]]
except IndexError:
repo_to_use = REPOSITORIES["universe"]
package_name = packageSplitted[0]
print(Red("WARNING") + ": No repository specified, using universe by default...")
try:
download_package(repo_to_use, package_name)
        except Exception:
            print(Red("ERROR") + ": Couldn't download package.")
            sys.exit()
# Move to correct locations
print("Installing")
try:
rename(ROOT + "/" + package_name + "/meta.latte", ROOT + "/stash_extensions/latte/" + package_name + ".latte")
except:
mkdir(ROOT + "/stash_extensions/latte")
rename(ROOT + "/" + package_name + "/meta.latte", ROOT + "/stash_extensions/latte/" + package_name + ".latte")
rename(ROOT + "/" + package_name + "/bin.py", ROOT + "/stash_extensions/bin/" + package_name + ".py")
rmtree(ROOT + "/" + package_name)
print(Green("SUCCESS") + ": Package '" + package_name + "' successfully installed!")
elif args.method == "remove":
try:
remove(ROOT + "/stash_extensions/bin/" + args.package + ".py")
remove(ROOT + "/stash_extensions/latte/" + args.package + ".latte")
except:
print(Red("ERROR") + ": Couldn't remove package; not found in resources.")
sys.exit()
print(Green("SUCCESS") + ": '" + args.package + "' removed!")
elif args.method == "update":
print(
"Jeez! Sorry, but we are currently working on self-update capabilities. For now, just redo the install process to update."
)
elif args.method == "new":
try:
mkdir(args.package)
config = open(args.package + "/meta.latte", "w")
config.write("developer=Your name here\ndescription=Enter description of your app here\nversion=0.1")
config.close()
index = open(args.package + "/bin.py", "w")
index.write(
"# This is just an example template. You can change this all you like.\n\nimport sys\nimport argparse\n\ndef main(sargs):\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('echo', help='What you want the command to echo back.')\n\targs = parser.parse_args(sargs)\n\t\n\tprint('Echoing back: '+args.echo)\n\nif __name__ == '__main__':\n\tmain(sys.argv[1:])"
)
index.close()
print(Green("SUCCESS") + ": Package '" + args.package + "' generated, check current working directory!")
except:
print(Red("ERROR") + ": Couldn't generate package; directory may already exist.")
elif args.method == "add-repo":
try:
request = requests.get(args.package + "/init.latte")
data = request.text
request.close()
data_org = SWConfig(data)
nickname = data_org["NICKNAME"]
repo_listing = open(".latte-repos.swconf", "a")
repo_listing.write("\n" + nickname + "=" + args.package)
repo_listing.close()
print(Green("SUCCESS") + ": '" + nickname + "' added to repositories!")
except:
print(Red("ERROR") + ": Either repository doesn't exist, or does not contain an 'init.latte' file.")
elif args.method == "list-repos":
if args.package == "all":
opened = open(".latte-repos.swconf")
content = opened.read()
opened.close()
as_config = SWConfig(content)
for repo in as_config.keys():
print(Cyan(repo) + ": " + Green(as_config[repo]))
else:
print(Red("ERROR") + ": Unknown command '" + args.method + "'!")
if __name__ == "__main__":
main(sys.argv[1:])
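# A quick doctest-style sketch of the SWConfig parser above; the repository
# listing format is plain key=value lines (URLs here are made up):
#
#   >>> conf = SWConfig("universe=https://example.org/repo\nextra=https://example.net")
#   >>> sorted(conf.keys())
#   ['extra', 'universe']
#   >>> conf["universe"]
#   'https://example.org/repo'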
|
from flexx.util.testing import run_tests_if_main, raises, skip
import re
from flexx.util.logging import logger, capture_log, set_log_level
def test_debug():
logger.debug('test')
def test_info():
logger.info('test')
def test_warning():
logger.warning('test')
def test_set_log_level():
with raises(ValueError):
set_log_level('notaloglevel')
with raises(TypeError):
set_log_level([])
def test_capture():
with capture_log('info') as log:
logger.warning('AA')
logger.info('BB')
msg1 = log[0]
msg2 = log[1]
assert 'flexx' in msg1
assert 'AA' in msg1
assert '[W ' in msg1
assert 'flexx' in msg2
assert 'BB' in msg2
assert '[I' in msg2
def test_match():
# Match based on string
with capture_log('info', 'foo') as log:
logger.info('AA foo')
logger.info('BB bar') # no foo
logger.debug('CC foo') # too high level
logger.info('DD fXo') # no foo
assert len(log) == 1
assert 'AA' in log[0]
# Match based on regexp
with capture_log('info', re.compile('f.o')) as log:
logger.info('AA foo')
logger.info('BB bar') # no foo
logger.debug('CC foo') # too high level
logger.info('DD fXo')
assert len(log) == 2
assert 'AA' in log[0]
assert 'DD' in log[1]
# No match
with capture_log('info', '') as log:
logger.info('AA foo')
logger.info('BB bar')
logger.debug('CC foo') # too high level
logger.info('DD fXo')
assert len(log) == 3
def test_debug_does_more():
def caller_func_bla():
logger.debug('AA foo')
logger.info('BB bar')
with capture_log('debug') as log:
caller_func_bla()
assert len(log) == 2
assert 'caller_func_bla' in log[0]
assert 'caller_func_bla' in log[1]
run_tests_if_main()
|
from io import StringIO
from django import template
from django.utils.safestring import mark_safe
from lxml import etree
from weblate.utils.site import get_site_url
register = template.Library()
@register.filter
def add_site_url(content):
"""Automatically add site URL to any relative links or images."""
parser = etree.HTMLParser(collect_ids=False)
tree = etree.parse(StringIO(content), parser)
for link in tree.findall("//a"):
url = link.get("href")
if url.startswith("/"):
link.set("href", get_site_url(url))
for link in tree.findall("//img"):
url = link.get("src")
if url.startswith("/"):
link.set("src", get_site_url(url))
return mark_safe(
etree.tostring(
tree.getroot(), pretty_print=True, method="html", encoding="unicode"
)
)
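# Hedged usage sketch: with Django configured so that get_site_url() returns,
# say, "https://example.com", a relative link such as
# '<a href="/translate/">x</a>' comes back with the absolute URL
# 'https://example.com/translate/' in its href. Note that lxml's HTMLParser
# wraps fragments in <html><body>, so the returned markup includes that
# wrapper.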
|
from __future__ import annotations
import inspect
import functools
from typing import (
TypeVar,
Callable,
Awaitable,
Coroutine,
Union,
Type,
TYPE_CHECKING,
List,
Any,
Generator,
Protocol,
overload,
)
import discord
from discord.ext import commands as dpy_commands
# So much of this can be stripped right back out with proper stubs.
if not TYPE_CHECKING:
from discord.ext.commands import (
check as check,
guild_only as guild_only,
dm_only as dm_only,
is_nsfw as is_nsfw,
has_role as has_role,
has_any_role as has_any_role,
bot_has_role as bot_has_role,
bot_has_any_role as bot_has_any_role,
cooldown as cooldown,
before_invoke as before_invoke,
after_invoke as after_invoke,
)
from ..i18n import Translator
from .context import Context
from .commands import Command
_ = Translator("nah", __file__)
"""
Anything here is either a reimplementation or re-export
of a discord.py function or class with more lies for mypy
"""
__all__ = [
"check",
# "check_any", # discord.py 1.3
"guild_only",
"dm_only",
"is_nsfw",
"has_role",
"has_any_role",
"bot_has_role",
"bot_has_any_role",
"when_mentioned_or",
"cooldown",
"when_mentioned",
"before_invoke",
"after_invoke",
]
_CT = TypeVar("_CT", bound=Context)
_T = TypeVar("_T")
_F = TypeVar("_F")
CheckType = Union[Callable[[_CT], bool], Callable[[_CT], Coroutine[Any, Any, bool]]]
CoroLike = Callable[..., Union[Awaitable[_T], Generator[Any, None, _T]]]
InvokeHook = Callable[[_CT], Coroutine[Any, Any, bool]]
class CheckDecorator(Protocol):
predicate: Coroutine[Any, Any, bool]
@overload
def __call__(self, func: _CT) -> _CT:
...
@overload
def __call__(self, func: CoroLike) -> CoroLike:
...
if TYPE_CHECKING:
def check(predicate: CheckType) -> CheckDecorator:
...
def guild_only() -> CheckDecorator:
...
def dm_only() -> CheckDecorator:
...
def is_nsfw() -> CheckDecorator:
...
def has_role() -> CheckDecorator:
...
def has_any_role() -> CheckDecorator:
...
def bot_has_role() -> CheckDecorator:
...
def bot_has_any_role() -> CheckDecorator:
...
def cooldown(rate: int, per: float, type: dpy_commands.BucketType = ...) -> Callable[[_F], _F]:
...
def before_invoke(coro: InvokeHook) -> Callable[[_F], _F]:
...
def after_invoke(coro: InvokeHook) -> Callable[[_F], _F]:
...
PrefixCallable = Callable[[dpy_commands.bot.BotBase, discord.Message], List[str]]
def when_mentioned(bot: dpy_commands.bot.BotBase, msg: discord.Message) -> List[str]:
return [f"<@{bot.user.id}> ", f"<@!{bot.user.id}> "]
def when_mentioned_or(*prefixes) -> PrefixCallable:
def inner(bot: dpy_commands.bot.BotBase, msg: discord.Message) -> List[str]:
r = list(prefixes)
r = when_mentioned(bot, msg) + r
return r
return inner
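# A minimal runnable sketch of the prefix helpers; SimpleNamespace stands in
# for the discord.py bot and message objects (an assumption for illustration):
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_bot = SimpleNamespace(user=SimpleNamespace(id=1234))
    get_prefix = when_mentioned_or("!")
    assert get_prefix(fake_bot, None) == ["<@1234> ", "<@!1234> ", "!"]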
|
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN as LT_DOMAIN, TRACKER_UPDATE
async def async_setup_entry(hass, entry, async_add_entities):
"""Configure a dispatcher connection based on a config entry."""
@callback
def _receive_data(device, location, location_name):
"""Receive set location."""
if device in hass.data[LT_DOMAIN]["devices"]:
return
hass.data[LT_DOMAIN]["devices"].add(device)
async_add_entities([LocativeEntity(device, location, location_name)])
hass.data[LT_DOMAIN]["unsub_device_tracker"][
entry.entry_id
] = async_dispatcher_connect(hass, TRACKER_UPDATE, _receive_data)
return True
class LocativeEntity(TrackerEntity):
"""Represent a tracked device."""
def __init__(self, device, location, location_name):
"""Set up Locative entity."""
self._name = device
self._location = location
self._location_name = location_name
self._unsub_dispatcher = None
@property
def latitude(self):
"""Return latitude value of the device."""
return self._location[0]
@property
def longitude(self):
"""Return longitude value of the device."""
return self._location[1]
@property
def location_name(self):
"""Return a location name for the current location of the device."""
return self._location_name
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_GPS
async def async_added_to_hass(self):
"""Register state update callback."""
self._unsub_dispatcher = async_dispatcher_connect(
self.hass, TRACKER_UPDATE, self._async_receive_data
)
async def async_will_remove_from_hass(self):
"""Clean up after entity before removal."""
self._unsub_dispatcher()
@callback
def _async_receive_data(self, device, location, location_name):
"""Update device data."""
if device != self._name:
return
self._location_name = location_name
self._location = location
self.async_write_ha_state()
|
from __future__ import unicode_literals
import os
import sys
import time
from collections import OrderedDict
from lib.data.datatype import AttribDict
from lib.fun.osjudger import py_ver_egt_3
def init_paths():
try:
root_path = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0]))).encode('utf-8').decode() \
if py_ver_egt_3 else os.path.dirname(os.path.abspath(sys.argv[0])).decode('utf-8')
except:
root_path = 'fake path'
exit("\n[-] Please ensure pydictor directory path name is english characters\n")
paths.root_path = root_path
paths.results_path = os.path.abspath(os.path.join(paths.root_path, "results"))
paths.results_file_name = None
paths.core_path = os.path.abspath(os.path.join(paths.root_path, "core"))
paths.tools_path = os.path.abspath(os.path.join(paths.root_path, "tools"))
paths.plugins_path = os.path.abspath(os.path.join(paths.root_path, "plugins"))
paths.encode_function_path = os.path.abspath(os.path.join(paths.root_path, "lib", "encode"))
# wordlist path
paths.wordlist_path = os.path.join(paths.root_path, "wordlist")
paths.applist_path = os.path.join(paths.wordlist_path, "App")
paths.iotlist_path = os.path.join(paths.wordlist_path, "IoT")
paths.niplist_path = os.path.join(paths.wordlist_path, "NiP")
paths.sedblist_path = os.path.join(paths.wordlist_path, "SEDB")
paths.syslist_path = os.path.join(paths.wordlist_path, "Sys")
paths.weblist_path = os.path.join(paths.wordlist_path, "Web")
paths.wifilist_path = os.path.join(paths.wordlist_path, "WiFi")
# function cfg path
paths.funcfg_path = os.path.join(paths.root_path, "funcfg")
paths.buildconf_path = os.path.join(paths.funcfg_path, "build.conf")
paths.extendconf_path = os.path.join(paths.funcfg_path, "extend.conf")
paths.leetmode_path = os.path.join(paths.funcfg_path, "leet_mode.conf")
paths.scrapersites_path = os.path.join(paths.funcfg_path, 'scratch.sites')
paths.scratch_blacklist = os.path.join(paths.funcfg_path, "scratch_blacklist.conf")
paths.sedbtrick_path = os.path.join(paths.funcfg_path, "sedb_tricks.conf")
def init_pystrs():
# start time
pystrs.startime = time.time()
pystrs.version = '2.1.5.0#dev'
# build configuration file element description
pystrs.conf_head = "head"
pystrs.conf_char = "char"
pystrs.conf_minlen = "minlen"
pystrs.conf_maxlen = "maxlen"
pystrs.conf_encode = "encode"
pystrs.conf_tail = "tail"
pystrs.sex_range = ("m", "f", "all")
pystrs.default_sex = "all"
# base dict type flag
pystrs.base_dic_type = ("d", "L", "c", "dL", "dc", "Lc", "dLc")
# counter command string
pystrs.just_view_counter = "v"
pystrs.just_save_counter = "s"
pystrs.save_and_view = "vs"
pystrs.counter_cmd_range = (pystrs.just_save_counter, pystrs.just_view_counter, pystrs.save_and_view)
pystrs.sedb_trick_prefix = "sedb_trick_prefix_strings"
pystrs.sedb_trick_suffix = "sedb_trick_suffix_strings"
pystrs.sedb_trick_middle = "sedb_trick_middle_strings"
# social engineering dictionary elements
pystrs.sedb_range = ("cname", "ename", "sname", "birth", "usedpwd", "phone", "uphone", "hphone", "email",
"postcode", "nickname", "idcard", "jobnum", "otherdate", "usedchar")
pystrs.sedb_dict = OrderedDict([
(pystrs.sedb_range[0], []), (pystrs.sedb_range[1], []), (pystrs.sedb_range[2], []), (pystrs.sedb_range[3], []),
(pystrs.sedb_range[4], []), (pystrs.sedb_range[5], []), (pystrs.sedb_range[6], []), (pystrs.sedb_range[7], []),
(pystrs.sedb_range[8], []), (pystrs.sedb_range[9], []), (pystrs.sedb_range[10], []), (pystrs.sedb_range[11], []),
(pystrs.sedb_range[12], []), (pystrs.sedb_range[13], []), (pystrs.sedb_range[14], [])
]
)
def init_pyoptions():
# global CRLF
pyoptions.CRLF = "\n"
# filename extension
pyoptions.filextension = ".txt"
# allowed maximum length
pyoptions.maxlen_switcher = 20
# allowed maximum generated items
pyoptions.count_switcher = 100000000000
# shredded file rewrite counts
pyoptions.file_rewrite_count = 1
# shredded dir rewrite counts
pyoptions.dir_rewrite_count = 1
# counter tool max count
pyoptions.vs_counter_switcher = 100000
# counter tool split word
pyoptions.counter_split = "\n"
# default counter view items
pyoptions.default_vs_items = 50
# format date ymd_format: yyyMMdd dmy_format: ddMMyyyy
pyoptions.ymd_format = True
# command options
pyoptions.args_base = ""
pyoptions.args_char = ""
pyoptions.args_chunk = []
pyoptions.args_extend = []
pyoptions.args_plug = []
pyoptions.args_conf = ""
pyoptions.args_tool = []
pyoptions.args_sedb = False
pyoptions.args_pick = False
# command arguments
pyoptions.head = ""
pyoptions.tail = ""
pyoptions.encode = "none"
pyoptions.minlen = 0
pyoptions.maxlen = 4
pyoptions.default_occur = "<=99"
pyoptions.occur_is_filter = False
pyoptions.letter_occur = pyoptions.default_occur
pyoptions.digital_occur = pyoptions.default_occur
pyoptions.special_occur = pyoptions.default_occur
pyoptions.default_types = ">=0"
pyoptions.types_is_filter = False
pyoptions.letter_types = pyoptions.default_types
pyoptions.digital_types = pyoptions.default_types
pyoptions.special_types = pyoptions.default_types
pyoptions.default_repeat = ">=0"
pyoptions.repeat_is_filter = False
pyoptions.letter_repeat = pyoptions.default_repeat
pyoptions.digital_repeat = pyoptions.default_repeat
pyoptions.special_repeat = pyoptions.default_repeat
pyoptions.filter_regex = ".*?"
pyoptions.regex_is_filter = False
# the lower the more items
pyoptions.level = 3
# leet mode
pyoptions.extend_leet = False
pyoptions.scratch_leet = False
pyoptions.sedb_leet = False
pyoptions.leetmode_code = []
    # words no longer than middle_switcher are used by the 'extend' plugin
pyoptions.middle_switcher = 5
# configuration file split char
pyoptions.chars_split = ","
pyoptions.char_range_split = "-"
pyoptions.length_split = ","
    pyoptions.rangepattern = r'^\[.*?\]$'
    pyoptions.level_str_pattern = r"^(\d)\s+(.*?)$"
    pyoptions.level_str_str_pattern = r"^(\d)\s+(.*?)\s+(.*?)$"
    pyoptions.confpattern = r'(.*?)\[(.*?)\]\{(.*?)\}\<(.*?)\>([^[]*)'
# annotator
pyoptions.annotator = '#'
# sedb trick
pyoptions.trick_split = ","
pyoptions.sedb_trick_mid = []
pyoptions.sedb_trick_pre = []
pyoptions.sedb_trick_suf = []
# cfg
pyoptions.key_value_split = "="
# characters map operator
pyoptions.charmap = {'%space%': ' ', '%-%': '-',
'%|%': ',', '%||%': ':',
'%{%': '{', '%}%': '}',
'%[%': '[', '%]%': ']',
'%(%': '(', '%)%': ')',
'%<%': '<', '%>%': '>'}
# core function string range
pyoptions.core_range = [core[:-3].lower() for core in os.listdir(paths.core_path)
if core.endswith('.py') and not core.startswith('__')]
# encode ending string format
pyoptions.encode_ending = "_encode"
# encode operator
pyoptions.operator = {}
for encode_file_name in os.listdir(paths.encode_function_path):
encode_name = encode_file_name.split(".")[0]
if encode_name.endswith(pyoptions.encode_ending):
pyoptions.operator[encode_name[:-len(pyoptions.encode_ending)].lower()] = getattr(__import__('lib.encode.' + encode_name, fromlist=True), encode_name)
# encode function string range
pyoptions.encode_range = [key for key in pyoptions.operator.keys()]
# encode info description
pyoptions.encode_info = {}
for encode_name in pyoptions.operator.keys():
pyoptions.encode_info[encode_name] = getattr(pyoptions.operator.get(encode_name), "__doc__")
try:
pyoptions.encode_desc = "".join([str(key).ljust(10) + pyoptions.encode_info[key] + pyoptions.CRLF
for key in sorted(pyoptions.encode_info.keys())])
    except TypeError:
        exit("[-] Please check your modified encode function, something is wrong")
# tools_operator ending string format
pyoptions.tool_ending = "_magic"
# tool function string range
pyoptions.tool_range = [tool[:-3].lower() for tool in os.listdir(paths.tools_path)
if tool.endswith('.py') and not tool.startswith('__')]
# tools operator
pyoptions.tools_operator = {}
sys.path.append(paths.tools_path)
for tool_name in pyoptions.tool_range:
for tool_enter in dir(__import__(str(tool_name), globals(), locals(), [str(tool_name) + pyoptions.tool_ending], )):
if tool_enter.endswith(pyoptions.tool_ending):
pyoptions.tools_operator[tool_enter[:-len(pyoptions.tool_ending)].lower()] = \
getattr(__import__(tool_name), tool_enter)
# tools info description
pyoptions.tools_info = {}
for tool_name in pyoptions.tools_operator.keys():
pyoptions.tools_info[tool_name] = getattr(pyoptions.tools_operator.get(tool_name), "__doc__")
pyoptions.tools_desc = "".join([str(key).ljust(10) + pyoptions.tools_info[key] + pyoptions.CRLF
for key in sorted(pyoptions.tools_info.keys())])
# plug ending string format
pyoptions.plug_ending = "_magic"
# plug function string range
pyoptions.plug_range = [plug[:-3].lower() for plug in os.listdir(paths.plugins_path)
if plug.endswith('.py') and not plug.startswith('__')]
# plugins operator
pyoptions.plugins_operator = {}
sys.path.append(paths.plugins_path)
for plug_name in pyoptions.plug_range:
for plug_magic in dir(__import__(str(plug_name), globals(), locals(), [str(plug_name) + pyoptions.plug_ending], )):
if plug_magic.endswith(pyoptions.plug_ending):
pyoptions.plugins_operator[plug_magic[:-len(pyoptions.plug_ending)].lower()] = \
getattr(__import__(plug_name), plug_magic)
# plugins info description
pyoptions.plugins_info = {}
for plug_name in pyoptions.plugins_operator.keys():
pyoptions.plugins_info[plug_name] = getattr(pyoptions.plugins_operator.get(plug_name), "__doc__")
pyoptions.plugins_desc = "".join([str(key).ljust(10) + str(pyoptions.plugins_info[key]) + pyoptions.CRLF
for key in sorted(pyoptions.plugins_info.keys())])
# prefix range
pyoptions.prefix_range = pyoptions.core_range + pyoptions.plug_range + pyoptions.tool_range
# pattern used to filter out noise words when scraping website content
pyoptions.scratch_filter = r'(^(\d){1,4}px$)|' \
r'(^[0-9,a-z,A-Z]{18,}$)|' \
r'(^(\d){2,4}x(\d){2,4}$)|' \
r'(^[0-9,a-f,A-F]{5,8}$)|' \
r'(^(u|U|\\u|\\U|U\+)[0-9,a-f,A-F]{4}$)|' \
r'(^(0x|u0026|u003e|252C94|u003c|auto|252C94)[a-z,A-Z]{1,16}$)|' \
r'(^on(fo|dra|mouse|load|play|seek)[a-z]{0,5}$)|' \
r'(^(img|div|svg|vfl|span|font|form|case|label|index|level|image)(\d){1,4}$)|' \
r'(^[a-z,A-Z]{2,6}(id|bar|ico|div|pic|img|box|url|uri|span|menu|image|title|color' \
r'|class|images|icon)$)'
# pydictor paths
paths = AttribDict()
# object to store description strings
pystrs = AttribDict()
# object to store options
pyoptions = AttribDict()
init_paths()
init_pystrs()
init_pyoptions()
|
import asyncio
import logging
from total_connect_client import TotalConnectClient
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["alarm_control_panel", "binary_sensor"]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
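# Example configuration.yaml entry (illustrative, assuming DOMAIN == "totalconnect"):
#
# totalconnect:
#   username: you@example.com
#   password: !secret totalconnect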
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up by configuration file."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up upon config entry in user interface."""
hass.data.setdefault(DOMAIN, {})
conf = entry.data
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
client = await hass.async_add_executor_job(
TotalConnectClient.TotalConnectClient, username, password
)
if not client.is_valid_credentials():
_LOGGER.error("TotalConnect authentication failed")
return False
hass.data[DOMAIN][entry.entry_id] = client
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
import asyncio
from datetime import timedelta
import logging
from uuid import uuid4
from aiohttp import ClientError, ClientResponseError
from aiohttp.web import Request, Response
import jwt
# Typing imports
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CLOUD_NEVER_EXPOSED_ENTITIES,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util import dt as dt_util
from .const import (
CONF_API_KEY,
CONF_CLIENT_EMAIL,
CONF_ENTITY_CONFIG,
CONF_EXPOSE,
CONF_EXPOSE_BY_DEFAULT,
CONF_EXPOSED_DOMAINS,
CONF_PRIVATE_KEY,
CONF_REPORT_STATE,
CONF_SECURE_DEVICES_PIN,
CONF_SERVICE_ACCOUNT,
GOOGLE_ASSISTANT_API_ENDPOINT,
HOMEGRAPH_SCOPE,
HOMEGRAPH_TOKEN_URL,
REPORT_STATE_BASE_URL,
REQUEST_SYNC_BASE_URL,
SOURCE_CLOUD,
)
from .helpers import AbstractConfig
from .smart_home import async_handle_message
_LOGGER = logging.getLogger(__name__)
def _get_homegraph_jwt(time, iss, key):
now = int(time.timestamp())
jwt_raw = {
"iss": iss,
"scope": HOMEGRAPH_SCOPE,
"aud": HOMEGRAPH_TOKEN_URL,
"iat": now,
"exp": now + 3600,
}
return jwt.encode(jwt_raw, key, algorithm="RS256").decode("utf-8")
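# Illustrative claim set produced above (values are placeholders):
#   {"iss": "svc@project.iam.gserviceaccount.com", "scope": HOMEGRAPH_SCOPE,
#    "aud": HOMEGRAPH_TOKEN_URL, "iat": 1600000000, "exp": 1600003600}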
async def _get_homegraph_token(hass, jwt_signed):
headers = {
"Authorization": f"Bearer {jwt_signed}",
"Content-Type": "application/x-www-form-urlencoded",
}
data = {
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"assertion": jwt_signed,
}
session = async_get_clientsession(hass)
async with session.post(HOMEGRAPH_TOKEN_URL, headers=headers, data=data) as res:
res.raise_for_status()
return await res.json()
class GoogleConfig(AbstractConfig):
"""Config for manual setup of Google."""
def __init__(self, hass, config):
"""Initialize the config."""
super().__init__(hass)
self._config = config
self._access_token = None
self._access_token_renew = None
@property
def enabled(self):
"""Return if Google is enabled."""
return True
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return self._config.get(CONF_SECURE_DEVICES_PIN)
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self._config.get(CONF_REPORT_STATE)
def should_expose(self, state) -> bool:
"""Return if entity should be exposed."""
expose_by_default = self._config.get(CONF_EXPOSE_BY_DEFAULT)
exposed_domains = self._config.get(CONF_EXPOSED_DOMAINS)
if state.attributes.get("view") is not None:
# Ignore entities that are views
return False
if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
explicit_expose = self.entity_config.get(state.entity_id, {}).get(CONF_EXPOSE)
domain_exposed_by_default = (
expose_by_default and state.domain in exposed_domains
)
# Expose an entity if the entity's domain is exposed by default and
# the configuration doesn't explicitly exclude it from being
# exposed, or if the entity is explicitly exposed
is_default_exposed = domain_exposed_by_default and explicit_expose is not False
return is_default_exposed or explicit_expose
def get_agent_user_id(self, context):
"""Get agent user ID making request."""
return context.user_id
def should_2fa(self, state):
"""If an entity should have 2FA checked."""
return True
async def _async_request_sync_devices(self, agent_user_id: str):
if CONF_API_KEY in self._config:
await self.async_call_homegraph_api_key(
REQUEST_SYNC_BASE_URL, {"agentUserId": agent_user_id}
)
elif CONF_SERVICE_ACCOUNT in self._config:
await self.async_call_homegraph_api(
REQUEST_SYNC_BASE_URL, {"agentUserId": agent_user_id}
)
else:
_LOGGER.error("No configuration for request_sync available")
async def _async_update_token(self, force=False):
if CONF_SERVICE_ACCOUNT not in self._config:
_LOGGER.error("Trying to get homegraph api token without service account")
return
now = dt_util.utcnow()
if not self._access_token or now > self._access_token_renew or force:
token = await _get_homegraph_token(
self.hass,
_get_homegraph_jwt(
now,
self._config[CONF_SERVICE_ACCOUNT][CONF_CLIENT_EMAIL],
self._config[CONF_SERVICE_ACCOUNT][CONF_PRIVATE_KEY],
),
)
self._access_token = token["access_token"]
self._access_token_renew = now + timedelta(seconds=token["expires_in"])
async def async_call_homegraph_api_key(self, url, data):
"""Call a homegraph api with api key authentication."""
websession = async_get_clientsession(self.hass)
try:
res = await websession.post(
url, params={"key": self._config.get(CONF_API_KEY)}, json=data
)
_LOGGER.debug(
"Response on %s with data %s was %s", url, data, await res.text()
)
res.raise_for_status()
return res.status
except ClientResponseError as error:
_LOGGER.error("Request for %s failed: %d", url, error.status)
return error.status
except (asyncio.TimeoutError, ClientError):
_LOGGER.error("Could not contact %s", url)
return HTTP_INTERNAL_SERVER_ERROR
async def async_call_homegraph_api(self, url, data):
"""Call a homegraph api with authentication."""
session = async_get_clientsession(self.hass)
async def _call():
headers = {
"Authorization": f"Bearer {self._access_token}",
"X-GFE-SSL": "yes",
}
async with session.post(url, headers=headers, json=data) as res:
_LOGGER.debug(
"Response on %s with data %s was %s", url, data, await res.text()
)
res.raise_for_status()
return res.status
try:
await self._async_update_token()
try:
return await _call()
except ClientResponseError as error:
if error.status == HTTP_UNAUTHORIZED:
_LOGGER.warning(
"Request for %s unauthorized, renewing token and retrying", url
)
await self._async_update_token(True)
return await _call()
raise
except ClientResponseError as error:
_LOGGER.error("Request for %s failed: %d", url, error.status)
return error.status
except (asyncio.TimeoutError, ClientError):
_LOGGER.error("Could not contact %s", url)
return HTTP_INTERNAL_SERVER_ERROR
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
data = {
"requestId": uuid4().hex,
"agentUserId": agent_user_id,
"payload": message,
}
await self.async_call_homegraph_api(REPORT_STATE_BASE_URL, data)
class GoogleAssistantView(HomeAssistantView):
"""Handle Google Assistant requests."""
url = GOOGLE_ASSISTANT_API_ENDPOINT
name = "api:google_assistant"
requires_auth = True
def __init__(self, config):
"""Initialize the Google Assistant request handler."""
self.config = config
async def post(self, request: Request) -> Response:
"""Handle Google Assistant requests."""
message: dict = await request.json()
result = await async_handle_message(
request.app["hass"],
self.config,
request["hass_user"].id,
message,
SOURCE_CLOUD,
)
return self.json(result)
|
import json
from django.core.management.base import CommandError
from weblate.addons.models import ADDONS, Addon
from weblate.auth.models import User, get_anonymous
from weblate.trans.management.commands import WeblateComponentCommand
class Command(WeblateComponentCommand):
help = "installs addon to all listed components"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument("--addon", required=True, help="Addon name")
parser.add_argument(
"--configuration", default="{}", help="Addon configuration in JSON"
)
parser.add_argument(
"--update", action="store_true", help="Update existing addons configuration"
)
def validate_form(self, form):
if not form.is_valid():
for error in form.non_field_errors():
self.stderr.write(error)
for field in form:
for error in field.errors:
self.stderr.write(f"Error in {field.name}: {error}")
raise CommandError("Invalid addon configuration!")
def handle(self, *args, **options):
try:
addon_class = ADDONS[options["addon"]]
except KeyError:
raise CommandError("Addon not found: {}".format(options["addon"]))
addon = addon_class()
try:
configuration = json.loads(options["configuration"])
except ValueError as error:
raise CommandError(f"Invalid addon configuration: {error}")
try:
user = User.objects.filter(is_superuser=True)[0]
except IndexError:
user = get_anonymous()
for component in self.get_components(*args, **options):
if addon.has_settings:
form = addon.get_add_form(None, component, data=configuration)
self.validate_form(form)
addons = Addon.objects.filter_component(component).filter(name=addon.name)
if addons:
if options["update"]:
for addon_component in addons:
addon_component.addon.configure(configuration)
self.stdout.write(f"Successfully updated on {component}")
else:
self.stderr.write(f"Already installed on {component}")
continue
if not addon.can_install(component, user):
self.stderr.write(f"Can not install on {component}")
continue
addon.create(component, configuration=configuration)
self.stdout.write(f"Successfully installed on {component}")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl import flags
import mock
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from tests import pkb_common_test_case
import six
FLAGS = flags.FLAGS
_COMPONENT = 'test_component'
class BaseDiskSpecTestCase(pkb_common_test_case.PkbCommonTestCase):
def testDefaults(self):
spec = disk.BaseDiskSpec(_COMPONENT)
self.assertIsNone(spec.device_path)
self.assertIsNone(spec.disk_number)
self.assertIsNone(spec.disk_size)
self.assertIsNone(spec.disk_type)
self.assertIsNone(spec.mount_point)
self.assertEqual(spec.num_striped_disks, 1)
def testProvidedValid(self):
spec = disk.BaseDiskSpec(
_COMPONENT, device_path='test_device_path', disk_number=1,
disk_size=75, disk_type='test_disk_type', mount_point='/mountpoint',
num_striped_disks=2)
self.assertEqual(spec.device_path, 'test_device_path')
self.assertEqual(spec.disk_number, 1)
self.assertEqual(spec.disk_size, 75)
self.assertEqual(spec.disk_type, 'test_disk_type')
self.assertEqual(spec.mount_point, '/mountpoint')
self.assertEqual(spec.num_striped_disks, 2)
def testProvidedNone(self):
spec = disk.BaseDiskSpec(
_COMPONENT, device_path=None, disk_number=None, disk_size=None,
disk_type=None, mount_point=None)
self.assertIsNone(spec.device_path)
self.assertIsNone(spec.disk_number)
self.assertIsNone(spec.disk_size)
self.assertIsNone(spec.disk_type)
self.assertIsNone(spec.mount_point)
self.assertEqual(spec.num_striped_disks, 1)
def testUnrecognizedOptions(self):
with self.assertRaises(errors.Config.UnrecognizedOption) as cm:
disk.BaseDiskSpec(_COMPONENT, color='red', flavor='cherry', texture=None)
self.assertEqual(str(cm.exception), (
'Unrecognized options were found in test_component: color, flavor, '
'texture.'))
def testInvalidOptionTypes(self):
with self.assertRaises(errors.Config.InvalidValue):
disk.BaseDiskSpec(_COMPONENT, device_path=0)
with self.assertRaises(errors.Config.InvalidValue):
disk.BaseDiskSpec(_COMPONENT, disk_number='ten')
with self.assertRaises(errors.Config.InvalidValue):
disk.BaseDiskSpec(_COMPONENT, disk_size='ten')
with self.assertRaises(errors.Config.InvalidValue):
disk.BaseDiskSpec(_COMPONENT, disk_type=0)
with self.assertRaises(errors.Config.InvalidValue):
disk.BaseDiskSpec(_COMPONENT, mount_point=0)
with self.assertRaises(errors.Config.InvalidValue):
disk.BaseDiskSpec(_COMPONENT, num_striped_disks=None)
def testOutOfRangeOptionValues(self):
with self.assertRaises(errors.Config.InvalidValue):
disk.BaseDiskSpec(_COMPONENT, num_striped_disks=0)
def testNonPresentFlagsDoNotOverrideConfigs(self):
FLAGS['data_disk_size'].value = 100
FLAGS['data_disk_type'].value = 'flag_disk_type'
FLAGS['num_striped_disks'].value = 3
FLAGS['scratch_dir'].value = '/flag_scratch_dir'
spec = disk.BaseDiskSpec(
_COMPONENT,
FLAGS,
device_path='config_device_path',
disk_number=1,
disk_size=75,
disk_type='config_disk_type',
mount_point='/mountpoint',
num_striped_disks=2)
self.assertEqual(spec.device_path, 'config_device_path')
self.assertEqual(spec.disk_number, 1)
self.assertEqual(spec.disk_size, 75)
self.assertEqual(spec.disk_type, 'config_disk_type')
self.assertEqual(spec.mount_point, '/mountpoint')
self.assertEqual(spec.num_striped_disks, 2)
def testPresentFlagsOverrideConfigs(self):
FLAGS['data_disk_size'].parse(100)
FLAGS['data_disk_type'].parse('flag_disk_type')
FLAGS['num_striped_disks'].parse(3)
FLAGS['scratch_dir'].parse('/flag_scratch_dir')
spec = disk.BaseDiskSpec(
_COMPONENT,
FLAGS,
device_path='config_device_path',
disk_number=1,
disk_size=75,
disk_type='config_disk_type',
mount_point='/mountpoint',
num_striped_disks=2)
self.assertEqual(spec.device_path, 'config_device_path')
self.assertEqual(spec.disk_number, 1)
self.assertEqual(spec.disk_size, 100)
self.assertEqual(spec.disk_type, 'flag_disk_type')
self.assertEqual(spec.mount_point, '/flag_scratch_dir')
self.assertEqual(spec.num_striped_disks, 3)
class _NfsDisk(disk.NfsDisk):
def __init__(self, flags=None, default_nfs_version=None):
if flags:
disk_spec = disk.BaseDiskSpec(_COMPONENT, flags)
else:
disk_spec = disk.BaseDiskSpec(_COMPONENT)
super(_NfsDisk, self).__init__(disk_spec, 'host1:/volume1',
default_nfs_version)
class NfsDiskTestCase(pkb_common_test_case.PkbCommonTestCase):
def MountOptions(self, **overrides):
mount_options = {
'hard': None,
'retrans': 2,
'rsize': 1048576,
'timeo': 600,
'wsize': 1048576
}
mount_options.update(overrides)
return mount_options
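  # Parses "a,b=1,c=2"-style mount option strings back into a dict; flag-style
  # options such as 'hard' carry no value and map to None.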
def MountOptionsAsDict(self, mount_options_str):
options = dict()
int_values = set(['retrans', 'rsize', 'timeo', 'wsize'])
for entry in mount_options_str.split(','):
parts = entry.split('=', 1)
key = parts[0]
value = None if len(parts) == 1 else parts[1]
options[key] = int(value) if key in int_values else value
return options
def testDefaults(self):
nfs_disk = _NfsDisk()
self.assertEqual('host1:/volume1', nfs_disk.GetDevicePath())
self.assertEqual(self.MountOptions(),
self.MountOptionsAsDict(nfs_disk.mount_options))
self.assertEqual(nfs_disk.mount_options, nfs_disk.fstab_options)
disk_meta = {}
for key, value in six.iteritems(self.MountOptions()):
disk_meta['nfs_{}'.format(key)] = value
disk_meta.update({'num_stripes': 1, 'size': None, 'type': None})
self.assertEqual(disk_meta, nfs_disk.metadata)
self.assertTrue(nfs_disk._IsReady())
def testNfsFlags(self):
FLAGS['nfs_version'].parse('4.1')
FLAGS['nfs_rsize'].parse(1)
FLAGS['nfs_wsize'].parse(2)
FLAGS['nfs_timeout'].parse(3)
FLAGS['nfs_timeout_hard'].parse(False)
FLAGS['nfs_retries'].parse(4)
nfs_disk = _NfsDisk(FLAGS)
mount_options = self.MountOptions(soft=None, retrans=4, rsize=1, timeo=30,
wsize=2, nfsvers='4.1')
mount_options.pop('hard')
self.assertEqual(mount_options,
self.MountOptionsAsDict(nfs_disk.mount_options))
def testDefaultNfsVersion(self):
nfs_disk = _NfsDisk(default_nfs_version='4.1')
self.assertEqual('4.1', nfs_disk.nfs_version)
def testFlagsOverrideDefaultNfsVersion(self):
FLAGS['nfs_version'].parse('3.0')
nfs_disk = _NfsDisk(flags=FLAGS, default_nfs_version='4.1')
self.assertEqual('3.0', nfs_disk.nfs_version)
def testAttach(self):
vm = mock.Mock()
nfs_disk = _NfsDisk()
nfs_disk.Attach(vm)
vm.Install.assert_called_with('nfs_utils')
def testDetach(self):
vm = mock.Mock()
FLAGS['scratch_dir'].parse('/mnt')
nfs_disk = _NfsDisk(FLAGS)
nfs_disk.Attach(vm) # to set the vm on the disk
nfs_disk.Detach()
vm.RemoteCommand.assert_called_with('sudo umount /mnt')
class _SmbDisk(disk.SmbDisk):
def __init__(self, default_smb_version=None):
if FLAGS:
disk_spec = disk.BaseDiskSpec(_COMPONENT, FLAGS)
else:
disk_spec = disk.BaseDiskSpec(_COMPONENT)
super(_SmbDisk, self).__init__(
disk_spec, 'host1', {'user': 'username', 'pw': 'password'},
default_smb_version)
class SmbDiskTestCase(pkb_common_test_case.PkbCommonTestCase):
def MountOptions(self, **overrides):
mount_options = {
'vers': '3.0',
'username': 'username',
'password': 'password',
'dir_mode': '0777',
'file_mode': '0777',
'serverino': None,
'nostrictsync': None,
}
mount_options.update(overrides)
return mount_options
def MountOptionsAsDict(self, mount_options_str):
    options = dict()
    # All SMB option values are plain strings (or None for flag-style options
    # such as 'serverino'), so no type conversion is needed here.
    for entry in mount_options_str.split(','):
      parts = entry.split('=', 1)
      key = parts[0]
      options[key] = None if len(parts) == 1 else parts[1]
return options
def testDefaults(self):
smb_disk = _SmbDisk()
self.assertEqual('host1', smb_disk.GetDevicePath())
self.assertEqual(self.MountOptions(),
self.MountOptionsAsDict(smb_disk.mount_options))
self.assertEqual(smb_disk.mount_options, smb_disk.fstab_options)
disk_meta = {}
disk_meta.update({'num_stripes': 1, 'size': None, 'type': None})
self.assertEqual(disk_meta, smb_disk.metadata)
self.assertTrue(smb_disk._IsReady())
def testSmbFlags(self):
FLAGS['smb_version'].parse('3.0')
smb_disk = _SmbDisk(FLAGS)
mount_options = self.MountOptions(vers='3.0', dir_mode='0777',
file_mode='0777')
self.assertEqual(mount_options,
self.MountOptionsAsDict(smb_disk.mount_options))
def testDefaultSmbVersion(self):
smb_disk = _SmbDisk(default_smb_version='3.0')
self.assertEqual('3.0', smb_disk.smb_version)
def testFlagsOverrideDefaultSmbVersion(self):
FLAGS['smb_version'].parse('2.1')
smb_disk = _SmbDisk(default_smb_version='3.0')
self.assertEqual('2.1', smb_disk.smb_version)
def testAttach(self):
vm = mock.Mock()
smb_disk = _SmbDisk()
smb_disk.Attach(vm)
vm.InstallPackages.assert_called_with('cifs-utils')
def testDetach(self):
vm = mock.Mock()
FLAGS['scratch_dir'].parse('/mnt')
smb_disk = _SmbDisk(FLAGS)
smb_disk.Attach(vm) # to set the vm on the disk
smb_disk.Detach()
vm.RemoteCommand.assert_called_with('sudo umount /mnt')
if __name__ == '__main__':
unittest.main()
|
from flexx import event
class Test(event.Component):
foo = event.IntProp(0, settable=True)
@event.reaction
def react_to_foo_a(self):
print('A: foo changed to %i' % self.foo)
@event.reaction('foo')
def react_to_foo_b(self, *events):
        # This reaction receives all pending events in one batched call, so it
        # reports the overall change from the first to the last event.
print('B: foo changed from %i to %i' % (events[0].old_value,
events[-1].new_value))
@event.reaction('foo')
def react_to_foo_c(self, *events):
print('C: foo changed:')
for ev in events:
print(' from %i to %i' % (ev.old_value, ev.new_value))
c = Test()
c.set_foo(3)
c.set_foo(7)
event.loop.iter()
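# Illustrative output (exact reaction ordering may vary): the loop batches both
# set_foo() calls, so B sees one span while C reports each step:
#   A: foo changed to 7
#   B: foo changed from 0 to 7
#   C: foo changed:
#     from 0 to 3
#     from 3 to 7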
|
import voluptuous as vol
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event as event_trigger
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_EVENT,
CONF_PLATFORM,
CONF_TYPE,
CONF_UNIQUE_ID,
)
from . import DOMAIN
from .hue_event import CONF_HUE_EVENT
CONF_SUBTYPE = "subtype"
CONF_SHORT_PRESS = "remote_button_short_press"
CONF_SHORT_RELEASE = "remote_button_short_release"
CONF_LONG_RELEASE = "remote_button_long_release"
CONF_DOUBLE_SHORT_RELEASE = "remote_double_button_short_press"
CONF_DOUBLE_LONG_RELEASE = "remote_double_button_long_press"
CONF_TURN_ON = "turn_on"
CONF_TURN_OFF = "turn_off"
CONF_DIM_UP = "dim_up"
CONF_DIM_DOWN = "dim_down"
CONF_BUTTON_1 = "button_1"
CONF_BUTTON_2 = "button_2"
CONF_BUTTON_3 = "button_3"
CONF_BUTTON_4 = "button_4"
CONF_DOUBLE_BUTTON_1 = "double_buttons_1_3"
CONF_DOUBLE_BUTTON_2 = "double_buttons_2_4"
HUE_DIMMER_REMOTE_MODEL = "Hue dimmer switch" # RWL020/021
HUE_DIMMER_REMOTE = {
(CONF_SHORT_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1003},
(CONF_SHORT_RELEASE, CONF_DIM_UP): {CONF_EVENT: 2002},
(CONF_LONG_RELEASE, CONF_DIM_UP): {CONF_EVENT: 2003},
(CONF_SHORT_RELEASE, CONF_DIM_DOWN): {CONF_EVENT: 3002},
(CONF_LONG_RELEASE, CONF_DIM_DOWN): {CONF_EVENT: 3003},
(CONF_SHORT_RELEASE, CONF_TURN_OFF): {CONF_EVENT: 4002},
(CONF_LONG_RELEASE, CONF_TURN_OFF): {CONF_EVENT: 4003},
}
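# In the dimmer table above, the thousands digit encodes the button (1=on,
# 2=dim up, 3=dim down, 4=off) and the final digit the action (2=short
# release, 3=long release), matching the event codes sent by the Hue bridge.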
HUE_BUTTON_REMOTE_MODEL = "Hue Smart button" # ZLLSWITCH/ROM001
HUE_BUTTON_REMOTE = {
(CONF_SHORT_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1003},
}
HUE_TAP_REMOTE_MODEL = "Hue tap switch" # ZGPSWITCH
HUE_TAP_REMOTE = {
(CONF_SHORT_PRESS, CONF_BUTTON_1): {CONF_EVENT: 34},
(CONF_SHORT_PRESS, CONF_BUTTON_2): {CONF_EVENT: 16},
(CONF_SHORT_PRESS, CONF_BUTTON_3): {CONF_EVENT: 17},
(CONF_SHORT_PRESS, CONF_BUTTON_4): {CONF_EVENT: 18},
}
HUE_FOHSWITCH_REMOTE_MODEL = "Friends of Hue Switch" # ZGPSWITCH
HUE_FOHSWITCH_REMOTE = {
(CONF_SHORT_PRESS, CONF_BUTTON_1): {CONF_EVENT: 20},
(CONF_LONG_RELEASE, CONF_BUTTON_1): {CONF_EVENT: 16},
(CONF_SHORT_PRESS, CONF_BUTTON_2): {CONF_EVENT: 21},
(CONF_LONG_RELEASE, CONF_BUTTON_2): {CONF_EVENT: 17},
(CONF_SHORT_PRESS, CONF_BUTTON_3): {CONF_EVENT: 23},
(CONF_LONG_RELEASE, CONF_BUTTON_3): {CONF_EVENT: 19},
(CONF_SHORT_PRESS, CONF_BUTTON_4): {CONF_EVENT: 22},
(CONF_LONG_RELEASE, CONF_BUTTON_4): {CONF_EVENT: 18},
(CONF_DOUBLE_SHORT_RELEASE, CONF_DOUBLE_BUTTON_1): {CONF_EVENT: 101},
(CONF_DOUBLE_LONG_RELEASE, CONF_DOUBLE_BUTTON_1): {CONF_EVENT: 100},
(CONF_DOUBLE_SHORT_RELEASE, CONF_DOUBLE_BUTTON_2): {CONF_EVENT: 99},
(CONF_DOUBLE_LONG_RELEASE, CONF_DOUBLE_BUTTON_2): {CONF_EVENT: 98},
}
REMOTES = {
HUE_DIMMER_REMOTE_MODEL: HUE_DIMMER_REMOTE,
HUE_TAP_REMOTE_MODEL: HUE_TAP_REMOTE,
HUE_BUTTON_REMOTE_MODEL: HUE_BUTTON_REMOTE,
HUE_FOHSWITCH_REMOTE_MODEL: HUE_FOHSWITCH_REMOTE,
}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{vol.Required(CONF_TYPE): str, vol.Required(CONF_SUBTYPE): str}
)
def _get_hue_event_from_device_id(hass, device_id):
"""Resolve hue event from device id."""
for bridge in hass.data.get(DOMAIN, {}).values():
for hue_event in bridge.sensor_manager.current_events.values():
if device_id == hue_event.device_registry_id:
return hue_event
return None
async def async_validate_trigger_config(hass, config):
"""Validate config."""
config = TRIGGER_SCHEMA(config)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(config[CONF_DEVICE_ID])
trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
if (
not device
or device.model not in REMOTES
or trigger not in REMOTES[device.model]
):
raise InvalidDeviceAutomationConfig
return config
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(config[CONF_DEVICE_ID])
hue_event = _get_hue_event_from_device_id(hass, device.id)
if hue_event is None:
raise InvalidDeviceAutomationConfig
trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
trigger = REMOTES[device.model][trigger]
event_config = {
event_trigger.CONF_PLATFORM: "event",
event_trigger.CONF_EVENT_TYPE: CONF_HUE_EVENT,
event_trigger.CONF_EVENT_DATA: {CONF_UNIQUE_ID: hue_event.unique_id, **trigger},
}
event_config = event_trigger.TRIGGER_SCHEMA(event_config)
return await event_trigger.async_attach_trigger(
hass, event_config, action, automation_info, platform_type="device"
)
async def async_get_triggers(hass, device_id):
"""List device triggers.
Make sure device is a supported remote model.
Retrieve the hue event object matching device entry.
Generate device trigger list.
"""
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(device_id)
if device.model not in REMOTES:
return
triggers = []
for trigger, subtype in REMOTES[device.model]:
triggers.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_PLATFORM: "device",
CONF_TYPE: trigger,
CONF_SUBTYPE: subtype,
}
)
return triggers
|
import asyncio
from functools import partial
from VL53L1X2 import VL53L1X # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components import rpi_gpio
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, LENGTH_MILLIMETERS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
CONF_I2C_ADDRESS = "i2c_address"
CONF_I2C_BUS = "i2c_bus"
CONF_XSHUT = "xshut"
DEFAULT_NAME = "VL53L1X"
DEFAULT_I2C_ADDRESS = 0x29
DEFAULT_I2C_BUS = 1
DEFAULT_XSHUT = 16
DEFAULT_RANGE = 2
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): vol.Coerce(int),
vol.Optional(CONF_XSHUT, default=DEFAULT_XSHUT): cv.positive_int,
}
)
def init_tof_0(xshut, sensor):
"""XSHUT port LOW resets the device."""
sensor.open()
rpi_gpio.setup_output(xshut)
rpi_gpio.write_output(xshut, 0)
def init_tof_1(xshut):
"""XSHUT port HIGH enables the device."""
rpi_gpio.setup_output(xshut)
rpi_gpio.write_output(xshut, 1)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Reset and initialize the VL53L1X ToF Sensor from STMicroelectronics."""
name = config.get(CONF_NAME)
bus_number = config.get(CONF_I2C_BUS)
i2c_address = config.get(CONF_I2C_ADDRESS)
unit = LENGTH_MILLIMETERS
xshut = config.get(CONF_XSHUT)
sensor = await hass.async_add_executor_job(partial(VL53L1X, bus_number))
await hass.async_add_executor_job(init_tof_0, xshut, sensor)
await asyncio.sleep(0.01)
await hass.async_add_executor_job(init_tof_1, xshut)
await asyncio.sleep(0.01)
dev = [VL53L1XSensor(sensor, name, unit, i2c_address)]
async_add_entities(dev, True)
class VL53L1XSensor(Entity):
"""Implementation of VL53L1X sensor."""
def __init__(self, vl53l1x_sensor, name, unit, i2c_address):
"""Initialize the sensor."""
self._name = name
self._unit_of_measurement = unit
self.vl53l1x_sensor = vl53l1x_sensor
self.i2c_address = i2c_address
self._state = None
self.init = True
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def state(self) -> int:
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement."""
return self._unit_of_measurement
def update(self):
"""Get the latest measurement and update state."""
if self.init:
self.vl53l1x_sensor.add_sensor(self.i2c_address, self.i2c_address)
self.init = False
self.vl53l1x_sensor.start_ranging(self.i2c_address, DEFAULT_RANGE)
self.vl53l1x_sensor.update(self.i2c_address)
self.vl53l1x_sensor.stop_ranging(self.i2c_address)
self._state = self.vl53l1x_sensor.distance
|
import numpy as np
import pytest
from mne import create_info
from mne.io import RawArray
from mne.utils import logger, catch_logging, run_tests_if_main
def bad_1(x):
"""Fail."""
return # bad return type
def bad_2(x):
"""Fail."""
return x[:-1] # bad shape
def bad_3(x):
"""Fail."""
return x[0, :]
def printer(x):
"""Print."""
logger.info('exec')
return x
@pytest.mark.slowtest
def test_apply_function_verbose():
"""Test apply function verbosity."""
n_chan = 2
n_times = 3
ch_names = [str(ii) for ii in range(n_chan)]
raw = RawArray(np.zeros((n_chan, n_times)),
create_info(ch_names, 1., 'mag'))
# test return types in both code paths (parallel / 1 job)
with pytest.raises(TypeError, match='Return value must be an ndarray'):
raw.apply_function(bad_1)
with pytest.raises(ValueError, match='Return data must have shape'):
raw.apply_function(bad_2)
with pytest.raises(TypeError, match='Return value must be an ndarray'):
raw.apply_function(bad_1, n_jobs=2)
with pytest.raises(ValueError, match='Return data must have shape'):
raw.apply_function(bad_2, n_jobs=2)
# test return type when `channel_wise=False`
raw.apply_function(printer, channel_wise=False)
with pytest.raises(TypeError, match='Return value must be an ndarray'):
raw.apply_function(bad_1, channel_wise=False)
with pytest.raises(ValueError, match='Return data must have shape'):
raw.apply_function(bad_3, channel_wise=False)
# check our arguments
with catch_logging() as sio:
out = raw.apply_function(printer, verbose=False)
assert len(sio.getvalue(close=False)) == 0
assert out is raw
raw.apply_function(printer, verbose=True)
assert sio.getvalue().count('\n') == n_chan
run_tests_if_main()
|
import os
import shutil
import subprocess
import tempfile
import unittest
PKG_PATH = os.getcwd()
TEST_PATH = os.path.join(PKG_PATH, 'test')
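# Builds a bash command line that sources rosbash and primes the COMP_WORDS /
# COMP_CWORD variables used by bash programmable completion, so completion
# functions can be exercised non-interactively in the tests below.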
def make_bash_pre_command(strings, currentword):
return "bash -c '. %s; export COMP_WORDS=(%s); export COMP_CWORD=%s;" % (os.path.join(PKG_PATH, 'rosbash'), ' '.join(['"%s"' % w for w in strings]), currentword)
class TestRosBash(unittest.TestCase):
def setUp(self):
self.cmdbash = os.path.join(TEST_PATH, 'test_rosbash.bash')
self.assertTrue(os.path.exists(self.cmdbash))
self.cmdzsh = os.path.join(TEST_PATH, 'test_roszsh.zsh')
self.assertTrue(os.path.exists(self.cmdzsh))
def test_rosbash_completion(self):
subprocess.check_call([self.cmdbash], cwd=TEST_PATH)
def test_roszsh_completion(self):
subprocess.check_call([self.cmdzsh], cwd=TEST_PATH)
class TestWithFiles(unittest.TestCase):
@classmethod
def setUpClass(self):
self.test_root_path = tempfile.mkdtemp()
@classmethod
def tearDownClass(self):
shutil.rmtree(self.test_root_path)
def test_make_precommand(self):
self.assertEqual("bash -c '. %s; export COMP_WORDS=(\"foo\" \"bar\"); export COMP_CWORD=1;" % os.path.join(PKG_PATH, 'rosbash'), make_bash_pre_command(['foo', 'bar'], 1))
self.assertEqual("bash -c '. %s; export COMP_WORDS=(\"foo\"); export COMP_CWORD=2;" % os.path.join(PKG_PATH, 'rosbash'), make_bash_pre_command(['foo'], 2))
def test_roslaunch_completion(self):
        # regression test that roslaunch completion still lists the rosbash
        # package's own example.launch even when unrelated launchfiles exist
        # in the working directory
subprocess.check_call('touch foo.launch', shell=True, cwd=self.test_root_path)
subprocess.check_call('touch bar.launch', shell=True, cwd=self.test_root_path)
cmd = make_bash_pre_command(['rosbash', 'rosbash'], 2)
cmd += "_roscomplete_launch rosbash rosbash; echo $COMPREPLY'"
        p = subprocess.Popen(cmd,
                             shell=True,
                             stdout=subprocess.PIPE,
                             universal_newlines=True,  # decode stdout to str for the checks below
                             cwd=self.test_root_path)
output = p.communicate()
self.assertEqual(0, p.returncode, (p.returncode, output, cmd))
self.assertTrue('example.launch' in output[0], (p.returncode, output[0], cmd))
|
from hyperion import const
from homeassistant.components.hyperion import light as hyperion_light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
DOMAIN,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, Mock, call, patch
TEST_HOST = "test-hyperion-host"
TEST_PORT = const.DEFAULT_PORT
TEST_NAME = "test_hyperion_name"
TEST_PRIORITY = 128
TEST_ENTITY_ID = f"{DOMAIN}.{TEST_NAME}"
def create_mock_client():
"""Create a mock Hyperion client."""
mock_client = Mock()
mock_client.async_client_connect = AsyncMock(return_value=True)
mock_client.adjustment = None
mock_client.effects = None
mock_client.id = "%s:%i" % (TEST_HOST, TEST_PORT)
return mock_client
def call_registered_callback(client, key, *args, **kwargs):
"""Call a Hyperion entity callback that was registered with the client."""
return client.set_callbacks.call_args[0][0][key](*args, **kwargs)
async def setup_entity(hass, client=None):
"""Add a test Hyperion entity to hass."""
client = client or create_mock_client()
with patch("hyperion.client.HyperionClient", return_value=client):
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"platform": "hyperion",
"name": TEST_NAME,
"host": TEST_HOST,
"port": const.DEFAULT_PORT,
"priority": TEST_PRIORITY,
}
},
)
await hass.async_block_till_done()
async def test_setup_platform(hass):
"""Test setting up the platform."""
client = create_mock_client()
await setup_entity(hass, client=client)
assert hass.states.get(TEST_ENTITY_ID) is not None
async def test_setup_platform_not_ready(hass):
"""Test the platform not being ready."""
client = create_mock_client()
client.async_client_connect = AsyncMock(return_value=False)
await setup_entity(hass, client=client)
assert hass.states.get(TEST_ENTITY_ID) is None
async def test_light_basic_properties(hass):
"""Test the basic properties."""
client = create_mock_client()
await setup_entity(hass, client=client)
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.state == "on"
assert entity_state.attributes["brightness"] == 255
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
# By default the effect list is the 3 external sources + 'Solid'.
assert len(entity_state.attributes["effect_list"]) == 4
assert (
entity_state.attributes["supported_features"] == hyperion_light.SUPPORT_HYPERION
)
async def test_light_async_turn_on(hass):
"""Test turning the light on."""
client = create_mock_client()
await setup_entity(hass, client=client)
# On (=), 100% (=), solid (=), [255,255,255] (=)
client.async_send_set_color = AsyncMock(return_value=True)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID}, blocking=True
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: [255, 255, 255],
const.KEY_ORIGIN: hyperion_light.DEFAULT_ORIGIN,
}
)
# On (=), 50% (!), solid (=), [255,255,255] (=)
# ===
brightness = 128
client.async_send_set_color = AsyncMock(return_value=True)
client.async_send_set_adjustment = AsyncMock(return_value=True)
await hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID, ATTR_BRIGHTNESS: brightness},
blocking=True,
)
assert client.async_send_set_adjustment.call_args == call(
**{const.KEY_ADJUSTMENT: {const.KEY_BRIGHTNESS: 50}}
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: [255, 255, 255],
const.KEY_ORIGIN: hyperion_light.DEFAULT_ORIGIN,
}
)
# Simulate a state callback from Hyperion.
client.adjustment = [{const.KEY_BRIGHTNESS: 50}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.state == "on"
assert entity_state.attributes["brightness"] == brightness
# On (=), 50% (=), solid (=), [0,255,255] (!)
hs_color = (180.0, 100.0)
client.async_send_set_color = AsyncMock(return_value=True)
await hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID, ATTR_HS_COLOR: hs_color},
blocking=True,
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: (0, 255, 255),
const.KEY_ORIGIN: hyperion_light.DEFAULT_ORIGIN,
}
)
# Simulate a state callback from Hyperion.
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (0, 255, 255)},
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["hs_color"] == hs_color
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
# On (=), 100% (!), solid, [0,255,255] (=)
brightness = 255
client.async_send_set_color = AsyncMock(return_value=True)
client.async_send_set_adjustment = AsyncMock(return_value=True)
await hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID, ATTR_BRIGHTNESS: brightness},
blocking=True,
)
assert client.async_send_set_adjustment.call_args == call(
**{const.KEY_ADJUSTMENT: {const.KEY_BRIGHTNESS: 100}}
)
assert client.async_send_set_color.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_COLOR: (0, 255, 255),
const.KEY_ORIGIN: hyperion_light.DEFAULT_ORIGIN,
}
)
client.adjustment = [{const.KEY_BRIGHTNESS: 100}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["brightness"] == brightness
# On (=), 100% (=), V4L (!), [0,255,255] (=)
effect = const.KEY_COMPONENTID_EXTERNAL_SOURCES[2] # V4L
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_component = AsyncMock(return_value=True)
await hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID, ATTR_EFFECT: effect},
blocking=True,
)
assert client.async_send_clear.call_args == call(
**{const.KEY_PRIORITY: TEST_PRIORITY}
)
assert client.async_send_set_component.call_args_list == [
call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[0],
const.KEY_STATE: False,
}
}
),
call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[1],
const.KEY_STATE: False,
}
}
),
call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_EXTERNAL_SOURCES[2],
const.KEY_STATE: True,
}
}
),
]
client.visible_priority = {const.KEY_COMPONENTID: effect}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["icon"] == hyperion_light.ICON_EXTERNAL_SOURCE
assert entity_state.attributes["effect"] == effect
# On (=), 100% (=), "Warm Blobs" (!), [0,255,255] (=)
effect = "Warm Blobs"
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_effect = AsyncMock(return_value=True)
await hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: TEST_ENTITY_ID, ATTR_EFFECT: effect},
blocking=True,
)
assert client.async_send_clear.call_args == call(
**{const.KEY_PRIORITY: TEST_PRIORITY}
)
assert client.async_send_set_effect.call_args == call(
**{
const.KEY_PRIORITY: TEST_PRIORITY,
const.KEY_EFFECT: {const.KEY_NAME: effect},
const.KEY_ORIGIN: hyperion_light.DEFAULT_ORIGIN,
}
)
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_EFFECT,
const.KEY_OWNER: effect,
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["icon"] == hyperion_light.ICON_EFFECT
assert entity_state.attributes["effect"] == effect
# No calls if disconnected.
client.has_loaded_state = False
call_registered_callback(client, "client-update", {"loaded-state": False})
client.async_send_clear = AsyncMock(return_value=True)
client.async_send_set_effect = AsyncMock(return_value=True)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: TEST_ENTITY_ID}, blocking=True
)
assert not client.async_send_clear.called
assert not client.async_send_set_effect.called
async def test_light_async_turn_off(hass):
"""Test turning the light off."""
client = create_mock_client()
await setup_entity(hass, client=client)
client.async_send_set_component = AsyncMock(return_value=True)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: TEST_ENTITY_ID}, blocking=True
)
assert client.async_send_set_component.call_args == call(
**{
const.KEY_COMPONENTSTATE: {
const.KEY_COMPONENT: const.KEY_COMPONENTID_LEDDEVICE,
const.KEY_STATE: False,
}
}
)
# No calls if no state loaded.
client.has_loaded_state = False
client.async_send_set_component = AsyncMock(return_value=True)
call_registered_callback(client, "client-update", {"loaded-state": False})
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: TEST_ENTITY_ID}, blocking=True
)
assert not client.async_send_set_component.called
async def test_light_async_updates_from_hyperion_client(hass):
"""Test receiving a variety of Hyperion client callbacks."""
client = create_mock_client()
await setup_entity(hass, client=client)
    # Brightness change gets accepted (Hyperion reports percent; HA uses 0-255).
brightness = 10
client.adjustment = [{const.KEY_BRIGHTNESS: brightness}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
# Broken brightness value is ignored.
bad_brightness = -200
client.adjustment = [{const.KEY_BRIGHTNESS: bad_brightness}]
call_registered_callback(client, "adjustment-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
# Update components.
client.is_on.return_value = True
call_registered_callback(client, "components-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.state == "on"
client.is_on.return_value = False
call_registered_callback(client, "components-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.state == "off"
# Update priorities (V4L)
client.is_on.return_value = True
client.visible_priority = {const.KEY_COMPONENTID: const.KEY_COMPONENTID_V4L}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["icon"] == hyperion_light.ICON_EXTERNAL_SOURCE
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
assert entity_state.attributes["effect"] == const.KEY_COMPONENTID_V4L
# Update priorities (Effect)
effect = "foo"
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_EFFECT,
const.KEY_OWNER: effect,
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["effect"] == effect
assert entity_state.attributes["icon"] == hyperion_light.ICON_EFFECT
assert entity_state.attributes["hs_color"] == (0.0, 0.0)
# Update priorities (Color)
rgb = (0, 100, 100)
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: rgb},
}
call_registered_callback(client, "priorities-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["hs_color"] == (180.0, 100.0)
# Update effect list
effects = [{const.KEY_NAME: "One"}, {const.KEY_NAME: "Two"}]
client.effects = effects
call_registered_callback(client, "effects-update")
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["effect_list"] == [
effect[const.KEY_NAME] for effect in effects
] + const.KEY_COMPONENTID_EXTERNAL_SOURCES + [hyperion_light.KEY_EFFECT_SOLID]
# Update connection status (e.g. disconnection).
# Turn on late, check state, disconnect, ensure it cannot be turned off.
client.has_loaded_state = False
call_registered_callback(client, "client-update", {"loaded-state": False})
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.state == "unavailable"
# Update connection status (e.g. re-connection)
client.has_loaded_state = True
call_registered_callback(client, "client-update", {"loaded-state": True})
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.state == "on"
async def test_full_state_loaded_on_start(hass):
"""Test receiving a variety of Hyperion client callbacks."""
client = create_mock_client()
# Update full state (should call all update methods).
brightness = 25
client.adjustment = [{const.KEY_BRIGHTNESS: brightness}]
client.visible_priority = {
const.KEY_COMPONENTID: const.KEY_COMPONENTID_COLOR,
const.KEY_VALUE: {const.KEY_RGB: (0, 100, 100)},
}
client.effects = [{const.KEY_NAME: "One"}, {const.KEY_NAME: "Two"}]
await setup_entity(hass, client=client)
entity_state = hass.states.get(TEST_ENTITY_ID)
assert entity_state.attributes["brightness"] == round(255 * (brightness / 100.0))
assert entity_state.attributes["effect"] == hyperion_light.KEY_EFFECT_SOLID
assert entity_state.attributes["icon"] == hyperion_light.ICON_LIGHTBULB
assert entity_state.attributes["hs_color"] == (180.0, 100.0)
|
from __future__ import division
import argparse
import multiprocessing
import numpy as np
import chainer
from chainer.training import extensions
from chainer.training.triggers import ManualScheduleTrigger
import chainermn
from chainercv.chainer_experimental.datasets.sliceable \
import ConcatenatedDataset
from chainercv.chainer_experimental.datasets.sliceable \
import TransformDataset
from chainercv.chainer_experimental.training.extensions import make_shift
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.experimental.links import FCISResNet101
from chainercv.experimental.links import FCISTrainChain
from chainercv.experimental.links.model.fcis.utils.proposal_target_creator \
import ProposalTargetCreator
from chainercv.extensions import InstanceSegmentationCOCOEvaluator
from chainercv.links.model.ssd import GradientScaling
from train_sbd import concat_examples
from train_sbd import Transform
# https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator
try:
import cv2
cv2.setNumThreads(0)
except ImportError:
pass
def main():
parser = argparse.ArgumentParser(
description='ChainerCV training example: FCIS')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument(
'--lr', '-l', type=float, default=None,
help='Learning rate for multi GPUs')
parser.add_argument('--batchsize', type=int, default=8)
parser.add_argument('--epoch', '-e', type=int, default=18)
parser.add_argument('--cooldown-epoch', '-ce', type=int, default=12)
args = parser.parse_args()
# https://docs.chainer.org/en/stable/chainermn/tutorial/tips_faqs.html#using-multiprocessiterator
if hasattr(multiprocessing, 'set_start_method'):
multiprocessing.set_start_method('forkserver')
p = multiprocessing.Process()
p.start()
p.join()
# chainermn
comm = chainermn.create_communicator('pure_nccl')
device = comm.intra_rank
np.random.seed(args.seed)
# model
proposal_creator_params = FCISResNet101.proposal_creator_params
proposal_creator_params['min_size'] = 2
fcis = FCISResNet101(
n_fg_class=len(coco_instance_segmentation_label_names),
anchor_scales=(4, 8, 16, 32),
pretrained_model='imagenet', iter2=False,
proposal_creator_params=proposal_creator_params)
fcis.use_preset('coco_evaluate')
proposal_target_creator = ProposalTargetCreator()
proposal_target_creator.neg_iou_thresh_lo = 0.0
model = FCISTrainChain(
fcis, proposal_target_creator=proposal_target_creator)
chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
# train dataset
train_dataset = COCOInstanceSegmentationDataset(
year='2014', split='train')
vmml_dataset = COCOInstanceSegmentationDataset(
year='2014', split='valminusminival')
# filter non-annotated data
train_indices = np.array(
[i for i, label in enumerate(train_dataset.slice[:, ['label']])
if len(label[0]) > 0],
dtype=np.int32)
train_dataset = train_dataset.slice[train_indices]
vmml_indices = np.array(
[i for i, label in enumerate(vmml_dataset.slice[:, ['label']])
if len(label[0]) > 0],
dtype=np.int32)
vmml_dataset = vmml_dataset.slice[vmml_indices]
train_dataset = TransformDataset(
ConcatenatedDataset(train_dataset, vmml_dataset),
('img', 'mask', 'label', 'bbox', 'scale'),
Transform(model.fcis))
if comm.rank == 0:
indices = np.arange(len(train_dataset))
else:
indices = None
indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
train_dataset = train_dataset.slice[indices]
train_iter = chainer.iterators.SerialIterator(
train_dataset, batch_size=args.batchsize // comm.size)
# test dataset
if comm.rank == 0:
test_dataset = COCOInstanceSegmentationDataset(
year='2014', split='minival', use_crowded=True,
return_crowded=True, return_area=True)
indices = np.arange(len(test_dataset))
test_dataset = test_dataset.slice[indices]
test_iter = chainer.iterators.SerialIterator(
test_dataset, batch_size=1, repeat=False, shuffle=False)
# optimizer
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.MomentumSGD(momentum=0.9),
comm)
optimizer.setup(model)
model.fcis.head.conv1.W.update_rule.add_hook(GradientScaling(3.0))
model.fcis.head.conv1.b.update_rule.add_hook(GradientScaling(3.0))
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))
for param in model.params():
if param.name in ['beta', 'gamma']:
param.update_rule.enabled = False
model.fcis.extractor.conv1.disable_update()
model.fcis.extractor.res2.disable_update()
updater = chainer.training.updater.StandardUpdater(
train_iter, optimizer, converter=concat_examples,
device=device)
trainer = chainer.training.Trainer(
updater, (args.epoch, 'epoch'), out=args.out)
# lr scheduler
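    # Warm up at 10% of the base rate for the first 2000 global iterations,
    # run at the full rate until the cooldown epoch, then decay by 10x.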
@make_shift('lr')
def lr_scheduler(trainer):
if args.lr is None:
base_lr = 0.0005 * args.batchsize
else:
base_lr = args.lr
iteration = trainer.updater.iteration
epoch = trainer.updater.epoch
if (iteration * comm.size) < 2000:
rate = 0.1
elif epoch < args.cooldown_epoch:
rate = 1
else:
rate = 0.1
return rate * base_lr
trainer.extend(lr_scheduler)
if comm.rank == 0:
# interval
log_interval = 100, 'iteration'
plot_interval = 3000, 'iteration'
print_interval = 20, 'iteration'
# training extensions
trainer.extend(
extensions.snapshot_object(
model.fcis, filename='snapshot_model.npz'),
trigger=(args.epoch, 'epoch'))
trainer.extend(
extensions.observe_lr(),
trigger=log_interval)
trainer.extend(
extensions.LogReport(log_name='log.json', trigger=log_interval))
report_items = [
'iteration', 'epoch', 'elapsed_time', 'lr',
'main/loss',
'main/rpn_loc_loss',
'main/rpn_cls_loss',
'main/roi_loc_loss',
'main/roi_cls_loss',
'main/roi_mask_loss',
'validation/main/map/iou=0.50:0.95/area=all/max_dets=100',
]
trainer.extend(
extensions.PrintReport(report_items), trigger=print_interval)
trainer.extend(
extensions.ProgressBar(update_interval=10))
if extensions.PlotReport.available():
trainer.extend(
extensions.PlotReport(
['main/loss'],
file_name='loss.png', trigger=plot_interval),
trigger=plot_interval)
trainer.extend(
InstanceSegmentationCOCOEvaluator(
test_iter, model.fcis,
label_names=coco_instance_segmentation_label_names),
trigger=ManualScheduleTrigger(
[len(train_dataset) * args.cooldown_epoch,
len(train_dataset) * args.epoch], 'iteration'))
trainer.extend(extensions.dump_graph('main/loss'))
trainer.run()
if __name__ == '__main__':
main()
|
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.browser import navigate
from qutebrowser.utils import urlutils
class TestIncDec:
pytestmark = pytest.mark.usefixtures('config_stub')
@pytest.mark.parametrize('incdec', ['increment', 'decrement'])
@pytest.mark.parametrize('value', [
'{}foo', 'foo{}', 'foo{}bar', '42foo{}'
])
@pytest.mark.parametrize('url', [
'http://example.com:80/v1/path/{}/test',
'http://example.com:80/v1/query_test?value={}',
'http://example.com:80/v1/anchor_test#{}',
'http://host_{}_test.com:80',
'http://m4ny.c0m:80/number5/3very?where=yes#{}',
# Make sure that FullyDecoded is not used (to avoid losing information)
'http://localhost/%3A{}',
'http://localhost/:{}',
'http://localhost/?v=%3A{}',
'http://localhost/?v=:{}',
'http://localhost/#%3A{}',
'http://localhost/#:{}',
# Make sure that spaces in paths work
'http://example.com/path with {} spaces',
])
def test_incdec(self, incdec, value, url, config_stub):
if (value == '{}foo' and
url == 'http://example.com/path with {} spaces'):
pytest.xfail("https://github.com/qutebrowser/qutebrowser/issues/4917")
config_stub.val.url.incdec_segments = ['host', 'path', 'query',
'anchor']
# The integer used should not affect test output, as long as it's
# bigger than 1
# 20 was chosen by dice roll, guaranteed to be random
base_value = value.format(20)
if incdec == 'increment':
expected_value = value.format(21)
else:
expected_value = value.format(19)
base_url = QUrl(url.format(base_value))
expected_url = QUrl(url.format(expected_value))
assert navigate.incdec(base_url, 1, incdec) == expected_url
def test_port(self, config_stub):
config_stub.val.url.incdec_segments = ['port']
base_url = QUrl('http://localhost:8000')
new_url = navigate.incdec(base_url, 1, 'increment')
assert new_url == QUrl('http://localhost:8001')
new_url = navigate.incdec(base_url, 1, 'decrement')
assert new_url == QUrl('http://localhost:7999')
def test_port_default(self, config_stub):
"""Test that a default port (with url.port() == -1) is not touched."""
config_stub.val.url.incdec_segments = ['port']
base_url = QUrl('http://localhost')
with pytest.raises(navigate.Error):
navigate.incdec(base_url, 1, 'increment')
@pytest.mark.parametrize('inc_or_dec', ['increment', 'decrement'])
@pytest.mark.parametrize('value', [
'{}foo', 'foo{}', 'foo{}bar', '42foo{}'
])
@pytest.mark.parametrize('url', [
'http://example.com:80/v1/path/{}/test',
'http://example.com:80/v1/query_test?value={}',
'http://example.com:80/v1/anchor_test#{}',
'http://host_{}_test.com:80',
'http://m4ny.c0m:80/number5/3very?where=yes#{}',
])
@pytest.mark.parametrize('count', [1, 5, 100])
def test_count(self, inc_or_dec, value, url, count, config_stub):
config_stub.val.url.incdec_segments = ['host', 'path', 'query',
'anchor']
base_value = value.format(20)
if inc_or_dec == 'increment':
expected_value = value.format(20 + count)
else:
if count > 20:
return
expected_value = value.format(20 - count)
base_url = QUrl(url.format(base_value))
expected_url = QUrl(url.format(expected_value))
new_url = navigate.incdec(base_url, count, inc_or_dec)
assert new_url == expected_url
@pytest.mark.parametrize('number, expected, inc_or_dec', [
('01', '02', 'increment'),
('09', '10', 'increment'),
('009', '010', 'increment'),
('02', '01', 'decrement'),
('10', '9', 'decrement'),
('010', '009', 'decrement')
])
def test_leading_zeroes(self, number, expected, inc_or_dec, config_stub):
config_stub.val.url.incdec_segments = ['path']
url = 'http://example.com/{}'
base_url = QUrl(url.format(number))
expected_url = QUrl(url.format(expected))
new_url = navigate.incdec(base_url, 1, inc_or_dec)
assert new_url == expected_url
@pytest.mark.parametrize('url, segments, expected', [
('http://ex4mple.com/test_4?page=3#anchor2', ['host'],
'http://ex5mple.com/test_4?page=3#anchor2'),
('http://ex4mple.com/test_4?page=3#anchor2', ['host', 'path'],
'http://ex4mple.com/test_5?page=3#anchor2'),
('http://ex4mple.com/test_4?page=3#anchor5', ['host', 'path', 'query'],
'http://ex4mple.com/test_4?page=4#anchor5'),
])
def test_segment_ignored(self, url, segments, expected, config_stub):
config_stub.val.url.incdec_segments = segments
new_url = navigate.incdec(QUrl(url), 1, 'increment')
assert new_url == QUrl(expected)
@pytest.mark.parametrize('url', [
"http://example.com/long/path/but/no/number",
"http://ex4mple.com/number/in/hostname",
"http://example.com:42/number/in/port",
"http://www2.example.com/number/in/subdomain",
"http://example.com/%C3%B6/urlencoded/data",
"http://example.com/number/in/anchor#5",
"http://www2.ex4mple.com:42/all/of/the/%C3%A4bove#5",
"http://localhost/url_encoded_in_query/?v=%3A",
"http://localhost/url_encoded_in_anchor/#%3A",
])
def test_no_number(self, url):
with pytest.raises(navigate.Error):
navigate.incdec(QUrl(url), 1, "increment")
@pytest.mark.parametrize('url, count', [
('http://example.com/page_0.html', 1),
('http://example.com/page_1.html', 2),
])
def test_number_below_0(self, url, count):
with pytest.raises(navigate.Error):
navigate.incdec(QUrl(url), count, 'decrement')
def test_invalid_url(self):
with pytest.raises(urlutils.InvalidUrlError):
navigate.incdec(QUrl(), 1, "increment")
def test_wrong_mode(self):
"""Test if incdec rejects a wrong parameter for inc_or_dec."""
valid_url = QUrl("http://example.com/0")
with pytest.raises(ValueError):
navigate.incdec(valid_url, 1, "foobar")
class TestUp:
@pytest.mark.parametrize('url_suffix, count, expected_suffix', [
('/one/two/three', 1, '/one/two'),
('/one/two/three?foo=bar', 1, '/one/two'),
('/one/two/three', 2, '/one'),
])
def test_up(self, url_suffix, count, expected_suffix):
url_base = 'https://example.com'
url = QUrl(url_base + url_suffix)
assert url.isValid()
new = navigate.path_up(url, count)
assert new == QUrl(url_base + expected_suffix)
def test_invalid_url(self):
with pytest.raises(urlutils.InvalidUrlError):
navigate.path_up(QUrl(), count=1)
class TestStrip:
@pytest.mark.parametrize('url_suffix', [
'?foo=bar',
'#label',
'?foo=bar#label',
])
def test_strip(self, url_suffix):
url_base = 'https://example.com/test'
url = QUrl(url_base + url_suffix)
assert url.isValid()
stripped = navigate.strip(url, count=1)
assert stripped.isValid()
assert stripped == QUrl(url_base)
def test_count(self):
with pytest.raises(navigate.Error, match='Count is not supported'):
navigate.strip(QUrl('https://example.com/'), count=2)
def test_invalid_url(self):
with pytest.raises(urlutils.InvalidUrlError):
navigate.strip(QUrl(), count=1)
|
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
from chainercv.experimental.links import YOLOv2Tiny
from chainercv.links import FasterRCNNVGG16
from chainercv.links import SSD300
from chainercv.links import SSD512
from chainercv.links import YOLOv2
from chainercv.links import YOLOv3
from chainercv.datasets import voc_bbox_label_names
from chainercv.datasets import VOCBboxDataset
from chainercv.visualizations import vis_bbox
def main():
dataset = VOCBboxDataset(year='2007', split='test') \
.slice[[29, 301, 189, 229], 'img']
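    # The sliceable-dataset API above picks four sample images from the
    # VOC2007 test split and keeps only the 'img' component of each example.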
models = [
('Faster R-CNN', FasterRCNNVGG16(pretrained_model='voc07')),
('SSD300', SSD300(pretrained_model='voc0712')),
('SSD512', SSD512(pretrained_model='voc0712')),
('YOLOv2', YOLOv2(pretrained_model='voc0712')),
('YOLOv2 tiny', YOLOv2Tiny(pretrained_model='voc0712')),
('YOLOv3', YOLOv3(pretrained_model='voc0712')),
]
fig = plt.figure(figsize=(30, 20))
for i, img in enumerate(dataset):
for j, (name, model) in enumerate(models):
bboxes, labels, scores = model.predict([img])
bbox, label, score = bboxes[0], labels[0], scores[0]
ax = fig.add_subplot(
len(dataset), len(models), i * len(models) + j + 1)
vis_bbox(
img, bbox, label, score,
label_names=voc_bbox_label_names, ax=ax
)
            # Set Matplotlib parameters
ax.set_aspect('equal')
if i == 0:
font = FontProperties()
font.set_family('serif')
font.set_size(35)
ax.set_title(name, y=1.03, fontproperties=font)
plt.axis('off')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
import sys as _sys
import io
import cherrypy as _cherrypy
from cherrypy._cpcompat import ntou
from cherrypy import _cperror
from cherrypy.lib import httputil
from cherrypy.lib import is_closable_iterator
def downgrade_wsgi_ux_to_1x(environ):
"""Return a new environ dict for WSGI 1.x from the given WSGI u.x environ.
"""
env1x = {}
url_encoding = environ[ntou('wsgi.url_encoding')]
for k, v in environ.copy().items():
if k in [ntou('PATH_INFO'), ntou('SCRIPT_NAME'), ntou('QUERY_STRING')]:
v = v.encode(url_encoding)
elif isinstance(v, str):
v = v.encode('ISO-8859-1')
env1x[k.encode('ISO-8859-1')] = v
return env1x
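# A rough sketch of the downgrade (hypothetical environ values): given
# {'PATH_INFO': '/café', 'wsgi.url_encoding': 'utf-8', ...}, the result maps
# b'PATH_INFO' -> b'/caf\xc3\xa9'; keys become ISO-8859-1 bytes, the three
# path-ish values are re-encoded with wsgi.url_encoding, and any other str
# values are re-encoded as ISO-8859-1.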
class VirtualHost(object):
"""Select a different WSGI application based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different applications. For example::
root = Root()
RootApp = cherrypy.Application(root)
Domain2App = cherrypy.Application(root)
SecureApp = cherrypy.Application(Secure())
vhost = cherrypy._cpwsgi.VirtualHost(
RootApp,
domains={
'www.domain2.example': Domain2App,
'www.domain2.example:443': SecureApp,
},
)
cherrypy.tree.graft(vhost)
"""
default = None
"""Required. The default WSGI application."""
use_x_forwarded_host = True
"""If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying."""
domains = {}
"""A dict of {host header value: application} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding WSGI application
will be called instead of the default. Note that you often need
separate entries for "example.com" and "www.example.com".
In addition, "Host" headers may contain the port number.
"""
def __init__(self, default, domains=None, use_x_forwarded_host=True):
self.default = default
self.domains = domains or {}
self.use_x_forwarded_host = use_x_forwarded_host
def __call__(self, environ, start_response):
domain = environ.get('HTTP_HOST', '')
if self.use_x_forwarded_host:
domain = environ.get('HTTP_X_FORWARDED_HOST', domain)
nextapp = self.domains.get(domain)
if nextapp is None:
nextapp = self.default
return nextapp(environ, start_response)
class InternalRedirector(object):
"""WSGI middleware that handles raised cherrypy.InternalRedirect."""
def __init__(self, nextapp, recursive=False):
self.nextapp = nextapp
self.recursive = recursive
def __call__(self, environ, start_response):
redirections = []
while True:
environ = environ.copy()
try:
return self.nextapp(environ, start_response)
            except _cherrypy.InternalRedirect as ir:
sn = environ.get('SCRIPT_NAME', '')
path = environ.get('PATH_INFO', '')
qs = environ.get('QUERY_STRING', '')
# Add the *previous* path_info + qs to redirections.
old_uri = sn + path
if qs:
old_uri += '?' + qs
redirections.append(old_uri)
if not self.recursive:
# Check to see if the new URI has been redirected to
# already
new_uri = sn + ir.path
if ir.query_string:
new_uri += '?' + ir.query_string
if new_uri in redirections:
ir.request.close()
tmpl = (
'InternalRedirector visited the same URL twice: %r'
)
raise RuntimeError(tmpl % new_uri)
# Munge the environment and try again.
environ['REQUEST_METHOD'] = 'GET'
environ['PATH_INFO'] = ir.path
environ['QUERY_STRING'] = ir.query_string
environ['wsgi.input'] = io.BytesIO()
environ['CONTENT_LENGTH'] = '0'
environ['cherrypy.previous_request'] = ir.request
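# Sketch of a handler that would trigger the middleware above (assumed usage
# of cherrypy.InternalRedirect; the handler is not part of this module):
#
#     class Root:
#         @cherrypy.expose
#         def old(self):
#             raise cherrypy.InternalRedirect('/new', 'a=1')
#
# The redirector then replays the request with PATH_INFO='/new',
# QUERY_STRING='a=1', an empty body, and REQUEST_METHOD forced to GET.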
class ExceptionTrapper(object):
"""WSGI middleware that traps exceptions."""
def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):
self.nextapp = nextapp
self.throws = throws
def __call__(self, environ, start_response):
return _TrappedResponse(
self.nextapp,
environ,
start_response,
self.throws
)
class _TrappedResponse(object):
response = iter([])
def __init__(self, nextapp, environ, start_response, throws):
self.nextapp = nextapp
self.environ = environ
self.start_response = start_response
self.throws = throws
self.started_response = False
self.response = self.trap(
self.nextapp, self.environ, self.start_response,
)
self.iter_response = iter(self.response)
def __iter__(self):
self.started_response = True
return self
def __next__(self):
return self.trap(next, self.iter_response)
def close(self):
if hasattr(self.response, 'close'):
self.response.close()
def trap(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except self.throws:
raise
except StopIteration:
raise
except Exception:
tb = _cperror.format_exc()
_cherrypy.log(tb, severity=40)
if not _cherrypy.request.show_tracebacks:
tb = ''
s, h, b = _cperror.bare_error(tb)
            # What fun.
            s = s.decode('ISO-8859-1')
            h = [
                (k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
                for k, v in h
            ]
if self.started_response:
# Empty our iterable (so future calls raise StopIteration)
self.iter_response = iter([])
else:
self.iter_response = iter(b)
try:
self.start_response(s, h, _sys.exc_info())
except Exception:
# "The application must not trap any exceptions raised by
# start_response, if it called start_response with exc_info.
# Instead, it should allow such exceptions to propagate
# back to the server or gateway."
# But we still log and call close() to clean up ourselves.
_cherrypy.log(traceback=True, severity=40)
raise
if self.started_response:
return b''.join(b)
else:
return b
# WSGI-to-CP Adapter #
class AppResponse(object):
"""WSGI response iterable for CherryPy applications."""
def __init__(self, environ, start_response, cpapp):
self.cpapp = cpapp
try:
self.environ = environ
self.run()
r = _cherrypy.serving.response
outstatus = r.output_status
if not isinstance(outstatus, bytes):
raise TypeError('response.output_status is not a byte string.')
outheaders = []
for k, v in r.header_list:
if not isinstance(k, bytes):
tmpl = 'response.header_list key %r is not a byte string.'
raise TypeError(tmpl % k)
if not isinstance(v, bytes):
tmpl = (
'response.header_list value %r is not a byte string.'
)
raise TypeError(tmpl % v)
outheaders.append((k, v))
            # According to PEP 3333, the response status and headers must be
            # bytes masquerading as unicode; that is, they must be of type
            # "str" but are restricted to code points in the "latin-1" set.
            outstatus = outstatus.decode('ISO-8859-1')
            outheaders = [
                (k.decode('ISO-8859-1'), v.decode('ISO-8859-1'))
                for k, v in outheaders
            ]
self.iter_response = iter(r.body)
self.write = start_response(outstatus, outheaders)
except BaseException:
self.close()
raise
def __iter__(self):
return self
def __next__(self):
return next(self.iter_response)
def close(self):
"""Close and de-reference the current request and response. (Core)"""
streaming = _cherrypy.serving.response.stream
self.cpapp.release_serving()
# We avoid the expense of examining the iterator to see if it's
# closable unless we are streaming the response, as that's the
# only situation where we are going to have an iterator which
# may not have been exhausted yet.
if streaming and is_closable_iterator(self.iter_response):
iter_close = self.iter_response.close
try:
iter_close()
except Exception:
_cherrypy.log(traceback=True, severity=40)
def run(self):
"""Create a Request object using environ."""
env = self.environ.get
local = httputil.Host(
'',
int(env('SERVER_PORT', 80) or -1),
env('SERVER_NAME', ''),
)
remote = httputil.Host(
env('REMOTE_ADDR', ''),
int(env('REMOTE_PORT', -1) or -1),
env('REMOTE_HOST', ''),
)
scheme = env('wsgi.url_scheme')
sproto = env('ACTUAL_SERVER_PROTOCOL', 'HTTP/1.1')
request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
# LOGON_USER is served by IIS, and is the name of the
# user after having been mapped to a local account.
# Both IIS and Apache set REMOTE_USER, when possible.
request.login = env('LOGON_USER') or env('REMOTE_USER') or None
request.multithread = self.environ['wsgi.multithread']
request.multiprocess = self.environ['wsgi.multiprocess']
request.wsgi_environ = self.environ
request.prev = env('cherrypy.previous_request', None)
meth = self.environ['REQUEST_METHOD']
path = httputil.urljoin(
self.environ.get('SCRIPT_NAME', ''),
self.environ.get('PATH_INFO', ''),
)
qs = self.environ.get('QUERY_STRING', '')
path, qs = self.recode_path_qs(path, qs) or (path, qs)
rproto = self.environ.get('SERVER_PROTOCOL')
headers = self.translate_headers(self.environ)
rfile = self.environ['wsgi.input']
request.run(meth, path, qs, rproto, headers, rfile)
headerNames = {
'HTTP_CGI_AUTHORIZATION': 'Authorization',
'CONTENT_LENGTH': 'Content-Length',
'CONTENT_TYPE': 'Content-Type',
'REMOTE_HOST': 'Remote-Host',
'REMOTE_ADDR': 'Remote-Addr',
}
def recode_path_qs(self, path, qs):
# This isn't perfect; if the given PATH_INFO is in the
# wrong encoding, it may fail to match the appropriate config
# section URI. But meh.
old_enc = self.environ.get('wsgi.url_encoding', 'ISO-8859-1')
new_enc = self.cpapp.find_config(
self.environ.get('PATH_INFO', ''),
'request.uri_encoding', 'utf-8',
)
if new_enc.lower() == old_enc.lower():
return
# Even though the path and qs are unicode, the WSGI server
# is required by PEP 3333 to coerce them to ISO-8859-1
# masquerading as unicode. So we have to encode back to
# bytes and then decode again using the "correct" encoding.
try:
return (
path.encode(old_enc).decode(new_enc),
qs.encode(old_enc).decode(new_enc),
)
except (UnicodeEncodeError, UnicodeDecodeError):
# Just pass them through without transcoding and hope.
pass
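        # Worked example (hypothetical request): a client sends /caf%C3%A9;
        # a PEP 3333 server decodes it as ISO-8859-1, handing us 'cafÃ©'.
        # Encoding that back with ISO-8859-1 recovers b'caf\xc3\xa9', which
        # the utf-8 decode above turns into 'café'.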
def translate_headers(self, environ):
"""Translate CGI-environ header names to HTTP header names."""
for cgiName in environ:
# We assume all incoming header keys are uppercase already.
if cgiName in self.headerNames:
yield self.headerNames[cgiName], environ[cgiName]
elif cgiName[:5] == 'HTTP_':
# Hackish attempt at recovering original header names.
translatedHeader = cgiName[5:].replace('_', '-')
yield translatedHeader, environ[cgiName]
class CPWSGIApp(object):
"""A WSGI application object for a CherryPy Application."""
pipeline = [
('ExceptionTrapper', ExceptionTrapper),
('InternalRedirector', InternalRedirector),
]
"""A list of (name, wsgiapp) pairs. Each 'wsgiapp' MUST be a
constructor that takes an initial, positional 'nextapp' argument,
plus optional keyword arguments, and returns a WSGI application
(that takes environ and start_response arguments). The 'name' can
    be any name you choose, and will correspond to keys in self.config."""
head = None
"""Rather than nest all apps in the pipeline on each call, it's only
done the first time, and the result is memoized into self.head. Set
this to None again if you change self.pipeline after calling self."""
config = {}
"""A dict whose keys match names listed in the pipeline. Each
value is a further dict which will be passed to the corresponding
named WSGI callable (from the pipeline) as keyword arguments."""
response_class = AppResponse
"""The class to instantiate and return as the next app in the WSGI chain.
"""
def __init__(self, cpapp, pipeline=None):
self.cpapp = cpapp
self.pipeline = self.pipeline[:]
if pipeline:
self.pipeline.extend(pipeline)
self.config = self.config.copy()
def tail(self, environ, start_response):
"""WSGI application callable for the actual CherryPy application.
You probably shouldn't call this; call self.__call__ instead,
so that any WSGI middleware in self.pipeline can run first.
"""
return self.response_class(environ, start_response, self.cpapp)
def __call__(self, environ, start_response):
head = self.head
if head is None:
# Create and nest the WSGI apps in our pipeline (in reverse order).
# Then memoize the result in self.head.
head = self.tail
for name, callable in self.pipeline[::-1]:
conf = self.config.get(name, {})
head = callable(head, **conf)
self.head = head
return head(environ, start_response)
def namespace_handler(self, k, v):
"""Config handler for the 'wsgi' namespace."""
if k == 'pipeline':
# Note this allows multiple 'wsgi.pipeline' config entries
# (but each entry will be processed in a 'random' order).
# It should also allow developers to set default middleware
# in code (passed to self.__init__) that deployers can add to
# (but not remove) via config.
self.pipeline.extend(v)
elif k == 'response_class':
self.response_class = v
else:
name, arg = k.split('.', 1)
bucket = self.config.setdefault(name, {})
bucket[arg] = v
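# A hedged example of the 'wsgi' config namespace handled above; the names
# below are illustrative, not shipped defaults:
#
#     config = {'/': {
#         'wsgi.pipeline': [('gzip', GzipMiddleware)],
#         'wsgi.gzip.compress_level': 6,
#     }}
#
# namespace_handler() appends ('gzip', GzipMiddleware) to self.pipeline and
# stores {'compress_level': 6} under self.config['gzip'], so the pipeline
# build in __call__ ends up calling GzipMiddleware(nextapp, compress_level=6).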
|
from datetime import timedelta
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.upnp.const import (
CONFIG_ENTRY_SCAN_INTERVAL,
CONFIG_ENTRY_ST,
CONFIG_ENTRY_UDN,
DEFAULT_SCAN_INTERVAL,
DISCOVERY_LOCATION,
DISCOVERY_ST,
DISCOVERY_UDN,
DISCOVERY_USN,
DOMAIN,
)
from homeassistant.components.upnp.device import Device
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from .mock_device import MockDevice
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
async def test_flow_ssdp_discovery(hass: HomeAssistantType):
"""Test config flow: discovered + configured through ssdp."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
discovery_infos = [
{
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_LOCATION: "dummy",
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", AsyncMock(return_value=discovery_infos)):
# Discovered via step ssdp.
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_ST: mock_device.device_type,
ssdp.ATTR_UPNP_UDN: mock_device.udn,
"friendlyName": mock_device.name,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "ssdp_confirm"
# Confirm via step ssdp_confirm.
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_ssdp_discovery_incomplete(hass: HomeAssistantType):
"""Test config flow: incomplete discovery through ssdp."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
discovery_infos = [
{
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_LOCATION: "dummy",
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", AsyncMock(return_value=discovery_infos)):
# Discovered via step ssdp.
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_ST: mock_device.device_type,
# ssdp.ATTR_UPNP_UDN: mock_device.udn, # Not provided.
"friendlyName": mock_device.name,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "incomplete_discovery"
async def test_flow_user(hass: HomeAssistantType):
"""Test config flow: discovered + configured through user."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
discovery_infos = [
{
DISCOVERY_USN: mock_device.unique_id,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_LOCATION: "dummy",
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", AsyncMock(return_value=discovery_infos)):
# Discovered via step user.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# Confirmed via step user.
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={"usn": mock_device.unique_id},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_import(hass: HomeAssistantType):
"""Test config flow: discovered + configured through configuration.yaml."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
discovery_infos = [
{
DISCOVERY_USN: mock_device.unique_id,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_LOCATION: "dummy",
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", AsyncMock(return_value=discovery_infos)):
# Discovered via step import.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == mock_device.name
assert result["data"] == {
CONFIG_ENTRY_ST: mock_device.device_type,
CONFIG_ENTRY_UDN: mock_device.udn,
}
async def test_flow_import_duplicate(hass: HomeAssistantType):
"""Test config flow: discovered, but already configured."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
discovery_infos = [
{
DISCOVERY_USN: mock_device.unique_id,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_LOCATION: "dummy",
}
]
# Existing entry.
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONFIG_ENTRY_UDN: mock_device.udn,
CONFIG_ENTRY_ST: mock_device.device_type,
},
options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
config_entry.add_to_hass(hass)
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", AsyncMock(return_value=discovery_infos)):
# Discovered via step import.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_flow_import_incomplete(hass: HomeAssistantType):
"""Test config flow: incomplete discovery, configured through configuration.yaml."""
udn = "uuid:device_1"
mock_device = MockDevice(udn)
discovery_infos = [
{
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_LOCATION: "dummy",
}
]
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", AsyncMock(return_value=discovery_infos)):
# Discovered via step import.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "incomplete_discovery"
async def test_options_flow(hass: HomeAssistantType):
"""Test options flow."""
# Set up config entry.
udn = "uuid:device_1"
mock_device = MockDevice(udn)
discovery_infos = [
{
DISCOVERY_UDN: mock_device.udn,
DISCOVERY_ST: mock_device.device_type,
DISCOVERY_LOCATION: "http://192.168.1.1/desc.xml",
}
]
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONFIG_ENTRY_UDN: mock_device.udn,
CONFIG_ENTRY_ST: mock_device.device_type,
},
options={CONFIG_ENTRY_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
config_entry.add_to_hass(hass)
config = {
        # No "upnp" key, which ensures no import flow is started.
}
async_discover = AsyncMock(return_value=discovery_infos)
with patch.object(
Device, "async_create_device", AsyncMock(return_value=mock_device)
), patch.object(Device, "async_discover", async_discover):
# Initialisation of component.
await async_setup_component(hass, "upnp", config)
await hass.async_block_till_done()
# DataUpdateCoordinator gets a default of 30 seconds for updates.
coordinator = hass.data[DOMAIN]["coordinators"][mock_device.udn]
assert coordinator.update_interval == timedelta(seconds=DEFAULT_SCAN_INTERVAL)
# Options flow with no input results in form.
result = await hass.config_entries.options.async_init(
config_entry.entry_id,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
# Options flow with input results in update to entry.
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONFIG_ENTRY_SCAN_INTERVAL: 60},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONFIG_ENTRY_SCAN_INTERVAL: 60,
}
# Also updates DataUpdateCoordinator.
assert coordinator.update_interval == timedelta(seconds=60)
|
import io
import os
import google.cloud.storage
from six.moves.urllib import parse as urlparse
import smart_open
_GCS_URL = os.environ.get('SO_GCS_URL')
assert _GCS_URL is not None, 'please set the SO_GCS_URL environment variable'
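# SO_GCS_URL names the bucket and key prefix the benchmarks read and write,
# e.g. (hypothetical): SO_GCS_URL=gs://my-bucket/smart_open_benchmarks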
def initialize_bucket():
client = google.cloud.storage.Client()
parsed = urlparse.urlparse(_GCS_URL)
bucket_name = parsed.netloc
prefix = parsed.path
bucket = client.get_bucket(bucket_name)
blobs = bucket.list_blobs(prefix=prefix)
for blob in blobs:
blob.delete()
def write_read(key, content, write_mode, read_mode, **kwargs):
with smart_open.open(key, write_mode, **kwargs) as fout:
fout.write(content)
with smart_open.open(key, read_mode, **kwargs) as fin:
return fin.read()
def read_length_prefixed_messages(key, read_mode, **kwargs):
result = io.BytesIO()
with smart_open.open(key, read_mode, **kwargs) as fin:
length_byte = fin.read(1)
while len(length_byte):
result.write(length_byte)
msg = fin.read(ord(length_byte))
result.write(msg)
length_byte = fin.read(1)
return result.getvalue()
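# Framing sketch: each message is <1-byte length><payload>, so a stream like
# b'\x03abc\x02hi' carries two messages ('abc' and 'hi'); the helper above
# replays them one small read at a time and returns the original byte stream.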
def test_gcs_readwrite_text(benchmark):
initialize_bucket()
key = _GCS_URL + '/sanity.txt'
    # Deliberately non-ASCII (Cyrillic) payload to exercise utf-8 round-trips;
    # roughly: "with a grenade in my pocket, a pin in my hand".
    text = 'с гранатою в кармане, с чекою в руке'
actual = benchmark(write_read, key, text, 'w', 'r', encoding='utf-8')
assert actual == text
def test_gcs_readwrite_text_gzip(benchmark):
initialize_bucket()
key = _GCS_URL + '/sanity.txt.gz'
    # Another Cyrillic payload; roughly: "those aren't seagulls singing here
    # in a familiar tongue".
    text = 'не чайки здесь запели на знакомом языке'
actual = benchmark(write_read, key, text, 'w', 'r', encoding='utf-8')
assert actual == text
def test_gcs_readwrite_binary(benchmark):
initialize_bucket()
key = _GCS_URL + '/sanity.txt'
binary = b'this is a test'
actual = benchmark(write_read, key, binary, 'wb', 'rb')
assert actual == binary
def test_gcs_readwrite_binary_gzip(benchmark):
initialize_bucket()
key = _GCS_URL + '/sanity.txt.gz'
binary = b'this is a test'
actual = benchmark(write_read, key, binary, 'wb', 'rb')
assert actual == binary
def test_gcs_performance(benchmark):
initialize_bucket()
one_megabyte = io.BytesIO()
for _ in range(1024*128):
one_megabyte.write(b'01234567')
one_megabyte = one_megabyte.getvalue()
key = _GCS_URL + '/performance.txt'
actual = benchmark(write_read, key, one_megabyte, 'wb', 'rb')
assert actual == one_megabyte
def test_gcs_performance_gz(benchmark):
initialize_bucket()
one_megabyte = io.BytesIO()
for _ in range(1024*128):
one_megabyte.write(b'01234567')
one_megabyte = one_megabyte.getvalue()
key = _GCS_URL + '/performance.txt.gz'
actual = benchmark(write_read, key, one_megabyte, 'wb', 'rb')
assert actual == one_megabyte
def test_gcs_performance_small_reads(benchmark):
initialize_bucket()
ONE_MIB = 1024**2
one_megabyte_of_msgs = io.BytesIO()
msg = b'\x0f' + b'0123456789abcde' # a length-prefixed "message"
for _ in range(0, ONE_MIB, len(msg)):
one_megabyte_of_msgs.write(msg)
one_megabyte_of_msgs = one_megabyte_of_msgs.getvalue()
key = _GCS_URL + '/many_reads_performance.bin'
with smart_open.open(key, 'wb') as fout:
fout.write(one_megabyte_of_msgs)
actual = benchmark(read_length_prefixed_messages, key, 'rb', buffering=ONE_MIB)
assert actual == one_megabyte_of_msgs
|
import logging
import requests
from homeassistant.components.device_tracker import DOMAIN
import homeassistant.components.xiaomi.device_tracker as xiaomi
from homeassistant.components.xiaomi.device_tracker import get_scanner
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PLATFORM, CONF_USERNAME
from tests.async_mock import MagicMock, call, patch
_LOGGER = logging.getLogger(__name__)
INVALID_USERNAME = "bob"
TOKEN_TIMEOUT_USERNAME = "tok"
URL_AUTHORIZE = "http://192.168.0.1/cgi-bin/luci/api/xqsystem/login"
URL_LIST_END = "api/misystem/devicelist"
# Tracks whether the device list has already been requested with the
# timed-out token; the first such call returns 401, later calls succeed.
FIRST_CALL = True
def mocked_requests(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
@property
def content(self):
"""Return the content of the response."""
return self.json()
def raise_for_status(self):
"""Raise an HTTPError if status is not 200."""
if self.status_code != 200:
raise requests.HTTPError(self.status_code)
data = kwargs.get("data")
global FIRST_CALL
if data and data.get("username", None) == INVALID_USERNAME:
# deliver an invalid token
return MockResponse({"code": "401", "msg": "Invalid token"}, 200)
if data and data.get("username", None) == TOKEN_TIMEOUT_USERNAME:
# deliver an expired token
return MockResponse(
{
"url": "/cgi-bin/luci/;stok=ef5860/web/home",
"token": "timedOut",
"code": "0",
},
200,
)
if str(args[0]).startswith(URL_AUTHORIZE):
# deliver an authorized token
return MockResponse(
{
"url": "/cgi-bin/luci/;stok=ef5860/web/home",
"token": "ef5860",
"code": "0",
},
200,
)
if str(args[0]).endswith(f"timedOut/{URL_LIST_END}") and FIRST_CALL is True:
FIRST_CALL = False
# deliver an error when called with expired token
return MockResponse({"code": "401", "msg": "Invalid token"}, 200)
if str(args[0]).endswith(URL_LIST_END):
# deliver the device list
return MockResponse(
{
"mac": "1C:98:EC:0E:D5:A4",
"list": [
{
"mac": "23:83:BF:F6:38:A0",
"oname": "12255ff",
"isap": 0,
"parent": "",
"authority": {"wan": 1, "pridisk": 0, "admin": 1, "lan": 0},
"push": 0,
"online": 1,
"name": "Device1",
"times": 0,
"ip": [
{
"downspeed": "0",
"online": "496957",
"active": 1,
"upspeed": "0",
"ip": "192.168.0.25",
}
],
"statistics": {
"downspeed": "0",
"online": "496957",
"upspeed": "0",
},
"icon": "",
"type": 1,
},
{
"mac": "1D:98:EC:5E:D5:A6",
"oname": "CdddFG58",
"isap": 0,
"parent": "",
"authority": {"wan": 1, "pridisk": 0, "admin": 1, "lan": 0},
"push": 0,
"online": 1,
"name": "Device2",
"times": 0,
"ip": [
{
"downspeed": "0",
"online": "347325",
"active": 1,
"upspeed": "0",
"ip": "192.168.0.3",
}
],
"statistics": {
"downspeed": "0",
"online": "347325",
"upspeed": "0",
},
"icon": "",
"type": 0,
},
],
"code": 0,
},
200,
)
    # Unexpected route: fall through and return None.
    _LOGGER.debug("UNKNOWN ROUTE")
@patch(
"homeassistant.components.xiaomi.device_tracker.XiaomiDeviceScanner",
return_value=MagicMock(),
)
async def test_config(xiaomi_mock, hass):
"""Testing minimal configuration."""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_PASSWORD: "passwordTest",
}
)
}
xiaomi.get_scanner(hass, config)
assert xiaomi_mock.call_count == 1
assert xiaomi_mock.call_args == call(config[DOMAIN])
call_arg = xiaomi_mock.call_args[0][0]
assert call_arg["username"] == "admin"
assert call_arg["password"] == "passwordTest"
assert call_arg["host"] == "192.168.0.1"
assert call_arg["platform"] == "device_tracker"
@patch(
"homeassistant.components.xiaomi.device_tracker.XiaomiDeviceScanner",
return_value=MagicMock(),
)
async def test_config_full(xiaomi_mock, hass):
"""Testing full configuration."""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_USERNAME: "alternativeAdminName",
CONF_PASSWORD: "passwordTest",
}
)
}
xiaomi.get_scanner(hass, config)
assert xiaomi_mock.call_count == 1
assert xiaomi_mock.call_args == call(config[DOMAIN])
call_arg = xiaomi_mock.call_args[0][0]
assert call_arg["username"] == "alternativeAdminName"
assert call_arg["password"] == "passwordTest"
assert call_arg["host"] == "192.168.0.1"
assert call_arg["platform"] == "device_tracker"
@patch("requests.get", side_effect=mocked_requests)
@patch("requests.post", side_effect=mocked_requests)
async def test_invalid_credential(mock_get, mock_post, hass):
"""Testing invalid credential handling."""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_USERNAME: INVALID_USERNAME,
CONF_PASSWORD: "passwordTest",
}
)
}
assert get_scanner(hass, config) is None
@patch("requests.get", side_effect=mocked_requests)
@patch("requests.post", side_effect=mocked_requests)
async def test_valid_credential(mock_get, mock_post, hass):
"""Testing valid refresh."""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_USERNAME: "admin",
CONF_PASSWORD: "passwordTest",
}
)
}
scanner = get_scanner(hass, config)
assert scanner is not None
assert 2 == len(scanner.scan_devices())
assert "Device1" == scanner.get_device_name("23:83:BF:F6:38:A0")
assert "Device2" == scanner.get_device_name("1D:98:EC:5E:D5:A6")
@patch("requests.get", side_effect=mocked_requests)
@patch("requests.post", side_effect=mocked_requests)
async def test_token_timed_out(mock_get, mock_post, hass):
"""Testing refresh with a timed out token.
New token is requested and list is downloaded a second time.
"""
config = {
DOMAIN: xiaomi.PLATFORM_SCHEMA(
{
CONF_PLATFORM: xiaomi.DOMAIN,
CONF_HOST: "192.168.0.1",
CONF_USERNAME: TOKEN_TIMEOUT_USERNAME,
CONF_PASSWORD: "passwordTest",
}
)
}
scanner = get_scanner(hass, config)
assert scanner is not None
assert 2 == len(scanner.scan_devices())
assert "Device1" == scanner.get_device_name("23:83:BF:F6:38:A0")
assert "Device2" == scanner.get_device_name("1D:98:EC:5E:D5:A6")
|
import pytest
from synology_dsm.exceptions import (
SynologyDSMException,
SynologyDSMLogin2SAFailedException,
SynologyDSMLogin2SARequiredException,
SynologyDSMLoginInvalidException,
SynologyDSMRequestException,
)
from homeassistant import data_entry_flow, setup
from homeassistant.components import ssdp
from homeassistant.components.synology_dsm.config_flow import CONF_OTP_CODE
from homeassistant.components.synology_dsm.const import (
CONF_VOLUMES,
DEFAULT_PORT,
DEFAULT_PORT_SSL,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SSL,
DEFAULT_TIMEOUT,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_SSDP, SOURCE_USER
from homeassistant.const import (
CONF_DISKS,
CONF_HOST,
CONF_MAC,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import MagicMock, Mock, patch
from tests.common import MockConfigEntry
HOST = "nas.meontheinternet.com"
SERIAL = "mySerial"
HOST_2 = "nas.worldwide.me"
SERIAL_2 = "mySerial2"
PORT = 1234
SSL = True
USERNAME = "Home_Assistant"
PASSWORD = "password"
DEVICE_TOKEN = "Dév!cè_T0k€ñ"
MACS = ["00-11-32-XX-XX-59", "00-11-32-XX-XX-5A"]
@pytest.fixture(name="service")
def mock_controller_service():
"""Mock a successful service."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.information.serial = SERIAL
service_mock.return_value.utilisation.cpu_user_load = 1
service_mock.return_value.storage.disks_ids = ["sda", "sdb", "sdc"]
service_mock.return_value.storage.volumes_ids = ["volume_1"]
service_mock.return_value.network.macs = MACS
yield service_mock
@pytest.fixture(name="service_2sa")
def mock_controller_service_2sa():
"""Mock a successful service with 2SA login."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.login = Mock(
side_effect=SynologyDSMLogin2SARequiredException(USERNAME)
)
service_mock.return_value.information.serial = SERIAL
service_mock.return_value.utilisation.cpu_user_load = 1
service_mock.return_value.storage.disks_ids = ["sda", "sdb", "sdc"]
service_mock.return_value.storage.volumes_ids = ["volume_1"]
service_mock.return_value.network.macs = MACS
yield service_mock
@pytest.fixture(name="service_vdsm")
def mock_controller_service_vdsm():
"""Mock a successful service."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.information.serial = SERIAL
service_mock.return_value.utilisation.cpu_user_load = 1
service_mock.return_value.storage.disks_ids = []
service_mock.return_value.storage.volumes_ids = ["volume_1"]
service_mock.return_value.network.macs = MACS
yield service_mock
@pytest.fixture(name="service_failed")
def mock_controller_service_failed():
"""Mock a failed service."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.information.serial = None
service_mock.return_value.utilisation.cpu_user_load = None
service_mock.return_value.storage.disks_ids = []
service_mock.return_value.storage.volumes_ids = []
service_mock.return_value.network.macs = []
yield service_mock
async def test_user(hass: HomeAssistantType, service: MagicMock):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: HOST,
CONF_PORT: PORT,
CONF_SSL: SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
assert result["data"][CONF_SSL] == SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
service.return_value.information.serial = SERIAL_2
# test without port + False SSL
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: HOST,
CONF_SSL: False,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL_2
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT
assert not result["data"][CONF_SSL]
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_user_2sa(hass: HomeAssistantType, service_2sa: MagicMock):
"""Test user with 2sa authentication config."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "2sa"
# Failed the first time because was too slow to enter the code
service_2sa.return_value.login = Mock(
side_effect=SynologyDSMLogin2SAFailedException
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_OTP_CODE: "000000"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "2sa"
assert result["errors"] == {CONF_OTP_CODE: "otp_failed"}
# Successful login with 2SA code
service_2sa.return_value.login = Mock(return_value=True)
service_2sa.return_value.device_token = DEVICE_TOKEN
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_OTP_CODE: "123456"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT_SSL
assert result["data"][CONF_SSL] == DEFAULT_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") == DEVICE_TOKEN
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_user_vdsm(hass: HomeAssistantType, service_vdsm: MagicMock):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_HOST: HOST,
CONF_PORT: PORT,
CONF_SSL: SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
assert result["data"][CONF_SSL] == SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_import(hass: HomeAssistantType, service: MagicMock):
"""Test import step."""
# import with minimum setup
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT_SSL
assert result["data"][CONF_SSL] == DEFAULT_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
service.return_value.information.serial = SERIAL_2
# import with all
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_HOST: HOST_2,
CONF_PORT: PORT,
CONF_SSL: SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_DISKS: ["sda", "sdb", "sdc"],
CONF_VOLUMES: ["volume_1"],
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL_2
assert result["title"] == HOST_2
assert result["data"][CONF_HOST] == HOST_2
assert result["data"][CONF_PORT] == PORT
assert result["data"][CONF_SSL] == SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"][CONF_DISKS] == ["sda", "sdb", "sdc"]
assert result["data"][CONF_VOLUMES] == ["volume_1"]
async def test_abort_if_already_setup(hass: HomeAssistantType, service: MagicMock):
"""Test we abort if the account is already setup."""
MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
unique_id=SERIAL,
).add_to_hass(hass)
# Should fail, same HOST:PORT (import)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same HOST:PORT (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_login_failed(hass: HomeAssistantType, service: MagicMock):
"""Test when we have errors during login."""
service.return_value.login = Mock(
side_effect=(SynologyDSMLoginInvalidException(USERNAME))
)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_USERNAME: "invalid_auth"}
async def test_connection_failed(hass: HomeAssistantType, service: MagicMock):
"""Test when we have errors during connection."""
service.return_value.login = Mock(
side_effect=SynologyDSMRequestException(IOError("arg"))
)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_HOST: "cannot_connect"}
async def test_unknown_failed(hass: HomeAssistantType, service: MagicMock):
"""Test when we have an unknown error."""
service.return_value.login = Mock(side_effect=SynologyDSMException(None, None))
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
async def test_missing_data_after_login(
hass: HomeAssistantType, service_failed: MagicMock
):
"""Test when we have errors during connection."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "missing_data"}
async def test_form_ssdp_already_configured(
hass: HomeAssistantType, service: MagicMock
):
"""Test ssdp abort when the serial number is already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_MAC: MACS,
},
unique_id=SERIAL,
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_LOCATION: "http://192.168.1.5:5000",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "mydsm",
ssdp.ATTR_UPNP_SERIAL: "001132XXXX59", # Existing in MACS[0], but SSDP does not have `-`
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_form_ssdp(hass: HomeAssistantType, service: MagicMock):
"""Test we can setup from ssdp."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data={
ssdp.ATTR_SSDP_LOCATION: "http://192.168.1.5:5000",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "mydsm",
ssdp.ATTR_UPNP_SERIAL: "001132XXXX99", # MAC address, but SSDP does not have `-`
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "link"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == "192.168.1.5"
assert result["data"][CONF_HOST] == "192.168.1.5"
assert result["data"][CONF_PORT] == 5001
assert result["data"][CONF_SSL] == DEFAULT_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_MAC] == MACS
assert result["data"].get("device_token") is None
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_options_flow(hass: HomeAssistantType, service: MagicMock):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_HOST: HOST,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_MAC: MACS,
},
unique_id=SERIAL,
)
config_entry.add_to_hass(hass)
assert config_entry.options == {}
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
# Scan interval
# Default
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options[CONF_SCAN_INTERVAL] == DEFAULT_SCAN_INTERVAL
assert config_entry.options[CONF_TIMEOUT] == DEFAULT_TIMEOUT
# Manual
result = await hass.config_entries.options.async_init(config_entry.entry_id)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_SCAN_INTERVAL: 2, CONF_TIMEOUT: 30},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options[CONF_SCAN_INTERVAL] == 2
assert config_entry.options[CONF_TIMEOUT] == 30
|
import logging
from typing import Any, Dict, Optional
from urllib.parse import urlparse
from rokuecp import Roku, RokuError
import voluptuous as vol
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_SERIAL,
)
from homeassistant.config_entries import CONN_CLASS_LOCAL_POLL, ConfigFlow
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN # pylint: disable=unused-import
DATA_SCHEMA = vol.Schema({vol.Required(CONF_HOST): str})
ERROR_CANNOT_CONNECT = "cannot_connect"
ERROR_UNKNOWN = "unknown"
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: HomeAssistantType, data: Dict) -> Dict:
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
session = async_get_clientsession(hass)
roku = Roku(data[CONF_HOST], session=session)
device = await roku.update()
return {
"title": device.info.name,
"serial_number": device.info.serial_number,
}
class RokuConfigFlow(ConfigFlow, domain=DOMAIN):
"""Handle a Roku config flow."""
VERSION = 1
CONNECTION_CLASS = CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Set up the instance."""
self.discovery_info = {}
@callback
def _show_form(self, errors: Optional[Dict] = None) -> Dict[str, Any]:
"""Show the form to the user."""
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors=errors or {},
)
async def async_step_user(
self, user_input: Optional[Dict] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by the user."""
if not user_input:
return self._show_form()
errors = {}
try:
info = await validate_input(self.hass, user_input)
except RokuError:
_LOGGER.debug("Roku Error", exc_info=True)
errors["base"] = ERROR_CANNOT_CONNECT
return self._show_form(errors)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unknown error trying to connect")
return self.async_abort(reason=ERROR_UNKNOWN)
await self.async_set_unique_id(info["serial_number"])
self._abort_if_unique_id_configured(updates={CONF_HOST: user_input[CONF_HOST]})
return self.async_create_entry(title=info["title"], data=user_input)
async def async_step_ssdp(
self, discovery_info: Optional[Dict] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by discovery."""
host = urlparse(discovery_info[ATTR_SSDP_LOCATION]).hostname
name = discovery_info[ATTR_UPNP_FRIENDLY_NAME]
serial_number = discovery_info[ATTR_UPNP_SERIAL]
await self.async_set_unique_id(serial_number)
self._abort_if_unique_id_configured(updates={CONF_HOST: host})
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context.update({"title_placeholders": {"name": name}})
self.discovery_info.update({CONF_HOST: host, CONF_NAME: name})
try:
await validate_input(self.hass, self.discovery_info)
except RokuError:
_LOGGER.debug("Roku Error", exc_info=True)
return self.async_abort(reason=ERROR_CANNOT_CONNECT)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unknown error trying to connect")
return self.async_abort(reason=ERROR_UNKNOWN)
return await self.async_step_ssdp_confirm()
async def async_step_ssdp_confirm(
self, user_input: Optional[Dict] = None
) -> Dict[str, Any]:
"""Handle user-confirmation of discovered device."""
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
if user_input is None:
return self.async_show_form(
step_id="ssdp_confirm",
description_placeholders={"name": self.discovery_info[CONF_NAME]},
errors={},
)
return self.async_create_entry(
title=self.discovery_info[CONF_NAME],
data=self.discovery_info,
)
|
from __future__ import annotations
import contextlib
import functools
import io
import os
import logging
import discord
from pathlib import Path
from typing import Callable, TYPE_CHECKING, Union, Dict, Optional
from contextvars import ContextVar
import babel.localedata
from babel.core import Locale
if TYPE_CHECKING:
from redbot.core.bot import Red
__all__ = [
"get_locale",
"set_locale",
"reload_locales",
"cog_i18n",
"Translator",
"get_babel_locale",
"get_babel_regional_format",
"get_locale_from_guild",
"get_regional_format_from_guild",
"set_contextual_locales_from_guild",
]
log = logging.getLogger("red.i18n")
_current_locale = ContextVar("_current_locale", default="en-US")
_current_regional_format = ContextVar("_current_regional_format", default=None)
WAITING_FOR_MSGID = 1
IN_MSGID = 2
WAITING_FOR_MSGSTR = 3
IN_MSGSTR = 4
MSGID = 'msgid "'
MSGSTR = 'msgstr "'
_translators = []
def get_locale() -> str:
return str(_current_locale.get())
def set_locale(locale: str) -> None:
    global _current_locale
    # Recreate the ContextVar so the new locale becomes the default in every
    # context, not only the one this call runs in.
    _current_locale = ContextVar("_current_locale", default=locale)
    reload_locales()
def set_contextual_locale(locale: str) -> None:
_current_locale.set(locale)
reload_locales()
def get_regional_format() -> str:
if _current_regional_format.get() is None:
return str(_current_locale.get())
return str(_current_regional_format.get())
def set_regional_format(regional_format: Optional[str]) -> None:
    global _current_regional_format
    # Same trick as set_locale(): replace the ContextVar so the new default
    # applies in all contexts.
    _current_regional_format = ContextVar("_current_regional_format", default=regional_format)
def set_contextual_regional_format(regional_format: Optional[str]) -> None:
_current_regional_format.set(regional_format)
def reload_locales() -> None:
for translator in _translators:
translator.load_translations()
async def get_locale_from_guild(bot: Red, guild: Optional[discord.Guild]) -> str:
"""
Get locale set for the given guild.
Parameters
----------
bot: Red
The bot's instance.
guild: Optional[discord.Guild]
        The guild the contextual locale is set for.
        Use `None` if the context doesn't involve a guild.
Returns
-------
str
Guild's locale string.
"""
return await bot._i18n_cache.get_locale(guild)
async def get_regional_format_from_guild(bot: Red, guild: Optional[discord.Guild]) -> str:
"""
Get regional format for the given guild.
Parameters
----------
bot: Red
The bot's instance.
guild: Optional[discord.Guild]
        The guild the contextual regional format is set for.
        Use `None` if the context doesn't involve a guild.
    Returns
    -------
    str
        Guild's regional format string.
"""
return await bot._i18n_cache.get_regional_format(guild)
async def set_contextual_locales_from_guild(bot: Red, guild: Optional[discord.Guild]) -> None:
"""
Set contextual locales (locale and regional format) for given guild context.
Parameters
----------
bot: Red
The bot's instance.
guild: Optional[discord.Guild]
        The guild the contextual locales are set for.
        Use `None` if the context doesn't involve a guild.
"""
locale = await get_locale_from_guild(bot, guild)
regional_format = await get_regional_format_from_guild(bot, guild)
set_contextual_locale(locale)
set_contextual_regional_format(regional_format)
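# Sketch of the intended call site (e.g. right before a command is invoked):
#
#     await set_contextual_locales_from_guild(bot, ctx.guild)
#
# after which get_locale() and get_regional_format() reflect that guild's
# settings for the current context.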
def _parse(translation_file: io.TextIOWrapper) -> Dict[str, Dict[str, str]]:
"""
Custom gettext parsing of translation files.
Parameters
----------
translation_file : io.TextIOWrapper
An open text file containing translations.
Returns
-------
    Dict[str, Dict[str, str]]
        A dict mapping the current locale to a dict of the original strings
        and their translations. Empty translated strings are omitted.
"""
step = None
untranslated = ""
translated = ""
translations = {}
locale = get_locale()
translations[locale] = {}
for line in translation_file:
line = line.strip()
if line.startswith(MSGID):
# New msgid
if step is IN_MSGSTR and translated:
# Store the last translation
translations[locale][_unescape(untranslated)] = _unescape(translated)
step = IN_MSGID
untranslated = line[len(MSGID) : -1]
elif line.startswith('"') and line.endswith('"'):
if step is IN_MSGID:
# Line continuing on from msgid
untranslated += line[1:-1]
elif step is IN_MSGSTR:
# Line continuing on from msgstr
translated += line[1:-1]
elif line.startswith(MSGSTR):
# New msgstr
step = IN_MSGSTR
translated = line[len(MSGSTR) : -1]
if step is IN_MSGSTR and translated:
# Store the final translation
translations[locale][_unescape(untranslated)] = _unescape(translated)
return translations
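# Example of the .po subset handled above (illustrative snippet):
#
#     msgid "Hello "
#     "world"
#     msgstr "Bonjour "
#     "le monde"
#
# parses to {locale: {"Hello world": "Bonjour le monde"}} for the currently
# active locale; entries whose msgstr is empty are dropped.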
def _unescape(string):
string = string.replace(r"\\", "\\")
string = string.replace(r"\t", "\t")
string = string.replace(r"\r", "\r")
string = string.replace(r"\n", "\n")
string = string.replace(r"\"", '"')
return string


def get_locale_path(cog_folder: Path, extension: str) -> Path:
    """
    Gets the folder path containing localization files.

    :param Path cog_folder:
        The cog folder that we want localizations for.
    :param str extension:
        Extension of the localization files.
    :return:
        Path of the possible localization file; it may not exist.
    """
    return cog_folder / "locales" / "{}.{}".format(get_locale(), extension)
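

# Illustrative sketch (not part of the module): the path this helper builds.
# The cog folder and locale below are assumed examples.
def _example_locale_path() -> Path:
    # With the current locale set to e.g. "fr-FR", this returns
    # /cogs/mycog/locales/fr-FR.po -- the path is constructed, not checked
    # for existence.
    return get_locale_path(Path("/cogs/mycog"), "po")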


class Translator(Callable[[str], str]):
    """Function to get translated strings at runtime."""

    def __init__(self, name: str, file_location: Union[str, Path, os.PathLike]):
        """
        Initializes an internationalization object.

        Parameters
        ----------
        name : str
            Your cog name.
        file_location : `str` or `pathlib.Path`
            This should always be ``__file__``, otherwise your localizations
            will not load.
        """
        self.cog_folder = Path(file_location).resolve().parent
        self.cog_name = name
        self.translations = {}

        _translators.append(self)

        self.load_translations()

    def __call__(self, untranslated: str) -> str:
        """Translate the given string.

        This looks the string up in the translator's loaded :code:`.po` file,
        with respect to the current locale.
        """
        locale = get_locale()
        try:
            return self.translations[locale][untranslated]
        except KeyError:
            return untranslated

    def load_translations(self):
        """
        Loads the current translations.
        """
        locale = get_locale()

        if locale.lower() == "en-us":
            # Red is written in en-US, no point in loading it
            return
        if locale in self.translations:
            # Locales cannot be loaded twice as they have an entry in
            # self.translations
            return

        locale_path = get_locale_path(self.cog_folder, "po")
        with contextlib.suppress(IOError, FileNotFoundError):
            with locale_path.open(encoding="utf-8") as file:
                self._parse(file)

    def _parse(self, translation_file):
        self.translations.update(_parse(translation_file))

    def _add_translation(self, untranslated, translated):
        untranslated = _unescape(untranslated)
        translated = _unescape(translated)
        if translated:
            # Store under the current locale; self.translations maps
            # locale -> {untranslated: translated}, matching __call__'s lookup.
            self.translations.setdefault(get_locale(), {})[untranslated] = translated
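

# Illustrative usage sketch (not part of the module): the conventional way a
# cog module creates and calls its translator. "MyCog" and the source string
# are assumed examples.
def _example_translator_usage() -> None:
    _ = Translator("MyCog", __file__)
    # Returns the translation for the current locale, or the original string
    # when no translation is loaded.
    print(_("Hello, world!"))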


@functools.lru_cache()
def _get_babel_locale(red_locale: str) -> babel.core.Locale:
    supported_locales = babel.localedata.locale_identifiers()
    try:  # Handles cases where red_locale is already Babel supported
        babel_locale = Locale(*babel.parse_locale(red_locale))
    except (ValueError, babel.core.UnknownLocaleError):
        try:
            babel_locale = Locale(*babel.parse_locale(red_locale, sep="-"))
        except (ValueError, babel.core.UnknownLocaleError):
            # ValueError is raised by `parse_locale` when an invalid locale is
            # given to it. Handle it silently and try a negotiated match
            # before defaulting to "en_US".
            try:
                # Try to find a babel locale that's close to the one used by Red
                babel_locale = Locale(Locale.negotiate([red_locale], supported_locales, sep="-"))
            except (ValueError, TypeError, babel.core.UnknownLocaleError):
                # If we fail to get a close match, default to "en_US"
                babel_locale = Locale("en", "US")
    return babel_locale


def get_babel_locale(locale: Optional[str] = None) -> babel.core.Locale:
    """Function to convert a locale to a `babel.core.Locale`.

    Parameters
    ----------
    locale : Optional[str]
        The locale to convert. If not specified, it defaults to the bot's locale.

    Returns
    -------
    babel.core.Locale
        The babel locale object.
    """
    if locale is None:
        locale = get_locale()
    return _get_babel_locale(locale)


def get_babel_regional_format(regional_format: Optional[str] = None) -> babel.core.Locale:
    """Function to convert a regional format to a `babel.core.Locale`.

    If the ``regional_format`` parameter is passed, this behaves the same as
    `get_babel_locale`.

    Parameters
    ----------
    regional_format : Optional[str]
        The regional format to convert. If not specified, it defaults to the
        bot's regional format.

    Returns
    -------
    babel.core.Locale
        The babel locale object.
    """
    if regional_format is None:
        regional_format = get_regional_format()
    return _get_babel_locale(regional_format)
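

# Illustrative sketch (not part of the module): babel locale objects are
# typically passed to babel's formatting helpers. format_date is real babel
# API; the date value is an assumed example.
def _example_babel_formatting() -> str:
    import datetime

    from babel.dates import format_date

    # Dates and numbers follow the regional format, which may differ from
    # the language used for message translations.
    return format_date(datetime.date(2021, 1, 31), locale=get_babel_regional_format())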


# This import needs to be down here to avoid circular import issues.
# It will be cleaned up at a later date.
# noinspection PyPep8
from . import commands


def cog_i18n(translator: Translator):
    """Get a class decorator to link the translator to this cog."""

    def decorator(cog_class: type):
        cog_class.__translator__ = translator
        for name, attr in cog_class.__dict__.items():
            if isinstance(attr, (commands.Group, commands.Command)):
                attr.translator = translator
                setattr(cog_class, name, attr)
        return cog_class

    return decorator
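

# Illustrative usage sketch (not part of the module): wiring a cog class up
# to its translator with the decorator. MyCog and the command are assumed
# examples, shown commented out to avoid executing at import time.
#
#     _ = Translator("MyCog", __file__)
#
#     @cog_i18n(_)
#     class MyCog(commands.Cog):
#         @commands.command()
#         async def hello(self, ctx):
#             await ctx.send(_("Hello!"))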